
An attempt to create runv containers

by Petr Fedchenkov last modified Dec 27, 2019 11:13 AM

Following https://github.com/hyperhq/runv/blob/master/README.md
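The build and installation steps from the README are not reproduced here; roughly, runv is built from source and hyperstart provides the guest kernel and initrd used below. The commands are a sketch reconstructed from the README of that time and may differ:

# Sketch only; exact steps may differ, see the runv and hyperstart READMEs.
git clone https://github.com/hyperhq/runv && cd runv
./autogen.sh && ./configure && make && sudo make install

# hyperstart supplies the guest kernel and hyper-initrd.img that the runs
# below expect under /var/lib/hyper/.
git clone https://github.com/hyperhq/hyperstart && cd hyperstart
./autogen.sh && ./configure && make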

 

Date: 25.12.2019

Running inside an ESXi 6.7 VM.

Versions / HVM support on the host:

root@xenrunv:/# xl dmesg | grep -i hvm
(XEN) HVM: ASIDs enabled.
(XEN) HVM: VMX enabled
(XEN) HVM: Hardware Assisted Paging (HAP) detected
(XEN) HVM: HAP page sizes: 4kB, 2MB
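For the record, the component versions can be gathered along these lines (the runv --version flag is an assumption; xl info and docker --version are standard):

xl info | grep -E 'xen_version|xen_extra'
runv --version
docker --version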

Creating a container directly via runv:

giggsoff@xenrunv:~/containerbundle$ mkdir rootfs
giggsoff@xenrunv:~/containerbundle$ docker export $(docker create busybox) | tar -C rootfs -xvf -
…
giggsoff@xenrunv:~/containerbundle$ runv spec
giggsoff@xenrunv:~/containerbundle$ ls
config.json  rootfs
giggsoff@xenrunv:~/containerbundle$ sudo runv --debug --kernel /var/lib/hyper/kernel --initrd /var/lib/hyper/hyper-initrd.img run mycontainer
[sudo] password for giggsoff:
got child pid: 7327
check whether child proc is created by libxl: 0
got child pid: 0

 

At this point runv run does not return and no shell prompt appears.
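For reference, the config.json generated by runv spec is a standard OCI runtime spec; a minimal sketch of the fields that matter here (values assumed from runc-style defaults, not copied from this run) is:

{
  "ociVersion": "1.0.1",
  "process": {
    "terminal": true,
    "args": [ "sh" ],
    "cwd": "/"
  },
  "root": {
    "path": "rootfs"
  }
}

The process it is supposed to start is an interactive sh, which is exactly the prompt that never shows up.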

 

In another terminal:

root@xenrunv:~# xl list
Name                                        ID   Mem VCPUs      State   Time(s)
Domain-0                                     0  2048     6     r-----     593.6
vm-qyqCnShRcJ                               22   127     1     -b----       2.2
root@xenrunv:~# xl list -l 22
[
    {
        "domid": 22,
        "config": {
            "c_info": {
                "type": "hvm",
                "name": "vm-qyqCnShRcJ",
                "uuid": "48403dce-87d0-4edd-92ca-7837a5b51266",
                "run_hotplug_scripts": "False"
            },
            "b_info": {
                "max_vcpus": 1,
                "avail_vcpus": [
                    0
                ],
                "max_memkb": 131072,
                "target_memkb": 131072,
                "video_memkb": 0,
                "shadow_memkb": 2048,
                "extra": [
                    "-device",
                    "virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=3",
                    "-chardev",
                    "socket,id=charch0,path=/var/run/hyper/vm-qyqCnShRcJ/hyper.sock,server,nowait",
                    "-device",
                    "virtserialport,bus=virtio-serial0.0,nr=1,chardev=charch0,id=channel0,name=sh.hyper.channel.0",
                    "-chardev",
                    "socket,id=charch1,path=/var/run/hyper/vm-qyqCnShRcJ/tty.sock,server,nowait",
                    "-device",
                    "virtserialport,bus=virtio-serial0.0,nr=2,chardev=charch1,id=channel1,name=sh.hyper.channel.1",
                    "-fsdev",
                    "local,id=virtio9p,path=/var/run/hyper/vm-qyqCnShRcJ/share_dir,security_model=none",
                    "-device",
                    "virtio-9p-pci,fsdev=virtio9p,mount_tag=share_dir"
                ],
                "sched_params": {
                    "sched": "credit",
                    "weight": 1000,
                    "cap": 0
                },
                "kernel": "/var/lib/hyper/kernel",
                "cmdline": "console=ttyS0 pci=nomsi",
                "ramdisk": "/var/lib/hyper/hyper-initrd.img",
                "type.hvm": {
                    "pae": "True",
                    "apic": "False",
                    "acpi": "True",
                    "nographic": "True",
                    "vga": {
                        "kind": "none"
                    },
                    "vnc": {
                        "enable": "False"
                    },
                    "sdl": {
                        "enable": "False"
                    },
                    "spice": {
 
                    },
                    "serial": "unix:/var/run/hyper/vm-qyqCnShRcJ/console.sock,server,nowait",
                    "boot": "c",
                    "rdm": {
 
                    }
                },
                "arch_arm": {
 
                }
            }
        }
    }
]
root@xenrunv:~# runv list
ID            PID         STATUS      BUNDLE                           CREATED                OWNER
mycontainer   7348        created     /home/giggsoff/containerbundle   1970-01-01T00:00:00Z   root
root@xenrunv:~# runv state mycontainer
{
  "ociVersion": "1.0.1",
  "id": "mycontainer",
  "pid": 7348,
  "bundlePath": "/home/giggsoff/containerbundle",
  "rootfsPath": "/home/giggsoff/containerbundle/rootfs",
  "status": "created",
  "created": "1970-01-01T00:00:00Z",
  "owner": "root"
}
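The domain configuration above exposes the guest serial console on a Unix socket (the "serial" entry); to look for guest-side boot output one could attach to it, for example with socat (a suggestion, not something recorded in this session):

sudo socat - UNIX-CONNECT:/var/run/hyper/vm-qyqCnShRcJ/console.sock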

 

To shut everything down, runv delete… and xl destroy… have to be run in separate terminals (a sketch of the sequence follows after the log). The console where runv was started then prints output like this:

got child pid: 7340
check whether child proc is created by libxl: -14
got child pid: 7348
check whether child proc is created by libxl: -14
E1225 13:07:55.849923    7315 vm_states.go:186] SB[vm-qyqCnShRcJ] Shutting down because of an exception: %!(EXTRA string=Destroy pod failed: &status.statusError{Code:2, Message:"send ctl channel error, the hyperstart might have closed", Details:[]*any.Any(nil)})
E1225 13:07:55.850438    7315 sandbox.go:155] StopPod fail: chan: true, response: &{vm-qyqCnShRcJ false Response Chan is broken}
E1225 13:07:55.886053    7315 delete.go:69] cmdDeleteContainer() failed to associated to the vm, err: &os.PathError{Op:"readlink", Path:"/run/runv/mycontainer/sandbox", Err:0x2}
Create new container failed: rpc error: code = Unknown desc = hyperstart closed
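Concretely, the shutdown amounts to something like the following; the domain ID comes from the xl list output above, and the exact invocations are reconstructed rather than copied from the session:

# terminal 1: remove the container state known to runv
sudo runv delete mycontainer
# terminal 2: tear down the leftover Xen domain
sudo xl destroy 22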

Creating a container via Docker:

giggsoff@xenrunv:~/containerbundle$ cat /etc/docker/daemon.json
{
  "default-runtime": "runv",
  "runtimes": {
    "runv": {
      "path": "runv"
    }
  }
}
giggsoff@xenrunv:~/containerbundle$ sudo systemctl restart docker
giggsoff@xenrunv:~/containerbundle$ docker pull busybox
Using default tag: latest
latest: Pulling from library/busybox
Digest: sha256:1828edd60c5efd34b2bf5dd3282ec0cc04d47b2ff9caa0b6d4f07a21d1c08084
Status: Image is up to date for busybox:latest
giggsoff@xenrunv:~/containerbundle$ docker run --rm -it busybox

 

Again, no shell prompt appears.
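It is also worth confirming that Docker actually picked up runv as its default runtime; this check was not part of the original session:

docker info --format '{{.DefaultRuntime}}'
# expected output: runv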

In another terminal:

 

root@xenrunv:/boot# xl list
Name                                        ID   Mem VCPUs      State   Time(s)
Domain-0                                     0  2048     6     r-----     771.1
vm-VEYVnifEqN                               25   127     1     -b----       2.3
root@xenrunv:/boot# docker ps --all
CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
feb5a54f4982        busybox             "sh"                51 seconds ago      Created                                 cranky_engelbart
root@xenrunv:/boot# xl list -l 25
[
    {
        "domid": 25,
        "config": {
            "c_info": {
                "type": "hvm",
                "name": "vm-VEYVnifEqN",
                "uuid": "be3dfa4a-b811-4ef5-a4a0-780f50a9219f",
                "run_hotplug_scripts": "False"
            },
            "b_info": {
                "max_vcpus": 1,
                "avail_vcpus": [
                    0
                ],
                "max_memkb": 131072,
                "target_memkb": 131072,
                "video_memkb": 0,
                "shadow_memkb": 2048,
                "extra": [
                    "-device",
                    "virtio-serial-pci,id=virtio-serial0,bus=pci.0,addr=3",
                    "-chardev",
                    "socket,id=charch0,path=/var/run/hyper/vm-VEYVnifEqN/hyper.sock,server,nowait",
                    "-device",
                    "virtserialport,bus=virtio-serial0.0,nr=1,chardev=charch0,id=channel0,name=sh.hyper.channel.0",
                    "-chardev",
                    "socket,id=charch1,path=/var/run/hyper/vm-VEYVnifEqN/tty.sock,server,nowait",
                    "-device",
                    "virtserialport,bus=virtio-serial0.0,nr=2,chardev=charch1,id=channel1,name=sh.hyper.channel.1",
                    "-fsdev",
                    "local,id=virtio9p,path=/var/run/hyper/vm-VEYVnifEqN/share_dir,security_model=none",
                    "-device",
                    "virtio-9p-pci,fsdev=virtio9p,mount_tag=share_dir"
                ],
                "sched_params": {
                    "sched": "credit",
                    "weight": 1000,
                    "cap": 0
                },
                "kernel": "/var/lib/hyper/kernel",
                "cmdline": "console=ttyS0 pci=nomsi",
                "ramdisk": "/var/lib/hyper/hyper-initrd.img",
                "type.hvm": {
                    "pae": "True",
                    "apic": "False",
                    "acpi": "True",
                    "nographic": "True",
                    "vga": {
                        "kind": "none"
                    },
                    "vnc": {
                        "enable": "False"
                    },
                    "sdl": {
                        "enable": "False"
                    },
                    "spice": {
 
                    },
                    "serial": "unix:/var/run/hyper/vm-VEYVnifEqN/console.sock,server,nowait",
                    "boot": "c",
                    "rdm": {
 
                    }
                },
                "arch_arm": {
 
                }
            },
            "nics": [
                {
                    "devid": 0,
                    "mtu": 1492,
                    "model": "e1000",
                    "mac": "02:42:ac:11:00:02",
                    "ip": "172.17.0.2/16",
                    "bridge": "runv0",
                    "ifname": "vm-VEYVnifEq0",
                    "nictype": "vif_ioemu",
                    "gatewaydev": "runv0"
                }
            ]
        }
    }
]
root@xenrunv:/boot# ping -c 5 172.17.0.2
PING 172.17.0.2 (172.17.0.2) 56(84) bytes of data.
64 bytes from 172.17.0.2: icmp_seq=1 ttl=64 time=0.076 ms
64 bytes from 172.17.0.2: icmp_seq=2 ttl=64 time=0.102 ms
64 bytes from 172.17.0.2: icmp_seq=3 ttl=64 time=0.079 ms
64 bytes from 172.17.0.2: icmp_seq=4 ttl=64 time=0.107 ms
64 bytes from 172.17.0.2: icmp_seq=5 ttl=64 time=0.101 ms
 
--- 172.17.0.2 ping statistics ---
5 packets transmitted, 5 received, 0% packet loss, time 4077ms
rtt min/avg/max/mdev = 0.076/0.093/0.107/0.012 ms
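The guest answers on the address Docker allocated, attached to the runv0 bridge listed in the nics section above; the bridge can be inspected with, for example:

ip addr show runv0
ip link show master runv0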

During removal (again via two terminal windows) the following appears:

docker: Error response from daemon: OCI runtime create failed: unable to retrieve OCI runtime error (open /run/containerd/io.containerd.runtime.v1.linux/moby/feb5a54f49825ba2f6bd7e88ab80b5a27c235ff89e1e61c435ce64b1fceb6622/log.json: no such file or directory): runv did not terminate sucessfully: E1225 13:33:34.119488    8363 network.go:139] rpc error: code = Unknown desc = json: failed to send <add interface> command to hyperstart: hyperstart closed
: unknown.