weixin_39986973
2020-12-01 21:52 阅读 6

Can't create kata container with 16G memory

Description of problem

containerd-shim-kata-v2 is unable to create a kata container with 16G memory; the error message is:

level=error msg=\"failed online device\" debug_console=false error=\"write /sys/devices/system/memory/memory36/online: operation not permitted\" name=kata-agent pid=59 source=agent subsystem=udevlistener uevent-action=add uevent-devname= uevent-devpath=/devices/system/memory/memory36 uevent-seqnum=979 uevent-subsystem=memory

But I can create a kata container with 8G of memory or less.

Expected result

A kata container with 16G memory can be created.

Actual result

See the error message above.

Show kata-collect-data.sh details

# Meta details Running `kata-collect-data.sh` version `1.10.1 (commit )` at `2020-07-12.19:06:49.467408596+0800`. --- Runtime is `/bin/kata-runtime`. # `kata-env` Output of "`/bin/kata-runtime kata-env`":

toml
[Meta]
  Version = "1.0.23"

[Runtime]
  Debug = true
  Trace = false
  DisableGuestSeccomp = true
  DisableNewNetNs = false
  SandboxCgroupOnly = false
  Path = "/usr/bin/kata-runtime"
  [Runtime.Version]
    Semver = "1.10.1"
    Commit = ""
    OCI = "1.0.1-dev"
  [Runtime.Config]
    Path = "/usr/share/defaults/kata-containers/configuration.toml"

[Hypervisor]
  MachineType = "pc"
  Version = "QEMU emulator version 4.1.1\nCopyright (c) 2003-2019 Fabrice Bellard and the QEMU Project developers"
  Path = "/usr/bin/qemu-vanilla-system-x86_64"
  BlockDeviceDriver = "virtio-scsi"
  EntropySource = "/dev/urandom"
  Msize9p = 8192
  MemorySlots = 10
  Debug = true
  UseVSock = false
  SharedFS = "virtio-9p"

[Image]
  Path = "/usr/share/kata-containers/kata-containers-image.img"

[Kernel]
  Path = "/usr/share/kata-containers/vmlinuz-4.18.0-7.1.mt20200709.git9b36caf.container"
  Parameters = "systemd.unit=kata-containers.target systemd.mask=systemd-networkd.service systemd.mask=systemd-networkd.socket agent.log=debug vsyscall=emulate rw"

[Initrd]
  Path = ""

[Proxy]
  Type = "kataProxy"
  Version = "kata-proxy version 1.10.1"
  Path = "/usr/libexec/kata-containers/kata-proxy"
  Debug = true

[Shim]
  Type = "kataShim"
  Version = "kata-shim version 1.10.1"
  Path = "/usr/libexec/kata-containers/kata-shim"
  Debug = true

[Agent]
  Type = "kata"
  Debug = true
  Trace = false
  TraceMode = ""
  TraceType = ""

[Host]
  Kernel = "4.18.0-80.mt20191225.323.el8_0.x86_64"
  Architecture = "amd64"
  VMContainerCapable = true
  SupportVSocks = true
  [Host.Distro]
    Name = "CentOS Linux"
    Version = "7"
  [Host.CPU]
    Vendor = "GenuineIntel"
    Model = "Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz"

[Netmon]
  Version = "kata-netmon version 1.10.1"
  Path = "/usr/libexec/kata-containers/kata-netmon"
  Debug = true
  Enable = false
--- # Runtime config files ## Runtime default config files

/etc/kata-containers/configuration.toml
/usr/share/defaults/kata-containers/configuration.toml
## Runtime config file contents Config file `/etc/kata-containers/configuration.toml` not found Output of "`cat "/usr/share/defaults/kata-containers/configuration.toml"`":
toml
# Copyright (c) 2017-2019 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#

# XXX: WARNING: this file is auto-generated.
# XXX:
# XXX: Source file: "cli/config/configuration-qemu.toml.in"
# XXX: Project:
# XXX:   Name: Kata Containers
# XXX:   Type: kata

[hypervisor.qemu]
path = "/usr/bin/qemu-vanilla-system-x86_64"
kernel = "/usr/share/kata-containers/vmlinuz.container"
image = "/usr/share/kata-containers/kata-containers.img"
machine_type = "pc"

# Optional space-separated list of options to pass to the guest kernel.
# For example, use `kernel_params = "vsyscall=emulate"` if you are having
# trouble running pre-2.15 glibc.
#
# WARNING: - any parameter specified here will take priority over the default
# parameter value of the same name used to start the virtual machine.
# Do not set values here unless you understand the impact of doing so as you
# may stop the virtual machine from booting.
# To see the list of default parameters, enable hypervisor debug, create a
# container and look for 'default-kernel-parameters' log entries.
kernel_params = "vsyscall=emulate rw"

# Path to the firmware.
# If you want that qemu uses the default firmware leave this option empty
firmware = ""

# Machine accelerators
# comma-separated list of machine accelerators to pass to the hypervisor.
# For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
machine_accelerators=""

# Default number of vCPUs per SB/VM:
# unspecified or 0                --> will be set to 1
# < 0                             --> will be set to the actual number of physical cores
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores      --> will be set to the actual number of physical cores
default_vcpus = 1

# Default maximum number of vCPUs per SB/VM:
# unspecified or == 0             --> will be set to the actual number of physical cores or to the maximum number
#                                     of vCPUs supported by KVM if that number is exceeded
# > 0 <= number of physical cores --> will be set to the specified number
# > number of physical cores      --> will be set to the actual number of physical cores or to the maximum number
#                                     of vCPUs supported by KVM if that number is exceeded
# WARNING: Depending of the architecture, the maximum number of vCPUs supported by KVM is used when
# the actual number of physical cores is greater than it.
# WARNING: Be aware that this value impacts the virtual machine's memory footprint and CPU
# hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
# can be added to a SB/VM, but the memory footprint will be big. Another example, with
# `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
# vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
# unless you know what are you doing.
default_maxvcpus = 0

# Bridges can be used to hot plug devices.
# Limitations:
# * Currently only pci bridges are supported
# * Up to 30 devices per bridge can be hot plugged.
# * Up to 5 PCI bridges can be cold plugged per VM.
#   This limitation could be a bug in qemu or in the kernel
# Default number of bridges per SB/VM:
# unspecified or 0   --> will be set to 1
# > 1 <= 5           --> will be set to the specified number
# > 5                --> will be set to 5
default_bridges = 1

# Default memory size in MiB for SB/VM.
# If unspecified then it will be set 256 MiB.
default_memory = 256
#
# Default memory slots per SB/VM.
# If unspecified then it will be set 10.
# This will determine the number of times that memory can be hot-added to the sandbox/VM.
#memory_slots = 10

# The size in MiB will be added to the max memory of the hypervisor.
# It is the memory address space for the NVDIMM device.
# If set block storage driver (block_device_driver) to "nvdimm",
# should set memory_offset to the size of block device.
# Default 0
#memory_offset = 0

# Disable block device from being used for a container's rootfs.
# In case of a storage driver like devicemapper where a container's
# root file system is backed by a block device, the block device is passed
# directly to the hypervisor for performance reasons.
# This flag prevents the block device from being passed to the hypervisor,
# 9pfs is used instead to pass the rootfs.
disable_block_device_use = false

# Shared file system type:
#   - virtio-9p (default)
#   - virtio-fs
shared_fs = "virtio-9p"

# Path to vhost-user-fs daemon.
virtio_fs_daemon = "/usr/bin/virtiofsd"

# Default size of DAX cache in MiB
virtio_fs_cache_size = 1024

# Extra args for virtiofsd daemon
#
# Format example:
#   ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"]
#
# see `virtiofsd -h` for possible options.
virtio_fs_extra_args = []

# Cache mode:
#
#  - none
#    Metadata, data, and pathname lookup are not cached in guest. They are
#    always fetched from host and any changes are immediately pushed to host.
#
#  - auto
#    Metadata and pathname lookup cache expires after a configured amount of
#    time (default is 1 second). Data is cached while the file is open (close
#    to open consistency).
#
#  - always
#    Metadata, data, and pathname lookup are cached in guest and never expire.
virtio_fs_cache = "always"

# Block storage driver to be used for the hypervisor in case the container
# rootfs is backed by a block device. This is virtio-scsi, virtio-blk
# or nvdimm.
block_device_driver = "virtio-scsi"

# Specifies cache-related options will be set to block devices or not.
# Default false
#block_device_cache_set = true

# Specifies cache-related options for block devices.
# Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
# Default false
#block_device_cache_direct = true

# Specifies cache-related options for block devices.
# Denotes whether flush requests for the device are ignored.
# Default false
#block_device_cache_noflush = true

# Enable iothreads (data-plane) to be used. This causes IO to be
# handled in a separate IO thread. This is currently only implemented
# for SCSI.
#
enable_iothreads = false

# Enable pre allocation of VM RAM, default false
# Enabling this will result in lower container density
# as all of the memory will be allocated and locked
# This is useful when you want to reserve all the memory
# upfront or in the cases where you want memory latencies
# to be very predictable
# Default false
#enable_mem_prealloc = true

# Enable huge pages for VM RAM, default false
# Enabling this will result in the VM memory
# being allocated using huge pages.
# This is useful when you want to use vhost-user network
# stacks within the container. This will automatically
# result in memory pre allocation
#enable_hugepages = true

# Enable file based guest memory support. The default is an empty string which
# will disable this feature. In the case of virtio-fs, this is enabled
# automatically and '/dev/shm' is used as the backing folder.
# This option will be ignored if VM templating is enabled.
#file_mem_backend = ""

# Enable swap of vm memory. Default false.
# The behaviour is undefined if mem_prealloc is also set to true
#enable_swap = true

# This option changes the default hypervisor and kernel parameters
# to enable debug output where available. This extra output is added
# to the proxy logs, but only when proxy debug is also enabled.
#
# Default false
enable_debug = true

# Disable the customizations done in the runtime when it detects
# that it is running on top a VMM. This will result in the runtime
# behaving as it would when running on bare metal.
#
#disable_nesting_checks = true

# This is the msize used for 9p shares. It is the number of bytes
# used for 9p packet payload.
#msize_9p = 8192

# If true and vsocks are supported, use vsocks to communicate directly
# with the agent and no proxy is started, otherwise use unix
# sockets and start a proxy to communicate with the agent.
# Default false
#use_vsock = true

# VFIO devices are hotplugged on a bridge by default.
# Enable hotplugging on root bus. This may be required for devices with
# a large PCI bar, as this is a current limitation with hotplugging on
# a bridge. This value is valid for "pc" machine type.
# Default false
#hotplug_vfio_on_root_bus = true

# If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
# security (vhost-net runs ring0) for network I/O performance.
#disable_vhost_net = true

#
# Default entropy source.
# The path to a host source of entropy (including a real hardware RNG)
# /dev/urandom and /dev/random are two main options.
# Be aware that /dev/random is a blocking source of entropy.  If the host
# runs out of entropy, the VMs boot time will increase leading to get startup
# timeouts.
# The source of entropy /dev/urandom is non-blocking and provides a
# generally acceptable source of entropy. It should work well for pretty much
# all practical purposes.
#entropy_source= "/dev/urandom"

# Path to OCI hook binaries in the *guest rootfs*.
# This does not affect host-side hooks which must instead be added to
# the OCI spec passed to the runtime.
#
# You can create a rootfs with hooks by customizing the osbuilder scripts:
# https://github.com/kata-containers/osbuilder
#
# Hooks must be stored in a subdirectory of guest_hook_path according to their
# hook type, i.e. "guest_hook_path/{prestart,postart,poststop}".
# The agent will scan these directories for executable files and add them, in
# lexicographical order, to the lifecycle of the guest container.
# Hooks are executed in the runtime namespace of the guest. See the official documentation:
# https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
# Warnings will be logged if any error is encountered while scanning for hooks,
# but it will not abort container execution.
guest_hook_path = "/usr/libexec/oci/hooks.d"

[factory]
# VM templating support. Once enabled, new VMs are created from template
# using vm cloning. They will share the same initial kernel, initramfs and
# agent memory by mapping it readonly. It helps speeding up new container
# creation and saves a lot of memory if there are many kata containers running
# on the same host.
#
# When disabled, new VMs are created from scratch.
#
# Note: Requires "initrd=" to be set ("image=" is not supported).
#
# Default false
#enable_template = true

# Specifies the path of template.
#
# Default "/run/vc/vm/template"
#template_path = "/run/vc/vm/template"

# The number of caches of VMCache:
# unspecified or == 0   --> VMCache is disabled
# > 0                   --> will be set to the specified number
#
# VMCache is a function that creates VMs as caches before using it.
# It helps speed up new container creation.
# The function consists of a server and some clients communicating
# through Unix socket.  The protocol is gRPC in protocols/cache/cache.proto.
# The VMCache server will create some VMs and cache them by factory cache.
# It will convert the VM to gRPC format and transport it when it gets
# a request from a client.
# Factory grpccache is the VMCache client.  It will request gRPC format
# VM and convert it back to a VM.  If VMCache function is enabled,
# kata-runtime will request VM from factory grpccache when it creates
# a new sandbox.
#
# Default 0
#vm_cache_number = 0

# Specify the address of the Unix socket that is used by VMCache.
#
# Default /var/run/kata-containers/cache.sock
#vm_cache_endpoint = "/var/run/kata-containers/cache.sock"

[proxy.kata]
path = "/usr/libexec/kata-containers/kata-proxy"

# If enabled, proxy messages will be sent to the system log
# (default: disabled)
enable_debug = true

[shim.kata]
path = "/usr/libexec/kata-containers/kata-shim"

# If enabled, shim messages will be sent to the system log
# (default: disabled)
enable_debug = true

# If enabled, the shim will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
#
# Note: By default, the shim runs in a separate network namespace. Therefore,
# to allow it to send trace details to the Jaeger agent running on the host,
# it is necessary to set 'disable_new_netns=true' so that it runs in the host
# network namespace.
#
# (default: disabled)
#enable_tracing = true

[agent.kata]
# If enabled, make the agent display debug-level messages.
# (default: disabled)
enable_debug = true

# Enable agent tracing.
#
# If enabled, the default trace mode is "dynamic" and the
# default trace type is "isolated". The trace mode and type are set
# explicitly with the `trace_type=` and `trace_mode=` options.
#
# Notes:
#
# - Tracing is ONLY enabled when `enable_tracing` is set: explicitly
#   setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing`
#   will NOT activate agent tracing.
#
# - See https://github.com/kata-containers/agent/blob/master/TRACING.md for
#   full details.
#
# (default: disabled)
#enable_tracing = true
#
#trace_mode = "dynamic"
#trace_type = "isolated"

# Comma separated list of kernel modules and their parameters.
# These modules will be loaded in the guest kernel using modprobe(8).
# The following example can be used to load two kernel modules with parameters
#  - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
# The first word is considered as the module name and the rest as its parameters.
# Container will not be started when:
#  * A kernel module is specified and the modprobe command is not installed in the guest
#    or it fails loading the module.
#  * The module is not available in the guest or it doesn't meet the guest kernel
#    requirements, like architecture and version.
#
kernel_modules=[]


[netmon]
# If enabled, the network monitoring process gets started when the
# sandbox is created. This allows for the detection of some additional
# network being added to the existing network namespace, after the
# sandbox has been created.
# (default: disabled)
#enable_netmon = true

# Specify the path to the netmon binary.
path = "/usr/libexec/kata-containers/kata-netmon"

# If enabled, netmon messages will be sent to the system log
# (default: disabled)
enable_debug = true

[runtime]
# If enabled, the runtime will log additional debug messages to the
# system log
# (default: disabled)
enable_debug = true
#
# Internetworking model
# Determines how the VM should be connected to the
# the container network interface
# Options:
#
#   - macvtap
#     Used when the Container network interface can be bridged using
#     macvtap.
#
#   - none
#     Used when customize network. Only creates a tap device. No veth pair.
#
#   - tcfilter
#     Uses tc filter rules to redirect traffic from the network interface
#     provided by plugin to a tap interface connected to the VM.
#
internetworking_model="macvtap"

# disable guest seccomp
# Determines whether container seccomp profiles are passed to the virtual
# machine and applied by the kata agent. If set to true, seccomp is not applied
# within the guest
# (default: true)
disable_guest_seccomp=true

# If enabled, the runtime will create opentracing.io traces and spans.
# (See https://www.jaegertracing.io/docs/getting-started).
# (default: disabled)
#enable_tracing = true

# If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
# This option may have some potential impacts to your host. It should only be used when you know what you're doing.
# `disable_new_netns` conflicts with `enable_netmon`
# `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
# with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
# (like OVS) directly.
# If you are using docker, `disable_new_netns` only works with `docker run --net=none`
# (default: false)
#disable_new_netns = true

# if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
# The container cgroups in the host are not created, just one single cgroup per sandbox.
# The sandbox cgroup is not constrained by the runtime
# The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
# The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
# See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType
sandbox_cgroup_only=false

# Enabled experimental feature list, format: ["a", "b"].
# Experimental features are features not stable enough for production,
# They may break compatibility, and are prepared for a big version bump.
# Supported experimental features:
# 1. "newstore": new persist storage driver which breaks backward compatibility,
#               expected to move out of experimental in 2.0.0.
# (default: [])
experimental=[]
--- # KSM throttler ## version Output of "` --version`":

/bin/kata-collect-data.sh: line 178: --version: command not found
## systemd service # Image details
yaml
---
osbuilder:
  url: "https://github.com/kata-containers/osbuilder"
  version: "unknown"
rootfs-creation-time: "2020-06-30T02:30:52.592214132+0000Z"
description: "osbuilder rootfs"
file-format-version: "0.0.2"
architecture: "x86_64"
base-distro:
  name: "Centos"
  version: "7"
  packages:
    default:
      - "chrony"
      - "iptables"
      - "systemd"
      - "yajl"
    extra:
      - "bash"
      - "coreutils"
      - "net-tools"
      - "vim-minimal"
agent:
  url: "https://github.com/kata-containers/agent"
  name: "kata-agent"
  version: "1.10.1-5640d4c42c179bdabc011f4133bae04f7fd3e7dc"
  agent-is-init-daemon: "no"
--- # Initrd details No initrd --- # Logfiles ## Runtime logs Recent runtime problems found in system journal:

time="2020-07-07T17:28:17.417797199+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kill name=kata-runtime pid=313918 source=katautils
time="2020-07-07T17:28:17.53226139+08:00" level=warning msg="no such file or directory: /run/kata-containers/shared/sandboxes/13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15/13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15/rootfs"
time="2020-07-07T17:28:17.537913573+08:00" level=warning msg="Agent did not stop sandbox" arch=amd64 command=kill container=13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15 error="rpc error: code = Unknown desc = fork/exec /usr/libexec/oci/hooks.d/poststop/oci-systemd-hook: no such file or directory" name=kata-runtime pid=313918 sandbox=13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15 sandboxid=13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15 source=virtcontainers subsystem=sandbox
time="2020-07-07T17:28:17.540532561+08:00" level=info msg="sanner return error: read unix @->/run/vc/vm/13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15/qmp.sock: use of closed network connection" arch=amd64 command=kill container=13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15 name=kata-runtime pid=313918 sandbox=13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15 source=virtcontainers subsystem=qmp
time="2020-07-07T17:28:17.576583125+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kill name=kata-runtime pid=313933 source=katautils
time="2020-07-07T17:28:17.61777812+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=delete name=kata-runtime pid=313947 source=katautils
time="2020-07-07T17:28:17.628614858+08:00" level=warning msg="sandox cgroups path is empty" arch=amd64 command=delete container=13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15 name=kata-runtime pid=313947 sandbox=13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15 source=virtcontainers subsystem=sandbox
time="2020-07-07T17:28:18.017272946+08:00" level=error msg="failed to cleanup vm share path /run/kata-containers/shared/sandboxes/13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15" arch=amd64 command=delete container=13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15 error="remove /run/kata-containers/shared/sandboxes/13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15/7e7dafbed5b20ec675cca1125371fe60e68b2a2f0d4eccfdb57e71e5834152b3-ff26fa59370e3fd9-serviceaccount: device or resource busy" name=kata-runtime pid=313947 sandbox=13c0a6a60f15ef01489233d15c2fca3fe442030a3f8ef300e33ccd2ebc729e15 source=virtcontainers subsystem=kata_agent
time="2020-07-07T17:28:19.975529619+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kill name=kata-runtime pid=314150 source=katautils
time="2020-07-07T17:28:20.113459929+08:00" level=error msg="rpc error: code = Unknown desc = fork/exec /usr/libexec/oci/hooks.d/poststop/oci-systemd-hook: no such file or directory" arch=amd64 command=kill container=5d83f790ad3068e5134c0d9cb2550a5fc3b1b46fe23466880077a9e1f380681e name=kata-runtime pid=314150 sandbox=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d source=runtime
time="2020-07-07T17:28:20.149589262+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kill name=kata-runtime pid=314190 source=katautils
time="2020-07-07T17:28:20.19308302+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kill name=kata-runtime pid=314204 source=katautils
time="2020-07-07T17:28:20.198502593+08:00" level=error msg="container not running" arch=amd64 command=kill container=5d83f790ad3068e5134c0d9cb2550a5fc3b1b46fe23466880077a9e1f380681e name=kata-runtime pid=314204 sandbox=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d source=runtime
time="2020-07-07T17:28:20.231648341+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=delete name=kata-runtime pid=314211 source=katautils
time="2020-07-07T17:28:20.416717421+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kill name=kata-runtime pid=314290 source=katautils
time="2020-07-07T17:28:20.532044236+08:00" level=warning msg="no such file or directory: /run/kata-containers/shared/sandboxes/6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d/6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d/rootfs"
time="2020-07-07T17:28:20.53819644+08:00" level=warning msg="Agent did not stop sandbox" arch=amd64 command=kill container=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d error="rpc error: code = Unknown desc = fork/exec /usr/libexec/oci/hooks.d/poststop/oci-systemd-hook: no such file or directory" name=kata-runtime pid=314290 sandbox=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d sandboxid=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d source=virtcontainers subsystem=sandbox
time="2020-07-07T17:28:20.540993878+08:00" level=info msg="sanner return error: read unix @->/run/vc/vm/6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d/qmp.sock: use of closed network connection" arch=amd64 command=kill container=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d name=kata-runtime pid=314290 sandbox=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d source=virtcontainers subsystem=qmp
time="2020-07-07T17:28:20.578556378+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kill name=kata-runtime pid=314320 source=katautils
time="2020-07-07T17:28:20.62312935+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=delete name=kata-runtime pid=314336 source=katautils
time="2020-07-07T17:28:20.633449245+08:00" level=warning msg="sandox cgroups path is empty" arch=amd64 command=delete container=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d name=kata-runtime pid=314336 sandbox=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d source=virtcontainers subsystem=sandbox
time="2020-07-07T17:28:20.850291626+08:00" level=error msg="failed to cleanup vm share path /run/kata-containers/shared/sandboxes/6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d" arch=amd64 command=delete container=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d error="remove /run/kata-containers/shared/sandboxes/6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d/5d83f790ad3068e5134c0d9cb2550a5fc3b1b46fe23466880077a9e1f380681e-43a73ec4ea6ceb5c-serviceaccount: device or resource busy" name=kata-runtime pid=314336 sandbox=6b43ec0812edc1fbb53ef7c067afcdc2029583c0ed51da67b1d0dd1a52104b7d source=virtcontainers subsystem=kata_agent
time="2020-07-07T17:28:33.141407414+08:00" level=warning msg="Agent did not stop sandbox" arch=amd64 command=kill container=3f08bc356e212e945e2dcee47f174d3324be232340f8c0734a2486f773130f5c error="rpc error: code = Unavailable desc = transport is closing" name=kata-runtime pid=313837 sandbox=3f08bc356e212e945e2dcee47f174d3324be232340f8c0734a2486f773130f5c sandboxid=3f08bc356e212e945e2dcee47f174d3324be232340f8c0734a2486f773130f5c source=virtcontainers subsystem=sandbox
time="2020-07-07T17:28:33.144770055+08:00" level=info msg="sanner return error: read unix @->/run/vc/vm/3f08bc356e212e945e2dcee47f174d3324be232340f8c0734a2486f773130f5c/qmp.sock: use of closed network connection" arch=amd64 command=kill container=3f08bc356e212e945e2dcee47f174d3324be232340f8c0734a2486f773130f5c name=kata-runtime pid=313837 sandbox=3f08bc356e212e945e2dcee47f174d3324be232340f8c0734a2486f773130f5c source=virtcontainers subsystem=qmp
time="2020-07-07T17:28:33.184846317+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kill name=kata-runtime pid=314739 source=katautils
time="2020-07-07T17:28:33.242740258+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=delete name=kata-runtime pid=314754 source=katautils
time="2020-07-07T17:28:33.252962382+08:00" level=warning msg="sandox cgroups path is empty" arch=amd64 command=delete container=3f08bc356e212e945e2dcee47f174d3324be232340f8c0734a2486f773130f5c name=kata-runtime pid=314754 sandbox=3f08bc356e212e945e2dcee47f174d3324be232340f8c0734a2486f773130f5c source=virtcontainers subsystem=sandbox
time="2020-07-08T11:44:33.30715151+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=list name=kata-runtime pid=222979 source=katautils
time="2020-07-08T11:49:39.851737087+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kata-overhead name=kata-runtime pid=226821 source=katautils
time="2020-07-08T11:49:39.871612032+08:00" level=error msg="sandbox cgroup path is emtpy" arch=amd64 command=kata-overhead container=f6ec0705ac32d7aa4ac146921dae3ef922ff4d5850f9fc6d746b4ac6703e7cbd name=kata-runtime pid=226821 sandbox=f6ec0705ac32d7aa4ac146921dae3ef922ff4d5850f9fc6d746b4ac6703e7cbd source=runtime
time="2020-07-08T11:49:48.924923095+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kata-overhead name=kata-runtime pid=226864 source=katautils
time="2020-07-08T11:49:48.925083957+08:00" level=error msg="Container ID (ankuai.com/sankuai/centos:offline_java_200611_centos7) does not exist" arch=amd64 command=kata-overhead container="ankuai.com/sankuai/centos:offline_java_200611_centos7" name=kata-runtime pid=226864 source=runtime
time="2020-07-08T11:49:57.451887383+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kata-overhead name=kata-runtime pid=226926 source=katautils
time="2020-07-08T11:49:57.470631086+08:00" level=error msg="sandbox cgroup path is emtpy" arch=amd64 command=kata-overhead container=085fb4be25ef9c153a28176d22050fc70afa9a9c2885152f7fd60659d3ed0238 name=kata-runtime pid=226926 sandbox=085fb4be25ef9c153a28176d22050fc70afa9a9c2885152f7fd60659d3ed0238 source=runtime
time="2020-07-08T11:51:22.051744578+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kata-overhead name=kata-runtime pid=228311 source=katautils
time="2020-07-08T11:51:22.05187515+08:00" level=error msg="Container ID (085fb4be25ef9c153a28176d22050fc70afa9a9c2885152f7fd60659d3ed02381) does not exist" arch=amd64 command=kata-overhead container=085fb4be25ef9c153a28176d22050fc70afa9a9c2885152f7fd60659d3ed02381 name=kata-runtime pid=228311 source=runtime
time="2020-07-08T11:51:25.371774483+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kata-overhead name=kata-runtime pid=228524 source=katautils
time="2020-07-08T11:51:25.390179303+08:00" level=error msg="sandbox cgroup path is emtpy" arch=amd64 command=kata-overhead container=085fb4be25ef9c153a28176d22050fc70afa9a9c2885152f7fd60659d3ed0238 name=kata-runtime pid=228524 sandbox=085fb4be25ef9c153a28176d22050fc70afa9a9c2885152f7fd60659d3ed0238 source=runtime
time="2020-07-08T11:53:22.348184757+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kata-overhead name=kata-runtime pid=229806 source=katautils
time="2020-07-08T11:53:22.366433184+08:00" level=error msg="sandbox cgroup path is emtpy" arch=amd64 command=kata-overhead container=085fb4be25ef9c153a28176d22050fc70afa9a9c2885152f7fd60659d3ed0238 name=kata-runtime pid=229806 sandbox=085fb4be25ef9c153a28176d22050fc70afa9a9c2885152f7fd60659d3ed0238 source=runtime
time="2020-07-08T11:53:30.324071576+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kata-overhead name=kata-runtime pid=230012 source=katautils
time="2020-07-08T11:53:30.343770301+08:00" level=error msg="sandbox cgroup path is emtpy" arch=amd64 command=kata-overhead container=8319ad5e5ed1b962058060c2f850a6f09cdb321dd4bbcb3a70d89f20bc3bed05 name=kata-runtime pid=230012 sandbox=68163daba9fb93a6f165d801a856b900ca84576195010b709ceee4150fdc34c4 source=runtime
time="2020-07-08T14:42:24.930216884+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=exec name=kata-runtime pid=38979 source=katautils
time="2020-07-10T20:17:48.511561873+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=list name=kata-runtime pid=98218 source=katautils
time="2020-07-10T20:17:58.046087024+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=state name=kata-runtime pid=98310 source=katautils
time="2020-07-10T20:18:14.928126284+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=ps name=kata-runtime pid=98545 source=katautils
time="2020-07-10T20:18:36.167022796+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kata-network name=kata-runtime pid=99426 source=katautils
time="2020-07-10T20:18:41.692407673+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kata-network name=kata-runtime pid=99452 source=katautils
time="2020-07-10T20:18:41.692553468+08:00" level=error msg="Missing container ID" arch=amd64 command=kata-network name=kata-runtime pid=99452 source=runtime
time="2020-07-10T20:18:54.459947648+08:00" level=warning msg="VM memory (256MB) smaller than image \"/usr/share/kata-containers/kata-containers-image.img\" size (512MB)" arch=amd64 command=kata-network name=kata-runtime pid=99585 source=katautils
## Proxy logs Recent proxy problems found in system journal:

time="2020-06-29T13:55:51.201953357+08:00" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/a9cad652715cfaeb7f06a78b2fa9d68c9c3ddeff4633a68ddce5eb8a3cd6ec0b/kata.sock: use of closed network connection" name=kata-proxy pid=39741 sandbox=a9cad652715cfaeb7f06a78b2fa9d68c9c3ddeff4633a68ddce5eb8a3cd6ec0b source=proxy
time="2020-06-29T14:08:14.746184932+08:00" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/98af864bc40acf799ac273a19ed21cd62e5f0f36a852ae71f03a71c2fcf18fb2/kata.sock: use of closed network connection" name=kata-proxy pid=32641 sandbox=98af864bc40acf799ac273a19ed21cd62e5f0f36a852ae71f03a71c2fcf18fb2 source=proxy
time="2020-06-29T14:08:14.746214925+08:00" level=fatal msg="channel error" error="accept unix /run/vc/sbs/98af864bc40acf799ac273a19ed21cd62e5f0f36a852ae71f03a71c2fcf18fb2/proxy.sock: use of closed network connection" name=kata-proxy pid=32641 sandbox=98af864bc40acf799ac273a19ed21cd62e5f0f36a852ae71f03a71c2fcf18fb2 source=proxy
time="2020-06-29T14:08:31.118770954+08:00" level=fatal msg="channel error" error="accept unix /run/vc/sbs/461c5efbc18766f4c8b8ba8232b0f94a7554192e4a6f150245ea05bbdc635e87/proxy.sock: use of closed network connection" name=kata-proxy pid=57094 sandbox=461c5efbc18766f4c8b8ba8232b0f94a7554192e4a6f150245ea05bbdc635e87 source=proxy
time="2020-06-29T15:24:42.5232107+08:00" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/2eeae233636eae85f036e1550e67cc94200277ebc4fa58ea8a8d8843be1de090/kata.sock: use of closed network connection" name=kata-proxy pid=116928 sandbox=2eeae233636eae85f036e1550e67cc94200277ebc4fa58ea8a8d8843be1de090 source=proxy
time="2020-06-29T17:38:42.312184372+08:00" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/4b4e24ca3908577f52804a335341803e168da6657fcdaae2291a189509d5368e/kata.sock: use of closed network connection" name=kata-proxy pid=139484 sandbox=4b4e24ca3908577f52804a335341803e168da6657fcdaae2291a189509d5368e source=proxy
time="2020-06-29T23:18:40.89442067+08:00" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/7a028296d390a23415932ed86a24db23d95c57f0cf06febc0b2fd86807c66cb0/kata.sock: use of closed network connection" name=kata-proxy pid=186818 sandbox=7a028296d390a23415932ed86a24db23d95c57f0cf06febc0b2fd86807c66cb0 source=proxy
time="2020-06-29T23:26:09.433476251+08:00" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/cd36e1789024a40d667bc9129b15d1d24ce954f8885175f5628cc4e938f1f442/kata.sock: use of closed network connection" name=kata-proxy pid=194208 sandbox=cd36e1789024a40d667bc9129b15d1d24ce954f8885175f5628cc4e938f1f442 source=proxy
time="2020-06-30T00:57:27.338586671+08:00" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/886009cd8cf3c2595e79741094fb1d9d81c868479f30d2040cce2c5418e81787/kata.sock: use of closed network connection" name=kata-proxy pid=271933 sandbox=886009cd8cf3c2595e79741094fb1d9d81c868479f30d2040cce2c5418e81787 source=proxy
time="2020-06-30T01:47:29.141799628+08:00" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/9a16b6992c030b2e90a63b508017d4dc6519d8d3a7dca165e884e3dd7fbd1cb4/kata.sock: use of closed network connection" name=kata-proxy pid=314793 sandbox=9a16b6992c030b2e90a63b508017d4dc6519d8d3a7dca165e884e3dd7fbd1cb4 source=proxy
time="2020-06-30T02:10:27.405506311+08:00" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/4011a406bb2fe70b7295b9bb3599ad1f913e2ffc96e3d554b047369ff3dab9fd/kata.sock: use of closed network connection" name=kata-proxy pid=320170 sandbox=4011a406bb2fe70b7295b9bb3599ad1f913e2ffc96e3d554b047369ff3dab9fd source=proxy
time="2020-06-30T10:16:28.506242194+08:00" level=fatal msg="channel error" error="accept unix /run/vc/sbs/de677f3cd0f957b13301e6b6f4ac67c8b55709357d2010c31c22fc96fc260259/proxy.sock: use of closed network connection" name=kata-proxy pid=142172 sandbox=de677f3cd0f957b13301e6b6f4ac67c8b55709357d2010c31c22fc96fc260259 source=proxy
time="2020-06-30T10:17:13.516914596+08:00" level=fatal msg="failed to handle exit signal" error="close unix @->/run/vc/vm/252d879df0b7e3ac66b3ae7565418c3702eee817422ef2737e6a634f5f52deae/kata.sock: use of closed network connection" name=kata-proxy pid=112645 sandbox=252d879df0b7e3ac66b3ae7565418c3702eee817422ef2737e6a634f5f52deae source=proxy
time="2020-06-30T11:49:57.320354823+08:00" level=fatal msg="channel error" error="accept unix /run/vc/sbs/e797af069a1d81b6fe1d2f1bddd70aecfba22c4fa02ade8e80765b2f464fc15d/proxy.sock: use of closed network connection" name=kata-proxy pid=161024 sandbox=e797af069a1d81b6fe1d2f1bddd70aecfba22c4fa02ade8e80765b2f464fc15d source=proxy
## Shim logs No recent shim problems found in system journal. ## Throttler logs No recent throttler problems found in system journal. --- # Container manager details No `docker` No `kubectl` No `crio` Have `containerd` ## containerd Output of "`containerd --version`":

containerd github.com/containerd/containerd 1.3.4 containerd-1.3.4-mt20200709.gitcb9c1d4
Output of "`systemctl show containerd`":

Type=notify
Restart=always
NotifyAccess=main
RestartUSec=100ms
TimeoutStartUSec=1min 30s
TimeoutStopUSec=1min 30s
WatchdogUSec=0
WatchdogTimestamp=Sun 2020-07-12 15:46:08 CST
WatchdogTimestampMonotonic=1134375747164
StartLimitInterval=10000000
StartLimitBurst=5
StartLimitAction=none
FailureAction=none
PermissionsStartOnly=no
RootDirectoryStartOnly=no
RemainAfterExit=no
GuessMainPID=yes
MainPID=312253
ControlPID=0
FileDescriptorStoreMax=0
StatusErrno=0
Result=success
ExecMainStartTimestamp=Sun 2020-07-12 15:46:08 CST
ExecMainStartTimestampMonotonic=1134375543684
ExecMainExitTimestampMonotonic=0
ExecMainPID=312253
ExecMainCode=0
ExecMainStatus=0
ExecStartPre={ path=/sbin/modprobe ; argv[]=/sbin/modprobe overlay ; ignore_errors=yes ; start_time=[Sun 2020-07-12 15:46:08 CST] ; stop_time=[Sun 2020-07-12 15:46:08 CST] ; pid=312250 ; code=exited ; status=0 }
ExecStart={ path=/bin/containerd ; argv[]=/bin/containerd ; ignore_errors=no ; start_time=[Sun 2020-07-12 15:46:08 CST] ; stop_time=[n/a] ; pid=312253 ; code=(null) ; status=0/0 }
Slice=system.slice
ControlGroup=/system.slice/containerd.service
MemoryCurrent=19563077632
TasksCurrent=621
Delegate=yes
CPUAccounting=no
CPUShares=18446744073709551615
StartupCPUShares=18446744073709551615
CPUQuotaPerSecUSec=infinity
BlockIOAccounting=no
BlockIOWeight=18446744073709551615
StartupBlockIOWeight=18446744073709551615
MemoryAccounting=no
MemoryLimit=18446744073709551615
DevicePolicy=auto
TasksAccounting=no
TasksMax=18446744073709551615
UMask=0022
LimitCPU=18446744073709551615
LimitFSIZE=18446744073709551615
LimitDATA=18446744073709551615
LimitSTACK=18446744073709551615
LimitCORE=18446744073709551615
LimitRSS=18446744073709551615
LimitNOFILE=1048576
LimitAS=18446744073709551615
LimitNPROC=18446744073709551615
LimitMEMLOCK=65536
LimitLOCKS=18446744073709551615
LimitSIGPENDING=1546405
LimitMSGQUEUE=819200
LimitNICE=0
LimitRTPRIO=0
LimitRTTIME=18446744073709551615
OOMScoreAdjust=0
Nice=0
IOScheduling=0
CPUSchedulingPolicy=0
CPUSchedulingPriority=0
TimerSlackNSec=50000
CPUSchedulingResetOnFork=no
NonBlocking=no
StandardInput=null
StandardOutput=journal
StandardError=inherit
TTYReset=no
TTYVHangup=no
TTYVTDisallocate=no
SyslogPriority=30
SyslogLevelPrefix=yes
SecureBits=0
CapabilityBoundingSet=18446744073709551615
AmbientCapabilities=0
MountFlags=0
PrivateTmp=no
PrivateNetwork=no
PrivateDevices=no
ProtectHome=no
ProtectSystem=no
SameProcessGroup=no
IgnoreSIGPIPE=yes
NoNewPrivileges=no
SystemCallErrorNumber=0
RuntimeDirectoryMode=0755
KillMode=process
KillSignal=15
SendSIGKILL=yes
SendSIGHUP=no
Id=containerd.service
Names=containerd.service
Requires=basic.target
Wants=system.slice
WantedBy=multi-user.target
Conflicts=shutdown.target
Before=shutdown.target multi-user.target
After=basic.target system.slice local-fs.target network.target systemd-journald.socket
Documentation=https://containerd.io
Description=containerd container runtime
LoadState=loaded
ActiveState=active
SubState=running
FragmentPath=/usr/lib/systemd/system/containerd.service
UnitFileState=enabled
UnitFilePreset=disabled
InactiveExitTimestamp=Sun 2020-07-12 15:46:08 CST
InactiveExitTimestampMonotonic=1134375540799
ActiveEnterTimestamp=Sun 2020-07-12 15:46:08 CST
ActiveEnterTimestampMonotonic=1134375747422
ActiveExitTimestamp=Sun 2020-07-12 15:46:08 CST
ActiveExitTimestampMonotonic=1134375533686
InactiveEnterTimestamp=Sun 2020-07-12 15:46:08 CST
InactiveEnterTimestampMonotonic=1134375539170
CanStart=yes
CanStop=yes
CanReload=no
CanIsolate=no
StopWhenUnneeded=no
RefuseManualStart=no
RefuseManualStop=no
AllowIsolate=no
DefaultDependencies=yes
OnFailureJobMode=replace
IgnoreOnIsolate=no
IgnoreOnSnapshot=no
NeedDaemonReload=no
JobTimeoutUSec=0
JobTimeoutAction=none
ConditionResult=yes
AssertResult=yes
ConditionTimestamp=Sun 2020-07-12 15:46:08 CST
ConditionTimestampMonotonic=1134375540042
AssertTimestamp=Sun 2020-07-12 15:46:08 CST
AssertTimestampMonotonic=1134375540043
Transient=no
Output of "`cat /etc/containerd/config.toml`":

version = 2
root = "/opt/containerd"
state = "/opt/run/containerd"
plugin_dir = ""
disabled_plugins = []
required_plugins = []
oom_score = 0

[grpc]
  address = "/run/containerd/containerd.sock"
  tcp_address = ""
  tcp_tls_cert = ""
  tcp_tls_key = ""
  uid = 0
  gid = 0
  max_recv_message_size = 16777216
  max_send_message_size = 16777216

[ttrpc]
  address = ""
  uid = 0
  gid = 0

[debug]
  address = ""
  uid = 0
  gid = 0
  level = "debug"

[metrics]
  address = ""
  grpc_histogram = false

[cgroup]
  path = ""

[timeouts]
  "io.containerd.timeout.shim.cleanup" = "5s"
  "io.containerd.timeout.shim.load" = "5s"
  "io.containerd.timeout.shim.shutdown" = "3s"
  "io.containerd.timeout.task.state" = "2s"

[plugins]
  [plugins."io.containerd.gc.v1.scheduler"]
    pause_threshold = 0.02
    deletion_threshold = 0
    mutation_threshold = 100
    schedule_delay = "0s"
    startup_delay = "100ms"
  [plugins."io.containerd.grpc.v1.cri"]
    disable_tcp_service = true
    stream_server_address = "127.0.0.1"
    stream_server_port = "0"
    stream_idle_timeout = "4h0m0s"
    enable_selinux = false
    sandbox_image = "registryonline-hulk.sankuai.com/google_containers/pause-amd64:3.0"
    stats_collect_period = 10
    systemd_cgroup = false
    enable_tls_streaming = false
    max_container_log_line_size = 16384
    disable_cgroup = false
    disable_apparmor = false
    restrict_oom_score_adj = false
    max_concurrent_downloads = 3
    disable_proc_mount = false
    [plugins."io.containerd.grpc.v1.cri".containerd]
      snapshotter = "devmapper"
      #snapshotter = "overlayfs"
    #  default_runtime_name = "runc"
      no_pivot = false
      [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime]
        #runtime_type = "io.containerd.kata.v2"
    runtime_type = "io.containerd.runc.v2"
        runtime_engine = ""
        runtime_root = ""
      #  privileged_without_host_devices = false
      [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
        runtime_type = "io.containerd.kata.v2"
      #runtime_type = "io.containerd.runtime.v1.linux"
        #runtime_engine = "/usr/bin/kata-runtime"
        #runtime_root = ""
       # privileged_without_host_devices = false
      [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
          runtime_type = "io.containerd.runc.v2"
          #runtime_engine = ""
          #runtime_root = ""
        #  privileged_without_host_devices = false
    [plugins."io.containerd.grpc.v1.cri".cni]
      bin_dir = "/opt/meituan/cni/bin"
      conf_dir = "/opt/meituan/cni/net.d"
      max_conf_num = 1
      conf_template = ""
    [plugins."io.containerd.grpc.v1.cri".registry]
      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry-hulk.sankuai.com"]
          #endpoint = [http://registry-hulk.sankuai.com]
    [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
      tls_cert_file = ""
      tls_key_file = ""
  [plugins."io.containerd.internal.v1.opt"]
    path = "/opt/containerd"
  [plugins."io.containerd.internal.v1.restart"]
    interval = "10s"
  [plugins."io.containerd.metadata.v1.bolt"]
    content_sharing_policy = "shared"
  [plugins."io.containerd.monitor.v1.cgroups"]
    no_prometheus = false
  [plugins."io.containerd.runtime.v1.linux"]
    shim = "containerd-shim"
    runtime = "runc"
    runtime_root = ""
    no_shim = false
    shim_debug = true
  [plugins."io.containerd.runtime.v2.task"]
    platforms = ["linux/amd64"]
  [plugins."io.containerd.service.v1.diff-service"]
    default = ["walking"]
  [plugins."io.containerd.snapshotter.v1.devmapper"]
    root_path = ""
    pool_name = "dockervg-thinpool"
    base_image_size = "8192MB"
`

该提问来源于开源项目:kata-containers/runtime

  • 点赞
  • 写回答
  • 关注问题
  • 收藏
  • 复制链接分享

7条回答 默认 最新

  • weixin_39576294 weixin_39576294 2020-12-01 21:52

    hi, how much memory does your host have?

    And what command did you use to create the container, and can you share the full log from around the time the error occurred?

    点赞 评论 复制链接分享
  • weixin_39986973 weixin_39986973 2020-12-01 21:52

    `Jul 16 11:53:27 myhost kata[112138]: time="2020-07-16T11:53:27.835043612+08:00" level=debug msg="reading guest console" ID=5acc5b0cf910ab0401c88d380b03214190eb06bfcce027d9873aad3d68194940 sandbox=5acc5b0cf910ab0401c88d380b03214190eb06bfcce027d9873aad3d68194940 source=virtcontainers subsystem=kata_agent vmconsole="[ 1.584404] acpi PNP0C80:01: acpi_memory_enable_device() error"

    Jul 16 11:53:27 myhost containerd[42145]: time="2020-07-16T11:53:27.834978484+08:00" level=debug msg="reading guest console" ID=5acc5b0cf910ab0401c88d380b03214190eb06bfcce027d9873aad3d68194940 sandbox=5acc5b0cf910ab0401c88d380b03214190eb06bfcce027d9873aad3d68194940 source=virtcontainers subsystem=kata_agent vmconsole="[ 1.584259] acpi PNP0C80:01: add_memory failed"

    Jul 16 11:53:27 myhost containerd[42145]: time="2020-07-16T11:53:27.835043612+08:00" level=debug msg="reading guest console" ID=5acc5b0cf910ab0401c88d380b03214190eb06bfcce027d9873aad3d68194940 sandbox=5acc5b0cf910ab0401c88d380b03214190eb06bfcce027d9873aad3d68194940 source=virtcontainers subsystem=kata_agent vmconsole="[ 1.584404] acpi PNP0C80:01: acpi_memory_enable_device() error"

    Jul 16 11:53:27 myhost containerd[42145]: time="2020-07-16T11:53:27.835214720+08:00" level=info msg="{\"timestamp\": {\"seconds\": 1594871607, \"microseconds\": 835138}, \"event\": \"ACPI_DEVICE_OST\", \"data\": {\"info\": {\"device\": \"dimmmem1\", \"source\": 1, \"status\": 1, \"slot\": \"1\", \"slot-type\": \"DIMM\"}}}" ID=5acc5b0cf910ab0401c88d380b03214190eb06bfcce027d9873aad3d68194940 source=virtcontainers subsystem=qmp`

    acpi PNP0C80:01: acpi_memory_enable_device() error acpi PNP0C80:01: add_memory failed

    It should be that the memory hotplug failed

    点赞 评论 复制链接分享
  • weixin_39986973 weixin_39986973 2020-12-01 21:52

    The machine has 377G of memory. I can create a kata container with 16G of memory, but cannot create one with 64G. The remaining memory is 360G, so it’s not a problem of insufficient memory.

    点赞 评论 复制链接分享
  • weixin_39986973 weixin_39986973 2020-12-01 21:52

    /usr/bin/qemu-vanilla-system-x86_64 -name sandbox-ae5a3336c86699b192d15eb6e451775dd4546d8b87e08315b8af31a080e41308 -uuid 93109402-e943-47cc-a2e0-8d6d12cbd670 -machine pc,accel=kvm,kernel_irqchip,nvdimm -cpu host -qmp unix:/run/vc/vm/ae5a3336c86699b192d15eb6e451775dd4546d8b87e08315b8af31a080e41308/qmp.sock,server,nowait -m 1024M,slots=10,maxmem=387711M -device pci-bridge,bus=pci.0,id=pci-bridge-0,chassis_nr=1,shpc=on,addr=2,romfile= -device virtio-serial-pci,disable-modern=false,id=serial0,romfile= -device virtconsole,chardev=charconsole0,id=console0 -chardev socket,id=charconsole0,path=/run/vc/vm/ae5a3336c86699b192d15eb6e451775dd4546d8b87e08315b8af31a080e41308/console.sock,server,nowait -device nvdimm,id=nv0,memdev=mem0 -object memory-backend-file,id=mem0,mem-path=/usr/share/kata-containers/kata-containers-image.img,size=536870912 -device virtio-scsi-pci,id=scsi0,disable-modern=false,romfile= -object rng-random,id=rng0,filename=/dev/urandom -device virtio-rng,rng=rng0,romfile= -device virtserialport,chardev=charch0,id=channel0,name=agent.channel.0 -chardev socket,id=charch0,path=/run/vc/vm/ae5a3336c86699b192d15eb6e451775dd4546d8b87e08315b8af31a080e41308/kata.sock,server,nowait -device virtio-9p-pci,disable-modern=false,fsdev=extra-9p-kataShared,mount_tag=kataShared,romfile= -fsdev local,id=extra-9p-kataShared,path=/run/kata-containers/shared/sandboxes/ae5a3336c86699b192d15eb6e451775dd4546d8b87e08315b8af31a080e41308,security_model=none -netdev tap,id=network-0,vhost=on,vhostfds=3,fds=4 -device driver=virtio-net-pci,netdev=network-0,mac=0a:58:0a:1d:96:0a,disable-modern=false,mq=on,vectors=4,romfile= -global kvm-pit.lost_tick_policy=discard -vga none -no-user-config -nodefaults -nographic -daemonize -object memory-backend-ram,id=dimm1,size=1024M -numa node,memdev=dimm1 -kernel /usr/share/kata-containers/vmlinuz-4.18.0-7.1.mt20200709.git9b36caf.container -append tsc=reliable no_timer_check rcupdate.rcu_expedited=1 i8042.direct=1 i8042.dumbkbd=1 i8042.nopnp=1 
i8042.noaux=1 noreplace-smp reboot=k console=hvc0 console=hvc1 iommu=off cryptomgr.notests net.ifnames=0 pci=lastbus=0 root=/dev/pmem0p1 rootflags=dax,data=ordered,errors=remount-ro ro rootfstype=ext4 quiet systemd.show_status=false panic=1 nr_cpus=48 agent.use_vsock=false systemd.unit=kata-containers.target systemd.mask=systemd-networkd.service systemd.mask=systemd-networkd.socket vsyscall=emulate rw -pidfile /run/vc/vm/ae5a3336c86699b192d15eb6e451775dd4546d8b87e08315b8af31a080e41308/pid -smp 1,cores=1,threads=1,sockets=48,maxcpus=48

    点赞 评论 复制链接分享
  • weixin_39576294 weixin_39576294 2020-12-01 21:52

    Hi, can you try setting default_memory to a larger value and test again?

    The discussion in https://github.com/kata-containers/runtime/issues/1244 may also help you.

    点赞 评论 复制链接分享
  • weixin_39986973 weixin_39986973 2020-12-01 21:52

    Hi, can you try setting default_memory to a larger value and test again?

    Yes, when I increase default_memory, I can create 16G of memory, but I still cannot create 128G or more memory

    点赞 评论 复制链接分享
  • weixin_39986973 weixin_39986973 2020-12-01 21:52

    Hi, can you try setting default_memory to a larger value and test again?

    The discussion in #1244 may also help you.

    The CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE kernel config option has been enabled, and there is enough remaining memory to ensure a successful hotplug, so the problem should be related to the kernel.

    点赞 评论 复制链接分享

相关推荐