Switch to opencontainers/cgroups

This removes libcontainer/cgroups packages and starts
using those from github.com/opencontainers/cgroups repo.

Mostly generated by:

  git rm -rf libcontainer/cgroups

  find . -type f -name "*.go" -exec sed -i \
    's|github.com/opencontainers/runc/libcontainer/cgroups|github.com/opencontainers/cgroups|g' \
    {} +

  go get github.com/opencontainers/cgroups@v0.0.1
  make vendor
  gofumpt -w .

Signed-off-by: Kir Kolyshkin <kolyshkin@gmail.com>
This commit is contained in:
Kir Kolyshkin
2025-02-25 17:32:28 -08:00
parent 85ba66f059
commit a75076b4a4
136 changed files with 707 additions and 6724 deletions

View File

@@ -8,8 +8,8 @@ import (
"sync"
"time"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/types"

3
go.mod
View File

@@ -4,7 +4,6 @@ go 1.23.0
require (
github.com/checkpoint-restore/go-criu/v6 v6.3.0
github.com/cilium/ebpf v0.17.3
github.com/containerd/console v1.0.4
github.com/coreos/go-systemd/v22 v22.5.0
github.com/cyphar/filepath-securejoin v0.4.1
@@ -15,6 +14,7 @@ require (
github.com/moby/sys/user v0.3.0
github.com/moby/sys/userns v0.1.0
github.com/mrunalp/fileutils v0.5.1
github.com/opencontainers/cgroups v0.0.1
github.com/opencontainers/runtime-spec v1.2.1
github.com/opencontainers/selinux v1.11.1
github.com/seccomp/libseccomp-golang v0.10.0
@@ -34,6 +34,7 @@ exclude (
)
require (
github.com/cilium/ebpf v0.17.3 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/vishvananda/netns v0.0.4 // indirect

2
go.sum
View File

@@ -49,6 +49,8 @@ github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/mrunalp/fileutils v0.5.1 h1:F+S7ZlNKnrwHfSwdlgNSkKo67ReVf8o9fel6C3dkm/Q=
github.com/mrunalp/fileutils v0.5.1/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/opencontainers/cgroups v0.0.1 h1:MXjMkkFpKv6kpuirUa4USFBas573sSAY082B4CiHEVA=
github.com/opencontainers/cgroups v0.0.1/go.mod h1:s8lktyhlGUqM7OSRL5P7eAW6Wb+kWPNvt4qvVfzA5vs=
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=

View File

@@ -36,7 +36,7 @@ this package into your code:
```go
import (
_ "github.com/opencontainers/runc/libcontainer/cgroups/devices"
_ "github.com/opencontainers/cgroups/devices"
)
```

View File

@@ -1,21 +0,0 @@
package cgroups
import (
"testing"
)
// TestParseCgroups checks that the current process's cgroup membership can be
// parsed from /proc/self/cgroup and, on cgroup v1, that a "cpu" controller
// entry is present.
func TestParseCgroups(t *testing.T) {
	// We don't need to use /proc/thread-self here because runc always runs
	// with every thread in the same cgroup. This lets us avoid having to do
	// runtime.LockOSThread.
	cg, err := ParseCgroupFile("/proc/self/cgroup")
	if err != nil {
		t.Fatal(err)
	}
	if IsCgroup2UnifiedMode() {
		// The unified (v2) hierarchy has no per-controller entries to check.
		return
	}
	if _, ok := cg["cpu"]; !ok {
		t.Fail()
	}
}

View File

@@ -1,336 +0,0 @@
package devices
import (
"strings"
"testing"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
)
// hash normalizes a textual eBPF instruction listing for comparison: it drops
// blank lines and lines starting with the comment marker comm, trims
// surrounding whitespace from the remaining lines, and rejoins them with
// newlines.
func hash(s, comm string) string {
	var kept []string
	for _, line := range strings.Split(s, "\n") {
		if t := strings.TrimSpace(line); t != "" && !strings.HasPrefix(t, comm) {
			kept = append(kept, t)
		}
	}
	return strings.Join(kept, "\n")
}
// testDeviceFilter compiles the given device rules into an eBPF program and,
// when expectedStr is non-empty, compares the comment-stripped listing of the
// generated instructions against the comment-stripped expectation.
func testDeviceFilter(t testing.TB, devices []*devices.Rule, expectedStr string) {
	insts, _, err := deviceFilter(devices)
	if err != nil {
		t.Fatalf("%s: %v (devices: %+v)", t.Name(), err, devices)
	}
	if expectedStr == "" {
		return
	}
	got := hash(insts.String(), "//")
	want := hash(expectedStr, "//")
	if want != got {
		t.Fatalf("expected:\n%q\ngot\n%q", want, got)
	}
}
// TestDeviceFilter_Nil: with no device rules at all, the generated filter
// must unconditionally return 0, i.e. deny every device access.
func TestDeviceFilter_Nil(t *testing.T) {
	expected := `
// load parameters into registers
0: LdXMemW dst: r2 src: r1 off: 0 imm: 0
1: AndImm32 dst: r2 imm: 65535
2: LdXMemW dst: r3 src: r1 off: 0 imm: 0
3: RShImm32 dst: r3 imm: 16
4: LdXMemW dst: r4 src: r1 off: 4 imm: 0
5: LdXMemW dst: r5 src: r1 off: 8 imm: 0
block-0:
// return 0 (reject)
6: MovImm32 dst: r0 imm: 0
7: Exit
`
	testDeviceFilter(t, nil, expected)
}
// TestDeviceFilter_BuiltInAllowList feeds the default allow-list of devices
// through the eBPF generator and pins the exact program produced. Device
// name comments below follow Linux's Documentation/admin-guide/devices.txt
// major:minor assignments.
func TestDeviceFilter_BuiltInAllowList(t *testing.T) {
	// This is a copy of all rules from
	// github.com/opencontainers/runc/libcontainer/specconv.AllowedDevices.
	devices := []*devices.Rule{
		// mknod of any char device.
		{
			Type:        devices.CharDevice,
			Major:       devices.Wildcard,
			Minor:       devices.Wildcard,
			Permissions: "m",
			Allow:       true,
		},
		// mknod of any block device.
		{
			Type:        devices.BlockDevice,
			Major:       devices.Wildcard,
			Minor:       devices.Wildcard,
			Permissions: "m",
			Allow:       true,
		},
		// /dev/null (c 1:3).
		{
			Type:        devices.CharDevice,
			Major:       1,
			Minor:       3,
			Permissions: "rwm",
			Allow:       true,
		},
		// /dev/random (c 1:8).
		{
			Type:        devices.CharDevice,
			Major:       1,
			Minor:       8,
			Permissions: "rwm",
			Allow:       true,
		},
		// /dev/full (c 1:7).
		{
			Type:        devices.CharDevice,
			Major:       1,
			Minor:       7,
			Permissions: "rwm",
			Allow:       true,
		},
		// /dev/tty (c 5:0).
		{
			Type:        devices.CharDevice,
			Major:       5,
			Minor:       0,
			Permissions: "rwm",
			Allow:       true,
		},
		// /dev/zero (c 1:5).
		{
			Type:        devices.CharDevice,
			Major:       1,
			Minor:       5,
			Permissions: "rwm",
			Allow:       true,
		},
		// /dev/urandom (c 1:9).
		{
			Type:        devices.CharDevice,
			Major:       1,
			Minor:       9,
			Permissions: "rwm",
			Allow:       true,
		},
		// /dev/pts/* (c 136:*).
		{
			Type:        devices.CharDevice,
			Major:       136,
			Minor:       devices.Wildcard,
			Permissions: "rwm",
			Allow:       true,
		},
		// /dev/ptmx (c 5:2).
		{
			Type:        devices.CharDevice,
			Major:       5,
			Minor:       2,
			Permissions: "rwm",
			Allow:       true,
		},
		// /dev/net/tun (c 10:200).
		{
			Type:        devices.CharDevice,
			Major:       10,
			Minor:       200,
			Permissions: "rwm",
			Allow:       true,
		},
	}
	expected := `
// load parameters into registers
0: LdXMemW dst: r2 src: r1 off: 0 imm: 0
1: AndImm32 dst: r2 imm: 65535
2: LdXMemW dst: r3 src: r1 off: 0 imm: 0
3: RShImm32 dst: r3 imm: 16
4: LdXMemW dst: r4 src: r1 off: 4 imm: 0
5: LdXMemW dst: r5 src: r1 off: 8 imm: 0
block-0:
// (b, wildcard, wildcard, m, true)
6: JNEImm dst: r2 off: -1 imm: 1 <block-1>
7: MovReg32 dst: r1 src: r3
8: AndImm32 dst: r1 imm: 1
9: JNEReg dst: r1 off: -1 src: r3 <block-1>
10: MovImm32 dst: r0 imm: 1
11: Exit
block-1:
// (c, wildcard, wildcard, m, true)
12: JNEImm dst: r2 off: -1 imm: 2 <block-2>
13: MovReg32 dst: r1 src: r3
14: AndImm32 dst: r1 imm: 1
15: JNEReg dst: r1 off: -1 src: r3 <block-2>
16: MovImm32 dst: r0 imm: 1
17: Exit
block-2:
18: JNEImm dst: r2 off: -1 imm: 2 <block-3>
19: JNEImm dst: r4 off: -1 imm: 1 <block-3>
20: JNEImm dst: r5 off: -1 imm: 3 <block-3>
21: MovImm32 dst: r0 imm: 1
22: Exit
block-3:
23: JNEImm dst: r2 off: -1 imm: 2 <block-4>
24: JNEImm dst: r4 off: -1 imm: 1 <block-4>
25: JNEImm dst: r5 off: -1 imm: 5 <block-4>
26: MovImm32 dst: r0 imm: 1
27: Exit
block-4:
28: JNEImm dst: r2 off: -1 imm: 2 <block-5>
29: JNEImm dst: r4 off: -1 imm: 1 <block-5>
30: JNEImm dst: r5 off: -1 imm: 7 <block-5>
31: MovImm32 dst: r0 imm: 1
32: Exit
block-5:
33: JNEImm dst: r2 off: -1 imm: 2 <block-6>
34: JNEImm dst: r4 off: -1 imm: 1 <block-6>
35: JNEImm dst: r5 off: -1 imm: 8 <block-6>
36: MovImm32 dst: r0 imm: 1
37: Exit
block-6:
38: JNEImm dst: r2 off: -1 imm: 2 <block-7>
39: JNEImm dst: r4 off: -1 imm: 1 <block-7>
40: JNEImm dst: r5 off: -1 imm: 9 <block-7>
41: MovImm32 dst: r0 imm: 1
42: Exit
block-7:
43: JNEImm dst: r2 off: -1 imm: 2 <block-8>
44: JNEImm dst: r4 off: -1 imm: 5 <block-8>
45: JNEImm dst: r5 off: -1 imm: 0 <block-8>
46: MovImm32 dst: r0 imm: 1
47: Exit
block-8:
48: JNEImm dst: r2 off: -1 imm: 2 <block-9>
49: JNEImm dst: r4 off: -1 imm: 5 <block-9>
50: JNEImm dst: r5 off: -1 imm: 2 <block-9>
51: MovImm32 dst: r0 imm: 1
52: Exit
block-9:
// tuntap (c, 10, 200, rwm, true)
53: JNEImm dst: r2 off: -1 imm: 2 <block-10>
54: JNEImm dst: r4 off: -1 imm: 10 <block-10>
55: JNEImm dst: r5 off: -1 imm: 200 <block-10>
56: MovImm32 dst: r0 imm: 1
57: Exit
block-10:
// /dev/pts (c, 136, wildcard, rwm, true)
58: JNEImm dst: r2 off: -1 imm: 2 <block-11>
59: JNEImm dst: r4 off: -1 imm: 136 <block-11>
60: MovImm32 dst: r0 imm: 1
61: Exit
block-11:
62: MovImm32 dst: r0 imm: 0
63: Exit
`
	testDeviceFilter(t, devices, expected)
}
// TestDeviceFilter_Privileged: a single wildcard allow-all rule
// (type 'a', major/minor -1) must compile to a filter that
// unconditionally returns 1 (accept everything).
func TestDeviceFilter_Privileged(t *testing.T) {
	devices := []*devices.Rule{
		{
			Type:        'a',
			Major:       -1,
			Minor:       -1,
			Permissions: "rwm",
			Allow:       true,
		},
	}
	expected := `
// load parameters into registers
0: LdXMemW dst: r2 src: r1 off: 0 imm: 0
1: AndImm32 dst: r2 imm: 65535
2: LdXMemW dst: r3 src: r1 off: 0 imm: 0
3: RShImm32 dst: r3 imm: 16
4: LdXMemW dst: r4 src: r1 off: 4 imm: 0
5: LdXMemW dst: r5 src: r1 off: 8 imm: 0
block-0:
// return 1 (accept)
6: MovImm32 dst: r0 imm: 1
7: Exit
`
	testDeviceFilter(t, devices, expected)
}
// TestDeviceFilter_PrivilegedExceptSingleDevice: an allow-all rule combined
// with a single deny rule (b 8:0) must produce a filter that rejects exactly
// that device and accepts everything else.
func TestDeviceFilter_PrivilegedExceptSingleDevice(t *testing.T) {
	devices := []*devices.Rule{
		{
			Type:        'a',
			Major:       -1,
			Minor:       -1,
			Permissions: "rwm",
			Allow:       true,
		},
		{
			Type:        'b',
			Major:       8,
			Minor:       0,
			Permissions: "rwm",
			Allow:       false,
		},
	}
	expected := `
// load parameters into registers
0: LdXMemW dst: r2 src: r1 off: 0 imm: 0
1: AndImm32 dst: r2 imm: 65535
2: LdXMemW dst: r3 src: r1 off: 0 imm: 0
3: RShImm32 dst: r3 imm: 16
4: LdXMemW dst: r4 src: r1 off: 4 imm: 0
5: LdXMemW dst: r5 src: r1 off: 8 imm: 0
block-0:
// return 0 (reject) if type==b && major == 8 && minor == 0
6: JNEImm dst: r2 off: -1 imm: 1 <block-1>
7: JNEImm dst: r4 off: -1 imm: 8 <block-1>
8: JNEImm dst: r5 off: -1 imm: 0 <block-1>
9: MovImm32 dst: r0 imm: 0
10: Exit
block-1:
// return 1 (accept)
11: MovImm32 dst: r0 imm: 1
12: Exit
`
	testDeviceFilter(t, devices, expected)
}
// TestDeviceFilter_Weird: rules are order-sensitive. A deny that precedes the
// allow-all wildcard is overridden by it, while a deny that follows it stays
// effective — matching cgroup v1 rule-application semantics.
func TestDeviceFilter_Weird(t *testing.T) {
	devices := []*devices.Rule{
		// Deny b 8:1 — later overridden by the allow-all rule below.
		{
			Type:        'b',
			Major:       8,
			Minor:       1,
			Permissions: "rwm",
			Allow:       false,
		},
		{
			Type:        'a',
			Major:       -1,
			Minor:       -1,
			Permissions: "rwm",
			Allow:       true,
		},
		// Deny b 8:2 — stays effective, as it comes after the allow-all.
		{
			Type:        'b',
			Major:       8,
			Minor:       2,
			Permissions: "rwm",
			Allow:       false,
		},
	}
	// 8/1 is allowed, 8/2 is not allowed.
	// This conforms to runc v1.0.0-rc.9 (cgroup1) behavior.
	expected := `
// load parameters into registers
0: LdXMemW dst: r2 src: r1 off: 0 imm: 0
1: AndImm32 dst: r2 imm: 65535
2: LdXMemW dst: r3 src: r1 off: 0 imm: 0
3: RShImm32 dst: r3 imm: 16
4: LdXMemW dst: r4 src: r1 off: 4 imm: 0
5: LdXMemW dst: r5 src: r1 off: 8 imm: 0
block-0:
// return 0 (reject) if type==b && major == 8 && minor == 2
6: JNEImm dst: r2 off: -1 imm: 1 <block-1>
7: JNEImm dst: r4 off: -1 imm: 8 <block-1>
8: JNEImm dst: r5 off: -1 imm: 2 <block-1>
9: MovImm32 dst: r0 imm: 0
10: Exit
block-1:
// return 1 (accept)
11: MovImm32 dst: r0 imm: 1
12: Exit
`
	testDeviceFilter(t, devices, expected)
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,279 +0,0 @@
package devices
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
)
// TestPodSkipDevicesUpdate checks that updating a pod having SkipDevices: true
// does not result in spurious "permission denied" errors in a container
// running under the pod. The test is somewhat similar in nature to the
// @test "update devices [minimal transition rules]" in tests/integration,
// but uses a pod.
//
// Requires root and a running systemd; skipped otherwise.
func TestPodSkipDevicesUpdate(t *testing.T) {
	if !systemd.IsRunningSystemd() {
		t.Skip("Test requires systemd.")
	}
	if os.Geteuid() != 0 {
		t.Skip("Test requires root.")
	}

	podName := "system-runc_test_pod" + t.Name() + ".slice"
	podConfig := &cgroups.Cgroup{
		Systemd: true,
		Parent:  "system.slice",
		Name:    podName,
		Resources: &cgroups.Resources{
			PidsLimit:   42,
			Memory:      32 * 1024 * 1024,
			SkipDevices: true,
		},
	}
	// Create "pod" cgroup (a systemd slice to hold containers).
	pm := newManager(t, podConfig)
	if err := pm.Apply(-1); err != nil {
		t.Fatal(err)
	}
	if err := pm.Set(podConfig.Resources); err != nil {
		t.Fatal(err)
	}

	containerConfig := &cgroups.Cgroup{
		Parent:      podName,
		ScopePrefix: "test",
		Name:        "PodSkipDevicesUpdate",
		Resources: &cgroups.Resources{
			Devices: []*devices.Rule{
				// Allow access to /dev/null.
				{
					Type:        devices.CharDevice,
					Major:       1,
					Minor:       3,
					Permissions: "rwm",
					Allow:       true,
				},
			},
		},
	}

	// Create a "container" within the "pod" cgroup.
	// This is not a real container, just a process in the cgroup.
	cmd := exec.Command("sleep", "infinity")
	cmd.Env = append(os.Environ(), "LANG=C")
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		t.Fatal(err)
	}
	// Make sure to not leave a zombie.
	defer func() {
		// These may fail, we don't care.
		_ = cmd.Process.Kill()
		_ = cmd.Wait()
	}()

	// Put the process into a cgroup.
	cm := newManager(t, containerConfig)
	if err := cm.Apply(cmd.Process.Pid); err != nil {
		t.Fatal(err)
	}
	// Check that we put the "container" into the "pod" cgroup.
	if !strings.HasPrefix(cm.Path("devices"), pm.Path("devices")) {
		t.Fatalf("expected container cgroup path %q to be under pod cgroup path %q",
			cm.Path("devices"), pm.Path("devices"))
	}
	if err := cm.Set(containerConfig.Resources); err != nil {
		t.Fatal(err)
	}

	// Now update the pod a few times. Each Set rewrites the pod's device
	// rules; with SkipDevices this must not disturb the running container.
	for i := 0; i < 42; i++ {
		podConfig.Resources.PidsLimit++
		podConfig.Resources.Memory += 1024 * 1024
		if err := pm.Set(podConfig.Resources); err != nil {
			t.Fatal(err)
		}
	}
	// Kill the "container".
	if err := cmd.Process.Kill(); err != nil {
		t.Fatal(err)
	}
	_ = cmd.Wait()
	// "Container" stderr should be empty; any EPERM from the device
	// cgroup would have shown up here.
	if stderr.Len() != 0 {
		t.Fatalf("container stderr not empty: %s", stderr.String())
	}
}
// testSkipDevices creates a "pod" slice with Resources.SkipDevices set to
// skipDevices, runs a shell "container" under it that is allowed /dev/full
// only, makes the shell poke /dev/full and /dev/null, and checks that the
// shell's stderr contains every string in expected.
//
// Requires root and a running systemd; skipped otherwise.
func testSkipDevices(t *testing.T, skipDevices bool, expected []string) {
	if !systemd.IsRunningSystemd() {
		t.Skip("Test requires systemd.")
	}
	if os.Geteuid() != 0 {
		t.Skip("Test requires root.")
	}

	podConfig := &cgroups.Cgroup{
		Parent: "system.slice",
		Name:   "system-runc_test_pods.slice",
		Resources: &cgroups.Resources{
			SkipDevices: skipDevices,
		},
	}
	// Create "pods" cgroup (a systemd slice to hold containers).
	pm := newManager(t, podConfig)
	if err := pm.Apply(-1); err != nil {
		t.Fatal(err)
	}
	if err := pm.Set(podConfig.Resources); err != nil {
		t.Fatal(err)
	}

	config := &cgroups.Cgroup{
		Parent:      "system-runc_test_pods.slice",
		ScopePrefix: "test",
		Name:        "SkipDevices",
		Resources: &cgroups.Resources{
			Devices: []*devices.Rule{
				// Allow access to /dev/full only.
				{
					Type:        devices.CharDevice,
					Major:       1,
					Minor:       7,
					Permissions: "rwm",
					Allow:       true,
				},
			},
		},
	}

	// Create a "container" within the "pods" cgroup.
	// This is not a real container, just a process in the cgroup.
	// The shell blocks on "read" until the test releases it via stdinW,
	// which happens only after the cgroup limits are in place.
	cmd := exec.Command("bash", "-c", "read; echo > /dev/full; cat /dev/null; true")
	cmd.Env = append(os.Environ(), "LANG=C")
	stdinR, stdinW, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	cmd.Stdin = stdinR
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	err = cmd.Start()
	stdinR.Close()
	defer stdinW.Close()
	if err != nil {
		t.Fatal(err)
	}
	// Make sure to not leave a zombie.
	defer func() {
		// These may fail, we don't care.
		_, _ = stdinW.WriteString("hey\n")
		_ = cmd.Wait()
	}()

	// Put the process into a cgroup.
	m := newManager(t, config)
	if err := m.Apply(cmd.Process.Pid); err != nil {
		t.Fatal(err)
	}
	// Check that we put the "container" into the "pod" cgroup.
	if !strings.HasPrefix(m.Path("devices"), pm.Path("devices")) {
		t.Fatalf("expected container cgroup path %q to be under pod cgroup path %q",
			m.Path("devices"), pm.Path("devices"))
	}
	if err := m.Set(config.Resources); err != nil {
		// failed to write "c 1:7 rwm": write /sys/fs/cgroup/devices/system.slice/system-runc_test_pods.slice/test-SkipDevices.scope/devices.allow: operation not permitted
		if skipDevices == false && strings.HasSuffix(err.Error(), "/devices.allow: operation not permitted") {
			// Cgroup v1 devices controller gives EPERM on trying
			// to enable devices that are not enabled
			// (skipDevices=false) in a parent cgroup.
			// If this happens, test is passing.
			return
		}
		t.Fatal(err)
	}

	// Check that we can access /dev/full but not /dev/zero.
	// Unblock the shell's "read" so it runs its device probes.
	if _, err := stdinW.WriteString("wow\n"); err != nil {
		t.Fatal(err)
	}
	if err := cmd.Wait(); err != nil {
		t.Fatal(err)
	}
	for _, exp := range expected {
		if !strings.Contains(stderr.String(), exp) {
			t.Errorf("expected %q, got: %s", exp, stderr.String())
		}
	}
}
// TestSkipDevicesTrue: with SkipDevices set on the pod, the container's own
// device rules apply — writing to /dev/full fails with ENOSPC (proving the
// device is reachable) while /dev/null stays blocked.
func TestSkipDevicesTrue(t *testing.T) {
	testSkipDevices(t, true, []string{
		"echo: write error: No space left on device",
		"cat: /dev/null: Operation not permitted",
	})
}
// TestSkipDevicesFalse is the control case for TestSkipDevicesTrue.
func TestSkipDevicesFalse(t *testing.T) {
	// If SkipDevices is not set for the parent slice, access to both
	// devices should fail. This is done to assess the test correctness.
	// For cgroup v1, we check for m.Set returning EPERM.
	// For cgroup v2, we check for the errors below.
	testSkipDevices(t, false, []string{
		"/dev/full: Operation not permitted",
		"cat: /dev/null: Operation not permitted",
	})
}
// testFindDeviceGroup checks that findDeviceGroup resolves char-device
// major 136 to the "char-pts" device group, returning a non-nil error on
// any mismatch or lookup failure.
func testFindDeviceGroup() error {
	const (
		major = 136
		group = "char-pts"
	)
	got, err := findDeviceGroup(devices.CharDevice, major)
	if err != nil || got != group {
		return fmt.Errorf("expected %v, nil, got %v, %w", group, got, err)
	}
	return nil
}
// TestFindDeviceGroup runs the shared findDeviceGroup check once.
func TestFindDeviceGroup(t *testing.T) {
	if err := testFindDeviceGroup(); err != nil {
		t.Fatal(err)
	}
}
// BenchmarkFindDeviceGroup measures the cost of a single device-group
// lookup by repeating the shared check b.N times.
func BenchmarkFindDeviceGroup(b *testing.B) {
	for i := 0; i < b.N; i++ {
		if err := testFindDeviceGroup(); err != nil {
			b.Fatal(err)
		}
	}
}
// newManager returns a systemd cgroup manager for config, choosing the
// unified implementation on cgroup v2 hosts and the legacy one on v1.
// A cleanup destroying the cgroup is registered on t, so callers need not
// tear it down themselves.
func newManager(t *testing.T, config *cgroups.Cgroup) (m cgroups.Manager) {
	t.Helper()
	var err error

	if cgroups.IsCgroup2UnifiedMode() {
		m, err = systemd.NewUnifiedManager(config, "")
	} else {
		m, err = systemd.NewLegacyManager(config, nil)
	}
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { _ = m.Destroy() })

	return m
}

View File

@@ -1,68 +0,0 @@
package devices
import (
"os"
"path"
"testing"
"github.com/moby/sys/userns"
"github.com/opencontainers/runc/libcontainer/cgroups"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
// init puts the package into test mode: testingSkipFinalCheck disables the
// final verification step (presumably the post-Set devices.list check — see
// its declaration), and cgroups.TestMode makes cgroup file writes operate on
// plain files instead of a real cgroupfs.
func init() {
	testingSkipFinalCheck = true
	cgroups.TestMode = true
}
// TestSetV1Allow exercises setV1 against a fake cgroup v1 devices directory:
// after applying a single allow rule (c 1:5 rwm), a default deny-all rule
// must have been written to devices.deny and the allow rule to devices.allow.
func TestSetV1Allow(t *testing.T) {
	if userns.RunningInUserNS() {
		t.Skip("userns detected; setV1 does nothing")
	}
	dir := t.TempDir()
	// Seed the files setV1 reads and writes.
	for file, contents := range map[string]string{
		"devices.allow": "",
		"devices.deny":  "",
		"devices.list":  "a *:* rwm",
	} {
		err := os.WriteFile(path.Join(dir, file), []byte(contents), 0o600)
		if err != nil {
			t.Fatal(err)
		}
	}

	r := &cgroups.Resources{
		Devices: []*devices.Rule{
			{
				Type:        devices.CharDevice,
				Major:       1,
				Minor:       5,
				Permissions: devices.Permissions("rwm"),
				Allow:       true,
			},
		},
	}
	if err := setV1(dir, r); err != nil {
		t.Fatal(err)
	}

	// The default deny rule must be written.
	value, err := fscommon.GetCgroupParamString(dir, "devices.deny")
	if err != nil {
		t.Fatal(err)
	}
	if value[0] != 'a' {
		t.Errorf("Got the wrong value (%q), set devices.deny failed.", value)
	}

	// Permitted rule must be written.
	if value, err := fscommon.GetCgroupParamString(dir, "devices.allow"); err != nil {
		t.Fatal(err)
	} else if value != "c 1:5 rwm" {
		t.Errorf("Got the wrong value (%q), set devices.allow failed.", value)
	}
}

View File

@@ -1,93 +0,0 @@
package cgroups
import (
"errors"
"fmt"
"os"
"path/filepath"
"strconv"
"testing"
"time"
)
// TestWriteCgroupFileHandlesInterrupt hammers a real memory cgroup's
// limit file with 100000 writes; per the test name, a write interrupted
// by a signal that WriteFile fails to retry would surface as a failure
// here. Requires a cgroup v1 memory controller; skipped otherwise.
func TestWriteCgroupFileHandlesInterrupt(t *testing.T) {
	const (
		memoryCgroupMount = "/sys/fs/cgroup/memory"
		memoryLimit       = "memory.limit_in_bytes"
	)
	if _, err := os.Stat(memoryCgroupMount); err != nil {
		// most probably cgroupv2
		t.Skip(err)
	}

	cgroupName := fmt.Sprintf("test-eint-%d", time.Now().Nanosecond())
	cgroupPath := filepath.Join(memoryCgroupMount, cgroupName)
	if err := os.MkdirAll(cgroupPath, 0o755); err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(cgroupPath)

	if _, err := os.Stat(filepath.Join(cgroupPath, memoryLimit)); err != nil {
		// either cgroupv2, or memory controller is not available
		t.Skip(err)
	}

	// Vary the limit each iteration so every write actually changes state.
	for i := 0; i < 100000; i++ {
		limit := 1024*1024 + i
		if err := WriteFile(cgroupPath, memoryLimit, strconv.Itoa(limit)); err != nil {
			t.Fatalf("Failed to write %d on attempt %d: %+v", limit, i, err)
		}
	}
}
// TestOpenat2 checks OpenFile's openat2-based path handling against a set of
// dir/file combinations with varying leading/trailing slashes, with the
// fallback open path disabled so a fallback is reported as an error.
func TestOpenat2(t *testing.T) {
	if !IsCgroup2UnifiedMode() {
		// The reason is many test cases below test opening files from
		// the top-level directory, where cgroup v1 has no files.
		t.Skip("test requires cgroup v2")
	}

	// Make sure we test openat2, not its fallback.
	openFallback = func(_ string, _ int, _ os.FileMode) (*os.File, error) {
		return nil, errors.New("fallback")
	}
	defer func() { openFallback = openAndCheck }()

	for _, tc := range []struct{ dir, file string }{
		{"/sys/fs/cgroup", "cgroup.controllers"},
		{"/sys/fs/cgroup", "/cgroup.controllers"},
		{"/sys/fs/cgroup/", "cgroup.controllers"},
		{"/sys/fs/cgroup/", "/cgroup.controllers"},
		{"/", "/sys/fs/cgroup/cgroup.controllers"},
		{"/", "sys/fs/cgroup/cgroup.controllers"},
		{"/sys/fs/cgroup/cgroup.controllers", ""},
	} {
		fd, err := OpenFile(tc.dir, tc.file, os.O_RDONLY)
		if err != nil {
			t.Errorf("case %+v: %v", tc, err)
		}
		// NOTE(review): Close is also reached when err != nil and fd may be
		// nil; (*os.File).Close on a nil receiver returns ErrInvalid rather
		// than panicking, so this is safe, if slightly sloppy.
		fd.Close()
	}
}
// BenchmarkWriteFile measures WriteFileByLine over payloads of varying shape
// (single line, multi-line, key=value rows, blank lines). TestMode redirects
// the writes to a plain temp dir instead of a real cgroupfs.
func BenchmarkWriteFile(b *testing.B) {
	TestMode = true
	defer func() { TestMode = false }()
	dir := b.TempDir()
	tc := []string{
		"one",
		"one\ntwo\nthree",
		"10:200 foo=bar boo=far\n300:1200 something=other\ndefault 45000\n",
		"\n\n\n\n\n\n\n\n",
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		for _, val := range tc {
			if err := WriteFileByLine(dir, "file", val); err != nil {
				b.Fatal(err)
			}
		}
	}
}

View File

@@ -1,862 +0,0 @@
package fs
import (
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
// Canned contents of cgroup v1 blkio stat files, used as fixtures by the
// tests below. Each constant has a plain (CFQ-era blkio.* file) and a BFQ
// (blkio.bfq.* file) variant with deliberately different numbers, so the
// tests can tell which file GetStats actually parsed.
const (
	sectorsRecursiveContents    = `8:0 1024`
	sectorsRecursiveContentsBFQ = `8:0 2048`

	serviceBytesRecursiveContents = `8:0 Read 100
8:0 Write 200
8:0 Sync 300
8:0 Async 500
8:0 Total 500
Total 500`
	serviceBytesRecursiveContentsBFQ = `8:0 Read 1100
8:0 Write 1200
8:0 Sync 1300
8:0 Async 1500
8:0 Total 1500
Total 1500`

	servicedRecursiveContents = `8:0 Read 10
8:0 Write 40
8:0 Sync 20
8:0 Async 30
8:0 Total 50
Total 50`
	servicedRecursiveContentsBFQ = `8:0 Read 11
8:0 Write 41
8:0 Sync 21
8:0 Async 31
8:0 Total 51
Total 51`

	queuedRecursiveContents = `8:0 Read 1
8:0 Write 4
8:0 Sync 2
8:0 Async 3
8:0 Total 5
Total 5`
	queuedRecursiveContentsBFQ = `8:0 Read 2
8:0 Write 3
8:0 Sync 4
8:0 Async 5
8:0 Total 6
Total 6`

	serviceTimeRecursiveContents = `8:0 Read 173959
8:0 Write 0
8:0 Sync 0
8:0 Async 173959
8:0 Total 17395
Total 17395`
	serviceTimeRecursiveContentsBFQ = `8:0 Read 173959
8:0 Write 0
8:0 Sync 0
8:0 Async 173
8:0 Total 174
Total 174`

	waitTimeRecursiveContents = `8:0 Read 15571
8:0 Write 0
8:0 Sync 0
8:0 Async 15571
8:0 Total 15571`
	waitTimeRecursiveContentsBFQ = `8:0 Read 1557
8:0 Write 0
8:0 Sync 0
8:0 Async 1557
8:0 Total 1557`

	mergedRecursiveContents = `8:0 Read 5
8:0 Write 10
8:0 Sync 0
8:0 Async 0
8:0 Total 15
Total 15`
	mergedRecursiveContentsBFQ = `8:0 Read 51
8:0 Write 101
8:0 Sync 0
8:0 Async 0
8:0 Total 151
Total 151`

	timeRecursiveContents    = `8:0 8`
	timeRecursiveContentsBFQ = `8:0 16`

	throttleServiceBytes = `8:0 Read 11030528
8:0 Write 23
8:0 Sync 42
8:0 Async 11030528
8:0 Total 11030528
252:0 Read 11030528
252:0 Write 23
252:0 Sync 42
252:0 Async 11030528
252:0 Total 11030528
Total 22061056`
	throttleServiceBytesRecursive = `8:0 Read 110305281
8:0 Write 231
8:0 Sync 421
8:0 Async 110305281
8:0 Total 110305281
252:0 Read 110305281
252:0 Write 231
252:0 Sync 421
252:0 Async 110305281
252:0 Total 110305281
Total 220610561`
	throttleServiced = `8:0 Read 164
8:0 Write 23
8:0 Sync 42
8:0 Async 164
8:0 Total 164
252:0 Read 164
252:0 Write 23
252:0 Sync 42
252:0 Async 164
252:0 Total 164
Total 328`
	throttleServicedRecursive = `8:0 Read 1641
8:0 Write 231
8:0 Sync 421
8:0 Async 1641
8:0 Total 1641
252:0 Read 1641
252:0 Write 231
252:0 Sync 421
252:0 Async 1641
252:0 Total 1641
Total 3281`
)

// Full set of blkio.bfq.* stat files, as exposed with BFQ debug stats on.
var blkioBFQDebugStatsTestFiles = map[string]string{
	"blkio.bfq.io_service_bytes_recursive": serviceBytesRecursiveContentsBFQ,
	"blkio.bfq.io_serviced_recursive":      servicedRecursiveContentsBFQ,
	"blkio.bfq.io_queued_recursive":        queuedRecursiveContentsBFQ,
	"blkio.bfq.io_service_time_recursive":  serviceTimeRecursiveContentsBFQ,
	"blkio.bfq.io_wait_time_recursive":     waitTimeRecursiveContentsBFQ,
	"blkio.bfq.io_merged_recursive":        mergedRecursiveContentsBFQ,
	"blkio.bfq.time_recursive":             timeRecursiveContentsBFQ,
	"blkio.bfq.sectors_recursive":          sectorsRecursiveContentsBFQ,
}

// Minimal set of blkio.bfq.* stat files (BFQ without debug stats).
var blkioBFQStatsTestFiles = map[string]string{
	"blkio.bfq.io_service_bytes_recursive": serviceBytesRecursiveContentsBFQ,
	"blkio.bfq.io_serviced_recursive":      servicedRecursiveContentsBFQ,
}

// Legacy CFQ-scheduler blkio.* stat files.
var blkioCFQStatsTestFiles = map[string]string{
	"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
	"blkio.io_serviced_recursive":      servicedRecursiveContents,
	"blkio.io_queued_recursive":        queuedRecursiveContents,
	"blkio.io_service_time_recursive":  serviceTimeRecursiveContents,
	"blkio.io_wait_time_recursive":     waitTimeRecursiveContents,
	"blkio.io_merged_recursive":        mergedRecursiveContents,
	"blkio.time_recursive":             timeRecursiveContents,
	"blkio.sectors_recursive":          sectorsRecursiveContents,
}

// blkioStatFailureTestCase names a stat file to be made unreadable/broken in
// failure-mode tests (used by tests outside this excerpt).
type blkioStatFailureTestCase struct {
	desc     string // human-readable description of the case
	filename string // the stat file to break
}
// appendBlkioStatEntry appends one blkio stat entry (major:minor device,
// value, and operation name) to the given slice; it keeps the long
// expected-stats constructions below readable.
func appendBlkioStatEntry(blkioStatEntries *[]cgroups.BlkioStatEntry, major, minor, value uint64, op string) { //nolint:unparam
	*blkioStatEntries = append(*blkioStatEntries, cgroups.BlkioStatEntry{Major: major, Minor: minor, Value: value, Op: op})
}
// TestBlkioSetWeight checks that BlkioGroup.Set detects which weight file
// the kernel exposes (BFQ blkio.bfq.weight vs legacy blkio.weight) and
// writes the configured weight to it.
func TestBlkioSetWeight(t *testing.T) {
	const (
		weightBefore = 100
		weightAfter  = 200
	)

	for _, legacyIOScheduler := range []bool{false, true} {
		// Populate cgroup
		path := tempDir(t, "blkio")
		weightFilename := "blkio.bfq.weight"
		if legacyIOScheduler {
			weightFilename = "blkio.weight"
		}
		writeFileContents(t, path, map[string]string{
			weightFilename: strconv.Itoa(weightBefore),
		})

		// Apply new configuration
		r := &cgroups.Resources{
			BlkioWeight: weightAfter,
		}
		blkio := &BlkioGroup{}
		if err := blkio.Set(path, r); err != nil {
			t.Fatal(err)
		}

		// Verify results
		if weightFilename != blkio.weightFilename {
			t.Fatalf("weight filename detection failed: expected %q, detected %q", weightFilename, blkio.weightFilename)
		}
		value, err := fscommon.GetCgroupParamUint(path, weightFilename)
		if err != nil {
			t.Fatal(err)
		}
		if value != weightAfter {
			t.Fatalf("Got the wrong value, set %s failed.", weightFilename)
		}
	}
}
// TestBlkioSetWeightDevice checks that BlkioGroup.Set detects the per-device
// weight file (BFQ blkio.bfq.weight_device vs legacy blkio.weight_device)
// and writes the configured per-device weight to it.
func TestBlkioSetWeightDevice(t *testing.T) {
	const (
		weightDeviceBefore = "8:0 400"
	)

	for _, legacyIOScheduler := range []bool{false, true} {
		// Populate cgroup
		path := tempDir(t, "blkio")
		weightFilename := "blkio.bfq.weight"
		weightDeviceFilename := "blkio.bfq.weight_device"
		if legacyIOScheduler {
			weightFilename = "blkio.weight"
			weightDeviceFilename = "blkio.weight_device"
		}
		writeFileContents(t, path, map[string]string{
			weightFilename:       "",
			weightDeviceFilename: weightDeviceBefore,
		})

		// Apply new configuration
		wd := cgroups.NewWeightDevice(8, 0, 500, 0)
		weightDeviceAfter := wd.WeightString()
		r := &cgroups.Resources{
			BlkioWeightDevice: []*cgroups.WeightDevice{wd},
		}
		blkio := &BlkioGroup{}
		if err := blkio.Set(path, r); err != nil {
			t.Fatal(err)
		}

		// Verify results
		if weightDeviceFilename != blkio.weightDeviceFilename {
			t.Fatalf("weight_device filename detection failed: expected %q, detected %q", weightDeviceFilename, blkio.weightDeviceFilename)
		}
		value, err := fscommon.GetCgroupParamString(path, weightDeviceFilename)
		if err != nil {
			t.Fatal(err)
		}
		if value != weightDeviceAfter {
			t.Fatalf("Got the wrong value, set %s failed.", weightDeviceFilename)
		}
	}
}
// TestBlkioSetMultipleWeightDevice is a regression test for issue #274:
// setting several per-device weights must result in one write per device,
// not a single overwriting write.
func TestBlkioSetMultipleWeightDevice(t *testing.T) {
	path := tempDir(t, "blkio")

	const (
		weightDeviceBefore = "8:0 400"
	)

	wd1 := cgroups.NewWeightDevice(8, 0, 500, 0)
	wd2 := cgroups.NewWeightDevice(8, 16, 500, 0)
	// we cannot actually set and check both because normal os.WriteFile
	// when writing to cgroup file will overwrite the whole file content instead
	// of updating it as the kernel is doing. Just check the second device
	// is present will suffice for the test to ensure multiple writes are done.
	weightDeviceAfter := wd2.WeightString()

	blkio := &BlkioGroup{}
	blkio.detectWeightFilenames(path)
	if blkio.weightDeviceFilename != "blkio.bfq.weight_device" {
		t.Fatalf("when blkio controller is unavailable, expected to use \"blkio.bfq.weight_device\", tried to use %q", blkio.weightDeviceFilename)
	}
	writeFileContents(t, path, map[string]string{
		blkio.weightDeviceFilename: weightDeviceBefore,
	})

	r := &cgroups.Resources{
		BlkioWeightDevice: []*cgroups.WeightDevice{wd1, wd2},
	}
	if err := blkio.Set(path, r); err != nil {
		t.Fatal(err)
	}

	value, err := fscommon.GetCgroupParamString(path, blkio.weightDeviceFilename)
	if err != nil {
		t.Fatal(err)
	}
	if value != weightDeviceAfter {
		t.Fatalf("Got the wrong value, set %s failed.", blkio.weightDeviceFilename)
	}
}
// TestBlkioBFQDebugStats populates a fake cgroup with the full BFQ debug
// stat file set and checks GetStats parses every file into the matching
// BlkioStats field, entry by entry.
func TestBlkioBFQDebugStats(t *testing.T) {
	path := tempDir(t, "blkio")
	writeFileContents(t, path, blkioBFQDebugStatsTestFiles)
	blkio := &BlkioGroup{}
	actualStats := *cgroups.NewStats()
	err := blkio.GetStats(path, &actualStats)
	if err != nil {
		t.Fatal(err)
	}

	// Expected entries mirror the *BFQ fixture contents above.
	expectedStats := cgroups.BlkioStats{}
	appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 2048, "")

	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1100, "Read")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1200, "Write")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1300, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1500, "Async")
	appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1500, "Total")

	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 11, "Read")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 41, "Write")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 21, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 31, "Async")
	appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 51, "Total")

	appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Read")
	appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Write")
	appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Sync")
	appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Async")
	appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 6, "Total")

	appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read")
	appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write")
	appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync")
	appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173, "Async")
	appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 174, "Total")

	appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 1557, "Read")
	appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write")
	appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync")
	appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 1557, "Async")
	appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 1557, "Total")

	appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 51, "Read")
	appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 101, "Write")
	appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync")
	appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async")
	appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 151, "Total")

	appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 16, "")

	expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
// TestBlkioMultipleStatsFiles verifies GetStats when both the BFQ debug
// stat files and the CFQ stat files are present in the same cgroup dir.
// The expected values below match the BFQ debug fixture (e.g. 2048
// sectors), not the CFQ one, i.e. the BFQ numbers take precedence.
func TestBlkioMultipleStatsFiles(t *testing.T) {
path := tempDir(t, "blkio")
// Write both fixture sets into the same directory; the file names do not
// overlap (blkio.bfq.* vs blkio.*), so both coexist.
writeFileContents(t, path, blkioBFQDebugStatsTestFiles)
writeFileContents(t, path, blkioCFQStatsTestFiles)
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(path, &actualStats)
if err != nil {
t.Fatal(err)
}
// Expected entries for device 8:0, mirroring the BFQ debug fixture data.
expectedStats := cgroups.BlkioStats{}
appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 2048, "")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1100, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1200, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1300, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1500, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1500, "Total")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 11, "Read")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 41, "Write")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 21, "Sync")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 31, "Async")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 51, "Total")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Read")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Write")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Sync")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Async")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 6, "Total")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 174, "Total")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 1557, "Read")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 1557, "Async")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 1557, "Total")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 51, "Read")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 101, "Write")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 151, "Total")
appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 16, "")
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
// TestBlkioBFQStats verifies GetStats against the (non-debug) BFQ fixture:
// only the service-bytes and serviced entries are expected to be filled,
// presumably because the non-debug BFQ scheduler exposes only those files
// -- confirm against blkioBFQStatsTestFiles.
func TestBlkioBFQStats(t *testing.T) {
path := tempDir(t, "blkio")
writeFileContents(t, path, blkioBFQStatsTestFiles)
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(path, &actualStats)
if err != nil {
t.Fatal(err)
}
// Expected entries for device 8:0, mirroring the fixture contents.
expectedStats := cgroups.BlkioStats{}
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1100, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1200, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1300, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1500, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 1500, "Total")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 11, "Read")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 41, "Write")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 21, "Sync")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 31, "Async")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 51, "Total")
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
// TestBlkioStatsNoFilesBFQDebug verifies that BlkioGroup.GetStats succeeds
// when any single blkio.bfq.* debug stat file is absent: a missing stat
// file must be silently skipped, not reported as an error.
//
// Bug fix: the original body created a "cpuset" directory and called
// CpusetGroup.GetStats, so the deleted blkio files were never actually
// exercised; use the blkio controller as the test name and fixtures imply.
func TestBlkioStatsNoFilesBFQDebug(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	testCases := []blkioStatFailureTestCase{
		{
			desc:     "missing blkio.bfq.io_service_bytes_recursive file",
			filename: "blkio.bfq.io_service_bytes_recursive",
		},
		{
			desc:     "missing blkio.bfq.io_serviced_recursive file",
			filename: "blkio.bfq.io_serviced_recursive",
		},
		{
			desc:     "missing blkio.bfq.io_queued_recursive file",
			filename: "blkio.bfq.io_queued_recursive",
		},
		{
			desc:     "missing blkio.bfq.sectors_recursive file",
			filename: "blkio.bfq.sectors_recursive",
		},
		{
			desc:     "missing blkio.bfq.io_service_time_recursive file",
			filename: "blkio.bfq.io_service_time_recursive",
		},
		{
			desc:     "missing blkio.bfq.io_wait_time_recursive file",
			filename: "blkio.bfq.io_wait_time_recursive",
		},
		{
			desc:     "missing blkio.bfq.io_merged_recursive file",
			filename: "blkio.bfq.io_merged_recursive",
		},
		{
			desc:     "missing blkio.bfq.time_recursive file",
			filename: "blkio.bfq.time_recursive",
		},
	}
	for _, testCase := range testCases {
		path := tempDir(t, "blkio")
		// Copy the fixture map, then drop the one file under test.
		tempBlkioTestFiles := map[string]string{}
		for i, v := range blkioBFQDebugStatsTestFiles {
			tempBlkioTestFiles[i] = v
		}
		delete(tempBlkioTestFiles, testCase.filename)
		writeFileContents(t, path, tempBlkioTestFiles)

		blkio := &BlkioGroup{}
		actualStats := *cgroups.NewStats()
		err := blkio.GetStats(path, &actualStats)
		if err != nil {
			t.Errorf("%s: want no error, got: %+v", testCase.desc, err)
		}
	}
}
// TestBlkioCFQStats verifies GetStats against the CFQ fixture files
// (blkio.*_recursive): every stats category is expected to be populated
// for device 8:0 with the fixture values below.
func TestBlkioCFQStats(t *testing.T) {
path := tempDir(t, "blkio")
writeFileContents(t, path, blkioCFQStatsTestFiles)
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(path, &actualStats)
if err != nil {
t.Fatal(err)
}
// Verify expected stats.
expectedStats := cgroups.BlkioStats{}
appendBlkioStatEntry(&expectedStats.SectorsRecursive, 8, 0, 1024, "")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 100, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 200, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 300, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 500, "Total")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 10, "Read")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 40, "Write")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 20, "Sync")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 30, "Async")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 50, "Total")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 1, "Read")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 4, "Write")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 2, "Sync")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 3, "Async")
appendBlkioStatEntry(&expectedStats.IoQueuedRecursive, 8, 0, 5, "Total")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 0, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 173959, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceTimeRecursive, 8, 0, 17395, "Total")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Read")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Write")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 0, "Sync")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Async")
appendBlkioStatEntry(&expectedStats.IoWaitTimeRecursive, 8, 0, 15571, "Total")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 5, "Read")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 10, "Write")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Sync")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 0, "Async")
appendBlkioStatEntry(&expectedStats.IoMergedRecursive, 8, 0, 15, "Total")
appendBlkioStatEntry(&expectedStats.IoTimeRecursive, 8, 0, 8, "")
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
// TestBlkioStatsNoFilesCFQ verifies that BlkioGroup.GetStats succeeds when
// any single blkio.* CFQ stat file is absent: a missing stat file must be
// silently skipped, not reported as an error.
//
// Bug fix: the original body created a "cpuset" directory and called
// CpusetGroup.GetStats, so the deleted blkio files were never actually
// exercised; use the blkio controller as the test name and fixtures imply.
func TestBlkioStatsNoFilesCFQ(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}
	testCases := []blkioStatFailureTestCase{
		{
			desc:     "missing blkio.io_service_bytes_recursive file",
			filename: "blkio.io_service_bytes_recursive",
		},
		{
			desc:     "missing blkio.io_serviced_recursive file",
			filename: "blkio.io_serviced_recursive",
		},
		{
			desc:     "missing blkio.io_queued_recursive file",
			filename: "blkio.io_queued_recursive",
		},
		{
			desc:     "missing blkio.sectors_recursive file",
			filename: "blkio.sectors_recursive",
		},
		{
			desc:     "missing blkio.io_service_time_recursive file",
			filename: "blkio.io_service_time_recursive",
		},
		{
			desc:     "missing blkio.io_wait_time_recursive file",
			filename: "blkio.io_wait_time_recursive",
		},
		{
			desc:     "missing blkio.io_merged_recursive file",
			filename: "blkio.io_merged_recursive",
		},
		{
			desc:     "missing blkio.time_recursive file",
			filename: "blkio.time_recursive",
		},
	}
	for _, testCase := range testCases {
		path := tempDir(t, "blkio")
		// Copy the fixture map, then drop the one file under test.
		tempBlkioTestFiles := map[string]string{}
		for i, v := range blkioCFQStatsTestFiles {
			tempBlkioTestFiles[i] = v
		}
		delete(tempBlkioTestFiles, testCase.filename)
		writeFileContents(t, path, tempBlkioTestFiles)

		blkio := &BlkioGroup{}
		actualStats := *cgroups.NewStats()
		err := blkio.GetStats(path, &actualStats)
		if err != nil {
			t.Errorf("%s: want no error, got %+v", testCase.desc, err)
		}
	}
}
// TestBlkioStatsUnexpectedNumberOfFields checks that a stat line carrying
// too many fields ("8:0 Read 100 100") causes GetStats to return an error.
func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) {
	path := tempDir(t, "blkio")
	// All files are valid fixtures except the malformed service-bytes one.
	contents := map[string]string{
		"blkio.io_service_bytes_recursive": "8:0 Read 100 100",
		"blkio.io_serviced_recursive":      servicedRecursiveContents,
		"blkio.io_queued_recursive":        queuedRecursiveContents,
		"blkio.sectors_recursive":          sectorsRecursiveContents,
		"blkio.io_service_time_recursive":  serviceTimeRecursiveContents,
		"blkio.io_wait_time_recursive":     waitTimeRecursiveContents,
		"blkio.io_merged_recursive":        mergedRecursiveContents,
		"blkio.time_recursive":             timeRecursiveContents,
	}
	writeFileContents(t, path, contents)

	stats := *cgroups.NewStats()
	if err := (&BlkioGroup{}).GetStats(path, &stats); err == nil {
		t.Fatal("Expected to fail, but did not")
	}
}
// TestBlkioStatsUnexpectedFieldType checks that a stat line whose value
// field is not numeric ("8:0 Read Write") causes GetStats to return an
// error.
func TestBlkioStatsUnexpectedFieldType(t *testing.T) {
	path := tempDir(t, "blkio")
	// All files are valid fixtures except the malformed service-bytes one.
	contents := map[string]string{
		"blkio.io_service_bytes_recursive": "8:0 Read Write",
		"blkio.io_serviced_recursive":      servicedRecursiveContents,
		"blkio.io_queued_recursive":        queuedRecursiveContents,
		"blkio.sectors_recursive":          sectorsRecursiveContents,
		"blkio.io_service_time_recursive":  serviceTimeRecursiveContents,
		"blkio.io_wait_time_recursive":     waitTimeRecursiveContents,
		"blkio.io_merged_recursive":        mergedRecursiveContents,
		"blkio.time_recursive":             timeRecursiveContents,
	}
	writeFileContents(t, path, contents)

	stats := *cgroups.NewStats()
	if err := (&BlkioGroup{}).GetStats(path, &stats); err == nil {
		t.Fatal("Expected to fail, but did not")
	}
}
// TestThrottleRecursiveBlkioStats verifies that when the regular CFQ stat
// files are present but empty, GetStats picks up the values from the
// blkio.throttle.*_recursive files instead, for both devices 8:0 and 252:0.
func TestThrottleRecursiveBlkioStats(t *testing.T) {
path := tempDir(t, "blkio")
writeFileContents(t, path, map[string]string{
"blkio.io_service_bytes_recursive": "",
"blkio.io_serviced_recursive": "",
"blkio.io_queued_recursive": "",
"blkio.sectors_recursive": "",
"blkio.io_service_time_recursive": "",
"blkio.io_wait_time_recursive": "",
"blkio.io_merged_recursive": "",
"blkio.time_recursive": "",
"blkio.throttle.io_service_bytes_recursive": throttleServiceBytesRecursive,
"blkio.throttle.io_serviced_recursive": throttleServicedRecursive,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(path, &actualStats)
if err != nil {
t.Fatal(err)
}
// Verify expected stats.
expectedStats := cgroups.BlkioStats{}
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 110305281, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 231, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 421, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 110305281, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 110305281, "Total")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 110305281, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 231, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 421, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 110305281, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 110305281, "Total")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 1641, "Read")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 231, "Write")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 421, "Sync")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 1641, "Async")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 1641, "Total")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 1641, "Read")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 231, "Write")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 421, "Sync")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 1641, "Async")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 1641, "Total")
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
// TestThrottleBlkioStats is like TestThrottleRecursiveBlkioStats, but uses
// the non-recursive blkio.throttle.io_service_bytes/io_serviced files as
// the fallback source when the regular CFQ stat files are empty.
func TestThrottleBlkioStats(t *testing.T) {
path := tempDir(t, "blkio")
writeFileContents(t, path, map[string]string{
"blkio.io_service_bytes_recursive": "",
"blkio.io_serviced_recursive": "",
"blkio.io_queued_recursive": "",
"blkio.sectors_recursive": "",
"blkio.io_service_time_recursive": "",
"blkio.io_wait_time_recursive": "",
"blkio.io_merged_recursive": "",
"blkio.time_recursive": "",
"blkio.throttle.io_service_bytes": throttleServiceBytes,
"blkio.throttle.io_serviced": throttleServiced,
})
blkio := &BlkioGroup{}
actualStats := *cgroups.NewStats()
err := blkio.GetStats(path, &actualStats)
if err != nil {
t.Fatal(err)
}
// Verify expected stats.
expectedStats := cgroups.BlkioStats{}
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 23, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 42, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 8, 0, 11030528, "Total")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Read")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 23, "Write")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 42, "Sync")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Async")
appendBlkioStatEntry(&expectedStats.IoServiceBytesRecursive, 252, 0, 11030528, "Total")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Read")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 23, "Write")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 42, "Sync")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Async")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 8, 0, 164, "Total")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Read")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 23, "Write")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 42, "Sync")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Async")
appendBlkioStatEntry(&expectedStats.IoServicedRecursive, 252, 0, 164, "Total")
expectBlkioStatsEquals(t, expectedStats, actualStats.BlkioStats)
}
// TestBlkioSetThrottleReadBpsDevice checks that Set overwrites an existing
// blkio.throttle.read_bps_device entry with the configured throttle device.
func TestBlkioSetThrottleReadBpsDevice(t *testing.T) {
	path := tempDir(t, "blkio")

	// Seed the file with a pre-existing 1024-bps entry for dev 8:0.
	writeFileContents(t, path, map[string]string{
		"blkio.throttle.read_bps_device": `8:0 1024`,
	})

	td := cgroups.NewThrottleDevice(8, 0, 2048)
	blkio := &BlkioGroup{}
	r := &cgroups.Resources{
		BlkioThrottleReadBpsDevice: []*cgroups.ThrottleDevice{td},
	}
	if err := blkio.Set(path, r); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamString(path, "blkio.throttle.read_bps_device")
	if err != nil {
		t.Fatal(err)
	}
	if got != td.String() {
		t.Fatal("Got the wrong value, set blkio.throttle.read_bps_device failed.")
	}
}
// TestBlkioSetThrottleWriteBpsDevice checks that Set overwrites an existing
// blkio.throttle.write_bps_device entry with the configured throttle device.
func TestBlkioSetThrottleWriteBpsDevice(t *testing.T) {
	path := tempDir(t, "blkio")

	// Seed the file with a pre-existing 1024-bps entry for dev 8:0.
	writeFileContents(t, path, map[string]string{
		"blkio.throttle.write_bps_device": `8:0 1024`,
	})

	td := cgroups.NewThrottleDevice(8, 0, 2048)
	blkio := &BlkioGroup{}
	r := &cgroups.Resources{
		BlkioThrottleWriteBpsDevice: []*cgroups.ThrottleDevice{td},
	}
	if err := blkio.Set(path, r); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamString(path, "blkio.throttle.write_bps_device")
	if err != nil {
		t.Fatal(err)
	}
	if got != td.String() {
		t.Fatal("Got the wrong value, set blkio.throttle.write_bps_device failed.")
	}
}
// TestBlkioSetThrottleReadIOpsDevice checks that Set overwrites an existing
// blkio.throttle.read_iops_device entry with the configured throttle device.
func TestBlkioSetThrottleReadIOpsDevice(t *testing.T) {
	path := tempDir(t, "blkio")

	// Seed the file with a pre-existing 1024-iops entry for dev 8:0.
	writeFileContents(t, path, map[string]string{
		"blkio.throttle.read_iops_device": `8:0 1024`,
	})

	td := cgroups.NewThrottleDevice(8, 0, 2048)
	blkio := &BlkioGroup{}
	r := &cgroups.Resources{
		BlkioThrottleReadIOPSDevice: []*cgroups.ThrottleDevice{td},
	}
	if err := blkio.Set(path, r); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamString(path, "blkio.throttle.read_iops_device")
	if err != nil {
		t.Fatal(err)
	}
	if got != td.String() {
		t.Fatal("Got the wrong value, set blkio.throttle.read_iops_device failed.")
	}
}
// TestBlkioSetThrottleWriteIOpsDevice checks that Set overwrites an
// existing blkio.throttle.write_iops_device entry with the configured
// throttle device.
func TestBlkioSetThrottleWriteIOpsDevice(t *testing.T) {
	path := tempDir(t, "blkio")

	// Seed the file with a pre-existing 1024-iops entry for dev 8:0.
	writeFileContents(t, path, map[string]string{
		"blkio.throttle.write_iops_device": `8:0 1024`,
	})

	td := cgroups.NewThrottleDevice(8, 0, 2048)
	blkio := &BlkioGroup{}
	r := &cgroups.Resources{
		BlkioThrottleWriteIOPSDevice: []*cgroups.ThrottleDevice{td},
	}
	if err := blkio.Set(path, r); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamString(path, "blkio.throttle.write_iops_device")
	if err != nil {
		t.Fatal(err)
	}
	if got != td.String() {
		t.Fatal("Got the wrong value, set blkio.throttle.write_iops_device failed.")
	}
}

View File

@@ -1,226 +0,0 @@
package fs
import (
"fmt"
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
// TestCpuSetShares checks that Set writes CpuShares to cpu.shares.
func TestCpuSetShares(t *testing.T) {
	path := tempDir(t, "cpu")
	const (
		sharesBefore = 1024
		sharesAfter  = 512
	)
	writeFileContents(t, path, map[string]string{
		"cpu.shares": strconv.Itoa(sharesBefore),
	})

	cpu := &CpuGroup{}
	if err := cpu.Set(path, &cgroups.Resources{CpuShares: sharesAfter}); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamUint(path, "cpu.shares")
	if err != nil {
		t.Fatal(err)
	}
	if got != sharesAfter {
		t.Fatal("Got the wrong value, set cpu.shares failed.")
	}
}
// TestCpuSetBandWidth checks that Set writes all CFS bandwidth and RT
// scheduler knobs (quota, burst, period, rt_runtime, rt_period) to their
// respective cpu.* files.
func TestCpuSetBandWidth(t *testing.T) {
	path := tempDir(t, "cpu")
	const (
		quotaBefore     = 8000
		quotaAfter      = 5000
		burstBefore     = 2000
		periodBefore    = 10000
		periodAfter     = 7000
		rtRuntimeBefore = 8000
		rtRuntimeAfter  = 5000
		rtPeriodBefore  = 10000
		rtPeriodAfter   = 7000
	)
	// CpuBurst is a *uint64, so it needs an addressable variable.
	burstAfter := uint64(1000)

	writeFileContents(t, path, map[string]string{
		"cpu.cfs_quota_us":  strconv.Itoa(quotaBefore),
		"cpu.cfs_burst_us":  strconv.Itoa(burstBefore),
		"cpu.cfs_period_us": strconv.Itoa(periodBefore),
		"cpu.rt_runtime_us": strconv.Itoa(rtRuntimeBefore),
		"cpu.rt_period_us":  strconv.Itoa(rtPeriodBefore),
	})

	cpu := &CpuGroup{}
	r := &cgroups.Resources{
		CpuQuota:     quotaAfter,
		CpuBurst:     &burstAfter,
		CpuPeriod:    periodAfter,
		CpuRtRuntime: rtRuntimeAfter,
		CpuRtPeriod:  rtPeriodAfter,
	}
	if err := cpu.Set(path, r); err != nil {
		t.Fatal(err)
	}

	// Every file must now contain its corresponding "after" value.
	for _, check := range []struct {
		file string
		want uint64
		msg  string
	}{
		{"cpu.cfs_quota_us", quotaAfter, "Got the wrong value, set cpu.cfs_quota_us failed."},
		{"cpu.cfs_burst_us", burstAfter, "Got the wrong value, set cpu.cfs_burst_us failed."},
		{"cpu.cfs_period_us", periodAfter, "Got the wrong value, set cpu.cfs_period_us failed."},
		{"cpu.rt_runtime_us", rtRuntimeAfter, "Got the wrong value, set cpu.rt_runtime_us failed."},
		{"cpu.rt_period_us", rtPeriodAfter, "Got the wrong value, set cpu.rt_period_us failed."},
	} {
		got, err := fscommon.GetCgroupParamUint(path, check.file)
		if err != nil {
			t.Fatal(err)
		}
		if got != check.want {
			t.Fatal(check.msg)
		}
	}
}
// TestCpuStats checks that GetStats parses cpu.stat into ThrottlingData,
// including a throttled_time value at the uint64 maximum.
func TestCpuStats(t *testing.T) {
	path := tempDir(t, "cpu")
	const (
		nrPeriods     = 2000
		nrThrottled   = 200
		throttledTime = uint64(18446744073709551615) // max uint64
	)
	writeFileContents(t, path, map[string]string{
		"cpu.stat": fmt.Sprintf("nr_periods %d\nnr_throttled %d\nthrottled_time %d\n",
			nrPeriods, nrThrottled, throttledTime),
	})

	st := *cgroups.NewStats()
	if err := (&CpuGroup{}).GetStats(path, &st); err != nil {
		t.Fatal(err)
	}

	want := cgroups.ThrottlingData{
		Periods:          nrPeriods,
		ThrottledPeriods: nrThrottled,
		ThrottledTime:    throttledTime,
	}
	expectThrottlingDataEquals(t, want, st.CpuStats.ThrottlingData)
}
// TestNoCpuStatFile verifies that GetStats tolerates a missing cpu.stat
// file instead of returning an error.
func TestNoCpuStatFile(t *testing.T) {
	path := tempDir(t, "cpu")
	st := *cgroups.NewStats()
	if err := (&CpuGroup{}).GetStats(path, &st); err != nil {
		t.Fatal("Expected not to fail, but did")
	}
}
// TestInvalidCpuStat checks that GetStats returns an error when cpu.stat
// contains a non-numeric value ("fortytwo" for throttled_time).
func TestInvalidCpuStat(t *testing.T) {
	path := tempDir(t, "cpu")
	writeFileContents(t, path, map[string]string{
		"cpu.stat": "nr_periods 2000\nnr_throttled 200\nthrottled_time fortytwo",
	})

	st := *cgroups.NewStats()
	if err := (&CpuGroup{}).GetStats(path, &st); err == nil {
		t.Fatal("Expected failed stat parsing.")
	}
}
// TestCpuSetRtSchedAtApply checks that Apply both writes the RT scheduler
// settings (cpu.rt_runtime_us, cpu.rt_period_us) and puts the supplied pid
// into cgroup.procs.
func TestCpuSetRtSchedAtApply(t *testing.T) {
	path := tempDir(t, "cpu")
	const (
		rtRuntimeBefore = 0
		rtRuntimeAfter  = 5000
		rtPeriodBefore  = 0
		rtPeriodAfter   = 7000
	)
	writeFileContents(t, path, map[string]string{
		"cpu.rt_runtime_us": strconv.Itoa(rtRuntimeBefore),
		"cpu.rt_period_us":  strconv.Itoa(rtPeriodBefore),
	})

	cpu := &CpuGroup{}
	r := &cgroups.Resources{
		CpuRtRuntime: rtRuntimeAfter,
		CpuRtPeriod:  rtPeriodAfter,
	}
	if err := cpu.Apply(path, r, 1234); err != nil {
		t.Fatal(err)
	}

	if got, err := fscommon.GetCgroupParamUint(path, "cpu.rt_runtime_us"); err != nil {
		t.Fatal(err)
	} else if got != rtRuntimeAfter {
		t.Fatal("Got the wrong value, set cpu.rt_runtime_us failed.")
	}
	if got, err := fscommon.GetCgroupParamUint(path, "cpu.rt_period_us"); err != nil {
		t.Fatal(err)
	} else if got != rtPeriodAfter {
		t.Fatal("Got the wrong value, set cpu.rt_period_us failed.")
	}
	if pid, err := fscommon.GetCgroupParamUint(path, "cgroup.procs"); err != nil {
		t.Fatal(err)
	} else if pid != 1234 {
		t.Fatal("Got the wrong value, set cgroup.procs failed.")
	}
}

View File

@@ -1,112 +0,0 @@
package fs
import (
"reflect"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// Fixture contents for the cpuacct controller files used by the tests below.
const (
// cpuacct.usage: total CPU time in nanoseconds.
cpuAcctUsageContents = "12262454190222160"
// cpuacct.usage_percpu: per-CPU totals in nanoseconds (8 CPUs).
cpuAcctUsagePerCPUContents = "1564936537989058 1583937096487821 1604195415465681 1596445226820187 1481069084155629 1478735613864327 1477610593414743 1476362015778086"
// cpuacct.stat: user/system time in clock ticks.
cpuAcctStatContents = "user 452278264\nsystem 291429664"
// cpuacct.usage_all: per-CPU user and system usage in nanoseconds.
cpuAcctUsageAll = `cpu user system
0 962250696038415 637727786389114
1 981956408513304 638197595421064
2 1002658817529022 638956774598358
3 994937703492523 637985531181620
4 874843781648690 638837766495476
5 872544369885276 638763309884944
6 870104915696359 640081778921247
7 870202363887496 638716766259495
`
)
// TestCpuacctStats verifies that GetStats parses all four cpuacct files,
// including the per-CPU user/system breakdown from cpuacct.usage_all.
func TestCpuacctStats(t *testing.T) {
path := tempDir(t, "cpuacct")
writeFileContents(t, path, map[string]string{
"cpuacct.usage": cpuAcctUsageContents,
"cpuacct.usage_percpu": cpuAcctUsagePerCPUContents,
"cpuacct.stat": cpuAcctStatContents,
"cpuacct.usage_all": cpuAcctUsageAll,
})
cpuacct := &CpuacctGroup{}
actualStats := *cgroups.NewStats()
err := cpuacct.GetStats(path, &actualStats)
if err != nil {
t.Fatal(err)
}
expectedStats := cgroups.CpuUsage{
TotalUsage: uint64(12262454190222160),
PercpuUsage: []uint64{
1564936537989058, 1583937096487821, 1604195415465681, 1596445226820187,
1481069084155629, 1478735613864327, 1477610593414743, 1476362015778086,
},
// Kernel/user mode slices come from the second/third columns of
// cpuacct.usage_all.
PercpuUsageInKernelmode: []uint64{
637727786389114, 638197595421064, 638956774598358, 637985531181620,
638837766495476, 638763309884944, 640081778921247, 638716766259495,
},
PercpuUsageInUsermode: []uint64{
962250696038415, 981956408513304, 1002658817529022, 994937703492523,
874843781648690, 872544369885276, 870104915696359, 870202363887496,
},
// cpuacct.stat values are in clock ticks; converted to nanoseconds.
UsageInKernelmode: (uint64(291429664) * nsInSec) / clockTicks,
UsageInUsermode: (uint64(452278264) * nsInSec) / clockTicks,
}
if !reflect.DeepEqual(expectedStats, actualStats.CpuStats.CpuUsage) {
t.Errorf("Expected CPU usage %#v but found %#v\n",
expectedStats, actualStats.CpuStats.CpuUsage)
}
}
// TestCpuacctStatsWithoutUsageAll verifies GetStats when cpuacct.usage_all
// is absent: the per-CPU kernel/user mode slices are expected to be empty
// (non-nil), while the other fields are still populated.
func TestCpuacctStatsWithoutUsageAll(t *testing.T) {
path := tempDir(t, "cpuacct")
writeFileContents(t, path, map[string]string{
"cpuacct.usage": cpuAcctUsageContents,
"cpuacct.usage_percpu": cpuAcctUsagePerCPUContents,
"cpuacct.stat": cpuAcctStatContents,
})
cpuacct := &CpuacctGroup{}
actualStats := *cgroups.NewStats()
err := cpuacct.GetStats(path, &actualStats)
if err != nil {
t.Fatal(err)
}
expectedStats := cgroups.CpuUsage{
TotalUsage: uint64(12262454190222160),
PercpuUsage: []uint64{
1564936537989058, 1583937096487821, 1604195415465681, 1596445226820187,
1481069084155629, 1478735613864327, 1477610593414743, 1476362015778086,
},
// Empty (not nil) so DeepEqual matches what GetStats produces.
PercpuUsageInKernelmode: []uint64{},
PercpuUsageInUsermode: []uint64{},
UsageInKernelmode: (uint64(291429664) * nsInSec) / clockTicks,
UsageInUsermode: (uint64(452278264) * nsInSec) / clockTicks,
}
if !reflect.DeepEqual(expectedStats, actualStats.CpuStats.CpuUsage) {
t.Errorf("Expected CPU usage %#v but found %#v\n",
expectedStats, actualStats.CpuStats.CpuUsage)
}
}
// BenchmarkGetCpuUsageBreakdown measures parsing of a fixed cpuacct.stat
// fixture by getCpuUsageBreakdown.
func BenchmarkGetCpuUsageBreakdown(b *testing.B) {
	path := tempDir(b, "cpuacct")
	writeFileContents(b, path, map[string]string{
		"cpuacct.stat": cpuAcctStatContents,
	})
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, _, err := getCpuUsageBreakdown(path); err != nil {
			b.Fatal(err)
		}
	}
}

View File

@@ -1,241 +0,0 @@
package fs
import (
"reflect"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
// Fixture contents for cpuset controller files; trailing newlines are kept
// as written so the parser is exercised with realistic file contents.
const (
cpus = "0-2,7,12-14\n"
cpuExclusive = "1\n"
mems = "1-4,6,9\n"
memHardwall = "0\n"
memExclusive = "0\n"
memoryMigrate = "1\n"
memorySpreadPage = "0\n"
memorySpeadSlab = "1\n"
memoryPressure = "34377\n"
schedLoadBalance = "1\n"
schedRelaxDomainLevel = "-1\n"
)
// cpusetTestFiles maps cpuset cgroup file names to the fixture contents
// above; it is used to populate a fake cpuset cgroup directory.
var cpusetTestFiles = map[string]string{
"cpuset.cpus": cpus,
"cpuset.cpu_exclusive": cpuExclusive,
"cpuset.mems": mems,
"cpuset.mem_hardwall": memHardwall,
"cpuset.mem_exclusive": memExclusive,
"cpuset.memory_migrate": memoryMigrate,
"cpuset.memory_spread_page": memorySpreadPage,
"cpuset.memory_spread_slab": memorySpeadSlab,
"cpuset.memory_pressure": memoryPressure,
"cpuset.sched_load_balance": schedLoadBalance,
"cpuset.sched_relax_domain_level": schedRelaxDomainLevel,
}
// TestCPUSetSetCpus checks that Set writes CpusetCpus to cpuset.cpus.
func TestCPUSetSetCpus(t *testing.T) {
	path := tempDir(t, "cpuset")
	const (
		cpusBefore = "0"
		cpusAfter  = "1-3"
	)
	writeFileContents(t, path, map[string]string{
		"cpuset.cpus": cpusBefore,
	})

	cpuset := &CpusetGroup{}
	if err := cpuset.Set(path, &cgroups.Resources{CpusetCpus: cpusAfter}); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamString(path, "cpuset.cpus")
	if err != nil {
		t.Fatal(err)
	}
	if got != cpusAfter {
		t.Fatal("Got the wrong value, set cpuset.cpus failed.")
	}
}
// TestCPUSetSetMems checks that Set writes CpusetMems to cpuset.mems.
func TestCPUSetSetMems(t *testing.T) {
	path := tempDir(t, "cpuset")
	const (
		memsBefore = "0"
		memsAfter  = "1"
	)
	writeFileContents(t, path, map[string]string{
		"cpuset.mems": memsBefore,
	})

	cpuset := &CpusetGroup{}
	if err := cpuset.Set(path, &cgroups.Resources{CpusetMems: memsAfter}); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamString(path, "cpuset.mems")
	if err != nil {
		t.Fatal(err)
	}
	if got != memsAfter {
		t.Fatal("Got the wrong value, set cpuset.mems failed.")
	}
}
// TestCPUSetStatsCorrect verifies that GetStats parses every cpuset file
// from the fixture into CPUSetStats, including expansion of range lists
// like "0-2,7,12-14" into individual CPU/memory node numbers.
func TestCPUSetStatsCorrect(t *testing.T) {
path := tempDir(t, "cpuset")
writeFileContents(t, path, cpusetTestFiles)
cpuset := &CpusetGroup{}
actualStats := *cgroups.NewStats()
err := cpuset.GetStats(path, &actualStats)
if err != nil {
t.Fatal(err)
}
expectedStats := cgroups.CPUSetStats{
CPUs: []uint16{0, 1, 2, 7, 12, 13, 14},
CPUExclusive: 1,
Mems: []uint16{1, 2, 3, 4, 6, 9},
MemoryMigrate: 1,
MemHardwall: 0,
MemExclusive: 0,
MemorySpreadPage: 0,
MemorySpreadSlab: 1,
MemoryPressure: 34377,
SchedLoadBalance: 1,
SchedRelaxDomainLevel: -1,
}
if !reflect.DeepEqual(expectedStats, actualStats.CPUSetStats) {
t.Fatalf("Expected Cpuset stats usage %#v but found %#v",
expectedStats, actualStats.CPUSetStats)
}
}
// TestCPUSetStatsMissingFiles exercises GetStats error handling for the
// cpuset controller: a missing optional file must NOT cause an error
// (removeFile=true cases), while an empty or corrupted cpuset.cpus/mems
// file MUST cause an error (removeFile=false cases).
func TestCPUSetStatsMissingFiles(t *testing.T) {
for _, testCase := range []struct {
desc string
filename, contents string
// removeFile selects the scenario: true = delete the file and expect
// success; false = overwrite it with `contents` and expect an error.
removeFile bool
}{
{
desc: "empty cpus file",
filename: "cpuset.cpus",
contents: "",
removeFile: false,
},
{
desc: "empty mems file",
filename: "cpuset.mems",
contents: "",
removeFile: false,
},
{
desc: "corrupted cpus file",
filename: "cpuset.cpus",
contents: "0-3,*4^2",
removeFile: false,
},
{
desc: "corrupted mems file",
filename: "cpuset.mems",
contents: "0,1,2-5,8-7",
removeFile: false,
},
{
desc: "missing cpu_exclusive file",
filename: "cpuset.cpu_exclusive",
contents: "",
removeFile: true,
},
{
desc: "missing memory_migrate file",
filename: "cpuset.memory_migrate",
contents: "",
removeFile: true,
},
{
desc: "missing mem_hardwall file",
filename: "cpuset.mem_hardwall",
contents: "",
removeFile: true,
},
{
desc: "missing mem_exclusive file",
filename: "cpuset.mem_exclusive",
contents: "",
removeFile: true,
},
{
desc: "missing memory_spread_page file",
filename: "cpuset.memory_spread_page",
contents: "",
removeFile: true,
},
{
desc: "missing memory_spread_slab file",
filename: "cpuset.memory_spread_slab",
contents: "",
removeFile: true,
},
{
desc: "missing memory_pressure file",
filename: "cpuset.memory_pressure",
contents: "",
removeFile: true,
},
{
desc: "missing sched_load_balance file",
filename: "cpuset.sched_load_balance",
contents: "",
removeFile: true,
},
{
desc: "missing sched_relax_domain_level file",
filename: "cpuset.sched_relax_domain_level",
contents: "",
removeFile: true,
},
} {
t.Run(testCase.desc, func(t *testing.T) {
path := tempDir(t, "cpuset")
// Start from a full copy of the fixture, then mutate one file.
tempCpusetTestFiles := map[string]string{}
for i, v := range cpusetTestFiles {
tempCpusetTestFiles[i] = v
}
if testCase.removeFile {
delete(tempCpusetTestFiles, testCase.filename)
writeFileContents(t, path, tempCpusetTestFiles)
cpuset := &CpusetGroup{}
actualStats := *cgroups.NewStats()
err := cpuset.GetStats(path, &actualStats)
if err != nil {
t.Errorf("failed unexpectedly: %q", err)
}
} else {
tempCpusetTestFiles[testCase.filename] = testCase.contents
writeFileContents(t, path, tempCpusetTestFiles)
cpuset := &CpusetGroup{}
actualStats := *cgroups.NewStats()
err := cpuset.GetStats(path, &actualStats)
if err == nil {
t.Error("failed to return expected error")
}
}
})
}
}

View File

@@ -1,46 +0,0 @@
package fs
import (
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
// TestFreezerSetState verifies that Set moves a FROZEN cgroup back to THAWED
// via the freezer.state file.
func TestFreezerSetState(t *testing.T) {
	dir := tempDir(t, "freezer")
	writeFileContents(t, dir, map[string]string{
		"freezer.state": string(cgroups.Frozen),
	})

	res := &cgroups.Resources{Freezer: cgroups.Thawed}
	if err := (&FreezerGroup{}).Set(dir, res); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamString(dir, "freezer.state")
	if err != nil {
		t.Fatal(err)
	}
	if got != string(cgroups.Thawed) {
		t.Fatal("Got the wrong value, set freezer.state failed.")
	}
}
// TestFreezerSetInvalidState verifies that Set rejects an unknown freezer state.
func TestFreezerSetInvalidState(t *testing.T) {
	dir := tempDir(t, "freezer")

	res := &cgroups.Resources{Freezer: cgroups.FreezerState("Invalid")}
	if err := (&FreezerGroup{}).Set(dir, res); err == nil {
		t.Fatal("Failed to return invalid argument error")
	}
}

View File

@@ -1,49 +0,0 @@
package fs
import (
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// BenchmarkGetStats measures the cost of collecting all cgroup v1 stats for a
// freshly created, empty cgroup on the real cgroupfs (not the test mock).
func BenchmarkGetStats(b *testing.B) {
	if cgroups.IsCgroup2UnifiedMode() {
		b.Skip("cgroup v2 is not supported")
	}
	// Unset TestMode as we work with real cgroupfs here,
	// and we want OpenFile to perform the fstype check.
	cgroups.TestMode = false
	defer func() {
		cgroups.TestMode = true
	}()
	cg := &cgroups.Cgroup{
		Path:      "/some/kind/of/a/path/here",
		Resources: &cgroups.Resources{},
	}
	m, err := NewManager(cg, nil)
	if err != nil {
		b.Fatal(err)
	}
	err = m.Apply(-1)
	if err != nil {
		b.Fatal(err)
	}
	defer func() {
		_ = m.Destroy()
	}()
	var st *cgroups.Stats
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		st, err = m.GetStats()
		if err != nil {
			b.Fatal(err)
		}
	}
	// Sanity check: the cgroup has no tasks, so total CPU usage must be zero.
	if st.CpuStats.CpuUsage.TotalUsage != 0 {
		b.Fatalf("stats: %+v", st)
	}
}

View File

@@ -1,176 +0,0 @@
package fs
import (
"fmt"
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
// Canned file contents used by the hugetlb tests below.
const (
	hugetlbUsageContents    = "128\n"
	hugetlbMaxUsageContents = "256\n"
	hugetlbFailcnt          = "100\n"
)

// Format strings for per-page-size hugetlb control file names;
// the %s is filled in with a page size string (e.g. "2MB").
const (
	usage    = "hugetlb.%s.usage_in_bytes"
	limit    = "hugetlb.%s.limit_in_bytes"
	maxUsage = "hugetlb.%s.max_usage_in_bytes"
	failcnt  = "hugetlb.%s.failcnt"

	rsvdUsage    = "hugetlb.%s.rsvd.usage_in_bytes"
	rsvdLimit    = "hugetlb.%s.rsvd.limit_in_bytes"
	rsvdMaxUsage = "hugetlb.%s.rsvd.max_usage_in_bytes"
	rsvdFailcnt  = "hugetlb.%s.rsvd.failcnt"
)
// TestHugetlbSetHugetlb verifies that Set updates both the plain and the
// rsvd limit files for every supported huge page size.
func TestHugetlbSetHugetlb(t *testing.T) {
	path := tempDir(t, "hugetlb")

	const (
		hugetlbBefore = 256
		hugetlbAfter  = 512
	)

	for _, pageSize := range cgroups.HugePageSizes() {
		writeFileContents(t, path, map[string]string{
			fmt.Sprintf(limit, pageSize): strconv.Itoa(hugetlbBefore),
		})
	}

	r := &cgroups.Resources{}
	for _, pageSize := range cgroups.HugePageSizes() {
		r.HugetlbLimit = []*cgroups.HugepageLimit{
			{
				Pagesize: pageSize,
				Limit:    hugetlbAfter,
			},
		}
		hugetlb := &HugetlbGroup{}
		if err := hugetlb.Set(path, r); err != nil {
			t.Fatal(err)
		}
	}

	// Both the plain and the rsvd limit files must now hold the new limit.
	for _, pageSize := range cgroups.HugePageSizes() {
		for _, f := range []string{limit, rsvdLimit} {
			limit := fmt.Sprintf(f, pageSize)
			value, err := fscommon.GetCgroupParamUint(path, limit)
			if err != nil {
				t.Fatal(err)
			}
			if value != hugetlbAfter {
				t.Fatalf("Set %s failed. Expected: %v, Got: %v", limit, hugetlbAfter, value)
			}
		}
	}
}
// TestHugetlbStats verifies that GetStats parses the usage, max-usage and
// failcnt files for every supported huge page size.
func TestHugetlbStats(t *testing.T) {
	dir := tempDir(t, "hugetlb")
	for _, ps := range cgroups.HugePageSizes() {
		writeFileContents(t, dir, map[string]string{
			fmt.Sprintf(usage, ps):    hugetlbUsageContents,
			fmt.Sprintf(maxUsage, ps): hugetlbMaxUsageContents,
			fmt.Sprintf(failcnt, ps):  hugetlbFailcnt,
		})
	}

	stats := *cgroups.NewStats()
	if err := (&HugetlbGroup{}).GetStats(dir, &stats); err != nil {
		t.Fatal(err)
	}

	// Expected values correspond to the canned file contents above.
	want := cgroups.HugetlbStats{Usage: 128, MaxUsage: 256, Failcnt: 100}
	for _, ps := range cgroups.HugePageSizes() {
		expectHugetlbStatEquals(t, want, stats.HugetlbStats[ps])
	}
}
// TestHugetlbRStatsRsvd is the rsvd ("reserved") variant of TestHugetlbStats:
// it verifies GetStats parses the hugetlb.<size>.rsvd.* files.
// NOTE(review): "RStatsRsvd" looks like a typo for "StatsRsvd"; renaming would
// change the test name reported by `go test`, so it is left as is.
func TestHugetlbRStatsRsvd(t *testing.T) {
	path := tempDir(t, "hugetlb")
	for _, pageSize := range cgroups.HugePageSizes() {
		writeFileContents(t, path, map[string]string{
			fmt.Sprintf(rsvdUsage, pageSize):    hugetlbUsageContents,
			fmt.Sprintf(rsvdMaxUsage, pageSize): hugetlbMaxUsageContents,
			fmt.Sprintf(rsvdFailcnt, pageSize):  hugetlbFailcnt,
		})
	}

	hugetlb := &HugetlbGroup{}
	actualStats := *cgroups.NewStats()
	err := hugetlb.GetStats(path, &actualStats)
	if err != nil {
		t.Fatal(err)
	}
	// Expected values correspond to the canned file contents above.
	expectedStats := cgroups.HugetlbStats{Usage: 128, MaxUsage: 256, Failcnt: 100}
	for _, pageSize := range cgroups.HugePageSizes() {
		expectHugetlbStatEquals(t, expectedStats, actualStats.HugetlbStats[pageSize])
	}
}
// TestHugetlbStatsNoUsageFile verifies that GetStats fails when the
// usage_in_bytes file is missing.
//
// Fix: the max-usage file was previously written under the raw maxUsage
// format string (a literal "hugetlb.%s.max_usage_in_bytes" filename);
// write the properly formatted per-page-size name instead, matching the
// sibling TestHugetlbStatsNoMaxUsageFile.
func TestHugetlbStatsNoUsageFile(t *testing.T) {
	path := tempDir(t, "hugetlb")
	for _, pageSize := range cgroups.HugePageSizes() {
		// Only max usage is present; the usage file is deliberately absent.
		writeFileContents(t, path, map[string]string{
			fmt.Sprintf(maxUsage, pageSize): hugetlbMaxUsageContents,
		})
	}
	hugetlb := &HugetlbGroup{}
	actualStats := *cgroups.NewStats()
	err := hugetlb.GetStats(path, &actualStats)
	if err == nil {
		t.Fatal("Expected failure")
	}
}
// TestHugetlbStatsNoMaxUsageFile verifies that GetStats fails when the
// max_usage_in_bytes file is missing.
func TestHugetlbStatsNoMaxUsageFile(t *testing.T) {
	dir := tempDir(t, "hugetlb")
	for _, ps := range cgroups.HugePageSizes() {
		writeFileContents(t, dir, map[string]string{
			fmt.Sprintf(usage, ps): hugetlbUsageContents,
		})
	}

	stats := *cgroups.NewStats()
	if err := (&HugetlbGroup{}).GetStats(dir, &stats); err == nil {
		t.Fatal("Expected failure")
	}
}
// TestHugetlbStatsBadUsageFile verifies that GetStats fails when the
// usage_in_bytes file contains unparsable data.
//
// Fix: the max-usage file was previously written under the raw maxUsage
// format string (a filename containing a literal "%s"); format it with the
// page size like the usage file on the line above.
func TestHugetlbStatsBadUsageFile(t *testing.T) {
	path := tempDir(t, "hugetlb")
	for _, pageSize := range cgroups.HugePageSizes() {
		writeFileContents(t, path, map[string]string{
			fmt.Sprintf(usage, pageSize):    "bad",
			fmt.Sprintf(maxUsage, pageSize): hugetlbMaxUsageContents,
		})
	}
	hugetlb := &HugetlbGroup{}
	actualStats := *cgroups.NewStats()
	err := hugetlb.GetStats(path, &actualStats)
	if err == nil {
		t.Fatal("Expected failure")
	}
}
// TestHugetlbStatsBadMaxUsageFile verifies that GetStats fails when the
// max_usage_in_bytes file contains unparsable data.
//
// Fix: both files were previously written under the raw usage/maxUsage
// format strings (filenames containing a literal "%s"); write per-page-size
// names as the other hugetlb tests do.
func TestHugetlbStatsBadMaxUsageFile(t *testing.T) {
	path := tempDir(t, "hugetlb")
	for _, pageSize := range cgroups.HugePageSizes() {
		writeFileContents(t, path, map[string]string{
			fmt.Sprintf(usage, pageSize):    hugetlbUsageContents,
			fmt.Sprintf(maxUsage, pageSize): "bad",
		})
	}
	hugetlb := &HugetlbGroup{}
	actualStats := *cgroups.NewStats()
	err := hugetlb.GetStats(path, &actualStats)
	if err == nil {
		t.Fatal("Expected failure")
	}
}

View File

@@ -1,506 +0,0 @@
package fs
import (
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
// Canned cgroup v1 memory controller file contents used by the tests below.
const (
	memoryStatContents = `cache 512
rss 1024`
	memoryUsageContents        = "2048\n"
	memoryMaxUsageContents     = "4096\n"
	memoryFailcnt              = "100\n"
	memoryLimitContents        = "8192\n"
	memoryUseHierarchyContents = "1\n"

	// Sample memory.numa_stat with hierarchical_* lines.
	memoryNUMAStatContents = `total=44611 N0=32631 N1=7501 N2=1982 N3=2497
file=44428 N0=32614 N1=7335 N2=1982 N3=2497
anon=183 N0=17 N1=166 N2=0 N3=0
unevictable=0 N0=0 N1=0 N2=0 N3=0
hierarchical_total=768133 N0=509113 N1=138887 N2=20464 N3=99669
hierarchical_file=722017 N0=496516 N1=119997 N2=20181 N3=85323
hierarchical_anon=46096 N0=12597 N1=18890 N2=283 N3=14326
hierarchical_unevictable=20 N0=0 N1=0 N2=0 N3=20
`
	// Same sample without the hierarchical_* lines.
	memoryNUMAStatNoHierarchyContents = `total=44611 N0=32631 N1=7501 N2=1982 N3=2497
file=44428 N0=32614 N1=7335 N2=1982 N3=2497
anon=183 N0=17 N1=166 N2=0 N3=0
unevictable=0 N0=0 N1=0 N2=0 N3=0
`
	// Some custom kernels has extra fields that should be ignored
	memoryNUMAStatExtraContents = `numa_locality 0 0 0 0 0 0 0 0 0 0
numa_exectime 0
whatever=100 N0=0
`
)
// TestMemorySetMemory verifies that Set writes both the hard limit
// (memory.limit_in_bytes) and the soft limit (memory.soft_limit_in_bytes).
func TestMemorySetMemory(t *testing.T) {
	path := tempDir(t, "memory")

	const (
		memoryBefore      = 314572800 // 300M
		memoryAfter       = 524288000 // 500M
		reservationBefore = 209715200 // 200M
		reservationAfter  = 314572800 // 300M
	)

	writeFileContents(t, path, map[string]string{
		"memory.limit_in_bytes":      strconv.Itoa(memoryBefore),
		"memory.soft_limit_in_bytes": strconv.Itoa(reservationBefore),
	})

	r := &cgroups.Resources{
		Memory:            memoryAfter,
		MemoryReservation: reservationAfter,
	}
	memory := &MemoryGroup{}
	if err := memory.Set(path, r); err != nil {
		t.Fatal(err)
	}

	value, err := fscommon.GetCgroupParamUint(path, "memory.limit_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	if value != memoryAfter {
		t.Fatal("Got the wrong value, set memory.limit_in_bytes failed.")
	}

	value, err = fscommon.GetCgroupParamUint(path, "memory.soft_limit_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	if value != reservationAfter {
		t.Fatal("Got the wrong value, set memory.soft_limit_in_bytes failed.")
	}
}
// TestMemorySetMemoryswap verifies that Set writes memory.memsw.limit_in_bytes.
func TestMemorySetMemoryswap(t *testing.T) {
	dir := tempDir(t, "memory")

	const (
		before = 314572800 // 300M
		after  = 524288000 // 500M
	)
	writeFileContents(t, dir, map[string]string{
		"memory.memsw.limit_in_bytes": strconv.Itoa(before),
	})

	res := &cgroups.Resources{MemorySwap: after}
	if err := (&MemoryGroup{}).Set(dir, res); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamUint(dir, "memory.memsw.limit_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	if got != after {
		t.Fatal("Got the wrong value, set memory.memsw.limit_in_bytes failed.")
	}
}
// TestMemorySetMemoryLargerThanSwap verifies that Set correctly orders the
// writes when the new memory limit is larger than the current swap limit
// (swap must be raised before memory to keep memsw >= memory at all times).
func TestMemorySetMemoryLargerThanSwap(t *testing.T) {
	path := tempDir(t, "memory")

	const (
		memoryBefore     = 314572800 // 300M
		memoryswapBefore = 524288000 // 500M
		memoryAfter      = 629145600 // 600M
		memoryswapAfter  = 838860800 // 800M
	)

	writeFileContents(t, path, map[string]string{
		"memory.limit_in_bytes":       strconv.Itoa(memoryBefore),
		"memory.memsw.limit_in_bytes": strconv.Itoa(memoryswapBefore),
		// Set will call getMemoryData when memory and swap memory are
		// both set, fake these fields so we don't get error.
		"memory.usage_in_bytes":     "0",
		"memory.max_usage_in_bytes": "0",
		"memory.failcnt":            "0",
	})

	r := &cgroups.Resources{
		Memory:     memoryAfter,
		MemorySwap: memoryswapAfter,
	}
	memory := &MemoryGroup{}
	if err := memory.Set(path, r); err != nil {
		t.Fatal(err)
	}

	value, err := fscommon.GetCgroupParamUint(path, "memory.limit_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	if value != memoryAfter {
		t.Fatal("Got the wrong value, set memory.limit_in_bytes failed.")
	}
	value, err = fscommon.GetCgroupParamUint(path, "memory.memsw.limit_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	if value != memoryswapAfter {
		t.Fatal("Got the wrong value, set memory.memsw.limit_in_bytes failed.")
	}
}
// TestMemorySetSwapSmallerThanMemory verifies the opposite ordering case:
// when both limits shrink, memory must be lowered before swap so that
// memsw >= memory holds throughout.
func TestMemorySetSwapSmallerThanMemory(t *testing.T) {
	path := tempDir(t, "memory")

	const (
		memoryBefore     = 629145600 // 600M
		memoryswapBefore = 838860800 // 800M
		memoryAfter      = 314572800 // 300M
		memoryswapAfter  = 524288000 // 500M
	)

	writeFileContents(t, path, map[string]string{
		"memory.limit_in_bytes":       strconv.Itoa(memoryBefore),
		"memory.memsw.limit_in_bytes": strconv.Itoa(memoryswapBefore),
	})

	r := &cgroups.Resources{
		Memory:     memoryAfter,
		MemorySwap: memoryswapAfter,
	}
	memory := &MemoryGroup{}
	if err := memory.Set(path, r); err != nil {
		t.Fatal(err)
	}

	value, err := fscommon.GetCgroupParamUint(path, "memory.limit_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	if value != memoryAfter {
		t.Fatalf("Got the wrong value (%d != %d), set memory.limit_in_bytes failed", value, memoryAfter)
	}
	value, err = fscommon.GetCgroupParamUint(path, "memory.memsw.limit_in_bytes")
	if err != nil {
		t.Fatal(err)
	}
	if value != memoryswapAfter {
		t.Fatalf("Got the wrong value (%d != %d), set memory.memsw.limit_in_bytes failed", value, memoryswapAfter)
	}
}
// TestMemorySetMemorySwappinessDefault verifies that Set overrides the
// kernel-default swappiness (60) with the configured value.
func TestMemorySetMemorySwappinessDefault(t *testing.T) {
	dir := tempDir(t, "memory")

	writeFileContents(t, dir, map[string]string{
		"memory.swappiness": strconv.Itoa(60), // kernel default is 60
	})

	want := uint64(0)
	res := &cgroups.Resources{MemorySwappiness: &want}
	if err := (&MemoryGroup{}).Set(dir, res); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamUint(dir, "memory.swappiness")
	if err != nil {
		t.Fatal(err)
	}
	if got != want {
		t.Fatalf("Got the wrong value (%d), set memory.swappiness = %d failed.", got, want)
	}
}
// TestMemoryStats verifies that GetStats parses the full set of memory
// controller files (usage, memsw, kmem, stat, use_hierarchy and numa_stat,
// including unknown extra numa_stat lines, which must be ignored).
func TestMemoryStats(t *testing.T) {
	path := tempDir(t, "memory")
	writeFileContents(t, path, map[string]string{
		"memory.stat":                     memoryStatContents,
		"memory.usage_in_bytes":           memoryUsageContents,
		"memory.limit_in_bytes":           memoryLimitContents,
		"memory.max_usage_in_bytes":       memoryMaxUsageContents,
		"memory.failcnt":                  memoryFailcnt,
		"memory.memsw.usage_in_bytes":     memoryUsageContents,
		"memory.memsw.max_usage_in_bytes": memoryMaxUsageContents,
		"memory.memsw.failcnt":            memoryFailcnt,
		"memory.memsw.limit_in_bytes":     memoryLimitContents,
		"memory.kmem.usage_in_bytes":      memoryUsageContents,
		"memory.kmem.max_usage_in_bytes":  memoryMaxUsageContents,
		"memory.kmem.failcnt":             memoryFailcnt,
		"memory.kmem.limit_in_bytes":      memoryLimitContents,
		"memory.use_hierarchy":            memoryUseHierarchyContents,
		"memory.numa_stat":                memoryNUMAStatContents + memoryNUMAStatExtraContents,
	})

	memory := &MemoryGroup{}
	actualStats := *cgroups.NewStats()
	err := memory.GetStats(path, &actualStats)
	if err != nil {
		t.Fatal(err)
	}
	// Expected values correspond to the canned file contents above.
	expectedStats := cgroups.MemoryStats{
		Cache:         512,
		Usage:         cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192},
		SwapUsage:     cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192},
		SwapOnlyUsage: cgroups.MemoryData{Usage: 0, MaxUsage: 0, Failcnt: 0, Limit: 0},
		KernelUsage:   cgroups.MemoryData{Usage: 2048, MaxUsage: 4096, Failcnt: 100, Limit: 8192},
		Stats:         map[string]uint64{"cache": 512, "rss": 1024},
		UseHierarchy:  true,
		PageUsageByNUMA: cgroups.PageUsageByNUMA{
			PageUsageByNUMAInner: cgroups.PageUsageByNUMAInner{
				Total:       cgroups.PageStats{Total: 44611, Nodes: map[uint8]uint64{0: 32631, 1: 7501, 2: 1982, 3: 2497}},
				File:        cgroups.PageStats{Total: 44428, Nodes: map[uint8]uint64{0: 32614, 1: 7335, 2: 1982, 3: 2497}},
				Anon:        cgroups.PageStats{Total: 183, Nodes: map[uint8]uint64{0: 17, 1: 166, 2: 0, 3: 0}},
				Unevictable: cgroups.PageStats{Total: 0, Nodes: map[uint8]uint64{0: 0, 1: 0, 2: 0, 3: 0}},
			},
			Hierarchical: cgroups.PageUsageByNUMAInner{
				Total:       cgroups.PageStats{Total: 768133, Nodes: map[uint8]uint64{0: 509113, 1: 138887, 2: 20464, 3: 99669}},
				File:        cgroups.PageStats{Total: 722017, Nodes: map[uint8]uint64{0: 496516, 1: 119997, 2: 20181, 3: 85323}},
				Anon:        cgroups.PageStats{Total: 46096, Nodes: map[uint8]uint64{0: 12597, 1: 18890, 2: 283, 3: 14326}},
				Unevictable: cgroups.PageStats{Total: 20, Nodes: map[uint8]uint64{0: 0, 1: 0, 2: 0, 3: 20}},
			},
		},
	}
	expectMemoryStatEquals(t, expectedStats, actualStats.MemoryStats)
}
// TestMemoryStatsNoStatFile verifies that a missing memory.stat file is
// tolerated by GetStats (no error).
func TestMemoryStatsNoStatFile(t *testing.T) {
	dir := tempDir(t, "memory")
	writeFileContents(t, dir, map[string]string{
		"memory.usage_in_bytes":     memoryUsageContents,
		"memory.max_usage_in_bytes": memoryMaxUsageContents,
		"memory.limit_in_bytes":     memoryLimitContents,
	})

	stats := *cgroups.NewStats()
	if err := (&MemoryGroup{}).GetStats(dir, &stats); err != nil {
		t.Fatal(err)
	}
}
// TestMemoryStatsNoUsageFile verifies GetStats fails when
// memory.usage_in_bytes is missing.
func TestMemoryStatsNoUsageFile(t *testing.T) {
	dir := tempDir(t, "memory")
	writeFileContents(t, dir, map[string]string{
		"memory.stat":               memoryStatContents,
		"memory.max_usage_in_bytes": memoryMaxUsageContents,
		"memory.limit_in_bytes":     memoryLimitContents,
	})

	stats := *cgroups.NewStats()
	if err := (&MemoryGroup{}).GetStats(dir, &stats); err == nil {
		t.Fatal("Expected failure")
	}
}
// TestMemoryStatsNoMaxUsageFile verifies GetStats fails when
// memory.max_usage_in_bytes is missing.
func TestMemoryStatsNoMaxUsageFile(t *testing.T) {
	dir := tempDir(t, "memory")
	writeFileContents(t, dir, map[string]string{
		"memory.stat":           memoryStatContents,
		"memory.usage_in_bytes": memoryUsageContents,
		"memory.limit_in_bytes": memoryLimitContents,
	})

	stats := *cgroups.NewStats()
	if err := (&MemoryGroup{}).GetStats(dir, &stats); err == nil {
		t.Fatal("Expected failure")
	}
}
// TestMemoryStatsNoLimitInBytesFile verifies GetStats fails when
// memory.limit_in_bytes is missing.
func TestMemoryStatsNoLimitInBytesFile(t *testing.T) {
	dir := tempDir(t, "memory")
	writeFileContents(t, dir, map[string]string{
		"memory.stat":               memoryStatContents,
		"memory.usage_in_bytes":     memoryUsageContents,
		"memory.max_usage_in_bytes": memoryMaxUsageContents,
	})

	stats := *cgroups.NewStats()
	if err := (&MemoryGroup{}).GetStats(dir, &stats); err == nil {
		t.Fatal("Expected failure")
	}
}
// TestMemoryStatsBadStatFile verifies GetStats fails on an unparsable
// memory.stat file.
func TestMemoryStatsBadStatFile(t *testing.T) {
	dir := tempDir(t, "memory")
	writeFileContents(t, dir, map[string]string{
		"memory.stat":               "rss rss",
		"memory.usage_in_bytes":     memoryUsageContents,
		"memory.max_usage_in_bytes": memoryMaxUsageContents,
		"memory.limit_in_bytes":     memoryLimitContents,
	})

	stats := *cgroups.NewStats()
	if err := (&MemoryGroup{}).GetStats(dir, &stats); err == nil {
		t.Fatal("Expected failure")
	}
}
// TestMemoryStatsBadUsageFile verifies GetStats fails on an unparsable
// memory.usage_in_bytes file.
func TestMemoryStatsBadUsageFile(t *testing.T) {
	dir := tempDir(t, "memory")
	writeFileContents(t, dir, map[string]string{
		"memory.stat":               memoryStatContents,
		"memory.usage_in_bytes":     "bad",
		"memory.max_usage_in_bytes": memoryMaxUsageContents,
		"memory.limit_in_bytes":     memoryLimitContents,
	})

	stats := *cgroups.NewStats()
	if err := (&MemoryGroup{}).GetStats(dir, &stats); err == nil {
		t.Fatal("Expected failure")
	}
}
// TestMemoryStatsBadMaxUsageFile verifies GetStats fails on an unparsable
// memory.max_usage_in_bytes file.
func TestMemoryStatsBadMaxUsageFile(t *testing.T) {
	dir := tempDir(t, "memory")
	writeFileContents(t, dir, map[string]string{
		"memory.stat":               memoryStatContents,
		"memory.usage_in_bytes":     memoryUsageContents,
		"memory.max_usage_in_bytes": "bad",
		"memory.limit_in_bytes":     memoryLimitContents,
	})

	stats := *cgroups.NewStats()
	if err := (&MemoryGroup{}).GetStats(dir, &stats); err == nil {
		t.Fatal("Expected failure")
	}
}
// TestMemoryStatsBadLimitInBytesFile verifies GetStats fails on an
// unparsable memory.limit_in_bytes file.
func TestMemoryStatsBadLimitInBytesFile(t *testing.T) {
	dir := tempDir(t, "memory")
	writeFileContents(t, dir, map[string]string{
		"memory.stat":               memoryStatContents,
		"memory.usage_in_bytes":     memoryUsageContents,
		"memory.max_usage_in_bytes": memoryMaxUsageContents,
		"memory.limit_in_bytes":     "bad",
	})

	stats := *cgroups.NewStats()
	if err := (&MemoryGroup{}).GetStats(dir, &stats); err == nil {
		t.Fatal("Expected failure")
	}
}
// TestMemorySetOomControl verifies that Set leaves an existing
// memory.oom_control value untouched when no OOM-related resources are
// configured (r is empty, so nothing should be written to the file).
//
// Fix: t.Fatalf was called with a constant string containing no format
// verbs; use t.Fatal (go vet / staticcheck S1039-style cleanup).
func TestMemorySetOomControl(t *testing.T) {
	path := tempDir(t, "memory")

	const oomKillDisable = 1 // disable oom killer, default is 0

	writeFileContents(t, path, map[string]string{
		"memory.oom_control": strconv.Itoa(oomKillDisable),
	})

	memory := &MemoryGroup{}
	r := &cgroups.Resources{} // no OOM settings: the file must keep its value
	if err := memory.Set(path, r); err != nil {
		t.Fatal(err)
	}

	value, err := fscommon.GetCgroupParamUint(path, "memory.oom_control")
	if err != nil {
		t.Fatal(err)
	}
	if value != oomKillDisable {
		t.Fatal("Got the wrong value, set memory.oom_control failed.")
	}
}
// TestNoHierarchicalNumaStat verifies that getPageUsageByNUMA handles a
// memory.numa_stat without hierarchical_* lines: the Hierarchical part of
// the result stays zero while the flat counters are parsed.
func TestNoHierarchicalNumaStat(t *testing.T) {
	path := tempDir(t, "memory")
	writeFileContents(t, path, map[string]string{
		"memory.numa_stat": memoryNUMAStatNoHierarchyContents + memoryNUMAStatExtraContents,
	})

	actualStats, err := getPageUsageByNUMA(path)
	if err != nil {
		t.Fatal(err)
	}
	// Expected values correspond to memoryNUMAStatNoHierarchyContents.
	pageUsageByNUMA := cgroups.PageUsageByNUMA{
		PageUsageByNUMAInner: cgroups.PageUsageByNUMAInner{
			Total:       cgroups.PageStats{Total: 44611, Nodes: map[uint8]uint64{0: 32631, 1: 7501, 2: 1982, 3: 2497}},
			File:        cgroups.PageStats{Total: 44428, Nodes: map[uint8]uint64{0: 32614, 1: 7335, 2: 1982, 3: 2497}},
			Anon:        cgroups.PageStats{Total: 183, Nodes: map[uint8]uint64{0: 17, 1: 166, 2: 0, 3: 0}},
			Unevictable: cgroups.PageStats{Total: 0, Nodes: map[uint8]uint64{0: 0, 1: 0, 2: 0, 3: 0}},
		},
		Hierarchical: cgroups.PageUsageByNUMAInner{},
	}
	expectPageUsageByNUMAEquals(t, pageUsageByNUMA, actualStats)
}
// TestBadNumaStat feeds getPageUsageByNUMA a series of malformed
// memory.numa_stat contents and expects a parse error for each one.
func TestBadNumaStat(t *testing.T) {
	memoryNUMAStatBadContents := []struct {
		desc, contents string
	}{
		{
			desc: "Nx where x is not a number",
			contents: `total=44611 N0=44611,
file=44428 Nx=0
`,
		}, {
			desc:     "Nx where x > 255",
			contents: `total=44611 N333=444`,
		}, {
			desc:     "Nx argument missing",
			contents: `total=44611 N0=123 N1=`,
		}, {
			desc:     "Nx argument is not a number",
			contents: `total=44611 N0=123 N1=a`,
		}, {
			desc:     "Missing = after Nx",
			contents: `total=44611 N0=123 N1`,
		}, {
			desc: "No Nx at non-first position",
			contents: `total=44611 N0=32631
file=44428 N0=32614
anon=183 N0=12 badone
`,
		},
	}
	path := tempDir(t, "memory")
	for _, c := range memoryNUMAStatBadContents {
		// Each iteration overwrites the same file with the next bad fixture.
		writeFileContents(t, path, map[string]string{
			"memory.numa_stat": c.contents,
		})
		_, err := getPageUsageByNUMA(path)
		if err == nil {
			t.Errorf("case %q: expected error, got nil", c.desc)
		}
	}
}
// TestWithoutNumaStat verifies that a missing memory.numa_stat file results
// in zero-value NUMA page usage rather than an error.
func TestWithoutNumaStat(t *testing.T) {
	dir := tempDir(t, "memory")

	got, err := getPageUsageByNUMA(dir)
	if err != nil {
		t.Fatal(err)
	}
	expectPageUsageByNUMAEquals(t, cgroups.PageUsageByNUMA{}, got)
}

View File

@@ -1,41 +0,0 @@
package fs
import (
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
// Arbitrary net_cls class IDs (major 0x10, minors 2 and 1) used to check
// that Set overwrites an existing value.
const (
	classidBefore = 0x100002
	classidAfter  = 0x100001
)
// TestNetClsSetClassid verifies that Set writes the configured class ID
// to net_cls.classid.
func TestNetClsSetClassid(t *testing.T) {
	dir := tempDir(t, "net_cls")
	writeFileContents(t, dir, map[string]string{
		"net_cls.classid": strconv.FormatUint(classidBefore, 10),
	})

	res := &cgroups.Resources{NetClsClassid: classidAfter}
	if err := (&NetClsGroup{}).Set(dir, res); err != nil {
		t.Fatal(err)
	}

	// The mock filesystem cannot emulate kernel parsing of the classid,
	// so just verify the raw value written to the file.
	got, err := fscommon.GetCgroupParamUint(dir, "net_cls.classid")
	if err != nil {
		t.Fatal(err)
	}
	if got != classidAfter {
		t.Fatal("Got the wrong value, set net_cls.classid failed.")
	}
}

View File

@@ -1,36 +0,0 @@
package fs
import (
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
// prioMap is a single-entry ifpriomap fixture: priority 5 on interface "test".
var prioMap = []*cgroups.IfPrioMap{
	{
		Interface: "test",
		Priority:  5,
	},
}
// TestNetPrioSetIfPrio verifies that Set writes the interface/priority pair
// to net_prio.ifpriomap.
func TestNetPrioSetIfPrio(t *testing.T) {
	dir := tempDir(t, "net_prio")

	res := &cgroups.Resources{NetPrioIfpriomap: prioMap}
	if err := (&NetPrioGroup{}).Set(dir, res); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamString(dir, "net_prio.ifpriomap")
	if err != nil {
		t.Fatal(err)
	}
	if !strings.Contains(got, "test 5") {
		t.Fatal("Got the wrong value, set net_prio.ifpriomap failed.")
	}
}

View File

@@ -1,104 +0,0 @@
package fs
import (
"path/filepath"
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/internal/path"
)
// TestInvalidCgroupPath checks that path-traversal attempts in cgroup
// Path/Name/Parent cannot escape the cgroup mountpoint (a security
// regression test for CVE-class directory traversal).
func TestInvalidCgroupPath(t *testing.T) {
	if cgroups.IsCgroup2UnifiedMode() {
		t.Skip("cgroup v2 is not supported")
	}

	root, err := rootPath()
	if err != nil {
		t.Fatalf("couldn't get cgroup root: %v", err)
	}

	testCases := []struct {
		test               string
		path, name, parent string
	}{
		{
			test: "invalid cgroup path",
			path: "../../../../../../../../../../some/path",
		},
		{
			test: "invalid absolute cgroup path",
			path: "/../../../../../../../../../../some/path",
		},
		{
			test:   "invalid cgroup parent",
			parent: "../../../../../../../../../../some/path",
			name:   "name",
		},
		{
			test:   "invalid absolute cgroup parent",
			parent: "/../../../../../../../../../../some/path",
			name:   "name",
		},
		{
			test:   "invalid cgroup name",
			parent: "parent",
			name:   "../../../../../../../../../../some/path",
		},
		{
			test:   "invalid absolute cgroup name",
			parent: "parent",
			name:   "/../../../../../../../../../../some/path",
		},
		{
			test:   "invalid cgroup name and parent",
			parent: "../../../../../../../../../../some/path",
			name:   "../../../../../../../../../../some/path",
		},
		{
			test:   "invalid absolute cgroup name and parent",
			parent: "/../../../../../../../../../../some/path",
			name:   "/../../../../../../../../../../some/path",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.test, func(t *testing.T) {
			config := &cgroups.Cgroup{Path: tc.path, Name: tc.name, Parent: tc.parent}

			inner, err := path.Inner(config)
			if err != nil {
				t.Fatalf("couldn't get cgroup data: %v", err)
			}

			// Make sure the final inner path doesn't go outside the cgroup mountpoint.
			if strings.HasPrefix(inner, "..") {
				t.Errorf("SECURITY: cgroup innerPath is outside cgroup mountpoint!")
			}

			// Double-check, using an actual cgroup.
			deviceRoot := filepath.Join(root, "devices")
			devicePath, err := subsysPath(root, inner, "devices")
			if err != nil {
				t.Fatalf("couldn't get cgroup path: %v", err)
			}
			if !strings.HasPrefix(devicePath, deviceRoot) {
				t.Errorf("SECURITY: cgroup path() is outside cgroup mountpoint!")
			}
		})
	}
}
func TestTryDefaultCgroupRoot(t *testing.T) {
res := tryDefaultCgroupRoot()
exp := defaultCgroupRoot
if cgroups.IsCgroup2UnifiedMode() {
// checking that tryDefaultCgroupRoot does return ""
// in case /sys/fs/cgroup is not cgroup v1 root dir.
exp = ""
}
if res != exp {
t.Errorf("tryDefaultCgroupRoot: want %q, got %q", exp, res)
}
}

View File

@@ -1,108 +0,0 @@
package fs
import (
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
)
// Pids limit fixtures: -1 means unlimited ("max"); 1024 is an arbitrary cap.
const (
	maxUnlimited = -1
	maxLimited   = 1024
)
// TestPidsSetMax verifies that Set replaces "max" with a numeric pids limit.
func TestPidsSetMax(t *testing.T) {
	dir := tempDir(t, "pids")
	writeFileContents(t, dir, map[string]string{
		"pids.max": "max",
	})

	res := &cgroups.Resources{PidsLimit: maxLimited}
	if err := (&PidsGroup{}).Set(dir, res); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamUint(dir, "pids.max")
	if err != nil {
		t.Fatal(err)
	}
	if got != maxLimited {
		t.Fatalf("Expected %d, got %d for setting pids.max - limited", maxLimited, got)
	}
}
// TestPidsSetUnlimited verifies that a negative pids limit writes "max".
func TestPidsSetUnlimited(t *testing.T) {
	dir := tempDir(t, "pids")
	writeFileContents(t, dir, map[string]string{
		"pids.max": strconv.Itoa(maxLimited),
	})

	res := &cgroups.Resources{PidsLimit: maxUnlimited}
	if err := (&PidsGroup{}).Set(dir, res); err != nil {
		t.Fatal(err)
	}

	got, err := fscommon.GetCgroupParamString(dir, "pids.max")
	if err != nil {
		t.Fatal(err)
	}
	if got != "max" {
		t.Fatalf("Expected %s, got %s for setting pids.max - unlimited", "max", got)
	}
}
// TestPidsStats verifies that GetStats parses pids.current and a numeric
// pids.max.
func TestPidsStats(t *testing.T) {
	dir := tempDir(t, "pids")
	writeFileContents(t, dir, map[string]string{
		"pids.current": strconv.Itoa(1337),
		"pids.max":     strconv.Itoa(maxLimited),
	})

	stats := *cgroups.NewStats()
	if err := (&PidsGroup{}).GetStats(dir, &stats); err != nil {
		t.Fatal(err)
	}

	if got := stats.PidsStats.Current; got != 1337 {
		t.Fatalf("Expected %d, got %d for pids.current", 1337, got)
	}
	if got := stats.PidsStats.Limit; got != maxLimited {
		t.Fatalf("Expected %d, got %d for pids.max", maxLimited, got)
	}
}
// TestPidsStatsUnlimited verifies that a "max" pids.max is reported as a
// zero limit in the stats.
func TestPidsStatsUnlimited(t *testing.T) {
	dir := tempDir(t, "pids")
	writeFileContents(t, dir, map[string]string{
		"pids.current": strconv.Itoa(4096),
		"pids.max":     "max",
	})

	stats := *cgroups.NewStats()
	if err := (&PidsGroup{}).GetStats(dir, &stats); err != nil {
		t.Fatal(err)
	}

	if got := stats.PidsStats.Current; got != 4096 {
		t.Fatalf("Expected %d, got %d for pids.current", 4096, got)
	}
	if got := stats.PidsStats.Limit; got != 0 {
		t.Fatalf("Expected %d, got %d for pids.max", 0, got)
	}
}

View File

@@ -1,138 +0,0 @@
package fs
import (
"errors"
"fmt"
"reflect"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// blkioStatEntryEquals returns nil when the two blkio stat slices are equal,
// or an error describing the first mismatch (length or element).
func blkioStatEntryEquals(expected, actual []cgroups.BlkioStatEntry) error {
	if len(expected) != len(actual) {
		return errors.New("blkioStatEntries length do not match")
	}
	for i := range expected {
		if expected[i] != actual[i] {
			return fmt.Errorf("expected: %v, actual: %v", expected[i], actual[i])
		}
	}
	return nil
}
// expectBlkioStatsEquals compares every recursive blkio stat slice of the
// two BlkioStats values, reporting one test error per mismatching field.
//
// Fix: the IoMergedRecursive branch ignored the error returned by
// blkioStatEntryEquals and printed a different message than all the other
// branches; make it consistent with its siblings.
func expectBlkioStatsEquals(t *testing.T, expected, actual cgroups.BlkioStats) {
	t.Helper()
	if err := blkioStatEntryEquals(expected.IoServiceBytesRecursive, actual.IoServiceBytesRecursive); err != nil {
		t.Errorf("blkio IoServiceBytesRecursive do not match: %s", err)
	}
	if err := blkioStatEntryEquals(expected.IoServicedRecursive, actual.IoServicedRecursive); err != nil {
		t.Errorf("blkio IoServicedRecursive do not match: %s", err)
	}
	if err := blkioStatEntryEquals(expected.IoQueuedRecursive, actual.IoQueuedRecursive); err != nil {
		t.Errorf("blkio IoQueuedRecursive do not match: %s", err)
	}
	if err := blkioStatEntryEquals(expected.SectorsRecursive, actual.SectorsRecursive); err != nil {
		t.Errorf("blkio SectorsRecursive do not match: %s", err)
	}
	if err := blkioStatEntryEquals(expected.IoServiceTimeRecursive, actual.IoServiceTimeRecursive); err != nil {
		t.Errorf("blkio IoServiceTimeRecursive do not match: %s", err)
	}
	if err := blkioStatEntryEquals(expected.IoWaitTimeRecursive, actual.IoWaitTimeRecursive); err != nil {
		t.Errorf("blkio IoWaitTimeRecursive do not match: %s", err)
	}
	if err := blkioStatEntryEquals(expected.IoMergedRecursive, actual.IoMergedRecursive); err != nil {
		t.Errorf("blkio IoMergedRecursive do not match: %s", err)
	}
	if err := blkioStatEntryEquals(expected.IoTimeRecursive, actual.IoTimeRecursive); err != nil {
		t.Errorf("blkio IoTimeRecursive do not match: %s", err)
	}
}
// expectThrottlingDataEquals fails the test when the two throttling data
// values are not identical.
func expectThrottlingDataEquals(t *testing.T, expected, actual cgroups.ThrottlingData) {
	t.Helper()
	if actual != expected {
		t.Errorf("Expected throttling data: %v, actual: %v", expected, actual)
	}
}
// expectHugetlbStatEquals fails the test when the two hugetlb stats values
// are not identical.
func expectHugetlbStatEquals(t *testing.T, expected, actual cgroups.HugetlbStats) {
	t.Helper()
	if actual != expected {
		t.Errorf("Expected hugetlb stats: %v, actual: %v", expected, actual)
	}
}
// expectMemoryStatEquals compares the usage sections, NUMA page usage,
// hierarchy flag and the raw stats map of two MemoryStats values, reporting
// a separate test error per mismatch.
func expectMemoryStatEquals(t *testing.T, expected, actual cgroups.MemoryStats) {
	t.Helper()
	expectMemoryDataEquals(t, expected.Usage, actual.Usage)
	expectMemoryDataEquals(t, expected.SwapUsage, actual.SwapUsage)
	expectMemoryDataEquals(t, expected.KernelUsage, actual.KernelUsage)
	expectPageUsageByNUMAEquals(t, expected.PageUsageByNUMA, actual.PageUsageByNUMA)

	if expected.UseHierarchy != actual.UseHierarchy {
		t.Errorf("Expected memory use hierarchy: %v, actual: %v", expected.UseHierarchy, actual.UseHierarchy)
	}

	// Only keys present in expected are checked; extra actual keys are ignored.
	for key, expValue := range expected.Stats {
		actValue, ok := actual.Stats[key]
		if !ok {
			t.Errorf("Expected memory stat key %s not found", key)
		}
		if expValue != actValue {
			t.Errorf("Expected memory stat value: %d, actual: %d", expValue, actValue)
		}
	}
}
// expectMemoryDataEquals compares each field of two MemoryData values,
// reporting a separate test error per mismatching field.
func expectMemoryDataEquals(t *testing.T, exp, act cgroups.MemoryData) {
	t.Helper()
	if exp.Usage != act.Usage {
		t.Errorf("Expected memory usage: %d, actual: %d", exp.Usage, act.Usage)
	}
	if exp.MaxUsage != act.MaxUsage {
		t.Errorf("Expected memory max usage: %d, actual: %d", exp.MaxUsage, act.MaxUsage)
	}
	if exp.Failcnt != act.Failcnt {
		t.Errorf("Expected memory failcnt %d, actual: %d", exp.Failcnt, act.Failcnt)
	}
	if exp.Limit != act.Limit {
		t.Errorf("Expected memory limit: %d, actual: %d", exp.Limit, act.Limit)
	}
}
// expectPageUsageByNUMAEquals deep-compares the flat and hierarchical NUMA
// page usage of two PageUsageByNUMA values, one test error per field.
//
// Fix: the Hierarchical.Unevictable mismatch message said "hierarchical
// total" (copy-paste from the Total branch); it now says "hierarchical
// unevictable".
func expectPageUsageByNUMAEquals(t *testing.T, expected, actual cgroups.PageUsageByNUMA) {
	t.Helper()
	if !reflect.DeepEqual(expected.Total, actual.Total) {
		t.Errorf("Expected total page usage by NUMA: %#v, actual: %#v", expected.Total, actual.Total)
	}
	if !reflect.DeepEqual(expected.File, actual.File) {
		t.Errorf("Expected file page usage by NUMA: %#v, actual: %#v", expected.File, actual.File)
	}
	if !reflect.DeepEqual(expected.Anon, actual.Anon) {
		t.Errorf("Expected anon page usage by NUMA: %#v, actual: %#v", expected.Anon, actual.Anon)
	}
	if !reflect.DeepEqual(expected.Unevictable, actual.Unevictable) {
		t.Errorf("Expected unevictable page usage by NUMA: %#v, actual: %#v", expected.Unevictable, actual.Unevictable)
	}
	if !reflect.DeepEqual(expected.Hierarchical.Total, actual.Hierarchical.Total) {
		t.Errorf("Expected hierarchical total page usage by NUMA: %#v, actual: %#v", expected.Hierarchical.Total, actual.Hierarchical.Total)
	}
	if !reflect.DeepEqual(expected.Hierarchical.File, actual.Hierarchical.File) {
		t.Errorf("Expected hierarchical file page usage by NUMA: %#v, actual: %#v", expected.Hierarchical.File, actual.Hierarchical.File)
	}
	if !reflect.DeepEqual(expected.Hierarchical.Anon, actual.Hierarchical.Anon) {
		t.Errorf("Expected hierarchical anon page usage by NUMA: %#v, actual: %#v", expected.Hierarchical.Anon, actual.Hierarchical.Anon)
	}
	if !reflect.DeepEqual(expected.Hierarchical.Unevictable, actual.Hierarchical.Unevictable) {
		t.Errorf("Expected hierarchical unevictable page usage by NUMA: %#v, actual: %#v", expected.Hierarchical.Unevictable, actual.Hierarchical.Unevictable)
	}
}

View File

@@ -1,39 +0,0 @@
/*
Utility for testing cgroup operations.
Creates a mock of the cgroup filesystem for the duration of the test.
*/
package fs
import (
"os"
"path/filepath"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// init switches the cgroups package into test mode so its file helpers
// operate on the mock cgroup filesystem created by tempDir, rather than
// requiring a real cgroupfs mount.
func init() {
	cgroups.TestMode = true
}
// tempDir creates a mock cgroup directory for the given subsystem inside
// the test's temporary directory and returns its path.
func tempDir(t testing.TB, subsystem string) string {
	dir := filepath.Join(t.TempDir(), subsystem)
	// Ensure the full mock cgroup path exists.
	if err := os.Mkdir(dir, 0o755); err != nil {
		t.Fatal(err)
	}
	return dir
}
// writeFileContents populates the mock cgroup directory at path with the
// given file name -> contents pairs, failing the test on any write error.
func writeFileContents(t testing.TB, path string, fileContents map[string]string) {
	for name, data := range fileContents {
		if err := cgroups.WriteFile(path, name, data); err != nil {
			t.Fatal(err)
		}
	}
}

View File

@@ -1,93 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fs2
import (
"path/filepath"
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// TestParseCgroupFromReader checks parsing of /proc/self/cgroup content:
// only the cgroup v2 ("0::") entry yields a path; input with no such
// entry is an error.
func TestParseCgroupFromReader(t *testing.T) {
	cases := map[string]string{
		"0::/user.slice/user-1001.slice/session-1.scope\n":                                   "/user.slice/user-1001.slice/session-1.scope",
		"2:cpuset:/foo\n1:name=systemd:/\n":                                                  "",
		"2:cpuset:/foo\n1:name=systemd:/\n0::/user.slice/user-1001.slice/session-1.scope\n": "/user.slice/user-1001.slice/session-1.scope",
	}
	for input, want := range cases {
		got, err := parseCgroupFromReader(strings.NewReader(input))
		if want == "" {
			// No v2 entry: an error is expected.
			if err == nil {
				t.Error("error is expected")
			}
			continue
		}
		if got != want {
			t.Errorf("expected %q, got %q", want, got)
		}
		if err != nil {
			t.Error(err)
		}
	}
}
// TestDefaultDirPath verifies defaultDirPath resolution for both absolute
// cgroup paths (rooted at the unified mountpoint) and relative ones
// (resolved against our own cgroup).
func TestDefaultDirPath(t *testing.T) {
	if !cgroups.IsCgroup2UnifiedMode() {
		t.Skip("need cgroupv2")
	}
	// Mimic what defaultDirPath() does to learn our own cgroup.
	ownCgroup, err := parseCgroupFile("/proc/self/cgroup")
	if err != nil {
		// Not a test failure, but rather some weird
		// environment so we can't run this test.
		t.Skipf("can't get own cgroup: %v", err)
	}
	ownCgroup = filepath.Dir(ownCgroup)

	for _, tc := range []struct {
		cgPath   string
		cgParent string
		cgName   string
		expected string
	}{
		{
			cgPath:   "/foo/bar",
			expected: "/sys/fs/cgroup/foo/bar",
		},
		{
			cgPath:   "foo/bar",
			expected: filepath.Join(UnifiedMountpoint, ownCgroup, "foo/bar"),
		},
	} {
		cfg := &cgroups.Cgroup{
			Path:   tc.cgPath,
			Parent: tc.cgParent,
			Name:   tc.cgName,
		}
		got, err := defaultDirPath(cfg)
		if err != nil {
			t.Fatal(err)
		}
		if got != tc.expected {
			t.Fatalf("expected %q, got %q", tc.expected, got)
		}
	}
}

View File

@@ -1,81 +0,0 @@
package fs2
import (
"os"
"path/filepath"
"reflect"
"sort"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// exampleIoStatData is a sample of cgroup v2 io.stat file contents:
// one line per device (major:minor) with read/write/discard byte and
// I/O-operation counters.
const exampleIoStatData = `254:1 rbytes=6901432320 wbytes=14245535744 rios=263278 wios=248603 dbytes=0 dios=0
254:0 rbytes=2702336 wbytes=0 rios=97 wios=0 dbytes=0 dios=0
259:0 rbytes=6911345664 wbytes=14245536256 rios=264538 wios=244914 dbytes=530485248 dios=2`
// exampleIoStatsParsed is the BlkioStats representation expected after
// parsing exampleIoStatData: rbytes/wbytes map to IoServiceBytesRecursive
// and rios/wios map to IoServicedRecursive, one Read and one Write entry
// per device.
var exampleIoStatsParsed = cgroups.BlkioStats{
	IoServiceBytesRecursive: []cgroups.BlkioStatEntry{
		{Major: 254, Minor: 1, Value: 6901432320, Op: "Read"},
		{Major: 254, Minor: 1, Value: 14245535744, Op: "Write"},
		{Major: 254, Minor: 0, Value: 2702336, Op: "Read"},
		{Major: 254, Minor: 0, Value: 0, Op: "Write"},
		{Major: 259, Minor: 0, Value: 6911345664, Op: "Read"},
		{Major: 259, Minor: 0, Value: 14245536256, Op: "Write"},
	},
	IoServicedRecursive: []cgroups.BlkioStatEntry{
		{Major: 254, Minor: 1, Value: 263278, Op: "Read"},
		{Major: 254, Minor: 1, Value: 248603, Op: "Write"},
		{Major: 254, Minor: 0, Value: 97, Op: "Read"},
		{Major: 254, Minor: 0, Value: 0, Op: "Write"},
		{Major: 259, Minor: 0, Value: 264538, Op: "Read"},
		{Major: 259, Minor: 0, Value: 244914, Op: "Write"},
	},
}
// lessBlkioStatEntry reports whether a sorts before b, comparing the
// Major, Minor, Op and Value fields in that order.
func lessBlkioStatEntry(a, b cgroups.BlkioStatEntry) bool {
	switch {
	case a.Major != b.Major:
		return a.Major < b.Major
	case a.Minor != b.Minor:
		return a.Minor < b.Minor
	case a.Op != b.Op:
		return a.Op < b.Op
	}
	return a.Value < b.Value
}
// sortBlkioStats brings both recursive blkio tables into a deterministic
// order (statIo fills them by iterating a map, so order is random).
func sortBlkioStats(stats *cgroups.BlkioStats) {
	tables := []*[]cgroups.BlkioStatEntry{
		&stats.IoServicedRecursive,
		&stats.IoServiceBytesRecursive,
	}
	for _, tbl := range tables {
		entries := *tbl
		sort.SliceStable(entries, func(i, j int) bool {
			return lessBlkioStatEntry(entries[i], entries[j])
		})
	}
}
// TestStatIo checks that statIo parses cgroup v2 io.stat contents into
// the expected BlkioStats read/write tables.
func TestStatIo(t *testing.T) {
	// We're using a fake cgroupfs.
	cgroups.TestMode = true

	dir := t.TempDir()
	if err := os.WriteFile(filepath.Join(dir, "io.stat"), []byte(exampleIoStatData), 0o644); err != nil {
		t.Fatal(err)
	}

	var got cgroups.Stats
	if err := statIo(dir, &got); err != nil {
		t.Error(err)
	}
	// Sort both sides since statIo uses a map internally.
	sortBlkioStats(&got.BlkioStats)
	sortBlkioStats(&exampleIoStatsParsed)
	if !reflect.DeepEqual(got.BlkioStats, exampleIoStatsParsed) {
		t.Errorf("parsed cgroupv2 io.stat doesn't match expected result: \ngot %#v\nexpected %#v\n", got.BlkioStats, exampleIoStatsParsed)
	}
}

View File

@@ -1,155 +0,0 @@
package fs2
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// exampleMemoryStatData is a sample of cgroup v2 memory.stat file
// contents (flat "key value" lines), used as parser input by the
// memory stat tests below.
const exampleMemoryStatData = `anon 790425600
file 6502666240
kernel_stack 7012352
pagetables 8867840
percpu 2445520
sock 40960
shmem 6721536
file_mapped 656187392
file_dirty 1122304
file_writeback 0
swapcached 10
anon_thp 438304768
file_thp 0
shmem_thp 0
inactive_anon 892223488
active_anon 2973696
inactive_file 5307346944
active_file 1179316224
unevictable 31477760
slab_reclaimable 348866240
slab_unreclaimable 10099808
slab 358966048
workingset_refault_anon 0
workingset_refault_file 0
workingset_activate_anon 0
workingset_activate_file 0
workingset_restore_anon 0
workingset_restore_file 0
workingset_nodereclaim 0
pgfault 103216687
pgmajfault 6879
pgrefill 0
pgscan 0
pgsteal 0
pgactivate 1110217
pgdeactivate 292
pglazyfree 267
pglazyfreed 0
thp_fault_alloc 57411
thp_collapse_alloc 443`
// TestStatMemoryPodCgroupNotFound checks that statMemory fails when
// memory.current is missing even though memory.stat exists.
func TestStatMemoryPodCgroupNotFound(t *testing.T) {
	// We're using a fake cgroupfs.
	cgroups.TestMode = true
	fakeCgroupDir := t.TempDir()

	// only write memory.stat to ensure pod cgroup usage
	// still reads memory.current.
	statPath := filepath.Join(fakeCgroupDir, "memory.stat")
	if err := os.WriteFile(statPath, []byte(exampleMemoryStatData), 0o644); err != nil {
		t.Fatal(err)
	}

	gotStats := cgroups.NewStats()

	// use a fake root path to mismatch the file we wrote.
	// this triggers the non-root path which should fail to find memory.current.
	err := statMemory(fakeCgroupDir, gotStats)
	if err == nil {
		// Must be Fatal, not Error: the err.Error() call below would
		// panic on a nil error.
		t.Fatal("expected error when statting memory for cgroupv2 root, but was nil")
	}
	if !strings.Contains(err.Error(), "memory.current: no such file or directory") {
		t.Errorf("expected error to contain 'memory.current: no such file or directory', but was %s", err.Error())
	}
}
// TestStatMemoryPodCgroup checks that statMemory picks up usage from
// memory.current, the limit from memory.max, and the peak usage from
// memory.peak.
func TestStatMemoryPodCgroup(t *testing.T) {
	// We're using a fake cgroupfs.
	cgroups.TestMode = true
	dir := t.TempDir()

	for name, data := range map[string]string{
		"memory.stat":    exampleMemoryStatData,
		"memory.current": "123456789",
		"memory.max":     "999999999",
		"memory.peak":    "987654321",
	} {
		if err := os.WriteFile(filepath.Join(dir, name), []byte(data), 0o644); err != nil {
			t.Fatal(err)
		}
	}

	st := cgroups.NewStats()
	// use a fake root path to trigger the pod cgroup lookup.
	if err := statMemory(dir, st); err != nil {
		t.Errorf("expected no error when statting memory for cgroupv2 root, but got %#+v", err)
	}

	for _, chk := range []struct {
		got, want uint64
	}{
		{st.MemoryStats.Usage.Usage, 123456789},    // from memory.current
		{st.MemoryStats.Usage.Limit, 999999999},    // from memory.max
		{st.MemoryStats.Usage.MaxUsage, 987654321}, // from memory.peak
	} {
		if chk.got != chk.want {
			t.Errorf("parsed cgroupv2 memory.stat doesn't match expected result: \ngot %#v\nexpected %#v\n", chk.got, chk.want)
		}
	}
}
// TestRootStatsFromMeminfo checks the root-cgroup memory figures computed
// from /proc/meminfo together with anon/file counters from memory.stat.
func TestRootStatsFromMeminfo(t *testing.T) {
	stats := &cgroups.Stats{
		MemoryStats: cgroups.MemoryStats{
			Stats: map[string]uint64{
				"anon": 790425600,
				"file": 6502666240,
			},
		},
	}
	if err := rootStatsFromMeminfo(stats); err != nil {
		t.Fatal(err)
	}

	// Usage must be the sum of anon and file.
	const wantUsage uint64 = 790425600 + 6502666240
	if got := stats.MemoryStats.Usage.Usage; got != wantUsage {
		t.Errorf("parsed cgroupv2 memory.stat doesn't match expected result: \ngot %d\nexpected %d\n", got, wantUsage)
	}
	// Swap is adjusted to mem+swap, so it can never be below memory.
	mem := stats.MemoryStats.Usage
	swap := stats.MemoryStats.SwapUsage
	if swap.Usage < mem.Usage {
		t.Errorf("swap usage %d should be at least mem usage %d", swap.Usage, mem.Usage)
	}
	if swap.Limit < mem.Limit {
		t.Errorf("swap limit %d should be at least mem limit %d", swap.Limit, mem.Limit)
	}
}

View File

@@ -1,103 +0,0 @@
package fs2
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// exampleMiscCurrentData is a sample of cgroup v2 misc.current contents:
// current usage per misc resource.
const exampleMiscCurrentData = `res_a 123
res_b 456
res_c 42`
// exampleMiscEventsData is a sample of cgroup v2 misc.events contents:
// the number of times each resource hit its limit.
const exampleMiscEventsData = `res_a.max 1
res_b.max 2
res_c.max 3`
// TestStatMiscPodCgroupEmpty checks that empty misc.current/misc.events
// files (no misc resources available) produce no stats and no error.
func TestStatMiscPodCgroupEmpty(t *testing.T) {
	// We're using a fake cgroupfs.
	cgroups.TestMode = true
	dir := t.TempDir()

	// Empty files model the common case of no misc resource keys.
	for _, name := range []string{"misc.current", "misc.events"} {
		if _, err := os.Create(filepath.Join(dir, name)); err != nil {
			t.Fatal(err)
		}
	}

	st := cgroups.NewStats()
	if err := statMisc(dir, st); err != nil {
		t.Errorf("expected no error when statting empty misc.current/misc.events for cgroupv2, but got %#v", err)
	}
	if len(st.MiscStats) != 0 {
		t.Errorf("parsed cgroupv2 misc.* returns unexpected resources: got %#v but expected nothing", st.MiscStats)
	}
}
// TestStatMiscPodCgroupNotFound checks that statMisc fails when
// misc.events is missing even though misc.current exists.
func TestStatMiscPodCgroupNotFound(t *testing.T) {
	// We're using a fake cgroupfs.
	cgroups.TestMode = true
	fakeCgroupDir := t.TempDir()

	// only write misc.current to ensure pod cgroup usage
	// still reads misc.events.
	statPath := filepath.Join(fakeCgroupDir, "misc.current")
	if err := os.WriteFile(statPath, []byte(exampleMiscCurrentData), 0o644); err != nil {
		t.Fatal(err)
	}

	gotStats := cgroups.NewStats()

	// use a fake root path to mismatch the file we wrote.
	// this triggers the non-root path which should fail to find misc.events.
	err := statMisc(fakeCgroupDir, gotStats)
	if err == nil {
		// Must be Fatal, not Error: the err.Error() call below would
		// panic on a nil error.
		t.Fatal("expected error when statting misc.current for cgroupv2 root, but was nil")
	}
	if !strings.Contains(err.Error(), "misc.events: no such file or directory") {
		t.Errorf("expected error to contain 'misc.events: no such file or directory', but was %s", err.Error())
	}
}
// TestStatMiscPodCgroup checks that statMisc returns one entry per misc
// resource and reads the usage values from misc.current.
func TestStatMiscPodCgroup(t *testing.T) {
	// We're using a fake cgroupfs.
	cgroups.TestMode = true
	dir := t.TempDir()

	for name, data := range map[string]string{
		"misc.current": exampleMiscCurrentData,
		"misc.events":  exampleMiscEventsData,
	} {
		if err := os.WriteFile(filepath.Join(dir, name), []byte(data), 0o644); err != nil {
			t.Fatal(err)
		}
	}

	st := cgroups.NewStats()
	// use a fake root path to trigger the pod cgroup lookup.
	if err := statMisc(dir, st); err != nil {
		t.Errorf("expected no error when statting misc for cgroupv2 root, but got %#+v", err)
	}

	// make sure all res_* from exampleMisc*Data are returned
	if len(st.MiscStats) != 3 {
		t.Errorf("parsed cgroupv2 misc doesn't return all expected resources: \ngot %#v\nexpected %#v\n", len(st.MiscStats), 3)
	}
	var wantUsage uint64 = 42
	if got := st.MiscStats["res_c"].Usage; got != wantUsage {
		t.Errorf("parsed cgroupv2 misc.current for res_c doesn't match expected result: \ngot %#v\nexpected %#v\n", got, wantUsage)
	}
}

View File

@@ -1,47 +0,0 @@
package fs2
import (
"os"
"path/filepath"
"reflect"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// TestStatCPUPSI verifies parsing of a cgroup v2 cpu.pressure (PSI) file
// into "some" and "full" pressure records.
func TestStatCPUPSI(t *testing.T) {
	const examplePSIData = `some avg10=1.71 avg60=2.36 avg300=2.57 total=230548833
full avg10=1.00 avg60=1.01 avg300=1.00 total=157622356`

	// We're using a fake cgroupfs.
	cgroups.TestMode = true
	dir := t.TempDir()
	if err := os.WriteFile(filepath.Join(dir, "cpu.pressure"), []byte(examplePSIData), 0o644); err != nil {
		t.Fatal(err)
	}

	got, err := statPSI(dir, "cpu.pressure")
	if err != nil {
		t.Fatal(err)
	}
	want := cgroups.PSIStats{
		Some: cgroups.PSIData{
			Avg10:  1.71,
			Avg60:  2.36,
			Avg300: 2.57,
			Total:  230548833,
		},
		Full: cgroups.PSIData{
			Avg10:  1.00,
			Avg60:  1.01,
			Avg300: 1.00,
			Total:  157622356,
		},
	}
	if !reflect.DeepEqual(*got, want) {
		t.Errorf("unexpected PSI result: %+v", got)
	}
}

View File

@@ -1,57 +0,0 @@
package fscommon
import (
"os"
"path/filepath"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
/* Roadmap for future */
// (Low-priority) TODO: Check if it is possible to virtually mimic an actual RDMA device.
// TODO: Think of more edge-cases to add.
// TestRdmaSet performs an E2E test of RdmaSet() and parseRdmaKV() using a
// dummy device on a dummy cgroup filesystem.
// Note: passing this test does not guarantee that the host supports RDMA,
// since the underlying infrastructure is mocked.
func TestRdmaSet(t *testing.T) {
	testCgroupPath := filepath.Join(t.TempDir(), "rdma")
	// Ensure the full mock cgroup path exists.
	if err := os.Mkdir(testCgroupPath, 0o755); err != nil {
		t.Fatal(err)
	}

	const rdmaDevice = "mlx5_1"
	maxHandles, maxObjects := uint32(100), uint32(300)

	res := &cgroups.Resources{
		Rdma: map[string]cgroups.LinuxRdma{
			rdmaDevice: {
				HcaHandles: &maxHandles,
				HcaObjects: &maxObjects,
			},
		},
	}
	if err := RdmaSet(testCgroupPath, res); err != nil {
		t.Fatal(err)
	}

	// The default rdma.max must be written.
	entries, err := readRdmaEntries(testCgroupPath, "rdma.max")
	if err != nil {
		t.Fatal(err)
	}
	if len(entries) != 1 {
		t.Fatal("rdma_test: Got the wrong values while parsing entries from rdma.max")
	}
	if entries[0].HcaHandles != maxHandles {
		t.Fatalf("rdma_test: Got the wrong value for hca_handles")
	}
	if entries[0].HcaObjects != maxObjects {
		t.Fatalf("rdma_test: Got the wrong value for hca_Objects")
	}
}

View File

@@ -1,95 +0,0 @@
package fscommon
import (
"math"
"os"
"path/filepath"
"strconv"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
const (
	// cgroupFile is the name of the mock cgroup control file used by
	// the tests below.
	cgroupFile = "cgroup.file"
	// floatValue and floatString are the numeric value and its string
	// representation written to the mock file.
	floatValue  = 2048.0
	floatString = "2048"
)
// init enables cgroups.TestMode so that the cgroup file helpers under
// test operate on plain files instead of a real cgroupfs.
func init() {
	cgroups.TestMode = true
}
// TestGetCgroupParamsInt exercises GetCgroupParamUint against a variety of
// file contents: a plain value, a trailing newline, negative numbers
// (expected to be clamped to 0), non-numeric garbage, and a missing file.
func TestGetCgroupParamsInt(t *testing.T) {
	dir := t.TempDir()
	file := filepath.Join(dir, cgroupFile)

	// write (re)creates the mock cgroup file with the given contents.
	write := func(contents string) {
		t.Helper()
		if err := os.WriteFile(file, []byte(contents), 0o755); err != nil {
			t.Fatal(err)
		}
	}

	// Success.
	write(floatString)
	got, err := GetCgroupParamUint(dir, cgroupFile)
	if err != nil {
		t.Fatal(err)
	} else if got != floatValue {
		t.Fatalf("Expected %d to equal %f", got, floatValue)
	}

	// Success with a trailing newline.
	write(floatString + "\n")
	got, err = GetCgroupParamUint(dir, cgroupFile)
	if err != nil {
		t.Fatal(err)
	} else if got != floatValue {
		t.Fatalf("Expected %d to equal %f", got, floatValue)
	}

	// Negative values are clamped to 0.
	write("-12345")
	got, err = GetCgroupParamUint(dir, cgroupFile)
	if err != nil {
		t.Fatal(err)
	} else if got != 0 {
		t.Fatalf("Expected %d to equal %d", got, 0)
	}

	// Negative values smaller than min int64 are clamped to 0 as well.
	write(strconv.FormatFloat(math.MinInt64, 'f', -1, 64))
	got, err = GetCgroupParamUint(dir, cgroupFile)
	if err != nil {
		t.Fatal(err)
	} else if got != 0 {
		t.Fatalf("Expected %d to equal %d", got, 0)
	}

	// Not a number at all.
	write("not-a-float")
	if _, err = GetCgroupParamUint(dir, cgroupFile); err == nil {
		t.Fatal("Expecting error, got none")
	}

	// Missing file.
	if err = os.Remove(file); err != nil {
		t.Fatal(err)
	}
	if _, err = GetCgroupParamUint(dir, cgroupFile); err == nil {
		t.Fatal("Expecting error, got none")
	}
}

View File

@@ -1,17 +0,0 @@
package cgroups
import (
"testing"
)
// BenchmarkGetAllPids measures collecting all PIDs under the cgroup root,
// accumulating the count so the call cannot be optimized away.
func BenchmarkGetAllPids(b *testing.B) {
	var total int
	for n := 0; n < b.N; n++ {
		pids, err := GetAllPids("/sys/fs/cgroup")
		if err != nil {
			b.Fatal(err)
		}
		total += len(pids)
	}
	b.Logf("iter: %d, total: %d", b.N, total)
}

View File

@@ -1,55 +0,0 @@
package manager
import (
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
)
// TestNilResources checks that a cgroup manager does not panic when
// config.Resources is nil. While it does not make sense to use a
// manager with no resources, it should not result in a panic.
//
// This tests either the v1 or v2 fs cgroup manager, depending on which
// cgroup version is available on the host.
func TestNilResources(t *testing.T) {
	testNilResources(t, false)
}
// TestNilResourcesSystemd is the same as TestNilResources, only it checks
// the systemd cgroup manager (and is skipped when systemd is not running).
func TestNilResourcesSystemd(t *testing.T) {
	if !systemd.IsRunningSystemd() {
		t.Skip("requires systemd")
	}
	testNilResources(t, true)
}
// testNilResources builds a manager from a Cgroup whose Resources is nil
// and invokes every manager method, ensuring none of them panic. The
// parameter selects the systemd driver (it is deliberately not named
// "systemd" to avoid shadowing the package of that name).
func testNilResources(t *testing.T, useSystemd bool) {
	cg := &cgroups.Cgroup{} // .Resources is nil
	cg.Systemd = useSystemd
	mgr, err := New(cg)
	if err != nil {
		// Some managers require non-nil Resources during
		// instantiation -- provide and retry. In such case
		// we're mostly testing Set(nil) below.
		cg.Resources = &cgroups.Resources{}
		if mgr, err = New(cg); err != nil {
			t.Fatal(err)
		}
	}
	_ = mgr.Apply(-1)
	_ = mgr.Set(nil)
	_ = mgr.Freeze(cgroups.Thawed)
	_ = mgr.Exists()
	_, _ = mgr.GetAllPids()
	_, _ = mgr.GetCgroups()
	_, _ = mgr.GetFreezerState()
	_ = mgr.Path("")
	_ = mgr.GetPaths()
	_, _ = mgr.GetStats()
	_, _ = mgr.OOMKillCount()
	_ = mgr.Destroy()
}

View File

@@ -1,55 +0,0 @@
package systemd
import (
"bytes"
"testing"
)
// TestRangeToBits checks conversion of kernel-style CPU list strings
// (e.g. "0-3,7") into the little-endian bitmask format used by systemd.
func TestRangeToBits(t *testing.T) {
	for _, tc := range []struct {
		in    string
		out   []byte
		isErr bool
	}{
		{in: "", isErr: true},
		{in: "0", out: []byte{1}},
		{in: "1", out: []byte{2}},
		{in: "0-1", out: []byte{3}},
		{in: "0,1", out: []byte{3}},
		{in: ",0,1,", out: []byte{3}},
		{in: "0-3", out: []byte{0x0f}},
		{in: "0,1,2-3", out: []byte{0x0f}},
		{in: "4-7", out: []byte{0xf0}},
		{in: "0-7", out: []byte{0xff}},
		{in: "0-15", out: []byte{0xff, 0xff}},
		{in: "16", out: []byte{0, 0, 1}},
		{in: "0-3,32-33", out: []byte{0x0f, 0, 0, 0, 3}},
		// extra spaces and tabs are ok
		{in: "1, 2, 1-2", out: []byte{6}},
		{in: " , 1 , 3 , 5-7, ", out: []byte{0xea}},
		// somewhat large values
		{in: "128-130,1", out: []byte{2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7}},

		{in: "-", isErr: true},
		{in: "1-", isErr: true},
		{in: "-3", isErr: true},
		// bad range (start > end)
		{in: "54-53", isErr: true},
		// kernel does not allow extra spaces inside a range
		{in: "1 - 2", isErr: true},
	} {
		got, err := RangeToBits(tc.in)
		if err != nil {
			if !tc.isErr {
				t.Errorf("case %q: unexpected error: %v", tc.in, err)
			}
			continue
		}
		if !bytes.Equal(got, tc.out) {
			t.Errorf("case %q: expected %v, got %v", tc.in, tc.out, got)
		}
	}
}

View File

@@ -1,354 +0,0 @@
package systemd
import (
"bufio"
"bytes"
"os"
"os/exec"
"strings"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"golang.org/x/sys/unix"
)
// TestFreezeBeforeSet checks the freezeBeforeSet heuristic, which decides
// whether a unit should be frozen before (and thawed after) applying unit
// properties. Each case is checked twice: before the systemd unit exists
// and after it has been created via Apply (optionally pre-frozen).
// Requires cgroup v1, systemd, and root (see requireV1).
func TestFreezeBeforeSet(t *testing.T) {
	requireV1(t)
	testCases := []struct {
		desc string
		// Test input.
		cg        *cgroups.Cgroup
		preFreeze bool
		// Expected values.
		// Before unit creation (Apply).
		freeze0, thaw0 bool
		// After unit creation.
		freeze1, thaw1 bool
	}{
		{
			// A slice with SkipDevices.
			desc: "slice,skip-devices",
			cg: &cgroups.Cgroup{
				Name:   "system-runc_test_freeze_1.slice",
				Parent: "system.slice",
				Resources: &cgroups.Resources{
					SkipDevices: true,
				},
			},
			// Expected.
			freeze0: false,
			thaw0:   false,
			freeze1: false,
			thaw1:   false,
		},
		{
			// A scope with SkipDevices. Not a realistic scenario with runc
			// (as container can't have SkipDevices == true), but possible
			// for a standalone cgroup manager.
			desc: "scope,skip-devices",
			cg: &cgroups.Cgroup{
				ScopePrefix: "test",
				Name:        "testFreeze2",
				Parent:      "system.slice",
				Resources: &cgroups.Resources{
					SkipDevices: true,
				},
			},
			// Expected.
			freeze0: false,
			thaw0:   false,
			freeze1: false,
			thaw1:   false,
		},
		{
			// A slice that is about to be frozen in Set.
			desc: "slice,will-freeze",
			cg: &cgroups.Cgroup{
				Name:   "system-runc_test_freeze_3.slice",
				Parent: "system.slice",
				Resources: &cgroups.Resources{
					Freezer: cgroups.Frozen,
				},
			},
			// Expected.
			freeze0: true,
			thaw0:   false,
			freeze1: true,
			thaw1:   false,
		},
		{
			// A pre-frozen slice that should stay frozen.
			desc: "slice,pre-frozen,will-freeze",
			cg: &cgroups.Cgroup{
				Name:   "system-runc_test_freeze_4.slice",
				Parent: "system.slice",
				Resources: &cgroups.Resources{
					Freezer: cgroups.Frozen,
				},
			},
			preFreeze: true,
			// Expected.
			freeze0: true, // not actually frozen yet.
			thaw0:   false,
			freeze1: false,
			thaw1:   false,
		},
		{
			// A pre-frozen scope with skip devices set.
			desc: "scope,pre-frozen,skip-devices",
			cg: &cgroups.Cgroup{
				ScopePrefix: "test",
				Name:        "testFreeze5",
				Parent:      "system.slice",
				Resources: &cgroups.Resources{
					SkipDevices: true,
				},
			},
			preFreeze: true,
			// Expected.
			freeze0: false,
			thaw0:   false,
			freeze1: false,
			thaw1:   false,
		},
		{
			// A pre-frozen scope which will be thawed.
			desc: "scope,pre-frozen",
			cg: &cgroups.Cgroup{
				ScopePrefix: "test",
				Name:        "testFreeze6",
				Parent:      "system.slice",
				Resources:   &cgroups.Resources{},
			},
			preFreeze: true,
			// Expected.
			freeze0: true, // not actually frozen yet.
			thaw0:   true,
			freeze1: false,
			thaw1:   false,
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.desc, func(t *testing.T) {
			m, err := NewLegacyManager(tc.cg, nil)
			if err != nil {
				t.Fatal(err)
			}
			defer m.Destroy() //nolint:errcheck

			// Checks for a non-existent unit.
			freeze, thaw, err := m.freezeBeforeSet(getUnitName(tc.cg), tc.cg.Resources)
			if err != nil {
				t.Fatal(err)
			}
			if freeze != tc.freeze0 || thaw != tc.thaw0 {
				t.Errorf("before Apply (non-existent unit): expected freeze: %v, thaw: %v, got freeze: %v, thaw: %v",
					tc.freeze0, tc.thaw0, freeze, thaw)
			}

			// Create systemd unit.
			pid := -1
			if strings.HasSuffix(getUnitName(tc.cg), ".scope") {
				// Scopes require a process inside.
				cmd := exec.Command("bash", "-c", "sleep 1m")
				if err := cmd.Start(); err != nil {
					t.Fatal(err)
				}
				pid = cmd.Process.Pid
				// Make sure to not leave a zombie.
				defer func() {
					// These may fail, we don't care.
					_ = cmd.Process.Kill()
					_ = cmd.Wait()
				}()
			}
			if err := m.Apply(pid); err != nil {
				t.Fatal(err)
			}
			if tc.preFreeze {
				if err := m.Freeze(cgroups.Frozen); err != nil {
					t.Error(err)
					return // no more checks
				}
			}
			freeze, thaw, err = m.freezeBeforeSet(getUnitName(tc.cg), tc.cg.Resources)
			if err != nil {
				t.Error(err)
				return // no more checks
			}
			if freeze != tc.freeze1 || thaw != tc.thaw1 {
				t.Errorf("expected freeze: %v, thaw: %v, got freeze: %v, thaw: %v",
					tc.freeze1, tc.thaw1, freeze, thaw)
			}
			// Destroy() timeouts on a frozen container, so we need to thaw it.
			if tc.preFreeze {
				if err := m.Freeze(cgroups.Thawed); err != nil {
					t.Error(err)
				}
			}
			// Destroy() does not kill processes in cgroup, so we should.
			if pid != -1 {
				if err = unix.Kill(pid, unix.SIGKILL); err != nil {
					t.Errorf("unable to kill pid %d: %s", pid, err)
				}
			}
			// Not really needed, but may help catch some bugs.
			if err := m.Destroy(); err != nil {
				t.Errorf("destroy: %s", err)
			}
		})
	}
}
// requireV1 skips the test unless all of its requirements are met:
// cgroup v1, a running systemd, and root privileges.
func requireV1(t *testing.T) {
	t.Helper()
	switch {
	case cgroups.IsCgroup2UnifiedMode():
		t.Skip("Test requires cgroup v1.")
	case !IsRunningSystemd():
		t.Skip("Test requires systemd.")
	case os.Geteuid() != 0:
		t.Skip("Test requires root.")
	}
}
// TestFreezePodCgroup creates a frozen "pod" slice, puts a "container"
// process into a scope under it, and checks that:
//   - the container's freezer state is reported as thawed even though its
//     parent slice is frozen;
//   - after thawing the pod, the container process is alive and responsive
//     (verified by echoing a marker line through its stdin/stdout).
//
// Requires systemd and root.
func TestFreezePodCgroup(t *testing.T) {
	if !IsRunningSystemd() {
		t.Skip("Test requires systemd.")
	}
	if os.Geteuid() != 0 {
		t.Skip("Test requires root.")
	}

	podConfig := &cgroups.Cgroup{
		Parent: "system.slice",
		Name:   "system-runc_test_pod.slice",
		Resources: &cgroups.Resources{
			SkipDevices: true,
			Freezer:     cgroups.Frozen,
		},
	}
	// Create a "pod" cgroup (a systemd slice to hold containers),
	// which is frozen initially.
	pm := newManager(t, podConfig)
	if err := pm.Apply(-1); err != nil {
		t.Fatal(err)
	}

	if err := pm.Set(podConfig.Resources); err != nil {
		t.Fatal(err)
	}

	// Check the pod is frozen.
	pf, err := pm.GetFreezerState()
	if err != nil {
		t.Fatal(err)
	}
	if pf != cgroups.Frozen {
		t.Fatalf("expected pod to be frozen, got %v", pf)
	}

	// Create a "container" within the "pod" cgroup.
	// This is not a real container, just a process in the cgroup.
	containerConfig := &cgroups.Cgroup{
		Parent:      "system-runc_test_pod.slice",
		ScopePrefix: "test",
		Name:        "inner-container",
		Resources:   &cgroups.Resources{},
	}

	// The shell loop echoes back every line it reads, so writing a line
	// and reading the reply proves the process is running (not frozen).
	cmd := exec.Command("bash", "-c", "while read; do echo $REPLY; done")
	cmd.Env = append(os.Environ(), "LANG=C")

	// Setup stdin.
	stdinR, stdinW, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	cmd.Stdin = stdinR

	// Setup stdout.
	stdoutR, stdoutW, err := os.Pipe()
	if err != nil {
		t.Fatal(err)
	}
	cmd.Stdout = stdoutW
	rdr := bufio.NewReader(stdoutR)

	// Setup stderr.
	var stderr bytes.Buffer
	cmd.Stderr = &stderr

	err = cmd.Start()
	// The parent's copies of the child's pipe ends must be closed so
	// reads/writes observe the child's side only.
	stdinR.Close()
	stdoutW.Close()
	defer func() {
		_ = stdinW.Close()
		_ = stdoutR.Close()
	}()
	if err != nil {
		t.Fatal(err)
	}
	// Make sure to not leave a zombie.
	defer func() {
		// These may fail, we don't care.
		_ = cmd.Process.Kill()
		_ = cmd.Wait()
	}()

	// Put the process into a cgroup.
	cm := newManager(t, containerConfig)

	if err := cm.Apply(cmd.Process.Pid); err != nil {
		t.Fatal(err)
	}
	if err := cm.Set(containerConfig.Resources); err != nil {
		t.Fatal(err)
	}
	// Check that we put the "container" into the "pod" cgroup.
	if !strings.HasPrefix(cm.Path("freezer"), pm.Path("freezer")) {
		t.Fatalf("expected container cgroup path %q to be under pod cgroup path %q",
			cm.Path("freezer"), pm.Path("freezer"))
	}
	// Check the container is not reported as frozen despite the frozen parent.
	cf, err := cm.GetFreezerState()
	if err != nil {
		t.Fatal(err)
	}
	if cf != cgroups.Thawed {
		t.Fatalf("expected container to be thawed, got %v", cf)
	}

	// Unfreeze the pod.
	if err := pm.Freeze(cgroups.Thawed); err != nil {
		t.Fatal(err)
	}

	cf, err = cm.GetFreezerState()
	if err != nil {
		t.Fatal(err)
	}
	if cf != cgroups.Thawed {
		t.Fatalf("expected container to be thawed, got %v", cf)
	}

	// Check the "container" works.
	marker := "one two\n"
	_, err = stdinW.WriteString(marker)
	if err != nil {
		t.Fatal(err)
	}
	reply, err := rdr.ReadString('\n')
	if err != nil {
		t.Fatalf("reading from container: %v", err)
	}
	if reply != marker {
		t.Fatalf("expected %q, got %q", marker, reply)
	}
}

View File

@@ -1,180 +0,0 @@
package systemd
import (
"os"
"reflect"
"testing"
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
"github.com/opencontainers/runc/libcontainer/cgroups"
)
// newManager creates a systemd cgroup manager matching the host's cgroup
// version (unified or legacy), failing the test on error and registering
// a cleanup that destroys the manager when the test finishes.
func newManager(t *testing.T, config *cgroups.Cgroup) cgroups.Manager {
	t.Helper()

	var (
		mgr cgroups.Manager
		err error
	)
	if cgroups.IsCgroup2UnifiedMode() {
		mgr, err = NewUnifiedManager(config, "")
	} else {
		mgr, err = NewLegacyManager(config, nil)
	}
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(func() { _ = mgr.Destroy() })

	return mgr
}
// TestSystemdVersion checks version-string parsing in systemdVersionAtoi,
// covering quoted, "v"-prefixed, suffixed and malformed inputs.
func TestSystemdVersion(t *testing.T) {
	for _, tc := range []struct {
		verStr      string
		expectedVer int
		expectErr   bool
	}{
		{`"219"`, 219, false},
		{`"v245.4-1.fc32"`, 245, false},
		{`"241-1"`, 241, false},
		{`"v241-1"`, 241, false},
		{`333.45"`, 333, false},
		{`v321-0`, 321, false},
		{"NaN", -1, true},
		{"", -1, true},
		{"v", -1, true},
	} {
		ver, err := systemdVersionAtoi(tc.verStr)
		switch {
		case err != nil && !tc.expectErr:
			t.Errorf("systemdVersionAtoi(%s); want nil; got %v", tc.verStr, err)
		case err == nil && tc.expectErr:
			t.Errorf("systemdVersionAtoi(%s); wanted failure; got nil", tc.verStr)
		}
		if ver != tc.expectedVer {
			t.Errorf("systemdVersionAtoi(%s); want %d; got %d", tc.verStr, tc.expectedVer, ver)
		}
	}
}
// TestValidUnitTypes checks that getUnitType derives the systemd unit
// type ("Slice" or "Scope") from the unit name suffix.
func TestValidUnitTypes(t *testing.T) {
	for _, tc := range []struct {
		unitName string
		wantType string
	}{
		{"system.slice", "Slice"},
		{"kubepods.slice", "Slice"},
		{"testing-container:ab.scope", "Scope"},
	} {
		if got := getUnitType(tc.unitName); got != tc.wantType {
			t.Errorf("getUnitType(%s); want %q; got %q", tc.unitName, tc.wantType, got)
		}
	}
}
// TestUnitExistsIgnored checks that applying the same systemd slice twice
// succeeds, i.e. the dbus "UnitExists" error is ignored by Apply.
// Requires systemd and root.
func TestUnitExistsIgnored(t *testing.T) {
	if !IsRunningSystemd() {
		t.Skip("Test requires systemd.")
	}
	if os.Geteuid() != 0 {
		t.Skip("Test requires root.")
	}

	podConfig := &cgroups.Cgroup{
		Parent:    "system.slice",
		Name:      "system-runc_test_exists.slice",
		Resources: &cgroups.Resources{},
	}
	// Create a "pods" slice, twice: the second Apply must not fail
	// with "unit already exists".
	pm := newManager(t, podConfig)
	for range [2]struct{}{} {
		if err := pm.Apply(-1); err != nil {
			t.Fatal(err)
		}
	}
}
// TestUnifiedResToSystemdProps checks the translation of raw cgroup v2
// resource settings (Unified map) into systemd unit properties. In
// particular, cpu.idle=1 must translate to CPUWeight=0 and must take
// precedence over any cpu.weight value. Cases gated by minVer are skipped
// on systemd versions that do not support cpu.idle.
// Requires systemd and cgroup v2.
func TestUnifiedResToSystemdProps(t *testing.T) {
	if !IsRunningSystemd() {
		t.Skip("Test requires systemd.")
	}
	if !cgroups.IsCgroup2UnifiedMode() {
		t.Skip("cgroup v2 is required")
	}

	cm := newDbusConnManager(os.Geteuid() != 0)

	testCases := []struct {
		name     string
		minVer   int
		res      map[string]string
		expError bool
		expProps []systemdDbus.Property
	}{
		{
			name: "empty map",
			res:  map[string]string{},
		},
		{
			name:   "only cpu.idle=1",
			minVer: cpuIdleSupportedVersion,
			res: map[string]string{
				"cpu.idle": "1",
			},
			expProps: []systemdDbus.Property{
				newProp("CPUWeight", uint64(0)),
			},
		},
		{
			name:   "only cpu.idle=0",
			minVer: cpuIdleSupportedVersion,
			res: map[string]string{
				"cpu.idle": "0",
			},
		},
		{
			name:   "cpu.idle=1 and cpu.weight=1000",
			minVer: cpuIdleSupportedVersion,
			res: map[string]string{
				"cpu.idle":   "1",
				"cpu.weight": "1000",
			},
			expProps: []systemdDbus.Property{
				newProp("CPUWeight", uint64(0)),
			},
		},
		{
			name:   "cpu.idle=0 and cpu.weight=1000",
			minVer: cpuIdleSupportedVersion,
			res: map[string]string{
				"cpu.idle":   "0",
				"cpu.weight": "1000",
			},
			expProps: []systemdDbus.Property{
				newProp("CPUWeight", uint64(1000)),
			},
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			if tc.minVer != 0 && systemdVersion(cm) < tc.minVer {
				t.Skipf("requires systemd >= %d", tc.minVer)
			}
			props, err := unifiedResToSystemdProps(cm, tc.res)
			if err != nil && !tc.expError {
				t.Fatalf("expected no error, got: %v", err)
			}
			if err == nil && tc.expError {
				t.Fatal("expected error, got nil")
			}
			if !reflect.DeepEqual(tc.expProps, props) {
				t.Errorf("wrong properties (exp %+v, got %+v)", tc.expProps, props)
			}
		})
	}
}

View File

@@ -1,691 +0,0 @@
package cgroups
import (
"bytes"
"errors"
"path/filepath"
"reflect"
"strings"
"testing"
"github.com/moby/sys/mountinfo"
"golang.org/x/sys/unix"
)
// fedoraMountinfo is sample /proc/self/mountinfo content from a Fedora host
// using cgroup v1, with one cgroup mount per controller under
// /sys/fs/cgroup, plus unrelated mounts (docker devicemapper, cifs, etc.)
// acting as noise for the parser.
const fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children
31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children
32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children
33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children
34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children
35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered
36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel
38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel
39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel
40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw
41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw
42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw
43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw
45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered
46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered
47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered
48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered
121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw
165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered
167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered
171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered
175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered
179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered
183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered
187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered
191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered
195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered
199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered
203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered
207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered
211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered
215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered
219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered
223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered
227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered
231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered
235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered
239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered
243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered
247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered
31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1`
// systemdMountinfo is sample mountinfo content from inside a Docker
// container managed by systemd; cgroup v1 mounts are rooted at the
// container's scope rather than at /.
const systemdMountinfo = `115 83 0:32 / / rw,relatime - aufs none rw,si=c0bd3d3,dio,dirperm1
116 115 0:35 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
117 115 0:36 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755
118 117 0:37 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666
119 115 0:38 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
120 119 0:39 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755
121 120 0:19 /system.slice/docker-dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e.scope /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd
122 120 0:20 /system.slice/docker-dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e.scope /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices
123 120 0:21 /system.slice/docker-dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e.scope /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer
124 120 0:22 /system.slice/docker-dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e.scope /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory
125 120 0:23 /system.slice/docker-dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e.scope /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,net_cls,net_prio
126 120 0:24 /system.slice/docker-dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e.scope /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio
127 120 0:25 /system.slice/docker-dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e.scope /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset,clone_children
128 120 0:26 /system.slice/docker-dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e.scope /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu,cpuacct
129 120 0:27 /system.slice/docker-dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e.scope /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event,release_agent=/run/cgmanager/agents/cgm-release-agent.perf_event
130 115 43:0 /var/lib/docker/volumes/a44a712176377f57c094397330ee04387284c478364eb25f4c3d25f775f25c26/_data /var/lib/docker rw,relatime - ext4 /dev/nbd0 rw,data=ordered
131 115 43:0 /var/lib/docker/containers/dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/nbd0 rw,data=ordered
132 115 43:0 /var/lib/docker/containers/dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e/hostname /etc/hostname rw,relatime - ext4 /dev/nbd0 rw,data=ordered
133 115 43:0 /var/lib/docker/containers/dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e/hosts /etc/hosts rw,relatime - ext4 /dev/nbd0 rw,data=ordered
134 117 0:33 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k
135 117 0:13 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
136 117 0:12 /1 /dev/console rw,nosuid,noexec,relatime - devpts none rw,gid=5,mode=620,ptmxmode=000
84 115 0:40 / /tmp rw,relatime - tmpfs none rw`
// bedrockMountinfo is sample mountinfo content from a Bedrock Linux system,
// where every cgroup v1 hierarchy is mounted multiple times (once under
// /sys/fs/cgroup and once per stratum); used to exercise the all=true path.
const bedrockMountinfo = `120 17 0:28 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:16 - tmpfs tmpfs ro,mode=755
124 28 0:28 / /bedrock/strata/arch/sys/fs/cgroup rw,nosuid,nodev,noexec shared:16 - tmpfs tmpfs ro,mode=755
123 53 0:28 / /bedrock/strata/fallback/sys/fs/cgroup rw,nosuid,nodev,noexec shared:16 - tmpfs tmpfs ro,mode=755
122 71 0:28 / /bedrock/strata/gentoo/sys/fs/cgroup rw,nosuid,nodev,noexec shared:16 - tmpfs tmpfs ro,mode=755
121 89 0:28 / /bedrock/strata/kde/sys/fs/cgroup rw,nosuid,nodev,noexec shared:16 - tmpfs tmpfs ro,mode=755
125 120 0:29 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd
129 124 0:29 / /bedrock/strata/arch/sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd
128 123 0:29 / /bedrock/strata/fallback/sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd
127 122 0:29 / /bedrock/strata/gentoo/sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd
126 121 0:29 / /bedrock/strata/kde/sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,xattr,release_agent=/lib/systemd/systemd-cgroups-agent,name=systemd
140 120 0:32 / /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:48 - cgroup cgroup rw,net_cls,net_prio
144 124 0:32 / /bedrock/strata/arch/sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:48 - cgroup cgroup rw,net_cls,net_prio
143 123 0:32 / /bedrock/strata/fallback/sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:48 - cgroup cgroup rw,net_cls,net_prio
142 122 0:32 / /bedrock/strata/gentoo/sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:48 - cgroup cgroup rw,net_cls,net_prio
141 121 0:32 / /bedrock/strata/kde/sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:48 - cgroup cgroup rw,net_cls,net_prio
145 120 0:33 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:49 - cgroup cgroup rw,blkio
149 124 0:33 / /bedrock/strata/arch/sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:49 - cgroup cgroup rw,blkio
148 123 0:33 / /bedrock/strata/fallback/sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:49 - cgroup cgroup rw,blkio
147 122 0:33 / /bedrock/strata/gentoo/sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:49 - cgroup cgroup rw,blkio
146 121 0:33 / /bedrock/strata/kde/sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:49 - cgroup cgroup rw,blkio
150 120 0:34 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:50 - cgroup cgroup rw,cpu,cpuacct
154 124 0:34 / /bedrock/strata/arch/sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:50 - cgroup cgroup rw,cpu,cpuacct
153 123 0:34 / /bedrock/strata/fallback/sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:50 - cgroup cgroup rw,cpu,cpuacct
152 122 0:34 / /bedrock/strata/gentoo/sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:50 - cgroup cgroup rw,cpu,cpuacct
151 121 0:34 / /bedrock/strata/kde/sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:50 - cgroup cgroup rw,cpu,cpuacct
155 120 0:35 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:51 - cgroup cgroup rw,cpuset
159 124 0:35 / /bedrock/strata/arch/sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:51 - cgroup cgroup rw,cpuset
158 123 0:35 / /bedrock/strata/fallback/sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:51 - cgroup cgroup rw,cpuset
157 122 0:35 / /bedrock/strata/gentoo/sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:51 - cgroup cgroup rw,cpuset
156 121 0:35 / /bedrock/strata/kde/sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:51 - cgroup cgroup rw,cpuset
160 120 0:36 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:52 - cgroup cgroup rw,devices
164 124 0:36 / /bedrock/strata/arch/sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:52 - cgroup cgroup rw,devices
163 123 0:36 / /bedrock/strata/fallback/sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:52 - cgroup cgroup rw,devices
162 122 0:36 / /bedrock/strata/gentoo/sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:52 - cgroup cgroup rw,devices
161 121 0:36 / /bedrock/strata/kde/sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:52 - cgroup cgroup rw,devices
165 120 0:37 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:53 - cgroup cgroup rw,memory
169 124 0:37 / /bedrock/strata/arch/sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:53 - cgroup cgroup rw,memory
168 123 0:37 / /bedrock/strata/fallback/sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:53 - cgroup cgroup rw,memory
167 122 0:37 / /bedrock/strata/gentoo/sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:53 - cgroup cgroup rw,memory
166 121 0:37 / /bedrock/strata/kde/sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:53 - cgroup cgroup rw,memory
170 120 0:38 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:54 - cgroup cgroup rw,freezer
174 124 0:38 / /bedrock/strata/arch/sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:54 - cgroup cgroup rw,freezer
173 123 0:38 / /bedrock/strata/fallback/sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:54 - cgroup cgroup rw,freezer
172 122 0:38 / /bedrock/strata/gentoo/sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:54 - cgroup cgroup rw,freezer
171 121 0:38 / /bedrock/strata/kde/sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:54 - cgroup cgroup rw,freezer
175 120 0:39 / /sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:55 - cgroup cgroup rw,pids
179 124 0:39 / /bedrock/strata/arch/sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:55 - cgroup cgroup rw,pids
178 123 0:39 / /bedrock/strata/fallback/sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:55 - cgroup cgroup rw,pids
177 122 0:39 / /bedrock/strata/gentoo/sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:55 - cgroup cgroup rw,pids
176 121 0:39 / /bedrock/strata/kde/sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:55 - cgroup cgroup rw,pids
180 120 0:40 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:56 - cgroup cgroup rw,perf_event
184 124 0:40 / /bedrock/strata/arch/sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:56 - cgroup cgroup rw,perf_event
183 123 0:40 / /bedrock/strata/fallback/sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:56 - cgroup cgroup rw,perf_event
182 122 0:40 / /bedrock/strata/gentoo/sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:56 - cgroup cgroup rw,perf_event
181 121 0:40 / /bedrock/strata/kde/sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:56 - cgroup cgroup rw,perf_event`
// cgroup2Mountinfo is sample mountinfo content from a hybrid host where a
// cgroup2 filesystem is mounted at /sys/fs/cgroup/systemd alongside the
// usual cgroup v1 controller mounts; used to check that the v2 mount is
// not mistaken for a v1 one.
const cgroup2Mountinfo = `18 64 0:18 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
19 64 0:4 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
20 64 0:6 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8171204k,nr_inodes=2042801,mode=755
21 18 0:19 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
22 20 0:20 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
23 20 0:21 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
24 64 0:22 / /run rw,nosuid,nodev shared:24 - tmpfs tmpfs rw,seclabel,mode=755
25 18 0:23 / /sys/fs/cgroup ro,nosuid,nodev,noexec shared:8 - tmpfs tmpfs ro,seclabel,mode=755
26 25 0:24 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:9 - cgroup2 cgroup rw
27 18 0:25 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw,seclabel
28 18 0:26 / /sys/firmware/efi/efivars rw,nosuid,nodev,noexec,relatime shared:21 - efivarfs efivarfs rw
29 25 0:27 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,cpu,cpuacct
30 25 0:28 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,memory
31 25 0:29 / /sys/fs/cgroup/net_cls,net_prio rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,net_cls,net_prio
32 25 0:30 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,blkio
33 25 0:31 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,perf_event
34 25 0:32 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,hugetlb
35 25 0:33 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,freezer
36 25 0:34 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,cpuset
37 25 0:35 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,devices
38 25 0:36 / /sys/fs/cgroup/pids rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,pids
61 18 0:37 / /sys/kernel/config rw,relatime shared:22 - configfs configfs rw
64 0 253:0 / / rw,relatime shared:1 - ext4 /dev/mapper/fedora_dhcp--16--129-root rw,seclabel,data=ordered
39 18 0:17 / /sys/fs/selinux rw,relatime shared:23 - selinuxfs selinuxfs rw
40 20 0:16 / /dev/mqueue rw,relatime shared:25 - mqueue mqueue rw,seclabel
41 20 0:39 / /dev/hugepages rw,relatime shared:26 - hugetlbfs hugetlbfs rw,seclabel
`
// TestGetCgroupMounts checks that getCgroupMountsHelper resolves cgroup v1
// mounts from canned mountinfo samples, both with all=false (one mount per
// subsystem) and all=true (every mount, exercised by the Bedrock sample
// where each hierarchy is mounted several times).
func TestGetCgroupMounts(t *testing.T) {
	type testData struct {
		mountInfo string
		root      string
		// all is the total number of records expected with all=true,
		// or 0 for no extra records expected (most cases).
		all        int
		subsystems map[string]bool
	}
	testTable := []testData{
		{
			mountInfo: fedoraMountinfo,
			root:      "/",
			subsystems: map[string]bool{
				"name=systemd": false,
				"cpuset":       false,
				"cpu":          false,
				"cpuacct":      false,
				"memory":       false,
				"devices":      false,
				"freezer":      false,
				"net_cls":      false,
				"blkio":        false,
				"perf_event":   false,
				"hugetlb":      false,
			},
		},
		{
			mountInfo: systemdMountinfo,
			root:      "/system.slice/docker-dc4eaa1a34ec4d593bc0125d31eea823a1d76ae483aeb1409cca80304e34da2e.scope",
			subsystems: map[string]bool{
				"name=systemd": false,
				"cpuset":       false,
				"cpu":          false,
				"cpuacct":      false,
				"memory":       false,
				"devices":      false,
				"freezer":      false,
				"net_cls":      false,
				"net_prio":     false,
				"blkio":        false,
				"perf_event":   false,
			},
		},
		{
			mountInfo: bedrockMountinfo,
			root:      "/",
			all:       50,
			subsystems: map[string]bool{
				"name=systemd": false,
				"cpuset":       false,
				"cpu":          false,
				"cpuacct":      false,
				"memory":       false,
				"devices":      false,
				"freezer":      false,
				"net_cls":      false,
				"net_prio":     false,
				"blkio":        false,
				"perf_event":   false,
				"pids":         false,
			},
		},
	}
	for _, td := range testTable {
		// Parse the raw sample once; the cgroup filter mirrors production use.
		mi, err := mountinfo.GetMountsFromReader(
			bytes.NewBufferString(td.mountInfo),
			mountinfo.FSTypeFilter("cgroup"),
		)
		if err != nil {
			t.Fatal(err)
		}
		cgMounts, err := getCgroupMountsHelper(td.subsystems, mi, false)
		if err != nil {
			t.Fatal(err)
		}
		// Index the result by subsystem name for the per-subsystem checks.
		cgMap := make(map[string]Mount)
		for _, m := range cgMounts {
			for _, ss := range m.Subsystems {
				cgMap[ss] = m
			}
		}
		for ss := range td.subsystems {
			ss = strings.TrimPrefix(ss, CgroupNamePrefix)
			m, ok := cgMap[ss]
			if !ok {
				t.Fatalf("%s not found", ss)
			}
			if m.Root != td.root {
				t.Fatalf("unexpected root for %s: %s", ss, m.Root)
			}
			if !strings.HasPrefix(m.Mountpoint, "/sys/fs/cgroup/") && !strings.Contains(m.Mountpoint, ss) {
				t.Fatalf("unexpected mountpoint for %s: %s", ss, m.Mountpoint)
			}
			var ssFound bool
			for _, mss := range m.Subsystems {
				if mss == ss {
					ssFound = true
					break
				}
			}
			if !ssFound {
				t.Fatalf("subsystem %s not found in Subsystems field %v", ss, m.Subsystems)
			}
		}
		// Test the all=true case.
		// Reset the test input (the helper marks found subsystems as true).
		for k := range td.subsystems {
			td.subsystems[k] = false
		}
		cgMountsAll, err := getCgroupMountsHelper(td.subsystems, mi, true)
		if err != nil {
			t.Fatal(err)
		}
		if td.all == 0 {
			// Results with and without "all" should be the same.
			if len(cgMounts) != len(cgMountsAll) || !reflect.DeepEqual(cgMounts, cgMountsAll) {
				t.Errorf("expected same results, got (all=false) %v, (all=true) %v", cgMounts, cgMountsAll)
			}
		} else {
			// Make sure we got all records.
			if len(cgMountsAll) != td.all {
				t.Errorf("expected %d records, got %d (%+v)", td.all, len(cgMountsAll), cgMountsAll)
			}
		}
	}
}
// BenchmarkGetCgroupMounts measures resolving cgroup v1 mounts from a
// pre-parsed Fedora mountinfo sample; parsing happens before the timer
// is reset, so only getCgroupMountsHelper is measured.
func BenchmarkGetCgroupMounts(b *testing.B) {
	subsystems := map[string]bool{
		"cpuset":     false,
		"cpu":        false,
		"cpuacct":    false,
		"memory":     false,
		"devices":    false,
		"freezer":    false,
		"net_cls":    false,
		"blkio":      false,
		"perf_event": false,
		"hugetlb":    false,
	}
	mi, err := mountinfo.GetMountsFromReader(
		strings.NewReader(fedoraMountinfo),
		mountinfo.FSTypeFilter("cgroup"),
	)
	if err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, err := getCgroupMountsHelper(subsystems, mi, false); err != nil {
			b.Fatal(err)
		}
	}
}
// TestParseCgroupString checks that parseCgroupFromReader parses
// /proc/self/cgroup-formatted content into a controller -> path map
// (splitting comma-separated controller lists into separate entries),
// and that malformed input yields the expected error.
func TestParseCgroupString(t *testing.T) {
	testCases := []struct {
		input          string
		expectedError  error
		expectedOutput map[string]string
	}{
		{
			// Taken from a CoreOS instance running systemd 225 with CPU/Mem
			// accounting enabled in systemd
			input: `9:blkio:/
8:freezer:/
7:perf_event:/
6:devices:/system.slice/system-sshd.slice
5:cpuset:/
4:cpu,cpuacct:/system.slice/system-sshd.slice/sshd@126-10.240.0.15:22-xxx.yyy.zzz.aaa:33678.service
3:net_cls,net_prio:/
2:memory:/system.slice/system-sshd.slice/sshd@126-10.240.0.15:22-xxx.yyy.zzz.aaa:33678.service
1:name=systemd:/system.slice/system-sshd.slice/sshd@126-10.240.0.15:22-xxx.yyy.zzz.aaa:33678.service`,
			expectedOutput: map[string]string{
				"name=systemd": "/system.slice/system-sshd.slice/sshd@126-10.240.0.15:22-xxx.yyy.zzz.aaa:33678.service",
				"blkio":        "/",
				"freezer":      "/",
				"perf_event":   "/",
				"devices":      "/system.slice/system-sshd.slice",
				"cpuset":       "/",
				"cpu":          "/system.slice/system-sshd.slice/sshd@126-10.240.0.15:22-xxx.yyy.zzz.aaa:33678.service",
				"cpuacct":      "/system.slice/system-sshd.slice/sshd@126-10.240.0.15:22-xxx.yyy.zzz.aaa:33678.service",
				"net_cls":      "/",
				"net_prio":     "/",
				"memory":       "/system.slice/system-sshd.slice/sshd@126-10.240.0.15:22-xxx.yyy.zzz.aaa:33678.service",
			},
		},
		{
			input:         `malformed input`,
			expectedError: errors.New(`invalid cgroup entry: must contain at least two colons: malformed input`),
		},
	}
	for ndx, testCase := range testCases {
		out, err := parseCgroupFromReader(strings.NewReader(testCase.input))
		if err != nil {
			if testCase.expectedError == nil || testCase.expectedError.Error() != err.Error() {
				t.Errorf("%v: expected error %v, got error %v", ndx, testCase.expectedError, err)
			}
			continue
		}
		// No error was returned; fail explicitly if one was expected,
		// instead of falling through to a confusing nil-map comparison.
		if testCase.expectedError != nil {
			t.Errorf("%v: expected error %v, got nil", ndx, testCase.expectedError)
			continue
		}
		if !reflect.DeepEqual(testCase.expectedOutput, out) {
			// Message fixed to say "got <output>" rather than "got error".
			t.Errorf("%v: expected output %v, got %v", ndx, testCase.expectedOutput, out)
		}
	}
}
// TestIgnoreCgroup2Mount verifies that a cgroup2 filesystem mounted at
// /sys/fs/cgroup/systemd (as on hybrid hosts) is not reported as a
// cgroup v1 mount by getCgroupMountsHelper.
func TestIgnoreCgroup2Mount(t *testing.T) {
	wanted := map[string]bool{
		"cpuset":       false,
		"cpu":          false,
		"cpuacct":      false,
		"memory":       false,
		"devices":      false,
		"freezer":      false,
		"net_cls":      false,
		"blkio":        false,
		"perf_event":   false,
		"pids":         false,
		"name=systemd": false,
	}

	mi, err := mountinfo.GetMountsFromReader(
		strings.NewReader(cgroup2Mountinfo),
		mountinfo.FSTypeFilter("cgroup"),
	)
	if err != nil {
		t.Fatal(err)
	}

	mounts, err := getCgroupMountsHelper(wanted, mi, false)
	if err != nil {
		t.Fatal(err)
	}
	for _, m := range mounts {
		if m.Mountpoint == "/sys/fs/cgroup/systemd" {
			t.Errorf("parsed a cgroup2 mount at /sys/fs/cgroup/systemd instead of ignoring it")
		}
	}
}
// TestFindCgroupMountpointAndRoot checks that the cgroupPath argument is
// honored when the same controller is mounted more than once: a non-empty
// path selects the matching mount, an empty one picks the first.
func TestFindCgroupMountpointAndRoot(t *testing.T) {
	const fakeMountInfo = `35 27 0:29 / /foo rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,devices
35 27 0:29 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,devices`

	mi, err := mountinfo.GetMountsFromReader(
		strings.NewReader(fakeMountInfo),
		mountinfo.FSTypeFilter("cgroup"),
	)
	if err != nil {
		t.Fatal(err)
	}

	for _, tc := range []struct {
		cgroupPath string
		output     string
	}{
		{cgroupPath: "/sys/fs", output: "/sys/fs/cgroup/devices"},
		{cgroupPath: "", output: "/foo"},
	} {
		mountpoint, _, _ := findCgroupMountpointAndRootFromMI(mi, tc.cgroupPath, "devices")
		if mountpoint != tc.output {
			t.Errorf("expected %s, got %s", tc.output, mountpoint)
		}
	}
}
// BenchmarkGetHugePageSizeImpl measures parsing of hugepage sysfs file
// names into human-readable sizes; the result of the last iteration is
// sanity-checked after the loop so the call is not optimized away.
func BenchmarkGetHugePageSizeImpl(b *testing.B) {
	input := []string{"hugepages-1048576kB", "hugepages-2048kB", "hugepages-32768kB", "hugepages-64kB"}

	var (
		output []string
		err    error
	)
	for i := 0; i < b.N; i++ {
		output, err = getHugePageSizeFromFilenames(input)
	}
	if err != nil || len(output) != len(input) {
		b.Fatal("unexpected results")
	}
}
// TestGetHugePageSizeImpl checks the conversion of hugepage sysfs directory
// names (such as "hugepages-2048kB") into human-readable sizes ("2MB"),
// including names that are silently skipped and ones that produce an error
// while still returning the valid entries.
func TestGetHugePageSizeImpl(t *testing.T) {
	testCases := []struct {
		doc    string
		input  []string
		output []string
		isErr  bool
	}{
		{
			doc:    "normal input",
			input:  []string{"hugepages-1048576kB", "hugepages-2048kB", "hugepages-32768kB", "hugepages-64kB"},
			output: []string{"1GB", "2MB", "32MB", "64KB"},
		},
		{
			doc:    "empty input",
			input:  []string{},
			output: []string{},
		},
		{
			doc:   "not a number",
			input: []string{"hugepages-akB"},
			isErr: true,
		},
		{
			doc:   "no prefix (silently skipped)",
			input: []string{"1024kB"},
		},
		{
			doc:   "invalid prefix (silently skipped)",
			input: []string{"whatever-1024kB"},
		},
		{
			doc:   "invalid suffix",
			input: []string{"hugepages-1024gB"},
			isErr: true,
		},
		{
			doc:   "no suffix",
			input: []string{"hugepages-1024"},
			isErr: true,
		},
		{
			doc:    "mixed valid and invalid entries",
			input:  []string{"hugepages-4194304kB", "hugepages-2048kB", "hugepages-akB", "hugepages-64kB"},
			output: []string{"4GB", "2MB", "64KB"},
			isErr:  true,
		},
		{
			doc:    "more mixed valid and invalid entries",
			input:  []string{"hugepages-2048kB", "hugepages-kB", "hugepages-64kB"},
			output: []string{"2MB", "64KB"},
			isErr:  true,
		},
	}
	for _, c := range testCases {
		c := c
		t.Run(c.doc, func(t *testing.T) {
			output, err := getHugePageSizeFromFilenames(c.input)
			t.Log("input:", c.input, "; output:", output, "; err:", err)
			if err != nil {
				if !c.isErr {
					t.Errorf("input %v, expected nil, got error: %v", c.input, err)
				}
				// no more checks
				return
			}
			if c.isErr {
				t.Errorf("input %v, expected error, got error: nil, output: %v", c.input, output)
			}
			// check output
			if len(output) != len(c.output) || (len(output) > 0 && !reflect.DeepEqual(output, c.output)) {
				t.Errorf("input %v, expected %v, got %v", c.input, c.output, output)
			}
		})
	}
}
// TestConvertCPUSharesToCgroupV2Value checks the mapping of cgroup v1 CPU
// shares onto cgroup v2 cpu.weight values, including 0 meaning "unset".
func TestConvertCPUSharesToCgroupV2Value(t *testing.T) {
	for _, tc := range []struct {
		shares, want uint64
	}{
		{shares: 0, want: 0},
		{shares: 2, want: 1},
		{shares: 262144, want: 10000},
	} {
		if got := ConvertCPUSharesToCgroupV2Value(tc.shares); got != tc.want {
			t.Errorf("expected ConvertCPUSharesToCgroupV2Value(%d) to be %d, got %d", tc.shares, tc.want, got)
		}
	}
}
// TestConvertMemorySwapToCgroupV2Value checks the conversion of cgroup v1
// memory+swap limits to the cgroup v2 swap value. In v1, memsw is the sum of
// memory and swap, while v2 uses a separate swap-only limit; the conversion
// therefore subtracts memory from memsw, with -1 meaning unlimited and 0
// meaning unset. Invalid combinations (memsw < memory, memsw set while
// memory is unset or negative, memsw < -1) must produce an error.
func TestConvertMemorySwapToCgroupV2Value(t *testing.T) {
	cases := []struct {
		descr           string // Case description, used as the subtest name.
		memswap, memory int64  // Inputs: v1 memory+swap and memory limits.
		expected        int64  // Expected v2 swap value (when no error).
		expErr          bool   // Whether an error is expected.
	}{
		{
			descr:    "all unset",
			memswap:  0,
			memory:   0,
			expected: 0,
		},
		{
			descr:    "unlimited memory+swap, unset memory",
			memswap:  -1,
			memory:   0,
			expected: -1,
		},
		{
			descr:    "unlimited memory",
			memswap:  300,
			memory:   -1,
			expected: 300,
		},
		{
			descr:    "all unlimited",
			memswap:  -1,
			memory:   -1,
			expected: -1,
		},
		{
			descr:   "negative memory+swap",
			memswap: -2,
			memory:  0,
			expErr:  true,
		},
		{
			descr:    "unlimited memory+swap, set memory",
			memswap:  -1,
			memory:   1000,
			expected: -1,
		},
		{
			descr:    "memory+swap == memory",
			memswap:  1000,
			memory:   1000,
			expected: 0,
		},
		{
			descr:    "memory+swap > memory",
			memswap:  500,
			memory:   200,
			expected: 300,
		},
		{
			descr:   "memory+swap < memory",
			memswap: 300,
			memory:  400,
			expErr:  true,
		},
		{
			descr:   "unset memory",
			memswap: 300,
			memory:  0,
			expErr:  true,
		},
		{
			descr:   "negative memory",
			memswap: 300,
			memory:  -300,
			expErr:  true,
		},
	}
	for _, c := range cases {
		// NOTE: no `c := c` copy is needed: loop variables are per-iteration
		// since Go 1.22 (go.mod requires go 1.23.0), and the subtest closure
		// runs synchronously anyway.
		t.Run(c.descr, func(t *testing.T) {
			swap, err := ConvertMemorySwapToCgroupV2Value(c.memswap, c.memory)
			if c.expErr {
				if err == nil {
					t.Errorf("memswap: %d, memory %d, expected error, got %d, nil", c.memswap, c.memory, swap)
				}
				// On expected error there is nothing else to check.
				return
			}
			if err != nil {
				t.Errorf("memswap: %d, memory %d, expected success, got error %s", c.memswap, c.memory, err)
			}
			if swap != c.expected {
				t.Errorf("memswap: %d, memory %d, expected %d, got %d", c.memswap, c.memory, c.expected, swap)
			}
		})
	}
}
// TestConvertBlkIOToIOWeightValue checks the conversion of cgroup v1 blkio
// weight to cgroup v2 io.weight values at the boundary inputs
// (unset, minimum, and maximum weight).
func TestConvertBlkIOToIOWeightValue(t *testing.T) {
	for weight, want := range map[uint16]uint64{
		0:    0,
		10:   1,
		1000: 10000,
	} {
		if got := ConvertBlkIOToIOWeightValue(weight); got != want {
			t.Errorf("expected ConvertBlkIOToIOWeightValue(%d) to be %d, got %d", weight, want, got)
		}
	}
}
// TestRemovePathReadOnly is to test remove a non-existent dir in a ro mount point.
// The similar issue example: https://github.com/opencontainers/runc/issues/4518
//
// It checks that a plain rmdir of a non-existent directory on a read-only
// mount fails with EROFS (not ENOENT), while RemovePath treats that
// situation as success.
func TestRemovePathReadOnly(t *testing.T) {
	dirTo := t.TempDir()
	// Bind-mount one temp dir onto another; skipped when the test has no
	// privileges to mount (e.g. running unprivileged without userns).
	err := unix.Mount(t.TempDir(), dirTo, "", unix.MS_BIND, "")
	if err != nil {
		t.Skip("no permission of mount")
	}
	defer func() {
		_ = unix.Unmount(dirTo, 0)
	}()
	// Remount the bind mount read-only so subsequent modifications under
	// dirTo fail with EROFS.
	err = unix.Mount("", dirTo, "", unix.MS_REMOUNT|unix.MS_BIND|unix.MS_RDONLY, "")
	if err != nil {
		t.Skip("no permission of mount")
	}
	nonExistentDir := filepath.Join(dirTo, "non-existent-dir")
	// On a read-only filesystem, rmdir reports EROFS even for a path that
	// does not exist — this is the kernel behavior the fix works around.
	err = rmdir(nonExistentDir, true)
	if !errors.Is(err, unix.EROFS) {
		t.Fatalf("expected the error of removing a non-existent dir %s in a ro mount point with rmdir to be unix.EROFS, but got: %v", nonExistentDir, err)
	}
	// RemovePath must swallow that case: removing a path that is already
	// gone is a success, regardless of the EROFS from the ro mount.
	err = RemovePath(nonExistentDir)
	if err != nil {
		t.Fatalf("expected the error of removing a non-existent dir %s in a ro mount point with RemovePath to be nil, but got: %v", nonExistentDir, err)
	}
}

View File

@@ -1,6 +1,6 @@
package configs // Deprecated: use [github.com/opencontainers/runc/libcontainer/cgroups].
package configs // Deprecated: use [github.com/opencontainers/cgroups].
import "github.com/opencontainers/runc/libcontainer/cgroups"
import "github.com/opencontainers/cgroups"
type (
Cgroup = cgroups.Cgroup

View File

@@ -10,7 +10,7 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
devices "github.com/opencontainers/cgroups/devices/config"
"github.com/opencontainers/runtime-spec/specs-go"
)

View File

@@ -8,7 +8,7 @@ import (
"strings"
"sync"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runtime-spec/specs-go"

View File

@@ -20,7 +20,7 @@ import (
"github.com/vishvananda/netlink/nl"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/exeseal"
"github.com/opencontainers/runc/libcontainer/intelrdt"

View File

@@ -5,7 +5,7 @@ import (
"os"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
)

View File

@@ -23,7 +23,7 @@ import (
"golang.org/x/sys/unix"
"google.golang.org/protobuf/proto"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/utils"
)

View File

@@ -1,8 +1,8 @@
package devices
import "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
import "github.com/opencontainers/cgroups/devices/config"
// Deprecated: use [github.com/opencontainers/runc/libcontainer/cgroups/devices/config].
// Deprecated: use [github.com/opencontainers/cgroups/devices/config].
const (
Wildcard = config.Wildcard
WildcardDevice = config.WildcardDevice
@@ -11,7 +11,7 @@ const (
FifoDevice = config.FifoDevice
)
// Deprecated: use [github.com/opencontainers/runc/libcontainer/cgroups/devices/config].
// Deprecated: use [github.com/opencontainers/cgroups/devices/config].
type (
Device = config.Device
Permissions = config.Permissions

View File

@@ -9,8 +9,8 @@ import (
securejoin "github.com/cyphar/filepath-securejoin"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/manager"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/manager"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/configs/validate"
"github.com/opencontainers/runc/libcontainer/intelrdt"

View File

@@ -7,7 +7,7 @@ import (
"reflect"
"testing"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/utils"
"github.com/opencontainers/runtime-spec/specs-go"

View File

@@ -19,8 +19,8 @@ import (
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runc/libcontainer/capabilities"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/runc/libcontainer/utils"

View File

@@ -14,9 +14,9 @@ import (
"syscall"
"testing"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/systemd"
"github.com/opencontainers/runc/libcontainer"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/internal/userns"
"github.com/opencontainers/runc/libcontainer/utils"

View File

@@ -6,7 +6,7 @@ import (
"github.com/opencontainers/runc/libcontainer"
//nolint:revive // Enable cgroup manager to manage devices
_ "github.com/opencontainers/runc/libcontainer/cgroups/devices"
_ "github.com/opencontainers/cgroups/devices"
_ "github.com/opencontainers/runc/libcontainer/nsenter"
)

View File

@@ -6,8 +6,8 @@ import (
"testing"
"time"
"github.com/opencontainers/runc/libcontainer/cgroups"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
"github.com/opencontainers/cgroups"
devices "github.com/opencontainers/cgroups/devices/config"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/specconv"
"golang.org/x/sys/unix"

View File

@@ -6,9 +6,9 @@ import (
"strings"
"testing"
devices "github.com/opencontainers/cgroups/devices/config"
"github.com/opencontainers/cgroups/systemd"
"github.com/opencontainers/runc/libcontainer"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
)
func testUpdateDevices(t *testing.T, systemd bool) {

View File

@@ -13,7 +13,7 @@ import (
"github.com/moby/sys/mountinfo"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/cgroups/fscommon"
"github.com/opencontainers/runc/libcontainer/configs"
)

View File

@@ -6,7 +6,7 @@ import (
"path/filepath"
"unsafe"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/cgroups/fscommon"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)

View File

@@ -19,8 +19,8 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/fs2"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/libcontainer/internal/userns"

View File

@@ -21,9 +21,9 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/cgroups"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
"github.com/opencontainers/runc/libcontainer/cgroups/fs2"
"github.com/opencontainers/cgroups"
devices "github.com/opencontainers/cgroups/devices/config"
"github.com/opencontainers/cgroups/fs2"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/utils"
)

View File

@@ -5,7 +5,7 @@ import (
"path/filepath"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runtime-spec/specs-go"
)

View File

@@ -14,8 +14,8 @@ import (
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
dbus "github.com/godbus/dbus/v5"
"github.com/opencontainers/runc/libcontainer/cgroups"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
"github.com/opencontainers/cgroups"
devices "github.com/opencontainers/cgroups/devices/config"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/internal/userns"
"github.com/opencontainers/runc/libcontainer/seccomp"

View File

@@ -6,7 +6,7 @@ import (
"testing"
dbus "github.com/godbus/dbus/v5"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
devices "github.com/opencontainers/cgroups/devices/config"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runc/libcontainer/configs/validate"
"github.com/opencontainers/runtime-spec/specs-go"

View File

@@ -5,7 +5,7 @@ import (
"os"
"path/filepath"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runc/libcontainer/configs"
"github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/sys/unix"

View File

@@ -1,7 +1,7 @@
package libcontainer
import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runc/libcontainer/intelrdt"
"github.com/opencontainers/runc/types"
)

View File

@@ -11,7 +11,7 @@ import (
"strings"
//nolint:revive // Enable cgroup manager to manage devices
_ "github.com/opencontainers/runc/libcontainer/cgroups/devices"
_ "github.com/opencontainers/cgroups/devices"
"github.com/opencontainers/runc/libcontainer/seccomp"
"github.com/opencontainers/runtime-spec/specs-go"

View File

@@ -7,7 +7,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
"github.com/opencontainers/cgroups/systemd"
)
func shouldUseRootlessCgroupManager(context *cli.Context) (bool, error) {

View File

@@ -7,14 +7,14 @@ import (
"github.com/sirupsen/logrus"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/systemd"
)
func usage() {
fmt.Print(`Open Container Initiative tests/cmd/sd-helper
sd-helper is a tool that uses runc/libcontainer/cgroups/systemd package
sd-helper is a tool that uses github.com/opencontainers/cgroups/systemd package
functionality to communicate to systemd in order to perform various operations.
Currently this is limited to starting and stopping systemd transient slice
units.

View File

@@ -221,7 +221,7 @@ EOF
if [ -v HAVE_SWAP ]; then
# Test case for https://github.com/opencontainers/runc/pull/592,
# checking libcontainer/cgroups/fs/memory.go:setMemoryAndSwap.
# checking github.com/opencontainers/cgroups/fs/memory.go:setMemoryAndSwap.
runc update test_update --memory 30M --memory-swap 50M
[ "$status" -eq 0 ]

View File

@@ -1,7 +1,7 @@
package types
import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/runc/libcontainer/intelrdt"
)

View File

@@ -7,7 +7,7 @@ import (
"os"
"strconv"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/sirupsen/logrus"
"github.com/docker/go-units"

1
vendor/github.com/opencontainers/cgroups/CODEOWNERS generated vendored Normal file
View File

@@ -0,0 +1 @@
* @maintainer1 @maintainer2 @maintainer3

View File

@@ -0,0 +1,150 @@
# Contribution Guidelines
Development happens on GitHub.
Issues are used for bugs and actionable items and longer discussions can happen on the [mailing list](#mailing-list).
The content of this repository is licensed under the [Apache License, Version 2.0](LICENSE).
## Code of Conduct
Participation in the Open Container community is governed by [Open Container Code of Conduct][code-of-conduct].
## Meetings
The contributors and maintainers of all OCI projects have monthly meetings at 2:00 PM (USA Pacific) on the first Wednesday of every month.
There is an [iCalendar][rfc5545] format for the meetings [here][meeting.ics].
Everyone is welcome to participate via [UberConference web][UberConference] or audio-only: +1 415 968 0849 (no PIN needed).
An initial agenda will be posted to the [mailing list](#mailing-list) in the week before each meeting, and everyone is welcome to propose additional topics or suggest other agenda alterations there.
Minutes from past meetings are archived [here][minutes].
## Mailing list
You can subscribe and browse the mailing list on [Google Groups][mailing-list].
## IRC
OCI discussion happens on #opencontainers on [Freenode][] ([logs][irc-logs]).
## Git
### Security issues
If you are reporting a security issue, do not create an issue or file a pull
request on GitHub. Instead, disclose the issue responsibly by sending an email
to security@opencontainers.org (which is inhabited only by the maintainers of
the various OCI projects).
### Pull requests are always welcome
We are always thrilled to receive pull requests, and do our best to
process them as fast as possible. Not sure if that typo is worth a pull
request? Do it! We will appreciate it.
If your pull request is not accepted on the first try, don't be
discouraged! If there's a problem with the implementation, hopefully you
received feedback on what to improve.
We're trying very hard to keep the project lean and focused. We don't want it
to do everything for everybody. This means that we might decide against
incorporating a new feature.
### Conventions
Fork the repo and make changes on your fork in a feature branch.
For larger bugs and enhancements, consider filing a leader issue or mailing-list thread for discussion that is independent of the implementation.
Small changes or changes that have been discussed on the [project mailing list](#mailing-list) may be submitted without a leader issue.
If the project has a test suite, submit unit tests for your changes. Take a
look at existing tests for inspiration. Run the full test suite on your branch
before submitting a pull request.
Update the documentation when creating or modifying features. Test
your documentation changes for clarity, concision, and correctness, as
well as a clean documentation build.
Pull requests descriptions should be as clear as possible and include a
reference to all the issues that they address.
Commit messages must start with a capitalized and short summary
written in the imperative, followed by an optional, more detailed
explanatory text which is separated from the summary by an empty line.
Code review comments may be added to your pull request. Discuss, then make the
suggested modifications and push additional commits to your feature branch. Be
sure to post a comment after pushing. The new commits will show up in the pull
request automatically, but the reviewers will not be notified unless you
comment.
Before the pull request is merged, make sure that you squash your commits into
logical units of work using `git rebase -i` and `git push -f`. After every
commit the test suite (if any) should be passing. Include documentation changes
in the same commit so that a revert would remove all traces of the feature or
fix.
Commits that fix or close an issue should include a reference like `Closes #XXX`
or `Fixes #XXX`, which will automatically close the issue when merged.
### Sign your work
The sign-off is a simple line at the end of the explanation for the
patch, which certifies that you wrote it or otherwise have the right to
pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below (from [developercertificate.org][]):
```
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.
```
then you just add a line to every git commit message:
Signed-off-by: Joe Smith <joe@gmail.com>
using your real name (sorry, no pseudonyms or anonymous contributions.)
You can add the sign off when creating the git commit via `git commit -s`.
[code-of-conduct]: https://github.com/opencontainers/tob/blob/d2f9d68c1332870e40693fe077d311e0742bc73d/code-of-conduct.md
[developercertificate.org]: http://developercertificate.org/
[Freenode]: https://freenode.net/
[irc-logs]: http://ircbot.wl.linuxfoundation.org/eavesdrop/%23opencontainers/
[mailing-list]: https://groups.google.com/a/opencontainers.org/forum/#!forum/dev
[meeting.ics]: https://github.com/opencontainers/runtime-spec/blob/master/meeting.ics
[minutes]: http://ircbot.wl.linuxfoundation.org/meetings/opencontainers/
[rfc5545]: https://tools.ietf.org/html/rfc5545
[UberConference]: https://www.uberconference.com/opencontainers

63
vendor/github.com/opencontainers/cgroups/GOVERNANCE.md generated vendored Normal file
View File

@@ -0,0 +1,63 @@
# Project governance
The [OCI charter][charter] §5.b.viii tasks an OCI Project's maintainers (listed in the repository's MAINTAINERS file and sometimes referred to as "the TDC", [§5.e][charter]) with:
> Creating, maintaining and enforcing governance guidelines for the TDC, approved by the maintainers, and which shall be posted visibly for the TDC.
This section describes generic rules and procedures for fulfilling that mandate.
## Proposing a motion
A maintainer SHOULD propose a motion on the dev@opencontainers.org mailing list (except [security issues](#security-issues)) with another maintainer as a co-sponsor.
## Voting
Voting on a proposed motion SHOULD happen on the dev@opencontainers.org mailing list (except [security issues](#security-issues)) with maintainers posting LGTM or REJECT.
Maintainers MAY also explicitly not vote by posting ABSTAIN (which is useful to revert a previous vote).
Maintainers MAY post multiple times (e.g. as they revise their position based on feedback), but only their final post counts in the tally.
A proposed motion is adopted if two-thirds of votes cast, a quorum having voted, are in favor of the release.
Voting SHOULD remain open for a week to collect feedback from the wider community and allow the maintainers to digest the proposed motion.
Under exceptional conditions (e.g. non-major security fix releases) proposals which reach quorum with unanimous support MAY be adopted earlier.
A maintainer MAY choose to reply with REJECT.
A maintainer posting a REJECT MUST include a list of concerns or links to written documentation for those concerns (e.g. GitHub issues or mailing-list threads).
The maintainers SHOULD try to resolve the concerns and wait for the rejecting maintainer to change their opinion to LGTM.
However, a motion MAY be adopted with REJECTs, as outlined in the previous paragraphs.
## Quorum
A quorum is established when at least two-thirds of maintainers have voted.
For projects that are not specifications, a [motion to release](#release-approval) MAY be adopted if the tally is at least three LGTMs and no REJECTs, even if three votes does not meet the usual two-thirds quorum.
## Amendments
The [project governance](#project-governance) rules and procedures MAY be amended or replaced using the procedures themselves.
The MAINTAINERS of this project governance document is the total set of MAINTAINERS from all Open Containers projects (go-digest, image-spec, image-tools, runC, runtime-spec, runtime-tools, and selinux).
## Subject templates
Maintainers are busy and get lots of email.
To make project proposals recognizable, proposed motions SHOULD use the following subject templates.
### Proposing a motion
> [{project} VOTE]: {motion description} (closes {end of voting window})
For example:
> [runtime-spec VOTE]: Tag 0647920 as 1.0.0-rc (closes 2016-06-03 20:00 UTC)
### Tallying results
After voting closes, a maintainer SHOULD post a tally to the motion thread with a subject template like:
> [{project} {status}]: {motion description} (+{LGTMs} -{REJECTs} #{ABSTAINs})
Where `{status}` is either `adopted` or `rejected`.
For example:
> [runtime-spec adopted]: Tag 0647920 as 1.0.0-rc (+6 -0 #3)
[charter]: https://www.opencontainers.org/about/governance

201
vendor/github.com/opencontainers/cgroups/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

8
vendor/github.com/opencontainers/cgroups/MAINTAINERS generated vendored Normal file
View File

@@ -0,0 +1,8 @@
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp> (@AkihiroSuda)
Aleksa Sarai <cyphar@cyphar.com> (@cyphar)
Kir Kolyshkin <kolyshkin@gmail.com> (@kolyshkin)
Mrunal Patel <mpatel@redhat.com> (@mrunalp)
Sebastiaan van Stijn <github@gone.nl> (@thaJeztah)
Odin Ugedal <odin@uged.al> (@odinuge)
Peter Hunt <pehunt@redhat.com> (@haircommander)
Davanum Srinivas <davanum@gmail.com> (@dims)

View File

@@ -0,0 +1,92 @@
## Introduction
Dear maintainer. Thank you for investing the time and energy to help
make this project as useful as possible. Maintaining a project is difficult,
sometimes unrewarding work. Sure, you will get to contribute cool
features to the project. But most of your time will be spent reviewing,
cleaning up, documenting, answering questions, justifying design
decisions - while everyone has all the fun! But remember - the quality
of the maintainers' work is what distinguishes the good projects from the
great. So please be proud of your work, even the unglamorous parts,
and encourage a culture of appreciation and respect for *every* aspect
of improving the project - not just the hot new features.
This document is a manual for maintainers old and new. It explains what
is expected of maintainers, how they should work, and what tools are
available to them.
This is a living document - if you see something out of date or missing,
speak up!
## What are a maintainer's responsibilities?
It is every maintainer's responsibility to:
* Expose a clear roadmap for improving their component.
* Deliver prompt feedback and decisions on pull requests.
* Be available to anyone with questions, bug reports, criticism etc. on their component.
This includes IRC and GitHub issues and pull requests.
* Make sure their component respects the philosophy, design and roadmap of the project.
## How are decisions made?
This project is an open-source project with an open design philosophy. This
means that the repository is the source of truth for EVERY aspect of the
project, including its philosophy, design, roadmap and APIs. *If it's
part of the project, it's in the repo. It's in the repo, it's part of
the project.*
As a result, all decisions can be expressed as changes to the
repository. An implementation change is a change to the source code. An
API change is a change to the API specification. A philosophy change is
a change to the philosophy manifesto. And so on.
All decisions affecting this project, big and small, follow the same procedure:
1. Discuss a proposal on the [mailing list](CONTRIBUTING.md#mailing-list).
Anyone can do this.
2. Open a pull request.
Anyone can do this.
3. Discuss the pull request.
Anyone can do this.
4. Endorse (`LGTM`) or oppose (`Rejected`) the pull request.
The relevant maintainers do this (see below [Who decides what?](#who-decides-what)).
Changes that affect project management (changing policy, cutting releases, etc.) are [proposed and voted on the mailing list](GOVERNANCE.md).
5. Merge or close the pull request.
The relevant maintainers do this.
### I'm a maintainer, should I make pull requests too?
Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.
## Who decides what?
All decisions are pull requests, and the relevant maintainers make
decisions by accepting or refusing the pull request. Review and acceptance
by anyone is denoted by adding a comment in the pull request: `LGTM`.
However, only currently listed `MAINTAINERS` are counted towards the required
two LGTMs. In addition, if a maintainer has created a pull request, they cannot
count toward the two LGTM rule (to ensure equal amounts of review for every pull
request, no matter who wrote it).
Overall the maintainer system works because of mutual respect.
The maintainers trust one another to act in the best interests of the project.
Sometimes maintainers can disagree and this is part of a healthy project to represent the points of view of various people.
In the case where maintainers cannot find agreement on a specific change, maintainers should use the [governance procedure](GOVERNANCE.md) to attempt to reach a consensus.
### How are maintainers added?
The best maintainers have a vested interest in the project. Maintainers
are first and foremost contributors that have shown they are committed to
the long term success of the project. Contributors wanting to become
maintainers are expected to be deeply involved in contributing code,
pull request review, and triage of issues in the project for more than two months.
Just contributing does not make you a maintainer, it is about building trust with the current maintainers of the project and being a person that they can depend on to act in the best interest of the project.
The final vote to add a new maintainer should be approved by the [governance procedure](GOVERNANCE.md).
### How are maintainers removed?
When a maintainer is unable to perform the [required duties](#what-are-a-maintainers-responsibilities) they can be removed by the [governance procedure](GOVERNANCE.md).
Issues related to a maintainer's performance should be discussed with them among the other maintainers so that they are not surprised by a pull request removing them.

11
vendor/github.com/opencontainers/cgroups/README.md generated vendored Normal file
View File

@@ -0,0 +1,11 @@
# OCI Project Template
Useful boilerplate and organizational information for all OCI projects.
* README (this file)
* [The Apache License, Version 2.0](LICENSE)
* [A list of maintainers](MAINTAINERS)
* [Maintainer guidelines](MAINTAINERS_GUIDE.md)
* [Contributor guidelines](CONTRIBUTING.md)
* [Project governance](GOVERNANCE.md)
* [Release procedures](RELEASES.md)

51
vendor/github.com/opencontainers/cgroups/RELEASES.md generated vendored Normal file
View File

@@ -0,0 +1,51 @@
# Releases
The release process hopes to encourage early, consistent consensus-building during project development.
The mechanisms used are regular community communication on the mailing list about progress, scheduled meetings for issue resolution and release triage, and regularly paced and communicated releases.
Releases are proposed and adopted or rejected using the usual [project governance](GOVERNANCE.md) rules and procedures.
An anti-pattern that we want to avoid is heavy development or discussions "late cycle" around major releases.
We want to build a community that is involved and communicates consistently through all releases instead of relying on "silent periods" as a judge of stability.
## Parallel releases
A single project MAY consider several motions to release in parallel.
However each motion to release after the initial 0.1.0 MUST be based on a previous release that has already landed.
For example, runtime-spec maintainers may propose a v1.0.0-rc2 on the 1st of the month and a v0.9.1 bugfix on the 2nd of the month.
They may not propose a v1.0.0-rc3 until the v1.0.0-rc2 is accepted (on the 7th if the vote initiated on the 1st passes).
## Specifications
The OCI maintains three categories of projects: specifications, applications, and conformance-testing tools.
However, specification releases have special restrictions in the [OCI charter][charter]:
* They are the target of backwards compatibility (§7.g), and
* They are subject to the OFWa patent grant (§8.d and e).
To avoid unfortunate side effects (onerous backwards compatibility requirements or Member resignations), the following additional procedures apply to specification releases:
### Planning a release
Every OCI specification project SHOULD hold meetings that involve maintainers reviewing pull requests, debating outstanding issues, and planning releases.
This meeting MUST be advertised on the project README and MAY happen on a phone call, video conference, or on IRC.
Maintainers MUST send updates to the dev@opencontainers.org with results of these meetings.
Before the specification reaches v1.0.0, the meetings SHOULD be weekly.
Once a specification has reached v1.0.0, the maintainers may alter the cadence, but a meeting MUST be held within four weeks of the previous meeting.
The release plans, corresponding milestones and estimated due dates MUST be published on GitHub (e.g. https://github.com/opencontainers/runtime-spec/milestones).
GitHub milestones and issues are only used for community organization and all releases MUST follow the [project governance](GOVERNANCE.md) rules and procedures.
### Timelines
Specifications have a variety of different timelines in their lifecycle.
* Pre-v1.0.0 specifications SHOULD release on a monthly cadence to garner feedback.
* Major specification releases MUST release at least three release candidates spaced a minimum of one week apart.
This means a major release like a v1.0.0 or v2.0.0 release will take 1 month at minimum: one week for rc1, one week for rc2, one week for rc3, and one week for the major release itself.
Maintainers SHOULD strive to make zero breaking changes during this cycle of release candidates and SHOULD restart the three-candidate count when a breaking change is introduced.
For example if a breaking change is introduced in v1.0.0-rc2 then the series would end with v1.0.0-rc4 and v1.0.0.
* Minor and patch releases SHOULD be made on an as-needed basis.
[charter]: https://www.opencontainers.org/about/governance

View File

@@ -16,7 +16,7 @@ var (
// DevicesSetV1 and DevicesSetV2 are functions to set devices for
// cgroup v1 and v2, respectively. Unless
// [github.com/opencontainers/runc/libcontainer/cgroups/devices]
// [github.com/opencontainers/cgroups/devices]
// package is imported, it is set to nil, so cgroup managers can't
// manage devices.
DevicesSetV1 func(path string, r *Resources) error

View File

@@ -2,7 +2,7 @@ package cgroups
import (
systemdDbus "github.com/coreos/go-systemd/v22/dbus"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
devices "github.com/opencontainers/cgroups/devices/config"
)
type FreezerState string

View File

@@ -13,7 +13,7 @@ import (
"strconv"
"github.com/cilium/ebpf/asm"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
devices "github.com/opencontainers/cgroups/devices/config"
"golang.org/x/sys/unix"
)

View File

@@ -5,8 +5,8 @@
package devices
import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/systemd"
)
func init() {

View File

@@ -26,7 +26,7 @@ import (
"strconv"
"strings"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
devices "github.com/opencontainers/cgroups/devices/config"
)
// deviceMeta is a Rule without the Allow or Permissions fields, and no

View File

@@ -11,8 +11,8 @@ import (
"github.com/godbus/dbus/v5"
"github.com/sirupsen/logrus"
"github.com/opencontainers/runc/libcontainer/cgroups"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
"github.com/opencontainers/cgroups"
devices "github.com/opencontainers/cgroups/devices/config"
)
// systemdProperties takes the configured device rules and generates a

View File

@@ -6,8 +6,8 @@ import (
"reflect"
"github.com/moby/sys/userns"
"github.com/opencontainers/runc/libcontainer/cgroups"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
"github.com/opencontainers/cgroups"
devices "github.com/opencontainers/cgroups/devices/config"
)
var testingSkipFinalCheck bool

View File

@@ -6,8 +6,8 @@ import (
"github.com/moby/sys/userns"
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/cgroups"
devices "github.com/opencontainers/runc/libcontainer/cgroups/devices/config"
"github.com/opencontainers/cgroups"
devices "github.com/opencontainers/cgroups/devices/config"
)
func isRWM(perms devices.Permissions) bool {

View File

@@ -7,7 +7,7 @@ import (
"strconv"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
)
type BlkioGroup struct {

View File

@@ -7,8 +7,8 @@ import (
"os"
"strconv"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/fscommon"
"golang.org/x/sys/unix"
)

View File

@@ -6,8 +6,8 @@ import (
"strconv"
"strings"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/fscommon"
)
const (

View File

@@ -10,8 +10,8 @@ import (
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/fscommon"
)
var (

View File

@@ -1,7 +1,7 @@
package fs
import (
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
)
type DevicesGroup struct{}

View File

@@ -3,7 +3,7 @@ package fs
import (
"fmt"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/cgroups/fscommon"
)
type parseError = fscommon.ParseError

View File

@@ -7,7 +7,7 @@ import (
"strings"
"time"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/cgroups"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)

View File

@@ -8,8 +8,8 @@ import (
"golang.org/x/sys/unix"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/fscommon"
)
var subsystems = []subsystem{

View File

@@ -5,8 +5,8 @@ import (
"os"
"strconv"
"github.com/opencontainers/runc/libcontainer/cgroups"
"github.com/opencontainers/runc/libcontainer/cgroups/fscommon"
"github.com/opencontainers/cgroups"
"github.com/opencontainers/cgroups/fscommon"
)
type HugetlbGroup struct{}

Some files were not shown because too many files have changed in this diff Show More