Skip to content

Commit 2bc77b8

Browse files
committed
Adds Windows resource limits support
This allows running Windows containers to have their resource limits updated through containerd. CPU resource limits support has been added for Windows Server 20H2 and newer; on older versions, hcsshim will raise an Unimplemented error. Signed-off-by: Claudiu Belu <cbelu@cloudbasesolutions.com>
1 parent 5b29542 commit 2bc77b8

10 files changed

+233
-159
lines changed

integration/container_update_resources_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ func TestUpdateContainerResources(t *testing.T) {
6767
t.Log("Update container memory limit after created")
6868
err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{
6969
MemoryLimitInBytes: 400 * 1024 * 1024,
70-
})
70+
}, nil)
7171
require.NoError(t, err)
7272

7373
t.Log("Check memory limit in container OCI spec")
@@ -90,7 +90,7 @@ func TestUpdateContainerResources(t *testing.T) {
9090
t.Log("Update container memory limit after started")
9191
err = runtimeService.UpdateContainerResources(cn, &runtime.LinuxContainerResources{
9292
MemoryLimitInBytes: 800 * 1024 * 1024,
93-
})
93+
}, nil)
9494
require.NoError(t, err)
9595

9696
t.Log("Check memory limit in container OCI spec")

integration/cri-api/pkg/apis/services.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ type ContainerManager interface {
6363
// ContainerStatus returns the status of the container.
6464
ContainerStatus(containerID string, opts ...grpc.CallOption) (*runtimeapi.ContainerStatus, error)
6565
// UpdateContainerResources updates the cgroup resources for the container.
66-
UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, opts ...grpc.CallOption) error
66+
UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, windowsResources *runtimeapi.WindowsContainerResources, opts ...grpc.CallOption) error
6767
// ExecSync executes a command in the container, and returns the stdout output.
6868
// If command exits with a non-zero exit code, an error is returned.
6969
ExecSync(containerID string, cmd []string, timeout time.Duration, opts ...grpc.CallOption) (stdout []byte, stderr []byte, err error)

integration/remote/remote_runtime.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -361,14 +361,15 @@ func (r *RuntimeService) ContainerStatus(containerID string, opts ...grpc.CallOp
361361
}
362362

363363
// UpdateContainerResources updates a containers resource config
364-
func (r *RuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, opts ...grpc.CallOption) error {
364+
func (r *RuntimeService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources, windowsResources *runtimeapi.WindowsContainerResources, opts ...grpc.CallOption) error {
365365
klog.V(10).Infof("[RuntimeService] UpdateContainerResources (containerID=%v, timeout=%v)", containerID, r.timeout)
366366
ctx, cancel := getContextWithTimeout(r.timeout)
367367
defer cancel()
368368

369369
_, err := r.runtimeClient.UpdateContainerResources(ctx, &runtimeapi.UpdateContainerResourcesRequest{
370370
ContainerId: containerID,
371371
Linux: resources,
372+
Windows: windowsResources,
372373
}, opts...)
373374
if err != nil {
374375
klog.Errorf("UpdateContainerResources %q from runtime service failed: %v", containerID, err)

integration/truncindex_test.go

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -111,12 +111,15 @@ func TestTruncIndex(t *testing.T) {
111111
require.NoError(t, err)
112112
assert.Equal(t, cn, cStats.Attributes.Id)
113113

114+
t.Logf("Update container memory limit after started")
114115
if goruntime.GOOS != "windows" {
115-
// TODO(claudiub): remove this when UpdateContainerResources works on running Windows Containers.
116-
// https://github.com/containerd/containerd/issues/5187
117-
t.Logf("Update container memory limit after started")
118116
err = runtimeService.UpdateContainerResources(cnTruncIndex, &runtimeapi.LinuxContainerResources{
119117
MemoryLimitInBytes: 50 * 1024 * 1024,
118+
}, nil)
119+
assert.NoError(t, err)
120+
} else {
121+
err = runtimeService.UpdateContainerResources(cnTruncIndex, nil, &runtimeapi.WindowsContainerResources{
122+
MemoryLimitInBytes: 50 * 1024 * 1024,
120123
})
121124
assert.NoError(t, err)
122125
}

pkg/cri/opts/spec_windows.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -174,9 +174,6 @@ func WithWindowsResources(resources *runtime.WindowsContainerResources) oci.Spec
174174
if s.Windows.Resources == nil {
175175
s.Windows.Resources = &runtimespec.WindowsResources{}
176176
}
177-
if s.Windows.Resources.CPU == nil {
178-
s.Windows.Resources.CPU = &runtimespec.WindowsCPUResources{}
179-
}
180177
if s.Windows.Resources.Memory == nil {
181178
s.Windows.Resources.Memory = &runtimespec.WindowsMemoryResources{}
182179
}
@@ -187,6 +184,9 @@ func WithWindowsResources(resources *runtime.WindowsContainerResources) oci.Spec
187184
max = uint16(resources.GetCpuMaximum())
188185
limit = uint64(resources.GetMemoryLimitInBytes())
189186
)
187+
if s.Windows.Resources.CPU == nil && (count != 0 || shares != 0 || max != 0) {
188+
s.Windows.Resources.CPU = &runtimespec.WindowsCPUResources{}
189+
}
190190
if count != 0 {
191191
s.Windows.Resources.CPU.Count = &count
192192
}
Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
//go:build !darwin && !freebsd
2+
// +build !darwin,!freebsd
3+
4+
/*
5+
Copyright The containerd Authors.
6+
7+
Licensed under the Apache License, Version 2.0 (the "License");
8+
you may not use this file except in compliance with the License.
9+
You may obtain a copy of the License at
10+
11+
http://www.apache.org/licenses/LICENSE-2.0
12+
13+
Unless required by applicable law or agreed to in writing, software
14+
distributed under the License is distributed on an "AS IS" BASIS,
15+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16+
See the License for the specific language governing permissions and
17+
limitations under the License.
18+
*/
19+
20+
package server
21+
22+
import (
23+
gocontext "context"
24+
25+
"github.com/containerd/containerd"
26+
"github.com/containerd/containerd/containers"
27+
"github.com/containerd/containerd/errdefs"
28+
"github.com/containerd/containerd/log"
29+
"github.com/containerd/typeurl"
30+
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
31+
"github.com/pkg/errors"
32+
"golang.org/x/net/context"
33+
runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
34+
35+
containerstore "github.com/containerd/containerd/pkg/cri/store/container"
36+
ctrdutil "github.com/containerd/containerd/pkg/cri/util"
37+
)
38+
39+
// UpdateContainerResources updates ContainerConfig of the container.
40+
func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) {
41+
container, err := c.containerStore.Get(r.GetContainerId())
42+
if err != nil {
43+
return nil, errors.Wrap(err, "failed to find container")
44+
}
45+
// Update resources in status update transaction, so that:
46+
// 1) There won't be race condition with container start.
47+
// 2) There won't be concurrent resource update to the same container.
48+
if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
49+
return status, c.updateContainerResources(ctx, container, r, status)
50+
}); err != nil {
51+
return nil, errors.Wrap(err, "failed to update resources")
52+
}
53+
return &runtime.UpdateContainerResourcesResponse{}, nil
54+
}
55+
56+
func (c *criService) updateContainerResources(ctx context.Context,
57+
cntr containerstore.Container,
58+
r *runtime.UpdateContainerResourcesRequest,
59+
status containerstore.Status) (retErr error) {
60+
id := cntr.ID
61+
// Do not update the container when there is a removal in progress.
62+
if status.Removing {
63+
return errors.Errorf("container %q is in removing state", id)
64+
}
65+
66+
// Update container spec. If the container is not started yet, updating
67+
// spec makes sure that the resource limits are correct when start;
68+
// if the container is already started, updating spec is still required,
69+
// the spec will become our source of truth for resource limits.
70+
oldSpec, err := cntr.Container.Spec(ctx)
71+
if err != nil {
72+
return errors.Wrap(err, "failed to get container spec")
73+
}
74+
newSpec, err := updateOCIResource(ctx, oldSpec, r, c.config)
75+
if err != nil {
76+
return errors.Wrap(err, "failed to update resource in spec")
77+
}
78+
79+
if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil {
80+
return err
81+
}
82+
defer func() {
83+
if retErr != nil {
84+
deferCtx, deferCancel := ctrdutil.DeferContext()
85+
defer deferCancel()
86+
// Reset spec on error.
87+
if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil {
88+
log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id)
89+
}
90+
}
91+
}()
92+
93+
// If container is not running, only update spec is enough, new resource
94+
// limit will be applied when container start.
95+
if status.State() != runtime.ContainerState_CONTAINER_RUNNING {
96+
return nil
97+
}
98+
99+
task, err := cntr.Container.Task(ctx, nil)
100+
if err != nil {
101+
if errdefs.IsNotFound(err) {
102+
// Task exited already.
103+
return nil
104+
}
105+
return errors.Wrap(err, "failed to get task")
106+
}
107+
// newSpec.Linux / newSpec.Windows won't be nil
108+
if err := task.Update(ctx, containerd.WithResources(getResources(newSpec))); err != nil {
109+
if errdefs.IsNotFound(err) {
110+
// Task exited already.
111+
return nil
112+
}
113+
return errors.Wrap(err, "failed to update resources")
114+
}
115+
return nil
116+
}
117+
118+
// updateContainerSpec updates container spec.
119+
func updateContainerSpec(ctx context.Context, cntr containerd.Container, spec *runtimespec.Spec) error {
120+
any, err := typeurl.MarshalAny(spec)
121+
if err != nil {
122+
return errors.Wrapf(err, "failed to marshal spec %+v", spec)
123+
}
124+
if err := cntr.Update(ctx, func(ctx gocontext.Context, client *containerd.Client, c *containers.Container) error {
125+
c.Spec = any
126+
return nil
127+
}); err != nil {
128+
return errors.Wrap(err, "failed to update container spec")
129+
}
130+
return nil
131+
}

pkg/cri/server/container_update_resources_linux.go

Lines changed: 9 additions & 107 deletions
Original file line numberDiff line numberDiff line change
@@ -17,122 +17,20 @@
1717
package server
1818

1919
import (
20-
gocontext "context"
21-
22-
"github.com/containerd/containerd"
23-
"github.com/containerd/containerd/containers"
24-
"github.com/containerd/containerd/errdefs"
25-
"github.com/containerd/containerd/log"
26-
"github.com/containerd/typeurl"
2720
runtimespec "github.com/opencontainers/runtime-spec/specs-go"
2821
"github.com/pkg/errors"
2922
"golang.org/x/net/context"
3023
runtime "k8s.io/cri-api/pkg/apis/runtime/v1"
3124

25+
criconfig "github.com/containerd/containerd/pkg/cri/config"
3226
"github.com/containerd/containerd/pkg/cri/opts"
33-
containerstore "github.com/containerd/containerd/pkg/cri/store/container"
3427
"github.com/containerd/containerd/pkg/cri/util"
35-
ctrdutil "github.com/containerd/containerd/pkg/cri/util"
3628
)
3729

38-
// UpdateContainerResources updates ContainerConfig of the container.
39-
func (c *criService) UpdateContainerResources(ctx context.Context, r *runtime.UpdateContainerResourcesRequest) (retRes *runtime.UpdateContainerResourcesResponse, retErr error) {
40-
container, err := c.containerStore.Get(r.GetContainerId())
41-
if err != nil {
42-
return nil, errors.Wrap(err, "failed to find container")
43-
}
44-
// Update resources in status update transaction, so that:
45-
// 1) There won't be race condition with container start.
46-
// 2) There won't be concurrent resource update to the same container.
47-
if err := container.Status.Update(func(status containerstore.Status) (containerstore.Status, error) {
48-
return status, c.updateContainerResources(ctx, container, r.GetLinux(), status)
49-
}); err != nil {
50-
return nil, errors.Wrap(err, "failed to update resources")
51-
}
52-
return &runtime.UpdateContainerResourcesResponse{}, nil
53-
}
54-
55-
func (c *criService) updateContainerResources(ctx context.Context,
56-
cntr containerstore.Container,
57-
resources *runtime.LinuxContainerResources,
58-
status containerstore.Status) (retErr error) {
59-
id := cntr.ID
60-
// Do not update the container when there is a removal in progress.
61-
if status.Removing {
62-
return errors.Errorf("container %q is in removing state", id)
63-
}
64-
65-
// Update container spec. If the container is not started yet, updating
66-
// spec makes sure that the resource limits are correct when start;
67-
// if the container is already started, updating spec is still required,
68-
// the spec will become our source of truth for resource limits.
69-
oldSpec, err := cntr.Container.Spec(ctx)
70-
if err != nil {
71-
return errors.Wrap(err, "failed to get container spec")
72-
}
73-
newSpec, err := updateOCILinuxResource(ctx, oldSpec, resources,
74-
c.config.TolerateMissingHugetlbController, c.config.DisableHugetlbController)
75-
if err != nil {
76-
return errors.Wrap(err, "failed to update resource in spec")
77-
}
78-
79-
if err := updateContainerSpec(ctx, cntr.Container, newSpec); err != nil {
80-
return err
81-
}
82-
defer func() {
83-
if retErr != nil {
84-
deferCtx, deferCancel := ctrdutil.DeferContext()
85-
defer deferCancel()
86-
// Reset spec on error.
87-
if err := updateContainerSpec(deferCtx, cntr.Container, oldSpec); err != nil {
88-
log.G(ctx).WithError(err).Errorf("Failed to update spec %+v for container %q", oldSpec, id)
89-
}
90-
}
91-
}()
30+
// updateOCIResource updates container resource limit.
31+
func updateOCIResource(ctx context.Context, spec *runtimespec.Spec, r *runtime.UpdateContainerResourcesRequest,
32+
config criconfig.Config) (*runtimespec.Spec, error) {
9233

93-
// If container is not running, only update spec is enough, new resource
94-
// limit will be applied when container start.
95-
if status.State() != runtime.ContainerState_CONTAINER_RUNNING {
96-
return nil
97-
}
98-
99-
task, err := cntr.Container.Task(ctx, nil)
100-
if err != nil {
101-
if errdefs.IsNotFound(err) {
102-
// Task exited already.
103-
return nil
104-
}
105-
return errors.Wrap(err, "failed to get task")
106-
}
107-
// newSpec.Linux won't be nil
108-
if err := task.Update(ctx, containerd.WithResources(newSpec.Linux.Resources)); err != nil {
109-
if errdefs.IsNotFound(err) {
110-
// Task exited already.
111-
return nil
112-
}
113-
return errors.Wrap(err, "failed to update resources")
114-
}
115-
return nil
116-
}
117-
118-
// updateContainerSpec updates container spec.
119-
func updateContainerSpec(ctx context.Context, cntr containerd.Container, spec *runtimespec.Spec) error {
120-
any, err := typeurl.MarshalAny(spec)
121-
if err != nil {
122-
return errors.Wrapf(err, "failed to marshal spec %+v", spec)
123-
}
124-
if err := cntr.Update(ctx, func(ctx gocontext.Context, client *containerd.Client, c *containers.Container) error {
125-
c.Spec = any
126-
return nil
127-
}); err != nil {
128-
return errors.Wrap(err, "failed to update container spec")
129-
}
130-
return nil
131-
}
132-
133-
// updateOCILinuxResource updates container resource limit.
134-
func updateOCILinuxResource(ctx context.Context, spec *runtimespec.Spec, new *runtime.LinuxContainerResources,
135-
tolerateMissingHugetlbController, disableHugetlbController bool) (*runtimespec.Spec, error) {
13634
// Copy to make sure old spec is not changed.
13735
var cloned runtimespec.Spec
13836
if err := util.DeepCopy(&cloned, spec); err != nil {
@@ -141,8 +39,12 @@ func updateOCILinuxResource(ctx context.Context, spec *runtimespec.Spec, new *ru
14139
if cloned.Linux == nil {
14240
cloned.Linux = &runtimespec.Linux{}
14341
}
144-
if err := opts.WithResources(new, tolerateMissingHugetlbController, disableHugetlbController)(ctx, nil, nil, &cloned); err != nil {
42+
if err := opts.WithResources(r.GetLinux(), config.TolerateMissingHugetlbController, config.DisableHugetlbController)(ctx, nil, nil, &cloned); err != nil {
14543
return nil, errors.Wrap(err, "unable to set linux container resources")
14644
}
14745
return &cloned, nil
14846
}
47+
48+
func getResources(spec *runtimespec.Spec) interface{} {
49+
return spec.Linux.Resources
50+
}

0 commit comments

Comments
 (0)