Add cpu_hard_limit and cpu_cfs_period options #149

Merged 3 commits on Feb 15, 2022
CHANGELOG.md (1 change: 1 addition, 0 deletions)
@@ -4,6 +4,7 @@ FEATURES:
* config: Map host devices into container. [[GH-41](https://github.com/hashicorp/nomad-driver-podman/pull/41)]
* config: Stream logs via API, support journald log driver. [[GH-99](https://github.com/hashicorp/nomad-driver-podman/pull/99)]
* config: Privileged containers.
* config: Add `cpu_hard_limit` and `cpu_cfs_period` options. [[GH-149](https://github.com/hashicorp/nomad-driver-podman/pull/149)]
* config: Allow mounting rootfs as read-only. [[GH-133](https://github.com/hashicorp/nomad-driver-podman/pull/133)]

BUG FIXES:
config.go (26 changes: 15 additions, 11 deletions)
@@ -47,17 +47,19 @@ var (
"username": hclspec.NewAttr("username", "string", false),
"password": hclspec.NewAttr("password", "string", false),
})),
"command": hclspec.NewAttr("command", "string", false),
"cap_add": hclspec.NewAttr("cap_add", "list(string)", false),
"cap_drop": hclspec.NewAttr("cap_drop", "list(string)", false),
"devices": hclspec.NewAttr("devices", "list(string)", false),
"entrypoint": hclspec.NewAttr("entrypoint", "string", false),
"working_dir": hclspec.NewAttr("working_dir", "string", false),
"hostname": hclspec.NewAttr("hostname", "string", false),
"image": hclspec.NewAttr("image", "string", true),
"init": hclspec.NewAttr("init", "bool", false),
"init_path": hclspec.NewAttr("init_path", "string", false),
"labels": hclspec.NewAttr("labels", "list(map(string))", false),
"command": hclspec.NewAttr("command", "string", false),
"cap_add": hclspec.NewAttr("cap_add", "list(string)", false),
"cap_drop": hclspec.NewAttr("cap_drop", "list(string)", false),
"cpu_hard_limit": hclspec.NewAttr("cpu_hard_limit", "bool", false),
"cpu_cfs_period": hclspec.NewAttr("cpu_cfs_period", "number", false),
"devices": hclspec.NewAttr("devices", "list(string)", false),
"entrypoint": hclspec.NewAttr("entrypoint", "string", false),
"working_dir": hclspec.NewAttr("working_dir", "string", false),
"hostname": hclspec.NewAttr("hostname", "string", false),
"image": hclspec.NewAttr("image", "string", true),
"init": hclspec.NewAttr("init", "bool", false),
"init_path": hclspec.NewAttr("init_path", "string", false),
"labels": hclspec.NewAttr("labels", "list(map(string))", false),
"logging": hclspec.NewBlock("logging", false, hclspec.NewObject(map[string]*hclspec.Spec{
"driver": hclspec.NewAttr("driver", "string", false),
"options": hclspec.NewAttr("options", "list(map(string))", false),
@@ -131,9 +133,11 @@ type TaskConfig struct {
MemoryReservation string `codec:"memory_reservation"`
MemorySwap string `codec:"memory_swap"`
NetworkMode string `codec:"network_mode"`
CPUCFSPeriod uint64 `codec:"cpu_cfs_period"`
MemorySwappiness int64 `codec:"memory_swappiness"`
PortMap hclutils.MapStrInt `codec:"port_map"`
Sysctl hclutils.MapStrStr `codec:"sysctl"`
CPUHardLimit bool `codec:"cpu_hard_limit"`
Init bool `codec:"init"`
Tty bool `codec:"tty"`
ForcePull bool `codec:"force_pull"`
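With the new `cpu_hard_limit` (bool) and `cpu_cfs_period` (number) attributes in the task config spec above, a task can opt into a hard CFS-enforced CPU limit from its `config` block. A minimal jobspec sketch, for illustration only (the image and values mirror the test below and are not part of this diff; `cpu_cfs_period` only takes effect when `cpu_hard_limit = true`):

```hcl
task "redis" {
  driver = "podman"

  config {
    image          = "docker://redis"
    cpu_hard_limit = true   # enforce a CFS quota rather than CPU shares alone
    cpu_cfs_period = 100000 # optional; 100000 µs matches the cgroup default
  }
}
```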
config_test.go (17 changes: 17 additions, 0 deletions)
@@ -81,3 +81,20 @@ func TestConfig_ForcePull(t *testing.T) {
parser.ParseHCL(t, validHCL, &tc)
require.EqualValues(t, true, tc.ForcePull)
}

func TestConfig_CPUHardLimit(t *testing.T) {
parser := hclutils.NewConfigParser(taskConfigSpec)

validHCL := `
config {
image = "docker://redis"
cpu_hard_limit = true
cpu_cfs_period = 200000
}
`

var tc *TaskConfig
parser.ParseHCL(t, validHCL, &tc)
require.EqualValues(t, true, tc.CPUHardLimit)
require.EqualValues(t, 200000, tc.CPUCFSPeriod)
}
driver.go (34 changes: 34 additions, 0 deletions)
@@ -6,6 +6,7 @@ import (
"fmt"
"net"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
@@ -419,6 +420,11 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive
CPU: &spec.LinuxCPU{},
}

err = setCPUResources(driverConfig, cfg.Resources.LinuxResources, createOpts.ContainerResourceConfig.ResourceLimits.CPU)
if err != nil {
return nil, nil, err
}

hard, soft, err := memoryLimits(cfg.Resources.NomadResources.Memory, driverConfig.MemoryReservation)
if err != nil {
return nil, nil, err
@@ -645,6 +651,34 @@ func memoryLimits(r drivers.MemoryResources, reservation string) (hard, soft *in
return nil, reserved, nil
}

func setCPUResources(cfg TaskConfig, systemResources *drivers.LinuxResources, taskCPU *spec.LinuxCPU) error {
if !cfg.CPUHardLimit {
return nil
}

period := cfg.CPUCFSPeriod
if period > 1000000 {
return fmt.Errorf("invalid value for cpu_cfs_period, %d is bigger than 1000000", period)
}
if period == 0 {
period = 100000 // matches cgroup default
}
if period < 1000 {
period = 1000
}

numCores := runtime.NumCPU()
quota := int64(systemResources.PercentTicks*float64(period)) * int64(numCores)
if quota < 1000 {
quota = 1000
}

taskCPU.Period = &period
taskCPU.Quota = &quota

return nil
}

func memoryInBytes(strmem string) (int64, error) {
l := len(strmem)
if l < 2 {
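To make the arithmetic in `setCPUResources` concrete: the CFS quota is the period scaled by the task's share of host CPU ticks (`PercentTicks`) and the number of cores, so a task granted half the host's ticks on a 4-core machine gets a quota of 200000 µs per 100000 µs period. A self-contained sketch of that calculation, with illustrative values that are not part of this diff:

```go
package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Illustrative inputs: half of the host's CPU ticks and the default
	// CFS period of 100000 µs (i.e. cpu_cfs_period left unset).
	percentTicks := 0.5
	period := uint64(100000)
	numCores := runtime.NumCPU()

	// Same formula as setCPUResources: the quota scales with the CPU share
	// and the host core count.
	quota := int64(percentTicks*float64(period)) * int64(numCores)
	if quota < 1000 {
		quota = 1000 // the kernel rejects CFS quotas below 1000 µs
	}

	// On a 4-core host this prints: period=100000µs quota=200000µs
	fmt.Printf("period=%dµs quota=%dµs\n", period, quota)
}
```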
driver_test.go (95 changes: 94 additions, 1 deletion)
@@ -6,6 +6,7 @@ import (
"io/ioutil"
"os"
"os/exec"
"runtime"
"strings"

"path/filepath"
@@ -25,6 +26,7 @@ import (
"github.com/hashicorp/nomad/plugins/drivers"
dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
tu "github.com/hashicorp/nomad/testutil"
spec "github.com/opencontainers/runtime-spec/specs-go"
"github.com/stretchr/testify/require"
)

@@ -45,8 +47,11 @@ func createBasicResources() *drivers.Resources {
},
},
LinuxResources: &drivers.LinuxResources{
-CPUShares: 512,
+CPUPeriod: 100000,
+CPUQuota: 100000,
+CPUShares: 500,
MemoryLimitBytes: 256 * 1024 * 1024,
+PercentTicks: float64(500) / float64(2000),
},
}
return &res
@@ -1829,6 +1834,94 @@ func createInspectImage(t *testing.T, image, reference string) {
require.Equal(t, idRef, idTest)
}

func Test_cpuLimits(t *testing.T) {
numCores := runtime.NumCPU()
cases := []struct {
name string
systemResources drivers.LinuxResources
hardLimit bool
period uint64
expectedQuota int64
expectedPeriod uint64
}{
{
name: "no hard limit",
systemResources: drivers.LinuxResources{
PercentTicks: 1.0,
CPUPeriod: 100000,
},
hardLimit: false,
expectedQuota: 100000,
expectedPeriod: 100000,
},
{
name: "hard limit max quota",
systemResources: drivers.LinuxResources{
PercentTicks: 1.0,
CPUPeriod: 100000,
},
hardLimit: true,
expectedQuota: 100000,
expectedPeriod: 100000,
},
{
name: "hard limit, half quota",
systemResources: drivers.LinuxResources{
PercentTicks: 0.5,
CPUPeriod: 100000,
},
hardLimit: true,
expectedQuota: 50000,
expectedPeriod: 100000,
},
{
name: "hard limit, custom period",
systemResources: drivers.LinuxResources{
PercentTicks: 1.0,
CPUPeriod: 100000,
},
hardLimit: true,
period: 20000,
expectedQuota: 20000,
expectedPeriod: 20000,
},
{
name: "hard limit, half quota, custom period",
systemResources: drivers.LinuxResources{
PercentTicks: 0.5,
CPUPeriod: 100000,
},
hardLimit: true,
period: 20000,
expectedQuota: 10000,
expectedPeriod: 20000,
},
}

for _, c := range cases {
t.Run(c.name, func(t *testing.T) {
taskCPU := &spec.LinuxCPU{}
cfg := TaskConfig{
CPUHardLimit: c.hardLimit,
CPUCFSPeriod: c.period,
}
err := setCPUResources(cfg, &c.systemResources, taskCPU)
require.Nil(t, err)

if c.hardLimit {
require.NotNil(t, taskCPU.Quota)
require.Equal(t, c.expectedQuota*int64(numCores), *taskCPU.Quota)

require.NotNil(t, taskCPU.Period)
require.Equal(t, c.expectedPeriod, *taskCPU.Period)
} else {
require.Nil(t, taskCPU.Quota)
require.Nil(t, taskCPU.Period)
}
})
}
}

func Test_memoryLimits(t *testing.T) {
cases := []struct {
name string