From 1282575d0593c5a2ba8dad5107fb53c71215d5fa Mon Sep 17 00:00:00 2001 From: James Rasell Date: Mon, 13 May 2024 12:17:42 +0100 Subject: [PATCH] client: expose network namespace CNI config as task env vars. This change exposes CNI configuration details of a network namespace as environment variables. This allows a task to use these values to configure itself; a potential use case is to run a Raft application binding to IP and Port details configured using the bridge network mode. --- client/allocrunner/alloc_runner.go | 6 ++ client/allocrunner/taskrunner/task_runner.go | 19 ++++++ client/taskenv/env.go | 34 ++++++++++ client/taskenv/env_test.go | 62 +++++++++++++++++++ .../inputs/docker_bridged_basic.nomad | 29 +++++++++ e2e/networking/networking.go | 24 +++++++ 6 files changed, 174 insertions(+) create mode 100644 e2e/networking/inputs/docker_bridged_basic.nomad diff --git a/client/allocrunner/alloc_runner.go b/client/allocrunner/alloc_runner.go index c56d46e7881..1d70212e293 100644 --- a/client/allocrunner/alloc_runner.go +++ b/client/allocrunner/alloc_runner.go @@ -928,6 +928,12 @@ func (ar *allocRunner) SetNetworkStatus(s *structs.AllocNetworkStatus) { ans := s.Copy() ar.state.NetworkStatus = ans ar.hookResources.SetAllocNetworkStatus(ans) + + // Iterate each task runner and add the status information. This allows the + // task to build the environment variables with this information available. 
+ for _, tr := range ar.tasks { + tr.SetNetworkStatus(ans) + } } func (ar *allocRunner) NetworkStatus() *structs.AllocNetworkStatus { diff --git a/client/allocrunner/taskrunner/task_runner.go b/client/allocrunner/taskrunner/task_runner.go index 53c7b196db5..43cff72a89f 100644 --- a/client/allocrunner/taskrunner/task_runner.go +++ b/client/allocrunner/taskrunner/task_runner.go @@ -258,6 +258,12 @@ type TaskRunner struct { networkIsolationLock sync.Mutex networkIsolationSpec *drivers.NetworkIsolationSpec + // allocNetworkStatus is provided from the allocrunner and allows us to + // include this information as env vars for the task. When manipulating + // this the allocNetworkStatusLock should be used. + allocNetworkStatusLock sync.Mutex + allocNetworkStatus *structs.AllocNetworkStatus + // serviceRegWrapper is the handler wrapper that is used by service hooks // to perform service and check registration and deregistration. serviceRegWrapper *wrapper.HandlerWrapper @@ -1456,6 +1462,19 @@ func (tr *TaskRunner) SetNetworkIsolation(n *drivers.NetworkIsolationSpec) { tr.networkIsolationLock.Unlock() } +// SetNetworkStatus is called from the allocrunner to propagate the +// network status of an allocation. This call occurs once the network hook has +// run and allows this information to be exported as env vars within the +// taskenv. +func (tr *TaskRunner) SetNetworkStatus(s *structs.AllocNetworkStatus) { + tr.allocNetworkStatusLock.Lock() + tr.allocNetworkStatus = s + tr.allocNetworkStatusLock.Unlock() + + // Update the taskenv builder. + tr.envBuilder = tr.envBuilder.SetNetworkStatus(s) +} + // triggerUpdate if there isn't already an update pending. Should be called // instead of calling updateHooks directly to serialize runs of update hooks. // TaskRunner state should be updated prior to triggering update hooks. 
diff --git a/client/taskenv/env.go b/client/taskenv/env.go index 845c7c52e09..d3c9eb38bce 100644 --- a/client/taskenv/env.go +++ b/client/taskenv/env.go @@ -120,6 +120,11 @@ const ( // UpstreamPrefix is the prefix for passing upstream IP and ports to the alloc UpstreamPrefix = "NOMAD_UPSTREAM_" + // AllocPrefix is a general purpose alloc prefix. It is currently used as + // the env var prefix used to export network namespace information + // including IP, Port, and interface. + AllocPrefix = "NOMAD_ALLOC_" + // VaultToken is the environment variable for passing the Vault token VaultToken = "VAULT_TOKEN" @@ -446,6 +451,9 @@ type Builder struct { // and affect network env vars. networks []*structs.NetworkResource + networkStatus *structs.AllocNetworkStatus + allocatedPorts structs.AllocatedPorts + // hookEnvs are env vars set by hooks and stored by hook name to // support adding/removing vars from multiple hooks (eg HookA adds A:1, // HookB adds A:2, HookA removes A, A should equal 2) @@ -565,6 +573,12 @@ func (b *Builder) buildEnv(allocDir, localDir, secretsDir string, // Build the Consul Connect upstream env vars buildUpstreamsEnv(envMap, b.upstreams) + // Build the network namespace information if we have the required detail + // available. 
+ if b.networkStatus != nil && b.allocatedPorts != nil { + addNomadAllocNetwork(envMap, b.allocatedPorts, b.networkStatus) + } + // Build the Vault Token if b.injectVaultToken && b.vaultToken != "" { envMap[VaultToken] = b.vaultToken @@ -816,6 +830,7 @@ func (b *Builder) setAlloc(alloc *structs.Allocation) *Builder { // Add any allocated host ports if alloc.AllocatedResources.Shared.Ports != nil { + b.allocatedPorts = alloc.AllocatedResources.Shared.Ports addPorts(b.otherPorts, alloc.AllocatedResources.Shared.Ports) } } @@ -960,6 +975,13 @@ func (b *Builder) setUpstreamsLocked(upstreams []structs.ConsulUpstream) *Builde return b } +func (b *Builder) SetNetworkStatus(netStatus *structs.AllocNetworkStatus) *Builder { + b.mu.Lock() + defer b.mu.Unlock() + b.networkStatus = netStatus + return b +} + // buildUpstreamsEnv builds NOMAD_UPSTREAM_{IP,PORT,ADDR}_{destination} vars func buildUpstreamsEnv(envMap map[string]string, upstreams []structs.ConsulUpstream) { // Proxy sidecars always bind to localhost @@ -978,6 +1000,18 @@ func buildUpstreamsEnv(envMap map[string]string, upstreams []structs.ConsulUpstr } } +// addNomadAllocNetwork builds NOMAD_ALLOC_{IP,INTERFACE,ADDR}_{port_label} +// vars. NOMAD_ALLOC_PORT_* is handled within addPorts and therefore omitted +// from this function. 
+func addNomadAllocNetwork(envMap map[string]string, p structs.AllocatedPorts, netStatus *structs.AllocNetworkStatus) { + for _, allocatedPort := range p { + portStr := strconv.Itoa(allocatedPort.To) + envMap[AllocPrefix+"INTERFACE_"+allocatedPort.Label] = netStatus.InterfaceName + envMap[AllocPrefix+"IP_"+allocatedPort.Label] = netStatus.Address + envMap[AllocPrefix+"ADDR_"+allocatedPort.Label] = net.JoinHostPort(netStatus.Address, portStr) + } +} + // SetPortMapEnvs sets the PortMap related environment variables on the map func SetPortMapEnvs(envs map[string]string, ports map[string]int) map[string]string { if envs == nil { diff --git a/client/taskenv/env_test.go b/client/taskenv/env_test.go index d5654c5b9bb..a2e6db61a50 100644 --- a/client/taskenv/env_test.go +++ b/client/taskenv/env_test.go @@ -339,6 +339,13 @@ func TestEnvironment_AllValues(t *testing.T) { &drivers.DriverNetwork{PortMap: map[string]int{"https": 443}}, ) + // Setting the network status ensures we trigger the addNomadAllocNetwork + // for the test. + env = env.SetNetworkStatus(&structs.AllocNetworkStatus{ + InterfaceName: "eth0", + Address: "172.26.64.19", + }) + // Add a host environment variable which matches a task variable. It means // we can test to ensure the allocation ID variable from the task overrides // that found on the host. The second entry tests to ensure other host env @@ -438,6 +445,9 @@ func TestEnvironment_AllValues(t *testing.T) { "NOMAD_PORT_admin": "9000", "NOMAD_ALLOC_PORT_admin": "9000", "NOMAD_HOST_PORT_admin": "32000", + "NOMAD_ALLOC_INTERFACE_admin": "eth0", + "NOMAD_ALLOC_IP_admin": "172.26.64.19", + "NOMAD_ALLOC_ADDR_admin": "172.26.64.19:9000", // Env vars from the host. 
"LC_CTYPE": "C.UTF-8", @@ -814,6 +824,58 @@ func TestEnvironment_Upstreams(t *testing.T) { require.Equal(t, "1234", env["bar"]) } +func Test_addNetNamespacePort(t *testing.T) { + testCases := []struct { + inputPorts structs.AllocatedPorts + inputNetwork *structs.AllocNetworkStatus + expectedOutput map[string]string + name string + }{ + { + inputPorts: structs.AllocatedPorts{ + {Label: "http", To: 80}, + }, + inputNetwork: &structs.AllocNetworkStatus{ + InterfaceName: "eth0", + Address: "172.26.64.11", + }, + expectedOutput: map[string]string{ + "NOMAD_ALLOC_INTERFACE_http": "eth0", + "NOMAD_ALLOC_IP_http": "172.26.64.11", + "NOMAD_ALLOC_ADDR_http": "172.26.64.11:80", + }, + name: "single input port", + }, + { + inputPorts: structs.AllocatedPorts{ + {Label: "http", To: 80}, + {Label: "https", To: 443}, + }, + inputNetwork: &structs.AllocNetworkStatus{ + InterfaceName: "eth0", + Address: "172.26.64.11", + }, + expectedOutput: map[string]string{ + "NOMAD_ALLOC_INTERFACE_http": "eth0", + "NOMAD_ALLOC_IP_http": "172.26.64.11", + "NOMAD_ALLOC_ADDR_http": "172.26.64.11:80", + "NOMAD_ALLOC_INTERFACE_https": "eth0", + "NOMAD_ALLOC_IP_https": "172.26.64.11", + "NOMAD_ALLOC_ADDR_https": "172.26.64.11:443", + }, + name: "multiple input ports", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + inputMap := make(map[string]string) + addNomadAllocNetwork(inputMap, tc.inputPorts, tc.inputNetwork) + assert.Equal(t, tc.expectedOutput, inputMap, tc.name) + }) + } +} + func TestEnvironment_SetPortMapEnvs(t *testing.T) { ci.Parallel(t) diff --git a/e2e/networking/inputs/docker_bridged_basic.nomad b/e2e/networking/inputs/docker_bridged_basic.nomad new file mode 100644 index 00000000000..5de9ca99394 --- /dev/null +++ b/e2e/networking/inputs/docker_bridged_basic.nomad @@ -0,0 +1,29 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +job "networking" { + datacenters = ["dc1", "dc2"] + + constraint { + attribute = "${attr.kernel.name}" + value = "linux" + } + + group "bridged" { + network { + mode = "bridge" + port "dummy" { + to = 13130 + } + } + + task "sleep" { + driver = "docker" + config { + image = "busybox:1" + command = "/bin/sleep" + args = ["300"] + } + } + } +} diff --git a/e2e/networking/networking.go b/e2e/networking/networking.go index dabe3543f57..99ebce6fc4c 100644 --- a/e2e/networking/networking.go +++ b/e2e/networking/networking.go @@ -95,3 +95,27 @@ func (tc *NetworkingE2ETest) TestNetworking_DockerBridgedHostnameInterpolation(f f.NoError(err, "failed to run hostname exec command") f.Contains(hostsOutput, "mylittlepony-0", "/etc/hosts doesn't contain hostname entry") } + +func (tc *NetworkingE2ETest) TestNetworking_DockerBridgedCNIEnvVars(f *framework.F) { + + jobID := "test-networking-" + uuid.Generate()[0:8] + f.NoError(e2eutil.Register(jobID, "networking/inputs/docker_bridged_basic.nomad")) + tc.jobIDs = append(tc.jobIDs, jobID) + f.NoError(e2eutil.WaitForAllocStatusExpected(jobID, "default", []string{"running"}), + "job should be running with 1 alloc") + + // Grab the allocations for the job. + allocs, _, err := tc.Nomad().Jobs().Allocations(jobID, false, nil) + f.NoError(err, "failed to get allocs for job") + f.Len(allocs, 1, "job should have one alloc") + + // Run the env command within the allocation. + envOutput, err := e2eutil.AllocExec(allocs[0].ID, "sleep", "env", "default", nil) + f.NoError(err, "failed to run env exec command") + + // Check all the network namespace env vars are present. 
+ f.Contains(envOutput, "NOMAD_ALLOC_INTERFACE_dummy", "namespace interface env var not found") + f.Contains(envOutput, "NOMAD_ALLOC_IP_dummy", "namespace ip env var not found") + f.Contains(envOutput, "NOMAD_ALLOC_PORT_dummy", "namespace port env var not found") + f.Contains(envOutput, "NOMAD_ALLOC_ADDR_dummy", "namespace addr env var not found") +}