From 6dda08a4affa98b44c4fb9c949d78b63a6be306d Mon Sep 17 00:00:00 2001 From: Cheithanya Date: Tue, 15 Oct 2024 17:08:46 +0530 Subject: [PATCH] support for unorchestrated rootfull Podman using OCI hook Signed-off-by: Cheithanya --- Dockerfile.init | 33 ++- KubeArmor/build/entrypoint.sh | 6 + KubeArmor/common/common.go | 5 +- KubeArmor/core/dockerHandler.go | 3 +- KubeArmor/core/hook_handler.go | 250 ++++++++++++++++++ KubeArmor/core/kubeArmor.go | 2 + KubeArmor/core/podmanHandler.go | 162 ++++++++++++ KubeArmor/deployHook/main.go | 41 +++ KubeArmor/go.mod | 133 ++++++++-- KubeArmor/hook/main.go | 402 +++++++++++++++++++++++++++++ KubeArmor/hook/podman.go | 82 ++++++ KubeArmor/monitor/systemMonitor.go | 4 +- KubeArmor/types/types.go | 17 ++ docker-compose.yaml | 86 ++++++ 14 files changed, 1194 insertions(+), 32 deletions(-) create mode 100644 KubeArmor/build/entrypoint.sh create mode 100644 KubeArmor/core/hook_handler.go create mode 100644 KubeArmor/core/podmanHandler.go create mode 100644 KubeArmor/deployHook/main.go create mode 100644 KubeArmor/hook/main.go create mode 100644 KubeArmor/hook/podman.go create mode 100644 docker-compose.yaml diff --git a/Dockerfile.init b/Dockerfile.init index 290f81788f..edcad02e48 100644 --- a/Dockerfile.init +++ b/Dockerfile.init @@ -2,6 +2,29 @@ # Copyright 2021 Authors of KubeArmor ### Make compiler image + +FROM golang:1.22-alpine3.20 AS builder +RUN apk --no-cache update +RUN apk add --no-cache git clang llvm make gcc protobuf +RUN apk add --no-cache linux-headers pkgconfig +RUN apk add --no-cache gpgme-dev +RUN apk add --no-cache btrfs-progs-dev +ARG GOARCH +ARG GOOS + +WORKDIR /KubeArmor + +COPY . . +WORKDIR /KubeArmor/KubeArmor + +RUN go mod download + +WORKDIR /KubeArmor/KubeArmor/deployHook +RUN CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GO111MODULE=on go build -o deployHook . + +WORKDIR /KubeArmor/KubeArmor/hook +RUN CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GO111MODULE=on go build -tags 'containers_image_openpgp' -o hook . 
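+### The kubearmor-init stage below copies the hook and deployHook binaries out of this builder stage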
+ FROM redhat/ubi9-minimal as kubearmor-init ARG VERSION=latest @@ -34,7 +57,15 @@ RUN groupadd --gid 1000 default \ COPY LICENSE /licenses/license.txt COPY ./KubeArmor/BPF /KubeArmor/BPF/ COPY ./KubeArmor/build/compile.sh /KubeArmor/compile.sh +COPY --from=builder /KubeArmor/KubeArmor/hook/hook /hook +COPY --from=builder /KubeArmor/KubeArmor/deployHook/deployHook /KubeArmor/deployHook + +# Copy the custom entrypoint script +COPY ./KubeArmor/build/entrypoint.sh /KubeArmor/entrypoint.sh +RUN chmod +x /KubeArmor/entrypoint.sh + RUN chown -R default:default /KubeArmor USER 1000 -ENTRYPOINT ["/KubeArmor/compile.sh"] + +ENTRYPOINT ["/KubeArmor/entrypoint.sh"] diff --git a/KubeArmor/build/entrypoint.sh b/KubeArmor/build/entrypoint.sh new file mode 100644 index 0000000000..73bbb8b82d --- /dev/null +++ b/KubeArmor/build/entrypoint.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +/KubeArmor/compile.sh + +/KubeArmor/deployHook diff --git a/KubeArmor/common/common.go b/KubeArmor/common/common.go index 284a549838..3af93c2d22 100644 --- a/KubeArmor/common/common.go +++ b/KubeArmor/common/common.go @@ -413,7 +413,7 @@ func IsK8sEnv() bool { } // ContainerRuntimeSocketKeys contains FIFO ordered keys of container runtimes -var ContainerRuntimeSocketKeys = []string{"docker", "containerd", "cri-o"} +var ContainerRuntimeSocketKeys = []string{"docker", "containerd", "cri-o","podman"} // ContainerRuntimeSocketMap Structure var ContainerRuntimeSocketMap = map[string][]string{ @@ -432,6 +432,9 @@ var ContainerRuntimeSocketMap = map[string][]string{ "/var/run/crio/crio.sock", "/run/crio/crio.sock", }, + "podman":{ + "/run/podman/podman.sock", + }, } // GetCRISocket Function diff --git a/KubeArmor/core/dockerHandler.go b/KubeArmor/core/dockerHandler.go index 87980df627..c93afe75ae 100644 --- a/KubeArmor/core/dockerHandler.go +++ b/KubeArmor/core/dockerHandler.go @@ -15,6 +15,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/client" "github.com/kubearmor/KubeArmor/KubeArmor/common" @@ -266,7 +267,7 @@ func (dm *KubeArmorDaemon) GetAlreadyDeployedDockerContainers() { } } - if containerList, err := Docker.DockerClient.ContainerList(context.Background(), types.ContainerListOptions{}); err == nil { + if containerList, err := Docker.DockerClient.ContainerList(context.Background(), container.ListOptions{}); err == nil { for _, dcontainer := range containerList { // get container information from docker client container, err := Docker.GetContainerInfo(dcontainer.ID, dm.OwnerInfo) diff --git a/KubeArmor/core/hook_handler.go b/KubeArmor/core/hook_handler.go new file mode 100644 index 0000000000..5dbb2a6d5f --- /dev/null +++ b/KubeArmor/core/hook_handler.go @@ -0,0 +1,250 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2022 Authors of KubeArmor + +package core + +import ( + "encoding/json" + "errors" + "io" + "log" + "net" + "os" + "path/filepath" + "sync/atomic" + + kl "github.com/kubearmor/KubeArmor/KubeArmor/common" + cfg "github.com/kubearmor/KubeArmor/KubeArmor/config" + "github.com/kubearmor/KubeArmor/KubeArmor/types" +) + +const kubearmorDir = "/var/run/kubearmor" + +// ContainerEngineHandler defines the interface that any container engine must implement if supports OCI hook +type ContainerEngineHandler interface { + HandleCreateContainer(container types.Container) + HandleDeleteContainer(containerID string) +} + +type PodmanHandler 
struct {
+	daemon *KubeArmorDaemon
+}
+
+// NewPodmanHandler returns a ContainerEngineHandler for Podman containers
+func NewPodmanHandler(dm *KubeArmorDaemon) *PodmanHandler {
+	return &PodmanHandler{daemon: dm}
+}
+
+// HandleCreateContainer registers a newly created Podman container
+func (p *PodmanHandler) HandleCreateContainer(container types.Container) {
+	p.daemon.UpdatePodmanContainer(container.ContainerID, container, "create")
+}
+
+// HandleDeleteContainer unregisters a deleted Podman container
+func (p *PodmanHandler) HandleDeleteContainer(containerID string) {
+	p.daemon.UpdatePodmanContainer(containerID, p.daemon.Containers[containerID], "destroy")
+}
+
+// CRIOHandler handles CRI-O containers reported over the OCI hook
+type CRIOHandler struct {
+	daemon *KubeArmorDaemon
+}
+
+// NewCRIOHandler returns a ContainerEngineHandler for CRI-O containers
+func NewCRIOHandler(dm *KubeArmorDaemon) *CRIOHandler {
+	return &CRIOHandler{daemon: dm}
+}
+
+// HandleCreateContainer registers a newly created CRI-O container
+func (c *CRIOHandler) HandleCreateContainer(container types.Container) {
+	c.daemon.handleContainerCreate(container)
+}
+
+// HandleDeleteContainer unregisters a deleted CRI-O container
+func (c *CRIOHandler) HandleDeleteContainer(containerID string) {
+	c.daemon.handleContainerDelete(containerID)
+}
+
+// ListenToHook starts listening on a UNIX socket and waits for container hooks
+// to pass new containers
+func (dm *KubeArmorDaemon) ListenToHook() {
+	if err := os.MkdirAll(kubearmorDir, 0750); err != nil {
+		log.Fatal(err)
+	}
+
+	listenPath := filepath.Join(kubearmorDir, "ka.sock")
+	err := os.Remove(listenPath) // remove any stale socket file left behind if KubeArmor previously crashed
+	if err != nil && !errors.Is(err, os.ErrNotExist) {
+		log.Fatal(err)
+	}
+
+	socket, err := net.Listen("unix", listenPath)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// Set the permissions of ka.sock to 777 so that rootless Podman running with user-level privileges can also communicate with the socket
+	if err := os.Chmod(listenPath, 0777); err != nil {
+		log.Fatalf("failed to set permissions on %s: %v", listenPath, err)
+	}
+
+	defer socket.Close()
+	defer os.Remove(listenPath)
+	ready := &atomic.Bool{}
+
+	for {
+		conn, err := socket.Accept()
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		go dm.handleConn(conn, ready)
+	}
+
+}
+
+// handleConn gets container details from container hooks.
+func (dm *KubeArmorDaemon) handleConn(conn net.Conn, ready *atomic.Bool) {
+	// We need to make sure that no new containers are accepted until all containers created before
+	// KubeArmor started have been sent first. This avoids a race in which a hook reports that a
+	// container was deleted before the process responsible for sending pre-existing containers
+	// reports that it was created, which would leave KubeArmor in an incorrect state.
+ defer conn.Close() + buf := make([]byte, 4096) + + for { + n, err := conn.Read(buf) + if err == io.EOF { + return + } + if err != nil { + log.Fatal(err) + } + + data := types.HookRequest{} + + err = json.Unmarshal(buf[:n], &data) + if err != nil { + log.Fatal(err) + } + + if data.Detached { + // we want KubeArmor to start accepting containers after + // all previous container are set + defer ready.Store(true) + } else if !ready.Load() { + _, err = conn.Write([]byte("err")) + if err == io.EOF { + return + } else if err != nil { + log.Println(err) + return + } + continue + } + _, err = conn.Write([]byte("ok")) + if err == io.EOF { + return + } else if err != nil { + log.Println(err) + return + } + + containerLabels,_ := kl.GetLabelsFromString(data.Container.Labels) + // Determine which engine is being used (Podman or CRI-O for now support OCI hooks) + var handler ContainerEngineHandler + if containerLabels["containerType"] == "podman" { + handler = NewPodmanHandler(dm) + } else { + handler = NewCRIOHandler(dm) + } + + // Handle the container create or delete event + if data.Operation == types.HookContainerCreate { + handler.HandleCreateContainer(data.Container) + } else { + handler.HandleDeleteContainer(data.Container.ContainerID) + } + + } +} +func (dm *KubeArmorDaemon) handleContainerCreate(container types.Container) { + endpoint := types.EndPoint{} + + dm.Logger.Printf("added %s", container.ContainerID) + + dm.ContainersLock.Lock() + defer dm.ContainersLock.Unlock() + if _, ok := dm.Containers[container.ContainerID]; !ok { + dm.Containers[container.ContainerID] = container + } else if dm.Containers[container.ContainerID].PidNS == 0 && dm.Containers[container.ContainerID].MntNS == 0 { + c := dm.Containers[container.ContainerID] + c.MntNS = container.MntNS + c.PidNS = container.PidNS + c.AppArmorProfile = container.AppArmorProfile + dm.Containers[c.ContainerID] = c + + dm.EndPointsLock.Lock() + for idx, endPoint := range dm.EndPoints { + if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) { + + // update apparmor profiles + if !kl.ContainsElement(endPoint.AppArmorProfiles, container.AppArmorProfile) { + dm.EndPoints[idx].AppArmorProfiles = append(dm.EndPoints[idx].AppArmorProfiles, container.AppArmorProfile) + } + + if container.Privileged && dm.EndPoints[idx].PrivilegedContainers != nil { + dm.EndPoints[idx].PrivilegedContainers[container.ContainerName] = struct{}{} + } + + endpoint = dm.EndPoints[idx] + + break + } + } + dm.EndPointsLock.Unlock() + } + + if len(dm.OwnerInfo) > 0 { + container.Owner = dm.OwnerInfo[container.EndPointName] + } + + if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy { + dm.SystemMonitor.AddContainerIDToNsMap(container.ContainerID, container.NamespaceName, container.PidNS, container.MntNS) + dm.RuntimeEnforcer.RegisterContainer(container.ContainerID, container.PidNS, container.MntNS) + + if len(endpoint.SecurityPolicies) > 0 { // struct can be empty or no policies registered for the endpoint yet + dm.Logger.UpdateSecurityPolicies("ADDED", endpoint) + if dm.RuntimeEnforcer != nil && endpoint.PolicyEnabled == types.KubeArmorPolicyEnabled { + // enforce security policies + dm.RuntimeEnforcer.UpdateSecurityPolicies(endpoint) + } + } + } +} +func (dm *KubeArmorDaemon) handleContainerDelete(containerID string) { + dm.ContainersLock.Lock() + container, ok := dm.Containers[containerID] + dm.Logger.Printf("deleted %s", containerID) + if !ok { + 
dm.ContainersLock.Unlock() + return + } + delete(dm.Containers, containerID) + dm.ContainersLock.Unlock() + + dm.EndPointsLock.Lock() + for idx, endPoint := range dm.EndPoints { + if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) { + + // update apparmor profiles + for idxA, profile := range endPoint.AppArmorProfiles { + if profile == container.AppArmorProfile { + dm.EndPoints[idx].AppArmorProfiles = append(dm.EndPoints[idx].AppArmorProfiles[:idxA], dm.EndPoints[idx].AppArmorProfiles[idxA+1:]...) + break + } + } + + break + } + } + dm.EndPointsLock.Unlock() + + if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy { + // update NsMap + dm.SystemMonitor.DeleteContainerIDFromNsMap(containerID, container.NamespaceName, container.PidNS, container.MntNS) + dm.RuntimeEnforcer.UnregisterContainer(containerID) + } + +} + + + diff --git a/KubeArmor/core/kubeArmor.go b/KubeArmor/core/kubeArmor.go index 9951150ba4..f891515ced 100644 --- a/KubeArmor/core/kubeArmor.go +++ b/KubeArmor/core/kubeArmor.go @@ -591,6 +591,8 @@ func KubeArmor() { } else if strings.Contains(cfg.GlobalCfg.CRISocket, "cri-o") { // monitor crio events go dm.MonitorCrioEvents() + } else if strings.Contains(cfg.GlobalCfg.CRISocket, "podman") { + go dm.ListenToHook() } else { dm.Logger.Warnf("Failed to monitor containers: %s is not a supported CRI socket.", cfg.GlobalCfg.CRISocket) enableContainerPolicy = false diff --git a/KubeArmor/core/podmanHandler.go b/KubeArmor/core/podmanHandler.go new file mode 100644 index 0000000000..f21fa5e11b --- /dev/null +++ b/KubeArmor/core/podmanHandler.go @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 Authors of KubeArmor + +// Package core is responsible for initiating and maintaining interactions between external entities like K8s,CRIs and internal KubeArmor entities like eBPF Monitor and Log Feeders +package core + +import ( + "github.com/kubearmor/KubeArmor/KubeArmor/common" + kl "github.com/kubearmor/KubeArmor/KubeArmor/common" + cfg "github.com/kubearmor/KubeArmor/KubeArmor/config" + "github.com/kubearmor/KubeArmor/KubeArmor/state" + tp "github.com/kubearmor/KubeArmor/KubeArmor/types" +) + +// UpdatePodmanContainer Function +func (dm *KubeArmorDaemon) UpdatePodmanContainer(containerID string, container tp.Container, action string) bool { + + if action == "create" { + + if container.ContainerID == "" { + return false + } + + endPoint := tp.EndPoint{} + + dm.ContainersLock.Lock() + if _, ok := dm.Containers[container.ContainerID]; !ok { + dm.Containers[container.ContainerID] = container + dm.ContainersLock.Unlock() + + containerLabels, containerIdentities := common.GetLabelsFromString(container.Labels) + dm.EndPointsLock.Lock() + + endPoint.EndPointName = container.ContainerName + endPoint.ContainerName = container.ContainerName + endPoint.NamespaceName = container.NamespaceName + endPoint.Containers = []string{container.ContainerID} + endPoint.Labels = containerLabels + endPoint.Identities = containerIdentities + endPoint.PolicyEnabled = tp.KubeArmorPolicyEnabled + endPoint.ProcessVisibilityEnabled = true + endPoint.FileVisibilityEnabled = true + endPoint.NetworkVisibilityEnabled = true + endPoint.CapabilitiesVisibilityEnabled = true + + endPoint.AppArmorProfiles = []string{"kubearmor_" + container.ContainerName} + + globalDefaultPosture := tp.DefaultPosture{ + FileAction: cfg.GlobalCfg.DefaultFilePosture, + 
NetworkAction: cfg.GlobalCfg.DefaultNetworkPosture, + CapabilitiesAction: cfg.GlobalCfg.DefaultCapabilitiesPosture, + } + endPoint.DefaultPosture = globalDefaultPosture + + dm.SecurityPoliciesLock.RLock() + for _, secPol := range dm.SecurityPolicies { + if kl.MatchIdentities(secPol.Spec.Selector.Identities, endPoint.Identities) { + endPoint.SecurityPolicies = append(endPoint.SecurityPolicies, secPol) + } + } + dm.SecurityPoliciesLock.RUnlock() + + dm.EndPoints = append(dm.EndPoints, endPoint) + dm.EndPointsLock.Unlock() + + } else { + dm.ContainersLock.Unlock() + return false + } + + if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy { + // for throttling + dm.SystemMonitor.Logger.ContainerNsKey[containerID] = common.OuterKey{ + MntNs: container.MntNS, + PidNs: container.PidNS, + } + + // update NsMap + dm.SystemMonitor.AddContainerIDToNsMap(containerID, container.NamespaceName, container.PidNS, container.MntNS) + dm.RuntimeEnforcer.RegisterContainer(containerID, container.PidNS, container.MntNS) + + + if len(endPoint.SecurityPolicies) > 0 { // struct can be empty or no policies registered for the endPoint yet + dm.Logger.UpdateSecurityPolicies("ADDED", endPoint) + if dm.RuntimeEnforcer != nil && endPoint.PolicyEnabled == tp.KubeArmorPolicyEnabled { + dm.Logger.Printf("Enforcing security policies for container ID %s",containerID) + // enforce security policies + dm.RuntimeEnforcer.UpdateSecurityPolicies(endPoint) + } + } + } + + if cfg.GlobalCfg.StateAgent { + container.Status = "running" + go dm.StateAgent.PushContainerEvent(container, state.EventAdded) + } + + dm.Logger.Printf("Detected a container (added/%.12s/pidns=%d/mntns=%d)", containerID, container.PidNS, container.MntNS) + + } else if action == "destroy" { + dm.ContainersLock.Lock() + container, ok := dm.Containers[containerID] + if !ok { + dm.ContainersLock.Unlock() + return false + } + dm.EndPointsLock.Lock() + dm.MatchandRemoveContainerFromEndpoint(containerID) + dm.EndPointsLock.Unlock() + delete(dm.Containers, containerID) + dm.ContainersLock.Unlock() + + dm.EndPointsLock.Lock() + // remove apparmor profile for that endpoint + for idx, endPoint := range dm.EndPoints { + if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) { + + // update apparmor profiles + for idxA, profile := range endPoint.AppArmorProfiles { + if profile == container.AppArmorProfile { + dm.EndPoints[idx].AppArmorProfiles = append(dm.EndPoints[idx].AppArmorProfiles[:idxA], dm.EndPoints[idx].AppArmorProfiles[idxA+1:]...) + break + } + } + + break + } + } + dm.EndPointsLock.Unlock() + // delete endpoint if no security rules and containers + idx := 0 + endpointsLength := len(dm.EndPoints) + for idx < endpointsLength { + endpoint := dm.EndPoints[idx] + if container.NamespaceName == endpoint.NamespaceName && container.ContainerName == endpoint.EndPointName && + len(endpoint.SecurityPolicies) == 0 && len(endpoint.Containers) == 0 { + dm.EndPoints = append(dm.EndPoints[:idx], dm.EndPoints[idx+1:]...) 
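+				// an endpoint was removed in place, so shrink the loop bound and revisit the same index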
+ endpointsLength-- + idx-- + } + idx++ + } + + if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy { + outkey := dm.SystemMonitor.Logger.ContainerNsKey[containerID] + dm.Logger.DeleteAlertMapKey(outkey) + delete(dm.SystemMonitor.Logger.ContainerNsKey, containerID) + // update NsMap + dm.SystemMonitor.DeleteContainerIDFromNsMap(containerID, container.NamespaceName, container.PidNS, container.MntNS) + dm.RuntimeEnforcer.UnregisterContainer(containerID) + } + + if cfg.GlobalCfg.StateAgent { + container.Status = "terminated" + go dm.StateAgent.PushContainerEvent(container, state.EventDeleted) + } + + dm.Logger.Printf("Detected a container (removed/%.12s/pidns=%d/mntns=%d)", containerID, container.PidNS, container.MntNS) + } + + return true +} diff --git a/KubeArmor/deployHook/main.go b/KubeArmor/deployHook/main.go new file mode 100644 index 0000000000..ca2786def1 --- /dev/null +++ b/KubeArmor/deployHook/main.go @@ -0,0 +1,41 @@ +package main + +import ( + "io" + "log" + "os" + "path/filepath" +) + +func applyPodmanHook() error { + kaDir := "/usr/share/kubearmor" + if err := os.MkdirAll(kaDir, 0750); err != nil { + return err + } + + dstBin, err := os.OpenFile(filepath.Join(kaDir, "hook"), os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0755) + if err != nil { + return err + } + defer dstBin.Close() + + srcBin, err := os.Open("/hook") + if err != nil { + return err + } + defer srcBin.Close() + + if _, err := io.Copy(dstBin, srcBin); err != nil { + return err + } + + return nil +} +func main(){ + err := applyPodmanHook() + if err != nil { + log.Printf("Podman hook injection failed: %v", err) + } else { + log.Printf("Podman OCI hook injected successfully") + } +} diff --git a/KubeArmor/go.mod b/KubeArmor/go.mod index 583eaa8103..a43f183033 100644 --- a/KubeArmor/go.mod +++ b/KubeArmor/go.mod @@ -27,110 +27,189 @@ require ( github.com/Masterminds/sprig/v3 v3.2.3 github.com/cilium/cilium v1.14.12 github.com/cilium/ebpf v0.12.3 - github.com/containerd/containerd v1.7.13 + github.com/containerd/containerd v1.7.18 github.com/containerd/typeurl/v2 v2.1.1 - github.com/docker/docker v25.0.5+incompatible + github.com/containers/common v0.60.4 + github.com/containers/podman/v5 v5.2.5 + github.com/docker/docker v27.1.1+incompatible github.com/golang/protobuf v1.5.4 github.com/google/uuid v1.6.0 github.com/kubearmor/KubeArmor/pkg/KubeArmorController v0.0.0-20240110164432-c2c1b121cd94 github.com/kubearmor/KubeArmor/protobuf v0.0.0-20240110164432-c2c1b121cd94 github.com/opencontainers/runtime-spec v1.2.0 github.com/spf13/viper v1.18.2 - go.uber.org/zap v1.26.0 - golang.org/x/exp v0.0.0-20240110193028-0dcbfd608b1e - golang.org/x/sys v0.22.0 + go.uber.org/zap v1.27.0 + golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 + golang.org/x/sys v0.24.0 google.golang.org/grpc v1.65.0 google.golang.org/protobuf v1.34.2 k8s.io/api v0.29.0 k8s.io/apimachinery v0.29.0 k8s.io/client-go v0.29.0 k8s.io/cri-api v0.29.7 - k8s.io/klog/v2 v2.120.0 + k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20240310230437-4693a0247e57 sigs.k8s.io/controller-runtime v0.15.3 ) require ( + dario.cat/mergo v1.0.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect - github.com/Microsoft/go-winio v0.6.1 // 
indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/Microsoft/hcsshim v0.12.5 // indirect + github.com/VividCortex/ewma v1.2.0 // indirect + github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/ttrpc v1.2.3 // indirect + github.com/chzyer/readline v1.5.1 // indirect + github.com/containerd/cgroups/v3 v3.0.3 // indirect + github.com/containerd/errdefs v0.1.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect + github.com/containerd/ttrpc v1.2.4 // indirect + github.com/containers/buildah v1.37.5 // indirect + github.com/containers/image/v5 v5.32.2 // indirect + github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect + github.com/containers/ocicrypt v1.2.0 // indirect + github.com/containers/psgo v1.9.0 // indirect + github.com/containers/storage v1.55.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.1-0.20231103132048-7d375ecc2b09 // indirect + github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect + github.com/cyphar/filepath-securejoin v0.3.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/distribution/reference v0.5.0 // indirect + github.com/disiqueira/gotree/v3 v3.0.2 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.2 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/emicklei/go-restful/v3 v3.11.2 // indirect github.com/evanphx/json-patch/v5 v5.7.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-jose/go-jose/v4 v4.0.2 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect - github.com/go-openapi/jsonreference v0.20.4 // indirect - github.com/go-openapi/swag v0.22.7 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/validate v0.24.0 // indirect + github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-containerregistry v0.20.0 // indirect + github.com/google/go-intervals v0.0.2 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/schema v1.4.1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/huandu/xstrings v1.4.0 // indirect github.com/imdario/mergo v0.3.16 // indirect + github.com/jinzhu/copier v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/manifoldco/promptui v0.9.0 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mattn/go-sqlite3 v1.14.22 // indirect + github.com/miekg/pkcs11 v1.1.1 // indirect + github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/sys/mountinfo v0.7.2 // indirect + github.com/moby/sys/user v0.3.0 // indirect + github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nxadm/tail v1.4.11 // indirect + github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect - github.com/pelletier/go-toml/v2 v2.1.1 // indirect + github.com/opencontainers/runc v1.1.13 // indirect + github.com/opencontainers/runtime-tools v0.9.1-0.20230914150019-408c51e934dc // indirect + github.com/opencontainers/selinux v1.11.0 // indirect + github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.18.0 // indirect - github.com/prometheus/client_model v0.5.0 // indirect - github.com/prometheus/common v0.45.0 // indirect - github.com/prometheus/procfs v0.12.0 // indirect + github.com/pkg/sftp v1.13.6 // indirect + github.com/proglottis/gpgme v0.1.3 // indirect + github.com/prometheus/client_golang v1.19.0 // indirect + github.com/prometheus/client_model v0.6.0 // indirect + github.com/prometheus/common v0.51.1 // 
indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rivo/uniseg v0.4.7 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect + github.com/sigstore/fulcio v1.4.5 // indirect + github.com/sigstore/rekor v1.3.6 // indirect + github.com/sigstore/sigstore v1.8.4 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect github.com/subosito/gotenv v1.6.0 // indirect + github.com/sylabs/sif/v2 v2.18.0 // indirect + github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect + github.com/tchap/go-patricia/v2 v2.3.1 // indirect + github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect + github.com/ulikunitz/xz v0.5.12 // indirect + github.com/vbatts/tar-split v0.11.5 // indirect + github.com/vbauerster/mpb/v8 v8.7.5 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect + go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect + go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.50.0 // indirect go.opentelemetry.io/otel v1.25.0 // indirect go.opentelemetry.io/otel/metric v1.25.0 // indirect go.opentelemetry.io/otel/trace v1.25.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.25.0 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect - golang.org/x/sync v0.7.0 // indirect - golang.org/x/term v0.22.0 // indirect - golang.org/x/text v0.16.0 // indirect + golang.org/x/crypto v0.26.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/oauth2 v0.22.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/term v0.23.0 // indirect + golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240725223205-93522f1f2a9f // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - gotest.tools/v3 v3.4.0 // indirect k8s.io/apiextensions-apiserver v0.29.0 // indirect k8s.io/component-base v0.29.0 // indirect k8s.io/kube-openapi v0.0.0-20240105020646-a37d4de58910 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect + tags.cncf.io/container-device-interface v0.8.0 // indirect ) diff --git a/KubeArmor/hook/main.go b/KubeArmor/hook/main.go new file mode 100644 index 0000000000..1a85195090 --- /dev/null +++ b/KubeArmor/hook/main.go @@ -0,0 +1,402 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2022 Authors of KubeArmor + +package main + +import ( + "bytes" + "encoding/json" + "flag" 
+ "fmt" + "io" + "io/ioutil" + "bufio" + "log" + "net" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/kubearmor/KubeArmor/KubeArmor/types" + "github.com/opencontainers/runtime-spec/specs-go" +) + +const ( + LOGPATH = "/var/log/ka-hook.log" + rootfulContainersPath = "/var/lib/containers/storage/overlay-containers" + containersFileName = "containers.json" + volatileContainersFileName = "volatile-containers.json" +) + +var ( + kubeArmorSocket string + runtimeSocket string + detached bool +) + +type ContainerMetadata struct { + ID string `json:"id"` + Names []string `json:"names"` + Image string `json:"image"` + Metadata string `json:"metadata"` +} + +type MetadataDetails struct { + ImageName string `json:"image-name"` + Name string `json:"name"` +} + +func main() { + flag.StringVar(&kubeArmorSocket, "kubearmor-socket", "/var/run/kubearmor/ka.sock", "KubeArmor socket") + flag.StringVar(&runtimeSocket, "runtime-socket", "", "container runtime socket") + flag.BoolVar(&detached, "detached", false, "run detached") + flag.Parse() + + if runtimeSocket == "" { + log.Println("runtime socket must be set") + os.Exit(1) + } + if !strings.HasPrefix(runtimeSocket, "unix://") { + runtimeSocket = "unix://" + runtimeSocket + } + if detached { + if err := runDetached(); err != nil { + log.Println(err) + os.Exit(1) + } + os.Exit(0) + } + input, err := io.ReadAll(os.Stdin) + if err != nil { + log.Println(err) + os.Exit(1) + } + state := specs.State{} + err = json.Unmarshal(input, &state) + if err != nil { + log.Println(err) + os.Exit(1) + } + + if err := run(state); err != nil { + log.Println(err) + os.Exit(1) + } + +} + +func runDetached() error { + // we need to make sure the process exits at some point + time.AfterFunc(1*time.Minute, func() { + log.Println("failed to get containers, process timed out") + os.Exit(1) + }) + conn := waitOnKubeArmor() + defer conn.Close() + + handler, err := newPodmanHandler(runtimeSocket) + if err != nil { + return err + } + containers, err := handler.listContainers() + if err != nil { + return err + } + + for _, container := range containers { + data := types.HookRequest{ + Operation: types.HookContainerCreate, + Detached: true, + Container: container, + } + + dataJSON, err := json.Marshal(data) + if err != nil { + return err + } + + _, err = conn.Write(dataJSON) + if err != nil { + return err + } + ack := make([]byte, 1024) + _, err = conn.Read(ack) + if err == io.EOF { + return nil + } + if err != nil { + return err + } + } + + return nil +} + +func run(state specs.State) error { + var container types.Container + operation := types.HookContainerCreate + // we try to connect to runtime here to make sure the socket is correct + // before spawning a detached process + _, err := newPodmanHandler(runtimeSocket) + if err != nil { + return err + } + + container.ContainerID = state.ID + if state.Status == specs.StateStopped { + operation = types.HookContainerDelete + return sendContainer(container, operation) + } + + var appArmorProfile string + var isKubeArmor bool + specBytes, err := os.ReadFile(filepath.Join(state.Bundle, "config.json")) + if err != nil { + return err + } else { + var spec specs.Spec + err = json.Unmarshal(specBytes, &spec) + if err != nil { + return err + } + appArmorProfile = spec.Process.ApparmorProfile // check if Process is nil?? 
+		isKubeArmor = spec.Process.Args[0] == "/KubeArmor/kubearmor"
+	}
+	if isKubeArmor {
+		err = startDetachedProcess()
+		if err != nil {
+			return err
+		}
+		// we still continue to try to send container details after starting the detached process
+		// to make sure if it was a false positive (container trying to act as KubeArmor), we still
+		// monitor it.
+	}
+	passwdFile, err := os.Open("/etc/passwd")
+	if err != nil {
+		log.Fatalf("Failed to open /etc/passwd: %v", err)
+	}
+	defer passwdFile.Close()
+
+	scanner := bufio.NewScanner(passwdFile)
+	var homeDir string
+
+	// Iterate through /etc/passwd to find a user whose home directory contains rootless Podman container metadata
+	for scanner.Scan() {
+		fields := strings.Split(scanner.Text(), ":")
+		if len(fields) < 6 {
+			continue // skip malformed lines
+		}
+
+		userHomeDir := fields[5]
+		potentialPath := filepath.Join(userHomeDir, ".local/share/containers/storage/overlay-containers/containers.json")
+
+		if _, err := os.Stat(potentialPath); err == nil {
+			homeDir = userHomeDir
+			break
+		}
+	}
+
+	if homeDir == "" {
+		log.Println("no rootless overlay-containers metadata found in any user home; continuing with rootful metadata paths")
+	}
+
+	rootlessContainersPath := filepath.Join(homeDir, ".local/share/containers/storage/overlay-containers")
+
+	// Rootful Podman metadata paths
+	metadataPath1 := filepath.Join(rootfulContainersPath, containersFileName)
+	metadataPath2 := filepath.Join(rootfulContainersPath, volatileContainersFileName)
+
+	// Rootless Podman metadata paths
+	metadataPath3 := filepath.Join(rootlessContainersPath, containersFileName)
+	metadataPath4 := filepath.Join(rootlessContainersPath, volatileContainersFileName)
+
+	var paths []string
+
+	isRootFullPodman := runtimeSocket == "unix:///run/podman/podman.sock"
+
+	if isRootFullPodman {
+		paths = []string{metadataPath1, metadataPath2}
+	} else {
+		paths = []string{metadataPath3, metadataPath4}
+	}
+
+	var details MetadataDetails
+	found := false
+	for _, path := range paths {
+		details, err = fetchContainerDetails(state.ID, path)
+		if err == nil {
+			found = true
+			break
+		} else {
+			log.Printf("failed to fetch container details from %s: %v", path, err)
+		}
+	}
+
+	if !found {
+		return fmt.Errorf("container with ID %s not found in any path", state.ID)
+	}
+
+	labels := []string{}
+
+	for k, v := range state.Annotations {
+		labels = append(labels, k+"="+v)
+	}
+	// add labels for policy matching
+	labels = append(labels, "namespaceName="+"container_namespace")
+	labels = append(labels, "containerType="+"podman")
+	labels = append(labels, "kubearmor.io/container.name="+details.Name)
+
+	nodename, nodeErr := os.Hostname()
+	if nodeErr != nil {
+		nodename = ""
+	}
+
+	container.Labels = strings.Join(labels, ",")
+
+	status := "stopped"
+	if state.Status == specs.StateRunning {
+		status = "running"
+	}
+	container = types.Container{
+		ContainerID:     state.ID,
+		ContainerName:   details.Name,
+		ContainerImage:  details.ImageName,
+		AppArmorProfile: appArmorProfile,
+		NamespaceName:   "container_namespace",
+		EndPointName:    details.Name,
+		NodeName:        nodename,
+		Status:          status,
+		Labels:          strings.Join(labels, ","),
+	}
+	container.PidNS, container.MntNS = getNS(state.Pid)
+
+	return sendContainer(container, operation)
+}
+
+func fetchContainerDetails(containerID, metadataPath string) (MetadataDetails, error) {
+	data, err := ioutil.ReadFile(metadataPath)
+	if err != nil {
+		return MetadataDetails{}, fmt.Errorf("unable to read metadata file: %w", err)
+	}
+
+	var containers []ContainerMetadata
+	err = json.Unmarshal(data, &containers)
+	if err != nil {
+		return MetadataDetails{}, fmt.Errorf("unable to parse 
metadata file: %w", err) + } + + for _, container := range containers { + + + if container.ID == containerID { + var details MetadataDetails + err := json.Unmarshal([]byte(container.Metadata), &details) + if err != nil { + return MetadataDetails{}, fmt.Errorf("unable to parse container metadata: %w", err) + } + return details, nil + } + } + + return MetadataDetails{}, fmt.Errorf("container with ID %s not found", containerID) +} + +func getNS(pid int) (uint32, uint32) { + var pidNS uint32 + var mntNS uint32 + + nsPath := fmt.Sprintf("/proc/%d/ns", pid) + + pidLink, err := os.Readlink(filepath.Join(nsPath, "pid")) + if err == nil { + if _, err := fmt.Sscanf(pidLink, "pid:[%d]\n", &pidNS); err != nil { + log.Println(err) + } + } + + mntLink, err := os.Readlink(filepath.Join(nsPath, "mnt")) + if err == nil { + if _, err := fmt.Sscanf(mntLink, "mnt:[%d]\n", &mntNS); err != nil { + log.Println(err) + } + } + return pidNS, mntNS +} + +func sendContainer(container types.Container, operation types.HookOperation) error { + conn, err := net.Dial("unix", kubeArmorSocket) + if err != nil { + // not returning error here because this can happen in multiple cases + // that we don't want container creation to be blocked on: + // - hook was created before KubeArmor was running so the socket doesn't exist yet + // - KubeArmor crashed so there is nothing listening on socket + return nil + } + + defer conn.Close() + + data := types.HookRequest{ + Operation: operation, + Detached: false, + Container: container, + } + + dataJSON, err := json.Marshal(data) + if err != nil { + return err + } + + + for { + _, err = conn.Write(dataJSON) + if err != nil { + return err + } + ack := make([]byte, 1024) + n, err := conn.Read(ack) + if err == io.EOF { + return nil + } else if err != nil { + return err + } + response := ack[:n] + if bytes.Equal(response, []byte("ok")) { + return nil + } else { + time.Sleep(50 * time.Millisecond) // try again in 50 ms + continue + } + + } +} + +func waitOnKubeArmor() net.Conn { + for { + conn, err := net.Dial("unix", kubeArmorSocket) + if err == nil { + return conn + } + time.Sleep(500 * time.Millisecond) + } +} + +func startDetachedProcess() error { + args := os.Args[1:] + args = append(args, "--detached") + cmd := exec.Command(os.Args[0], args...) 
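+	// re-exec this hook binary with --detached so it can enumerate the containers that existed
+	// before KubeArmor started and stream them to the daemon once its socket is up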
+ logFile, err := os.OpenFile("/var/log/ka-hook.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666) + if err != nil { + return err + } + cmd.Stdout = logFile + cmd.Stderr = logFile + err = cmd.Start() + if err != nil { + return err + } + return cmd.Process.Release() +} diff --git a/KubeArmor/hook/podman.go b/KubeArmor/hook/podman.go new file mode 100644 index 0000000000..3b30427bb8 --- /dev/null +++ b/KubeArmor/hook/podman.go @@ -0,0 +1,82 @@ +// +build linux +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2022 Authors of KubeArmor + + +package main + + +import ( + "context" + "strings" + + + "github.com/containers/podman/v5/pkg/bindings" + "github.com/containers/podman/v5/pkg/bindings/containers" + "github.com/containers/podman/v5/pkg/domain/entities" + "github.com/kubearmor/KubeArmor/KubeArmor/types" +) + + +type podmanHandler struct { + conn context.Context +} + + +func newPodmanHandler(socket string) (*podmanHandler, error) { + conn, err := bindings.NewConnection(context.Background(), socket) + if err != nil { + return nil, err + } + return &podmanHandler{conn: conn}, nil +} + + +func (h *podmanHandler) listContainers() ([]types.Container, error) { + + listOptions := &containers.ListOptions{ + Namespace: func(b bool) *bool { return &b }(true), + } + + containerList, err := containers.List(h.conn, listOptions) + if err != nil { + return nil, err + } + + var containersData []types.Container + for _, container := range containerList { + c := containerFromListContainer(container) + containersData = append(containersData, c) + } + return containersData, nil +} + + +func containerFromListContainer(container entities.ListContainer) types.Container { + kaContainer := types.Container{} + + + kaContainer.ContainerID = container.ID + if len(container.Names) > 0 { + kaContainer.ContainerName = container.Names[0] + kaContainer.EndPointName = container.Names[0] + } + + + kaContainer.NamespaceName = "container_namespace" + // kaContainer.Privileged = container.Labels["privileged"] == "true" // Assuming a 'privileged' label is set + labels := []string{} + labels = append(labels, "namespaceName="+"container_namespace") + labels = append(labels, "containerType="+"podman") + labels = append(labels, "kubearmor.io/container.name="+container.Names[0]) + + for k, v := range container.Labels { + labels = append(labels,k+"="+v) + } + kaContainer.Labels = strings.Join(labels,",") + kaContainer.Status = container.State + kaContainer.PidNS, kaContainer.MntNS = getNS(container.Pid) + + return kaContainer +} + diff --git a/KubeArmor/monitor/systemMonitor.go b/KubeArmor/monitor/systemMonitor.go index c246c874bb..c8e518dff9 100644 --- a/KubeArmor/monitor/systemMonitor.go +++ b/KubeArmor/monitor/systemMonitor.go @@ -115,10 +115,10 @@ type SystemMonitor struct { // logs Logger *fd.Feeder - // container id -> cotnainer + // container id -> container Containers *map[string]tp.Container ContainersLock **sync.RWMutex - + // container id -> host pid ActiveHostPidMap *map[string]tp.PidMap ActivePidMapLock **sync.RWMutex diff --git a/KubeArmor/types/types.go b/KubeArmor/types/types.go index 1b6046f18d..0c133dfdcc 100644 --- a/KubeArmor/types/types.go +++ b/KubeArmor/types/types.go @@ -654,3 +654,20 @@ type PidNode struct { // KubeArmorHostPolicyEventCallback Function type KubeArmorHostPolicyEventCallback func(K8sKubeArmorHostPolicyEvent) pb.PolicyStatus + +// =========== // +// == Hooks == // +// =========== // + +type HookRequest struct { + Operation HookOperation `json:"operation"` + 
Detached bool `json:"detached"` + Container Container `json:"container"` +} + +type HookOperation int + +const ( + HookContainerCreate HookOperation = iota + HookContainerDelete +) \ No newline at end of file diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000000..59adad51f9 --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,86 @@ +services: + kubearmor-init: + container_name: kubearmor-init + image: docker.io/cheithanya/kubearmor-init:latest + pull_policy: "always" + user: root + labels: + app: kubearmor-init + volumes: + - "/tmp:/opt/kubearmor/BPF:rw,z" + - "/lib/modules:/lib/modules:ro,z" + - "/sys/fs/bpf:/sys/fs/bpf:ro,z" + - "/sys/kernel/security:/sys/kernel/security:ro,z" + - "/sys/kernel/debug:/sys/kernel/debug:ro,z" + - "/usr/src:/usr/src:z" + - "/media/root/etc/os-release:/media/root/etc/os-release:ro,z" + - "/etc/containers/oci/hooks.d/:/etc/containers/oci/hooks.d/:rw,z" + - "/usr/share/kubearmor:/usr/share/kubearmor:rw,z" + restart: on-failure + privileged: true + cap_add: + - SETUID + - SETGID + - SETPCAP + - SYS_ADMIN + - SYS_PTRACE + - MAC_ADMIN + - SYS_RESOURCE + - IPC_LOCK + - DAC_OVERRIDE + - DAC_READ_SEARCH + + kubearmor: + depends_on: + kubearmor-init: + condition: service_completed_successfully + hostname: cheithanya + container_name: kubearmor + image: "docker.io/cheithanya/kubearmor:latest" + pull_policy: "always" + user: root + command: + - "-k8s=false" + - "-enableKubeArmorPolicy" + - "-enableKubeArmorHostPolicy" + - "-visibility=process,network" + - "-hostVisibility=process,network" + - "-criSocket=unix:///run/podman/podman.sock" + - "-defaultFilePosture=audit" + - "-defaultNetworkPosture=audit" + - "-defaultCapabilitiesPosture=audit" + - "-hostDefaultFilePosture=audit" + - "-hostDefaultNetworkPosture=audit" + - "-hostDefaultCapabilitiesPosture=audit" + labels: + app: kubearmor + volumes: + - "/tmp:/opt/kubearmor/BPF" + - "/sys/fs/bpf:/sys/fs/bpf" + - "/sys/kernel/security:/sys/kernel/security" + - "/sys/kernel/debug:/sys/kernel/debug" + - "/etc/apparmor.d:/etc/apparmor.d" + - "/var/run/docker.sock:/var/run/docker.sock" + - "/run/docker:/run/docker" + - "/var/lib/docker:/var/lib/docker" + - "/etc/containers/oci/hooks.d/:/etc/containers/oci/hooks.d/:rw" + - "/usr/share/kubearmor:/usr/share/kubearmor:rw" + - "/var/run/kubearmor:/var/run/kubearmor:rw" + restart: always + ports: + - "32767:32767" + pid: "host" + privileged: true + cap_add: + - SETUID + - SETGID + - SETPCAP + - SYS_ADMIN + - SYS_PTRACE + - MAC_ADMIN + - SYS_RESOURCE + - IPC_LOCK + - DAC_OVERRIDE + - DAC_READ_SEARCH + +