support for unorchestrated rootful Podman using OCI hook
Signed-off-by: Cheithanya <[email protected]>
itsCheithanya committed Oct 28, 2024
1 parent dacac4a commit 43d9216
Showing 13 changed files with 1,166 additions and 5 deletions.
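With no orchestrator in the picture, KubeArmor cannot learn about Podman containers from a Kubernetes API or a runtime event stream, so this change has Podman itself report them: an OCI hook fires on container lifecycle events and forwards them to the daemon over a UNIX socket, and the deployHook binary built below presumably installs the hook configuration into Podman's hooks directory. A minimal sketch of such a hooks.d entry, generated in Go (the hooks directory, hook path, and stages are assumptions following the documented OCI hooks JSON format, not values taken from this commit):

package main

import (
	"encoding/json"
	"log"
	"os"
)

// ociHook mirrors the OCI hooks.d JSON schema that Podman reads.
type ociHook struct {
	Version string `json:"version"`
	Hook    struct {
		Path string   `json:"path"`
		Args []string `json:"args,omitempty"`
	} `json:"hook"`
	When struct {
		Always bool `json:"always"`
	} `json:"when"`
	Stages []string `json:"stages"`
}

func main() {
	h := ociHook{Version: "1.0.0", Stages: []string{"createRuntime", "poststop"}}
	h.Hook.Path = "/usr/share/kubearmor/hook" // assumed install location of the hook binary
	h.When.Always = true

	data, err := json.MarshalIndent(h, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// assumed hooks directory; Podman also reads /etc/containers/oci/hooks.d
	if err := os.WriteFile("/usr/share/containers/oci/hooks.d/kubearmor.json", data, 0644); err != nil {
		log.Fatal(err)
	}
}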
33 changes: 32 additions & 1 deletion Dockerfile.init
@@ -2,6 +2,29 @@
# Copyright 2021 Authors of KubeArmor

### Make compiler image

FROM golang:1.22-alpine3.20 AS builder
RUN apk --no-cache update
RUN apk add --no-cache git clang llvm make gcc protobuf
RUN apk add --no-cache linux-headers pkgconfig
RUN apk add --no-cache gpgme-dev
RUN apk add --no-cache btrfs-progs-dev
ARG GOARCH
ARG GOOS

WORKDIR /KubeArmor

COPY . .
WORKDIR /KubeArmor/KubeArmor

RUN go mod download

WORKDIR /KubeArmor/KubeArmor/deployHook
RUN CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GO111MODULE=on go build -o deployHook .

WORKDIR /KubeArmor/KubeArmor/hook
RUN CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} GO111MODULE=on go build -tags 'containers_image_openpgp' -o hook .

FROM redhat/ubi9-minimal AS kubearmor-init

ARG VERSION=latest
@@ -34,7 +57,15 @@ RUN groupadd --gid 1000 default \
COPY LICENSE /licenses/license.txt
COPY ./KubeArmor/BPF /KubeArmor/BPF/
COPY ./KubeArmor/build/compile.sh /KubeArmor/compile.sh
COPY --from=builder /KubeArmor/KubeArmor/hook/hook /hook
COPY --from=builder /KubeArmor/KubeArmor/deployHook/deployHook /KubeArmor/deployHook

# Copy the custom entrypoint script
COPY ./KubeArmor/build/entrypoint.sh /KubeArmor/entrypoint.sh
RUN chmod +x /KubeArmor/entrypoint.sh

RUN chown -R default:default /KubeArmor

USER 1000
ENTRYPOINT ["/KubeArmor/compile.sh"]

ENTRYPOINT ["/KubeArmor/entrypoint.sh"]
6 changes: 6 additions & 0 deletions KubeArmor/build/entrypoint.sh
@@ -0,0 +1,6 @@
#!/bin/bash
set -e

/KubeArmor/compile.sh

/KubeArmor/deployHook
5 changes: 4 additions & 1 deletion KubeArmor/common/common.go
@@ -413,7 +413,7 @@ func IsK8sEnv() bool {
}

// ContainerRuntimeSocketKeys contains FIFO ordered keys of container runtimes
-var ContainerRuntimeSocketKeys = []string{"docker", "containerd", "cri-o"}
+var ContainerRuntimeSocketKeys = []string{"docker", "containerd", "cri-o", "podman"}

// ContainerRuntimeSocketMap Structure
var ContainerRuntimeSocketMap = map[string][]string{
@@ -432,6 +432,9 @@ var ContainerRuntimeSocketMap = map[string][]string{
"/var/run/crio/crio.sock",
"/run/crio/crio.sock",
},
"podman":{
"/run/podman/podman.sock",
},
}

// GetCRISocket Function
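For context, GetCRISocket (referenced above but outside this hunk) presumably probes these candidate paths in the FIFO order of ContainerRuntimeSocketKeys, so podman is selected only when no Docker, containerd, or CRI-O socket exists. A minimal sketch of that detection logic, assuming the map above:

package main

import (
	"fmt"
	"os"
)

// FIFO-ordered runtime keys and candidate socket paths, as in common.go.
var containerRuntimeSocketKeys = []string{"docker", "containerd", "cri-o", "podman"}

var containerRuntimeSocketMap = map[string][]string{
	"podman": {"/run/podman/podman.sock"},
	// other runtimes elided for brevity
}

// detectCRISocket is a sketch, not the actual GetCRISocket: it returns the
// first candidate socket that exists on disk, honoring the FIFO key order.
func detectCRISocket() string {
	for _, runtime := range containerRuntimeSocketKeys {
		for _, path := range containerRuntimeSocketMap[runtime] {
			if _, err := os.Stat(path); err == nil {
				return path
			}
		}
	}
	return ""
}

func main() {
	if sock := detectCRISocket(); sock != "" {
		fmt.Println("detected runtime socket:", sock)
	}
}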
3 changes: 2 additions & 1 deletion KubeArmor/core/dockerHandler.go
@@ -15,6 +15,7 @@ import (

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/client"

"github.com/kubearmor/KubeArmor/KubeArmor/common"
@@ -266,7 +267,7 @@ func (dm *KubeArmorDaemon) GetAlreadyDeployedDockerContainers() {
}
}

-if containerList, err := Docker.DockerClient.ContainerList(context.Background(), types.ContainerListOptions{}); err == nil {
+if containerList, err := Docker.DockerClient.ContainerList(context.Background(), container.ListOptions{}); err == nil {
for _, dcontainer := range containerList {
// get container information from docker client
container, err := Docker.GetContainerInfo(dcontainer.ID, dm.OwnerInfo)
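Aside: newer releases of the Docker Go SDK moved the list options from the types package into the container subpackage (types.ContainerListOptions became container.ListOptions), which is what this hunk adapts to. A small standalone sketch against the newer API; the client options used here are standard SDK helpers rather than code from this commit:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	// negotiate the API version so the client works across daemon versions
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// container.ListOptions replaces the deprecated types.ContainerListOptions
	containers, err := cli.ContainerList(context.Background(), container.ListOptions{All: true})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image)
	}
}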
206 changes: 206 additions & 0 deletions KubeArmor/core/hook_handler.go
@@ -0,0 +1,206 @@
// SPDX-License-Identifier: Apache-2.0
// Copyright 2022 Authors of KubeArmor

package core

import (
"encoding/json"
"errors"
"io"
"log"
"net"
"os"
"path/filepath"
"sync/atomic"

kl "github.com/kubearmor/KubeArmor/KubeArmor/common"
cfg "github.com/kubearmor/KubeArmor/KubeArmor/config"
"github.com/kubearmor/KubeArmor/KubeArmor/types"
)

const kubearmorDir = "/var/run/kubearmor"

// ListenToHook starts listening on a UNIX socket and waits for container hooks
// to pass new containers
func (dm *KubeArmorDaemon) ListenToHook() {
if err := os.MkdirAll(kubearmorDir, 0750); err != nil {
log.Fatal(err)
}

listenPath := filepath.Join(kubearmorDir, "ka.sock")
err := os.Remove(listenPath) // remove a stale socket if KubeArmor crashed and left one behind
if err != nil && !errors.Is(err, os.ErrNotExist) {
log.Fatal(err)
}

socket, err := net.Listen("unix", listenPath)
if err != nil {
log.Fatal(err)
}

defer socket.Close()
defer os.Remove(listenPath)
ready := &atomic.Bool{}

for {
conn, err := socket.Accept()
if err != nil {
log.Fatal(err)
}

go dm.handleConn(conn, ready)
}

}

// handleConn gets container details from container hooks.
func (dm *KubeArmorDaemon) handleConn(conn net.Conn, ready *atomic.Bool) {
// We need to make sure that no new containers are accepted until all containers
// created before KubeArmor started have been sent first. Otherwise a hook could
// report a container as deleted before the process responsible for replaying
// pre-existing containers reports it as created, leaving KubeArmor in an
// incorrect state.
defer conn.Close()
buf := make([]byte, 4096)

for {
n, err := conn.Read(buf)
if err == io.EOF {
return
}
if err != nil {
log.Fatal(err)
}

data := types.HookRequest{}

err = json.Unmarshal(buf[:n], &data)
if err != nil {
log.Fatal(err)
}

if data.Detached {
// we want KubeArmor to start accepting new containers only after
// all previously created containers have been sent
defer ready.Store(true)
} else if !ready.Load() {
_, err = conn.Write([]byte("err"))
if err == io.EOF {
return
} else if err != nil {
log.Println(err)
return
}
continue
}
_, err = conn.Write([]byte("ok"))
if err == io.EOF {
return
} else if err != nil {
log.Println(err)
return
}

if data.Operation == types.HookContainerCreate {
// dm.handleContainerCreate(data.Container)
dm.Logger.Printf("Create ID %s",data.Container.ContainerID)
dm.UpdatePodmanContainer(data.Container.ContainerID,data.Container,"create")
} else {
// dm.handleContainerDelete(data.Container.ContainerID)
dm.Logger.Printf("Delete ID %s",data.Container.ContainerID)
dm.UpdatePodmanContainer(data.Container.ContainerID, dm.Containers[data.Container.ContainerID],"destroy")
}

}
}
func (dm *KubeArmorDaemon) handleContainerCreate(container types.Container) {
endpoint := types.EndPoint{}

dm.Logger.Printf("added %s", container.ContainerID)

dm.ContainersLock.Lock()
defer dm.ContainersLock.Unlock()
if _, ok := dm.Containers[container.ContainerID]; !ok {
dm.Containers[container.ContainerID] = container
} else if dm.Containers[container.ContainerID].PidNS == 0 && dm.Containers[container.ContainerID].MntNS == 0 {
c := dm.Containers[container.ContainerID]
c.MntNS = container.MntNS
c.PidNS = container.PidNS
c.AppArmorProfile = container.AppArmorProfile
dm.Containers[c.ContainerID] = c

dm.EndPointsLock.Lock()
for idx, endPoint := range dm.EndPoints {
if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) {

// update apparmor profiles
if !kl.ContainsElement(endPoint.AppArmorProfiles, container.AppArmorProfile) {
dm.EndPoints[idx].AppArmorProfiles = append(dm.EndPoints[idx].AppArmorProfiles, container.AppArmorProfile)
}

if container.Privileged && dm.EndPoints[idx].PrivilegedContainers != nil {
dm.EndPoints[idx].PrivilegedContainers[container.ContainerName] = struct{}{}
}

endpoint = dm.EndPoints[idx]

break
}
}
dm.EndPointsLock.Unlock()
}

if len(dm.OwnerInfo) > 0 {
container.Owner = dm.OwnerInfo[container.EndPointName]
}

if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy {
dm.SystemMonitor.AddContainerIDToNsMap(container.ContainerID, container.NamespaceName, container.PidNS, container.MntNS)
dm.RuntimeEnforcer.RegisterContainer(container.ContainerID, container.PidNS, container.MntNS)

if len(endpoint.SecurityPolicies) > 0 { // struct can be empty or no policies registered for the endpoint yet
dm.Logger.UpdateSecurityPolicies("ADDED", endpoint)
if dm.RuntimeEnforcer != nil && endpoint.PolicyEnabled == types.KubeArmorPolicyEnabled {
// enforce security policies
dm.RuntimeEnforcer.UpdateSecurityPolicies(endpoint)
}
}
}
}
func (dm *KubeArmorDaemon) handleContainerDelete(containerID string) {
dm.ContainersLock.Lock()
container, ok := dm.Containers[containerID]
dm.Logger.Printf("deleted %s", containerID)
if !ok {
dm.ContainersLock.Unlock()
return
}
delete(dm.Containers, containerID)
dm.ContainersLock.Unlock()

dm.EndPointsLock.Lock()
for idx, endPoint := range dm.EndPoints {
if endPoint.NamespaceName == container.NamespaceName && endPoint.EndPointName == container.EndPointName && kl.ContainsElement(endPoint.Containers, container.ContainerID) {

// update apparmor profiles
for idxA, profile := range endPoint.AppArmorProfiles {
if profile == container.AppArmorProfile {
dm.EndPoints[idx].AppArmorProfiles = append(dm.EndPoints[idx].AppArmorProfiles[:idxA], dm.EndPoints[idx].AppArmorProfiles[idxA+1:]...)
break
}
}

break
}
}
dm.EndPointsLock.Unlock()

if dm.SystemMonitor != nil && cfg.GlobalCfg.Policy {
// update NsMap
dm.SystemMonitor.DeleteContainerIDFromNsMap(containerID, container.NamespaceName, container.PidNS, container.MntNS)
dm.RuntimeEnforcer.UnregisterContainer(containerID)
}

}



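The loop above implies a simple protocol for the hook side: write one JSON-encoded HookRequest per event and resend while the daemon answers "err", that is, until the detached process has replayed every pre-existing container and ready flips to true. A hedged sketch of such a client; the socket path and response strings come from this file, while the retry interval and the main function are assumptions:

package main

import (
	"encoding/json"
	"log"
	"net"
	"time"

	"github.com/kubearmor/KubeArmor/KubeArmor/types"
)

// notifyKubeArmor sends one HookRequest over ka.sock and resends it while
// the daemon answers "err" (not ready yet). This mirrors the server loop
// in handleConn above; the retry interval is an assumption.
func notifyKubeArmor(req types.HookRequest) error {
	conn, err := net.Dial("unix", "/var/run/kubearmor/ka.sock")
	if err != nil {
		return err
	}
	defer conn.Close()

	payload, err := json.Marshal(req)
	if err != nil {
		return err
	}

	resp := make([]byte, 8)
	for {
		if _, err := conn.Write(payload); err != nil {
			return err
		}
		n, err := conn.Read(resp)
		if err != nil {
			return err
		}
		if string(resp[:n]) == "ok" { // accepted
			return nil
		}
		time.Sleep(100 * time.Millisecond) // "err": daemon still replaying
	}
}

func main() {
	req := types.HookRequest{Operation: types.HookContainerCreate}
	if err := notifyKubeArmor(req); err != nil {
		log.Fatal(err)
	}
}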
2 changes: 2 additions & 0 deletions KubeArmor/core/kubeArmor.go
@@ -591,6 +591,8 @@ func KubeArmor() {
} else if strings.Contains(cfg.GlobalCfg.CRISocket, "cri-o") {
// monitor crio events
go dm.MonitorCrioEvents()
} else if strings.Contains(cfg.GlobalCfg.CRISocket, "podman") {
go dm.ListenToHook()
} else {
dm.Logger.Warnf("Failed to monitor containers: %s is not a supported CRI socket.", cfg.GlobalCfg.CRISocket)
enableContainerPolicy = false
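With the Podman socket configured (or autodetected) as the CRI socket, the branch above routes KubeArmor to the hook listener instead of an event-polling monitor. A compact illustration of the dispatch; the non-Podman branches are assumed from the surrounding code:

package main

import (
	"fmt"
	"strings"
)

// monitorFor mirrors the runtime dispatch in KubeArmor(): the Podman
// socket selects the OCI-hook listener rather than event polling.
func monitorFor(criSocket string) string {
	switch {
	case strings.Contains(criSocket, "docker"):
		return "MonitorDockerEvents"
	case strings.Contains(criSocket, "containerd"):
		return "MonitorContainerdEvents"
	case strings.Contains(criSocket, "cri-o"):
		return "MonitorCrioEvents"
	case strings.Contains(criSocket, "podman"):
		return "ListenToHook"
	default:
		return "unsupported CRI socket"
	}
}

func main() {
	fmt.Println(monitorFor("unix:///run/podman/podman.sock")) // ListenToHook
}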
(7 more changed files not shown)
