feat(bpf,core,feeder,monitor): improve performance and owner only handling (#1491)
daemon1024 authored Nov 14, 2023
1 parent 97f5c11 commit 7619a8d
Showing 10 changed files with 49 additions and 59 deletions.
19 changes: 15 additions & 4 deletions KubeArmor/BPF/system_monitor.c
@@ -219,6 +219,7 @@ typedef struct __attribute__((__packed__)) sys_context

char comm[TASK_COMM_LEN];
char cwd[CWD_LEN];
u32 oid; // owner id
} sys_context_t;

#define BPF_MAP(_name, _type, _key_type, _value_type, _max_entries) \
@@ -1026,23 +1027,22 @@ static __always_inline u32 init_context(sys_context_t *context)

bufs_t *string_p = get_buffer(CWD_BUF_TYPE);
if (string_p == NULL)
return 0;

if (!prepend_path(&path, string_p, CWD_BUF_TYPE))
{
return 0;
}

u32 *off = get_buffer_offset(CWD_BUF_TYPE);
if (off == NULL)
return 0;

-bpf_probe_read_str(&context->cwd, CWD_LEN,(void *)&string_p->buf[*off]);
+bpf_probe_read_str(&context->cwd, CWD_LEN, (void *)&string_p->buf[*off]);

return 0;
}


SEC("kprobe/security_path_mknod")
int kprobe__security_path_mknod(struct pt_regs *ctx)
{
@@ -1456,6 +1456,17 @@ static __always_inline int trace_ret_generic(u32 id, struct pt_regs *ctx, u64 ty
return 0;
}

u64 pid_tgid = bpf_get_current_pid_tgid();

struct path *p = bpf_map_lookup_elem(&file_map, &pid_tgid);
if (p)
{
struct dentry *dent = READ_KERN(p->dentry);
struct inode *ino = READ_KERN(dent->d_inode);
kuid_t owner = READ_KERN(ino->i_uid);
context.oid = owner.val;
}

set_buffer_offset(DATA_BUF_TYPE, sizeof(sys_context_t));

bufs_t *bufs_p = get_buffer(DATA_BUF_TYPE);
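The notable addition here is the in-kernel owner lookup: on syscall return, trace_ret_generic takes the struct path saved by the entry probe in file_map, walks dentry -> d_inode -> i_uid, and ships the owning UID to user space as the new oid field. A rough user-space analogue of that lookup, and roughly what the old stat-based approach had to do against the container's merged rootfs, is sketched below (not KubeArmor code; the package and function names are mine):

    package sketch

    import "syscall"

    // fileOwnerUID returns the owning UID of path, the same value the BPF
    // probe now reads from the inode's i_uid and emits as context.oid.
    // Doing this from user space requires the file to be visible in the
    // agent's mount namespace, which is why the old code needed MergedDir.
    func fileOwnerUID(path string) (uint32, error) {
    	var st syscall.Stat_t
    	if err := syscall.Stat(path, &st); err != nil {
    		return 0, err
    	}
    	return st.Uid, nil
    }

Resolving the owner in the kernel removes that per-event stat and its rootfs-path bookkeeping, which is why the handler and type changes below all delete MergedDir.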
20 changes: 0 additions & 20 deletions KubeArmor/core/containerdHandler.go
@@ -9,7 +9,6 @@ import (
"fmt"
"os"
"strconv"
"strings"
"time"

kl "github.com/kubearmor/KubeArmor/KubeArmor/common"
@@ -60,9 +59,6 @@ type ContainerdHandler struct {
containerd context.Context
docker context.Context

// storage path
StoragePath string

// active containers
containers map[string]context.Context
}
@@ -71,14 +67,6 @@ type ContainerdHandler struct {
func NewContainerdHandler() *ContainerdHandler {
ch := &ContainerdHandler{}

if strings.Contains(cfg.GlobalCfg.CRISocket, "microk8s") { // microk8s
ch.StoragePath = "/var/snap/microk8s/common/run/containerd"
} else if strings.Contains(cfg.GlobalCfg.CRISocket, "k3s") { // k3s
ch.StoragePath = "/run/k3s/containerd"
} else { // vanilla containerd
ch.StoragePath = "/run/containerd"
}

conn, err := grpc.Dial(cfg.GlobalCfg.CRISocket, grpc.WithInsecure())
if err != nil {
return nil
@@ -154,14 +142,6 @@ func (ch *ContainerdHandler) GetContainerInfo(ctx context.Context, containerID s
spec := iface.(*specs.Spec)
container.AppArmorProfile = spec.Process.ApparmorProfile

if spec.Root.Path == "rootfs" { // containerd
preMergedDir := ch.StoragePath + "/io.containerd.runtime.v2.task/k8s.io/"
postMergedDir := "/rootfs"
container.MergedDir = preMergedDir + container.ContainerID + postMergedDir
} else { // docker
container.MergedDir = spec.Root.Path
}

// == //

taskReq := pt.ListPidsRequest{ContainerID: container.ContainerID}
3 changes: 0 additions & 3 deletions KubeArmor/core/crioHandler.go
@@ -120,9 +120,6 @@ func (ch *CrioHandler) GetContainerInfo(ctx context.Context, containerID string)
// path to container's root storage
container.AppArmorProfile = containerInfo.RuntimeSpec.Process.ApparmorProfile

// path to the rootfs
container.MergedDir = containerInfo.RuntimeSpec.Root.Path

pid := strconv.Itoa(containerInfo.Pid)

if data, err := os.Readlink("/proc/" + pid + "/ns/pid"); err == nil {
2 changes: 0 additions & 2 deletions KubeArmor/core/dockerHandler.go
@@ -116,8 +116,6 @@ func (dh *DockerHandler) GetContainerInfo(containerID string) (tp.Container, err

container.AppArmorProfile = inspect.AppArmorProfile

container.MergedDir = inspect.GraphDriver.Data["MergedDir"]

// == //

pid := strconv.Itoa(inspect.State.Pid)
3 changes: 0 additions & 3 deletions KubeArmor/feeder/feeder.go
@@ -596,9 +596,6 @@ func (fd *Feeder) PushLog(log tp.Log) {
// set hostname
log.HostName = cfg.GlobalCfg.Host

// remove MergedDir
log.MergedDir = ""

// remove flags
log.PolicyEnabled = 0
log.ProcessVisibilityEnabled = false
8 changes: 4 additions & 4 deletions KubeArmor/feeder/policyMatcher.go
@@ -1014,19 +1014,19 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log {

matchedFlags := false

-if secPolicy.ReadOnly && log.Resource != "" && secPolicy.OwnerOnly && log.MergedDir != "" {
+if secPolicy.ReadOnly && log.Resource != "" && secPolicy.OwnerOnly {
// read only && owner only
-if strings.Contains(log.Data, "O_RDONLY") && strconv.Itoa(int(log.UID)) == getFileProcessUID(log.MergedDir+log.Resource) {
+if strings.Contains(log.Data, "O_RDONLY") && log.UID == log.OID {
matchedFlags = true
}
} else if secPolicy.ReadOnly && log.Resource != "" {
// read only
if strings.Contains(log.Data, "O_RDONLY") {
matchedFlags = true
}
-} else if secPolicy.OwnerOnly && log.MergedDir != "" {
+} else if secPolicy.OwnerOnly {
// owner only
-if strconv.Itoa(int(log.UID)) == getFileProcessUID(log.MergedDir+log.Resource) {
+if log.UID == log.OID {
matchedFlags = true
}
} else {
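With the owner UID carried on every event, the owner-only decision reduces to an integer comparison and no longer depends on MergedDir being resolvable. A standalone restatement of the branching above (a sketch; the function name, the parameter types, and the final default branch are my assumptions, not KubeArmor's API):

    package sketch

    import "strings"

    // flagsMatch mirrors the ReadOnly/OwnerOnly matching: ReadOnly gates on
    // the O_RDONLY open flag recorded in the event data, and OwnerOnly
    // compares the caller's UID against the kernel-reported file owner.
    func flagsMatch(readOnly, ownerOnly bool, resource, data string, uid, oid int32) bool {
    	switch {
    	case readOnly && resource != "" && ownerOnly:
    		return strings.Contains(data, "O_RDONLY") && uid == oid
    	case readOnly && resource != "":
    		return strings.Contains(data, "O_RDONLY")
    	case ownerOnly:
    		return uid == oid
    	default:
    		return true
    	}
    }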
4 changes: 1 addition & 3 deletions KubeArmor/monitor/logUpdate.go
@@ -35,9 +35,6 @@ func (mon *SystemMonitor) UpdateContainerInfoByContainerID(log tp.Log) tp.Log {
log.ContainerName = val.ContainerName
log.ContainerImage = val.ContainerImage

// get merged directory
log.MergedDir = val.MergedDir

// update policy flag
log.PolicyEnabled = val.PolicyEnabled

@@ -88,6 +85,7 @@ func (mon *SystemMonitor) BuildLogBase(eventID int32, msg ContextCombined) tp.Log
}

log.Cwd = strings.TrimRight(string(msg.ContextSys.Cwd[:]), "\x00") + "/"
log.OID = int32(msg.ContextSys.OID)

log.ParentProcessName = mon.GetExecPath(msg.ContainerID, msg.ContextSys.HostPPID)
log.ProcessName = mon.GetExecPath(msg.ContainerID, msg.ContextSys.HostPID)
31 changes: 23 additions & 8 deletions KubeArmor/monitor/systemMonitor.go
@@ -82,6 +82,7 @@ type SyscallContext struct {

Comm [16]byte
Cwd [80]byte
OID uint32
}

// ContextCombined Structure
@@ -563,21 +564,35 @@ func (mon *SystemMonitor) TraceSyscall() {
}

now := time.Now()
-if now.After(time.Unix(int64(ctx.Ts), 0).Add(5 * time.Second)) {
+if now.After(time.Unix(int64(ctx.Ts), 0).Add(10 * time.Second)) {
	mon.Logger.Warn("Event dropped due to replay timeout")
	continue
}

// Best effort replay
go func() {
-	time.Sleep(1 * time.Second)
-	select {
-	case mon.SyscallChannel <- dataRaw:
-	default:
-		// channel is full, wait for a short time before retrying
-		time.Sleep(1 * time.Second)
-		mon.Logger.Warn("Event droped due to busy event channel")
-	}
+	for i := 0; i < 10; i++ {
+		containerID := ""
+
+		if ctx.PidID != 0 && ctx.MntID != 0 {
+			containerID = mon.LookupContainerID(ctx.PidID, ctx.MntID, ctx.HostPPID, ctx.HostPID)
+
+			if containerID == "" {
+				time.Sleep(1 * time.Second)
+				continue
+			}
+		}
+
+		select {
+		case mon.SyscallChannel <- dataRaw:
+		default:
+			// channel is full, wait for a short time before retrying
+			time.Sleep(1 * time.Second)
+			mon.Logger.Warn("Event dropped due to busy event channel")
+		}
+	}
+
+	mon.Logger.Warn("Event dropped due to replay timeout")
}()
}
}()
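The replay path changes from a single deferred retry into a bounded loop: up to ten attempts, each waiting for the event's container ID to become resolvable before re-queueing the raw sample, so events that arrive before the container handler has registered the container are no longer lost after one second. Stripped of the monitor's internals, the shape is roughly this (helper names and the explicit return after a successful send are mine, not KubeArmor's):

    package sketch

    import "time"

    // replay retries delivery of one raw event for up to roughly ten
    // seconds. resolvable stands in for the LookupContainerID check,
    // events for mon.SyscallChannel, and warn for mon.Logger.Warn.
    func replay(events chan<- []byte, data []byte, resolvable func() bool, warn func(string)) {
    	go func() {
    		for i := 0; i < 10; i++ {
    			if !resolvable() {
    				// container not registered yet; give the handler time
    				time.Sleep(1 * time.Second)
    				continue
    			}
    			select {
    			case events <- data:
    				return // delivered
    			default:
    				// channel full; back off briefly and retry
    				time.Sleep(1 * time.Second)
    				warn("event channel busy, retrying")
    			}
    		}
    		warn("event dropped due to replay timeout")
    	}()
    }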
6 changes: 1 addition & 5 deletions KubeArmor/types/types.go
@@ -35,8 +35,6 @@ type Container struct {
PidNS uint32 `json:"pidns"`
MntNS uint32 `json:"mntns"`

MergedDir string `json:"mergedDir"`

// == //

PolicyEnabled int `json:"policyEnabled"`
@@ -202,9 +200,6 @@ type Log struct {
ContainerName string `json:"containerName,omitempty"`
ContainerImage string `json:"containerImage,omitempty"`

// container merged directory
MergedDir string `json:"mergedDir,omitempty"`

// common
HostPPID int32 `json:"hostPPid"`
HostPID int32 `json:"hostPid"`
@@ -234,6 +229,7 @@ type Log struct {
Operation string `json:"operation"`
Resource string `json:"resource"`
Cwd string `json:"cwd"`
OID int32 `json:"oid"`
Data string `json:"data,omitempty"`
Action string `json:"action,omitempty"`
Result string `json:"result"`
12 changes: 5 additions & 7 deletions tests/k8s_env/syscalls/syscalls_test.go
@@ -5,7 +5,6 @@ package syscalls

import (
"fmt"
"strings"
"time"

"github.com/kubearmor/KubeArmor/protobuf"
@@ -482,9 +481,8 @@ var _ = Describe("Syscalls", func() {
})

It("mount will be blocked by default for a pod", func() {
-if strings.Contains(K8sRuntimeEnforcer(), "bpf") {
-	Skip("Skipping due to alerts being generated when something is blocked by kubearmor")
-}
+Skip("Skipping due to alerts only being generated when something is blocked by kubearmor")

// Start KubeArmor Logs
err := KarmorLogStart("policy", "syscalls", "Syscall", ubuntu)
Expect(err).To(BeNil())
@@ -511,9 +509,9 @@
})

It("umount will be blocked by default for a pod as the capability not added", func() {
-if strings.Contains(K8sRuntimeEnforcer(), "bpf") {
-	Skip("Skipping due to alerts being generated when something is blocked by kubearmor")
-}
+Skip("Skipping due to alerts only being generated when something is blocked by kubearmor")

// Start KubeArmor Logs
err := KarmorLogStart("policy", "syscalls", "Syscall", ubuntu)
Expect(err).To(BeNil())
