
Merge pull request #1293 from Shreyas220/annotition
feat: Annotating pre existing replicasets, statefulsets and daemonsets
Shreyas220 authored Aug 8, 2023
2 parents f58a36b + 3260e68 commit 46a1e11
Showing 11 changed files with 344 additions and 68 deletions.
6 changes: 6 additions & 0 deletions .github/workflows/ci-test-ginkgo.yml
@@ -54,6 +54,12 @@ jobs:
run: |
make docker-build
- name: deploy pre-existing pod
run: |
kubectl apply -f ./tests/ksp/pre-run-pod.yaml
sleep 60
kubectl get pods -A
- name: Run KubeArmor
working-directory: pkg/KubeArmorOperator
run: |
109 changes: 108 additions & 1 deletion KubeArmor/core/k8sHandler.go
@@ -224,7 +224,7 @@ func (kh *K8sHandler) DoRequest(cmd string, data interface{}, path string) ([]by
// ================ //

// PatchDeploymentWithAppArmorAnnotations Function
func (kh *K8sHandler) PatchDeploymentWithAppArmorAnnotations(namespaceName, deploymentName string, appArmorAnnotations map[string]string) error {
func (kh *K8sHandler) PatchResourceWithAppArmorAnnotations(namespaceName, deploymentName string, appArmorAnnotations map[string]string, kind string) error {
if !kl.IsK8sEnv() { // not Kubernetes
return nil
}
@@ -248,6 +248,52 @@ func (kh *K8sHandler) PatchDeploymentWithAppArmorAnnotations(namespaceName, depl

spec = spec + `}}}}}`

if kind == "StatefulSet" {
_, err := kh.K8sClient.AppsV1().StatefulSets(namespaceName).Patch(context.Background(), deploymentName, types.StrategicMergePatchType, []byte(spec), metav1.PatchOptions{})
if err != nil {
return err
}
return nil

} else if kind == "ReplicaSet" {
rs, err := kh.K8sClient.AppsV1().ReplicaSets(namespaceName).Get(context.Background(), deploymentName, metav1.GetOptions{})
if err != nil {
return err
}
replicas := *rs.Spec.Replicas
_, err = kh.K8sClient.AppsV1().ReplicaSets(namespaceName).Patch(context.Background(), deploymentName, types.MergePatchType, []byte(spec), metav1.PatchOptions{})
if err != nil {
return err
}

// To propagate the updated annotations we need to restart the ReplicaSet, so we scale it down and back up
patchData := []byte(`{"spec": {"replicas": 0}}`)
_, err = kh.K8sClient.AppsV1().ReplicaSets(namespaceName).Patch(context.Background(), deploymentName, types.StrategicMergePatchType, patchData, metav1.PatchOptions{})
if err != nil {
return err
}
time.Sleep(2 * time.Second)
patchData2 := []byte(fmt.Sprintf(`{"spec": {"replicas": %d}}`, replicas))
_, err = kh.K8sClient.AppsV1().ReplicaSets(namespaceName).Patch(context.Background(), deploymentName, types.StrategicMergePatchType, patchData2, metav1.PatchOptions{})
if err != nil {
return err
}

return nil
} else if kind == "DaemonSet" {
_, err := kh.K8sClient.AppsV1().DaemonSets(namespaceName).Patch(context.Background(), deploymentName, types.MergePatchType, []byte(spec), metav1.PatchOptions{})
if err != nil {
return err
}
return nil

} else if kind == "Pod" {
_, err := kh.K8sClient.CoreV1().Pods(namespaceName).Patch(context.Background(), deploymentName, types.MergePatchType, []byte(spec), metav1.PatchOptions{})
if err != nil {
return err
}
return nil
}
_, err := kh.K8sClient.AppsV1().Deployments(namespaceName).Patch(context.Background(), deploymentName, types.StrategicMergePatchType, []byte(spec), metav1.PatchOptions{})
if err != nil {
return err
@@ -526,3 +572,64 @@ func (kh *K8sHandler) WatchK8sHostSecurityPolicies() *http.Response {

return nil
}

// getTopLevelOwner returns the name, kind, and namespace of the top-level owner of the given object
func getTopLevelOwner(obj metav1.ObjectMeta, namespace string, objkind string) (string, string, string, error) {
ownerRef := kl.GetControllingPodOwner(obj.OwnerReferences)
if ownerRef == nil {
return obj.Name, objkind, namespace, nil
}

switch ownerRef.Kind {
case "Pod":
pod, err := K8s.K8sClient.CoreV1().Pods(namespace).Get(context.Background(), ownerRef.Name, metav1.GetOptions{})
if err != nil {
return "", "", "", err
}
if len(pod.OwnerReferences) > 0 {
return getTopLevelOwner(pod.ObjectMeta, namespace, "Pod")
}
return pod.Name, "Pod", namespace, nil
case "Deployment":
deployment, err := K8s.K8sClient.AppsV1().Deployments(namespace).Get(context.Background(), ownerRef.Name, metav1.GetOptions{})
if err != nil {
return "", "", "", err
}
if len(deployment.OwnerReferences) > 0 {
return getTopLevelOwner(deployment.ObjectMeta, namespace, "Deployment")
}
return deployment.Name, "Deployment", deployment.Namespace, nil
case "ReplicaSet":
replicaset, err := K8s.K8sClient.AppsV1().ReplicaSets(namespace).Get(context.Background(), ownerRef.Name, metav1.GetOptions{})
if err != nil {
return "", "", "", err
}
if len(replicaset.OwnerReferences) > 0 {
return getTopLevelOwner(replicaset.ObjectMeta, namespace, "ReplicaSet")
}
return replicaset.Name, "ReplicaSet", replicaset.Namespace, nil
case "StatefulSet":
statefulset, err := K8s.K8sClient.AppsV1().StatefulSets(namespace).Get(context.Background(), ownerRef.Name, metav1.GetOptions{})
if err != nil {
return "", "", "", err
}
if len(statefulset.OwnerReferences) > 0 {
return getTopLevelOwner(statefulset.ObjectMeta, namespace, "StatefulSet")
}
return statefulset.Name, "StatefulSet", statefulset.Namespace, nil

case "DaemonSet":
daemonset, err := K8s.K8sClient.AppsV1().DaemonSets(namespace).Get(context.Background(), ownerRef.Name, metav1.GetOptions{})
if err != nil {
return "", "", "", err
}
if len(daemonset.OwnerReferences) > 0 {
return getTopLevelOwner(daemonset.ObjectMeta, namespace, "DaemonSet")
}
return daemonset.Name, "DaemonSet", daemonset.Namespace, nil

// Default case: kinds without a dedicated handler resolve to the object itself
default:
return obj.Name, objkind, namespace, nil
}
return "", "", "", nil
}
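For context, a minimal self-contained sketch of the strategic-merge-patch body that PatchResourceWithAppArmorAnnotations sends. The diff above elides the construction of spec, so the annotation key convention shown here (container.apparmor.security.beta.kubernetes.io/<container> with a localhost/<profile> value) is assumed from standard Kubernetes AppArmor usage rather than copied from the hidden lines:

package main

import (
	"encoding/json"
	"fmt"
)

// buildAppArmorPatch assembles the kind of patch body the function above
// sends: one AppArmor annotation per container on the workload's pod template.
func buildAppArmorPatch(appArmorAnnotations map[string]string) ([]byte, error) {
	annotations := map[string]string{}
	for container, profile := range appArmorAnnotations {
		annotations["container.apparmor.security.beta.kubernetes.io/"+container] = "localhost/" + profile
	}
	patch := map[string]interface{}{
		"spec": map[string]interface{}{
			"template": map[string]interface{}{
				"metadata": map[string]interface{}{
					"annotations": annotations,
				},
			},
		},
	}
	return json.Marshal(patch)
}

func main() {
	body, err := buildAppArmorPatch(map[string]string{"nginx": "kubearmor-nginx"})
	if err != nil {
		panic(err)
	}
	// body is what gets handed to Patch(..., types.StrategicMergePatchType, body, ...)
	fmt.Println(string(body))
}

Patching spec.template.metadata.annotations lets the controller roll the change out to its pods; bare ReplicaSets do not restart pods on template changes, which is why the diff additionally scales them to zero and back.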
100 changes: 51 additions & 49 deletions KubeArmor/core/kubeUpdate.go
@@ -544,50 +544,19 @@ func (dm *KubeArmorDaemon) WatchK8sPods() {
pod.Metadata["namespaceName"] = event.Object.ObjectMeta.Namespace
pod.Metadata["podName"] = event.Object.ObjectMeta.Name

ownerRef := kl.GetControllingPodOwner(event.Object.ObjectMeta.OwnerReferences)
if ownerRef != nil {
podOwnerName = ownerRef.Name
pod.Metadata["owner.controller"] = ownerRef.Kind
if ownerRef.Kind == "ReplicaSet" {
deploymentName, deploymentNamespace := K8s.GetDeploymentNameControllingReplicaSet(pod.Metadata["namespaceName"], podOwnerName)
if deploymentName != "" {
pod.Metadata["deploymentName"] = deploymentName
pod.Metadata["owner.controllerName"] = deploymentName
pod.Metadata["owner.controller"] = "Deployment"
pod.Metadata["owner.namespace"] = deploymentNamespace
} else {
replicaSetName, replicaSetNamespace := K8s.GetReplicaSet(pod.Metadata["namespaceName"], podOwnerName)
if replicaSetName != "" {
pod.Metadata["owner.controllerName"] = replicaSetName
pod.Metadata["owner.namespace"] = replicaSetNamespace
}
}
// if it belongs to a replicaset, we also remove the pod template hash
podOwnerName = strings.TrimSuffix(podOwnerName, fmt.Sprintf("-%s", event.Object.ObjectMeta.Labels["pod-template-hash"]))
} else if ownerRef.Kind == "DaemonSet" {
daemonSetName, daemonSetNamespace := K8s.GetDaemonSet(pod.Metadata["namespaceName"], podOwnerName)
if daemonSetName != "" {
pod.Metadata["owner.controllerName"] = daemonSetName
pod.Metadata["owner.namespace"] = daemonSetNamespace
}
} else if ownerRef.Kind == "StatefulSet" {
statefulSetName, statefulSetNamespace := K8s.GetStatefulSet(pod.Metadata["namespaceName"], podOwnerName)
if statefulSetName != "" {
pod.Metadata["owner.controllerName"] = statefulSetName
pod.Metadata["owner.namespace"] = statefulSetNamespace
}
} else if ownerRef.Kind == "Pod" {
pod.Metadata["owner.controllerName"] = ownerRef.Name
pod.Metadata["owner.namespace"] = pod.Metadata["namespaceName"]
}
} else {
// static pod
podOwnerName = event.Object.ObjectMeta.Name
pod.Metadata["owner.controllerName"] = pod.Metadata["podName"]
pod.Metadata["owner.controller"] = "Pod"
pod.Metadata["owner.namespace"] = pod.Metadata["namespaceName"]
controllerName, controller, namespace, err := getTopLevelOwner(event.Object.ObjectMeta, event.Object.Namespace, event.Object.Kind)
if err != nil {
dm.Logger.Errf("Failed to get ownerRef (%s, %s)", event.Object.ObjectMeta.Name, err.Error())
}

podOwnerName = controllerName
pod.Metadata["deploymentName"] = controllerName
pod.Metadata["owner.controller"] = controller
pod.Metadata["owner.namespace"] = namespace

// get the owner, then check whether that owner itself has an owner; recurse until the top-level owner is reached

pod.Annotations = map[string]string{}
for k, v := range event.Object.Annotations {
pod.Annotations[k] = v
@@ -701,13 +670,46 @@ func (dm *KubeArmorDaemon) WatchK8sPods() {
appArmorAnnotations := map[string]string{}
updateAppArmor := false

if deploymentName, ok := pod.Metadata["deploymentName"]; ok {
deploy, err := K8s.K8sClient.AppsV1().Deployments(pod.Metadata["namespaceName"]).Get(context.Background(), deploymentName, metav1.GetOptions{})
if err == nil {
for _, c := range deploy.Spec.Template.Spec.Containers {
containers = append(containers, c.Name)
if _, ok := pod.Metadata["deploymentName"]; ok {
if pod.Metadata["owner.controller"] == "StatefulSet" {
statefulset, err := K8s.K8sClient.AppsV1().StatefulSets(pod.Metadata["namespaceName"]).Get(context.Background(), podOwnerName, metav1.GetOptions{})
if err == nil {
for _, c := range statefulset.Spec.Template.Spec.Containers {
containers = append(containers, c.Name)
}
}
} else if pod.Metadata["owner.controller"] == "ReplicaSet" {
replica, err := K8s.K8sClient.AppsV1().ReplicaSets(pod.Metadata["namespaceName"]).Get(context.Background(), podOwnerName, metav1.GetOptions{})
if err == nil {
for _, c := range replica.Spec.Template.Spec.Containers {
containers = append(containers, c.Name)
}
}

} else if pod.Metadata["owner.controller"] == "DaemonSet" {
daemon, err := K8s.K8sClient.AppsV1().DaemonSets(pod.Metadata["namespaceName"]).Get(context.Background(), podOwnerName, metav1.GetOptions{})
if err == nil {
for _, c := range daemon.Spec.Template.Spec.Containers {
containers = append(containers, c.Name)
}
}
} else if pod.Metadata["owner.controller"] == "Deployment" {
deploy, err := K8s.K8sClient.AppsV1().Deployments(pod.Metadata["namespaceName"]).Get(context.Background(), podOwnerName, metav1.GetOptions{})
if err == nil {
for _, c := range deploy.Spec.Template.Spec.Containers {
containers = append(containers, c.Name)
}
}
} else if pod.Metadata["owner.controller"] == "Pod" {
ownerPod, err := K8s.K8sClient.CoreV1().Pods(pod.Metadata["namespaceName"]).Get(context.Background(), podOwnerName, metav1.GetOptions{})
if err == nil {
for _, c := range ownerPod.Spec.Containers {
containers = append(containers, c.Name)
}
}
}

}

for k, v := range pod.Annotations {
@@ -736,7 +738,7 @@ func (dm *KubeArmorDaemon) WatchK8sPods() {
if updateAppArmor && pod.Annotations["kubearmor-policy"] == "enabled" {
if deploymentName, ok := pod.Metadata["deploymentName"]; ok {
// patch the deployment with apparmor annotations
if err := K8s.PatchDeploymentWithAppArmorAnnotations(pod.Metadata["namespaceName"], deploymentName, appArmorAnnotations); err != nil {
if err := K8s.PatchResourceWithAppArmorAnnotations(pod.Metadata["namespaceName"], deploymentName, appArmorAnnotations, pod.Metadata["owner.controller"]); err != nil {
dm.Logger.Errf("Failed to update AppArmor Annotations (%s/%s/%s, %s)", pod.Metadata["namespaceName"], deploymentName, pod.Metadata["podName"], err.Error())
} else {
dm.Logger.Printf("Patched AppArmor Annotations (%s/%s/%s)", pod.Metadata["namespaceName"], deploymentName, pod.Metadata["podName"])
@@ -756,7 +758,7 @@ func (dm *KubeArmorDaemon) WatchK8sPods() {
if updateAppArmor && prevPolicyEnabled != "enabled" && pod.Annotations["kubearmor-policy"] == "enabled" {
if deploymentName, ok := pod.Metadata["deploymentName"]; ok {
// patch the deployment with apparmor annotations
if err := K8s.PatchDeploymentWithAppArmorAnnotations(pod.Metadata["namespaceName"], deploymentName, appArmorAnnotations); err != nil {
if err := K8s.PatchResourceWithAppArmorAnnotations(pod.Metadata["namespaceName"], deploymentName, appArmorAnnotations, pod.Metadata["owner.controller"]); err != nil {
dm.Logger.Errf("Failed to update AppArmor Annotations (%s/%s/%s, %s)", pod.Metadata["namespaceName"], deploymentName, pod.Metadata["podName"], err.Error())
} else {
dm.Logger.Printf("Patched AppArmor Annotations (%s/%s/%s)", pod.Metadata["namespaceName"], deploymentName, pod.Metadata["podName"])
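A self-contained sketch of the recursive owner resolution that getTopLevelOwner performs, exercised here against client-go's fake clientset instead of a live cluster. Only the Pod -> ReplicaSet -> Deployment hops are shown, and all object names are illustrative:

package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// topLevelOwner is a trimmed-down analogue of getTopLevelOwner: follow the
// controlling ownerReference upward until an object has none.
func topLevelOwner(c kubernetes.Interface, ns, name, kind string, refs []metav1.OwnerReference) (string, string, error) {
	var owner *metav1.OwnerReference
	for i := range refs {
		if refs[i].Controller != nil && *refs[i].Controller {
			owner = &refs[i]
			break
		}
	}
	if owner == nil {
		return name, kind, nil // no controlling owner: this is the top level
	}
	switch owner.Kind {
	case "ReplicaSet":
		rs, err := c.AppsV1().ReplicaSets(ns).Get(context.Background(), owner.Name, metav1.GetOptions{})
		if err != nil {
			return "", "", err
		}
		return topLevelOwner(c, ns, rs.Name, "ReplicaSet", rs.OwnerReferences)
	case "Deployment":
		d, err := c.AppsV1().Deployments(ns).Get(context.Background(), owner.Name, metav1.GetOptions{})
		if err != nil {
			return "", "", err
		}
		return topLevelOwner(c, ns, d.Name, "Deployment", d.OwnerReferences)
	}
	return owner.Name, owner.Kind, nil
}

func main() {
	isController := true
	deploy := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "nginx", Namespace: "default"}}
	rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{
		Name: "nginx-6d4cf56db6", Namespace: "default",
		OwnerReferences: []metav1.OwnerReference{{Kind: "Deployment", Name: "nginx", Controller: &isController}},
	}}
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "nginx-6d4cf56db6-abcde", Namespace: "default",
		OwnerReferences: []metav1.OwnerReference{{Kind: "ReplicaSet", Name: "nginx-6d4cf56db6", Controller: &isController}},
	}}
	client := fake.NewSimpleClientset(deploy, rs, pod)

	name, kind, err := topLevelOwner(client, "default", pod.Name, "Pod", pod.OwnerReferences)
	if err != nil {
		panic(err)
	}
	fmt.Printf("top-level owner: %s/%s\n", kind, name) // top-level owner: Deployment/nginx
}

This prints the Deployment rather than the intermediate ReplicaSet, matching how the daemon now records owner.controller.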
6 changes: 6 additions & 0 deletions pkg/KubeArmorOperator/deployments/operator.yaml
@@ -187,12 +187,15 @@ rules:
resources:
- deployments
- daemonsets
- replicasets
- statefulsets
verbs:
- list
- get
- create
- delete
- update
- patch
- apiGroups:
- admissionregistration.k8s.io
resources:
@@ -342,6 +345,9 @@ rules:
- apps
resources:
- deployments
- replicasets
- daemonsets
- statefulsets
verbs:
- get
- patch
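To check that the extended RBAC grants what the new patch paths need, a sketch using the SelfSubjectAccessReview API; running it under the operator's service-account credentials (the in-cluster config is an assumption of this sketch) reports whether each patch verb is allowed:

package main

import (
	"context"
	"fmt"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes this runs inside the cluster under the operator's service account.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	for _, resource := range []string{"deployments", "replicasets", "daemonsets", "statefulsets"} {
		review := &authv1.SelfSubjectAccessReview{
			Spec: authv1.SelfSubjectAccessReviewSpec{
				ResourceAttributes: &authv1.ResourceAttributes{
					Group:    "apps",
					Resource: resource,
					Verb:     "patch",
				},
			},
		}
		res, err := client.AuthorizationV1().SelfSubjectAccessReviews().Create(context.Background(), review, metav1.CreateOptions{})
		if err != nil {
			panic(err)
		}
		fmt.Printf("patch apps/%s allowed: %v\n", resource, res.Status.Allowed)
	}
}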
31 changes: 16 additions & 15 deletions pkg/KubeArmorOperator/go.mod
@@ -3,22 +3,23 @@ module github.com/kubearmor/KubeArmor/pkg/KubeArmorOperator
go 1.20

require (
github.com/kubearmor/KubeArmor/KubeArmor v0.0.0-20230801181826-f1b41d01e8c1
github.com/kubearmor/KubeArmor/deployments v0.0.0-20230711122007-7ab3a56cfd76
github.com/kubearmor/KubeArmor/pkg/KubeArmorController v0.0.0-20230711122007-7ab3a56cfd76
github.com/kubearmor/KubeArmor/KubeArmor v0.0.0-20230804143049-f7f820045b85
github.com/kubearmor/KubeArmor/deployments v0.0.0-20230804143049-f7f820045b85
github.com/kubearmor/KubeArmor/pkg/KubeArmorController v0.0.0-20230804143049-f7f820045b85
github.com/spf13/cobra v1.7.0
go.uber.org/zap v1.24.0
go.uber.org/zap v1.25.0
golang.org/x/mod v0.10.0
k8s.io/api v0.27.3
k8s.io/apiextensions-apiserver v0.27.3
k8s.io/apimachinery v0.27.3
k8s.io/client-go v0.27.3
k8s.io/api v0.27.4
k8s.io/apiextensions-apiserver v0.27.4
k8s.io/apimachinery v0.27.4
k8s.io/client-go v0.27.4
k8s.io/kubectl v0.27.2
sigs.k8s.io/controller-runtime v0.15.0
)

require (
github.com/cilium/ebpf v0.11.0 // indirect
github.com/clarketm/json v1.17.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.10.2 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
@@ -44,7 +45,7 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/pelletier/go-toml/v2 v2.0.9 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/spf13/afero v1.9.5 // indirect
github.com/spf13/cast v1.5.1 // indirect
@@ -55,11 +56,11 @@ require (
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 // indirect
golang.org/x/net v0.11.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/term v0.9.0 // indirect
golang.org/x/text v0.10.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/term v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.30.0 // indirect
@@ -69,8 +70,8 @@ require (
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20230525220651-2546d827e515 // indirect
k8s.io/utils v0.0.0-20230505201702-9f6742963106 // indirect
k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)