diff --git a/.github/ISSUE_TEMPLATE/feature-request-enhancement.md b/.github/ISSUE_TEMPLATE/feature-request-enhancement.md index 99cb9b11ef..831ecd7373 100644 --- a/.github/ISSUE_TEMPLATE/feature-request-enhancement.md +++ b/.github/ISSUE_TEMPLATE/feature-request-enhancement.md @@ -15,7 +15,7 @@ E.g., KubeArmor could support protecting the earth from malicious aliens. **Is your feature request related to a problem? Please describe the use case.** -A description of what the problem/use case is. E.g. Protect the earth from been decimated by nefarious aliens. +A description of what the problem/use case is. E.g. Protect the earth from being decimated by nefarious aliens. **Describe the solution you'd like** diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c5c99710dd..4dd9c29cdf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -8,17 +8,26 @@ 4. If the issue involves code changes you need to install development env using this [Development Guide](contribution/development_guide.md). -# Scope of contribution +# Scope of Contribution -Contributions are not necessarily in the form of code changes. Kubearmor community can benefit from contributions such as: +Contributions are not necessarily in the form of code changes. KubeArmor community can benefit from contributions such as: -1. [Policy-Templates](https://github.com/kubearmor/policy-templates): Users are welcome to contribute policy-templates for their workloads. The workloads have to be generic enough such that it benefits the wider community. E.g., if someone brings up a system policy restricting access to nginx process that would be useful in multiple scenarios and for the wider community then come up with a policy-template that is specific to your proprietary application. +1. [Policy-Templates](https://github.com/kubearmor/policy-templates) + + Users are welcome to contribute policy-templates for their workloads. The workloads have to be generic enough such that it benefits the wider community. 
E.g., if someone brings up a system policy restricting access to nginx process that would be useful in multiple scenarios and for the wider community then come up with a policy-template that is specific to your proprietary application. 2. Blogs - a. explaining feature use (KVMService, Event Auditor, Visibility, etc) - b. How to use Kubearmor to protect your workload? Specific use-cases you may have. Please do not shy away from getting as technical as you can. - c. ... put your topic of interest here ... -3. Feedback to the community. Just helping advance any discussion on KubeArmor Slack, Community meetings, office hours will make a big difference. + a. Explain the use of KubeArmor's features (KVMService, Event Auditor, Visibility, etc) + + b. Describe how to use KubeArmor to protect your workload with specific use-cases you may have. Please do not shy away from getting as technical as you can. + + c. Put your topic of interest + +3. Feedback to the community + + Just helping advance any discussion on KubeArmor Slack, Community meetings, office hours will make a big difference. + +4. Talking about KubeArmor in meetups -4. Talking about Kubearmor in meetups. We would certainly encourage users or devs of kubearmor to talk about it in open/closed forums. The community can help with logistics such as compiling/feedback on slide-decks, technical diagrams, etc. 
diff --git a/KubeArmor/.goreleaser.yaml b/KubeArmor/.goreleaser.yaml index 4f0d79ef88..4d02bde087 100644 --- a/KubeArmor/.goreleaser.yaml +++ b/KubeArmor/.goreleaser.yaml @@ -72,3 +72,5 @@ nfpms: rpm: dependencies: - bcc-tools + - policycoreutils-devel + - setools-console diff --git a/KubeArmor/core/kubeArmor.go b/KubeArmor/core/kubeArmor.go index ef696763bf..bec082353f 100644 --- a/KubeArmor/core/kubeArmor.go +++ b/KubeArmor/core/kubeArmor.go @@ -215,7 +215,7 @@ func (dm *KubeArmorDaemon) CloseLogger() bool { // InitSystemMonitor Function func (dm *KubeArmorDaemon) InitSystemMonitor() bool { - dm.SystemMonitor = mon.NewSystemMonitor(dm.Node, dm.Logger, &dm.Containers, &dm.ContainersLock, + dm.SystemMonitor = mon.NewSystemMonitor(&dm.Node, dm.Logger, &dm.Containers, &dm.ContainersLock, &dm.ActivePidMap, &dm.ActiveHostPidMap, &dm.ActivePidMapLock, &dm.ActiveHostMap, &dm.ActiveHostMapLock) if dm.SystemMonitor == nil { return false @@ -330,6 +330,7 @@ func KubeArmor() { // Enable KubeArmorHostPolicy for both VM and KVMAgent and in non-k8s env if cfg.GlobalCfg.KVMAgent || (!cfg.GlobalCfg.K8sEnv && cfg.GlobalCfg.HostPolicy) { + dm.Node.NodeName = cfg.GlobalCfg.Host dm.Node.NodeIP = kl.GetExternalIPAddr() dm.Node.Annotations = map[string]string{} diff --git a/KubeArmor/core/kubeUpdate.go b/KubeArmor/core/kubeUpdate.go index aaeef9b767..f6057a806d 100644 --- a/KubeArmor/core/kubeUpdate.go +++ b/KubeArmor/core/kubeUpdate.go @@ -43,6 +43,13 @@ func (dm *KubeArmorDaemon) HandleNodeAnnotations(node *tp.Node) { node.Annotations["kubearmor-policy"] = "audited" } } + + if kl.IsInK8sCluster() && strings.Contains(string(lsm), "selinux") { + // exception: KubeArmor in a daemonset even though SELinux is enabled + if node.Annotations["kubearmor-policy"] == "enabled" { + node.Annotations["kubearmor-policy"] = "audited" + } + } } if node.Annotations["kubearmor-policy"] == "enabled" { @@ -94,6 +101,9 @@ func (dm *KubeArmorDaemon) WatchK8sNodes() { node := tp.Node{} + node.ClusterName = 
cfg.GlobalCfg.Cluster + node.NodeName = cfg.GlobalCfg.Host + for _, address := range event.Object.Status.Addresses { if address.Type == "InternalIP" { node.NodeIP = address.Address @@ -190,7 +200,6 @@ func (dm *KubeArmorDaemon) UpdateEndPointWithPod(action string, pod tp.K8sPod) { newPoint.Containers = []string{} newPoint.AppArmorProfiles = []string{} - newPoint.SELinuxProfiles = []string{} // update containers for k := range pod.Containers { @@ -243,13 +252,6 @@ func (dm *KubeArmorDaemon) UpdateEndPointWithPod(action string, pod tp.K8sPod) { } dm.DefaultPosturesLock.Unlock() - // update selinux profile names to the endpoint - for k, v := range pod.Annotations { - if strings.HasPrefix(k, "kubearmor-selinux") { - newPoint.SELinuxProfiles = append(newPoint.SELinuxProfiles, v) - } - } - // update security policies with the identities newPoint.SecurityPolicies = dm.GetSecurityPolicies(newPoint.Identities) @@ -376,13 +378,6 @@ func (dm *KubeArmorDaemon) UpdateEndPointWithPod(action string, pod tp.K8sPod) { } dm.DefaultPosturesLock.Unlock() - // update selinux profile names to the endpoint - for k, v := range pod.Annotations { - if strings.HasPrefix(k, "kubearmor-selinux") { - newEndPoint.SELinuxProfiles = append(newEndPoint.SELinuxProfiles, v) - } - } - // get security policies according to the updated identities newEndPoint.SecurityPolicies = dm.GetSecurityPolicies(newEndPoint.Identities) @@ -509,19 +504,10 @@ func (dm *KubeArmorDaemon) WatchK8sPods() { if pod.Annotations["kubearmor-policy"] == "enabled" { pod.Annotations["kubearmor-policy"] = "audited" } - } else if lsm, err := ioutil.ReadFile("/sys/kernel/security/lsm"); err == nil { - if !strings.Contains(string(lsm), "apparmor") && !strings.Contains(string(lsm), "selinux") { - // exception: neither AppArmor nor SELinux - if pod.Annotations["kubearmor-policy"] == "enabled" { - pod.Annotations["kubearmor-policy"] = "audited" - } - } - - if kl.IsInK8sCluster() && strings.Contains(string(lsm), "selinux") { - // 
exception: KubeArmor in a daemonset even though SELinux is enabled - if pod.Annotations["kubearmor-policy"] == "enabled" { - pod.Annotations["kubearmor-policy"] = "audited" - } + } else if dm.RuntimeEnforcer != nil && dm.RuntimeEnforcer.EnforcerType == "SELinux" { + // exception: no SELinux support for containers + if pod.Annotations["kubearmor-policy"] == "enabled" { + pod.Annotations["kubearmor-policy"] = "audited" } } @@ -556,7 +542,7 @@ func (dm *KubeArmorDaemon) WatchK8sPods() { pod.Annotations["kubearmor-visibility"] = cfg.GlobalCfg.Visibility } - // == Skip if already patched == // + // == AppArmor == // if event.Type == "ADDED" || event.Type == "MODIFIED" { exist := false @@ -577,8 +563,6 @@ func (dm *KubeArmorDaemon) WatchK8sPods() { } } - // == AppArmor == // - if dm.RuntimeEnforcer != nil && dm.RuntimeEnforcer.EnforcerType == "AppArmor" { appArmorAnnotations := map[string]string{} updateAppArmor := false @@ -647,71 +631,6 @@ func (dm *KubeArmorDaemon) WatchK8sPods() { } } - // == SELinux == // - - if dm.RuntimeEnforcer != nil && dm.RuntimeEnforcer.EnforcerType == "SELinux" { - seLinuxAnnotations := map[string]string{} - updateSELinux := false - - for k, v := range pod.Annotations { - if strings.HasPrefix(k, "kubearmor-selinux") { - containerName := strings.Split(k, "/")[1] - seLinuxAnnotations[containerName] = v - } - } - - for _, container := range event.Object.Spec.Containers { - if _, ok := seLinuxAnnotations[container.Name]; !ok { - seLinuxAnnotations[container.Name] = "kubearmor-" + pod.Metadata["namespaceName"] + "-" + container.Name - updateSELinux = true - } - } - - if event.Type == "ADDED" { - // update selinux profiles - dm.RuntimeEnforcer.UpdateSELinuxProfiles(pod.Metadata["podName"], "ADDED", seLinuxAnnotations) - - if updateSELinux && pod.Annotations["kubearmor-policy"] == "enabled" { - if deploymentName, ok := pod.Metadata["deploymentName"]; ok { - // patch the deployment with selinux annotations - if err := 
K8s.PatchDeploymentWithSELinuxAnnotations(pod.Metadata["namespaceName"], deploymentName, seLinuxAnnotations); err != nil { - dm.Logger.Errf("Failed to update SELinux Annotations for KubeArmor (%s/%s/%s, %s)", pod.Metadata["namespaceName"], deploymentName, pod.Metadata["podName"], err.Error()) - } else { - dm.Logger.Printf("Patched SELinux Annotations for KubeArmor (%s/%s/%s)", pod.Metadata["namespaceName"], deploymentName, pod.Metadata["podName"]) - } - pod.Annotations["kubearmor-policy"] = "patched" - } - } - } else if event.Type == "MODIFIED" { - for _, k8spod := range dm.K8sPods { - if k8spod.Metadata["namespaceName"] == pod.Metadata["namespaceName"] && k8spod.Metadata["podName"] == pod.Metadata["podName"] { - prevPolicyEnabled := "disabled" - - if val, ok := k8spod.Annotations["kubearmor-policy"]; ok { - prevPolicyEnabled = val - } - - if updateSELinux && prevPolicyEnabled != "enabled" && pod.Annotations["kubearmor-policy"] == "enabled" { - if deploymentName, ok := pod.Metadata["deploymentName"]; ok { - // patch the deployment with selinux annotations - if err := K8s.PatchDeploymentWithSELinuxAnnotations(pod.Metadata["namespaceName"], deploymentName, seLinuxAnnotations); err != nil { - dm.Logger.Errf("Failed to update SELinux Annotations for KubeArmor (%s/%s/%s, %s)", pod.Metadata["namespaceName"], deploymentName, pod.Metadata["podName"], err.Error()) - } else { - dm.Logger.Printf("Patched SELinux Annotations for KubeArmor (%s/%s/%s)", pod.Metadata["namespaceName"], deploymentName, pod.Metadata["podName"]) - } - pod.Annotations["kubearmor-policy"] = "patched" - } - } - - break - } - } - } else if event.Type == "DELETED" { - // update selinux profiles - dm.RuntimeEnforcer.UpdateSELinuxProfiles(pod.Metadata["podName"], "DELETED", seLinuxAnnotations) - } - } - dm.K8sPodsLock.Lock() if event.Type == "ADDED" { diff --git a/KubeArmor/enforcer/appArmorHostProfile.go b/KubeArmor/enforcer/appArmorHostProfile.go index 308fb6a5c9..71bb653118 100644 --- 
a/KubeArmor/enforcer/appArmorHostProfile.go +++ b/KubeArmor/enforcer/appArmorHostProfile.go @@ -575,8 +575,6 @@ func (ae *AppArmorEnforcer) GenerateHostProfileBody(securityPolicies []tp.HostSe for _, path := range secPolicy.Spec.Process.MatchPaths { if path.Action == "Allow" { ae.AllowedHostProcessMatchPaths(path, fromSources) - } else if path.Action == "Audit" { - // } else if path.Action == "Block" { ae.BlockedHostProcessMatchPaths(path, &processBlackList, fromSources) } @@ -586,8 +584,6 @@ func (ae *AppArmorEnforcer) GenerateHostProfileBody(securityPolicies []tp.HostSe for _, dir := range secPolicy.Spec.Process.MatchDirectories { if dir.Action == "Allow" { ae.AllowedHostProcessMatchDirectories(dir, fromSources) - } else if dir.Action == "Audit" { - // } else if dir.Action == "Block" { ae.BlockedHostProcessMatchDirectories(dir, &processBlackList, fromSources) } @@ -595,9 +591,7 @@ func (ae *AppArmorEnforcer) GenerateHostProfileBody(securityPolicies []tp.HostSe } if len(secPolicy.Spec.Process.MatchPatterns) > 0 { for _, pat := range secPolicy.Spec.Process.MatchPatterns { - if pat.Action == "Audit" { - // - } else if pat.Action == "Block" { + if pat.Action == "Block" { ae.BlockedHostProcessMatchPatterns(pat, &processBlackList) } } @@ -607,8 +601,6 @@ func (ae *AppArmorEnforcer) GenerateHostProfileBody(securityPolicies []tp.HostSe for _, path := range secPolicy.Spec.File.MatchPaths { if path.Action == "Allow" { ae.AllowedHostFileMatchPaths(path, fromSources) - } else if path.Action == "Audit" { - // } else if path.Action == "Block" { ae.BlockedHostFileMatchPaths(path, &fileBlackList, fromSources) } @@ -618,8 +610,6 @@ func (ae *AppArmorEnforcer) GenerateHostProfileBody(securityPolicies []tp.HostSe for _, dir := range secPolicy.Spec.File.MatchDirectories { if dir.Action == "Allow" { ae.AllowedHostFileMatchDirectories(dir, fromSources) - } else if dir.Action == "Audit" { - // } else if dir.Action == "Block" { ae.BlockedHostFileMatchDirectories(dir, &fileBlackList, 
fromSources) } @@ -627,9 +617,7 @@ func (ae *AppArmorEnforcer) GenerateHostProfileBody(securityPolicies []tp.HostSe } if len(secPolicy.Spec.File.MatchPatterns) > 0 { for _, pat := range secPolicy.Spec.File.MatchPatterns { - if pat.Action == "Audit" { - // - } else if pat.Action == "Block" { + if pat.Action == "Block" { ae.BlockedHostFileMatchPatterns(pat, &fileBlackList) } } @@ -718,15 +706,15 @@ func (ae *AppArmorEnforcer) GenerateHostProfileBody(securityPolicies []tp.HostSe file = false } - if defaultPosture.FileAction != "block" && file { + if file { bodyFromSource = bodyFromSource + " file,\n" } - if defaultPosture.FileAction != "block" && network { + if network { bodyFromSource = bodyFromSource + " network,\n" } - if defaultPosture.CapabilitiesAction != "block" && capability { + if capability { bodyFromSource = bodyFromSource + " capability,\n" } diff --git a/KubeArmor/enforcer/appArmorProfile.go b/KubeArmor/enforcer/appArmorProfile.go index 5a26da6c62..c67d2f9b0f 100644 --- a/KubeArmor/enforcer/appArmorProfile.go +++ b/KubeArmor/enforcer/appArmorProfile.go @@ -752,8 +752,6 @@ func (ae *AppArmorEnforcer) GenerateProfileBody(securityPolicies []tp.SecurityPo for _, path := range secPolicy.Spec.Process.MatchPaths { if path.Action == "Allow" { ae.AllowedProcessMatchPaths(path, &processWhiteList, fromSources) - } else if path.Action == "Audit" { - // } else if path.Action == "Block" { ae.BlockedProcessMatchPaths(path, &processBlackList, fromSources) } @@ -763,8 +761,6 @@ func (ae *AppArmorEnforcer) GenerateProfileBody(securityPolicies []tp.SecurityPo for _, dir := range secPolicy.Spec.Process.MatchDirectories { if dir.Action == "Allow" { ae.AllowedProcessMatchDirectories(dir, &processWhiteList, fromSources) - } else if dir.Action == "Audit" { - // } else if dir.Action == "Block" { ae.BlockedProcessMatchDirectories(dir, &processBlackList, fromSources) } @@ -774,8 +770,6 @@ func (ae *AppArmorEnforcer) GenerateProfileBody(securityPolicies []tp.SecurityPo for _, pat := 
range secPolicy.Spec.Process.MatchPatterns { if pat.Action == "Allow" { ae.AllowedProcessMatchPatterns(pat, &processWhiteList) - } else if pat.Action == "Audit" { - // } else if pat.Action == "Block" { ae.BlockedProcessMatchPatterns(pat, &processBlackList) } @@ -786,8 +780,6 @@ func (ae *AppArmorEnforcer) GenerateProfileBody(securityPolicies []tp.SecurityPo for _, path := range secPolicy.Spec.File.MatchPaths { if path.Action == "Allow" { ae.AllowedFileMatchPaths(path, &fileWhiteList, fromSources) - } else if path.Action == "Audit" { - // } else if path.Action == "Block" { ae.BlockedFileMatchPaths(path, &fileBlackList, fromSources) } @@ -797,8 +789,6 @@ func (ae *AppArmorEnforcer) GenerateProfileBody(securityPolicies []tp.SecurityPo for _, dir := range secPolicy.Spec.File.MatchDirectories { if dir.Action == "Allow" { ae.AllowedFileMatchDirectories(dir, &fileWhiteList, fromSources) - } else if dir.Action == "Audit" { - // } else if dir.Action == "Block" { ae.BlockedFileMatchDirectories(dir, &fileBlackList, fromSources) } @@ -808,8 +798,6 @@ func (ae *AppArmorEnforcer) GenerateProfileBody(securityPolicies []tp.SecurityPo for _, pat := range secPolicy.Spec.File.MatchPatterns { if pat.Action == "Allow" { ae.AllowedFileMatchPatterns(pat, &fileWhiteList) - } else if pat.Action == "Audit" { - // } else if pat.Action == "Block" { ae.BlockedFileMatchPatterns(pat, &fileBlackList) } diff --git a/KubeArmor/feeder/feeder.go b/KubeArmor/feeder/feeder.go index 59f341c180..422cef7e9c 100644 --- a/KubeArmor/feeder/feeder.go +++ b/KubeArmor/feeder/feeder.go @@ -538,7 +538,7 @@ func (fd *Feeder) PushMessage(level, message string) { func (fd *Feeder) PushLog(log tp.Log) { log = fd.UpdateMatchedPolicy(log) - if log.UpdatedTime == "" { + if log.Source == "" { return } @@ -571,8 +571,8 @@ func (fd *Feeder) PushLog(log tp.Log) { pbAlert.Timestamp = log.Timestamp pbAlert.UpdatedTime = log.UpdatedTime - pbAlert.ClusterName = cfg.GlobalCfg.Cluster - pbAlert.HostName = cfg.GlobalCfg.Host + 
pbAlert.ClusterName = fd.Node.ClusterName + pbAlert.HostName = fd.Node.NodeName pbAlert.NamespaceName = log.NamespaceName pbAlert.PodName = log.PodName @@ -636,14 +636,14 @@ func (fd *Feeder) PushLog(log tp.Log) { default: } } - } else { // ContainerLog + } else { // ContainerLog || HostLog pbLog := pb.Log{} pbLog.Timestamp = log.Timestamp pbLog.UpdatedTime = log.UpdatedTime - pbLog.ClusterName = cfg.GlobalCfg.Cluster - pbLog.HostName = cfg.GlobalCfg.Host + pbLog.ClusterName = fd.Node.ClusterName + pbLog.HostName = fd.Node.NodeName pbLog.NamespaceName = log.NamespaceName pbLog.PodName = log.PodName diff --git a/KubeArmor/feeder/policyMatcher.go b/KubeArmor/feeder/policyMatcher.go index e12b001c87..a11af109dd 100644 --- a/KubeArmor/feeder/policyMatcher.go +++ b/KubeArmor/feeder/policyMatcher.go @@ -24,11 +24,11 @@ import ( func getProtocolFromName(proto string) string { switch strings.ToLower(proto) { case "tcp": - return "type=SOCK_STREAM" + return "protocol=TCP" case "udp": - return "type=SOCK_DGRAM" + return "protocol=UDP" case "icmp": - return "type=SOCK_RAW" + return "protocol=ICMP" default: return "unknown" } @@ -51,7 +51,7 @@ func getOperationAndCapabilityFromName(capName string) (op, cap string) { switch strings.ToLower(capName) { case "net_raw": op = "Network" - cap = "type=SOCK_RAW" + cap = "SOCK_RAW" default: return "", "unknown" } @@ -77,7 +77,9 @@ func (fd *Feeder) newMatchPolicy(policyEnabled int, policyName, src string, mp i match.OwnerOnly = ppt.OwnerOnly - if policyEnabled == tp.KubeArmorPolicyAudited && strings.HasPrefix(ppt.Action, "Block") { + if policyEnabled == tp.KubeArmorPolicyAudited && ppt.Action == "Allow" { + match.Action = "Audit (" + ppt.Action + ")" + } else if policyEnabled == tp.KubeArmorPolicyAudited && ppt.Action == "Block" { match.Action = "Audit (" + ppt.Action + ")" } else { match.Action = ppt.Action @@ -94,7 +96,9 @@ func (fd *Feeder) newMatchPolicy(policyEnabled int, policyName, src string, mp i match.OwnerOnly = pdt.OwnerOnly 
match.Recursive = pdt.Recursive - if policyEnabled == tp.KubeArmorPolicyAudited && strings.HasPrefix(pdt.Action, "Block") { + if policyEnabled == tp.KubeArmorPolicyAudited && pdt.Action == "Allow" { + match.Action = "Audit (" + pdt.Action + ")" + } else if policyEnabled == tp.KubeArmorPolicyAudited && pdt.Action == "Block" { match.Action = "Audit (" + pdt.Action + ")" } else { match.Action = pdt.Action @@ -110,7 +114,9 @@ func (fd *Feeder) newMatchPolicy(policyEnabled int, policyName, src string, mp i match.OwnerOnly = ppt.OwnerOnly - if policyEnabled == tp.KubeArmorPolicyAudited && strings.HasPrefix(ppt.Action, "Block") { + if policyEnabled == tp.KubeArmorPolicyAudited && ppt.Action == "Allow" { + match.Action = "Audit (" + ppt.Action + ")" + } else if policyEnabled == tp.KubeArmorPolicyAudited && ppt.Action == "Block" { match.Action = "Audit (" + ppt.Action + ")" } else { match.Action = ppt.Action @@ -127,7 +133,9 @@ func (fd *Feeder) newMatchPolicy(policyEnabled int, policyName, src string, mp i match.OwnerOnly = fpt.OwnerOnly match.ReadOnly = fpt.ReadOnly - if policyEnabled == tp.KubeArmorPolicyAudited && strings.HasPrefix(fpt.Action, "Block") { + if policyEnabled == tp.KubeArmorPolicyAudited && fpt.Action == "Allow" { + match.Action = "Audit (" + fpt.Action + ")" + } else if policyEnabled == tp.KubeArmorPolicyAudited && fpt.Action == "Block" { match.Action = "Audit (" + fpt.Action + ")" } else { match.Action = fpt.Action @@ -145,7 +153,9 @@ func (fd *Feeder) newMatchPolicy(policyEnabled int, policyName, src string, mp i match.ReadOnly = fdt.ReadOnly match.Recursive = fdt.Recursive - if policyEnabled == tp.KubeArmorPolicyAudited && strings.HasPrefix(fdt.Action, "Block") { + if policyEnabled == tp.KubeArmorPolicyAudited && fdt.Action == "Allow" { + match.Action = "Audit (" + fdt.Action + ")" + } else if policyEnabled == tp.KubeArmorPolicyAudited && fdt.Action == "Block" { match.Action = "Audit (" + fdt.Action + ")" } else { match.Action = fdt.Action @@ -161,7 
+171,9 @@ func (fd *Feeder) newMatchPolicy(policyEnabled int, policyName, src string, mp i match.OwnerOnly = fpt.OwnerOnly match.ReadOnly = fpt.ReadOnly - if policyEnabled == tp.KubeArmorPolicyAudited && strings.HasPrefix(fpt.Action, "Block") { + if policyEnabled == tp.KubeArmorPolicyAudited && fpt.Action == "Allow" { + match.Action = "Audit (" + fpt.Action + ")" + } else if policyEnabled == tp.KubeArmorPolicyAudited && fpt.Action == "Block" { match.Action = "Audit (" + fpt.Action + ")" } else { match.Action = fpt.Action @@ -175,9 +187,11 @@ func (fd *Feeder) newMatchPolicy(policyEnabled int, policyName, src string, mp i match.Resource = getProtocolFromName(npt.Protocol) match.ResourceType = "Protocol" - if policyEnabled == tp.KubeArmorPolicyEnabled && fd.IsGKE && strings.HasPrefix(npt.Action, "Block") { + if policyEnabled == tp.KubeArmorPolicyAudited && npt.Action == "Allow" { + match.Action = "Audit (" + npt.Action + ")" + } else if policyEnabled == tp.KubeArmorPolicyAudited && npt.Action == "Block" { match.Action = "Audit (" + npt.Action + ")" - } else if policyEnabled == tp.KubeArmorPolicyAudited && strings.HasPrefix(npt.Action, "Block") { + } else if policyEnabled == tp.KubeArmorPolicyEnabled && fd.IsGKE && npt.Action == "Block" { match.Action = "Audit (" + npt.Action + ")" } else { match.Action = npt.Action @@ -193,7 +207,9 @@ func (fd *Feeder) newMatchPolicy(policyEnabled int, policyName, src string, mp i match.Resource = cap match.ResourceType = "Capability" - if policyEnabled == tp.KubeArmorPolicyAudited && strings.HasPrefix(cct.Action, "Block") { + if policyEnabled == tp.KubeArmorPolicyAudited && cct.Action == "Allow" { + match.Action = "Audit (" + cct.Action + ")" + } else if policyEnabled == tp.KubeArmorPolicyAudited && cct.Action == "Block" { match.Action = "Audit (" + cct.Action + ")" } else { match.Action = cct.Action @@ -436,7 +452,7 @@ func (fd *Feeder) UpdateSecurityPolicies(action string, endPoint tp.EndPoint) { // UpdateHostSecurityPolicies 
Function func (fd *Feeder) UpdateHostSecurityPolicies(action string, secPolicies []tp.HostSecurityPolicy) { if action == "DELETED" { - delete(fd.SecurityPolicies, cfg.GlobalCfg.Host) + delete(fd.SecurityPolicies, fd.Node.NodeName) return } @@ -650,7 +666,7 @@ func (fd *Feeder) UpdateHostSecurityPolicies(action string, secPolicies []tp.Hos } fd.SecurityPoliciesLock.Lock() - fd.SecurityPolicies[cfg.GlobalCfg.Host] = matches + fd.SecurityPolicies[fd.Node.NodeName] = matches fd.SecurityPoliciesLock.Unlock() } @@ -706,6 +722,14 @@ func setLogFields(log *tp.Log, action string, considerPosture, visibility, conta // == Policy Matches == // // ==================== // +func getDirectoryPart(path string) string { + dirs := strings.Split(path, "/") + if len(dirs) > 1 { + return strings.Join(dirs[0:len(dirs)-2], "/") + } + return "__no_directory__" +} + // UpdateMatchedPolicy Function func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { allowProcPolicy := "" @@ -740,7 +764,7 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { firstLogSource := strings.Replace(strings.Split(log.Source, " ")[0], "./", "", 1) firstLogResource := strings.Replace(strings.Split(log.Resource, " ")[0], "./", "", 1) - if secPolicy.Action == "Allow" { + if strings.Contains(secPolicy.Action, "Allow") { if secPolicy.Source == "" || (secPolicy.IsFromSource && ((secPolicy.Operation == "Process" && (secPolicy.Source == log.ParentProcessName || secPolicy.Source == log.ProcessName)) || // ./bash -> xxx || ./bash -c xxx (secPolicy.Operation != "Process" && (secPolicy.Source == log.ProcessName || strings.Contains(secPolicy.Source, firstLogSource))))) { @@ -841,13 +865,15 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { ((secPolicy.Operation == "Process" && (secPolicy.Source == log.ParentProcessName || secPolicy.Source == log.ProcessName)) || // ./bash -> xxx || ./bash -c xxx (secPolicy.Operation == "File" && (secPolicy.Source == log.ProcessName || strings.Contains(secPolicy.Source, 
firstLogSource))))) { - if matched || secPolicy.Resource == log.Resource || (secPolicy.ResourceType == "Path" && strings.HasSuffix(secPolicy.Resource, firstLogResource)) || - (secPolicy.ResourceType == "Directory" && strings.HasPrefix(firstLogResource, secPolicy.Resource) && !strings.Contains(strings.Replace(firstLogResource, secPolicy.Resource, "", 1), "/")) || - (secPolicy.ResourceType == "Directory" && secPolicy.Recursive && strings.HasPrefix(firstLogResource, secPolicy.Resource) && strings.Contains(strings.Replace(firstLogResource, secPolicy.Resource, "", 1), "/")) { + if matched || + (secPolicy.ResourceType == "Path" && secPolicy.Resource == log.Resource) || // exact path match + (secPolicy.ResourceType == "Path" && strings.HasSuffix(secPolicy.Resource, firstLogResource)) || // file name match + (secPolicy.ResourceType == "Directory" && strings.HasPrefix(log.Resource, secPolicy.Resource)) || // exact directory match (non-recursive and recursive) + (secPolicy.ResourceType == "Directory" && strings.HasSuffix(secPolicy.Resource, getDirectoryPart(firstLogResource))) { // suffix match (non-recursive) - if secPolicy.Action == "Audit" && log.Result == "Passed" { - matchedFlags := false + matchedFlags := false + if (secPolicy.Action == "Audit" && log.Result == "Passed") || (log.PolicyEnabled == tp.KubeArmorPolicyAudited && strings.Contains(secPolicy.Action, "Allow")) { if secPolicy.ReadOnly && log.Resource != "" && secPolicy.OwnerOnly && log.MergedDir != "" { // read only && owner only if strings.Contains(log.Data, "O_RDONLY") && strconv.Itoa(int(log.UID)) == getFileProcessUID(log.MergedDir+log.Resource) { @@ -868,26 +894,54 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { matchedFlags = true } - if matchedFlags { - log.PolicyName = secPolicy.PolicyName - log.Severity = secPolicy.Severity - - if len(secPolicy.Tags) > 0 { - log.Tags = strings.Join(secPolicy.Tags[:], ",") + } else if log.PolicyEnabled == tp.KubeArmorPolicyAudited && 
strings.Contains(secPolicy.Action, "Block") { + if secPolicy.ReadOnly && log.Resource != "" && secPolicy.OwnerOnly && log.MergedDir != "" { + // read only && owner only + if strings.Contains(log.Data, "O_RDONLY") && strconv.Itoa(int(log.UID)) == getFileProcessUID(log.MergedDir+log.Resource) { + matchedFlags = true } - - if len(secPolicy.Message) > 0 { - log.Message = secPolicy.Message + } else if secPolicy.ReadOnly && log.Resource != "" { + // read only + if strings.Contains(log.Data, "O_RDONLY") { + matchedFlags = true + } + } else if secPolicy.OwnerOnly && log.MergedDir != "" { + // owner only + if strconv.Itoa(int(log.UID)) == getFileProcessUID(log.MergedDir+log.Resource) { + matchedFlags = true } + } + // otherwise, being supposed to be blocked + } - log.Type = "MatchedPolicy" - log.Action = secPolicy.Action + if (matchedFlags && secPolicy.Action == "Audit" && log.Result == "Passed") || (!matchedFlags && log.PolicyEnabled == tp.KubeArmorPolicyAudited && (strings.Contains(secPolicy.Action, "Allow") || strings.Contains(secPolicy.Action, "Block"))) { + log.Type = "MatchedPolicy" + + log.PolicyName = secPolicy.PolicyName + log.Severity = secPolicy.Severity - continue + if len(secPolicy.Tags) > 0 { + log.Tags = strings.Join(secPolicy.Tags[:], ",") + } + + if len(secPolicy.Message) > 0 { + log.Message = secPolicy.Message } + + if secPolicy.Action == "Audit" || log.PolicyEnabled == tp.KubeArmorPolicyAudited { + log.Enforcer = "eBPF Monitor" + } else { + log.Enforcer = fd.Enforcer + } + + log.Action = secPolicy.Action + + continue } - if log.Result != "Passed" { + if log.PolicyEnabled == tp.KubeArmorPolicyEnabled && log.Result != "Passed" { + log.Type = "MatchedPolicy" + log.PolicyName = secPolicy.PolicyName log.Severity = secPolicy.Severity @@ -899,7 +953,7 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { log.Message = secPolicy.Message } - log.Type = "MatchedPolicy" + log.Enforcer = fd.Enforcer log.Action = secPolicy.Action continue @@ -909,23 +963,35 @@ 
func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { } case "Network": if secPolicy.Operation == log.Operation { - if (secPolicy.Source == "" || (secPolicy.IsFromSource && (secPolicy.Source == log.ProcessName || strings.Contains(secPolicy.Source, firstLogSource)))) && strings.Contains(log.Resource, secPolicy.Resource) { + if secPolicy.Source == "" || (secPolicy.IsFromSource && (secPolicy.Source == log.ProcessName || strings.Contains(secPolicy.Source, firstLogSource))) { - log.PolicyName = secPolicy.PolicyName - log.Severity = secPolicy.Severity + if strings.Contains(log.Resource, secPolicy.Resource) { - if len(secPolicy.Tags) > 0 { - log.Tags = strings.Join(secPolicy.Tags[:], ",") - } + if (log.PolicyEnabled == tp.KubeArmorPolicyEnabled && log.Result != "Passed") || (secPolicy.Action == "Audit" && log.Result == "Passed") || (log.PolicyEnabled == tp.KubeArmorPolicyAudited && strings.Contains(secPolicy.Action, "Block")) { + log.Type = "MatchedPolicy" - if len(secPolicy.Message) > 0 { - log.Message = secPolicy.Message - } + log.PolicyName = secPolicy.PolicyName + log.Severity = secPolicy.Severity + + if len(secPolicy.Tags) > 0 { + log.Tags = strings.Join(secPolicy.Tags[:], ",") + } - log.Type = "MatchedPolicy" - log.Action = secPolicy.Action + if len(secPolicy.Message) > 0 { + log.Message = secPolicy.Message + } + + if secPolicy.Action == "Audit" || log.PolicyEnabled == tp.KubeArmorPolicyAudited { + log.Enforcer = "eBPF Monitor" + } else { + log.Enforcer = fd.Enforcer + } - continue + log.Action = secPolicy.Action + + continue + } + } } } } @@ -933,7 +999,7 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { fd.SecurityPoliciesLock.RUnlock() - if log.Result != "Passed" { + if log.Result == "Operation not permitted" || log.Result == "Permission denied" { if log.Operation == "Process" && allowProcPolicy == "" { considerFilePosture = true } else if log.Operation == "File" && allowFilePolicy == "" { @@ -946,65 +1012,10 @@ func (fd *Feeder) 
UpdateMatchedPolicy(log tp.Log) tp.Log { if log.ContainerID != "" { // container if log.Type == "" { - if log.PolicyEnabled == tp.KubeArmorPolicyEnabled && log.Result != "Passed" { + if (log.PolicyEnabled == tp.KubeArmorPolicyEnabled && log.Result != "Passed") || (log.PolicyEnabled == tp.KubeArmorPolicyAudited) { if log.Operation == "Process" && allowProcPolicy != "" { - log.PolicyName = allowProcPolicy - log.Severity = allowProcPolicySeverity - - if len(allowProcTags) > 0 { - log.Tags = strings.Join(allowProcTags[:], ",") - } - - if len(allowProcMessage) > 0 { - log.Message = allowProcMessage - } - - log.Type = "MatchedPolicy" - log.Enforcer = fd.Enforcer - log.Action = "Allow" - - return log - - } else if log.Operation == "File" && allowFilePolicy != "" { - log.PolicyName = allowFilePolicy - log.Severity = allowFilePolicySeverity - - if len(allowFileTags) > 0 { - log.Tags = strings.Join(allowFileTags[:], ",") - } - - if len(allowFileMessage) > 0 { - log.Message = allowFileMessage - } - - log.Type = "MatchedPolicy" - log.Enforcer = fd.Enforcer - log.Action = "Allow" - - return log - - } else if log.Operation == "Network" && allowNetworkPolicy != "" { - log.PolicyName = allowNetworkPolicy - log.Severity = allowNetworkPolicySeverity - - if len(allowNetworkTags) > 0 { - log.Tags = strings.Join(allowNetworkTags[:], ",") - } - - if len(allowNetworkMessage) > 0 { - log.Message = allowNetworkMessage - } - log.Type = "MatchedPolicy" - log.Enforcer = fd.Enforcer - log.Action = "Allow" - return log - } - } - - if log.PolicyEnabled == tp.KubeArmorPolicyAudited { - if log.Operation == "Process" && allowProcPolicy != "" { log.PolicyName = allowProcPolicy log.Severity = allowProcPolicySeverity @@ -1016,13 +1027,19 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { log.Message = allowProcMessage } - log.Type = "MatchedPolicy" - log.Enforcer = "eBPF Monitor" - log.Action = "Audit (Allow)" + if log.PolicyEnabled == tp.KubeArmorPolicyAudited { + log.Enforcer = "eBPF 
Monitor" + log.Action = "Audit (Allow)" + } else { + log.Enforcer = fd.Enforcer + log.Action = "Allow" + } return log } else if log.Operation == "File" && allowFilePolicy != "" { + log.Type = "MatchedPolicy" + log.PolicyName = allowFilePolicy log.Severity = allowFilePolicySeverity @@ -1034,13 +1051,19 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { log.Message = allowFileMessage } - log.Type = "MatchedPolicy" - log.Enforcer = "eBPF Monitor" - log.Action = "Audit (Allow)" + if log.PolicyEnabled == tp.KubeArmorPolicyAudited { + log.Enforcer = "eBPF Monitor" + log.Action = "Audit (Allow)" + } else { + log.Enforcer = fd.Enforcer + log.Action = "Allow" + } return log } else if log.Operation == "Network" && allowNetworkPolicy != "" { + log.Type = "MatchedPolicy" + log.PolicyName = allowNetworkPolicy log.Severity = allowNetworkPolicySeverity @@ -1052,9 +1075,13 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { log.Message = allowNetworkMessage } - log.Type = "MatchedPolicy" - log.Enforcer = "eBPF Monitor" - log.Action = "Audit (Allow)" + if log.PolicyEnabled == tp.KubeArmorPolicyAudited { + log.Enforcer = "eBPF Monitor" + log.Action = "Audit (Allow)" + } else { + log.Enforcer = fd.Enforcer + log.Action = "Allow" + } return log } @@ -1091,88 +1118,17 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { } } else if log.Type == "MatchedPolicy" { - if log.PolicyEnabled == tp.KubeArmorPolicyAudited { - if log.Action == "Block" { - log.Enforcer = "eBPF Monitor" - log.Action = "Audit (Block)" - } - } - if log.Action == "Allow" && log.Result == "Passed" { return tp.Log{} } - - if log.Enforcer == "" { - if log.Action == "Audit" { - log.Enforcer = "eBPF Monitor" - } else { - log.Enforcer = fd.Enforcer - } - } - return log } } else { // host if log.Type == "" { - if fd.Node.PolicyEnabled == tp.KubeArmorPolicyEnabled && log.Result != "Passed" { + if (log.PolicyEnabled == tp.KubeArmorPolicyEnabled && log.Result != "Passed") || (log.PolicyEnabled == 
tp.KubeArmorPolicyAudited) { if log.Operation == "Process" && allowProcPolicy != "" { - log.PolicyName = allowProcPolicy - log.Severity = allowProcPolicySeverity - - if len(allowProcTags) > 0 { - log.Tags = strings.Join(allowProcTags[:], ",") - } - - if len(allowProcMessage) > 0 { - log.Message = allowProcMessage - } - log.Type = "MatchedHostPolicy" - log.Enforcer = fd.Enforcer - log.Action = "Allow" - - return log - } else if log.Operation == "File" && allowFilePolicy != "" { - log.PolicyName = allowFilePolicy - log.Severity = allowFilePolicySeverity - - if len(allowFileTags) > 0 { - log.Tags = strings.Join(allowFileTags[:], ",") - } - - if len(allowFileMessage) > 0 { - log.Message = allowFileMessage - } - - log.Type = "MatchedHostPolicy" - log.Enforcer = fd.Enforcer - log.Action = "Allow" - - return log - - } else if log.Operation == "Network" && allowNetworkPolicy != "" { - log.PolicyName = allowNetworkPolicy - log.Severity = allowNetworkPolicySeverity - - if len(allowNetworkTags) > 0 { - log.Tags = strings.Join(allowNetworkTags[:], ",") - } - - if len(allowNetworkMessage) > 0 { - log.Message = allowNetworkMessage - } - - log.Type = "MatchedHostPolicy" - log.Enforcer = fd.Enforcer - log.Action = "Allow" - - return log - } - } - - if fd.Node.PolicyEnabled == tp.KubeArmorPolicyAudited { - if log.Operation == "Process" && allowProcPolicy != "" { log.PolicyName = allowProcPolicy log.Severity = allowProcPolicySeverity @@ -1184,13 +1140,19 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { log.Message = allowProcMessage } - log.Type = "MatchedHostPolicy" - log.Enforcer = "eBPF Monitor" - log.Action = "Audit (Allow)" + if log.PolicyEnabled == tp.KubeArmorPolicyAudited { + log.Enforcer = "eBPF Monitor" + log.Action = "Audit (Allow)" + } else { + log.Enforcer = fd.Enforcer + log.Action = "Allow" + } return log } else if log.Operation == "File" && allowFilePolicy != "" { + log.Type = "MatchedHostPolicy" + log.PolicyName = allowFilePolicy log.Severity = 
allowFilePolicySeverity @@ -1202,13 +1164,19 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { log.Message = allowFileMessage } - log.Type = "MatchedHostPolicy" - log.Enforcer = "eBPF Monitor" - log.Action = "Audit (Allow)" + if log.PolicyEnabled == tp.KubeArmorPolicyAudited { + log.Enforcer = "eBPF Monitor" + log.Action = "Audit (Allow)" + } else { + log.Enforcer = fd.Enforcer + log.Action = "Allow" + } return log } else if log.Operation == "Network" && allowNetworkPolicy != "" { + log.Type = "MatchedHostPolicy" + log.PolicyName = allowNetworkPolicy log.Severity = allowNetworkPolicySeverity @@ -1220,15 +1188,19 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { log.Message = allowNetworkMessage } - log.Type = "MatchedHostPolicy" - log.Enforcer = "eBPF Monitor" - log.Action = "Audit (Allow)" + if log.PolicyEnabled == tp.KubeArmorPolicyAudited { + log.Enforcer = "eBPF Monitor" + log.Action = "Audit (Allow)" + } else { + log.Enforcer = fd.Enforcer + log.Action = "Allow" + } return log } } - if log.Result != "Passed" { + if log.Result == "Operation not permitted" || log.Result == "Permission denied" { if log.Operation == "Process" && allowProcPolicy == "" { considerFilePosture = true } else if log.Operation == "File" && allowFilePolicy == "" { @@ -1257,22 +1229,12 @@ func (fd *Feeder) UpdateMatchedPolicy(log tp.Log) tp.Log { } } else if log.Type == "MatchedPolicy" { - if log.PolicyEnabled == tp.KubeArmorPolicyAudited { - if log.Action == "Block" { - log.Enforcer = "eBPF Monitor" - log.Action = "Audit (Block)" - } - } + log.Type = "MatchedHostPolicy" if log.Action == "Allow" && log.Result == "Passed" { return tp.Log{} } - log.Type = "MatchedHostPolicy" - if log.Enforcer == "" { - log.Enforcer = fd.Enforcer - } - return log } } diff --git a/KubeArmor/monitor/hostLogUpdate.go b/KubeArmor/monitor/hostLogUpdate.go index 984747576c..6d16bfc1b2 100644 --- a/KubeArmor/monitor/hostLogUpdate.go +++ b/KubeArmor/monitor/hostLogUpdate.go @@ -92,7 +92,7 @@ 
func (mon *SystemMonitor) UpdateHostLogs() { var sockDomain string var sockType string - var sockProtocol string + var sockProtocol int32 if val, ok := msg.ContextArgs[0].(string); ok { sockDomain = val @@ -101,11 +101,11 @@ func (mon *SystemMonitor) UpdateHostLogs() { sockType = val } if val, ok := msg.ContextArgs[2].(int32); ok { - sockProtocol = strconv.Itoa(int(val)) + sockProtocol = val } log.Operation = "Network" - log.Resource = "domain=" + sockDomain + " type=" + sockType + " protocol=" + sockProtocol + log.Resource = "domain=" + sockDomain + " type=" + sockType + " protocol=" + getProtocol(sockProtocol) log.Data = "syscall=" + getSyscallName(int32(msg.ContextSys.EventID)) case SysConnect: // fd, sockaddr diff --git a/KubeArmor/monitor/logUpdate.go b/KubeArmor/monitor/logUpdate.go index c20f5ef4ad..719c9cb6d4 100644 --- a/KubeArmor/monitor/logUpdate.go +++ b/KubeArmor/monitor/logUpdate.go @@ -62,6 +62,15 @@ func (mon *SystemMonitor) BuildLogBase(msg ContextCombined) tp.Log { if log.ContainerID != "" { log = mon.UpdateContainerInfoByContainerID(log) + } else { + // update host policy flag + log.PolicyEnabled = mon.Node.PolicyEnabled + + // update host visibility flags + log.ProcessVisibilityEnabled = mon.Node.ProcessVisibilityEnabled + log.FileVisibilityEnabled = mon.Node.FileVisibilityEnabled + log.NetworkVisibilityEnabled = mon.Node.NetworkVisibilityEnabled + log.CapabilitiesVisibilityEnabled = mon.Node.CapabilitiesVisibilityEnabled } log.HostPPID = int32(msg.ContextSys.HostPPID) @@ -194,7 +203,7 @@ func (mon *SystemMonitor) UpdateLogs() { var sockDomain string var sockType string - var sockProtocol string + var sockProtocol int32 if val, ok := msg.ContextArgs[0].(string); ok { sockDomain = val @@ -203,11 +212,11 @@ func (mon *SystemMonitor) UpdateLogs() { sockType = val } if val, ok := msg.ContextArgs[2].(int32); ok { - sockProtocol = strconv.Itoa(int(val)) + sockProtocol = val } log.Operation = "Network" - log.Resource = "domain=" + sockDomain + " type=" 
+ sockType + " protocol=" + sockProtocol + log.Resource = "domain=" + sockDomain + " type=" + sockType + " protocol=" + getProtocol(sockProtocol) log.Data = "syscall=" + getSyscallName(int32(msg.ContextSys.EventID)) case SysConnect: // fd, sockaddr diff --git a/KubeArmor/monitor/syscallParser.go b/KubeArmor/monitor/syscallParser.go index a76a8ea7bf..6744f01329 100644 --- a/KubeArmor/monitor/syscallParser.go +++ b/KubeArmor/monitor/syscallParser.go @@ -378,6 +378,26 @@ func getSocketType(st uint32) string { return strings.Join(f, "|") } +var protocols = map[int32]string{ + 1: "ICMP", + 6: "TCP", + 17: "UDP", + 58: "ICMPv6", +} + +// getProtocol Function +func getProtocol(proto int32) string { + var res string + + if protoName, ok := protocols[proto]; ok { + res = protoName + } else { + res = strconv.Itoa(int(proto)) + } + + return res +} + var capabilities = map[int32]string{ 0: "CAP_CHOWN", 1: "CAP_DAC_OVERRIDE", diff --git a/KubeArmor/monitor/systemMonitor.go b/KubeArmor/monitor/systemMonitor.go index 60ae89ed10..a1c88921d4 100644 --- a/KubeArmor/monitor/systemMonitor.go +++ b/KubeArmor/monitor/systemMonitor.go @@ -109,9 +109,8 @@ func init() { // SystemMonitor Structure type SystemMonitor struct { - // host - HostName string - KernelVersion string + // node + Node *tp.Node // logs Logger *fd.Feeder @@ -161,14 +160,12 @@ type SystemMonitor struct { } // NewSystemMonitor Function -func NewSystemMonitor(node tp.Node, logger *fd.Feeder, containers *map[string]tp.Container, containersLock **sync.RWMutex, +func NewSystemMonitor(node *tp.Node, logger *fd.Feeder, containers *map[string]tp.Container, containersLock **sync.RWMutex, activePidMap *map[string]tp.PidMap, activeHostPidMap *map[string]tp.PidMap, activePidMapLock **sync.RWMutex, activeHostMap *map[uint32]tp.PidMap, activeHostMapLock **sync.RWMutex) *SystemMonitor { mon := new(SystemMonitor) - mon.HostName = cfg.GlobalCfg.Host - mon.KernelVersion = node.KernelVersion - + mon.Node = node mon.Logger = logger 
mon.Containers = containers @@ -212,10 +209,10 @@ func (mon *SystemMonitor) InitBPF() error { return err } - mon.Logger.Printf("Downloaded kernel headers (%s)", mon.KernelVersion) + mon.Logger.Printf("Downloaded kernel headers (%s)", mon.Node.KernelVersion) // set a new location for kernel headers - if err := os.Setenv("BCC_KERNEL_SOURCE", homeDir+"/GKE/kernel/usr/src/linux-headers-"+mon.KernelVersion); err != nil { + if err := os.Setenv("BCC_KERNEL_SOURCE", homeDir+"/GKE/kernel/usr/src/linux-headers-"+mon.Node.KernelVersion); err != nil { mon.Logger.Err(err.Error()) } @@ -227,7 +224,7 @@ func (mon *SystemMonitor) InitBPF() error { // /media/root/usr folder in kubearmor for GKE. The following code // checks whether the /media/root/usr/src/kernel-hdrs path exists // and uses it for BCC kernel source, if present. - lklhdrpath := "/media/root/usr/src/linux-headers-" + mon.KernelVersion + lklhdrpath := "/media/root/usr/src/linux-headers-" + mon.Node.KernelVersion mon.Logger.Printf("checking if kernel headers path (%s) exists", lklhdrpath) if _, err := os.Stat(lklhdrpath); err == nil { mon.Logger.Printf("using kernel headers from (%s)", lklhdrpath) @@ -267,7 +264,7 @@ func (mon *SystemMonitor) InitBPF() error { return errors.New("bpf module is nil") } } else if cfg.GlobalCfg.Policy && cfg.GlobalCfg.HostPolicy { // container and host - if strings.HasPrefix(mon.KernelVersion, "4.") { // 4.x + if strings.HasPrefix(mon.Node.KernelVersion, "4.") { // 4.x mon.BpfModule = bcc.NewModule(bpfSource, []string{"-O2", "-DMONITOR_HOST_AND_CONTAINER"}) if mon.BpfModule == nil { return errors.New("bpf module is nil") diff --git a/KubeArmor/monitor/systemMonitor_test.go b/KubeArmor/monitor/systemMonitor_test.go index 74e2ed721b..133cbac595 100644 --- a/KubeArmor/monitor/systemMonitor_test.go +++ b/KubeArmor/monitor/systemMonitor_test.go @@ -55,7 +55,7 @@ func TestSystemMonitor(t *testing.T) { t.Log("[PASS] Created logger") // Create System Monitor - systemMonitor := 
NewSystemMonitor(node, logger, &Containers, &ContainersLock, + systemMonitor := NewSystemMonitor(&node, logger, &Containers, &ContainersLock, &ActivePidMap, &ActiveHostPidMap, &ActivePidMapLock, &ActiveHostMap, &ActiveHostMapLock) if systemMonitor == nil { t.Log("[FAIL] Failed to create SystemMonitor") @@ -124,7 +124,7 @@ func TestTraceSyscallWithPod(t *testing.T) { t.Log("[PASS] Created logger") // Create System Monitor - systemMonitor := NewSystemMonitor(node, logger, &Containers, &ContainersLock, + systemMonitor := NewSystemMonitor(&node, logger, &Containers, &ContainersLock, &ActivePidMap, &ActiveHostPidMap, &ActivePidMapLock, &ActiveHostMap, &ActiveHostMapLock) if systemMonitor == nil { t.Log("[FAIL] Failed to create SystemMonitor") @@ -227,7 +227,7 @@ func TestTraceSyscallWithHost(t *testing.T) { t.Log("[PASS] Created logger") // Create System Monitor - systemMonitor := NewSystemMonitor(node, logger, &Containers, &ContainersLock, + systemMonitor := NewSystemMonitor(&node, logger, &Containers, &ContainersLock, &ActivePidMap, &ActiveHostPidMap, &ActivePidMapLock, &ActiveHostMap, &ActiveHostMapLock) if systemMonitor == nil { t.Log("[FAIL] Failed to create SystemMonitor") diff --git a/KubeArmor/types/types.go b/KubeArmor/types/types.go index b207834020..49127250ba 100644 --- a/KubeArmor/types/types.go +++ b/KubeArmor/types/types.go @@ -71,7 +71,9 @@ type EndPoint struct { // Node Structure type Node struct { - NodeIP string `json:"nodeIP"` + ClusterName string `json:"clusterName"` + NodeName string `json:"nodeName"` + NodeIP string `json:"nodeIP"` Annotations map[string]string `json:"annotations"` Labels map[string]string `json:"labels"` diff --git a/contribution/contribution_guide.md b/contribution/contribution_guide.md index dd59013eaf..c0ef9aabaf 100644 --- a/contribution/contribution_guide.md +++ b/contribution/contribution_guide.md @@ -94,7 +94,7 @@ To make a contribution, please follow the steps below. 8. 
DCO Signoffs - To ensure that contributors are only submitting work that they have rights to, we are requiring everyone to acknowledge this by signing their work. Any copyright notices in this repos should specify the authors as "The KubeArmor authors". + To ensure that contributors are only submitting work that they have rights to, we are requiring everyone to acknowledge this by signing their work. Any copyright notices in this repo should specify the authors as "KubeArmor authors". To sign your work, just add a line like this at the end of your commit message: @@ -104,4 +104,4 @@ To make a contribution, please follow the steps below. This can easily be done with the `-s` or `--signoff` option to `git commit`. - By doing this you state that you can certify the following (from https://developercertificate.org/): + By doing this, you state that the source code being submitted originated from you (see https://developercertificate.org). diff --git a/contribution/development_guide.md b/contribution/development_guide.md index afcb7b1725..21d86a6294 100644 --- a/contribution/development_guide.md +++ b/contribution/development_guide.md @@ -23,6 +23,7 @@ ```text $ cd KubeArmor/contribution/vagrant ~/KubeArmor/contribution/vagrant$ ./setup.sh + ~/KubeArmor/contribution/vagrant$ sudo reboot ``` * VM Setup using Vagrant @@ -142,9 +143,9 @@ ~/KubeArmor/KubeArmor$ make vagrant-destroy ``` - * VM Setup using the latest Linux kernel (v5.13) + * VM Setup using Vagrant with Ubuntu 21.10 (v5.13) - To use the latest linux kernel for dev env you can run `make` with the `NETNEXT` flag set to `1` for the respective make option. + To use the recent Linux kernel v5.13 for dev env, you can run `make` with the `NETNEXT` flag set to `1` for the respective make option. 
```text ~/KubeArmor/KubeArmor$ make vagrant-up NETNEXT=1 @@ -158,7 +159,7 @@ * Please Note: - You could skip the steps for the vagrant setup completely if you're directly compiling Kubearmor on any Linux distro, or using Virtualbox. + You can skip the steps for the vagrant setup completely if you're directly compiling KubeArmor on any Linux distro. Please ensure that the steps to setup K8s are followed so as to resolve any open dependencies. @@ -250,8 +251,10 @@ Here, we briefly give you an overview of KubeArmor's directories. core - The main body (start point) of KubeArmor enforcer - Runtime policy enforcer (enforcing security policies into LSMs) feeder - gRPC-based feeder (sending audit/system logs to a log server) + kvmAgent - KubeArmor VM agent log - Message logger (stdout) monitor - eBPF-based system monitor (mapping process IDs to container IDs) + policy - gRPC service to manage Host Policies for VM environments types - Type definitions protobuf/ - Protocol buffer ``` diff --git a/contribution/self-managed-k8s-selinux/k8s/initialize_kubernetes.sh b/contribution/self-managed-k8s-selinux/k8s/initialize_kubernetes.sh index 2bf85c544b..f5a63ebafc 100755 --- a/contribution/self-managed-k8s-selinux/k8s/initialize_kubernetes.sh +++ b/contribution/self-managed-k8s-selinux/k8s/initialize_kubernetes.sh @@ -2,8 +2,14 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright 2021 Authors of KubeArmor -if [ ! -z $1 ] && [ "$1" == "help" ]; then - echo "Usage: $0 [ flannel | weave | calico | cilium ] (master)" +# set default +if [ "$CNI" == "" ]; then + CNI=cilium +fi + +# check supported CNI +if [ "$CNI" != "flannel" ] && [ "$CNI" != "weave" ] && [ "$CNI" != "calico" ] && [ "$CNI" != "cilium" ]; then + echo "Usage: CNI={flannel|weave|calico|cilium} MASTER={true|false} $0" exit fi @@ -19,17 +25,10 @@ sudo mkdir -p /etc/kubernetes/pki/ sudo chcon -R -t svirt_sandbox_file_t /var/lib/etcd sudo chcon -R -t svirt_sandbox_file_t /etc/kubernetes/ -if [ ! 
-z $1 ] && [ "$1" == "weave" ]; then - # initialize the master node (weave) - sudo kubeadm init | tee -a ~/k8s_init.log -elif [ ! -z $1 ] && [ "$1" == "flannel" ]; then - # initialize the master node (flannel) - sudo kubeadm init --pod-network-cidr=10.244.0.0/16 | tee -a ~/k8s_init.log -elif [ ! -z $1 ] && [ "$1" == "calico" ]; then - # initialize the master node (calico) - sudo kubeadm init --pod-network-cidr=10.244.0.0/16 | tee -a ~/k8s_init.log -else - # initialize the master node (cilium) +# initialize the master node +if [ "$CNI" == "calico" ]; then + sudo kubeadm init --pod-network-cidr=192.168.0.0/16 | tee -a ~/k8s_init.log +else # weave, flannel, cilium sudo kubeadm init --pod-network-cidr=10.244.0.0/16 | tee -a ~/k8s_init.log fi @@ -49,25 +48,26 @@ else sudo cp -r $HOME/.kube/ /root/ fi -if [ ! -z $1 ] && [ "$1" == "weave" ]; then +if [ "$CNI" == "flannel" ]; then + # install a pod network (flannel) + kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/v0.17.0/Documentation/kube-flannel.yml +elif [ "$CNI" == "weave" ]; then # install a pod network (weave) export kubever=$(kubectl version | base64 | tr -d '\n') kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever" -elif [ ! -z $1 ] && [ "$1" == "flannel" ]; then - # install a pod network (flannel) - kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.13.0/Documentation/kube-flannel.yml -elif [ ! 
-z $1 ] && [ "$1" == "calico" ]; then +elif [ "$CNI" == "calico" ]; then # install a pod network (calico) - kubectl apply -f https://docs.projectcalico.org/v3.6/manifests/calico.yaml -else + kubectl apply -f https://projectcalico.docs.tigera.io/manifests/calico.yaml +elif [ "$CNI" == "cilium" ]; then # install a pod network (cilium) - kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.9/install/kubernetes/quick-install.yaml + curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/latest/download/cilium-linux-amd64.tar.gz{,.sha256sum} + sha256sum --check cilium-linux-amd64.tar.gz.sha256sum + sudo tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin + rm cilium-linux-amd64.tar.gz{,.sha256sum} + /usr/local/bin/cilium install fi -if [ ! -z $2 ] && [ "$2" == "master" ]; then +if [ "$MASTER" == "true" ]; then # disable master isolation (due to the lack of resources) kubectl taint nodes --all node-role.kubernetes.io/master- -elif [ -z $1 ]; then - # disable master isolation (due to the lack of resources) by default - kubectl taint nodes --all node-role.kubernetes.io/master- fi diff --git a/contribution/self-managed-k8s/k8s/initialize_kubernetes.sh b/contribution/self-managed-k8s/k8s/initialize_kubernetes.sh index dbb9bedf8d..3eb8fda6d3 100755 --- a/contribution/self-managed-k8s/k8s/initialize_kubernetes.sh +++ b/contribution/self-managed-k8s/k8s/initialize_kubernetes.sh @@ -2,8 +2,14 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright 2021 Authors of KubeArmor -if [ ! 
-z $1 ] && [ "$1" == "help" ]; then - echo "Usage: $0 [ flannel | weave | calico | cilium ] (master)" +# set default +if [ "$CNI" == "" ]; then + CNI=cilium +fi + +# check supported CNI +if [ "$CNI" != "flannel" ] && [ "$CNI" != "weave" ] && [ "$CNI" != "calico" ] && [ "$CNI" != "cilium" ]; then + echo "Usage: CNI={flannel|weave|calico|cilium} MASTER={true|false} $0" exit fi @@ -18,18 +24,11 @@ sudo modprobe br_netfilter sudo bash -c "echo '1' > /proc/sys/net/bridge/bridge-nf-call-iptables" sudo bash -c "echo 'net.bridge.bridge-nf-call-iptables=1' >> /etc/sysctl.conf" -if [ ! -z $1 ] && [ "$1" == "weave" ]; then - # initialize the master node (weave) - sudo kubeadm init | tee -a ~/k8s_init.log -elif [ ! -z $1 ] && [ "$1" == "flannel" ]; then - # initialize the master node (flannel) - sudo kubeadm init --pod-network-cidr=10.244.0.0/16 | tee -a ~/k8s_init.log -elif [ ! -z $1 ] && [ "$1" == "calico" ]; then - # initialize the master node (calico) - sudo kubeadm init --pod-network-cidr=192.168.0.0/16 | tee -a ~/k8s_init.log -else - # initialize the master node (cilium) +# initialize the master node +if [ "$CNI" == "calico" ]; then sudo kubeadm init --pod-network-cidr=192.168.0.0/16 | tee -a ~/k8s_init.log +else # weave, flannel, cilium + sudo kubeadm init --pod-network-cidr=10.244.0.0/16 | tee -a ~/k8s_init.log fi # make kubectl work for non-root user @@ -47,25 +46,26 @@ else echo "export KUBECONFIG=$HOME/.kube/config" | tee -a ~/.bashrc fi -if [ ! -z $1 ] && [ "$1" == "weave" ]; then +if [ "$CNI" == "flannel" ]; then + # install a pod network (flannel) + kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/v0.17.0/Documentation/kube-flannel.yml +elif [ "$CNI" == "weave" ]; then # install a pod network (weave) export kubever=$(kubectl version | base64 | tr -d '\n') kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$kubever" -elif [ ! 
-z $1 ] && [ "$1" == "flannel" ]; then - # install a pod network (flannel) - kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.13.0/Documentation/kube-flannel.yml -elif [ ! -z $1 ] && [ "$1" == "calico" ]; then +elif [ "$CNI" == "calico" ]; then # install a pod network (calico) - kubectl apply -f https://docs.projectcalico.org/v3.6/manifests/calico.yaml -else + kubectl apply -f https://projectcalico.docs.tigera.io/manifests/calico.yaml +elif [ "$CNI" == "cilium" ]; then # install a pod network (cilium) - kubectl create -f https://raw.githubusercontent.com/cilium/cilium/v1.9/install/kubernetes/quick-install.yaml + curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/latest/download/cilium-linux-amd64.tar.gz{,.sha256sum} + sha256sum --check cilium-linux-amd64.tar.gz.sha256sum + sudo tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin + rm cilium-linux-amd64.tar.gz{,.sha256sum} + /usr/local/bin/cilium install fi -if [ ! -z $2 ] && [ "$2" == "master" ]; then +if [ "$MASTER" == "true" ]; then # disable master isolation (due to the lack of resources) kubectl taint nodes --all node-role.kubernetes.io/master- -elif [ -z $1 ]; then - # disable master isolation (due to the lack of resources) by default - kubectl taint nodes --all node-role.kubernetes.io/master- fi diff --git a/contribution/self-managed-k8s/k8s/install_kubernetes.sh b/contribution/self-managed-k8s/k8s/install_kubernetes.sh index 21877f2509..0fc2d3cee4 100755 --- a/contribution/self-managed-k8s/k8s/install_kubernetes.sh +++ b/contribution/self-managed-k8s/k8s/install_kubernetes.sh @@ -6,24 +6,23 @@ sudo apt-get update # install apt-transport-https -sudo apt-get install -y apt-transport-https +sudo apt-get install -y apt-transport-https ca-certificates curl -# add the public key -sudo apt-key adv --recv-keys --keyserver keyserver.ubuntu.com 3746C208A7317B0F - -# add the GPG key -curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - +# add 
the key for kubernetes repo +sudo curl -fsSLo /usr/share/keyrings/kubernetes-archive-keyring.gpg https://packages.cloud.google.com/apt/doc/apt-key.gpg # add sources.list.d -sudo touch /etc/apt/sources.list.d/kubernetes.list -echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list +echo "deb [signed-by=/usr/share/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list # update repo sudo apt-get update -# install Kubernetes +# install kubernetes sudo apt-get install -y kubeadm kubelet kubectl +# exclude kubernetes packages from updates +sudo apt-mark hold kubeadm kubelet kubectl + # mount bpffs (for cilium) echo "bpffs /sys/fs/bpf bpf defaults 0 0" | sudo tee -a /etc/fstab diff --git a/contribution/self-managed-k8s/setup.sh b/contribution/self-managed-k8s/setup.sh index 0375d60e19..0bafd5dc79 100755 --- a/contribution/self-managed-k8s/setup.sh +++ b/contribution/self-managed-k8s/setup.sh @@ -21,7 +21,7 @@ git -C /tmp/build/ clone --branch v0.24.0 --depth 1 https://github.com/iovisor/b # install dependencies for bcc sudo apt-get -y install build-essential cmake bison flex git python3 python3-pip \ clang-9 libllvm9 llvm-9-dev libclang-9-dev zlib1g-dev libelf-dev libedit-dev libfl-dev \ - arping netperf iperf3 + arping netperf iperf3 net-tools # install bcc mkdir -p /tmp/build/bcc/build; cd /tmp/build/bcc/build diff --git a/contribution/testing_guide.md b/contribution/testing_guide.md index 7f1becf124..8a6f730bce 100644 --- a/contribution/testing_guide.md +++ b/contribution/testing_guide.md @@ -1,6 +1,6 @@ # Testing Guide -There are two ways to check the functionalities of KubeArmor: 1) testing kubeArmor manually and 2) using the testing framework. +There are two ways to check the functionalities of KubeArmor: 1) testing KubeArmor manually and 2) using the testing framework. # 1. 
Test KubeArmor manually @@ -21,9 +21,9 @@ $ cd KubeArmor/KubeArmor ```text ~/KubeArmor/KubeArmor$ sudo -E ./kubearmor -gRPC=[gRPC port number] - -logPath=[log file path] - -enableKubeArmorPolicy=[true|false] - -enableKubeArmorHostPolicy=[true|false] + -logPath=[log file path] + -enableKubeArmorPolicy=[true|false] + -enableKubeArmorHostPolicy=[true|false] ``` ## 1.4. Apply security policies into Kubernetes @@ -56,7 +56,7 @@ $ kubectl -n [namespace name] exec -it [pod name] -- bash -c [command] ## 1.6. Check generated alerts -- Watch alerts using [kArmor](https://github.com/kubearmor/kubearmor-client) cli tool +- Watch alerts using [karmor](https://github.com/kubearmor/kubearmor-client) cli tool ```text $ karmor log [flags] @@ -65,12 +65,12 @@ $ kubectl -n [namespace name] exec -it [pod name] -- bash -c [command] flags: ```text - --gRPC string gRPC server information - -h, --help help for log - --json Flag to print alerts and logs in the JSON format - --logFilter string Filter for what kinds of alerts and logs to receive, {policy|system|all} (default "policy") - --logPath string Output location for alerts and logs, {path|stdout|none} (default "stdout") - --msgPath string Output location for messages, {path|stdout|none} (default "none") + --gRPC string gRPC server information + --help help for log + --json Flag to print alerts and logs in the JSON format + --logFilter string What kinds of alerts and logs to receive, {policy|system|all} (default "policy") + --logPath string Output location for alerts and logs, {path|stdout|none} (default "stdout") + --msgPath string Output location for messages, {path|stdout|none} (default "none") ``` Note that you will see alerts and logs generated right after `karmor` runs logs; thus, we recommend to run the above command in other terminal to see logs live. @@ -78,9 +78,9 @@ $ kubectl -n [namespace name] exec -it [pod name] -- bash -c [command] # 2. Test KubeArmor using the auto-testing framework -## 2.1. 
Prepare microservices and testcases +## 2.1. Prepare microservices and test scenarios -The auto-testing framework operates based on two things: microservices and testcases for each microservice. +The auto-testing framework operates based on two things: microservices and test scenarios for each microservice. - Microservices @@ -100,26 +100,26 @@ The auto-testing framework operates based on two things: microservices and testc As an example, we created 'multiubuntu' in [microservices](../tests/microservices) and defined 'multiubuntu-deployment.yaml' in [multiubuntu](../examples/multiubuntu). -- Testcases +- Test scenarios - Create a directory whose name is like '[microservice name]_[testcase name]' in [scenarios](../tests/scenarios) + Create a directory whose name is like '[microservice name]_[scenario name]' in [scenarios](../tests/scenarios) ```text $ cd KubeArmor/tests/scenarios - ~/KubeArmor/tests/scenarios$ mkdir [microservice name]_[testcase name] + ~/KubeArmor/tests/scenarios$ mkdir [microservice name]_[scenario name] ``` Then, define a YAML file for a test policy in the directory ```text - ~/KubeArmor/tests/scenarios$ cd [microservice name]_[testcase name] - .../[microservice name]_[testcase name]$ vi [policy name].yaml + ~/KubeArmor/tests/scenarios$ cd [microservice name]_[scenario name] + .../[microservice name]_[scenario name]$ vi [policy name].yaml ``` Create cmd files whose names are like 'cmd#' ```text - .../[microservice name]_[testcase name]$ vi cmd1 / cmd2 / ... + .../[microservice name]_[scenario name]$ vi cmd1 / cmd2 / ... ``` Here is a template for a cmd file. @@ -134,7 +134,7 @@ The auto-testing framework operates based on two things: microservices and testc action: [action in a policy] { Allow | Audit | Block } ``` - This is an example of a testcase. + This is a cmd example of a test scenario. 
```text source: ubuntu-1-deployment diff --git a/contribution/vagrant/Vagrantfile b/contribution/vagrant/Vagrantfile index d3dacf8388..2aaa0fa6bf 100644 --- a/contribution/vagrant/Vagrantfile +++ b/contribution/vagrant/Vagrantfile @@ -5,13 +5,12 @@ if ENV['OS'] == "centos" then VM_NAME = "kubearmor-dev" else # ubuntu if ENV['NETNEXT'] == "1" then - VM_IMG = "ubuntu/impish64" + # VM_IMG = "ubuntu/jammy64" # 5.15 + VM_IMG = "ubuntu/impish64" # 5.13 VM_NAME = "kubearmor-dev-next" - elsif ENV['NETNEXT'] == "2" then - VM_IMG = "ubuntu/focal64" - VM_NAME = "kubearmor-dev-next-2" else - VM_IMG = "ubuntu/bionic64" + # VM_IMG = "ubuntu/focal64" # 5.4 + VM_IMG = "ubuntu/bionic64" # 4.15 VM_NAME = "kubearmor-dev" end end @@ -67,36 +66,35 @@ Vagrant.configure("2") do |config| config.vm.provision :file, source: "~/.gitconfig", destination: "$HOME/.gitconfig" if ENV['OS'] == "centos" then - # provision bash scripts + # install base dependencies config.vm.provision :shell, path: kubearmor_home + "/contribution/self-managed-k8s-selinux/setup.sh" + + # install Docker config.vm.provision :shell, path: kubearmor_home + "/contribution/self-managed-k8s-selinux/docker/install_docker.sh" + + # install Kubernetes config.vm.provision :shell, path: kubearmor_home + "/contribution/self-managed-k8s-selinux/k8s/install_kubernetes.sh" # initialize Kubernetes - config.vm.provision :shell, path: kubearmor_home + "/contribution/self-managed-k8s-selinux/k8s/initialize_kubernetes.sh" + config.vm.provision :shell, :inline => "MASTER=true /home/vagrant/KubeArmor/contribution/self-managed-k8s-selinux/k8s/initialize_kubernetes.sh" # enable SELinux config.vm.provision :shell, path: kubearmor_home + "/contribution/self-managed-k8s-selinux/enable_selinux.sh" else # ubuntu - if ENV['NETNEXT'] == "1" then - # install the latest kernel - config.vm.provision :shell, path: kubearmor_home + "/contribution/vagrant/dist-upgrade.sh" - - # reboot - config.vm.provision :reload - end - - # provision bash scripts + # 
install base dependencies config.vm.provision :shell, path: kubearmor_home + "/contribution/self-managed-k8s/setup.sh" + if ENV['RUNTIME'] == "containerd" then config.vm.provision :shell, path: kubearmor_home + "/contribution/self-managed-k8s/containerd/install-containerd.sh" else # default == 'docker' config.vm.provision :shell, path: kubearmor_home + "/contribution/self-managed-k8s/docker/install_docker.sh" end + + # install Kubernetes config.vm.provision :shell, path: kubearmor_home + "/contribution/self-managed-k8s/k8s/install_kubernetes.sh" # initialize Kubernetes - config.vm.provision :shell, path: kubearmor_home + "/contribution/self-managed-k8s/k8s/initialize_kubernetes.sh" + config.vm.provision :shell, :inline => "MASTER=true /home/vagrant/KubeArmor/contribution/self-managed-k8s/k8s/initialize_kubernetes.sh" end # install karmor diff --git a/examples/multiubuntu/build/Dockerfile b/examples/multiubuntu/build/Dockerfile index ca46fcd0d7..e0114c7466 100644 --- a/examples/multiubuntu/build/Dockerfile +++ b/examples/multiubuntu/build/Dockerfile @@ -5,7 +5,7 @@ FROM ubuntu:18.04 RUN apt-get update -RUN apt-get install -y net-tools iputils-ping telnet ssh tcpdump nmap dsniff +RUN apt-get install -y net-tools iputils-ping telnet ssh tcpdump nmap dsniff arping RUN apt-get install -y curl iperf3 netperf ethtool python-scapy python-pip RUN apt-get install -y iptables bridge-utils apache2 vim diff --git a/examples/multiubuntu/security-policies/ksp-ubuntu-1-cap-net-raw-block.yaml b/examples/multiubuntu/security-policies/ksp-ubuntu-1-cap-net-raw-block.yaml index 8de14c95a4..1b6a72edb4 100644 --- a/examples/multiubuntu/security-policies/ksp-ubuntu-1-cap-net-raw-block.yaml +++ b/examples/multiubuntu/security-policies/ksp-ubuntu-1-cap-net-raw-block.yaml @@ -17,5 +17,5 @@ spec: # multiubuntu_test_03 # test -# $ ping -c 1 127.0.0.1 -# ping: socket: Operation not permitted +# $ arping -c 1 127.0.0.1 +# arping: libnet_init(LIBNET_LINK, ): libnet_open_link(): UID/EUID 0 or 
capability CAP_NET_RAW required diff --git a/examples/multiubuntu/security-policies/ksp-ubuntu-3-proc-path-owner-allow.yaml b/examples/multiubuntu/security-policies/ksp-ubuntu-3-proc-path-owner-allow.yaml index 4d59f2e2df..78ba4723cf 100644 --- a/examples/multiubuntu/security-policies/ksp-ubuntu-3-proc-path-owner-allow.yaml +++ b/examples/multiubuntu/security-policies/ksp-ubuntu-3-proc-path-owner-allow.yaml @@ -30,6 +30,12 @@ spec: recursive: true - dir: /proc/ # required to change root to user1 (coarse-grained way) recursive: true + - dir: /lib/ # used by root and user1 + recursive: true + - dir: /sys/ # used by root and user1 + recursive: true + - dir: /pts/ # used by root and user1 + recursive: true action: Allow diff --git a/tests/host_scenarios/kubearmor-dev-next_test_01/cmd1 b/tests/host_scenarios/kubearmor-dev-next_test_01/cmd1 new file mode 100644 index 0000000000..f3bd9b2c68 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_01/cmd1 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: diff --help +result: failed +--- +operation: Process +condition: diff +action: Block diff --git a/tests/host_scenarios/kubearmor-dev-next_test_01/hsp-kubearmor-dev-next-proc-path-block.yaml b/tests/host_scenarios/kubearmor-dev-next_test_01/hsp-kubearmor-dev-next-proc-path-block.yaml new file mode 100644 index 0000000000..7599a5d1c9 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_01/hsp-kubearmor-dev-next-proc-path-block.yaml @@ -0,0 +1,23 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorHostPolicy +metadata: + name: hsp-kubearmor-dev-next-proc-path-block +spec: + nodeSelector: + matchLabels: + kubernetes.io/hostname: kubearmor-dev-next + severity: 5 + process: + matchPaths: + - path: /usr/bin/diff + action: + Block + +# kubearmor-dev-next_test_01 + +# test +# $ diff --help +# -bash: /usr/bin/diff: Permission denied + +# expectation +# anyone cannot execute /usr/bin/diff diff --git a/tests/host_scenarios/kubearmor-dev-next_test_02/cmd1 
b/tests/host_scenarios/kubearmor-dev-next_test_02/cmd1 new file mode 100644 index 0000000000..a072031462 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_02/cmd1 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: cat /etc/passwd +result: passed +--- +operation: File +condition: /etc/passwd +action: Audit diff --git a/tests/host_scenarios/kubearmor-dev-next_test_02/cmd2 b/tests/host_scenarios/kubearmor-dev-next_test_02/cmd2 new file mode 100644 index 0000000000..5c32e4b864 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_02/cmd2 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: head -n 1 /etc/passwd +result: passed +--- +operation: File +condition: /etc/passwd +action: Audit diff --git a/tests/host_scenarios/kubearmor-dev-next_test_02/hsp-kubearmor-dev-next-file-path-audit.yaml b/tests/host_scenarios/kubearmor-dev-next_test_02/hsp-kubearmor-dev-next-file-path-audit.yaml new file mode 100644 index 0000000000..19a190bae8 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_02/hsp-kubearmor-dev-next-file-path-audit.yaml @@ -0,0 +1,25 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorHostPolicy +metadata: + name: hsp-kubearmor-dev-next-file-path-audit +spec: + nodeSelector: + matchLabels: + kubernetes.io/hostname: kubearmor-dev-next + severity: 5 + file: + matchPaths: + - path: /etc/passwd + action: + Audit + +# kubearmor-dev-next_test_02 + +# test +# $ cat /etc/passwd +# ... +# $ head /etc/passwd +# ... 
+ +# expectation +# anyone can access /etc/passwd, but the access would be audited diff --git a/tests/host_scenarios/kubearmor-dev-next_test_03/cmd1 b/tests/host_scenarios/kubearmor-dev-next_test_03/cmd1 new file mode 100644 index 0000000000..14697005d3 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_03/cmd1 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: cat /etc/hostname +result: failed +--- +operation: File +condition: /etc/hostname +action: Block diff --git a/tests/host_scenarios/kubearmor-dev-next_test_03/hsp-kubearmor-dev-next-file-path-block.yaml b/tests/host_scenarios/kubearmor-dev-next_test_03/hsp-kubearmor-dev-next-file-path-block.yaml new file mode 100644 index 0000000000..c1cf7d2eb8 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_03/hsp-kubearmor-dev-next-file-path-block.yaml @@ -0,0 +1,23 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorHostPolicy +metadata: + name: hsp-kubearmor-dev-next-file-path-block +spec: + nodeSelector: + matchLabels: + kubernetes.io/hostname: kubearmor-dev-next + severity: 5 + file: + matchPaths: + - path: /etc/hostname + action: + Block + +# kubearmor-dev-next_test_03 + +# test +# $ cat /etc/hostname +# cat: /etc/hostname: Permission denied + +# expectation +# anyone cannot access /etc/hostname diff --git a/tests/host_scenarios/kubearmor-dev-next_test_04/cmd1 b/tests/host_scenarios/kubearmor-dev-next_test_04/cmd1 new file mode 100644 index 0000000000..b81b464c94 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_04/cmd1 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: bash -c date +result: failed +--- +operation: Process +condition: date +action: Block diff --git a/tests/host_scenarios/kubearmor-dev-next_test_04/cmd2 b/tests/host_scenarios/kubearmor-dev-next_test_04/cmd2 new file mode 100644 index 0000000000..dadb72c76c --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_04/cmd2 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: bash -c ls +result: passed +--- 
+operation: Process +condition: ls +action: Block diff --git a/tests/host_scenarios/kubearmor-dev-next_test_04/hsp-kubearmor-dev-next-proc-path-block-fromSource.yaml b/tests/host_scenarios/kubearmor-dev-next_test_04/hsp-kubearmor-dev-next-proc-path-block-fromSource.yaml new file mode 100644 index 0000000000..e23048de0d --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_04/hsp-kubearmor-dev-next-proc-path-block-fromSource.yaml @@ -0,0 +1,31 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorHostPolicy +metadata: + name: hsp-kubearmor-dev-next-proc-path-block-fromsource +spec: + nodeSelector: + matchLabels: + kubernetes.io/hostname: kubearmor-dev-next + severity: 5 + process: + matchPaths: + - path: /bin/date + fromSource: + - path: /bin/bash # ubuntu + - path: /usr/bin/date + fromSource: + - path: /usr/bin/bash # centos + action: + Block + +# kubearmor-dev-next_test_04 + +# test +# (/home/vagrant/selinux-test/) $ bash -c date +# bash: 1: date: Permission denied +# (/home/vagrant/selinux-test/) $ bash -c ls +# ls ... 
+ +# expectation +# (/usr)/bin/bash cannot execute (/usr)/bin/date +# (/usr)/bin/bash can execute any others diff --git a/tests/host_scenarios/kubearmor-dev-next_test_05/cmd1 b/tests/host_scenarios/kubearmor-dev-next_test_05/cmd1 new file mode 100644 index 0000000000..2e77e1110f --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_05/cmd1 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: bash -c date +result: passed +--- +operation: Process +condition: date +action: Allow diff --git a/tests/host_scenarios/kubearmor-dev-next_test_05/cmd2 b/tests/host_scenarios/kubearmor-dev-next_test_05/cmd2 new file mode 100644 index 0000000000..62c3a70041 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_05/cmd2 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: bash -c ls +result: failed +--- +operation: Process +condition: ls +action: Allow diff --git a/tests/host_scenarios/kubearmor-dev-next_test_05/hsp-kubearmor-dev-next-proc-path-allow-fromSource.yaml b/tests/host_scenarios/kubearmor-dev-next_test_05/hsp-kubearmor-dev-next-proc-path-allow-fromSource.yaml new file mode 100644 index 0000000000..49d6766f11 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_05/hsp-kubearmor-dev-next-proc-path-allow-fromSource.yaml @@ -0,0 +1,31 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorHostPolicy +metadata: + name: hsp-kubearmor-dev-next-proc-path-allow-fromsource +spec: + nodeSelector: + matchLabels: + kubernetes.io/hostname: kubearmor-dev-next + severity: 5 + process: + matchPaths: + - path: /bin/date + fromSource: + - path: /bin/bash # ubuntu + - path: /usr/bin/date + fromSource: + - path: /usr/bin/bash # centos + action: + Allow + +# kubearmor-dev-next_test_05 + +# test +# $ bash -c date +# ... 
+# $ bash -c ls +# bash: /usr/bin/ls: Permission denied + +# expectation +# (/usr)/bin/bash can only execute (/usr)/bin/date +# (/usr)/bin/bash cannot execute any others diff --git a/tests/host_scenarios/kubearmor-dev-next_test_06/cmd1 b/tests/host_scenarios/kubearmor-dev-next_test_06/cmd1 new file mode 100644 index 0000000000..66a5e30b61 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_06/cmd1 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: head -n 1 /etc/hostname +result: failed +--- +operation: File +condition: /etc/hostname +action: Block diff --git a/tests/host_scenarios/kubearmor-dev-next_test_06/cmd2 b/tests/host_scenarios/kubearmor-dev-next_test_06/cmd2 new file mode 100644 index 0000000000..2f64370806 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_06/cmd2 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: head -n 1 /etc/hosts +result: passed +--- +operation: File +condition: /etc/hosts +action: Block diff --git a/tests/host_scenarios/kubearmor-dev-next_test_06/hsp-kubearmor-dev-next-file-path-block-fromSource.yaml b/tests/host_scenarios/kubearmor-dev-next_test_06/hsp-kubearmor-dev-next-file-path-block-fromSource.yaml new file mode 100644 index 0000000000..fca1025782 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_06/hsp-kubearmor-dev-next-file-path-block-fromSource.yaml @@ -0,0 +1,28 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorHostPolicy +metadata: + name: hsp-kubearmor-dev-next-file-path-block-fromsource +spec: + nodeSelector: + matchLabels: + kubernetes.io/hostname: kubearmor-dev-next + severity: 5 + file: + matchPaths: + - path: /etc/hostname + fromSource: + - path: /usr/bin/head + action: + Block + +# kubearmor-dev-next_test_06 + +# test +# $ head /etc/hostname +# head: cannot open '/etc/hostname' for reading: Permission denied +# $ head /etc/hosts +# ... 
+ +# expectation +# /usr/bin/head cannot access /etc/hostname +# /usr/bin/head can access any others diff --git a/tests/host_scenarios/kubearmor-dev-next_test_07/cmd1 b/tests/host_scenarios/kubearmor-dev-next_test_07/cmd1 new file mode 100644 index 0000000000..325aa7da65 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_07/cmd1 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: head -n 1 /etc/hostname +result: passed +--- +operation: File +condition: /etc/hostname +action: Allow diff --git a/tests/host_scenarios/kubearmor-dev-next_test_07/cmd2 b/tests/host_scenarios/kubearmor-dev-next_test_07/cmd2 new file mode 100644 index 0000000000..6100c04e54 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_07/cmd2 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: head -n 1 /etc/hosts +result: failed +--- +operation: File +condition: /etc/hosts +action: Allow diff --git a/tests/host_scenarios/kubearmor-dev-next_test_07/hsp-kubearmor-dev-next-file-path-allow-fromSource.yaml b/tests/host_scenarios/kubearmor-dev-next_test_07/hsp-kubearmor-dev-next-file-path-allow-fromSource.yaml new file mode 100644 index 0000000000..96742ad752 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_07/hsp-kubearmor-dev-next-file-path-allow-fromSource.yaml @@ -0,0 +1,28 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorHostPolicy +metadata: + name: hsp-kubearmor-dev-next-file-path-allow-fromsource +spec: + nodeSelector: + matchLabels: + kubernetes.io/hostname: kubearmor-dev-next + severity: 5 + file: + matchPaths: + - path: /etc/hostname + fromSource: + - path: /usr/bin/head + action: + Allow + +# kubearmor-dev-next_test_07 + +# test +# $ head /etc/hostname +# kubearmor-dev +# $ head /etc/hosts +# head: /etc/hosts: Permission denied + +# expectation +# /usr/bin/head can only access /etc/hostname +# /usr/bin/head cannot access any others diff --git a/tests/host_scenarios/kubearmor-dev-next_test_08/cmd1 b/tests/host_scenarios/kubearmor-dev-next_test_08/cmd1 new 
file mode 100644 index 0000000000..8f4b637c92 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_08/cmd1 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: head -n 1 /etc/default/useradd +result: passed +--- +operation: File +condition: /etc/default/useradd +action: Allow diff --git a/tests/host_scenarios/kubearmor-dev-next_test_08/cmd2 b/tests/host_scenarios/kubearmor-dev-next_test_08/cmd2 new file mode 100644 index 0000000000..0d8373bec6 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_08/cmd2 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: head -n 1 /etc/hostname +result: failed +--- +operation: File +condition: /etc/hostname +action: Allow diff --git a/tests/host_scenarios/kubearmor-dev-next_test_08/hsp-kubearmor-dev-next-file-dir-allow-fromSource.yaml b/tests/host_scenarios/kubearmor-dev-next_test_08/hsp-kubearmor-dev-next-file-dir-allow-fromSource.yaml new file mode 100644 index 0000000000..19af3a0c42 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_08/hsp-kubearmor-dev-next-file-dir-allow-fromSource.yaml @@ -0,0 +1,29 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorHostPolicy +metadata: + name: hsp-kubearmor-dev-next-file-dir-allow-fromsource +spec: + nodeSelector: + matchLabels: + kubernetes.io/hostname: kubearmor-dev-next + severity: 5 + file: + matchDirectories: + - dir: /etc/default/ + recursive: true + fromSource: + - path: /usr/bin/head + action: + Allow + +# kubearmor-dev-next_test_08 + +# test +# $ head /etc/default/useradd +# Default values for useradd(8) ... 
+# $ head /etc/hostname +# head: /etc/hostname: Permission denied + +# expectation +# /usr/bin/head can only access /etc/default/* +# /usr/bin/head cannot access any others diff --git a/tests/host_scenarios/kubearmor-dev-next_test_09/cmd1 b/tests/host_scenarios/kubearmor-dev-next_test_09/cmd1 new file mode 100644 index 0000000000..b9abb14bb8 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_09/cmd1 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: head -n 1 /etc/default/useradd +result: failed +--- +operation: File +condition: /etc/default/useradd +action: Block diff --git a/tests/host_scenarios/kubearmor-dev-next_test_09/cmd2 b/tests/host_scenarios/kubearmor-dev-next_test_09/cmd2 new file mode 100644 index 0000000000..3acc041aec --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_09/cmd2 @@ -0,0 +1,7 @@ +source: kubearmor-dev +cmd: head -n 1 /etc/hostname +result: passed +--- +operation: File +condition: /etc/hostname +action: Block diff --git a/tests/host_scenarios/kubearmor-dev-next_test_09/hsp-kubearmor-dev-next-file-dir-block-fromSource.yaml b/tests/host_scenarios/kubearmor-dev-next_test_09/hsp-kubearmor-dev-next-file-dir-block-fromSource.yaml new file mode 100644 index 0000000000..53a84836c7 --- /dev/null +++ b/tests/host_scenarios/kubearmor-dev-next_test_09/hsp-kubearmor-dev-next-file-dir-block-fromSource.yaml @@ -0,0 +1,28 @@ +apiVersion: security.kubearmor.com/v1 +kind: KubeArmorHostPolicy +metadata: + name: hsp-kubearmor-dev-next-file-dir-block-fromsource +spec: + nodeSelector: + matchLabels: + kubernetes.io/hostname: kubearmor-dev-next + severity: 5 + file: + matchDirectories: + - dir: /etc/default/ + fromSource: + - path: /usr/bin/head + action: + Block + +# kubearmor-dev-next_test_09 + +# test +# $ head /etc/default/useradd +# head: useradd: Permission denied +# $ head /etc/hostname +# kubearmor-dev + +# expectation +# /usr/bin/head cannot access /etc/default/* +# /usr/bin/head can access any others diff --git 
a/tests/scenarios/multiubuntu_test_03/cmd1 b/tests/scenarios/multiubuntu_test_03/cmd1 index d3241f23c4..9264401795 100644 --- a/tests/scenarios/multiubuntu_test_03/cmd1 +++ b/tests/scenarios/multiubuntu_test_03/cmd1 @@ -1,5 +1,5 @@ source: ubuntu-1-deployment -cmd: ping -c 1 127.0.0.1 +cmd: arping -c 1 127.0.0.1 result: failed --- operation: Network diff --git a/tests/scenarios/multiubuntu_test_07/cmd1 b/tests/scenarios/multiubuntu_test_07/cmd1 index 691297244a..6af1ca9e25 100644 --- a/tests/scenarios/multiubuntu_test_07/cmd1 +++ b/tests/scenarios/multiubuntu_test_07/cmd1 @@ -3,5 +3,5 @@ cmd: ping -c 1 127.0.0.1 result: passed --- operation: Network -condition: SOCK_RAW +condition: protocol=ICMP action: Audit diff --git a/tests/scenarios/multiubuntu_test_13/cmd1 b/tests/scenarios/multiubuntu_test_13/cmd1 index 323dc25d2e..6e343e5d9b 100644 --- a/tests/scenarios/multiubuntu_test_13/cmd1 +++ b/tests/scenarios/multiubuntu_test_13/cmd1 @@ -3,5 +3,5 @@ cmd: cat /run/secrets/kubernetes.io/serviceaccount/token result: failed --- operation: File -condition: serviceaccount/token +condition: serviceaccount action: Block diff --git a/tests/test-scenarios-github.sh b/tests/test-scenarios-github.sh index 880299a64a..209e922029 100755 --- a/tests/test-scenarios-github.sh +++ b/tests/test-scenarios-github.sh @@ -43,60 +43,35 @@ SKIP_HOST_POLICY=1 case $1 in "-testPolicy") - if [ "$LSM" == "selinux" ]; then - echo "KubeArmor does not support container policies in SELinux-enabled environments" - exit - fi - SKIP_CONTAINER_POLICY=0 - SKIP_NATIVE_POLICY=1 - SKIP_HOST_POLICY=1 - ARMOR_OPTIONS=${@:2} ;; - "-testHostPolicy") echo "If you want to test host policies, please run KubeArmor separately and use test-scenarios-in-runtime.sh" exit ;; - "-testNativePolicy") if [ "$LSM" != "apparmor" ]; then echo "KubeArmor does not support native policies if AppArmor is not enabled" exit fi - - SKIP_CONTAINER_POLICY=1 SKIP_NATIVE_POLICY=0 - SKIP_HOST_POLICY=1 - ARMOR_OPTIONS=${@:2} ;; - *) if [ 
"$LSM" == "selinux" ]; then - echo "KubeArmor only supports host policies in SELinux-enabled environments" echo "If you want to test host policies, please run KubeArmor separately and use test-scenarios-in-runtime.sh" - exit - + echo "KubeArmor does not support native policies if AppArmor is not enabled" + SKIP_CONTAINER_POLICY=0 elif [ "$LSM" == "apparmor" ]; then echo "If you want to test host policies, please run KubeArmor separately and use test-scenarios-in-runtime.sh" - SKIP_CONTAINER_POLICY=0 SKIP_NATIVE_POLICY=0 - SKIP_HOST_POLICY=1 - - ARMOR_OPTIONS=$@ - else # none echo "KubeArmor does not support native policies if AppArmor is not enabled" - SKIP_CONTAINER_POLICY=0 - SKIP_NATIVE_POLICY=1 - SKIP_HOST_POLICY=0 - - ARMOR_OPTIONS=$@ fi + ARMOR_OPTIONS=$@ ;; esac @@ -288,7 +263,7 @@ function should_find_passed_log() { res_cmd=1 else echo $audit_log - DBG "[INFO] Found the log from logs" + DBG "Found the log from logs" fi } @@ -320,7 +295,7 @@ function should_not_find_any_host_log() { res_cmd=1 else audit_log="" - DBG "[INFO] Found no log from logs" + DBG "Found no log from logs" fi } @@ -336,7 +311,7 @@ function should_find_passed_host_log() { res_cmd=1 else echo $audit_log - DBG "[INFO] Found the log from logs" + DBG "Found the log from logs" fi } @@ -429,6 +404,23 @@ function run_test_scenario() { COND=$(cat $cmd | grep "^condition" | cut -d' ' -f2-) ACTION=$(cat $cmd | grep "^action" | awk '{print $2}') + # if SELinux is enabled but a test policy not for hosts + if [[ "$LSM" == "selinux" ]] && [[ $HOST_POLICY -eq 0 ]]; then + # replace Block with Audit + if [ "$ACTION" == "Block" ]; then + if [ "$RESULT" == "failed" ]; then + ACTION="Audit" + RESULT="passed" + fi + # replace Allow with "failed" to Audit with "passed" + elif [ "$ACTION" == "Allow" ]; then + if [ "$RESULT" == "failed" ]; then + ACTION="Audit" + RESULT="passed" + fi + fi + fi + # if AppArmor and SELinux are not enabled if [ "$LSM" == "none" ]; then # replace Block with Audit diff --git 
a/tests/test-scenarios-in-runtime.sh b/tests/test-scenarios-in-runtime.sh index 1bba2beee1..ac3ca40ad5 100755 --- a/tests/test-scenarios-in-runtime.sh +++ b/tests/test-scenarios-in-runtime.sh @@ -39,67 +39,36 @@ SKIP_HOST_POLICY=1 case $1 in "-testPolicy") - if [ "$LSM" == "selinux" ]; then - echo "KubeArmor does not support container policies in SELinux-enabled environments" - exit - fi - SKIP_CONTAINER_POLICY=0 - SKIP_NATIVE_POLICY=1 - SKIP_HOST_POLICY=1 - ARMOR_OPTIONS=${@:2} ;; - "-testHostPolicy") - SKIP_CONTAINER_POLICY=1 - SKIP_NATIVE_POLICY=1 SKIP_HOST_POLICY=0 - ARMOR_OPTIONS=${@:2} - ARMOR_OPTIONS=(${ARMOR_OPTIONS[@]} "-enableKubeArmorHostPolicy") ;; - "-testNativePolicy") if [ "$LSM" != "apparmor" ]; then echo "KubeArmor does not support native policies if AppArmor is not enabled" exit fi - - SKIP_CONTAINER_POLICY=1 SKIP_NATIVE_POLICY=0 - SKIP_HOST_POLICY=1 - ARMOR_OPTIONS=${@:2} ;; - *) if [ "$LSM" == "selinux" ]; then - echo "KubeArmor only supports host policies in SELinux-enabled environments" - - SKIP_CONTAINER_POLICY=1 - SKIP_NATIVE_POLICY=1 + echo "KubeArmor does not support native policies if AppArmor is not enabled" + SKIP_CONTAINER_POLICY=0 SKIP_HOST_POLICY=0 - - ARMOR_OPTIONS=$@ - ARMOR_OPTIONS=(${ARMOR_OPTIONS[@]} "-enableKubeArmorHostPolicy") - elif [ "$LSM" == "apparmor" ]; then SKIP_CONTAINER_POLICY=0 SKIP_NATIVE_POLICY=0 SKIP_HOST_POLICY=0 - - ARMOR_OPTIONS=$@ - else # none echo "KubeArmor does not support native policies if AppArmor is not enabled" - SKIP_CONTAINER_POLICY=0 - SKIP_NATIVE_POLICY=1 SKIP_HOST_POLICY=0 - - ARMOR_OPTIONS=$@ fi + ARMOR_OPTIONS=$@ ;; esac @@ -224,7 +193,7 @@ function should_find_passed_log() { res_cmd=1 else echo $audit_log - DBG "[INFO] Found the log from logs" + DBG "Found the log from logs" fi else # local audit_log=$(grep -E "$1.*policyName.*$2.*MatchedPolicy.*operation.*$3.*resource.*$4.*data.*action.*$5" $ARMOR_LOG | grep -v grep | tail -n 1 | grep Passed) @@ -234,7 +203,7 @@ function 
should_find_passed_log() { res_cmd=1 else echo $audit_log - DBG "[INFO] Found the log from logs" + DBG "Found the log from logs" fi fi } @@ -286,7 +255,7 @@ function should_not_find_any_host_log() { res_cmd=1 else audit_log="" - DBG "[INFO] Found no log from logs" + DBG "Found no log from logs" fi else # local audit_log=$(grep -E "$HOST_NAME.*policyName.*$1.*MatchedHostPolicy.*$5.*operation.*$2.*resource.*$3.*data.*action.*$4" $ARMOR_LOG | grep -v grep | tail -n 1 | grep -v Passed) @@ -296,7 +265,7 @@ function should_not_find_any_host_log() { res_cmd=1 else audit_log="" - DBG "[INFO] Found no log from logs" + DBG "Found no log from logs" fi fi } @@ -317,7 +286,7 @@ function should_find_passed_host_log() { res_cmd=1 else echo $audit_log - DBG "[INFO] Found the log from logs" + DBG "Found the log from logs" fi else # local audit_log=$(grep -E "$HOST_NAME.*policyName.*$1.*MatchedHostPolicy.*operation.*$2.*resource.*$3.*data.*action.*$4" $ARMOR_LOG | grep -v grep | tail -n 1 | grep Passed) @@ -327,7 +296,7 @@ function should_find_passed_host_log() { res_cmd=1 else echo $audit_log - DBG "[INFO] Found the log from logs" + DBG "Found the log from logs" fi fi } @@ -436,6 +405,23 @@ function run_test_scenario() { COND=$(cat $cmd | grep "^condition" | cut -d' ' -f2-) ACTION=$(cat $cmd | grep "^action" | awk '{print $2}') + # if SELinux is enabled but a test policy not for hosts + if [[ "$LSM" == "selinux" ]] && [[ $HOST_POLICY -eq 0 ]]; then + # replace Block with Audit + if [ "$ACTION" == "Block" ]; then + if [ "$RESULT" == "failed" ]; then + ACTION="Audit" + RESULT="passed" + fi + # replace Allow with "failed" to Audit with "passed" + elif [ "$ACTION" == "Allow" ]; then + if [ "$RESULT" == "failed" ]; then + ACTION="Audit" + RESULT="passed" + fi + fi + fi + # if AppArmor and SELinux are not enabled if [ "$LSM" == "none" ]; then # replace Block with Audit diff --git a/tests/test-scenarios-local.sh b/tests/test-scenarios-local.sh index b5774eb631..e24a3c6951 100755 --- 
a/tests/test-scenarios-local.sh +++ b/tests/test-scenarios-local.sh @@ -43,60 +43,35 @@ SKIP_HOST_POLICY=1 case $1 in "-testPolicy") - if [ "$LSM" == "selinux" ]; then - echo "KubeArmor does not support container policies in SELinux-enabled environments" - exit - fi - SKIP_CONTAINER_POLICY=0 - SKIP_NATIVE_POLICY=1 - SKIP_HOST_POLICY=1 - ARMOR_OPTIONS=${@:2} ;; - "-testHostPolicy") echo "If you want to test host policies, please run KubeArmor separately and use test-scenarios-in-runtime.sh" exit ;; - "-testNativePolicy") if [ "$LSM" != "apparmor" ]; then echo "KubeArmor does not support native policies if AppArmor is not enabled" exit fi - - SKIP_CONTAINER_POLICY=1 SKIP_NATIVE_POLICY=0 - SKIP_HOST_POLICY=1 - ARMOR_OPTIONS=${@:2} ;; - *) if [ "$LSM" == "selinux" ]; then - echo "KubeArmor only supports host policies in SELinux-enabled environments" echo "If you want to test host policies, please run KubeArmor separately and use test-scenarios-in-runtime.sh" - exit - + echo "KubeArmor does not support native policies if AppArmor is not enabled" + SKIP_CONTAINER_POLICY=0 elif [ "$LSM" == "apparmor" ]; then echo "If you want to test host policies, please run KubeArmor separately and use test-scenarios-in-runtime.sh" - SKIP_CONTAINER_POLICY=0 SKIP_NATIVE_POLICY=0 - SKIP_HOST_POLICY=1 - - ARMOR_OPTIONS=$@ - else # none echo "KubeArmor does not support native policies if AppArmor is not enabled" - SKIP_CONTAINER_POLICY=0 - SKIP_NATIVE_POLICY=1 - SKIP_HOST_POLICY=0 - - ARMOR_OPTIONS=$@ fi + ARMOR_OPTIONS=$@ ;; esac @@ -247,7 +222,7 @@ function should_find_passed_log() { res_cmd=1 else echo $audit_log - DBG "[INFO] Found the log from logs" + DBG "Found the log from logs" fi } @@ -279,7 +254,7 @@ function should_not_find_any_host_log() { res_cmd=1 else audit_log="" - DBG "[INFO] Found no log from logs" + DBG "Found no log from logs" fi } @@ -295,7 +270,7 @@ function should_find_passed_host_log() { res_cmd=1 else echo $audit_log - DBG "[INFO] Found the log from logs" + DBG 
"Found the log from logs" fi } @@ -388,6 +363,23 @@ function run_test_scenario() { COND=$(cat $cmd | grep "^condition" | cut -d' ' -f2-) ACTION=$(cat $cmd | grep "^action" | awk '{print $2}') + # if SELinux is enabled but a test policy not for hosts + if [[ "$LSM" == "selinux" ]] && [[ $HOST_POLICY -eq 0 ]]; then + # replace Block with Audit + if [ "$ACTION" == "Block" ]; then + if [ "$RESULT" == "failed" ]; then + ACTION="Audit" + RESULT="passed" + fi + # replace Allow with "failed" to Audit with "passed" + elif [ "$ACTION" == "Allow" ]; then + if [ "$RESULT" == "failed" ]; then + ACTION="Audit" + RESULT="passed" + fi + fi + fi + # if AppArmor and SELinux are not enabled if [ "$LSM" == "none" ]; then # replace Block with Audit