diff --git a/KubeArmor/common/common.go b/KubeArmor/common/common.go
index 44cc7857ea..2249159c01 100644
--- a/KubeArmor/common/common.go
+++ b/KubeArmor/common/common.go
@@ -274,18 +274,6 @@ func RunCommandAndWaitWithErr(cmd string, args []string) error {
 	return nil
 }
 
-// ========== //
-// == Host == //
-// ========== //
-
-// GetHostName Function
-func GetHostName() string {
-	if res, err := os.Hostname(); err == nil {
-		return res
-	}
-	return ""
-}
-
 // ============= //
 // == Network == //
 // ============= //
diff --git a/KubeArmor/config/config.go b/KubeArmor/config/config.go
index 4c7c299a18..09e20136c8 100644
--- a/KubeArmor/config/config.go
+++ b/KubeArmor/config/config.go
@@ -4,6 +4,7 @@
 package config
 
 import (
+	"fmt"
 	"os"
 	"strings"
 
@@ -47,7 +48,7 @@ var GlobalCfg KubearmorConfig
 const ConfigCluster string = "cluster"
 
 // ConfigHost Host name key
-const ConfigHost string = "localhost"
+const ConfigHost string = "host"
 
 // ConfigGRPC GRPC Port key
 const ConfigGRPC string = "gRPC"
@@ -98,7 +99,7 @@ func readCmdLineParams() {
 	seLinuxProfileDirStr := flag.String(ConfigSELinuxProfileDir, "/tmp/kubearmor.selinux", "SELinux profile directory")
 
 	visStr := flag.String(ConfigVisibility, "process,file,network,capabilities", "Container Visibility to use [process,file,network,capabilities,none]")
-	hostVisStr := flag.String(ConfigHostVisibility, "", "Host Visibility to use [process,file,network,capabilities,none] (default \"none\" for k8s, \"process,file,network,capabilities\" for VM)")
+	hostVisStr := flag.String(ConfigHostVisibility, "default", "Host Visibility to use [process,file,network,capabilities,none] (default \"none\" for k8s, \"process,file,network,capabilities\" for VM)")
 
 	policyB := flag.Bool(ConfigKubearmorPolicy, true, "enabling KubeArmorPolicy")
 	hostPolicyB := flag.Bool(ConfigKubearmorHostPolicy, false, "enabling KubeArmorHostPolicy")
@@ -111,6 +112,13 @@ func readCmdLineParams() {
 
 	coverageTestB := flag.Bool(ConfigCoverageTest, false, "enabling CoverageTest")
 
+	flags := []string{}
+	flag.VisitAll(func(f *flag.Flag) {
+		kv := fmt.Sprintf("%s:%v", f.Name, f.Value)
+		flags = append(flags, kv)
+	})
+	kg.Printf("Arguments [%s]", strings.Join(flags, " "))
+
 	flag.Parse()
 
 	viper.SetDefault(ConfigCluster, *clusterStr)
@@ -171,17 +179,20 @@ func LoadConfig() error {
 	GlobalCfg.Policy = viper.GetBool(ConfigKubearmorPolicy)
 	GlobalCfg.HostPolicy = viper.GetBool(ConfigKubearmorHostPolicy)
 	GlobalCfg.KVMAgent = viper.GetBool(ConfigKubearmorVM)
-	if GlobalCfg.KVMAgent {
-		GlobalCfg.Policy = false
-		GlobalCfg.HostPolicy = true
-	}
 	GlobalCfg.K8sEnv = viper.GetBool(ConfigK8sEnv)
 
 	GlobalCfg.DefaultFilePosture = viper.GetString(ConfigDefaultFilePosture)
 	GlobalCfg.DefaultNetworkPosture = viper.GetString(ConfigDefaultNetworkPosture)
 	GlobalCfg.DefaultCapabilitiesPosture = viper.GetString(ConfigDefaultCapabilitiesPosture)
 
-	if GlobalCfg.HostVisibility == "" {
+	kg.Printf("Configuration [%+v]", GlobalCfg)
+
+	if GlobalCfg.KVMAgent {
+		GlobalCfg.Policy = false
+		GlobalCfg.HostPolicy = true
+	}
+
+	if GlobalCfg.HostVisibility == "default" {
 		if GlobalCfg.KVMAgent || (!GlobalCfg.K8sEnv && GlobalCfg.HostPolicy) {
 			GlobalCfg.HostVisibility = "process,file,network,capabilities"
 		} else { // k8s
@@ -191,7 +202,7 @@
 
 	GlobalCfg.CoverageTest = viper.GetBool(ConfigCoverageTest)
 
-	kg.Printf("config [%+v]", GlobalCfg)
+	kg.Printf("Final Configuration [%+v]", GlobalCfg)
 
 	return nil
 }
diff --git a/KubeArmor/core/containerdHandler.go b/KubeArmor/core/containerdHandler.go
index 230e60cf3b..dd9b7a894a 100644
--- a/KubeArmor/core/containerdHandler.go
+++ b/KubeArmor/core/containerdHandler.go
@@ -116,6 +116,8 @@ func NewContainerdHandler() *ContainerdHandler {
 	// active containers
 	ch.containers = map[string]context.Context{}
 
+	kg.Print("Initialized Containerd Handler")
+
 	return ch
 }
 
diff --git a/KubeArmor/core/dockerHandler.go b/KubeArmor/core/dockerHandler.go
index 04dd47d9d3..ef67d74ba1 100644
--- a/KubeArmor/core/dockerHandler.go
+++ b/KubeArmor/core/dockerHandler.go
@@ -83,6 +83,8 @@ func NewDockerHandler() *DockerHandler {
 	}
 	docker.DockerClient = DockerClient
 
+	kg.Printf("Initialized Docker Handler (version: %s)", docker.Version.APIVersion)
+
 	return docker
 }
 
diff --git a/KubeArmor/core/kubeArmor.go b/KubeArmor/core/kubeArmor.go
index 3ead01a89c..8e0eecc8f1 100644
--- a/KubeArmor/core/kubeArmor.go
+++ b/KubeArmor/core/kubeArmor.go
@@ -318,11 +318,17 @@ func KubeArmor() {
 		dm.Node.Annotations = map[string]string{}
 		dm.HandleNodeAnnotations(&dm.Node)
 
+		hostInfo := kl.GetCommandOutputWithoutErr("hostnamectl", []string{})
+		for _, line := range strings.Split(hostInfo, "\n") {
+			if strings.Contains(line, "Operating System") {
+				dm.Node.OSImage = strings.Split(line, ": ")[1]
+				break
+			}
+		}
+
 		dm.Node.KernelVersion = kl.GetCommandOutputWithoutErr("uname", []string{"-r"})
 		dm.Node.KernelVersion = strings.TrimSuffix(dm.Node.KernelVersion, "\n")
 
-		kg.Print("Updated the node information")
-
 	} else if cfg.GlobalCfg.K8sEnv {
 		if !K8s.InitK8sClient() {
 			kg.Err("Failed to initialize Kubernetes client")
@@ -368,6 +374,19 @@ func KubeArmor() {
 		}
 	}
 
+	kg.Printf("Node Name: %s", dm.Node.NodeName)
+	kg.Printf("Node IP: %s", dm.Node.NodeIP)
+	if dm.K8sEnabled {
+		kg.Printf("Node Annotations: %v", dm.Node.Annotations)
+	}
+
+	kg.Printf("OS Image: %s", dm.Node.OSImage)
+	kg.Printf("Kernel Version: %s", dm.Node.KernelVersion)
+	if dm.K8sEnabled {
+		kg.Printf("Kubelet Version: %s", dm.Node.KubeletVersion)
+		kg.Printf("Container Runtime: %s", dm.Node.ContainerRuntimeVersion)
+	}
+
 	// == //
 
 	// initialize log feeder
@@ -420,8 +439,6 @@ func KubeArmor() {
 
 	if dm.K8sEnabled && cfg.GlobalCfg.Policy {
-		dm.Logger.Printf("Container Runtime: %s", dm.Node.ContainerRuntimeVersion)
-
 		if strings.HasPrefix(dm.Node.ContainerRuntimeVersion, "docker") {
 			sockFile := false
 
diff --git a/KubeArmor/core/kubeUpdate.go b/KubeArmor/core/kubeUpdate.go
index f6057a806d..3c5ed087f9 100644
--- a/KubeArmor/core/kubeUpdate.go
+++ b/KubeArmor/core/kubeUpdate.go
@@ -94,8 +94,8 @@ func (dm *KubeArmorDaemon) WatchK8sNodes() {
 
 			// Kubearmor uses hostname to get the corresponding node information, but there are exceptions.
 			// For example, the node name on EKS can be of the format <hostname>.<region>.compute.internal
-			nodeName := strings.Split(event.Object.ObjectMeta.Name, ".")
-			if nodeName[0] != cfg.GlobalCfg.Host {
+			nodeName := strings.Split(event.Object.ObjectMeta.Name, ".")[0]
+			if nodeName != cfg.GlobalCfg.Host {
 				continue
 			}
diff --git a/KubeArmor/enforcer/runtimeEnforcer.go b/KubeArmor/enforcer/runtimeEnforcer.go
index c738ff75d6..cf73b8386d 100644
--- a/KubeArmor/enforcer/runtimeEnforcer.go
+++ b/KubeArmor/enforcer/runtimeEnforcer.go
@@ -23,9 +23,11 @@ type RuntimeEnforcer struct {
 	// LSM type
 	EnforcerType string
 
-	// LSMs
+	// LSM - AppArmor
 	appArmorEnforcer *AppArmorEnforcer
-	seLinuxEnforcer  *SELinuxEnforcer
+
+	// LSM - SELinux
+	seLinuxEnforcer *SELinuxEnforcer
 }
 
@@ -55,9 +57,10 @@ func NewRuntimeEnforcer(node tp.Node, logger *fd.Feeder) *RuntimeEnforcer {
 		}
 	}
 
-	re.EnforcerType = string(lsm)
+	lsms := string(lsm)
+	re.Logger.Printf("Supported LSMs: %s", lsms)
 
-	if strings.Contains(re.EnforcerType, "apparmor") {
+	if strings.Contains(lsms, "apparmor") {
 		re.appArmorEnforcer = NewAppArmorEnforcer(node, logger)
 		if re.appArmorEnforcer != nil {
 			re.Logger.Print("Initialized AppArmor Enforcer")
@@ -65,7 +68,7 @@ func NewRuntimeEnforcer(node tp.Node, logger *fd.Feeder) *RuntimeEnforcer {
 			logger.UpdateEnforcer(re.EnforcerType)
 			return re
 		}
-	} else if strings.Contains(re.EnforcerType, "selinux") {
+	} else if strings.Contains(lsms, "selinux") {
 		if !kl.IsInK8sCluster() {
 			re.seLinuxEnforcer = NewSELinuxEnforcer(node, logger)
 			if re.seLinuxEnforcer != nil {
diff --git a/KubeArmor/main.go b/KubeArmor/main.go
index 9d51e26e8d..9613a446ee 100644
--- a/KubeArmor/main.go
+++ b/KubeArmor/main.go
@@ -6,6 +6,8 @@ package main
 import (
 	"os"
 	"path/filepath"
+	"syscall"
+	"time"
 
 	cfg "github.com/kubearmor/KubeArmor/KubeArmor/config"
 	"github.com/kubearmor/KubeArmor/KubeArmor/core"
@@ -29,8 +31,12 @@ func main() {
 		return
 	}
 
-	err = cfg.LoadConfig()
-	if err != nil {
+	if finfo, err := os.Stat(os.Args[0]); err == nil {
+		stat := finfo.Sys().(*syscall.Stat_t)
+		kg.Printf("Build Time: %v", time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)))
+	}
+
+	if err := cfg.LoadConfig(); err != nil {
 		kg.Err(err.Error())
 		return
 	}
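---

Note: the OSImage loop added in KubeArmor() indexes strings.Split(line, ": ")[1]
directly, so an "Operating System" line without the ": " separator would panic
the daemon. Below is a minimal defensive sketch of the same parsing, assuming
the usual hostnamectl output format; the helper name parseOSImage is
illustrative and not part of this patch.

package main

import (
	"fmt"
	"strings"
)

// parseOSImage scans hostnamectl output for the "Operating System" field
// and returns its value, or "" when the field or separator is missing.
func parseOSImage(hostInfo string) string {
	for _, line := range strings.Split(hostInfo, "\n") {
		if strings.Contains(line, "Operating System") {
			// SplitN with a length check avoids the out-of-range panic
			// that a bare [1] index would hit on a malformed line.
			parts := strings.SplitN(line, ": ", 2)
			if len(parts) == 2 {
				return strings.TrimSpace(parts[1])
			}
		}
	}
	return ""
}

func main() {
	sample := "   Static hostname: dev-vm\n  Operating System: Ubuntu 20.04.3 LTS\n            Kernel: Linux 5.11.0"
	fmt.Println(parseOSImage(sample)) // prints: Ubuntu 20.04.3 LTS
}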