Merge pull request kubearmor#715 from kloudmax/hostname
add debug info
nyrahul authored May 26, 2022
2 parents 72de65d + 0f79e8e commit 658b254
Showing 8 changed files with 62 additions and 33 deletions.
12 changes: 0 additions & 12 deletions KubeArmor/common/common.go
@@ -274,18 +274,6 @@ func RunCommandAndWaitWithErr(cmd string, args []string) error {
return nil
}

// ========== //
// == Host == //
// ========== //

// GetHostName Function
func GetHostName() string {
if res, err := os.Hostname(); err == nil {
return res
}
return ""
}

// ============= //
// == Network == //
// ============= //
27 changes: 19 additions & 8 deletions KubeArmor/config/config.go
Expand Up @@ -4,6 +4,7 @@
package config

import (
"fmt"
"os"
"strings"

@@ -47,7 +48,7 @@ var GlobalCfg KubearmorConfig
const ConfigCluster string = "cluster"

// ConfigHost Host name key
const ConfigHost string = "localhost"
const ConfigHost string = "host"

// ConfigGRPC GRPC Port key
const ConfigGRPC string = "gRPC"
@@ -98,7 +99,7 @@ func readCmdLineParams() {
seLinuxProfileDirStr := flag.String(ConfigSELinuxProfileDir, "/tmp/kubearmor.selinux", "SELinux profile directory")

visStr := flag.String(ConfigVisibility, "process,file,network,capabilities", "Container Visibility to use [process,file,network,capabilities,none]")
hostVisStr := flag.String(ConfigHostVisibility, "", "Host Visibility to use [process,file,network,capabilities,none] (default \"none\" for k8s, \"process,file,network,capabilities\" for VM)")
hostVisStr := flag.String(ConfigHostVisibility, "default", "Host Visibility to use [process,file,network,capabilities,none] (default \"none\" for k8s, \"process,file,network,capabilities\" for VM)")

policyB := flag.Bool(ConfigKubearmorPolicy, true, "enabling KubeArmorPolicy")
hostPolicyB := flag.Bool(ConfigKubearmorHostPolicy, false, "enabling KubeArmorHostPolicy")
@@ -111,6 +112,13 @@

coverageTestB := flag.Bool(ConfigCoverageTest, false, "enabling CoverageTest")

flags := []string{}
flag.VisitAll(func(f *flag.Flag) {
kv := fmt.Sprintf("%s:%v", f.Name, f.Value)
flags = append(flags, kv)
})
kg.Printf("Arguments [%s]", strings.Join(flags, " "))

flag.Parse()

viper.SetDefault(ConfigCluster, *clusterStr)
@@ -171,17 +179,20 @@ func LoadConfig() error {
GlobalCfg.Policy = viper.GetBool(ConfigKubearmorPolicy)
GlobalCfg.HostPolicy = viper.GetBool(ConfigKubearmorHostPolicy)
GlobalCfg.KVMAgent = viper.GetBool(ConfigKubearmorVM)
if GlobalCfg.KVMAgent {
GlobalCfg.Policy = false
GlobalCfg.HostPolicy = true
}
GlobalCfg.K8sEnv = viper.GetBool(ConfigK8sEnv)

GlobalCfg.DefaultFilePosture = viper.GetString(ConfigDefaultFilePosture)
GlobalCfg.DefaultNetworkPosture = viper.GetString(ConfigDefaultNetworkPosture)
GlobalCfg.DefaultCapabilitiesPosture = viper.GetString(ConfigDefaultCapabilitiesPosture)

if GlobalCfg.HostVisibility == "" {
kg.Printf("Configuration [%+v]", GlobalCfg)

if GlobalCfg.KVMAgent {
GlobalCfg.Policy = false
GlobalCfg.HostPolicy = true
}

if GlobalCfg.HostVisibility == "default" {
if GlobalCfg.KVMAgent || (!GlobalCfg.K8sEnv && GlobalCfg.HostPolicy) {
GlobalCfg.HostVisibility = "process,file,network,capabilities"
} else { // k8s
@@ -191,7 +202,7 @@

GlobalCfg.CoverageTest = viper.GetBool(ConfigCoverageTest)

kg.Printf("config [%+v]", GlobalCfg)
kg.Printf("Final Configuration [%+v]", GlobalCfg)

return nil
}
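
For reference, a minimal standalone sketch of the flag-dump pattern added in config.go above (not the KubeArmor code itself; the two flags registered here are illustrative, and VisitAll runs after flag.Parse() so the effective values are reported):

package main

import (
	"flag"
	"fmt"
	"strings"
)

func main() {
	// Illustrative flags; the real daemon registers many more (cluster, host, gRPC, ...).
	flag.String("cluster", "default", "cluster name")
	flag.String("gRPC", "32767", "gRPC port number")
	flag.Parse()

	// Collect every registered flag as "name:value" and print one summary line,
	// mirroring the debug output added in config.go.
	flags := []string{}
	flag.VisitAll(func(f *flag.Flag) {
		flags = append(flags, fmt.Sprintf("%s:%v", f.Name, f.Value))
	})
	fmt.Printf("Arguments [%s]\n", strings.Join(flags, " "))
}
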
2 changes: 2 additions & 0 deletions KubeArmor/core/containerdHandler.go
@@ -116,6 +116,8 @@ func NewContainerdHandler() *ContainerdHandler {
// active containers
ch.containers = map[string]context.Context{}

kg.Print("Initialized Containerd Handler")

return ch
}

2 changes: 2 additions & 0 deletions KubeArmor/core/dockerHandler.go
@@ -83,6 +83,8 @@ func NewDockerHandler() *DockerHandler {
}
docker.DockerClient = DockerClient

kg.Printf("Initialized Docker Handler (version: %s)", docker.Version.APIVersion)

return docker
}
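
The version string logged above comes from the Docker daemon. A rough standalone sketch of how such a value can be obtained with the official Docker Go client (assuming github.com/docker/docker/client; not the handler's actual wiring):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// Connect using the environment (DOCKER_HOST, etc.) and negotiate the API version.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// ServerVersion returns the daemon's version information, including APIVersion,
	// which is the kind of value the handler logs at startup.
	ver, err := cli.ServerVersion(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Printf("Initialized Docker Handler (version: %s)\n", ver.APIVersion)
}
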

25 changes: 21 additions & 4 deletions KubeArmor/core/kubeArmor.go
@@ -318,11 +318,17 @@ func KubeArmor() {
dm.Node.Annotations = map[string]string{}
dm.HandleNodeAnnotations(&dm.Node)

hostInfo := kl.GetCommandOutputWithoutErr("hostnamectl", []string{})
for _, line := range strings.Split(hostInfo, "\n") {
if strings.Contains(line, "Operating System") {
dm.Node.OSImage = strings.Split(line, ": ")[1]
break
}
}

dm.Node.KernelVersion = kl.GetCommandOutputWithoutErr("uname", []string{"-r"})
dm.Node.KernelVersion = strings.TrimSuffix(dm.Node.KernelVersion, "\n")

kg.Print("Updated the node information")

} else if cfg.GlobalCfg.K8sEnv {
if !K8s.InitK8sClient() {
kg.Err("Failed to initialize Kubernetes client")
@@ -368,6 +374,19 @@
}
}

kg.Printf("Node Name: %s", dm.Node.NodeName)
kg.Printf("Node IP: %s", dm.Node.NodeIP)
if dm.K8sEnabled {
kg.Printf("Node Annotations: %v", dm.Node.Annotations)
}

kg.Printf("OS Image: %s", dm.Node.OSImage)
kg.Printf("Kernel Version: %s", dm.Node.KernelVersion)
if dm.K8sEnabled {
kg.Printf("Kubelet Version: %s", dm.Node.KubeletVersion)
kg.Printf("Container Runtime: %s", dm.Node.ContainerRuntimeVersion)
}

// == //

// initialize log feeder
@@ -420,8 +439,6 @@
// == //

if dm.K8sEnabled && cfg.GlobalCfg.Policy {
dm.Logger.Printf("Container Runtime: %s", dm.Node.ContainerRuntimeVersion)

if strings.HasPrefix(dm.Node.ContainerRuntimeVersion, "docker") {
sockFile := false

4 changes: 2 additions & 2 deletions KubeArmor/core/kubeUpdate.go
@@ -94,8 +94,8 @@ func (dm *KubeArmorDaemon) WatchK8sNodes() {

// Kubearmor uses hostname to get the corresponding node information, but there are exceptions.
// For example, the node name on EKS can be of the format <hostname>.<region>.compute.internal
nodeName := strings.Split(event.Object.ObjectMeta.Name, ".")
if nodeName[0] != cfg.GlobalCfg.Host {
nodeName := strings.Split(event.Object.ObjectMeta.Name, ".")[0]
if nodeName != cfg.GlobalCfg.Host {
continue
}
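
A small illustrative helper capturing the node-name comparison above (the EKS-style node name below is a made-up example):

package main

import (
	"fmt"
	"strings"
)

// matchesHost reports whether a k8s node name belongs to this host.
// On EKS the node name can be "<hostname>.<region>.compute.internal",
// so only the first dot-separated component is compared, as in kubeUpdate.go.
func matchesHost(nodeName, host string) bool {
	return strings.Split(nodeName, ".")[0] == host
}

func main() {
	fmt.Println(matchesHost("ip-10-0-1-23.us-west-2.compute.internal", "ip-10-0-1-23")) // true
	fmt.Println(matchesHost("worker-1", "worker-2"))                                    // false
}
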

13 changes: 8 additions & 5 deletions KubeArmor/enforcer/runtimeEnforcer.go
@@ -23,9 +23,11 @@ type RuntimeEnforcer struct {
// LSM type
EnforcerType string

// LSMs
// LSM - AppArmor
appArmorEnforcer *AppArmorEnforcer
seLinuxEnforcer *SELinuxEnforcer

// LSM - SELinux
seLinuxEnforcer *SELinuxEnforcer
}

// NewRuntimeEnforcer Function
@@ -55,17 +57,18 @@ func NewRuntimeEnforcer(node tp.Node, logger *fd.Feeder) *RuntimeEnforcer {
}
}

re.EnforcerType = string(lsm)
lsms := string(lsm)
re.Logger.Printf("Supported LSMs: %s", lsms)

if strings.Contains(re.EnforcerType, "apparmor") {
if strings.Contains(lsms, "apparmor") {
re.appArmorEnforcer = NewAppArmorEnforcer(node, logger)
if re.appArmorEnforcer != nil {
re.Logger.Print("Initialized AppArmor Enforcer")
re.EnforcerType = "AppArmor"
logger.UpdateEnforcer(re.EnforcerType)
return re
}
} else if strings.Contains(re.EnforcerType, "selinux") {
} else if strings.Contains(lsms, "selinux") {
if !kl.IsInK8sCluster() {
re.seLinuxEnforcer = NewSELinuxEnforcer(node, logger)
if re.seLinuxEnforcer != nil {
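
A rough sketch of LSM detection in the spirit of the code above; it assumes the kernel exposes the active LSM list at /sys/kernel/security/lsm, which may not be exactly what the enforcer reads:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// On most distributions the active LSMs are listed, comma-separated,
	// in /sys/kernel/security/lsm (assumed path for this sketch).
	lsm, err := os.ReadFile("/sys/kernel/security/lsm")
	if err != nil {
		fmt.Println("unable to read LSM list:", err)
		return
	}
	lsms := string(lsm)
	fmt.Printf("Supported LSMs: %s\n", lsms)

	switch {
	case strings.Contains(lsms, "apparmor"):
		fmt.Println("would initialize the AppArmor enforcer")
	case strings.Contains(lsms, "selinux"):
		fmt.Println("would initialize the SELinux enforcer")
	default:
		fmt.Println("no supported LSM found")
	}
}
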
10 changes: 8 additions & 2 deletions KubeArmor/main.go
@@ -6,6 +6,8 @@ package main
import (
"os"
"path/filepath"
"syscall"
"time"

cfg "github.com/kubearmor/KubeArmor/KubeArmor/config"
"github.com/kubearmor/KubeArmor/KubeArmor/core"
@@ -29,8 +31,12 @@ func main() {
return
}

err = cfg.LoadConfig()
if err != nil {
if finfo, err := os.Stat(os.Args[0]); err == nil {
stat := finfo.Sys().(*syscall.Stat_t)
kg.Printf("Build Time: %v", time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)))
}

if err := cfg.LoadConfig(); err != nil {
kg.Err(err.Error())
return
}
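
A standalone sketch of the "Build Time" log added above; it is Linux-specific (syscall.Stat_t / Ctim) and prints the binary's inode change time, which only approximates when the binary was built:

package main

import (
	"fmt"
	"os"
	"syscall"
	"time"
)

func main() {
	// Stat the running binary and print its inode change time, mirroring the
	// startup log added in main.go.
	finfo, err := os.Stat(os.Args[0])
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	if stat, ok := finfo.Sys().(*syscall.Stat_t); ok {
		fmt.Printf("Build Time: %v\n", time.Unix(int64(stat.Ctim.Sec), int64(stat.Ctim.Nsec)))
	}
}
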
