feat: Add check K8S008 for detecting the use of the Docker socket (#26)
bryantbiggs authored Feb 23, 2023

1 parent c59f5a3 commit bef79ec
Showing 9 changed files with 89 additions and 8 deletions.
6 changes: 4 additions & 2 deletions eksup/src/analysis.rs
@@ -2,7 +2,7 @@ use anyhow::Result;
use aws_sdk_eks::model::Cluster;
use serde::{Deserialize, Serialize};

-use crate::{eks, finding::Findings, k8s};
+use crate::{eks, finding::Findings, k8s, version};

/// Container of all findings collected
#[derive(Debug, Serialize, Deserialize)]
@@ -37,6 +37,7 @@ impl Results {
output.push_str(&self.kubernetes.pod_topology_distribution.to_stdout_table()?);
output.push_str(&self.kubernetes.readiness_probe.to_stdout_table()?);
output.push_str(&self.kubernetes.termination_grace_period.to_stdout_table()?);
+output.push_str(&self.kubernetes.docker_socket.to_stdout_table()?);

Ok(output)
}
@@ -52,13 +53,14 @@ pub(crate) async fn analyze(aws_shared_config: &aws_config::SdkConfig, cluster:

let cluster_name = cluster.name().unwrap();
let cluster_version = cluster.version().unwrap();
+let target_version = version::get_target_version(cluster_version)?;

let cluster_findings = eks::get_cluster_findings(cluster).await?;
let subnet_findings = eks::get_subnet_findings(&ec2_client, &k8s_client, cluster).await?;
let addon_findings = eks::get_addon_findings(&eks_client, cluster_name, cluster_version).await?;
let dataplane_findings =
eks::get_data_plane_findings(&asg_client, &ec2_client, &eks_client, &k8s_client, cluster).await?;
-let kubernetes_findings = k8s::get_kubernetes_findings(&k8s_client).await?;
+let kubernetes_findings = k8s::get_kubernetes_findings(&k8s_client, &target_version).await?;

Ok(Results {
cluster: cluster_findings,
4 changes: 4 additions & 0 deletions eksup/src/finding.rs
@@ -139,6 +139,9 @@ pub enum Code {

/// `pod.spec.TerminationGracePeriodSeconds` is set to zero
K8S007,
+
+/// Mounts `docker.sock` or `dockershim.sock`
+K8S008,
}

impl std::fmt::Display for Code {
@@ -163,6 +166,7 @@ impl std::fmt::Display for Code {
Code::K8S005 => write!(f, "K8S005"),
Code::K8S006 => write!(f, "K8S006"),
Code::K8S007 => write!(f, "K8S007"),
+Code::K8S008 => write!(f, "K8S008"),
}
}
}
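
The `Display` impl above means a finding's code renders directly wherever it is formatted as a string, e.g. in the report tables. A trivial illustration (not part of the commit; assumes it sits in the crate's test module):

```rust
// Illustrative only -- exercises the Display arm added in this commit.
#[test]
fn code_displays_as_its_identifier() {
    assert_eq!(finding::Code::K8S008.to_string(), "K8S008");
}
```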
6 changes: 4 additions & 2 deletions eksup/src/k8s/checks.rs
@@ -368,6 +368,8 @@ pub struct DockerSocket {

#[tabled(inline)]
pub resource: Resource,
+
+pub docker_socket: bool,
}

impl Findings for Vec<DockerSocket> {
@@ -420,6 +422,6 @@ pub trait K8sFindings {
/// K8S007 - check if StatefulSets have terminationGracePeriodSeconds == 0
fn termination_grace_period(&self) -> Option<TerminationGracePeriod>;

-// /// K8S008 - check if resources use the Docker socket
-// fn docker_socket(&self) -> Option<DockerSocket>;
+/// K8S008 - check if resources use the Docker socket
+fn docker_socket(&self, target_version: &str) -> Option<DockerSocket>;
}
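
The body of `impl Findings for Vec<DockerSocket>` is elided in this hunk, but the call sites elsewhere in the diff (`to_stdout_table()?` in analysis.rs, `to_markdown_table("\t")?` in playbook.rs) suggest the trait looks roughly like the sketch below. Signatures are inferred from those call sites, not taken from the crate:

```rust
// Inferred sketch of the Findings trait -- an assumption based on the call
// sites in this diff, not the crate's actual definition.
pub trait Findings {
    /// Render the findings as a table for stdout
    fn to_stdout_table(&self) -> anyhow::Result<String>;
    /// Render the findings as a markdown table, indented with `leading_whitespace`
    fn to_markdown_table(&self, leading_whitespace: &str) -> anyhow::Result<String>;
}
```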
10 changes: 8 additions & 2 deletions eksup/src/k8s/findings.rs
@@ -14,25 +14,31 @@ pub struct KubernetesFindings {
pub readiness_probe: Vec<checks::Probe>,
pub pod_topology_distribution: Vec<checks::PodTopologyDistribution>,
pub termination_grace_period: Vec<checks::TerminationGracePeriod>,
+pub docker_socket: Vec<checks::DockerSocket>,
}

-pub async fn get_kubernetes_findings(k8s_client: &K8sClient) -> Result<KubernetesFindings> {
+pub async fn get_kubernetes_findings(k8s_client: &K8sClient, target_version: &str) -> Result<KubernetesFindings> {
let resources = resources::get_resources(k8s_client).await?;

let min_replicas: Vec<checks::MinReplicas> = resources.iter().filter_map(|s| s.min_replicas()).collect();
let min_ready_seconds: Vec<checks::MinReadySeconds> =
resources.iter().filter_map(|s| s.min_ready_seconds()).collect();
-let readiness_probe: Vec<checks::Probe> = resources.iter().filter_map(|s| s.readiness_probe()).collect();
let pod_topology_distribution: Vec<checks::PodTopologyDistribution> =
resources.iter().filter_map(|s| s.pod_topology_distribution()).collect();
+let readiness_probe: Vec<checks::Probe> = resources.iter().filter_map(|s| s.readiness_probe()).collect();
let termination_grace_period: Vec<checks::TerminationGracePeriod> =
resources.iter().filter_map(|s| s.termination_grace_period()).collect();
+let docker_socket: Vec<checks::DockerSocket> = resources
+.iter()
+.filter_map(|s| s.docker_socket(target_version))
+.collect();

Ok(KubernetesFindings {
min_replicas,
min_ready_seconds,
readiness_probe,
pod_topology_distribution,
termination_grace_period,
+docker_socket,
})
}
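
Each check returns an `Option`, so `filter_map` keeps only the resources that actually produced a finding; the new `docker_socket` collection follows the same pattern while threading `target_version` through. Written as an explicit loop, it is equivalent to the following (illustration only, not the commit's code):

```rust
// Equivalent loop form of the filter_map collection above.
let mut docker_socket: Vec<checks::DockerSocket> = Vec::new();
for resource in &resources {
    if let Some(finding) = resource.docker_socket(target_version) {
        docker_socket.push(finding);
    }
}
```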
44 changes: 43 additions & 1 deletion eksup/src/k8s/resources.rs
@@ -7,7 +7,7 @@ use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
use tabled::Tabled;

-use crate::{finding, k8s::checks};
+use crate::{finding, k8s::checks, version};

/// Custom resource definition for ENIConfig as specified in the AWS VPC CNI
///
@@ -501,6 +501,48 @@ impl checks::K8sFindings for StdResource {
None => None,
}
}
+
+fn docker_socket(&self, target_version: &str) -> Option<checks::DockerSocket> {
+let pod_template = self.spec.template.to_owned();
+
+let target_version = version::parse_minor(target_version).unwrap();
+if target_version > 24 {
+// From 1.25+, there shouldn't be any further action required
+return None;
+}
+let remediation = if target_version < 24 {
+finding::Remediation::Recommended
+} else {
+finding::Remediation::Required
+};
+
+match pod_template {
+Some(pod_template) => {
+let containers = pod_template.spec.unwrap_or_default().containers;
+
+for container in containers {
+let volume_mounts = container.volume_mounts.unwrap_or_default();
+for volume_mount in volume_mounts {
+if volume_mount.mount_path.contains("docker.sock") || volume_mount.mount_path.contains("dockershim.sock") {
+let finding = finding::Finding {
+code: finding::Code::K8S008,
+symbol: remediation.symbol(),
+remediation,
+};
+
+return Some(checks::DockerSocket {
+finding,
+resource: self.get_resource(),
+docker_socket: true,
+});
+}
+}
+}
+None
+}
+None => None,
+}
+}
}

pub async fn get_resources(client: &Client) -> Result<Vec<StdResource>> {
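
The detection itself is a plain substring match on each container's mount path. Note that `"/var/run/dockershim.sock"` does not contain the substring `"docker.sock"` (the `.` breaks the match), which is why both conditions are needed. A small test sketch illustrating the behavior — the mount paths are hypothetical examples, not taken from the commit:

```rust
// Hypothetical test illustrating the substring match in docker_socket();
// assumes it sits in the crate's test module.
#[test]
fn flags_docker_socket_mount_paths() {
    for path in ["/var/run/docker.sock", "/var/run/dockershim.sock"] {
        assert!(path.contains("docker.sock") || path.contains("dockershim.sock"));
    }
    // containerd's socket should not be flagged
    let containerd = "/run/containerd/containerd.sock";
    assert!(!containerd.contains("docker.sock") && !containerd.contains("dockershim.sock"));
}
```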
2 changes: 2 additions & 0 deletions eksup/src/playbook.rs
@@ -60,6 +60,7 @@ pub struct TemplateData {
pod_topology_distribution: String,
readiness_probe: String,
termination_grace_period: String,
+docker_socket: String,
}

fn get_release_data() -> Result<HashMap<Version, Release>> {
@@ -183,6 +184,7 @@ pub(crate) fn create(args: &Playbook, cluster: &Cluster, analysis: analysis::Res
pod_topology_distribution: kubernetes_findings.pod_topology_distribution.to_markdown_table("\t")?,
readiness_probe: kubernetes_findings.readiness_probe.to_markdown_table("\t")?,
termination_grace_period: kubernetes_findings.termination_grace_period.to_markdown_table("\t")?,
+docker_socket: kubernetes_findings.docker_socket.to_markdown_table("\t")?,
};

let filename = match &args.filename {
6 changes: 6 additions & 0 deletions eksup/src/version.rs
@@ -8,6 +8,12 @@ use serde::{Deserialize, Serialize};
/// Latest support version
pub const LATEST: &str = "1.25";

+#[derive(Debug, Serialize, Deserialize)]
+pub struct Versions {
+pub current: String,
+pub target: String,
+}
+
seq!(N in 20..=24 {
/// Kubernetes version(s) supported
#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
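
Neither `version::parse_minor` (called in resources.rs above) nor `version::get_target_version` (called in analysis.rs) appears in this diff. Assuming versions are `"<major>.<minor>"` strings such as `"1.24"`, they presumably behave along these lines — a sketch based on the call sites, not the crate's actual implementation:

```rust
use anyhow::{anyhow, Result};

// Sketches only -- the real helpers live in eksup/src/version.rs and are not
// part of this diff; the behavior here is an assumption.

/// Extract the minor component from a "<major>.<minor>" version string
pub(crate) fn parse_minor(version: &str) -> Result<i32> {
    version
        .split('.')
        .nth(1)
        .ok_or_else(|| anyhow!("invalid version: {version}"))?
        .parse()
        .map_err(Into::into)
}

/// The next minor version, e.g. "1.23" -> "1.24" (assumes a 1.x major)
pub(crate) fn get_target_version(current_version: &str) -> Result<String> {
    Ok(format!("1.{}", parse_minor(current_version)? + 1))
}
```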
3 changes: 3 additions & 0 deletions eksup/templates/playbook.md
@@ -221,6 +221,9 @@ When upgrading the control plane, Amazon EKS performs standard infrastructure an
#### Check [[K8S007]](https://clowdhaus.github.io/eksup/process/checks/#k8s007)
{{ termination_grace_period }}

+#### Check [[K8S008]](https://clowdhaus.github.io/eksup/process/checks/#k8s008)
+{{ docker_socket }}
+
2. Inspect [AWS service quotas](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) before upgrading. Accounts that are multi-tenant or already have a number of resources provisioned may be at risk of hitting service quota limits which will cause the cluster upgrade to fail, or impede the upgrade process.

{{#if pod_ips}}
16 changes: 15 additions & 1 deletion examples/test-mixed_v1.24_upgrade.md
@@ -73,8 +73,8 @@
#### Check [[K8S001]](https://clowdhaus.github.io/eksup/process/checks/#k8s001)
| CHECK | | NODE | CONTROL PLANE | SKEW | QUANTITY |
|--------|----|-------|---------------|------|----------|
-| K8S001 | ⚠️ | v1.22 | v1.23 | +1 | 2 |
| K8S001 | ❌ | v1.21 | v1.23 | +2 | 2 |
+| K8S001 | ⚠️ | v1.22 | v1.23 | +1 | 2 |

| | NAME | NODE | CONTROL PLANE | SKEW |
|----|-----------------------------|-------|---------------|------|
@@ -248,6 +248,20 @@ When upgrading the control plane, Amazon EKS performs standard infrastructure an
| ❌ | bad-ss | statefulset | StatefulSet | 0 |


+#### Check [[K8S008]](https://clowdhaus.github.io/eksup/process/checks/#k8s008)
+| | NAME | NAMESPACE | KIND | DOCKERSOCKET |
+|----|-------------------|-------------|-------------|--------------|
+| ❌ | bad-cron | cronjob | CronJob | true |
+| ❌ | bad-ds | daemonset | DaemonSet | true |
+| ❌ | aws-node | kube-system | DaemonSet | true |
+| ❌ | bad-dpl | deployment | Deployment | true |
+| ❌ | bad-cron-27953110 | cronjob | Job | true |
+| ❌ | bad-cron-27953115 | cronjob | Job | true |
+| ❌ | bad-cron-27953120 | cronjob | Job | true |
+| ❌ | bad-job | job | Job | true |
+| ❌ | bad-ss | statefulset | StatefulSet | true |
+
+
2. Inspect [AWS service quotas](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) before upgrading. Accounts that are multi-tenant or already have a number of resources provisioned may be at risk of hitting service quota limits which will cause the cluster upgrade to fail, or impede the upgrade process.

3. Verify that there is sufficient IP space available to the pods running in the cluster when using custom networking. With the in-place, surge upgrade process, there will be higher IP consumption during the upgrade.
