[api] allow specifying security contexts (#107)
Allows users to specify their own pod- and container-level security contexts for all pods created by the operator's StatefulSets. Also gives the operator's own pod a more locked-down default security context.
schallert authored Mar 22, 2019
1 parent 5b9a494 commit 8bd67d6
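For illustration, here is a minimal Go sketch of a cluster spec that sets both new fields. It is not code from this commit; the module import path and the embedded ObjectMeta are assumptions about the repo layout, and the fsGroup/runAsUser values mirror the testM3DBCluster.yaml fixture updated further down.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	myspec "github.com/m3db/m3db-operator/pkg/apis/m3dboperator/v1alpha1"
)

func int64Ptr(i int64) *int64 { return &i }

// exampleCluster sets both new ClusterSpec fields, mirroring the values used
// by the testM3DBCluster.yaml fixture in this diff (fsGroup 10, runAsUser 20).
func exampleCluster() *myspec.M3DBCluster {
	return &myspec.M3DBCluster{
		ObjectMeta: metav1.ObjectMeta{Name: "example-cluster"},
		Spec: myspec.ClusterSpec{
			// Pod-level context, applied to every pod created by the
			// operator's StatefulSets.
			PodSecurityContext: &corev1.PodSecurityContext{
				FSGroup: int64Ptr(10),
			},
			// Container-level context, applied to the m3dbnode container.
			SecurityContext: &corev1.SecurityContext{
				RunAsUser: int64Ptr(20),
			},
		},
	}
}
```

The same two blocks can equally be set under spec: in a cluster manifest, as the updated pkg/k8sops/fixtures/testM3DBCluster.yaml fixture below shows.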
Showing 10 changed files with 77 additions and 66 deletions.
9 changes: 6 additions & 3 deletions Makefile
@@ -241,12 +241,15 @@ build-docker: ## Build m3db-operator docker image with go binary
@echo "--- $@"
@./build/build-docker.sh

.PHONE: helm-bundle
helm-bundle: install-codegen-tools
.PHONY: helm-bundle-no-deps
helm-bundle-no-deps:
@echo "--- $@"
@helm template helm/m3db-operator > bundle.yaml
@helm template --namespace default helm/m3db-operator > bundle.yaml
@PATH=$(retool_bin_path):$(PATH) kubeval -v=1.12.0 bundle.yaml

.PHONY: helm-bundle
helm-bundle: install-codegen-tools helm-bundle-no-deps

.PHONY: publish-helm-charts
publish-helm-charts: ## pushes a new version of the helm chart
@echo "+ $@"
4 changes: 4 additions & 0 deletions bundle.yaml
@@ -77,6 +77,10 @@ spec:
labels:
name: m3db-operator
spec:
securityContext:
runAsNonRoot: true
runAsUser: 65534
runAsGroup: 65534
containers:
- name: m3db-operator
image: quay.io/m3db/m3db-operator:v0.1.4
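The operator's own pod now defaults to a restricted pod security context: uid/gid 65534 are the conventional "nobody"/"nogroup" identities, and runAsNonRoot makes the kubelet refuse to start the container if its image would run as uid 0. A rough corev1 equivalent, for illustration only (not code from this commit):

```go
package example

import corev1 "k8s.io/api/core/v1"

// operatorPodSecurityContext mirrors the YAML added above: run as the
// conventional "nobody" user and group (uid/gid 65534) and refuse to run
// as root.
func operatorPodSecurityContext() *corev1.PodSecurityContext {
	runAsNonRoot := true
	uid, gid := int64(65534), int64(65534)
	return &corev1.PodSecurityContext{
		RunAsNonRoot: &runAsNonRoot,
		RunAsUser:    &uid,
		RunAsGroup:   &gid,
	}
}
```

The same block is added to the helm template below; bundle.yaml is the rendered output of that chart.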
2 changes: 2 additions & 0 deletions docs/api.md
@@ -48,6 +48,8 @@ ClusterSpec defines the desired state for a M3 cluster to be converge to.
| podIdentityConfig | PodIdentityConfig sets the configuration for pod identity. If unset only pod name and UID will be used. | *PodIdentityConfig | false |
| containerResources | Resources defines memory / cpu constraints for each container in the cluster. | [corev1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#resourcerequirements-v1-core) | false |
| dataDirVolumeClaimTemplate | DataDirVolumeClaimTemplate is the volume claim template for an M3DB instance's data. It claims PersistentVolumes for cluster storage, volumes are dynamically provisioned by when the StorageClass is defined. | *[corev1.PersistentVolumeClaim](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#persistentvolumeclaim-v1-core) | false |
| podSecurityContext | PodSecurityContext allows the user to specify an optional security context for pods. | *corev1.PodSecurityContext | false |
| securityContext | SecurityContext allows the user to specify a container-level security context. | *corev1.SecurityContext | false |
| labels | Labels sets the base labels that will be applied to resources created by the cluster. // TODO(schallert): design doc on labeling scheme. | map[string]string | false |

[Back to TOC](#table-of-contents)
4 changes: 4 additions & 0 deletions helm/m3db-operator/templates/stateful_set.yaml
@@ -14,6 +14,10 @@ spec:
labels:
name: {{ .Values.operator.name }}
spec:
securityContext:
runAsNonRoot: true
runAsUser: 65534
runAsGroup: 65534
containers:
- name: {{ .Values.operator.name }}
image: {{ .Values.image.repository}}:{{ .Values.image.tag }}
8 changes: 8 additions & 0 deletions pkg/apis/m3dboperator/v1alpha1/cluster.go
@@ -212,6 +212,14 @@ type ClusterSpec struct {
// +optional
DataDirVolumeClaimTemplate *corev1.PersistentVolumeClaim `json:"dataDirVolumeClaimTemplate,omitempty" yaml:"dataDirVolumeClaimTemplate"`

// PodSecurityContext allows the user to specify an optional security context
// for pods.
PodSecurityContext *corev1.PodSecurityContext `json:"podSecurityContext,omitempty"`

// SecurityContext allows the user to specify a container-level security
// context.
SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`

// Labels sets the base labels that will be applied to resources created by
// the cluster. // TODO(schallert): design doc on labeling scheme.
Labels map[string]string `json:"labels,omitempty" yaml:"labels"`
10 changes: 10 additions & 0 deletions pkg/apis/m3dboperator/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.
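The generated deepcopy diff is not shown here. For the two new pointer fields, deepcopy-gen conventionally emits the allocate-and-deep-copy pattern sketched below (illustrative stand-in type, not the actual contents of zz_generated.deepcopy.go in this commit):

```go
package example

import corev1 "k8s.io/api/core/v1"

// clusterSpecSecurityFields is a stand-in holding only the two new fields so
// that the deepcopy pattern below compiles on its own.
type clusterSpecSecurityFields struct {
	PodSecurityContext *corev1.PodSecurityContext
	SecurityContext    *corev1.SecurityContext
}

// DeepCopyInto shows the pattern deepcopy-gen emits for pointer fields:
// allocate a fresh value, then deep-copy the pointee into it.
func (in *clusterSpecSecurityFields) DeepCopyInto(out *clusterSpecSecurityFields) {
	*out = *in
	if in.PodSecurityContext != nil {
		in, out := &in.PodSecurityContext, &out.PodSecurityContext
		*out = new(corev1.PodSecurityContext)
		(*in).DeepCopyInto(*out)
	}
	if in.SecurityContext != nil {
		in, out := &in.SecurityContext, &out.SecurityContext
		*out = new(corev1.SecurityContext)
		(*in).DeepCopyInto(*out)
	}
}
```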

4 changes: 4 additions & 0 deletions pkg/k8sops/fixtures/testM3DBCluster.yaml
@@ -25,6 +25,10 @@ spec:
limits:
memory: 2Gi
cpu: '2'
podSecurityContext:
fsGroup: 10
securityContext:
runAsUser: 20
etcdEndpoints:
- ep0
- ep1
30 changes: 0 additions & 30 deletions pkg/k8sops/generators.go
@@ -32,7 +32,6 @@ import (
v1 "k8s.io/api/core/v1"
apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"

"github.com/kubernetes/utils/pointer"
)
@@ -132,37 +131,8 @@ func GenerateStatefulSet(

clusterSpec := cluster.Spec

// TODO(schallert): we're currently using the health of the coordinator for
// liveness probes until https://github.com/m3db/m3/issues/996 is fixed. Move
// to the dbnode's health endpoint once fixed.
probeHealth := &v1.Probe{
TimeoutSeconds: _probeTimeoutSeconds,
InitialDelaySeconds: _probeInitialDelaySeconds,
FailureThreshold: _probeFailureThreshold,
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(_probePort),
Path: _probePathHealth,
Scheme: v1.URISchemeHTTP,
},
},
}

probeReady := &v1.Probe{
TimeoutSeconds: _probeTimeoutSeconds,
InitialDelaySeconds: _probeInitialDelaySeconds,
FailureThreshold: _probeFailureThreshold,
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{_healthFileName},
},
},
}

statefulSet := NewBaseStatefulSet(ssName, isolationGroupName, cluster, instanceAmount)
m3dbContainer := &statefulSet.Spec.Template.Spec.Containers[0]
m3dbContainer.LivenessProbe = probeHealth
m3dbContainer.ReadinessProbe = probeReady
m3dbContainer.Resources = clusterSpec.ContainerResources
m3dbContainer.Ports = generateContainerPorts()
statefulSet.Spec.Template.Spec.Affinity = GenerateStatefulSetAffinity(isolationGroup)
16 changes: 7 additions & 9 deletions pkg/k8sops/generators_test.go
@@ -132,6 +132,9 @@ func TestGenerateStatefulSet(t *testing.T) {
Labels: labels,
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{
FSGroup: pointer.Int64Ptr(10),
},
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
@@ -151,17 +154,12 @@
},
Containers: []v1.Container{
{
Name: ssName,
SecurityContext: &v1.SecurityContext{
Privileged: &[]bool{true}[0],
Capabilities: &v1.Capabilities{
Add: []v1.Capability{
"IPC_LOCK",
},
},
},
Name: ssName,
LivenessProbe: health,
ReadinessProbe: readiness,
SecurityContext: &v1.SecurityContext{
RunAsUser: pointer.Int64Ptr(20),
},
Command: []string{
"m3dbnode",
},
56 changes: 32 additions & 24 deletions pkg/k8sops/statefulset.go
@@ -193,20 +193,6 @@ func (k *k8sops) CheckStatefulStatus(cluster *myspec.M3DBCluster, statefulSet *a
}

// NewBaseProbe returns a probe configured for default ports.
func NewBaseProbe() *v1.Probe {
return &v1.Probe{
TimeoutSeconds: _probeTimeoutSeconds,
InitialDelaySeconds: _probeInitialDelaySeconds,
FailureThreshold: _probeFailureThreshold,
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(_probePort),
Path: _probePathHealth,
Scheme: v1.URISchemeHTTP,
},
},
}
}

// NewBaseStatefulSet returns a base configured stateful set.
func NewBaseStatefulSet(ssName, isolationGroup string, cluster *myspec.M3DBCluster, instanceCount int32) *appsv1.StatefulSet {
@@ -223,6 +209,33 @@ func NewBaseStatefulSet(ssName, isolationGroup string, cluster *myspec.M3DBClust
objLabels[k] = v
}

// TODO(schallert): we're currently using the health of the coordinator for
// liveness probes until https://github.com/m3db/m3/issues/996 is fixed. Move
// to the dbnode's health endpoint once fixed.
probeHealth := &v1.Probe{
TimeoutSeconds: _probeTimeoutSeconds,
InitialDelaySeconds: _probeInitialDelaySeconds,
FailureThreshold: _probeFailureThreshold,
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(_probePort),
Path: _probePathHealth,
Scheme: v1.URISchemeHTTP,
},
},
}

probeReady := &v1.Probe{
TimeoutSeconds: _probeTimeoutSeconds,
InitialDelaySeconds: _probeInitialDelaySeconds,
FailureThreshold: _probeFailureThreshold,
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{_healthFileName},
},
},
}

return &appsv1.StatefulSet{
ObjectMeta: metav1.ObjectMeta{
Name: ssName,
@@ -239,18 +252,13 @@ func NewBaseStatefulSet(ssName, isolationGroup string, cluster *myspec.M3DBClust
Labels: objLabels,
},
Spec: v1.PodSpec{
SecurityContext: cluster.Spec.PodSecurityContext,
Containers: []v1.Container{
{
Name: ssName,
SecurityContext: &v1.SecurityContext{
Privileged: &[]bool{true}[0],
Capabilities: &v1.Capabilities{
Add: []v1.Capability{
"IPC_LOCK",
},
},
},
ReadinessProbe: nil,
Name: ssName,
SecurityContext: cluster.Spec.SecurityContext,
ReadinessProbe: probeReady,
LivenessProbe: probeHealth,
Command: []string{
"m3dbnode",
},
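Net effect of the statefulset.go changes: NewBaseStatefulSet now builds the liveness/readiness probes itself (previously done in GenerateStatefulSet) and threads the two user-supplied contexts straight into the pod template, and the previously hard-coded privileged/IPC_LOCK container context is gone, so clusters that need it must now set it explicitly via the new securityContext field. A hedged usage sketch, with import paths assumed from the file layout above:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	myspec "github.com/m3db/m3db-operator/pkg/apis/m3dboperator/v1alpha1"
	"github.com/m3db/m3db-operator/pkg/k8sops"
)

// securityContextsPropagate shows where the two spec fields land in the
// StatefulSet returned by NewBaseStatefulSet: the pod-level context on the
// pod template spec, the container-level context on the single m3dbnode
// container. Leaving either field unset simply leaves the corresponding
// field nil in the generated object.
func securityContextsPropagate(cluster *myspec.M3DBCluster) (*corev1.PodSecurityContext, *corev1.SecurityContext) {
	ss := k8sops.NewBaseStatefulSet("example-ss", "group-a", cluster, 3)
	podSpec := ss.Spec.Template.Spec
	return podSpec.SecurityContext, podSpec.Containers[0].SecurityContext
}
```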
