
Commit

Add emptyDir sizeLimit support for local dirs (#1993)
* Add emptyDir sizeLimit support

Signed-off-by: Jacob Salway <[email protected]>

* Bump appVersion and add sizeLimit example

Signed-off-by: Jacob Salway <[email protected]>

---------

Signed-off-by: Jacob Salway <[email protected]>
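
For context, a minimal sketch of what this change enables in a SparkApplication spec. The volume name, mount path, and 5Gi limit mirror the values in the docs and test below; the surrounding fields are illustrative and not part of this diff:

apiVersion: sparkoperator.k8s.io/v1beta2
kind: SparkApplication
metadata:
  name: spark-test
spec:
  volumes:
    - name: spark-local-dir-1
      emptyDir:
        sizeLimit: 5Gi
  driver:
    volumeMounts:
      - name: spark-local-dir-1
        mountPath: /tmp/mnt-1
  executor:
    volumeMounts:
      - name: spark-local-dir-1
        mountPath: /tmp/mnt-1

With the submission.go change below, the operator now forwards the sizeLimit as an emptyDir volume option in the generated Spark configuration for the driver and executor, rather than only the mount path.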
jacobsalway authored Apr 19, 2024
1 parent 4f26fe7 commit ccb3ceb
Showing 5 changed files with 64 additions and 4 deletions.
4 changes: 2 additions & 2 deletions charts/spark-operator-chart/Chart.yaml
@@ -1,8 +1,8 @@
 apiVersion: v2
 name: spark-operator
 description: A Helm chart for Spark on Kubernetes operator
-version: 1.2.11
-appVersion: v1beta2-1.4.3-3.5.0
+version: 1.2.12
+appVersion: v1beta2-1.4.4-3.5.0
 keywords:
 - spark
 home: https://github.com/kubeflow/spark-operator
2 changes: 1 addition & 1 deletion charts/spark-operator-chart/README.md
@@ -1,6 +1,6 @@
 # spark-operator
 
-![Version: 1.2.11](https://img.shields.io/badge/Version-1.2.11-informational?style=flat-square) ![AppVersion: v1beta2-1.4.3-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.3--3.5.0-informational?style=flat-square)
+![Version: 1.2.12](https://img.shields.io/badge/Version-1.2.12-informational?style=flat-square) ![AppVersion: v1beta2-1.4.4-3.5.0](https://img.shields.io/badge/AppVersion-v1beta2--1.4.4--3.5.0-informational?style=flat-square)
 
 A Helm chart for Spark on Kubernetes operator
 
3 changes: 2 additions & 1 deletion docs/user-guide.md
@@ -360,7 +360,8 @@ spec:
       persistentVolumeClaim:
         claimName: my-pvc
     - name: spark-work
-      emptyDir: {}
+      emptyDir:
+        sizeLimit: 5Gi
   driver:
     volumeMounts:
       - name: spark-work
3 changes: 3 additions & 0 deletions pkg/controller/sparkapplication/submission.go
@@ -516,6 +516,9 @@ func buildLocalVolumeOptions(prefix string, volume v1.Volume, volumeMount v1.Vol
 		}
 	case volume.EmptyDir != nil:
 		options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "emptyDir", volume.Name, volumeMount.MountPath))
+		if volume.EmptyDir.SizeLimit != nil {
+			options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "emptyDir", volume.Name, "sizeLimit", volume.EmptyDir.SizeLimit.String()))
+		}
 	case volume.PersistentVolumeClaim != nil:
 		options = append(options, fmt.Sprintf(VolumeMountPathTemplate, "persistentVolumeClaim", volume.Name, volumeMount.MountPath))
 		options = append(options, fmt.Sprintf(VolumeMountOptionTemplate, "persistentVolumeClaim", volume.Name, "claimName", volume.PersistentVolumeClaim.ClaimName))
56 changes: 56 additions & 0 deletions pkg/controller/sparkapplication/submission_test.go
@@ -29,6 +29,7 @@ import (
 	"github.com/stretchr/testify/assert"
 
 	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/kubeflow/spark-operator/pkg/apis/sparkoperator.k8s.io/v1beta2"
@@ -366,6 +367,61 @@ func TestAddLocalDir_Driver_Executor(t *testing.T) {
 	assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "hostPath", volumes[0].Name, "path", volumes[0].HostPath.Path), localDirOptions[3])
 }
 
+func TestAddEmptyDir_Driver_Executor_WithSizeLimit(t *testing.T) {
+	sizeLimit := resource.MustParse("5Gi")
+	volumes := []corev1.Volume{
+		{
+			Name: "spark-local-dir-1",
+			VolumeSource: corev1.VolumeSource{
+				EmptyDir: &corev1.EmptyDirVolumeSource{
+					SizeLimit: &sizeLimit,
+				},
+			},
+		},
+	}
+
+	volumeMounts := []corev1.VolumeMount{
+		{
+			Name:      "spark-local-dir-1",
+			MountPath: "/tmp/mnt-1",
+		},
+	}
+
+	app := &v1beta2.SparkApplication{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "spark-test",
+			UID:  "spark-test-1",
+		},
+		Spec: v1beta2.SparkApplicationSpec{
+			Volumes: volumes,
+			Driver: v1beta2.DriverSpec{
+				SparkPodSpec: v1beta2.SparkPodSpec{
+					VolumeMounts: volumeMounts,
+				},
+			},
+			Executor: v1beta2.ExecutorSpec{
+				SparkPodSpec: v1beta2.SparkPodSpec{
+					VolumeMounts: volumeMounts,
+				},
+			},
+		},
+	}
+
+	localDirOptions, err := addLocalDirConfOptions(app)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	assert.Equal(t, 0, len(app.Spec.Volumes))
+	assert.Equal(t, 0, len(app.Spec.Driver.VolumeMounts))
+	assert.Equal(t, 0, len(app.Spec.Executor.VolumeMounts))
+	assert.Equal(t, 4, len(localDirOptions))
+	assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "driver", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[0])
+	assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "driver", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[1])
+	assert.Equal(t, fmt.Sprintf(VolumeMountPathTemplate, "executor", "emptyDir", volumes[0].Name, volumeMounts[0].MountPath), localDirOptions[2])
+	assert.Equal(t, fmt.Sprintf(VolumeMountOptionPathTemplate, "executor", "emptyDir", volumes[0].Name, "sizeLimit", volumes[0].EmptyDir.SizeLimit.String()), localDirOptions[3])
+}
+
 func TestPopulateLabels_Driver_Executor(t *testing.T) {
 	const (
 		AppLabelKey = "app-label-key"