Merge #774
774: ci: i/o soak test - ensure remote replicas r=blaisedias a=blaisedias

Ensure the I/O soak test runs with some volumes provisioned on a remote node.
Run the I/O soak test in a configuration where at least one instance of
Mayastor is running on a node where no application pods are running, so that
some volumes can only be provisioned on a remote node.

Do this by adding labels to "alternate" nodes and node selectors to the test
pods. This relies on the fact that, if enough pods and volumes are scheduled
to run simultaneously, MOAC will place some replicas on nodes where the test
applications are not running.

This is deemed good enough and has been observed to work on a 3-node cluster
with 30 fio pods and volumes.

Co-authored-by: Blaise Dias <[email protected]>
mayastor-bors and blaisedias committed Mar 9, 2021
2 parents d7f0d3c + 4f0fd72 commit 88edede
Showing 5 changed files with 65 additions and 18 deletions.
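
In outline, the change labels every other Mayastor node and constrains the fio pods to the labelled nodes with a node selector, leaving the remaining nodes to host only replicas. The diffs below do this with the repository's common test helpers (common.LabelNode, CreateFioPodDef, CreatePod); the following is a minimal, self-contained sketch of the same pattern using client-go directly. The namespace, pod name, and fio image here are illustrative assumptions, not taken from the PR.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

const (
	selectorKey   = "e2e-io-soak" // label key used by the soak test
	selectorValue = "e2e-app"     // marks nodes that may run application pods
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	ctx := context.TODO()

	// Label every other node. Unlabelled nodes can still host Mayastor
	// replicas but no application pods, so some replicas end up remote.
	nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for i := range nodes.Items {
		if i%2 != 0 {
			continue
		}
		node := &nodes.Items[i]
		if node.Labels == nil {
			node.Labels = map[string]string{}
		}
		node.Labels[selectorKey] = selectorValue
		if _, err := client.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}); err != nil {
			panic(err)
		}
	}

	// Pin an application (fio) pod to the labelled nodes via a node selector.
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "fio-soak-example"},
		Spec: corev1.PodSpec{
			NodeSelector:  map[string]string{selectorKey: selectorValue},
			RestartPolicy: corev1.RestartPolicyNever,
			Containers: []corev1.Container{{
				Name:    "fio",
				Image:   "dmonakhov/alpine-fio", // illustrative image, not from the PR
				Command: []string{"sleep", "3600"},
			}},
		},
	}
	if _, err := client.CoreV1().Pods("default").Create(ctx, pod, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("labelled alternate nodes and created a selector-pinned fio pod")
}
```

On a three-node cluster, the test sorts the Mayastor nodes by name and labels indices 0 and 2, so at least one Mayastor node carries no application pods; with enough volumes in flight, some replicas placed there are remote from every fio pod.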
8 changes: 4 additions & 4 deletions scripts/e2e-test.sh
@@ -16,10 +16,10 @@ TOPDIR=$(realpath "$SCRIPTDIR/..")
# 2. replicas_pod_remove SHOULD be the last test before uninstall
# this is a disruptive test.
#TESTS="install basic_volume_io csi replica rebuild node_disconnect/replica_pod_remove uninstall"
ALL_TESTS="install basic_volume_io csi resource_check replica rebuild uninstall"
DEFAULT_TESTS="install basic_volume_io csi resource_check replica rebuild uninstall"
ONDEMAND_TESTS="install basic_volume_io csi resource_check uninstall"
EXTENDED_TESTS="install basic_volume_io csi resource_check uninstall"
CONTINUOUS_TESTS="install basic_volume_io csi resource_check replica rebuild uninstall"
EXTENDED_TESTS="install basic_volume_io csi resource_check io_soak uninstall"
CONTINUOUS_TESTS="install basic_volume_io csi resource_check replica rebuild io_soak uninstall"

#exit values
EXITV_OK=0
@@ -188,7 +188,7 @@ case "$profile" in
tests="$custom_tests"
;;
default)
tests="$ALL_TESTS"
tests="$DEFAULT_TESTS"
;;
*)
echo "Unknown profile: $profile"
6 changes: 6 additions & 0 deletions test/e2e/common/util_testpods.go
@@ -136,6 +136,12 @@ func CreateFioPodDef(podName string, volName string) *corev1.Pod {
return createFioPodDef(podName, volName, false)
}

/// Create a test fio pod in default namespace, no options and no context
/// mayastor volume is mounted on /dev/sdm
func CreateRawBlockFioPodDef(podName string, volName string) *corev1.Pod {
return createFioPodDef(podName, volName, true)
}

/// Create a test fio pod in default namespace, no options and no context
/// mayastor volume is mounted on /volume
func CreateFioPod(podName string, volName string) (*corev1.Pod, error) {
21 changes: 15 additions & 6 deletions test/e2e/io_soak/filesystem_fio.go
@@ -11,7 +11,6 @@ import (
)

// IO soak filesystem fio job

type FioFsSoakJob struct {
volName string
scName string
@@ -27,8 +26,10 @@ func (job FioFsSoakJob) removeVolume() {
common.RmPVC(job.volName, job.scName)
}

func (job FioFsSoakJob) makeTestPod() (*coreV1.Pod, error) {
pod, err := common.CreateFioPod(job.podName, job.volName)
func (job FioFsSoakJob) makeTestPod(selector map[string]string) (*coreV1.Pod, error) {
pod := common.CreateFioPodDef(job.podName, job.volName)
pod.Spec.NodeSelector = selector
pod, err := common.CreatePod(pod)
return pod, err
}

@@ -37,13 +38,21 @@ func (job FioFsSoakJob) removeTestPod() error {
}

func (job FioFsSoakJob) run(duration time.Duration, doneC chan<- string, errC chan<- error) {
thinkTime := 1 // 1 microsecond
thinkTimeBlocks := 1000

FioDutyCycles := e2e_config.GetConfig().IOSoakTest.FioDutyCycles
ixp := job.id % len(FioDutyCycles)
if len(FioDutyCycles) != 0 {
ixp := job.id % len(FioDutyCycles)
thinkTime = FioDutyCycles[ixp].ThinkTime
thinkTimeBlocks = FioDutyCycles[ixp].ThinkTimeBlocks
}

RunIoSoakFio(
job.podName,
duration,
FioDutyCycles[ixp].ThinkTime,
FioDutyCycles[ixp].ThinkTimeBlocks,
thinkTime,
thinkTimeBlocks,
false,
doneC,
errC,
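
The run() change above (mirrored in rawblock_fio.go further down) makes the duty-cycle lookup tolerant of an empty FioDutyCycles list, falling back to a think time of 1 microsecond every 1000 blocks. Below is a standalone sketch of that selection logic with a simplified stand-in for the e2e_config entry type; the field names follow the diff, while the sample values are assumptions.

```go
package main

import "fmt"

// fioDutyCycle is a simplified stand-in for the e2e_config duty-cycle entry.
type fioDutyCycle struct {
	ThinkTime       int // microseconds to pause between bursts of I/O
	ThinkTimeBlocks int // blocks to issue between pauses
}

// dutyCycleFor mirrors the guarded lookup in run(): pick an entry by job id
// modulo the configured list, or fall back to defaults when the list is empty.
func dutyCycleFor(jobID int, cycles []fioDutyCycle) (thinkTime, thinkTimeBlocks int) {
	thinkTime = 1 // 1 microsecond
	thinkTimeBlocks = 1000
	if len(cycles) != 0 {
		ixp := jobID % len(cycles)
		thinkTime = cycles[ixp].ThinkTime
		thinkTimeBlocks = cycles[ixp].ThinkTimeBlocks
	}
	return thinkTime, thinkTimeBlocks
}

func main() {
	cycles := []fioDutyCycle{
		{ThinkTime: 500000, ThinkTimeBlocks: 1000},
		{ThinkTime: 750000, ThinkTimeBlocks: 1000},
	}
	for id := 0; id < 3; id++ {
		tt, ttb := dutyCycleFor(id, cycles)
		fmt.Printf("job %d: thinktime=%dus, blocks=%d\n", id, tt, ttb)
	}
	// With no duty cycles configured, the defaults apply.
	fmt.Println(dutyCycleFor(0, nil))
}
```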
28 changes: 25 additions & 3 deletions test/e2e/io_soak/io_soak_test.go
@@ -23,13 +23,20 @@ var defTimeoutSecs = "120s"

type IoSoakJob interface {
makeVolume()
makeTestPod() (*coreV1.Pod, error)
makeTestPod(map[string]string) (*coreV1.Pod, error)
removeTestPod() error
removeVolume()
run(time.Duration, chan<- string, chan<- error)
getPodName() string
}

const NodeSelectorKey = "e2e-io-soak"
const NodeSelectorAppValue = "e2e-app"

var AppNodeSelector = map[string]string{
NodeSelectorKey: NodeSelectorAppValue,
}

var scNames []string
var jobs []IoSoakJob

@@ -64,6 +71,8 @@ func IOSoakTest(protocols []common.ShareProto, replicas int, loadFactor int, dur
nodeList, err := common.GetNodeLocs()
Expect(err).ToNot(HaveOccurred())

var nodes []string

numMayastorNodes := 0
jobCount := 0
sort.Slice(nodeList, func(i, j int) bool { return nodeList[i].NodeName < nodeList[j].NodeName })
@@ -72,6 +81,13 @@
logf.Log.Info("MayastorNode", "name", node.NodeName, "index", i)
jobCount += loadFactor
numMayastorNodes += 1
nodes = append(nodes, node.NodeName)
}
}

for i, node := range nodes {
if i%2 == 0 {
common.LabelNode(node, NodeSelectorKey, NodeSelectorAppValue)
}
}

@@ -115,7 +131,7 @@ func IOSoakTest(protocols []common.ShareProto, replicas int, loadFactor int, dur
logf.Log.Info("Creating test pods")
// Create the job test pods
for _, job := range jobs {
pod, err := job.makeTestPod()
pod, err := job.makeTestPod(AppNodeSelector)
Expect(err).ToNot(HaveOccurred())
Expect(pod).ToNot(BeNil())
}
@@ -169,9 +185,15 @@ func IOSoakTest(protocols []common.ShareProto, replicas int, loadFactor int, dur
err = common.RmStorageClass(scName)
Expect(err).ToNot(HaveOccurred())
}

for i, node := range nodes {
if i%2 == 0 {
common.UnlabelNode(node, NodeSelectorKey)
}
}
}

var _ = Describe("Mayastor Volume IO test", func() {
var _ = Describe("Mayastor Volume IO soak test", func() {

AfterEach(func() {
logf.Log.Info("AfterEach")
20 changes: 15 additions & 5 deletions test/e2e/io_soak/rawblock_fio.go
@@ -27,8 +27,10 @@ func (job FioRawBlockSoakJob) removeVolume() {
common.RmPVC(job.volName, job.scName)
}

func (job FioRawBlockSoakJob) makeTestPod() (*coreV1.Pod, error) {
pod, err := common.CreateRawBlockFioPod(job.podName, job.volName)
func (job FioRawBlockSoakJob) makeTestPod(selector map[string]string) (*coreV1.Pod, error) {
pod := common.CreateRawBlockFioPodDef(job.podName, job.volName)
pod.Spec.NodeSelector = selector
pod, err := common.CreatePod(pod)
return pod, err
}

@@ -37,13 +39,21 @@ func (job FioRawBlockSoakJob) removeTestPod() error {
}

func (job FioRawBlockSoakJob) run(duration time.Duration, doneC chan<- string, errC chan<- error) {
thinkTime := 1 // 1 microsecond
thinkTimeBlocks := 1000

FioDutyCycles := e2e_config.GetConfig().IOSoakTest.FioDutyCycles
ixp := job.id % len(FioDutyCycles)
if len(FioDutyCycles) != 0 {
ixp := job.id % len(FioDutyCycles)
thinkTime = FioDutyCycles[ixp].ThinkTime
thinkTimeBlocks = FioDutyCycles[ixp].ThinkTimeBlocks
}

RunIoSoakFio(
job.podName,
duration,
FioDutyCycles[ixp].ThinkTime,
FioDutyCycles[ixp].ThinkTimeBlocks,
thinkTime,
thinkTimeBlocks,
true,
doneC,
errC,
