fix: for readiness check both the phase and ready status #277
Changes from all commits: c82a71e, a65a44b, 65f1cae, b6590aa
The first changed file (the Kubernetes client code). The fixed retry counter is replaced by a channel-based select loop with a configurable timeout, and check failures are now logged and retried instead of aborting:

```diff
@@ -121,32 +121,33 @@ func (c K8Client) RestartPodWithGracePeriod(podName string, gracePeriodSec *int64) error {
 }

 func (c K8Client) AwaitReadiness() error {
-	retries := 0
-	maxRetries := 300 // 5 * 60s
-	for {
-		if retries >= maxRetries {
-			return errors.New("Awaited readiness of pods in namespace %s, but timed out after 30s")
-		}
-		if retries > 0 {
-			time.Sleep(1 * time.Second)
-		}
-
-		brokersAreRunning, err := c.checkIfBrokersAreRunning()
-		if err != nil {
-			return err
-		}
-
-		gatewaysAreRunning, err := c.checkIfGatewaysAreRunning()
-		if err != nil {
-			return err
-		}
-
-		if brokersAreRunning && gatewaysAreRunning {
-			break
-		}
-		retries++
-	}
-	return nil
+	return c.AwaitReadinessWithTimeout(5 * time.Minute)
+}
+
+func (c K8Client) AwaitReadinessWithTimeout(timeout time.Duration) error {
+	timedOut := time.After(timeout)
+	ticker := time.Tick(1 * time.Second)
+
+	// Keep checking until we're timed out
+	for {
+		select {
+		case <-timedOut:
+			return errors.New(fmt.Sprintf("Awaited readiness of pods in namespace %v, but timed out after %v", c.GetCurrentNamespace(), timeout))
+		case <-ticker:
+			brokersAreRunning, err := c.checkIfBrokersAreRunning()
+			if err != nil {
+				LogVerbose("Failed to check broker status. Will retry. %v", err)
+			}
+			gatewaysAreRunning, err := c.checkIfGatewaysAreRunning()
+			if err != nil {
+				LogVerbose("Failed to check gateway status. Will retry. %v", err)
+			}
+
+			if brokersAreRunning && gatewaysAreRunning {
+				return nil
+			}
+		}
+	}
 }

 func (c K8Client) checkIfBrokersAreRunning() (bool, error) {
```

> **Review comment on lines +134 to +136:** 👍 Yeah, I also thought last week that we could write this better with channels. Thanks for improving this 🚀
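The reviewer's point generalizes: the select over a timeout channel and a ticker is a reusable poll-until-timeout pattern. Below is a minimal runnable sketch, with a hypothetical `ready func() bool` standing in for the broker and gateway checks; it swaps `time.Tick` for `time.NewTicker` so the ticker can be stopped when the function returns.

```go
package main

import (
	"fmt"
	"time"
)

// awaitCondition polls ready() once per second until it reports true or
// the timeout elapses, mirroring the select loop in the diff above.
func awaitCondition(timeout time.Duration, ready func() bool) error {
	timedOut := time.After(timeout)
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop() // unlike time.Tick, a stopped ticker releases its resources

	for {
		select {
		case <-timedOut:
			return fmt.Errorf("timed out after %v", timeout)
		case <-ticker.C:
			if ready() {
				return nil
			}
		}
	}
}

func main() {
	start := time.Now()
	// The condition becomes true after roughly two seconds, well within the timeout.
	err := awaitCondition(5*time.Second, func() bool {
		return time.Since(start) > 2*time.Second
	})
	fmt.Println(err) // prints <nil>
}
```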
The readiness checks themselves now require the pod phase to be `Running` in addition to the container's ready flag:

```diff
@@ -161,8 +162,8 @@ func (c K8Client) checkIfBrokersAreRunning() (bool, error) {

 	allRunning := true
 	for _, pod := range pods.Items {
-		if !pod.Status.ContainerStatuses[0].Ready { // assuming there is only one container
-			LogVerbose("Pod %s is in phase %s, but not ready. Wait for some seconds.", pod.Name, pod.Status.Phase)
+		if pod.Status.Phase != v1.PodRunning || !pod.Status.ContainerStatuses[0].Ready { // assuming there is only one container
+			LogVerbose("Pod %s is in phase %s, and not ready. Wait for some seconds.", pod.Name, pod.Status.Phase)
 			allRunning = false
 			break
 		}

@@ -198,7 +199,7 @@ func (c K8Client) AwaitPodReadiness(podName string, timeout time.Duration) error {
 	pod, err := c.Clientset.CoreV1().Pods(c.GetCurrentNamespace()).Get(context.TODO(), podName, metav1.GetOptions{})
 	if err != nil {
 		LogVerbose("Failed to get pod %s. Will retry", pod.Name)
-	} else if pod.Status.ContainerStatuses[0].Ready { // assuming there is only one container
+	} else if pod.Status.Phase == v1.PodRunning && pod.Status.ContainerStatuses[0].Ready { // assuming there is only one container
 		return nil
 	} else {
 		LogVerbose("Pod %s is in phase %s, but not ready. Wait for some seconds", pod.Name, pod.Status.Phase)
```
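Since the phase-and-ready condition now appears in both `checkIfBrokersAreRunning` and `AwaitPodReadiness`, it could be factored out. A hedged sketch follows; the helper name `isRunningAndReady` is hypothetical, not part of the PR, and it keeps the diff's single-container assumption while also guarding the index into `ContainerStatuses`.

```go
package internal

import v1 "k8s.io/api/core/v1"

// isRunningAndReady is a hypothetical helper combining the two conditions
// the PR now checks: the pod phase and the first container's ready flag.
func isRunningAndReady(pod *v1.Pod) bool {
	if pod.Status.Phase != v1.PodRunning {
		return false // e.g. still Pending, or already Succeeded/Failed
	}
	if len(pod.Status.ContainerStatuses) == 0 {
		return false // no container status reported yet
	}
	return pod.Status.ContainerStatuses[0].Ready // assuming one container
}
```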
The second changed file (the tests). The imports gain the core/v1 API types and `time`:

```diff
@@ -15,7 +15,9 @@
 package internal

 import (
+	v1 "k8s.io/api/core/v1"
 	"testing"
+	"time"

 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
```
```diff
@@ -265,3 +267,66 @@ func Test_GetEmbeddedGateway(t *testing.T) {
 	require.NotEmpty(t, names)
 	assert.Equal(t, "broker", names[0], "Expected to retrieve broker")
 }
+
+func Test_ShouldReturnTrueWhenBrokersAreReady(t *testing.T) {
+	// given
+	k8Client := CreateFakeClient()
+
+	// broker
+	selector, err := metav1.ParseToLabelSelector(getSelfManagedBrokerLabels())
+	require.NoError(t, err)
+	k8Client.CreateBrokerPodsWithStatus(t, selector, "broker1", v1.PodRunning, true)
+	k8Client.CreateBrokerPodsWithStatus(t, selector, "broker2", v1.PodRunning, true)
+
+	gatewaySelector, err := metav1.ParseToLabelSelector(getSelfManagedGatewayLabels())
+	require.NoError(t, err)
+	k8Client.CreateDeploymentWithLabelsAndName(t, gatewaySelector, "gateway")
+
+	// when
+	err = k8Client.AwaitReadinessWithTimeout(2 * time.Second)
+
+	// then
+	require.NoError(t, err)
+}
```
```diff
+
+func Test_ShouldReturnErrorWhenAtleastOneBrokerIsNotReady(t *testing.T) {
+	// given
+	k8Client := CreateFakeClient()
+
+	// broker
+	selector, err := metav1.ParseToLabelSelector(getSelfManagedBrokerLabels())
+	require.NoError(t, err)
+	k8Client.CreateBrokerPodsWithStatus(t, selector, "broker1", v1.PodRunning, true)
+	k8Client.CreateBrokerPodsWithStatus(t, selector, "broker2", v1.PodRunning, false)
+
+	gatewaySelector, err := metav1.ParseToLabelSelector(getSelfManagedGatewayLabels())
+	require.NoError(t, err)
+	k8Client.CreateDeploymentWithLabelsAndName(t, gatewaySelector, "gateway")
+
+	// when
+	err = k8Client.AwaitReadinessWithTimeout(2 * time.Second)
+
+	// then
+	require.Error(t, err)
+}
```

> **Review comment on the timeout:** 🔧 Could we reduce the timeout? Otherwise the tests take a bit long.
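One way to act on this comment, as a sketch rather than a change from the PR: the error-path tests only need the timeout to elapse. Note that the ticker inside `AwaitReadinessWithTimeout` fires once per second, so the timeout must stay above one second if the test should still exercise the readiness check at least once before timing out; the value below is illustrative.

```go
// when: a shorter (illustrative) timeout, just above one tick so the
// readiness check still runs once before the timeout channel fires
err = k8Client.AwaitReadinessWithTimeout(1100 * time.Millisecond)

// then
require.Error(t, err)
```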
```diff
+
+func Test_ShouldReturnErrorWhenAtleastOneBrokerIsNotRunning(t *testing.T) {
+	// given
+	k8Client := CreateFakeClient()
+
+	// broker
+	selector, err := metav1.ParseToLabelSelector(getSelfManagedBrokerLabels())
+	require.NoError(t, err)
+	k8Client.CreateBrokerPodsWithStatus(t, selector, "broker1", v1.PodRunning, true)
+	k8Client.CreateBrokerPodsWithStatus(t, selector, "broker2", v1.PodPending, true)
+
+	gatewaySelector, err := metav1.ParseToLabelSelector(getSelfManagedGatewayLabels())
+	require.NoError(t, err)
+	k8Client.CreateDeploymentWithLabelsAndName(t, gatewaySelector, "gateway")
+
+	// when
+	err = k8Client.AwaitReadinessWithTimeout(2 * time.Second)
+
+	// then
+	require.Error(t, err)
+}
```

> **Review comment:** ❌ I think this is not used? BTW, you could also call the function above.
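Following the reviewer's suggestion to reuse shared code: the three tests differ only in the second broker's phase and readiness, so the setup could be extracted. A hedged sketch follows; the helper name and signature (including the `K8Client` parameter type) are illustrative, not from the PR.

```go
// givenTwoBrokersAndAGateway is a hypothetical test helper deduplicating
// the setup shared by the three readiness tests above: broker1 is always
// running and ready, while broker2's phase and readiness are parameterized.
func givenTwoBrokersAndAGateway(t *testing.T, k8Client K8Client, phase v1.PodPhase, ready bool) {
	selector, err := metav1.ParseToLabelSelector(getSelfManagedBrokerLabels())
	require.NoError(t, err)
	k8Client.CreateBrokerPodsWithStatus(t, selector, "broker1", v1.PodRunning, true)
	k8Client.CreateBrokerPodsWithStatus(t, selector, "broker2", phase, ready)

	gatewaySelector, err := metav1.ParseToLabelSelector(getSelfManagedGatewayLabels())
	require.NoError(t, err)
	k8Client.CreateDeploymentWithLabelsAndName(t, gatewaySelector, "gateway")
}
```

Each test body would then shrink to the helper call, for example `givenTwoBrokersAndAGateway(t, k8Client, v1.PodPending, true)` for the not-running case, followed by the `AwaitReadinessWithTimeout` call and the assertion.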