Skip to content

Commit

Permalink
Don't rely on .status.podName to find Pod associated with a TaskRun
Browse files Browse the repository at this point in the history
This adds Reconciler.getPod, which looks up the Pod for a TaskRun by
performing a label selector query on Pods, looking for the label we
apply to Pods generated by TaskRuns.

If zero Pods are returned, it's the same as .status.podName being "". If
multiple Pods are returned, that's an error.

Also, clean up metrics_test.go a bit while I'm in that area
  • Loading branch information
imjasonh authored and tekton-robot committed Dec 16, 2019
1 parent fd07ff9 commit 0f20c35
Show file tree
Hide file tree
Showing 6 changed files with 310 additions and 416 deletions.
19 changes: 5 additions & 14 deletions pkg/reconciler/taskrun/cancel.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,28 +26,19 @@ import (
"knative.dev/pkg/apis"
)

// logger is the minimal logging interface used by cancelTaskRun; it is
// satisfied by *zap.SugaredLogger (see the test, which passes
// zap.New(observer).Sugar()).
type logger interface {
	// Warn logs its arguments at warn level.
	Warn(args ...interface{})
	// Warnf logs a formatted message at warn level.
	Warnf(template string, args ...interface{})
}

// cancelTaskRun marks the TaskRun as cancelled and deletes Pods linked to it.
func cancelTaskRun(tr *v1alpha1.TaskRun, clientSet kubernetes.Interface, logger logger) error {
logger.Warn("task run %q has been cancelled", tr.Name)
func cancelTaskRun(tr *v1alpha1.TaskRun, clientset kubernetes.Interface) error {
tr.Status.SetCondition(&apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: "TaskRunCancelled",
Message: fmt.Sprintf("TaskRun %q was cancelled", tr.Name),
})

if tr.Status.PodName == "" {
logger.Warnf("task run %q has no pod running yet", tr.Name)
return nil
}

if err := clientSet.CoreV1().Pods(tr.Namespace).Delete(tr.Status.PodName, &metav1.DeleteOptions{}); err != nil {
pod, err := getPod(tr, clientset)
if err != nil {
return err
}
return nil

return clientset.CoreV1().Pods(tr.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
}
105 changes: 52 additions & 53 deletions pkg/reconciler/taskrun/cancel_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,78 +22,77 @@ import (

"github.com/google/go-cmp/cmp"
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing"
"github.com/tektoncd/pipeline/test"
tb "github.com/tektoncd/pipeline/test/builder"
"go.uber.org/zap"
"go.uber.org/zap/zaptest/observer"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/pkg/apis"
)

func TestCancelTaskRun(t *testing.T) {
testCases := []struct {
name string
taskRun *v1alpha1.TaskRun
pod *corev1.Pod
expectedStatus apis.Condition
namespace := "the-namespace"
taskRunName := "the-taskrun"
wantStatus := &apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: "TaskRunCancelled",
Message: `TaskRun "the-taskrun" was cancelled`,
}
for _, c := range []struct {
desc string
taskRun *v1alpha1.TaskRun
pod *corev1.Pod
}{{
name: "no-pod-scheduled",
taskRun: tb.TaskRun("test-taskrun-run-cancelled", "foo", tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
tb.TaskRunCancelled,
), tb.TaskRunStatus(tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown,
}))),
expectedStatus: apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: "TaskRunCancelled",
Message: `TaskRun "test-taskrun-run-cancelled" was cancelled`,
desc: "no pod scheduled",
taskRun: &v1alpha1.TaskRun{
ObjectMeta: metav1.ObjectMeta{
Name: taskRunName,
Namespace: namespace,
},
Spec: v1alpha1.TaskRunSpec{
Status: v1alpha1.TaskRunSpecStatusCancelled,
},
},
}, {
name: "pod-scheduled",
taskRun: tb.TaskRun("test-taskrun-run-cancelled", "foo", tb.TaskRunSpec(
tb.TaskRunTaskRef(simpleTask.Name),
tb.TaskRunCancelled,
), tb.TaskRunStatus(tb.StatusCondition(apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionUnknown,
}), tb.PodName("foo-is-bar"))),
desc: "pod scheduled",
taskRun: &v1alpha1.TaskRun{
ObjectMeta: metav1.ObjectMeta{
Name: taskRunName,
Namespace: namespace,
},
Spec: v1alpha1.TaskRunSpec{
Status: v1alpha1.TaskRunSpecStatusCancelled,
},
},
pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "foo-is-bar",
Namespace: namespace,
Name: "the-pod",
Labels: map[string]string{
"tekton.dev/taskRun": taskRunName,
},
}},
expectedStatus: apis.Condition{
Type: apis.ConditionSucceeded,
Status: corev1.ConditionFalse,
Reason: "TaskRunCancelled",
Message: `TaskRun "test-taskrun-run-cancelled" was cancelled`,
},
}}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
}} {
t.Run(c.desc, func(t *testing.T) {
d := test.Data{
TaskRuns: []*v1alpha1.TaskRun{tc.taskRun},
TaskRuns: []*v1alpha1.TaskRun{c.taskRun},
}
if tc.pod != nil {
d.Pods = []*corev1.Pod{tc.pod}
if c.pod != nil {
d.Pods = []*corev1.Pod{c.pod}
}

ctx, _ := ttesting.SetupFakeContext(t)
ctx, cancel := context.WithCancel(ctx)
testAssets, cancel := getTaskRunController(t, d)
defer cancel()
c, _ := test.SeedTestData(t, ctx, d)
observer, _ := observer.New(zap.InfoLevel)
err := cancelTaskRun(tc.taskRun, c.Kube, zap.New(observer).Sugar())
if err != nil {
if err := testAssets.Controller.Reconciler.Reconcile(context.Background(), getRunName(c.taskRun)); err != nil {
t.Fatal(err)
}
if d := cmp.Diff(tc.taskRun.Status.GetCondition(apis.ConditionSucceeded), &tc.expectedStatus, ignoreLastTransitionTime); d != "" {
t.Fatalf("-want, +got: %v", d)
if d := cmp.Diff(wantStatus, c.taskRun.Status.GetCondition(apis.ConditionSucceeded), ignoreLastTransitionTime); d != "" {
t.Errorf("Diff(-want, +got): %s", d)
}

if c.pod != nil {
if _, err := testAssets.Controller.Reconciler.(*Reconciler).KubeClientSet.CoreV1().Pods(c.taskRun.Namespace).Get(c.pod.Name, metav1.GetOptions{}); !kerrors.IsNotFound(err) {
t.Errorf("Pod was not deleted; wanted not-found error, got %v", err)
}
}
})
}
Expand Down
26 changes: 10 additions & 16 deletions pkg/reconciler/taskrun/metrics.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ func NewRecorder() (*Recorder, error) {
}
r.pod = pod

err = view.Register(
if err := view.Register(
&view.View{
Description: trDuration.Description(),
Measure: trDuration,
Expand Down Expand Up @@ -150,9 +150,7 @@ func NewRecorder() (*Recorder, error) {
Aggregation: view.LastValue(),
TagKeys: []tag.Key{r.task, r.taskRun, r.namespace, r.pod},
},
)

if err != nil {
); err != nil {
r.initialized = false
return r, err
}
Expand Down Expand Up @@ -257,9 +255,15 @@ func (r *Recorder) RecordPodLatency(pod *corev1.Pod, tr *v1alpha1.TaskRun) error
return errors.New("ignoring the metrics recording for pod , failed to initialize the metrics recorder")
}

scheduledTime := getScheduledTime(pod)
var scheduledTime metav1.Time
for _, c := range pod.Status.Conditions {
if c.Type == corev1.PodScheduled {
scheduledTime = c.LastTransitionTime
break
}
}
if scheduledTime.IsZero() {
return errors.New("pod has never got scheduled")
return errors.New("pod was never scheduled")
}

latency := scheduledTime.Sub(pod.CreationTimestamp.Time)
Expand All @@ -283,13 +287,3 @@ func (r *Recorder) RecordPodLatency(pod *corev1.Pod, tr *v1alpha1.TaskRun) error

return nil
}

// getScheduledTime returns the timestamp at which the Pod reached the
// PodScheduled condition, taken from that condition's LastTransitionTime.
// If the Pod carries no PodScheduled condition, the zero metav1.Time is
// returned; callers can detect this with IsZero().
func getScheduledTime(pod *corev1.Pod) metav1.Time {
	var scheduled metav1.Time
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodScheduled {
			scheduled = cond.LastTransitionTime
			break
		}
	}
	return scheduled
}
Loading

0 comments on commit 0f20c35

Please sign in to comment.