diff --git a/pkg/oc/admin/prune/imageprune/prune.go b/pkg/oc/admin/prune/imageprune/prune.go index d68c5039aa53..882b70f6d67d 100644 --- a/pkg/oc/admin/prune/imageprune/prune.go +++ b/pkg/oc/admin/prune/imageprune/prune.go @@ -560,7 +560,7 @@ func (p *pruner) addDaemonSetsToGraph(dss *kapisext.DaemonSetList) []error { ds := &dss.Items[i] desc := fmt.Sprintf("DaemonSet %s", getName(ds)) glog.V(4).Infof("Examining %s", desc) - dsNode := appsgraph.EnsureDaemonSetNode(p.g, ds) + dsNode := kubegraph.EnsureDaemonSetNode(p.g, ds) errs = append(errs, p.addPodSpecToGraph(getRef(ds), &ds.Spec.Template.Spec, dsNode)...) } @@ -578,7 +578,7 @@ func (p *pruner) addDeploymentsToGraph(dmnts *kapisext.DeploymentList) []error { d := &dmnts.Items[i] ref := getRef(d) glog.V(4).Infof("Examining %s", getKindName(ref)) - dNode := appsgraph.EnsureDeploymentNode(p.g, d) + dNode := kubegraph.EnsureDeploymentNode(p.g, d) errs = append(errs, p.addPodSpecToGraph(ref, &d.Spec.Template.Spec, dNode)...) } @@ -615,7 +615,7 @@ func (p *pruner) addReplicaSetsToGraph(rss *kapisext.ReplicaSetList) []error { rs := &rss.Items[i] ref := getRef(rs) glog.V(4).Infof("Examining %s", getKindName(ref)) - rsNode := appsgraph.EnsureReplicaSetNode(p.g, rs) + rsNode := kubegraph.EnsureReplicaSetNode(p.g, rs) errs = append(errs, p.addPodSpecToGraph(ref, &rs.Spec.Template.Spec, rsNode)...) } diff --git a/pkg/oc/cli/cmd/status.go b/pkg/oc/cli/cmd/status.go index 6b4204b4c990..95f929764619 100644 --- a/pkg/oc/cli/cmd/status.go +++ b/pkg/oc/cli/cmd/status.go @@ -160,7 +160,7 @@ func (o *StatusOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, baseC canRequestProjects, _ := loginutil.CanRequestProjects(config, o.namespace) o.describer = &describe.ProjectStatusDescriber{ - K: kclientset, + KubeClient: kclientset, ProjectClient: projectClient.Project(), BuildClient: buildClient.Build(), ImageClient: imageClient.Image(), diff --git a/pkg/oc/cli/describe/deployments.go b/pkg/oc/cli/describe/deployments.go index 22c14cc6d251..7510ccf76b69 100644 --- a/pkg/oc/cli/describe/deployments.go +++ b/pkg/oc/cli/describe/deployments.go @@ -473,12 +473,12 @@ func (d *LatestDeploymentsDescriber) Describe(namespace, name string) (string, e for i := range deployments { kubegraph.EnsureReplicationControllerNode(g, &deployments[i]) } - appsedges.AddTriggerEdges(g, dcNode) - appsedges.AddDeploymentEdges(g, dcNode) + appsedges.AddTriggerDeploymentConfigsEdges(g, dcNode) + appsedges.AddDeploymentConfigsDeploymentEdges(g, dcNode) activeDeployment, inactiveDeployments := appsedges.RelevantDeployments(g, dcNode) return tabbedString(func(out *tabwriter.Writer) error { - descriptions := describeDeployments(f, dcNode, activeDeployment, inactiveDeployments, nil, d.count) + descriptions := describeDeploymentConfigDeployments(f, dcNode, activeDeployment, inactiveDeployments, nil, d.count) for i, description := range descriptions { descriptions[i] = fmt.Sprintf("%v %v", name, description) } diff --git a/pkg/oc/cli/describe/projectstatus.go b/pkg/oc/cli/describe/projectstatus.go index 2e0c98691d22..47db03f3bbb1 100644 --- a/pkg/oc/cli/describe/projectstatus.go +++ b/pkg/oc/cli/describe/projectstatus.go @@ -22,6 +22,7 @@ import ( kautoscalingclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion" kcoreclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" kapisextclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" 
+ deployutil "k8s.io/kubernetes/pkg/controller/deployment/util" appsapi "github.com/openshift/origin/pkg/apps/apis/apps" appsclient "github.com/openshift/origin/pkg/apps/generated/internalclientset/typed/apps/internalversion" @@ -60,7 +61,7 @@ const ForbiddenListWarning = "Forbidden" // ProjectStatusDescriber generates extended information about a Project type ProjectStatusDescriber struct { - K kclientset.Interface + KubeClient kclientset.Interface // OpenShift clients ProjectClient projectclient.ProjectInterface @@ -87,15 +88,16 @@ func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, set g := osgraph.New() loaders := []GraphLoader{ - &serviceLoader{namespace: namespace, lister: d.K.Core()}, - &serviceAccountLoader{namespace: namespace, lister: d.K.Core()}, - &secretLoader{namespace: namespace, lister: d.K.Core()}, - &pvcLoader{namespace: namespace, lister: d.K.Core()}, - &rcLoader{namespace: namespace, lister: d.K.Core()}, - &podLoader{namespace: namespace, lister: d.K.Core()}, - &statefulSetLoader{namespace: namespace, lister: d.K.Apps()}, - &horizontalPodAutoscalerLoader{namespace: namespace, lister: d.K.Autoscaling()}, - &deploymentLoader{namespace: namespace, lister: d.K.Extensions()}, + &serviceLoader{namespace: namespace, lister: d.KubeClient.Core()}, + &serviceAccountLoader{namespace: namespace, lister: d.KubeClient.Core()}, + &secretLoader{namespace: namespace, lister: d.KubeClient.Core()}, + &pvcLoader{namespace: namespace, lister: d.KubeClient.Core()}, + &rcLoader{namespace: namespace, lister: d.KubeClient.Core()}, + &podLoader{namespace: namespace, lister: d.KubeClient.Core()}, + &statefulSetLoader{namespace: namespace, lister: d.KubeClient.Apps()}, + &horizontalPodAutoscalerLoader{namespace: namespace, lister: d.KubeClient.Autoscaling()}, + &deploymentLoader{namespace: namespace, lister: d.KubeClient.Extensions()}, + &replicasetLoader{namespace: namespace, lister: d.KubeClient.Extensions()}, // TODO check swagger for feature enablement and selectively add bcLoader and buildLoader // then remove errors.TolerateNotFoundError method. &bcLoader{namespace: namespace, lister: d.BuildClient}, @@ -148,8 +150,11 @@ func (d *ProjectStatusDescriber) MakeGraph(namespace string) (osgraph.Graph, set kubeedges.AddHPAScaleRefEdges(g) buildedges.AddAllInputOutputEdges(g) buildedges.AddAllBuildEdges(g) - appsedges.AddAllTriggerEdges(g) - appsedges.AddAllDeploymentEdges(g) + appsedges.AddAllTriggerDeploymentConfigsEdges(g) + kubeedges.AddAllTriggerDeploymentsEdges(g) + kubeedges.AddAllTriggerStatefulSetsEdges(g) + appsedges.AddAllDeploymentConfigsDeploymentEdges(g) + kubeedges.AddAllDeploymentEdges(g) appsedges.AddAllVolumeClaimEdges(g) imageedges.AddAllImageStreamRefEdges(g) imageedges.AddAllImageStreamImageRefEdges(g) @@ -198,6 +203,9 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error standaloneRCs, coveredByRCs := graphview.AllReplicationControllers(g, coveredNodes) coveredNodes.Insert(coveredByRCs.List()...) + standaloneRSs, coveredByRSs := graphview.AllReplicaSets(g, coveredNodes) + coveredNodes.Insert(coveredByRSs.List()...) + standaloneImages, coveredByImages := graphview.AllImagePipelinesFromBuildConfig(g, coveredNodes) coveredNodes.Insert(coveredByImages.List()...) @@ -228,15 +236,31 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error printLines(out, "", 0, describeServiceInServiceGroup(f, service, exposes...)...) 
for _, dcPipeline := range service.DeploymentConfigPipelines { - printLines(out, indent, 1, describeDeploymentInServiceGroup(local, dcPipeline, func(rc *kubegraph.ReplicationControllerNode) int32 { + printLines(out, indent, 1, describeDeploymentConfigInServiceGroup(local, dcPipeline, func(rc *kubegraph.ReplicationControllerNode) int32 { return graphview.MaxRecentContainerRestartsForRC(g, rc) })...) } - for _, node := range service.FulfillingStatefulSets { + for _, node := range service.StatefulSets { printLines(out, indent, 1, describeStatefulSetInServiceGroup(local, node)...) } + for _, node := range service.Deployments { + printLines(out, indent, 1, describeDeploymentInServiceGroup(local, node, func(rs *kubegraph.ReplicaSetNode) int32 { + return graphview.MaxRecentContainerRestartsForRS(g, rs) + })...) + } + + rsNode: + for _, rsNode := range service.FulfillingRSs { + for _, coveredD := range service.FulfillingDeployments { + if kubeedges.BelongsToDeployment(coveredD.Deployment, rsNode.ReplicaSet) { + continue rsNode + } + } + printLines(out, indent, 1, describeRSInServiceGroup(local, rsNode)...) + } + rcNode: for _, rcNode := range service.FulfillingRCs { for _, coveredDC := range service.FulfillingDCs { @@ -255,6 +279,11 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error continue pod } } + for _, coveredRS := range service.FulfillingRSs { + if g.Edge(node, coveredRS) != nil { + continue pod + } + } // TODO: collapse into FulfillingControllers for _, covered := range service.FulfillingStatefulSets { if g.Edge(node, covered) != nil { @@ -267,7 +296,7 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error for _, standaloneDC := range standaloneDCs { fmt.Fprintln(out) - printLines(out, indent, 0, describeDeploymentInServiceGroup(f, standaloneDC, func(rc *kubegraph.ReplicationControllerNode) int32 { + printLines(out, indent, 0, describeDeploymentConfigInServiceGroup(f, standaloneDC, func(rc *kubegraph.ReplicationControllerNode) int32 { return graphview.MaxRecentContainerRestartsForRC(g, rc) })...) } @@ -283,6 +312,10 @@ func (d *ProjectStatusDescriber) Describe(namespace, name string) (string, error fmt.Fprintln(out) printLines(out, indent, 0, describeRCInServiceGroup(f, standaloneRC.RC)...) } + for _, standaloneRS := range standaloneRSs { + fmt.Fprintln(out) + printLines(out, indent, 0, describeRSInServiceGroup(f, standaloneRS.RS)...) 
+ } monopods, err := filterBoringPods(standalonePods) if err != nil { @@ -516,10 +549,14 @@ func (f namespacedFormatter) ResourceName(obj interface{}) string { return namespaceNameWithType("sa", t.Name, t.Namespace, f.currentNamespace, f.hideNamespace) case *kubegraph.ReplicationControllerNode: return namespaceNameWithType("rc", t.ReplicationController.Name, t.ReplicationController.Namespace, f.currentNamespace, f.hideNamespace) + case *kubegraph.ReplicaSetNode: + return namespaceNameWithType("rc", t.ReplicaSet.Name, t.ReplicaSet.Namespace, f.currentNamespace, f.hideNamespace) case *kubegraph.HorizontalPodAutoscalerNode: return namespaceNameWithType("hpa", t.HorizontalPodAutoscaler.Name, t.HorizontalPodAutoscaler.Namespace, f.currentNamespace, f.hideNamespace) case *kubegraph.StatefulSetNode: return namespaceNameWithType("statefulset", t.StatefulSet.Name, t.StatefulSet.Namespace, f.currentNamespace, f.hideNamespace) + case *kubegraph.DeploymentNode: + return namespaceNameWithType("deployment", t.Deployment.Name, t.Deployment.Namespace, f.currentNamespace, f.hideNamespace) case *kubegraph.PersistentVolumeClaimNode: return namespaceNameWithType("pvc", t.PersistentVolumeClaim.Name, t.PersistentVolumeClaim.Namespace, f.currentNamespace, f.hideNamespace) @@ -562,46 +599,102 @@ func describeAllProjectsOnServer(f formatter, server string) string { return fmt.Sprintf("Showing all projects on server %s\n", server) } -func describeDeploymentInServiceGroup(f formatter, deploy graphview.DeploymentConfigPipeline, restartFn func(*kubegraph.ReplicationControllerNode) int32) []string { - local := namespacedFormatter{currentNamespace: deploy.Deployment.DeploymentConfig.Namespace} +func describeDeploymentConfigInServiceGroup(f formatter, deploy graphview.DeploymentConfigPipeline, restartFn func(*kubegraph.ReplicationControllerNode) int32) []string { + local := namespacedFormatter{currentNamespace: deploy.DeploymentConfig.DeploymentConfig.Namespace} includeLastPass := deploy.ActiveDeployment == nil if len(deploy.Images) == 1 { format := "%s deploys %s %s" - if deploy.Deployment.DeploymentConfig.Spec.Test { + if deploy.DeploymentConfig.DeploymentConfig.Spec.Test { format = "%s test deploys %s %s" } - lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeImageInPipeline(local, deploy.Images[0], deploy.Deployment.DeploymentConfig.Namespace), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))} + lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.DeploymentConfig), describeImageInPipeline(local, deploy.Images[0], deploy.DeploymentConfig.DeploymentConfig.Namespace), describeDeploymentConfigTrigger(deploy.DeploymentConfig.DeploymentConfig))} if len(lines[0]) > 120 && strings.Contains(lines[0], " <- ") { segments := strings.SplitN(lines[0], " <- ", 2) lines[0] = segments[0] + " <-" lines = append(lines, segments[1]) } lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(deploy.Images[0].Build, deploy.Images[0].LastSuccessfulBuild, deploy.Images[0].LastUnsuccessfulBuild, deploy.Images[0].ActiveBuilds, deploy.Images[0].DestinationResolved, includeLastPass)...)...) - lines = append(lines, describeDeployments(local, deploy.Deployment, deploy.ActiveDeployment, deploy.InactiveDeployments, restartFn, maxDisplayDeployments)...) + lines = append(lines, describeDeploymentConfigDeployments(local, deploy.DeploymentConfig, deploy.ActiveDeployment, deploy.InactiveDeployments, restartFn, maxDisplayDeployments)...) 
return lines } format := "%s deploys %s" - if deploy.Deployment.DeploymentConfig.Spec.Test { + if deploy.DeploymentConfig.DeploymentConfig.Spec.Test { format = "%s test deploys %s" } - lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeDeploymentConfigTrigger(deploy.Deployment.DeploymentConfig))} + lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.DeploymentConfig), describeDeploymentConfigTrigger(deploy.DeploymentConfig.DeploymentConfig))} for _, image := range deploy.Images { - lines = append(lines, describeImageInPipeline(local, image, deploy.Deployment.DeploymentConfig.Namespace)) + lines = append(lines, describeImageInPipeline(local, image, deploy.DeploymentConfig.DeploymentConfig.Namespace)) lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(image.Build, image.LastSuccessfulBuild, image.LastUnsuccessfulBuild, image.ActiveBuilds, image.DestinationResolved, includeLastPass)...)...) - lines = append(lines, describeDeployments(local, deploy.Deployment, deploy.ActiveDeployment, deploy.InactiveDeployments, restartFn, maxDisplayDeployments)...) + lines = append(lines, describeDeploymentConfigDeployments(local, deploy.DeploymentConfig, deploy.ActiveDeployment, deploy.InactiveDeployments, restartFn, maxDisplayDeployments)...) } return lines } -func describeStatefulSetInServiceGroup(f formatter, node *kubegraph.StatefulSetNode) []string { +func describeDeploymentInServiceGroup(f formatter, deploy graphview.Deployment, restartFn func(node *kubegraph.ReplicaSetNode) int32) []string { + local := namespacedFormatter{currentNamespace: deploy.Deployment.Deployment.Namespace} + // TODO: Figure out what this is + includeLastPass := false + + if len(deploy.Images) == 1 { + format := "%s deploys %s %s" + lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), describeImageInPipeline(local, deploy.Images[0], deploy.Deployment.Deployment.Namespace), "")} + if len(lines[0]) > 120 && strings.Contains(lines[0], " <- ") { + segments := strings.SplitN(lines[0], " <- ", 2) + lines[0] = segments[0] + " <-" + lines = append(lines, segments[1]) + } + lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(deploy.Images[0].Build, deploy.Images[0].LastSuccessfulBuild, deploy.Images[0].LastUnsuccessfulBuild, deploy.Images[0].ActiveBuilds, deploy.Images[0].DestinationResolved, includeLastPass)...)...) + lines = append(lines, describeDeployments(local, deploy.Deployment, deploy.ActiveDeployment, deploy.InactiveDeployments, restartFn, maxDisplayDeployments)...) + return lines + } + images := []string{} - for _, container := range node.StatefulSet.Spec.Template.Spec.Containers { + for _, container := range deploy.Deployment.Deployment.Spec.Template.Spec.Containers { images = append(images, container.Image) } + imagesWithoutTriggers := "" + if len(deploy.Images) == 0 { + imagesWithoutTriggers = strings.Join(images, ",") + } + format := "%s deploys %s" + lines := []string{fmt.Sprintf(format, f.ResourceName(deploy.Deployment), imagesWithoutTriggers)} + for _, image := range deploy.Images { + lines = append(lines, describeImageInPipeline(local, image, deploy.Deployment.Deployment.Namespace)) + lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(image.Build, image.LastSuccessfulBuild, image.LastUnsuccessfulBuild, image.ActiveBuilds, image.DestinationResolved, includeLastPass)...)...) 
+ } + lines = append(lines, describeDeployments(local, deploy.Deployment, deploy.ActiveDeployment, deploy.InactiveDeployments, restartFn, maxDisplayDeployments)...) - return []string{fmt.Sprintf("%s manages %s, %s", f.ResourceName(node), strings.Join(images, ", "), describeStatefulSetStatus(node.StatefulSet))} + return lines +} + +func describeStatefulSetInServiceGroup(f formatter, node graphview.StatefulSet) []string { + local := namespacedFormatter{currentNamespace: node.StatefulSet.StatefulSet.Namespace} + includeLastPass := false + format := "%s manages %s" + images := []string{} + for _, container := range node.StatefulSet.StatefulSet.Spec.Template.Spec.Containers { + images = append(images, container.Image) + } + imagesWithoutTriggers := "" + if len(node.Images) == 0 { + imagesWithoutTriggers = strings.Join(images, ",") + } + if len(node.Images) == 1 { + image := node.Images[0] + lines := []string{fmt.Sprintf(format, f.ResourceName(node.StatefulSet), describeImageInPipeline(local, image, node.StatefulSet.StatefulSet.Namespace))} + lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(image.Build, image.LastSuccessfulBuild, image.LastUnsuccessfulBuild, image.ActiveBuilds, image.DestinationResolved, includeLastPass)...)...) + lines = append(lines, describeStatefulSetStatus(node.StatefulSet.StatefulSet)) + return lines + } + lines := []string{fmt.Sprintf(format, f.ResourceName(node.StatefulSet), imagesWithoutTriggers)} + for _, image := range node.Images { + lines = append(lines, describeImageInPipeline(local, image, node.StatefulSet.StatefulSet.Namespace)) + lines = append(lines, indentLines(" ", describeAdditionalBuildDetail(image.Build, image.LastSuccessfulBuild, image.LastUnsuccessfulBuild, image.ActiveBuilds, image.DestinationResolved, includeLastPass)...)...) + } + lines = append(lines, describeStatefulSetStatus(node.StatefulSet.StatefulSet)) + return lines } func describeRCInServiceGroup(f formatter, rcNode *kubegraph.ReplicationControllerNode) []string { @@ -620,6 +713,18 @@ func describeRCInServiceGroup(f formatter, rcNode *kubegraph.ReplicationControll return lines } +func describeRSInServiceGroup(f formatter, rsNode *kubegraph.ReplicaSetNode) []string { + images := []string{} + for _, container := range rsNode.ReplicaSet.Spec.Template.Spec.Containers { + images = append(images, container.Image) + } + + lines := []string{fmt.Sprintf("%s runs %s", f.ResourceName(rsNode), strings.Join(images, ", "))} + lines = append(lines, describeRSStatus(rsNode.ReplicaSet)) + + return lines +} + func describePodInServiceGroup(f formatter, podNode *kubegraph.PodNode) []string { images := []string{} for _, container := range podNode.Pod.Spec.Containers { @@ -1038,7 +1143,36 @@ func describeSourceInPipeline(source *buildapi.BuildSource) (string, bool) { return "", false } -func describeDeployments(f formatter, dcNode *appsgraph.DeploymentConfigNode, activeDeployment *kubegraph.ReplicationControllerNode, inactiveDeployments []*kubegraph.ReplicationControllerNode, restartFn func(*kubegraph.ReplicationControllerNode) int32, count int) []string { +func describeDeployments(f formatter, dNode *kubegraph.DeploymentNode, activeDeployment *kubegraph.ReplicaSetNode, inactiveDeployments []*kubegraph.ReplicaSetNode, restartFn func(node *kubegraph.ReplicaSetNode) int32, count int) []string { + if dNode == nil || activeDeployment == nil { + return nil + } + out := []string{} + deploymentsToPrint := append([]*kubegraph.ReplicaSetNode{}, inactiveDeployments...) 
+ revision, _ := deployutil.Revision(dNode.Deployment) + + deploymentsToPrint = append([]*kubegraph.ReplicaSetNode{activeDeployment}, inactiveDeployments...) + for i, deployment := range deploymentsToPrint { + restartCount := int32(0) + if restartFn != nil { + restartCount = restartFn(deployment) + } + out = append(out, describeDeploymentStatus(deployment.ReplicaSet, revision, i == 0, restartCount)) + } + return out +} + +func describeDeploymentStatus(rs *kapisext.ReplicaSet, revision int64, first bool, restartCount int32) string { + timeAt := strings.ToLower(formatRelativeTime(rs.CreationTimestamp.Time)) + replicaSetRevision, _ := deployutil.Revision(rs) + if replicaSetRevision == revision { + return fmt.Sprintf("deployment #%d running for %s%s", replicaSetRevision, timeAt, describePodSummaryInline(rs.Status.ReadyReplicas, rs.Status.Replicas, rs.Spec.Replicas, false, restartCount)) + } else { + return fmt.Sprintf("deployment #%d deployed %s ago%s", replicaSetRevision, timeAt, describePodSummaryInline(rs.Status.ReadyReplicas, rs.Status.Replicas, rs.Spec.Replicas, first, restartCount)) + } +} + +func describeDeploymentConfigDeployments(f formatter, dcNode *appsgraph.DeploymentConfigNode, activeDeployment *kubegraph.ReplicationControllerNode, inactiveDeployments []*kubegraph.ReplicationControllerNode, restartFn func(*kubegraph.ReplicationControllerNode) int32, count int) []string { if dcNode == nil { return nil } @@ -1062,7 +1196,7 @@ func describeDeployments(f formatter, dcNode *appsgraph.DeploymentConfigNode, ac if restartFn != nil { restartCount = restartFn(deployment) } - out = append(out, describeDeploymentStatus(deployment.ReplicationController, i == 0, dcNode.DeploymentConfig.Spec.Test, restartCount)) + out = append(out, describeDeploymentConfigDeploymentStatus(deployment.ReplicationController, i == 0, dcNode.DeploymentConfig.Spec.Test, restartCount)) switch { case count == -1: if appsutil.IsCompleteDeployment(deployment.ReplicationController) { @@ -1077,7 +1211,7 @@ func describeDeployments(f formatter, dcNode *appsgraph.DeploymentConfigNode, ac return out } -func describeDeploymentStatus(rc *kapi.ReplicationController, first, test bool, restartCount int32) string { +func describeDeploymentConfigDeploymentStatus(rc *kapi.ReplicationController, first, test bool, restartCount int32) string { timeAt := strings.ToLower(formatRelativeTime(rc.CreationTimestamp.Time)) status := appsutil.DeploymentStatusFor(rc) version := appsutil.DeploymentVersionFor(rc) @@ -1111,6 +1245,11 @@ func describeDeploymentStatus(rc *kapi.ReplicationController, first, test bool, } } +func describeDeploymentConfigRolloutStatus(d *kapisext.Deployment) string { + timeAt := strings.ToLower(formatRelativeTime(d.CreationTimestamp.Time)) + return fmt.Sprintf("created %s ago%s", timeAt, describePodSummaryInline(int32(d.Status.Replicas), int32(d.Status.Replicas), int32(d.Spec.Replicas), false, 0)) +} + func describeStatefulSetStatus(p *kapps.StatefulSet) string { timeAt := strings.ToLower(formatRelativeTime(p.CreationTimestamp.Time)) // TODO: Replace first argument in describePodSummaryInline with ReadyReplicas once that's a thing for pet sets. 
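[Editor's note, not part of the patch: the new describeDeployments/describeDeploymentStatus path above picks the active rollout by comparing the deployment.kubernetes.io/revision annotation of the Deployment against that of each ReplicaSet (via deployutil.Revision; the same annotation appears on the Deployment in the deployment.yaml fixture later in this diff). A minimal, self-contained sketch of that comparison, using the versioned apps/v1 types and parsing the annotation directly instead of the internal deployutil helper — an illustration of the idea, not the origin code:]

```go
package main

import (
	"fmt"
	"strconv"

	appsv1 "k8s.io/api/apps/v1"
)

// revisionOf reads the deployment.kubernetes.io/revision annotation that the
// Deployment controller stamps on both a Deployment and its ReplicaSets.
// (Stand-in for the deployutil.Revision call used by the patch.)
func revisionOf(annotations map[string]string) int64 {
	v, ok := annotations["deployment.kubernetes.io/revision"]
	if !ok {
		return 0
	}
	rev, err := strconv.ParseInt(v, 10, 64)
	if err != nil {
		return 0
	}
	return rev
}

// activeReplicaSet returns the ReplicaSet whose revision matches the
// Deployment's current revision, i.e. the one backing the latest rollout;
// all other ReplicaSets are the "inactive deployments" in the describer.
func activeReplicaSet(d *appsv1.Deployment, rss []*appsv1.ReplicaSet) *appsv1.ReplicaSet {
	current := revisionOf(d.Annotations)
	for _, rs := range rss {
		if revisionOf(rs.Annotations) == current {
			return rs
		}
	}
	return nil // rollout not created yet, or annotations missing
}

func main() {
	d := &appsv1.Deployment{}
	d.Annotations = map[string]string{"deployment.kubernetes.io/revision": "3"}
	rs := &appsv1.ReplicaSet{}
	rs.Annotations = map[string]string{"deployment.kubernetes.io/revision": "3"}
	fmt.Println(activeReplicaSet(d, []*appsv1.ReplicaSet{rs}) != nil) // true
}
```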
@@ -1122,6 +1261,11 @@ func describeRCStatus(rc *kapi.ReplicationController) string { return fmt.Sprintf("rc/%s created %s ago%s", rc.Name, timeAt, describePodSummaryInline(rc.Status.ReadyReplicas, rc.Status.Replicas, rc.Spec.Replicas, false, 0)) } +func describeRSStatus(rs *kapisext.ReplicaSet) string { + timeAt := strings.ToLower(formatRelativeTime(rs.CreationTimestamp.Time)) + return fmt.Sprintf("rs/%s created %s ago%s", rs.Name, timeAt, describePodSummaryInline(rs.Status.ReadyReplicas, rs.Status.Replicas, rs.Spec.Replicas, false, 0)) +} + func describePodSummaryInline(ready, actual, requested int32, includeEmpty bool, restartCount int32) string { s := describePodSummary(ready, requested, includeEmpty, restartCount) if len(s) == 0 { @@ -1414,7 +1558,31 @@ func (l *deploymentLoader) Load() error { func (l *deploymentLoader) AddToGraph(g osgraph.Graph) error { for i := range l.items { - appsgraph.EnsureDeploymentNode(g, &l.items[i]) + kubegraph.EnsureDeploymentNode(g, &l.items[i]) + } + + return nil +} + +type replicasetLoader struct { + namespace string + lister kapisextclient.ReplicaSetsGetter + items []kapisext.ReplicaSet +} + +func (l *replicasetLoader) Load() error { + list, err := l.lister.ReplicaSets(l.namespace).List(metav1.ListOptions{}) + if err != nil { + return err + } + + l.items = list.Items + return nil +} + +func (l *replicasetLoader) AddToGraph(g osgraph.Graph) error { + for i := range l.items { + kubegraph.EnsureReplicaSetNode(g, &l.items[i]) } return nil diff --git a/pkg/oc/cli/describe/projectstatus_test.go b/pkg/oc/cli/describe/projectstatus_test.go index 0c7c43216363..29ea54e9a9f6 100644 --- a/pkg/oc/cli/describe/projectstatus_test.go +++ b/pkg/oc/cli/describe/projectstatus_test.go @@ -328,6 +328,23 @@ func TestProjectStatus(t *testing.T) { }, Time: mustParseTime("2015-04-07T04:12:25Z"), }, + "with deployment": { + File: "deployment.yaml", + ErrFn: func(err error) bool { return err == nil }, + Extra: []runtime.Object{ + &projectapi.Project{ + ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: ""}, + }, + }, + Contains: []string{ + "In project example on server https://example.com:8443\n", + "svc/ruby-deploy", + "deployment/ruby-deploy deploys istag/ruby-deploy:latest <-", + "bc/ruby-deploy source builds https://github.com/openshift/ruby-ex.git on istag/ruby-22-centos7:latest", + "not built yet", + }, + Time: mustParseTime("2015-04-07T04:12:25Z"), + }, "with stateful sets": { File: "statefulset.yaml", Extra: []runtime.Object{ @@ -339,7 +356,8 @@ func TestProjectStatus(t *testing.T) { Contains: []string{ "In project example on server https://example.com:8443\n", "svc/galera (headless):3306", - "statefulset/mysql manages erkules/galera:basic, created less than a second ago - 3 pods", + "statefulset/mysql manages erkules/galera:basic", + "created less than a second ago - 3 pods", "* pod/mysql-1 has restarted 7 times", }, Time: mustParseTime("2015-04-07T04:12:25Z"), @@ -446,7 +464,7 @@ func TestProjectStatus(t *testing.T) { routeClient := routefakeclient.NewSimpleClientset(filterByScheme(routeclientscheme.Scheme, objs...)...) 
d := ProjectStatusDescriber{ - K: kc, + KubeClient: kc, ProjectClient: projectClient.Project(), BuildClient: buildClient.Build(), ImageClient: imageClient.Image(), @@ -458,6 +476,7 @@ func TestProjectStatus(t *testing.T) { LogsCommandName: "oc logs -p", SecurityPolicyCommandFormat: "policycommand %s %s", } + t.Logf("describing %q ...", test.File) out, err := d.Describe("example", "") if !test.ErrFn(err) { t.Errorf("%s: unexpected error: %v", k, err) @@ -508,7 +527,7 @@ func TestProjectStatusErrors(t *testing.T) { }) d := ProjectStatusDescriber{ - K: kc, + KubeClient: kc, ProjectClient: projectClient.Project(), BuildClient: buildClient.Build(), ImageClient: imageClient.Image(), diff --git a/pkg/oc/graph/appsgraph/analysis/dc_test.go b/pkg/oc/graph/appsgraph/analysis/dc_test.go index 7c36ca4664d6..a20d831d9444 100644 --- a/pkg/oc/graph/appsgraph/analysis/dc_test.go +++ b/pkg/oc/graph/appsgraph/analysis/dc_test.go @@ -16,7 +16,7 @@ func TestMissingImageStreamTag(t *testing.T) { t.Fatalf("unexpected error: %v", err) } buildedges.AddAllInputOutputEdges(g) - appsedges.AddAllTriggerEdges(g) + appsedges.AddAllTriggerDeploymentConfigsEdges(g) imageedges.AddAllImageStreamRefEdges(g) imageedges.AddAllImageStreamImageRefEdges(g) @@ -36,7 +36,7 @@ func TestMissingImageStream(t *testing.T) { t.Fatalf("unexpected error: %v", err) } buildedges.AddAllInputOutputEdges(g) - appsedges.AddAllTriggerEdges(g) + appsedges.AddAllTriggerDeploymentConfigsEdges(g) imageedges.AddAllImageStreamRefEdges(g) imageedges.AddAllImageStreamImageRefEdges(g) @@ -56,7 +56,7 @@ func TestMissingReadinessProbe(t *testing.T) { t.Fatalf("unexpected error: %v", err) } buildedges.AddAllInputOutputEdges(g) - appsedges.AddAllTriggerEdges(g) + appsedges.AddAllTriggerDeploymentConfigsEdges(g) imageedges.AddAllImageStreamRefEdges(g) markers := FindDeploymentConfigReadinessWarnings(g, osgraph.DefaultNamer, "command probe") diff --git a/pkg/oc/graph/appsgraph/edge_test.go b/pkg/oc/graph/appsgraph/edge_test.go index 94db5e4b6de4..07b5dbbcd658 100644 --- a/pkg/oc/graph/appsgraph/edge_test.go +++ b/pkg/oc/graph/appsgraph/edge_test.go @@ -39,7 +39,7 @@ func TestNamespaceEdgeMatching(t *testing.T) { fn("ns", g) fn("other", g) - AddAllDeploymentEdges(g) + AddAllDeploymentConfigsDeploymentEdges(g) if len(g.Edges()) != 4 { t.Fatal(g) diff --git a/pkg/oc/graph/appsgraph/edges.go b/pkg/oc/graph/appsgraph/edges.go index d80e11397b17..e48bcd0a8788 100644 --- a/pkg/oc/graph/appsgraph/edges.go +++ b/pkg/oc/graph/appsgraph/edges.go @@ -27,8 +27,8 @@ const ( VolumeClaimEdgeKind = "VolumeClaim" ) -// AddTriggerEdges creates edges that point to named Docker image repositories for each image used in the deployment. -func AddTriggerEdges(g osgraph.MutableUniqueGraph, node *appsgraph.DeploymentConfigNode) *appsgraph.DeploymentConfigNode { +// AddTriggerDeploymentConfigsEdges creates edges that point to named Docker image repositories for each image used in the deployment. 
+func AddTriggerDeploymentConfigsEdges(g osgraph.MutableUniqueGraph, node *appsgraph.DeploymentConfigNode) *appsgraph.DeploymentConfigNode { podTemplate := node.DeploymentConfig.Spec.Template if podTemplate == nil { return node @@ -60,15 +60,15 @@ func AddTriggerEdges(g osgraph.MutableUniqueGraph, node *appsgraph.DeploymentCon return node } -func AddAllTriggerEdges(g osgraph.MutableUniqueGraph) { +func AddAllTriggerDeploymentConfigsEdges(g osgraph.MutableUniqueGraph) { for _, node := range g.(graph.Graph).Nodes() { if dcNode, ok := node.(*appsgraph.DeploymentConfigNode); ok { - AddTriggerEdges(g, dcNode) + AddTriggerDeploymentConfigsEdges(g, dcNode) } } } -func AddDeploymentEdges(g osgraph.MutableUniqueGraph, node *appsgraph.DeploymentConfigNode) *appsgraph.DeploymentConfigNode { +func AddDeploymentConfigsDeploymentEdges(g osgraph.MutableUniqueGraph, node *appsgraph.DeploymentConfigNode) *appsgraph.DeploymentConfigNode { for _, n := range g.(graph.Graph).Nodes() { if rcNode, ok := n.(*kubegraph.ReplicationControllerNode); ok { if rcNode.ReplicationController.Namespace != node.DeploymentConfig.Namespace { @@ -84,10 +84,10 @@ func AddDeploymentEdges(g osgraph.MutableUniqueGraph, node *appsgraph.Deployment return node } -func AddAllDeploymentEdges(g osgraph.MutableUniqueGraph) { +func AddAllDeploymentConfigsDeploymentEdges(g osgraph.MutableUniqueGraph) { for _, node := range g.(graph.Graph).Nodes() { if dcNode, ok := node.(*appsgraph.DeploymentConfigNode); ok { - AddDeploymentEdges(g, dcNode) + AddDeploymentConfigsDeploymentEdges(g, dcNode) } } } diff --git a/pkg/oc/graph/appsgraph/nodes/nodes.go b/pkg/oc/graph/appsgraph/nodes/nodes.go index a03b4ab4cff3..25f399b7044a 100644 --- a/pkg/oc/graph/appsgraph/nodes/nodes.go +++ b/pkg/oc/graph/appsgraph/nodes/nodes.go @@ -3,67 +3,11 @@ package nodes import ( "github.com/gonum/graph" - kapisext "k8s.io/kubernetes/pkg/apis/extensions" - appsapi "github.com/openshift/origin/pkg/apps/apis/apps" osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph" kubegraph "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes" ) -// EnsureDaemonSetNode adds the provided daemon set to the graph if it does not exist -func EnsureDaemonSetNode(g osgraph.MutableUniqueGraph, ds *kapisext.DaemonSet) *DaemonSetNode { - dsName := DaemonSetNodeName(ds) - dsNode := osgraph.EnsureUnique( - g, - dsName, - func(node osgraph.Node) graph.Node { - return &DaemonSetNode{Node: node, DaemonSet: ds, IsFound: true} - }, - ).(*DaemonSetNode) - - podTemplateSpecNode := kubegraph.EnsurePodTemplateSpecNode(g, &ds.Spec.Template, ds.Namespace, dsName) - g.AddEdge(dsNode, podTemplateSpecNode, osgraph.ContainsEdgeKind) - - return dsNode -} - -func FindOrCreateSyntheticDaemonSetNode(g osgraph.MutableUniqueGraph, ds *kapisext.DaemonSet) *DaemonSetNode { - return osgraph.EnsureUnique( - g, - DaemonSetNodeName(ds), - func(node osgraph.Node) graph.Node { - return &DaemonSetNode{Node: node, DaemonSet: ds, IsFound: false} - }, - ).(*DaemonSetNode) -} - -// EnsureDeploymentNode adds the provided upstream deployment to the graph if it does not exist -func EnsureDeploymentNode(g osgraph.MutableUniqueGraph, deployment *kapisext.Deployment) *DeploymentNode { - deploymentName := DeploymentNodeName(deployment) - deploymentNode := osgraph.EnsureUnique( - g, - deploymentName, - func(node osgraph.Node) graph.Node { - return &DeploymentNode{Node: node, Deployment: deployment, IsFound: true} - }, - ).(*DeploymentNode) - - podTemplateSpecNode := 
kubegraph.EnsurePodTemplateSpecNode(g, &deployment.Spec.Template, deployment.Namespace, deploymentName) - g.AddEdge(deploymentNode, podTemplateSpecNode, osgraph.ContainsEdgeKind) - - return deploymentNode -} - -func FindOrCreateSyntheticDeploymentNode(g osgraph.MutableUniqueGraph, deployment *kapisext.Deployment) *DeploymentNode { - return osgraph.EnsureUnique( - g, - DeploymentNodeName(deployment), - func(node osgraph.Node) graph.Node { - return &DeploymentNode{Node: node, Deployment: deployment, IsFound: false} - }, - ).(*DeploymentNode) -} - // EnsureDeploymentConfigNode adds the provided deployment config to the graph if it does not exist func EnsureDeploymentConfigNode(g osgraph.MutableUniqueGraph, dc *appsapi.DeploymentConfig) *DeploymentConfigNode { dcName := DeploymentConfigNodeName(dc) @@ -92,30 +36,3 @@ func FindOrCreateSyntheticDeploymentConfigNode(g osgraph.MutableUniqueGraph, dc }, ).(*DeploymentConfigNode) } - -// EnsureReplicaSetNode adds the provided replica set to the graph if it does not exist -func EnsureReplicaSetNode(g osgraph.MutableUniqueGraph, rs *kapisext.ReplicaSet) *ReplicaSetNode { - rsName := ReplicaSetNodeName(rs) - rsNode := osgraph.EnsureUnique( - g, - rsName, - func(node osgraph.Node) graph.Node { - return &ReplicaSetNode{Node: node, ReplicaSet: rs, IsFound: true} - }, - ).(*ReplicaSetNode) - - podTemplateSpecNode := kubegraph.EnsurePodTemplateSpecNode(g, &rs.Spec.Template, rs.Namespace, rsName) - g.AddEdge(rsNode, podTemplateSpecNode, osgraph.ContainsEdgeKind) - - return rsNode -} - -func FindOrCreateSyntheticReplicaSetNode(g osgraph.MutableUniqueGraph, rs *kapisext.ReplicaSet) *ReplicaSetNode { - return osgraph.EnsureUnique( - g, - ReplicaSetNodeName(rs), - func(node osgraph.Node) graph.Node { - return &ReplicaSetNode{Node: node, ReplicaSet: rs, IsFound: false} - }, - ).(*ReplicaSetNode) -} diff --git a/pkg/oc/graph/appsgraph/nodes/types.go b/pkg/oc/graph/appsgraph/nodes/types.go index ae3cd28e9153..4a887cdb0e43 100644 --- a/pkg/oc/graph/appsgraph/nodes/types.go +++ b/pkg/oc/graph/appsgraph/nodes/types.go @@ -3,73 +3,14 @@ package nodes import ( "reflect" - kapisext "k8s.io/kubernetes/pkg/apis/extensions" - appsapi "github.com/openshift/origin/pkg/apps/apis/apps" osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph" ) var ( - DaemonSetNodeKind = reflect.TypeOf(kapisext.DaemonSet{}).Name() - DeploymentNodeKind = reflect.TypeOf(kapisext.Deployment{}).Name() DeploymentConfigNodeKind = reflect.TypeOf(appsapi.DeploymentConfig{}).Name() - ReplicaSetNodeKind = reflect.TypeOf(kapisext.ReplicaSet{}).Name() ) -func DaemonSetNodeName(o *kapisext.DaemonSet) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(DaemonSetNodeKind, o) -} - -type DaemonSetNode struct { - osgraph.Node - DaemonSet *kapisext.DaemonSet - - IsFound bool -} - -func (n DaemonSetNode) Found() bool { - return n.IsFound -} - -func (n DaemonSetNode) Object() interface{} { - return n.DaemonSet -} - -func (n DaemonSetNode) String() string { - return string(DaemonSetNodeName(n.DaemonSet)) -} - -func (*DaemonSetNode) Kind() string { - return DaemonSetNodeKind -} - -func DeploymentNodeName(o *kapisext.Deployment) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(DeploymentNodeKind, o) -} - -type DeploymentNode struct { - osgraph.Node - Deployment *kapisext.Deployment - - IsFound bool -} - -func (n DeploymentNode) Found() bool { - return n.IsFound -} - -func (n DeploymentNode) Object() interface{} { - return n.Deployment -} - -func (n 
DeploymentNode) String() string { - return string(DeploymentNodeName(n.Deployment)) -} - -func (*DeploymentNode) Kind() string { - return DeploymentNodeKind -} - func DeploymentConfigNodeName(o *appsapi.DeploymentConfig) osgraph.UniqueName { return osgraph.GetUniqueRuntimeObjectNodeName(DeploymentConfigNodeKind, o) } @@ -96,30 +37,3 @@ func (n DeploymentConfigNode) String() string { func (*DeploymentConfigNode) Kind() string { return DeploymentConfigNodeKind } - -func ReplicaSetNodeName(o *kapisext.ReplicaSet) osgraph.UniqueName { - return osgraph.GetUniqueRuntimeObjectNodeName(ReplicaSetNodeKind, o) -} - -type ReplicaSetNode struct { - osgraph.Node - ReplicaSet *kapisext.ReplicaSet - - IsFound bool -} - -func (n ReplicaSetNode) Found() bool { - return n.IsFound -} - -func (n ReplicaSetNode) Object() interface{} { - return n.ReplicaSet -} - -func (n ReplicaSetNode) String() string { - return string(ReplicaSetNodeName(n.ReplicaSet)) -} - -func (*ReplicaSetNode) Kind() string { - return ReplicaSetNodeKind -} diff --git a/pkg/oc/graph/genericgraph/graphview/dc_pipeline.go b/pkg/oc/graph/genericgraph/graphview/dc_pipeline.go index 3183d09c9fff..0930d2acb47a 100644 --- a/pkg/oc/graph/genericgraph/graphview/dc_pipeline.go +++ b/pkg/oc/graph/genericgraph/graphview/dc_pipeline.go @@ -10,7 +10,7 @@ import ( ) type DeploymentConfigPipeline struct { - Deployment *appsgraph.DeploymentConfigNode + DeploymentConfig *appsgraph.DeploymentConfigNode ActiveDeployment *kubegraph.ReplicationControllerNode InactiveDeployments []*kubegraph.ReplicationControllerNode @@ -43,7 +43,7 @@ func NewDeploymentConfigPipeline(g osgraph.Graph, dcNode *appsgraph.DeploymentCo covered.Insert(dcNode.ID()) dcPipeline := DeploymentConfigPipeline{} - dcPipeline.Deployment = dcNode + dcPipeline.DeploymentConfig = dcNode // for everything that can trigger a deployment, create an image pipeline and add it to the list for _, istNode := range g.PredecessorNodesByEdgeKind(dcNode, appsedges.TriggersDeploymentEdgeKind) { @@ -80,5 +80,5 @@ type SortedDeploymentConfigPipeline []DeploymentConfigPipeline func (m SortedDeploymentConfigPipeline) Len() int { return len(m) } func (m SortedDeploymentConfigPipeline) Swap(i, j int) { m[i], m[j] = m[j], m[i] } func (m SortedDeploymentConfigPipeline) Less(i, j int) bool { - return CompareObjectMeta(&m[i].Deployment.DeploymentConfig.ObjectMeta, &m[j].Deployment.DeploymentConfig.ObjectMeta) + return CompareObjectMeta(&m[i].DeploymentConfig.DeploymentConfig.ObjectMeta, &m[j].DeploymentConfig.DeploymentConfig.ObjectMeta) } diff --git a/pkg/oc/graph/genericgraph/graphview/deployments.go b/pkg/oc/graph/genericgraph/graphview/deployments.go new file mode 100644 index 000000000000..10c82ff9484e --- /dev/null +++ b/pkg/oc/graph/genericgraph/graphview/deployments.go @@ -0,0 +1,73 @@ +package graphview + +import ( + appsedges "github.com/openshift/origin/pkg/oc/graph/appsgraph" + osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph" + kubeedges "github.com/openshift/origin/pkg/oc/graph/kubegraph" + kubegraph "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes" +) + +type Deployment struct { + Deployment *kubegraph.DeploymentNode + + ActiveDeployment *kubegraph.ReplicaSetNode + InactiveDeployments []*kubegraph.ReplicaSetNode + + Images []ImagePipeline + + // TODO: handle conflicting once controller refs are present, not worth it yet +} + +// AllDeployments returns all the Deployments that aren't in the excludes set and the set of covered NodeIDs +func 
AllDeployments(g osgraph.Graph, excludeNodeIDs IntSet) ([]Deployment, IntSet) { + covered := IntSet{} + views := []Deployment{} + + for _, uncastNode := range g.NodesByKind(kubegraph.DeploymentNodeKind) { + if excludeNodeIDs.Has(uncastNode.ID()) { + continue + } + + view, covers := NewDeployment(g, uncastNode.(*kubegraph.DeploymentNode)) + covered.Insert(covers.List()...) + views = append(views, view) + } + + return views, covered +} + +// NewDeployment returns the Deployment and a set of all the NodeIDs covered by the Deployment +func NewDeployment(g osgraph.Graph, node *kubegraph.DeploymentNode) (Deployment, IntSet) { + covered := IntSet{} + covered.Insert(node.ID()) + + view := Deployment{} + view.Deployment = node + + for _, istNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.TriggersDeploymentEdgeKind) { + imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, istNode, istNode.(ImageTagLocation)) + covered.Insert(covers.List()...) + view.Images = append(view.Images, imagePipeline) + } + + // for image that we use, create an image pipeline and add it to the list + for _, tagNode := range g.PredecessorNodesByEdgeKind(node, appsedges.UsedInDeploymentEdgeKind) { + imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, tagNode, tagNode.(ImageTagLocation)) + + covered.Insert(covers.List()...) + view.Images = append(view.Images, imagePipeline) + } + + view.ActiveDeployment, view.InactiveDeployments = kubeedges.RelevantDeployments(g, view.Deployment) + for _, rs := range view.InactiveDeployments { + _, covers := NewReplicaSet(g, rs) + covered.Insert(covers.List()...) + } + + if view.ActiveDeployment != nil { + _, covers := NewReplicaSet(g, view.ActiveDeployment) + covered.Insert(covers.List()...) + } + + return view, covered +} diff --git a/pkg/oc/graph/genericgraph/graphview/petset.go b/pkg/oc/graph/genericgraph/graphview/petset.go index 92685f125f3d..3288e531c81f 100644 --- a/pkg/oc/graph/genericgraph/graphview/petset.go +++ b/pkg/oc/graph/genericgraph/graphview/petset.go @@ -1,6 +1,7 @@ package graphview import ( + appsedges "github.com/openshift/origin/pkg/oc/graph/appsgraph" osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph" kubeedges "github.com/openshift/origin/pkg/oc/graph/kubegraph" kubegraph "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes" @@ -12,6 +13,8 @@ type StatefulSet struct { OwnedPods []*kubegraph.PodNode CreatedPods []*kubegraph.PodNode + Images []ImagePipeline + // TODO: handle conflicting once controller refs are present, not worth it yet } @@ -47,5 +50,19 @@ func NewStatefulSet(g osgraph.Graph, node *kubegraph.StatefulSetNode) (StatefulS view.OwnedPods = append(view.OwnedPods, podNode) } + for _, istNode := range g.PredecessorNodesByEdgeKind(node, kubeedges.TriggersDeploymentEdgeKind) { + imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, istNode, istNode.(ImageTagLocation)) + covered.Insert(covers.List()...) + view.Images = append(view.Images, imagePipeline) + } + + // for image that we use, create an image pipeline and add it to the list + for _, tagNode := range g.PredecessorNodesByEdgeKind(node, appsedges.UsedInDeploymentEdgeKind) { + imagePipeline, covers := NewImagePipelineFromImageTagLocation(g, tagNode, tagNode.(ImageTagLocation)) + + covered.Insert(covers.List()...) 
+ view.Images = append(view.Images, imagePipeline) + } + return view, covered } diff --git a/pkg/oc/graph/genericgraph/graphview/rs.go b/pkg/oc/graph/genericgraph/graphview/rs.go new file mode 100644 index 000000000000..10a5693754d3 --- /dev/null +++ b/pkg/oc/graph/genericgraph/graphview/rs.go @@ -0,0 +1,71 @@ +package graphview + +import ( + osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph" + kubeedges "github.com/openshift/origin/pkg/oc/graph/kubegraph" + "github.com/openshift/origin/pkg/oc/graph/kubegraph/analysis" + kubegraph "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type ReplicaSet struct { + RS *kubegraph.ReplicaSetNode + + OwnedPods []*kubegraph.PodNode + CreatedPods []*kubegraph.PodNode +} + +func AllReplicaSets(g osgraph.Graph, excludeNodeIDs IntSet) ([]ReplicaSet, IntSet) { + covered := IntSet{} + rsViews := []ReplicaSet{} + + for _, uncastNode := range g.NodesByKind(kubegraph.ReplicaSetNodeKind) { + if excludeNodeIDs.Has(uncastNode.ID()) { + continue + } + + rsView, covers := NewReplicaSet(g, uncastNode.(*kubegraph.ReplicaSetNode)) + covered.Insert(covers.List()...) + rsViews = append(rsViews, rsView) + } + + return rsViews, covered +} + +// MaxRecentContainerRestarts returns the maximum container restarts for all pods +func (rs *ReplicaSet) MaxRecentContainerRestarts() int32 { + var maxRestarts int32 + for _, pod := range rs.OwnedPods { + for _, status := range pod.Status.ContainerStatuses { + if status.RestartCount > maxRestarts && analysis.ContainerRestartedRecently(status, metav1.Now()) { + maxRestarts = status.RestartCount + } + } + } + return maxRestarts +} + +// NewReplicationController returns the ReplicationController and a set of all the NodeIDs covered by the ReplicationController +func NewReplicaSet(g osgraph.Graph, rsNode *kubegraph.ReplicaSetNode) (ReplicaSet, IntSet) { + covered := IntSet{} + covered.Insert(rsNode.ID()) + + rsView := ReplicaSet{} + rsView.RS = rsNode + + for _, uncastPodNode := range g.PredecessorNodesByEdgeKind(rsNode, kubeedges.ManagedByControllerEdgeKind) { + podNode := uncastPodNode.(*kubegraph.PodNode) + covered.Insert(podNode.ID()) + rsView.OwnedPods = append(rsView.OwnedPods, podNode) + } + + return rsView, covered +} + +func MaxRecentContainerRestartsForRS(g osgraph.Graph, rsNode *kubegraph.ReplicaSetNode) int32 { + if rsNode == nil { + return 0 + } + rs, _ := NewReplicaSet(g, rsNode) + return rs.MaxRecentContainerRestarts() +} diff --git a/pkg/oc/graph/genericgraph/graphview/service_group.go b/pkg/oc/graph/genericgraph/graphview/service_group.go index 09854c17b360..5803a8386a89 100644 --- a/pkg/oc/graph/genericgraph/graphview/service_group.go +++ b/pkg/oc/graph/genericgraph/graphview/service_group.go @@ -21,12 +21,16 @@ type ServiceGroup struct { DeploymentConfigPipelines []DeploymentConfigPipeline ReplicationControllers []ReplicationController + ReplicaSets []ReplicaSet + Deployments []Deployment StatefulSets []StatefulSet // TODO: this has to stop FulfillingStatefulSets []*kubegraph.StatefulSetNode + FulfillingDeployments []*kubegraph.DeploymentNode FulfillingDCs []*appsgraph.DeploymentConfigNode FulfillingRCs []*kubegraph.ReplicationControllerNode + FulfillingRSs []*kubegraph.ReplicaSetNode FulfillingPods []*kubegraph.PodNode ExposingRoutes []*routegraph.RouteNode @@ -67,12 +71,16 @@ func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (Servi service.FulfillingDCs = append(service.FulfillingDCs, 
castContainer) case *kubegraph.ReplicationControllerNode: service.FulfillingRCs = append(service.FulfillingRCs, castContainer) + case *kubegraph.ReplicaSetNode: + service.FulfillingRSs = append(service.FulfillingRSs, castContainer) case *kubegraph.PodNode: service.FulfillingPods = append(service.FulfillingPods, castContainer) case *kubegraph.StatefulSetNode: service.FulfillingStatefulSets = append(service.FulfillingStatefulSets, castContainer) + case *kubegraph.DeploymentNode: + service.FulfillingDeployments = append(service.FulfillingDeployments, castContainer) default: - utilruntime.HandleError(fmt.Errorf("unrecognized container: %v", castContainer)) + utilruntime.HandleError(fmt.Errorf("unrecognized container: %v (%T)", castContainer, castContainer)) } } @@ -102,6 +110,13 @@ func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (Servi service.ReplicationControllers = append(service.ReplicationControllers, rcView) } + for _, fulfillingRS := range service.FulfillingRSs { + rsView, rsCovers := NewReplicaSet(g, fulfillingRS) + + covered.Insert(rsCovers.List()...) + service.ReplicaSets = append(service.ReplicaSets, rsView) + } + for _, fulfillingStatefulSet := range service.FulfillingStatefulSets { view, covers := NewStatefulSet(g, fulfillingStatefulSet) @@ -109,6 +124,13 @@ func NewServiceGroup(g osgraph.Graph, serviceNode *kubegraph.ServiceNode) (Servi service.StatefulSets = append(service.StatefulSets, view) } + for _, fulfillingDeployment := range service.FulfillingDeployments { + view, covers := NewDeployment(g, fulfillingDeployment) + + covered.Insert(covers.List()...) + service.Deployments = append(service.Deployments, view) + } + for _, fulfillingPod := range service.FulfillingPods { _, podCovers := NewPod(g, fulfillingPod) covered.Insert(podCovers.List()...) 
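[Editor's note, not part of the patch: NewServiceGroup above now buckets ReplicaSets and Deployments that fulfill a service, and the Describe loop earlier skips any ReplicaSet already covered by a Deployment via the labeled `continue rsNode` loop and the BelongsToDeployment owner-reference check added in kubegraph/edges.go. A minimal sketch of that filtering, with versioned apps/v1 types for self-containedness; the ruby-deploy names come from the test fixture, "standalone-rs" is invented for the example:]

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ownedByDeployment reports whether rs lists the given Deployment among its
// owner references, mirroring the BelongsToDeployment helper in this patch.
func ownedByDeployment(d *appsv1.Deployment, rs *appsv1.ReplicaSet) bool {
	for _, ref := range rs.OwnerReferences {
		if ref.Kind == "Deployment" && ref.Name == d.Name {
			return true
		}
	}
	return false
}

// standalone returns the ReplicaSets not owned by any of the given Deployments,
// mirroring how Describe only prints RSs that no Deployment already covers.
func standalone(rss []*appsv1.ReplicaSet, ds []*appsv1.Deployment) []*appsv1.ReplicaSet {
	var out []*appsv1.ReplicaSet
rsLoop:
	for _, rs := range rss {
		for _, d := range ds {
			if ownedByDeployment(d, rs) {
				continue rsLoop
			}
		}
		out = append(out, rs)
	}
	return out
}

func main() {
	d := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Name: "ruby-deploy"}}
	owned := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{
		Name:            "ruby-deploy-7556484b56",
		OwnerReferences: []metav1.OwnerReference{{Kind: "Deployment", Name: "ruby-deploy"}},
	}}
	orphan := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Name: "standalone-rs"}}

	for _, rs := range standalone([]*appsv1.ReplicaSet{owned, orphan}, []*appsv1.Deployment{d}) {
		fmt.Println("standalone:", rs.Name) // prints only "standalone: standalone-rs"
	}
}
```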
diff --git a/pkg/oc/graph/genericgraph/graphview/veneering_test.go b/pkg/oc/graph/genericgraph/graphview/veneering_test.go index c17c80429ca0..850e8df999fc 100644 --- a/pkg/oc/graph/genericgraph/graphview/veneering_test.go +++ b/pkg/oc/graph/genericgraph/graphview/veneering_test.go @@ -31,7 +31,7 @@ func TestServiceGroup(t *testing.T) { kubeedges.AddAllExposedPodTemplateSpecEdges(g) buildedges.AddAllInputOutputEdges(g) - appsedges.AddAllTriggerEdges(g) + appsedges.AddAllTriggerDeploymentConfigsEdges(g) coveredNodes := IntSet{} @@ -104,7 +104,7 @@ func TestBareDCGroup(t *testing.T) { kubeedges.AddAllExposedPodTemplateSpecEdges(g) buildedges.AddAllInputOutputEdges(g) - appsedges.AddAllTriggerEdges(g) + appsedges.AddAllTriggerDeploymentConfigsEdges(g) coveredNodes := IntSet{} @@ -160,7 +160,7 @@ func TestBareBCGroup(t *testing.T) { kubeedges.AddAllExposedPodTemplateSpecEdges(g) buildedges.AddAllInputOutputEdges(g) - appsedges.AddAllTriggerEdges(g) + appsedges.AddAllTriggerDeploymentConfigsEdges(g) coveredNodes := IntSet{} @@ -348,8 +348,8 @@ func TestGraph(t *testing.T) { kubeedges.AddAllExposedPodTemplateSpecEdges(g) buildedges.AddAllInputOutputEdges(g) buildedges.AddAllBuildEdges(g) - appsedges.AddAllTriggerEdges(g) - appsedges.AddAllDeploymentEdges(g) + appsedges.AddAllTriggerDeploymentConfigsEdges(g) + appsedges.AddAllDeploymentConfigsDeploymentEdges(g) t.Log(g) @@ -399,7 +399,7 @@ func TestGraph(t *testing.T) { } for _, bareDCPipeline := range bareDCPipelines { - t.Logf("from %s", bareDCPipeline.Deployment.DeploymentConfig.Name) + t.Logf("from %s", bareDCPipeline.DeploymentConfig.DeploymentConfig.Name) for _, path := range bareDCPipeline.Images { t.Logf(" %v", path) } @@ -413,7 +413,7 @@ func TestGraph(t *testing.T) { indent := " " for _, deployment := range serviceGroup.DeploymentConfigPipelines { - t.Logf("%sdeployment %s", indent, deployment.Deployment.DeploymentConfig.Name) + t.Logf("%sdeployment %s", indent, deployment.DeploymentConfig.DeploymentConfig.Name) for _, image := range deployment.Images { t.Logf("%s image %s", indent, image.Image.ImageSpec()) if image.Build != nil { diff --git a/pkg/oc/graph/genericgraph/test/deployment.yaml b/pkg/oc/graph/genericgraph/test/deployment.yaml new file mode 100644 index 000000000000..dad69886e16a --- /dev/null +++ b/pkg/oc/graph/genericgraph/test/deployment.yaml @@ -0,0 +1,138 @@ +apiVersion: v1 +items: +- apiVersion: v1 + kind: Service + metadata: + creationTimestamp: null + labels: + app: ruby-deploy + name: ruby-deploy + spec: + ports: + - port: 8080 + protocol: TCP + targetPort: 8080 + selector: + deploymentconfig: ruby-deploy + sessionAffinity: None + type: ClusterIP + status: + loadBalancer: {} +- apiVersion: extensions/v1beta1 + kind: Deployment + metadata: + annotations: + deployment.kubernetes.io/revision: "1" + image.openshift.io/triggers: '[{"from":{"kind":"ImageStreamTag","name":"ruby-deploy:latest"},"fieldPath":"spec.template.spec.containers[?(@.name==\"ruby-deploy\")].image"}]' + openshift.io/generated-by: OpenShiftNewApp + creationTimestamp: null + generation: 1 + labels: + app: ruby-deploy + name: ruby-deploy + spec: + progressDeadlineSeconds: 600 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + deploymentconfig: ruby-deploy + strategy: + rollingUpdate: + maxSurge: 25% + maxUnavailable: 25% + type: RollingUpdate + template: + metadata: + creationTimestamp: null + labels: + deploymentconfig: ruby-deploy + spec: + containers: + - image: 
busybox@sha256:4cee1979ba0bf7db9fc5d28fb7b798ca69ae95a47c5fecf46327720df4ff352d + imagePullPolicy: IfNotPresent + name: ruby-deploy + ports: + - containerPort: 8080 + protocol: TCP + resources: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + dnsPolicy: ClusterFirst + restartPolicy: Always + schedulerName: default-scheduler + securityContext: {} + terminationGracePeriodSeconds: 30 + status: + conditions: + - lastTransitionTime: 2018-02-06T09:11:19Z + lastUpdateTime: 2018-02-06T09:11:19Z + message: Deployment does not have minimum availability. + reason: MinimumReplicasUnavailable + status: "False" + type: Available + - lastTransitionTime: 2018-02-06T09:11:19Z + lastUpdateTime: 2018-02-06T09:11:20Z + message: ReplicaSet "ruby-deploy-7556484b56" is progressing. + reason: ReplicaSetUpdated + status: "True" + type: Progressing + observedGeneration: 1 + replicas: 1 + unavailableReplicas: 1 + updatedReplicas: 1 +- apiVersion: v1 + kind: BuildConfig + metadata: + annotations: + openshift.io/generated-by: OpenShiftNewApp + creationTimestamp: null + labels: + app: ruby-deploy + name: ruby-deploy + spec: + failedBuildsHistoryLimit: 5 + nodeSelector: null + output: + to: + kind: ImageStreamTag + name: ruby-deploy:latest + postCommit: {} + resources: {} + runPolicy: Serial + source: + git: + uri: https://github.com/openshift/ruby-ex.git + type: Git + strategy: + sourceStrategy: + from: + kind: ImageStreamTag + name: ruby-22-centos7:latest + type: Source + successfulBuildsHistoryLimit: 5 + triggers: + - github: + secret: GXQEpU0tBI75UZ5OvNg- + type: GitHub + - generic: + secret: zUdEUH2eIbDIU34q1Qzk + type: Generic + - type: ConfigChange + - imageChange: {} + type: ImageChange + status: + lastVersion: 0 +- apiVersion: v1 + kind: ImageStream + metadata: + creationTimestamp: null + generation: 1 + name: ruby-deploy + spec: + lookupPolicy: + local: false + status: + dockerImageRepository: "" +kind: List +metadata: {} diff --git a/pkg/oc/graph/kubegraph/analysis/hpa_test.go b/pkg/oc/graph/kubegraph/analysis/hpa_test.go index 89345db5ced9..b194bbfb5b39 100644 --- a/pkg/oc/graph/kubegraph/analysis/hpa_test.go +++ b/pkg/oc/graph/kubegraph/analysis/hpa_test.go @@ -62,7 +62,7 @@ func TestOverlappingHPAsWarning(t *testing.T) { } kubegraph.AddHPAScaleRefEdges(g) - appsgraph.AddAllDeploymentEdges(g) + appsgraph.AddAllDeploymentConfigsDeploymentEdges(g) markers := FindOverlappingHPAs(g, osgraph.DefaultNamer) if len(markers) != 8 { @@ -87,7 +87,7 @@ func TestOverlappingLegacyHPAsWarning(t *testing.T) { } kubegraph.AddHPAScaleRefEdges(g) - appsgraph.AddAllDeploymentEdges(g) + appsgraph.AddAllDeploymentConfigsDeploymentEdges(g) markers := FindOverlappingHPAs(g, osgraph.DefaultNamer) if len(markers) != 8 { diff --git a/pkg/oc/graph/kubegraph/edges.go b/pkg/oc/graph/kubegraph/edges.go index a5ba81f82bc6..229c2abca7d8 100644 --- a/pkg/oc/graph/kubegraph/edges.go +++ b/pkg/oc/graph/kubegraph/edges.go @@ -1,6 +1,7 @@ package kubegraph import ( + "encoding/json" "strings" "github.com/gonum/graph" @@ -12,11 +13,15 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" kapi "k8s.io/kubernetes/pkg/apis/core" - kapisext "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/apis/extensions" appsapi "github.com/openshift/origin/pkg/apps/apis/apps" + imageapi "github.com/openshift/origin/pkg/image/apis/image" + triggerapi "github.com/openshift/origin/pkg/image/apis/image/v1/trigger" + 
"github.com/openshift/origin/pkg/image/trigger/annotations" appsgraph "github.com/openshift/origin/pkg/oc/graph/appsgraph/nodes" osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph" + imagegraph "github.com/openshift/origin/pkg/oc/graph/imagegraph/nodes" kubegraph "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes" ) @@ -33,6 +38,12 @@ const ( ReferencedServiceAccountEdgeKind = "ReferencedServiceAccount" // ScalingEdgeKind goes from HorizontalPodAutoscaler to scaled objects indicating that the HPA scales the object ScalingEdgeKind = "Scaling" + // TriggersDeploymentEdgeKind points from DeploymentConfigs to ImageStreamTags that trigger the deployment + TriggersDeploymentEdgeKind = "TriggersDeployment" + // UsedInDeploymentEdgeKind points from DeploymentConfigs to DockerImageReferences that are used in the deployment + UsedInDeploymentEdgeKind = "UsedInDeployment" + // DeploymentEdgeKind points from Deployment to the ReplicaSet that are fulfilling the deployment + DeploymentEdgeKind = "Deployment" ) // AddExposedPodTemplateSpecEdges ensures that a directed edge exists between a service and all the PodTemplateSpecs @@ -121,6 +132,8 @@ func AddAllManagedByControllerPodEdges(g osgraph.MutableUniqueGraph) { switch cast := node.(type) { case *kubegraph.ReplicationControllerNode: AddManagedByControllerPodEdges(g, cast, cast.ReplicationController.Namespace, cast.ReplicationController.Spec.Selector) + case *kubegraph.ReplicaSetNode: + AddManagedByControllerPodEdges(g, cast, cast.ReplicaSet.Namespace, cast.ReplicaSet.Spec.Selector.MatchLabels) case *kubegraph.StatefulSetNode: // TODO: refactor to handle expanded selectors (along with ReplicaSets and Deployments) AddManagedByControllerPodEdges(g, cast, cast.StatefulSet.Namespace, cast.StatefulSet.Spec.Selector.MatchLabels) @@ -243,8 +256,8 @@ func AddHPAScaleRefEdges(g osgraph.Graph) { syntheticNode = kubegraph.FindOrCreateSyntheticReplicationControllerNode(g, &kapi.ReplicationController{ObjectMeta: syntheticMeta}) case appsapi.IsResourceOrLegacy("deploymentconfigs", r): syntheticNode = appsgraph.FindOrCreateSyntheticDeploymentConfigNode(g, &appsapi.DeploymentConfig{ObjectMeta: syntheticMeta}) - case r == kapisext.Resource("deployments"): - syntheticNode = appsgraph.FindOrCreateSyntheticDeploymentNode(g, &kapisext.Deployment{ObjectMeta: syntheticMeta}) + case r == extensions.Resource("deployments"): + syntheticNode = kubegraph.FindOrCreateSyntheticDeploymentNode(g, &extensions.Deployment{ObjectMeta: syntheticMeta}) default: continue } @@ -252,3 +265,139 @@ func AddHPAScaleRefEdges(g osgraph.Graph) { g.AddEdge(hpaNode, syntheticNode, ScalingEdgeKind) } } + +func addTriggerEdges(obj runtime.Object, podTemplate kapi.PodTemplateSpec, addEdgeFn func(image appsapi.TemplateImage, err error)) { + m, err := meta.Accessor(obj) + if err != nil { + return + } + triggerAnnotation, ok := m.GetAnnotations()[triggerapi.TriggerAnnotationKey] + if !ok { + return + } + triggers := []triggerapi.ObjectFieldTrigger{} + if err := json.Unmarshal([]byte(triggerAnnotation), &triggers); err != nil { + return + } + triggerFn := func(container *kapi.Container) (appsapi.TemplateImage, bool) { + from := kapi.ObjectReference{} + for _, trigger := range triggers { + c, remainder, err := annotations.ContainerForObjectFieldPath(obj, trigger.FieldPath) + if err != nil || remainder != "image" { + continue + } + from.Namespace = trigger.From.Namespace + if len(from.Namespace) == 0 { + from.Namespace = m.GetNamespace() + } + 
from.Name = trigger.From.Name + from.Kind = trigger.From.Kind + if len(from.Kind) == 0 { + from.Kind = "ImageStreamTag" + } + return appsapi.TemplateImage{ + Image: c.GetImage(), + From: &from, + }, true + } + return appsapi.TemplateImage{}, false + } + appsapi.EachTemplateImage(&podTemplate.Spec, triggerFn, addEdgeFn) +} + +func AddTriggerStatefulSetsEdges(g osgraph.MutableUniqueGraph, node *kubegraph.StatefulSetNode) *kubegraph.StatefulSetNode { + addTriggerEdges(node.StatefulSet, node.StatefulSet.Spec.Template, func(image appsapi.TemplateImage, err error) { + if err != nil { + return + } + if image.From != nil { + if len(image.From.Name) == 0 { + return + } + name, tag, _ := imageapi.SplitImageStreamTag(image.From.Name) + in := imagegraph.FindOrCreateSyntheticImageStreamTagNode(g, imagegraph.MakeImageStreamTagObjectMeta(image.From.Namespace, name, tag)) + g.AddEdge(in, node, TriggersDeploymentEdgeKind) + return + } + + tag := image.Ref.Tag + image.Ref.Tag = "" + in := imagegraph.EnsureDockerRepositoryNode(g, image.Ref.String(), tag) + g.AddEdge(in, node, UsedInDeploymentEdgeKind) + }) + return node +} + +func AddAllTriggerStatefulSetsEdges(g osgraph.MutableUniqueGraph) { + for _, node := range g.(graph.Graph).Nodes() { + if sNode, ok := node.(*kubegraph.StatefulSetNode); ok { + AddTriggerStatefulSetsEdges(g, sNode) + } + } +} + +func AddTriggerDeploymentsEdges(g osgraph.MutableUniqueGraph, node *kubegraph.DeploymentNode) *kubegraph.DeploymentNode { + addTriggerEdges(node.Deployment, node.Deployment.Spec.Template, func(image appsapi.TemplateImage, err error) { + if err != nil { + return + } + if image.From != nil { + if len(image.From.Name) == 0 { + return + } + name, tag, _ := imageapi.SplitImageStreamTag(image.From.Name) + in := imagegraph.FindOrCreateSyntheticImageStreamTagNode(g, imagegraph.MakeImageStreamTagObjectMeta(image.From.Namespace, name, tag)) + g.AddEdge(in, node, TriggersDeploymentEdgeKind) + return + } + tag := image.Ref.Tag + image.Ref.Tag = "" + in := imagegraph.EnsureDockerRepositoryNode(g, image.Ref.String(), tag) + g.AddEdge(in, node, UsedInDeploymentEdgeKind) + }) + return node +} + +func AddAllTriggerDeploymentsEdges(g osgraph.MutableUniqueGraph) { + for _, node := range g.(graph.Graph).Nodes() { + if dNode, ok := node.(*kubegraph.DeploymentNode); ok { + AddTriggerDeploymentsEdges(g, dNode) + } + } +} + +func AddDeploymentEdges(g osgraph.MutableUniqueGraph, node *kubegraph.DeploymentNode) *kubegraph.DeploymentNode { + for _, n := range g.(graph.Graph).Nodes() { + if rsNode, ok := n.(*kubegraph.ReplicaSetNode); ok { + if rsNode.ReplicaSet.Namespace != node.Deployment.Namespace { + continue + } + if BelongsToDeployment(node.Deployment, rsNode.ReplicaSet) { + g.AddEdge(node, rsNode, DeploymentEdgeKind) + g.AddEdge(rsNode, node, ManagedByControllerEdgeKind) + } + } + } + + return node +} + +func BelongsToDeployment(config *extensions.Deployment, b *extensions.ReplicaSet) bool { + if b.OwnerReferences == nil { + return false + } + for _, ref := range b.OwnerReferences { + if ref.Kind == "Deployment" && ref.Name == config.Name { + return true + } + } + return false +} + +func AddAllDeploymentEdges(g osgraph.MutableUniqueGraph) { + for _, node := range g.(graph.Graph).Nodes() { + if dNode, ok := node.(*kubegraph.DeploymentNode); ok { + AddDeploymentEdges(g, dNode) + } + } +} diff --git a/pkg/oc/graph/kubegraph/helpers.go b/pkg/oc/graph/kubegraph/helpers.go new file mode 100644 index 000000000000..862c8f857ee5 --- /dev/null +++ b/pkg/oc/graph/kubegraph/helpers.go @@ -0,0 
+1,44 @@ +package kubegraph + +import ( + "sort" + + "k8s.io/kubernetes/pkg/controller/deployment/util" + + osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph" + kubegraph "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes" +) + +// RelevantDeployments returns the active deployment and a list of inactive deployments (in order from newest to oldest) +func RelevantDeployments(g osgraph.Graph, dNode *kubegraph.DeploymentNode) (*kubegraph.ReplicaSetNode, []*kubegraph.ReplicaSetNode) { + allDeployments := []*kubegraph.ReplicaSetNode{} + uncastDeployments := g.SuccessorNodesByEdgeKind(dNode, DeploymentEdgeKind) + if len(uncastDeployments) == 0 { + return nil, []*kubegraph.ReplicaSetNode{} + } + + for i := range uncastDeployments { + allDeployments = append(allDeployments, uncastDeployments[i].(*kubegraph.ReplicaSetNode)) + } + + sort.Sort(RecentDeploymentReferences(allDeployments)) + + deploymentRevision, _ := util.Revision(dNode.Deployment) + firstRSRevision, _ := util.Revision(allDeployments[0].ReplicaSet) + + if deploymentRevision == firstRSRevision { + return allDeployments[0], allDeployments[1:] + } + + return nil, allDeployments +} + +type RecentDeploymentReferences []*kubegraph.ReplicaSetNode + +func (m RecentDeploymentReferences) Len() int { return len(m) } +func (m RecentDeploymentReferences) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m RecentDeploymentReferences) Less(i, j int) bool { + firstRev, _ := util.Revision(m[i].ReplicaSet) + secondRev, _ := util.Revision(m[j].ReplicaSet) + return firstRev > secondRev +} diff --git a/pkg/oc/graph/kubegraph/nodes/nodes.go b/pkg/oc/graph/kubegraph/nodes/nodes.go index ad0aa19e43df..73f1ab9b45cf 100644 --- a/pkg/oc/graph/kubegraph/nodes/nodes.go +++ b/pkg/oc/graph/kubegraph/nodes/nodes.go @@ -2,6 +2,7 @@ package nodes import ( "github.com/gonum/graph" + "k8s.io/kubernetes/pkg/apis/extensions" kapps "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" @@ -114,6 +115,37 @@ func EnsureReplicationControllerNode(g osgraph.MutableUniqueGraph, rc *kapi.Repl return rcNode } +// EnsureReplicaSetNode adds a graph node for the ReplicaSet if it does not already exist. 
+func EnsureReplicaSetNode(g osgraph.MutableUniqueGraph, rs *extensions.ReplicaSet) *ReplicaSetNode { + rcNodeName := ReplicaSetNodeName(rs) + rcNode := osgraph.EnsureUnique(g, + rcNodeName, + func(node osgraph.Node) graph.Node { + return &ReplicaSetNode{node, rs, true} + }, + ).(*ReplicaSetNode) + + rcSpecNode := EnsureReplicaSetSpecNode(g, &rs.Spec, rs.Namespace, rcNodeName) + g.AddEdge(rcNode, rcSpecNode, osgraph.ContainsEdgeKind) + + return rcNode +} + +func EnsureReplicaSetSpecNode(g osgraph.MutableUniqueGraph, rsSpec *extensions.ReplicaSetSpec, namespace string, ownerName osgraph.UniqueName) *ReplicaSetSpecNode { + rcSpecName := ReplicaSetSpecNodeName(rsSpec, ownerName) + rcSpecNode := osgraph.EnsureUnique(g, + rcSpecName, + func(node osgraph.Node) graph.Node { + return &ReplicaSetSpecNode{node, rsSpec, namespace, ownerName} + }, + ).(*ReplicaSetSpecNode) + + ptSpecNode := EnsurePodTemplateSpecNode(g, &rsSpec.Template, namespace, rcSpecName) + g.AddEdge(rcSpecNode, ptSpecNode, osgraph.ContainsEdgeKind) + + return rcSpecNode +} + func FindOrCreateSyntheticReplicationControllerNode(g osgraph.MutableUniqueGraph, rc *kapi.ReplicationController) *ReplicationControllerNode { return osgraph.EnsureUnique(g, ReplicationControllerNodeName(rc), @@ -123,6 +155,16 @@ func FindOrCreateSyntheticReplicationControllerNode(g osgraph.MutableUniqueGraph ).(*ReplicationControllerNode) } +func FindOrCreateSyntheticDeploymentNode(g osgraph.MutableUniqueGraph, deployment *extensions.Deployment) *DeploymentNode { + return osgraph.EnsureUnique( + g, + DeploymentNodeName(deployment), + func(node osgraph.Node) graph.Node { + return &DeploymentNode{Node: node, Deployment: deployment, IsFound: false} + }, + ).(*DeploymentNode) +} + func EnsureReplicationControllerSpecNode(g osgraph.MutableUniqueGraph, rcSpec *kapi.ReplicationControllerSpec, namespace string, ownerName osgraph.UniqueName) *ReplicationControllerSpecNode { rcSpecName := ReplicationControllerSpecNodeName(rcSpec, ownerName) rcSpecNode := osgraph.EnsureUnique(g, @@ -211,3 +253,70 @@ func EnsureStatefulSetSpecNode(g osgraph.MutableUniqueGraph, spec *kapps.Statefu return specNode } + +func EnsureDeploymentNode(g osgraph.MutableUniqueGraph, deployment *extensions.Deployment) *DeploymentNode { + nodeName := DeploymentNodeName(deployment) + node := osgraph.EnsureUnique(g, + nodeName, + func(node osgraph.Node) graph.Node { + return &DeploymentNode{Node: node, Deployment: deployment} + }, + ).(*DeploymentNode) + + specNode := EnsureDeploymentSpecNode(g, &deployment.Spec, deployment.Namespace, nodeName) + g.AddEdge(node, specNode, osgraph.ContainsEdgeKind) + + return node +} + +func EnsureDeploymentSpecNode(g osgraph.MutableUniqueGraph, spec *extensions.DeploymentSpec, namespace string, ownerName osgraph.UniqueName) *DeploymentSpecNode { + specName := DeploymentSpecNodeName(spec, ownerName) + specNode := osgraph.EnsureUnique(g, + specName, + func(node osgraph.Node) graph.Node { + return &DeploymentSpecNode{node, spec, namespace, ownerName} + }, + ).(*DeploymentSpecNode) + + ptSpecNode := EnsurePodTemplateSpecNode(g, &spec.Template, namespace, specName) + g.AddEdge(specNode, ptSpecNode, osgraph.ContainsEdgeKind) + + return specNode +} + +// EnsureDaemonSetNode adds the provided daemon set to the graph if it does not exist +func EnsureDaemonSetNode(g osgraph.MutableUniqueGraph, ds *extensions.DaemonSet) *DaemonSetNode { + dsName := DaemonSetNodeName(ds) + dsNode := osgraph.EnsureUnique( + g, + dsName, + func(node osgraph.Node) graph.Node { + return 
&DaemonSetNode{Node: node, DaemonSet: ds, IsFound: true} + }, + ).(*DaemonSetNode) + + podTemplateSpecNode := EnsurePodTemplateSpecNode(g, &ds.Spec.Template, ds.Namespace, dsName) + g.AddEdge(dsNode, podTemplateSpecNode, osgraph.ContainsEdgeKind) + + return dsNode +} + +func FindOrCreateSyntheticDaemonSetNode(g osgraph.MutableUniqueGraph, ds *extensions.DaemonSet) *DaemonSetNode { + return osgraph.EnsureUnique( + g, + DaemonSetNodeName(ds), + func(node osgraph.Node) graph.Node { + return &DaemonSetNode{Node: node, DaemonSet: ds, IsFound: false} + }, + ).(*DaemonSetNode) +} + +func FindOrCreateSyntheticReplicaSetNode(g osgraph.MutableUniqueGraph, rs *extensions.ReplicaSet) *ReplicaSetNode { + return osgraph.EnsureUnique( + g, + ReplicaSetNodeName(rs), + func(node osgraph.Node) graph.Node { + return &ReplicaSetNode{Node: node, ReplicaSet: rs, IsFound: false} + }, + ).(*ReplicaSetNode) +} diff --git a/pkg/oc/graph/kubegraph/nodes/types.go b/pkg/oc/graph/kubegraph/nodes/types.go index f87239193346..a5c843508732 100644 --- a/pkg/oc/graph/kubegraph/nodes/types.go +++ b/pkg/oc/graph/kubegraph/nodes/types.go @@ -7,6 +7,7 @@ import ( kapps "k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/apis/autoscaling" kapi "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/extensions" osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph" ) @@ -24,6 +25,11 @@ var ( HorizontalPodAutoscalerNodeKind = reflect.TypeOf(autoscaling.HorizontalPodAutoscaler{}).Name() StatefulSetNodeKind = reflect.TypeOf(kapps.StatefulSet{}).Name() StatefulSetSpecNodeKind = reflect.TypeOf(kapps.StatefulSetSpec{}).Name() + DeploymentNodeKind = reflect.TypeOf(extensions.Deployment{}).Name() + DeploymentSpecNodeKind = reflect.TypeOf(extensions.DeploymentSpec{}).Name() + ReplicaSetNodeKind = reflect.TypeOf(extensions.ReplicaSet{}).Name() + ReplicaSetSpecNodeKind = reflect.TypeOf(extensions.ReplicaSetSpec{}).Name() + DaemonSetNodeKind = reflect.TypeOf(extensions.DaemonSet{}).Name() ) func ServiceNodeName(o *kapi.Service) osgraph.UniqueName { @@ -106,6 +112,65 @@ func (*PodSpecNode) Kind() string { return PodSpecNodeKind } +func ReplicaSetNodeName(o *extensions.ReplicaSet) osgraph.UniqueName { + return osgraph.GetUniqueRuntimeObjectNodeName(ReplicaSetNodeKind, o) +} + +type ReplicaSetNode struct { + osgraph.Node + ReplicaSet *extensions.ReplicaSet + + IsFound bool +} + +func (n ReplicaSetNode) Found() bool { + return n.IsFound +} + +func (n ReplicaSetNode) Object() interface{} { + return n.ReplicaSet +} + +func (n ReplicaSetNode) String() string { + return string(ReplicaSetNodeName(n.ReplicaSet)) +} + +func (n ReplicaSetNode) UniqueName() osgraph.UniqueName { + return ReplicaSetNodeName(n.ReplicaSet) +} + +func (*ReplicaSetNode) Kind() string { + return ReplicaSetNodeKind +} + +func ReplicaSetSpecNodeName(o *extensions.ReplicaSetSpec, ownerName osgraph.UniqueName) osgraph.UniqueName { + return osgraph.UniqueName(fmt.Sprintf("%s|%v", ReplicaSetSpecNodeKind, ownerName)) +} + +type ReplicaSetSpecNode struct { + osgraph.Node + ReplicaSetSpec *extensions.ReplicaSetSpec + Namespace string + + OwnerName osgraph.UniqueName +} + +func (n ReplicaSetSpecNode) Object() interface{} { + return n.ReplicaSetSpec +} + +func (n ReplicaSetSpecNode) String() string { + return string(n.UniqueName()) +} + +func (n ReplicaSetSpecNode) UniqueName() osgraph.UniqueName { + return ReplicaSetSpecNodeName(n.ReplicaSetSpec, n.OwnerName) +} + +func (*ReplicaSetSpecNode) Kind() string { + return ReplicaSetSpecNodeKind +} + func 
ReplicationControllerNodeName(o *kapi.ReplicationController) osgraph.UniqueName { return osgraph.GetUniqueRuntimeObjectNodeName(ReplicationControllerNodeKind, o) } @@ -303,6 +368,61 @@ func (n HorizontalPodAutoscalerNode) UniqueName() osgraph.UniqueName { return HorizontalPodAutoscalerNodeName(n.HorizontalPodAutoscaler) } +func DeploymentNodeName(o *extensions.Deployment) osgraph.UniqueName { + return osgraph.GetUniqueRuntimeObjectNodeName(DeploymentNodeKind, o) +} + +type DeploymentNode struct { + osgraph.Node + Deployment *extensions.Deployment + + IsFound bool +} + +func (n DeploymentNode) Object() interface{} { + return n.Deployment +} + +func (n DeploymentNode) String() string { + return string(n.UniqueName()) +} + +func (n DeploymentNode) UniqueName() osgraph.UniqueName { + return DeploymentNodeName(n.Deployment) +} + +func (*DeploymentNode) Kind() string { + return DeploymentNodeKind +} + +func DeploymentSpecNodeName(o *extensions.DeploymentSpec, ownerName osgraph.UniqueName) osgraph.UniqueName { + return osgraph.UniqueName(fmt.Sprintf("%s|%v", DeploymentSpecNodeKind, ownerName)) +} + +type DeploymentSpecNode struct { + osgraph.Node + DeploymentSpec *extensions.DeploymentSpec + Namespace string + + OwnerName osgraph.UniqueName +} + +func (n DeploymentSpecNode) Object() interface{} { + return n.DeploymentSpec +} + +func (n DeploymentSpecNode) String() string { + return string(n.UniqueName()) +} + +func (n DeploymentSpecNode) UniqueName() osgraph.UniqueName { + return DeploymentSpecNodeName(n.DeploymentSpec, n.OwnerName) +} + +func (*DeploymentSpecNode) Kind() string { + return DeploymentSpecNodeKind +} + func StatefulSetNodeName(o *kapps.StatefulSet) osgraph.UniqueName { return osgraph.GetUniqueRuntimeObjectNodeName(StatefulSetNodeKind, o) } @@ -355,3 +475,30 @@ func (n StatefulSetSpecNode) UniqueName() osgraph.UniqueName { func (*StatefulSetSpecNode) Kind() string { return StatefulSetSpecNodeKind } + +func DaemonSetNodeName(o *extensions.DaemonSet) osgraph.UniqueName { + return osgraph.GetUniqueRuntimeObjectNodeName(DaemonSetNodeKind, o) +} + +type DaemonSetNode struct { + osgraph.Node + DaemonSet *extensions.DaemonSet + + IsFound bool +} + +func (n DaemonSetNode) Found() bool { + return n.IsFound +} + +func (n DaemonSetNode) Object() interface{} { + return n.DaemonSet +} + +func (n DaemonSetNode) String() string { + return string(DaemonSetNodeName(n.DaemonSet)) +} + +func (*DaemonSetNode) Kind() string { + return DaemonSetNodeKind +}
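Reviewer note (not part of the patch): a minimal sketch of how the pieces added above are expected to fit together. It builds a graph, ensures a Deployment node and two owned ReplicaSet nodes, links them with AddAllDeploymentEdges (which relies on the owner-reference check in BelongsToDeployment), and then asks RelevantDeployments for the active ReplicaSet. The package name, import aliases, object names, revisions, and the assumption that osgraph.New() is the genericgraph constructor are illustrative only.

package kubegraphexample // hypothetical package used only for this sketch

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/extensions"

	osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph"
	"github.com/openshift/origin/pkg/oc/graph/kubegraph"
	kubenodes "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes"
)

func sketchRelevantDeployments() {
	g := osgraph.New() // assumed: genericgraph's graph constructor

	// A Deployment currently at revision "2"; helpers.go reads this annotation
	// through util.Revision when deciding which ReplicaSet is active.
	d := &extensions.Deployment{ObjectMeta: metav1.ObjectMeta{
		Namespace:   "demo",
		Name:        "ruby-deploy",
		Annotations: map[string]string{"deployment.kubernetes.io/revision": "2"},
	}}
	dNode := kubenodes.EnsureDeploymentNode(g, d)

	// Two ReplicaSets owned by the Deployment; only the second matches its revision.
	for i, rev := range []string{"1", "2"} {
		rs := &extensions.ReplicaSet{ObjectMeta: metav1.ObjectMeta{
			Namespace:       "demo",
			Name:            fmt.Sprintf("ruby-deploy-%d", i),
			Annotations:     map[string]string{"deployment.kubernetes.io/revision": rev},
			OwnerReferences: []metav1.OwnerReference{{Kind: "Deployment", Name: "ruby-deploy"}},
		}}
		kubenodes.EnsureReplicaSetNode(g, rs)
	}

	// BelongsToDeployment matches on the owner reference, so this adds a Deployment
	// edge from the Deployment to each ReplicaSet plus the reverse ManagedByController edge.
	kubegraph.AddAllDeploymentEdges(g)

	// The ReplicaSet whose revision equals the Deployment's is returned as active;
	// the rest come back newest-to-oldest as inactive.
	active, inactive := kubegraph.RelevantDeployments(g, dNode)
	fmt.Println(active != nil, len(inactive))
}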
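A second illustrative sketch (also not part of the patch) for the trigger path: addTriggerEdges reads the image.openshift.io/triggers annotation (triggerapi.TriggerAnnotationKey), so AddTriggerDeploymentsEdges should connect the referenced ImageStreamTag to the Deployment with a TriggersDeployment edge. The annotation payload follows the ObjectFieldTrigger JSON format; the container name, image stream tag, and namespace are made up.

package kubegraphexample // hypothetical, same illustrative package as the sketch above

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kapi "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/extensions"

	osgraph "github.com/openshift/origin/pkg/oc/graph/genericgraph"
	"github.com/openshift/origin/pkg/oc/graph/kubegraph"
	kubenodes "github.com/openshift/origin/pkg/oc/graph/kubegraph/nodes"
)

func sketchTriggerDeploymentEdges() {
	g := osgraph.New() // assumed constructor, as in the previous sketch

	// ObjectFieldTrigger JSON: from.kind defaults to ImageStreamTag and from.namespace
	// to the object's namespace when omitted (see addTriggerEdges above).
	triggerJSON := `[{"from":{"kind":"ImageStreamTag","name":"ruby-22-centos7:latest"},"fieldPath":"spec.template.spec.containers[?(@.name==\"ruby\")].image"}]`

	d := &extensions.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:   "demo",
			Name:        "ruby-deploy",
			Annotations: map[string]string{"image.openshift.io/triggers": triggerJSON},
		},
		Spec: extensions.DeploymentSpec{
			Template: kapi.PodTemplateSpec{
				Spec: kapi.PodSpec{
					Containers: []kapi.Container{{Name: "ruby", Image: "centos/ruby-22-centos7:latest"}},
				},
			},
		},
	}
	dNode := kubenodes.EnsureDeploymentNode(g, d)

	// Expected result: a synthetic ImageStreamTag node for ruby-22-centos7:latest with a
	// TriggersDeployment edge to the Deployment; containers without a trigger would instead
	// contribute a DockerImageRepository node and a UsedInDeployment edge.
	kubegraph.AddTriggerDeploymentsEdges(g, dNode)
}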