MGDSTRM-8638 removing legacy capacity reporting fields (#819)
shawkins authored Sep 29, 2022
1 parent 335501a commit d3bbde6
Showing 7 changed files with 3 additions and 107 deletions.

This file was deleted.

This file was deleted.

@@ -17,14 +17,6 @@ public class ManagedKafkaAgentStatus {

private List<ManagedKafkaCondition> conditions;

-    private ClusterCapacity total;
-
-    private ClusterCapacity remaining;
-
-    private NodeCounts nodeInfo;
-
-    private ClusterResizeInfo resizeInfo;
-
private String updatedTimestamp;

private List<StrimziVersionStatus> strimzi;
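This hunk drops the four legacy fields (total, remaining, nodeInfo, resizeInfo) from the agent status; capacity is now reported only through the per-profile map used elsewhere in this commit. For orientation, here is a minimal sketch of the trimmed class, assuming conventional getters and setters and a capacity field inferred from the builder calls visible further down (withCapacity, withUpdatedTimestamp, withStrimzi); the real class may carry additional members or annotations outside the visible hunk.

package org.bf2.operator.resources.v1alpha1;

import java.util.List;
import java.util.Map;

// Sketch only: the legacy ClusterCapacity/NodeCounts/ClusterResizeInfo fields are gone.
public class ManagedKafkaAgentStatus {

    private List<ManagedKafkaCondition> conditions;
    private String updatedTimestamp;
    private List<StrimziVersionStatus> strimzi;
    private Map<String, ProfileCapacity> capacity; // assumed field backing withCapacity(...)

    public List<ManagedKafkaCondition> getConditions() { return conditions; }
    public void setConditions(List<ManagedKafkaCondition> conditions) { this.conditions = conditions; }

    public String getUpdatedTimestamp() { return updatedTimestamp; }
    public void setUpdatedTimestamp(String updatedTimestamp) { this.updatedTimestamp = updatedTimestamp; }

    public List<StrimziVersionStatus> getStrimzi() { return strimzi; }
    public void setStrimzi(List<StrimziVersionStatus> strimzi) { this.strimzi = strimzi; }

    public Map<String, ProfileCapacity> getCapacity() { return capacity; }
    public void setCapacity(Map<String, ProfileCapacity> capacity) { this.capacity = capacity; }
}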

This file was deleted.

@@ -1,6 +1,5 @@
package org.bf2.operator.controllers;

-import io.fabric8.kubernetes.api.model.Quantity;
import io.javaoperatorsdk.operator.api.reconciler.Constants;
import io.javaoperatorsdk.operator.api.reconciler.Context;
import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
@@ -17,18 +16,12 @@
import org.bf2.operator.managers.InformerManager;
import org.bf2.operator.managers.ObservabilityManager;
import org.bf2.operator.managers.StrimziManager;
-import org.bf2.operator.resources.v1alpha1.ClusterCapacity;
-import org.bf2.operator.resources.v1alpha1.ClusterCapacityBuilder;
-import org.bf2.operator.resources.v1alpha1.ClusterResizeInfo;
-import org.bf2.operator.resources.v1alpha1.ClusterResizeInfoBuilder;
import org.bf2.operator.resources.v1alpha1.ManagedKafkaAgent;
import org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatus;
import org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatusBuilder;
import org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition;
import org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Status;
import org.bf2.operator.resources.v1alpha1.ManagedKafkaCondition.Type;
-import org.bf2.operator.resources.v1alpha1.NodeCounts;
-import org.bf2.operator.resources.v1alpha1.NodeCountsBuilder;
import org.bf2.operator.resources.v1alpha1.ProfileCapacity;
import org.bf2.operator.resources.v1alpha1.StrimziVersionStatus;
import org.jboss.logging.Logger;
@@ -128,46 +121,8 @@ private ManagedKafkaAgentStatus buildStatus(ManagedKafkaAgent resource) {

Map<String, ProfileCapacity> capacity = capacityManager.buildCapacity(resource);

-        // dummy capacity information - to be removed
-        ClusterCapacity total = new ClusterCapacityBuilder()
-                .withConnections(10000)
-                .withDataRetentionSize(Quantity.parse("40Gi"))
-                .withIngressEgressThroughputPerSec(Quantity.parse("40Gi"))
-                .withPartitions(10000)
-                .build();
-
-        ClusterCapacity remaining = new ClusterCapacityBuilder()
-                .withConnections(10000)
-                .withDataRetentionSize(Quantity.parse("40Gi"))
-                .withIngressEgressThroughputPerSec(Quantity.parse("40Gi"))
-                .withPartitions(10000)
-                .build();
-
-        ClusterCapacity delta = new ClusterCapacityBuilder()
-                .withConnections(10000)
-                .withDataRetentionSize(Quantity.parse("40Gi"))
-                .withIngressEgressThroughputPerSec(Quantity.parse("40Gi"))
-                .withPartitions(10000)
-                .build();
-
-        NodeCounts nodeInfo = new NodeCountsBuilder()
-                .withCeiling(0)
-                .withCurrent(0)
-                .withCurrentWorkLoadMinimum(0)
-                .withFloor(0)
-                .build();
-
-        ClusterResizeInfo resize = new ClusterResizeInfoBuilder()
-                .withDelta(delta)
-                .withNodeDelta(3)
-                .build();
-
return new ManagedKafkaAgentStatusBuilder()
.withConditions(status == null ? Arrays.asList(readyCondition) : status.getConditions())
-                .withTotal(total)
-                .withRemaining(remaining)
-                .withNodeInfo(nodeInfo)
-                .withResizeInfo(resize)
.withUpdatedTimestamp(ConditionUtils.iso8601Now())
.withStrimzi(strimziVersions)
.withCapacity(capacity)
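With the dummy ClusterCapacity, NodeCounts, and ClusterResizeInfo objects gone, the status is assembled from the four builder calls that remain above. A small sketch of that remaining assembly follows; the helper name buildTrimmedStatus is hypothetical, and the trailing .build() is assumed because it sits below the visible hunk.

// Hypothetical helper showing the builder calls that survive this commit.
static ManagedKafkaAgentStatus buildTrimmedStatus(List<ManagedKafkaCondition> conditions,
        List<StrimziVersionStatus> strimziVersions,
        Map<String, ProfileCapacity> capacity) {
    return new ManagedKafkaAgentStatusBuilder()
            .withConditions(conditions)
            .withUpdatedTimestamp(ConditionUtils.iso8601Now()) // same call used in the hunk above
            .withStrimzi(strimziVersions)
            .withCapacity(capacity)
            .build(); // assumed terminator, outside the visible diff
}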
sync/src/test/java/org/bf2/sync/AgentPollerTest.java (7 changes: 3 additions & 4 deletions)
@@ -5,7 +5,6 @@
import io.quarkus.test.junit.mockito.InjectMock;
import io.quarkus.test.kubernetes.client.WithKubernetesTestServer;
import org.bf2.common.ManagedKafkaAgentResourceClient;
-import org.bf2.operator.resources.v1alpha1.ClusterCapacityBuilder;
import org.bf2.operator.resources.v1alpha1.ManagedKafkaAgent;
import org.bf2.operator.resources.v1alpha1.ManagedKafkaAgentStatusBuilder;
import org.bf2.sync.controlplane.ControlPlaneRestClient;
@@ -16,6 +15,7 @@
import javax.inject.Inject;

import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;

@WithKubernetesTestServer
@@ -50,8 +50,7 @@ public void testAddDelete() {
managedKafkaAgentSync.loop(); // pick up the agent from the control plane

ManagedKafkaAgent local = lookup.getLocalManagedKafkaAgent();
-        local.setStatus(new ManagedKafkaAgentStatusBuilder()
-                .withRemaining(new ClusterCapacityBuilder().withConnections(1000).build()).build());
+        local.setStatus(new ManagedKafkaAgentStatusBuilder().build());
client.replaceStatus(local);

assertEquals("test-token", local.getSpec().getObservability().getAccessToken());
@@ -64,7 +63,7 @@
local = lookup.getLocalManagedKafkaAgent();

assertEquals("abc", local.getSpec().getObservability().getAccessToken());
-        assertEquals(1000, local.getStatus().getRemaining().getConnections());
+        assertNotNull(local.getStatus());
}

}
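Because the remaining field no longer exists, the test now pushes an empty status and only checks that a status object comes back. If a later change wants the round trip to carry data again, a hedged variant using only builder calls that survive this commit could look like the sketch below; the timestamp literal is illustrative and getUpdatedTimestamp() is an assumed getter for the updatedTimestamp field.

// Sketch: exercise the status round trip with a surviving field instead of the removed 'remaining'.
local.setStatus(new ManagedKafkaAgentStatusBuilder()
        .withUpdatedTimestamp("2022-09-29T00:00:00Z") // illustrative value
        .build());
client.replaceStatus(local);
assertNotNull(lookup.getLocalManagedKafkaAgent().getStatus().getUpdatedTimestamp()); // getter assumed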
@@ -44,9 +44,5 @@ public static void assertManagedKafkaStatus(ManagedKafka mk, ManagedKafkaStatus
public static void assertManagedKafkaAgentStatus(ManagedKafkaAgentStatus agentStatus) {
String yml = Serialization.asYaml(agentStatus);
assertEquals(1, agentStatus.getConditions().size(), yml);
-        assertNotNull(agentStatus.getTotal(), yml);
-        assertNotNull(agentStatus.getRemaining(), yml);
-        assertNotNull(agentStatus.getResizeInfo(), yml);
-        assertNotNull(agentStatus.getNodeInfo(), yml);
}
}
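With the legacy fields removed, this helper now only validates the conditions list. If coverage of the per-profile reporting is wanted here as well, one hedged addition (getCapacity() is assumed to mirror the builder's withCapacity) would be:

// Sketch: replace the removed total/remaining/nodeInfo/resizeInfo checks with the capacity map.
assertNotNull(agentStatus.getCapacity(), yml);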
