enable golint: gosimple,whitespace #1218

Merged · 1 commit · Oct 5, 2020
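The diff itself is mechanical cleanup driven by the two newly enabled linters. As a reading guide, here is a minimal illustrative Go sketch (not code from this repository) of the patterns they flag:

```go
package example

import "strings"

func tokenExpired(body []byte, retry bool) bool {

	// whitespace: flags the stray blank line right after the opening brace above;
	// most hunks in this PR simply delete such leading blank lines.

	if retry == true { // gosimple S1002: bool compared to true; write "if retry"
		return false
	}

	// gosimple S1009: len of a nil slice is 0, so the nil check below is redundant
	return body != nil && len(body) > 0 && strings.Contains(string(body), "TokenExpired")
}
```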
4 changes: 2 additions & 2 deletions .golangci.yml
@@ -33,14 +33,14 @@ linters:
#- goconst
#- gocritic
#- gocyclo
-#- gosimple
+- gosimple
#- staticcheck
#- stylecheck
#- unused
#- varcheck
#- unparam
#- unconvert
-#- whitespace
+- whitespace

issues:
include:
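With both entries uncommented in .golangci.yml, the linters run as part of the project's normal lint pass. To reproduce just these two checks locally, the standard invocation would be `golangci-lint run --disable-all --enable=gosimple --enable=whitespace ./...` (assuming a golangci-lint binary on PATH).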
1 change: 0 additions & 1 deletion adapter/main.go
@@ -40,7 +40,6 @@ var (
)

func (a *Adapter) makeProviderOrDie() provider.MetricsProvider {
-
// Get a config to talk to the apiserver
cfg, err := config.GetConfig()
if err != nil {
1 change: 0 additions & 1 deletion controllers/hpa.go
@@ -99,7 +99,6 @@ func (r *ScaledObjectReconciler) newHPAForScaledObject(logger logr.Logger, scale

// updateHPAIfNeeded checks whether update of HPA is needed
func (r *ScaledObjectReconciler) updateHPAIfNeeded(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, foundHpa *autoscalingv2beta2.HorizontalPodAutoscaler, gvkr *kedav1alpha1.GroupVersionKindResource) error {
-
hpa, err := r.newHPAForScaledObject(logger, scaledObject, gvkr)
if err != nil {
logger.Error(err, "Failed to create new HPA resource", "HPA.Namespace", scaledObject.Namespace, "HPA.Name", getHPAName(scaledObject))
3 changes: 0 additions & 3 deletions controllers/scaledjob_controller.go
@@ -32,7 +32,6 @@ type ScaledJobReconciler struct {

// SetupWithManager initializes the ScaledJobReconciler instance and starts a new controller managed by the passed Manager instance.
func (r *ScaledJobReconciler) SetupWithManager(mgr ctrl.Manager) error {
-
r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), nil, mgr.GetScheme())

return ctrl.NewControllerManagedBy(mgr).
@@ -88,7 +87,6 @@ func (r *ScaledJobReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {

// reconcileJobType implemets reconciler logic for K8s Jobs based ScaleObject
func (r *ScaledJobReconciler) reconcileScaledJob(logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob) (string, error) {
-
msg, err := r.deletePreviousVersionScaleJobs(logger, scaledJob)
if err != nil {
return msg, err
@@ -130,7 +128,6 @@ func (r *ScaledJobReconciler) deletePreviousVersionScaleJobs(logger logr.Logger,

// requestScaleLoop request ScaleLoop handler for the respective ScaledJob
func (r *ScaledJobReconciler) requestScaleLoop(logger logr.Logger, scaledJob *kedav1alpha1.ScaledJob) error {
-
logger.V(1).Info("Starting a new ScaleLoop")

return r.scaleHandler.HandleScalableObject(scaledJob)
2 changes: 0 additions & 2 deletions controllers/scaledobject_controller.go
@@ -52,7 +52,6 @@ type ScaledObjectReconciler struct {

// SetupWithManager initializes the ScaledObjectReconciler instance and starts a new controller managed by the passed Manager instance.
func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager) error {
-
// create Discovery clientset
clientset, err := discovery.NewDiscoveryClientForConfig(mgr.GetConfig())
if err != nil {
@@ -156,7 +155,6 @@ func (r *ScaledObjectReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error

// reconcileScaledObject implements reconciler logic for ScaleObject
func (r *ScaledObjectReconciler) reconcileScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) (string, error) {
-
// Check scale target Name is specified
if scaledObject.Spec.ScaleTargetRef.Name == "" {
err := fmt.Errorf("ScaledObject.spec.scaleTargetRef.name is missing")
4 changes: 1 addition & 3 deletions controllers/scaledobject_finalizer.go
@@ -17,7 +17,6 @@ const (

// finalizeScaledObject runs finalization logic on ScaledObject if there's finalizer
func (r *ScaledObjectReconciler) finalizeScaledObject(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
-
if util.Contains(scaledObject.GetFinalizers(), scaledObjectFinalizer) {
// Run finalization logic for scaledObjectFinalizer. If the
// finalization logic fails, don't remove the finalizer so
@@ -27,7 +26,7 @@ func (r *ScaledObjectReconciler) finalizeScaledObject(logger logr.Logger, scaled
}

// if enabled, scale scaleTarget back to the original replica count (to the state it was before scaling with KEDA)
-if scaledObject.Spec.Advanced != nil && scaledObject.Spec.Advanced.RestoreToOriginalReplicaCount == true {
+if scaledObject.Spec.Advanced != nil && scaledObject.Spec.Advanced.RestoreToOriginalReplicaCount {
scale, err := (*r.scaleClient).Scales(scaledObject.Namespace).Get(context.TODO(), scaledObject.Status.ScaleTargetGVKR.GroupResource(), scaledObject.Spec.ScaleTargetRef.Name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
@@ -60,7 +59,6 @@ func (r *ScaledObjectReconciler) finalizeScaledObject(logger logr.Logger, scaled

// ensureFinalizer check there is finalizer present on the ScaledObject, if not it adds one
func (r *ScaledObjectReconciler) ensureFinalizer(logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject) error {
-
if !util.Contains(scaledObject.GetFinalizers(), scaledObjectFinalizer) {
logger.Info("Adding Finalizer for the ScaledObject")
scaledObject.SetFinalizers(append(scaledObject.GetFinalizers(), scaledObjectFinalizer))
2 changes: 0 additions & 2 deletions pkg/provider/provider.go
@@ -117,12 +117,10 @@ func (p *KedaProvider) GetExternalMetric(namespace string, metricSelector labels
return &external_metrics.ExternalMetricValueList{
Items: matchingMetrics,
}, nil
-
}

// ListAllExternalMetrics returns the supported external metrics for this provider
func (p *KedaProvider) ListAllExternalMetrics() []provider.ExternalMetricInfo {
-
externalMetricsInfo := []provider.ExternalMetricInfo{}

//get all ScaledObjects in namespace(s) watched by the operator
2 changes: 0 additions & 2 deletions pkg/scalers/artemis_scaler.go
@@ -66,7 +66,6 @@ func NewArtemisQueueScaler(resolvedSecrets, metadata, authParams map[string]stri
}

func parseArtemisMetadata(resolvedEnv, metadata, authParams map[string]string) (*artemisMetadata, error) {
-
meta := artemisMetadata{}

meta.queueLength = defaultArtemisQueueLength
@@ -152,7 +151,6 @@ func (s *artemisScaler) IsActive(ctx context.Context) (bool, error) {
}

func (s *artemisScaler) getMonitoringEndpoint() string {
-
replacer := strings.NewReplacer("<<managementEndpoint>>", s.metadata.managementEndpoint,
"<<queueName>>", s.metadata.queueName,
"<<brokerName>>", s.metadata.brokerName,
1 change: 0 additions & 1 deletion pkg/scalers/azure/azure_aad_podidentity.go
@@ -15,7 +15,6 @@ const (

// GetAzureADPodIdentityToken returns the AADToken for resource
func GetAzureADPodIdentityToken(audience string) (AADToken, error) {
-
var token AADToken

resp, err := http.Get(fmt.Sprintf(msiURL, url.QueryEscape(audience)))
1 change: 0 additions & 1 deletion pkg/scalers/azure/azure_eventhub.go
@@ -58,7 +58,6 @@ func GetEventHubClient(info EventHubInfo) (*eventhub.Hub, error) {

// GetCheckpointFromBlobStorage accesses Blob storage and gets checkpoint information of a partition
func GetCheckpointFromBlobStorage(ctx context.Context, info EventHubInfo, partitionID string) (Checkpoint, error) {
-
blobCreds, storageEndpoint, err := ParseAzureStorageBlobConnection("none", info.StorageConnection, "")
if err != nil {
return Checkpoint{}, err
1 change: 0 additions & 1 deletion pkg/scalers/azure/azure_queue.go
@@ -8,7 +8,6 @@ import (

// GetAzureQueueLength returns the length of a queue in int
func GetAzureQueueLength(ctx context.Context, podIdentity string, connectionString, queueName string, accountName string) (int32, error) {
-
credential, endpoint, err := ParseAzureStorageQueueConnection(podIdentity, connectionString, accountName)
if err != nil {
return -1, err
1 change: 0 additions & 1 deletion pkg/scalers/azure/azure_storage_test.go
@@ -58,6 +58,5 @@ func TestParseStorageConnectionString(t *testing.T) {
"but got", endpoint)
}
}
-
}
}
1 change: 0 additions & 1 deletion pkg/scalers/azure_blob_scaler_test.go
@@ -54,7 +54,6 @@ func TestAzBlobParseMetadata(t *testing.T) {
}
if testData.podIdentity != "" && testData.podIdentity != podIdentity && err == nil {
t.Error("Expected success but got error: podIdentity value is not returned as expected")
-
}
}
}
2 changes: 0 additions & 2 deletions pkg/scalers/azure_eventhub_scaler.go
@@ -110,7 +110,6 @@ func parseAzureEventHubMetadata(metadata, resolvedEnv, authParams map[string]str

//GetUnprocessedEventCountInPartition gets number of unprocessed events in a given partition
func (scaler *azureEventHubScaler) GetUnprocessedEventCountInPartition(ctx context.Context, partitionInfo *eventhub.HubPartitionRuntimeInformation) (newEventCount int64, checkpoint azure.Checkpoint, err error) {
-
//if partitionInfo.LastEnqueuedOffset = -1, that means event hub partition is empty
if partitionInfo != nil && partitionInfo.LastEnqueuedOffset == "-1" {
return 0, azure.Checkpoint{}, nil
@@ -161,7 +160,6 @@ func (scaler *azureEventHubScaler) GetUnprocessedEventCountInPartition(ctx conte

// GetUnprocessedEventCountWithoutCheckpoint returns the number of messages on the without a checkoutpoint info
func GetUnprocessedEventCountWithoutCheckpoint(partitionInfo *eventhub.HubPartitionRuntimeInformation) int64 {
-
// if both values are 0 then there is exactly one message inside the hub. First message after init
if (partitionInfo.BeginningSequenceNumber == 0 && partitionInfo.LastSequenceNumber == 0) || (partitionInfo.BeginningSequenceNumber != partitionInfo.LastSequenceNumber) {
return (partitionInfo.LastSequenceNumber - partitionInfo.BeginningSequenceNumber) + 1
3 changes: 0 additions & 3 deletions pkg/scalers/azure_eventhub_scaler_test.go
@@ -239,7 +239,6 @@ func TestGetUnprocessedEventCountIfNoCheckpointExists(t *testing.T) {
}

func TestGetUnprocessedEventCountWithoutCheckpointReturning1Message(t *testing.T) {
-
//After the first message the lastsequencenumber init to 0
partitionInfo := eventhub.HubPartitionRuntimeInformation{
PartitionID: "0",
@@ -255,7 +254,6 @@ func TestGetUnprocessedEventCountWithoutCheckpointReturning1Message(t *testing.T
}

func TestGetUnprocessedEventCountWithoutCheckpointReturning0Message(t *testing.T) {
-
//An empty partition starts with an equal value on last-/beginning-sequencenumber other than 0
partitionInfo := eventhub.HubPartitionRuntimeInformation{
PartitionID: "0",
@@ -271,7 +269,6 @@ func TestGetUnprocessedEventCountWithoutCheckpointReturning0Message(t *testing.T
}

func TestGetUnprocessedEventCountWithoutCheckpointReturning2Messages(t *testing.T) {
-
partitionInfo := eventhub.HubPartitionRuntimeInformation{
PartitionID: "0",
LastSequenceNumber: 1,
11 changes: 3 additions & 8 deletions pkg/scalers/azure_log_analytics_scaler.go
@@ -85,7 +85,6 @@ func NewAzureLogAnalyticsScaler(resolvedSecrets, metadata, authParams map[string
}

func parseAzureLogAnalyticsMetadata(resolvedEnv, metadata, authParams map[string]string, podIdentity string) (*azureLogAnalyticsMetadata, error) {
-
meta := azureLogAnalyticsMetadata{}

if podIdentity == "" || podIdentity == "none" {
@@ -281,7 +280,7 @@ func (s *azureLogAnalyticsScaler) executeQuery(query string, tokenInfo tokenData
body, statusCode, err := s.executeLogAnalyticsREST(query, tokenInfo)

//Handle expired token
-if statusCode == 403 || (body != nil && len(body) > 0 && strings.Contains(string(body), "TokenExpired")) {
+if statusCode == 403 || (len(body) > 0 && strings.Contains(string(body), "TokenExpired")) {
logAnalyticsLog.Info("Token expired, refreshing token...")

tokenInfo, err := s.refreshAccessToken()
@@ -301,7 +300,7 @@ func (s *azureLogAnalyticsScaler) executeQuery(query string, tokenInfo tokenData
return metricsData{}, err
}

-if body == nil || len(body) == 0 {
+if len(body) == 0 {
return metricsData{}, fmt.Errorf("Error executing Log Analytics REST API request: empty body")
}

@@ -311,7 +310,6 @@ func (s *azureLogAnalyticsScaler) executeQuery(query string, tokenInfo tokenData
}

if statusCode == 200 {
-
metricsInfo := metricsData{}
metricsInfo.threshold = s.metadata.threshold
metricsInfo.value = 0
@@ -378,7 +376,6 @@ func (s *azureLogAnalyticsScaler) executeQuery(query string, tokenInfo tokenData
}

func (s *azureLogAnalyticsScaler) refreshAccessToken() (tokenData, error) {
-
tokenInfo, err := s.getAuthorizationToken()

if err != nil {
@@ -401,9 +398,7 @@ func (s *azureLogAnalyticsScaler) refreshAccessToken() (tokenData, error) {
}

func (s *azureLogAnalyticsScaler) getAuthorizationToken() (tokenData, error) {
-
body, statusCode, err, tokenInfo := []byte{}, 0, *new(error), tokenData{}
-
if s.metadata.podIdentity == "" {
body, statusCode, err = s.executeAADApicall()
} else {
@@ -412,7 +407,7 @@ func (s *azureLogAnalyticsScaler) getAuthorizationToken() (tokenData, error) {

if err != nil {
return tokenData{}, err
-} else if body == nil || len(body) == 0 {
+} else if len(body) == 0 {
return tokenData{}, fmt.Errorf("Error getting access token: empty body")
}

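A note on the `len(body)` simplifications in this file: gosimple can drop the nil checks because the Go spec defines `len` of a nil slice as 0. A self-contained illustration (not repository code):

```go
package main

import "fmt"

func main() {
	var body []byte // nil slice: declared, never allocated

	fmt.Println(body == nil)    // true
	fmt.Println(len(body))      // 0 — len on a nil slice is well-defined
	fmt.Println(len(body) == 0) // true; equivalent to body == nil || len(body) == 0
}
```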
3 changes: 0 additions & 3 deletions pkg/scalers/azure_servicebus_scaler_test.go
@@ -86,7 +86,6 @@ var getServiceBusLengthTestScalers = []azureServiceBusScaler{

func TestParseServiceBusMetadata(t *testing.T) {
for _, testData := range parseServiceBusMetadataDataset {
-
meta, err := parseAzureServiceBusMetadata(sampleResolvedEnv, testData.metadata, testData.authParams, testData.podIdentity)

if err != nil && !testData.isError {
@@ -102,7 +101,6 @@ func TestParseServiceBusMetadata(t *testing.T) {
}

func TestGetServiceBusLength(t *testing.T) {
-
t.Log("This test will use the environment variable SERVICEBUS_CONNECTION_STRING if it is set")
t.Log("If set, it will connect to the servicebus namespace specified by the connection string & check:")
t.Logf("\tQueue '%s' has 1 message\n", queueName)
@@ -123,7 +121,6 @@ func TestGetServiceBusLength(t *testing.T) {
if length != 1 {
t.Errorf("Expected 1 message, got %d", length)
}
-
} else {
// Just test error message
length, err := scaler.GetAzureServiceBusLength(context.TODO())
2 changes: 0 additions & 2 deletions pkg/scalers/cron_scaler.go
@@ -60,7 +60,6 @@ func getCronTime(location *time.Location, spec string) (int64, error) {
c.Stop()

return cronTime, nil
-
}

func parseCronMetadata(metadata, resolvedEnv map[string]string) (*cronMetadata, error) {
@@ -157,7 +156,6 @@ func (s *cronScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec {

// GetMetrics finds the current value of the metric
func (s *cronScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
-
var currentReplicas = int64(defaultDesiredReplicas)
isActive, err := s.IsActive(ctx)
if err != nil {
20 changes: 9 additions & 11 deletions pkg/scalers/external_scaler_test.go
@@ -81,18 +81,16 @@ func TestExternalPushScaler_Run(t *testing.T) {
retries := 0
defer cancel()
for {
-select {
-case <-time.After(time.Second * 1):
-if resultCount == serverCount*iterationCount {
-t.Logf("resultCount == %d", resultCount)
-return
-}
+<-time.After(time.Second * 1)
+if resultCount == serverCount*iterationCount {
+t.Logf("resultCount == %d", resultCount)
+return
+}

-retries++
-if retries > 10 {
-t.Fatalf("Expected resultCount to be %d after %d retries, but got %d", serverCount*iterationCount, retries, resultCount)
-return
-}
-}
+retries++
+if retries > 10 {
+t.Fatalf("Expected resultCount to be %d after %d retries, but got %d", serverCount*iterationCount, retries, resultCount)
+return
+}
}
}
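The rewrite above is gosimple's single-case-select fix (S1000): a `select` with one case and no `default` blocks exactly like the plain channel operation it wraps, so the wrapper can be dropped. A runnable sketch of the equivalence (illustrative, not test code from this repo):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// A single-case select with no default is flagged by gosimple (S1000)...
	select {
	case <-time.After(10 * time.Millisecond):
		fmt.Println("tick via select")
	}

	// ...because it behaves identically to the plain receive below.
	<-time.After(10 * time.Millisecond)
	fmt.Println("tick via plain receive")
}
```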
3 changes: 0 additions & 3 deletions pkg/scalers/gcp_pub_sub_scaler.go
@@ -81,7 +81,6 @@ func parsePubSubMetadata(metadata, resolvedEnv map[string]string) (*pubsubMetada

// IsActive checks if there are any messages in the subscription
func (s *pubsubScaler) IsActive(ctx context.Context) (bool, error) {
-
size, err := s.GetSubscriptionSize(ctx)

if err != nil {
@@ -93,7 +92,6 @@ func (s *pubsubScaler) IsActive(ctx context.Context) (bool, error) {
}

func (s *pubsubScaler) Close() error {
-
if s.client != nil {
err := s.client.metricsClient.Close()
if err != nil {
@@ -106,7 +104,6 @@ func (s *pubsubScaler) Close() error {

// GetMetricSpecForScaling returns the metric spec for the HPA
func (s *pubsubScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec {
-
// Construct the target subscription size as a quantity
targetSubscriptionSizeQty := resource.NewQuantity(int64(s.metadata.targetSubscriptionSize), resource.DecimalSI)

1 change: 0 additions & 1 deletion pkg/scalers/huawei_cloudeye_scaler.go
@@ -343,5 +343,4 @@ func (h *huaweiCloudeyeScaler) GetCloudeyeMetrics() (float64, error) {
}

return metricValue, nil
-
}
2 changes: 0 additions & 2 deletions pkg/scalers/kafka_scaler.go
@@ -337,10 +337,8 @@ func (s *kafkaScaler) getLagForPartition(partition int32, offsets *sarama.Offset
return invalidOffset, fmt.Errorf("invalid offset found for topic %s in group %s and partition %d, probably no offset is committed yet", s.metadata.topic, s.metadata.group, partition)
}
return latestOffset, nil
-
}
return (latestOffset - consumerOffset), nil
-
}

// Close closes the kafka admin and client
3 changes: 0 additions & 3 deletions pkg/scalers/liiklus_scaler.go
@@ -40,7 +40,6 @@ const (

// NewLiiklusScaler creates a new liiklusScaler scaler
func NewLiiklusScaler(resolvedEnv map[string]string, metadata map[string]string) (Scaler, error) {
-
lm, err := parseLiiklusMetadata(metadata)
if err != nil {
return nil, err
@@ -143,11 +142,9 @@ func (s *liiklusScaler) getLag(ctx context.Context) (uint64, map[uint32]uint64,
totalLag += diff
}
return totalLag, lags, nil
-
}

func parseLiiklusMetadata(metadata map[string]string) (*liiklusMetadata, error) {
-
lagThreshold := defaultLiiklusLagThreshold

if val, ok := metadata[liiklusLagThresholdMetricName]; ok {
Expand Down