Modified the switch cases.
Signed-off-by: Shubham82 <[email protected]>
Shubham82 committed Dec 1, 2020
1 parent 29cedc7 commit 08079ed
Showing 37 changed files with 195 additions and 165 deletions.
2 changes: 1 addition & 1 deletion .golangci.yml
@@ -33,7 +33,7 @@ linters:
 #- errcheck
 #- funlen
 - goconst
-#- gocritic
+- gocritic
 - gocyclo
 - gosimple
 - stylecheck
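
Enabling gocritic above is what drives the rest of this commit: its ifElseChain check flags if-else ladders of three or more branches and suggests a tagless switch, its commentFormatting check wants a space after //, and its sloppyLen check rewrites len(x) <= 0 as len(x) == 0. A minimal sketch of the ifElseChain rewrite, using an illustrative function rather than KEDA code:

package main

import "fmt"

// classify is a hypothetical example. gocritic's ifElseChain check would
// flag the pre-commit shape:
//
//	if n < 0 { ... } else if n == 0 { ... } else { ... }
//
// and suggest the tagless switch below. Each case holds a boolean
// condition and the first one that is true wins, exactly like the chain.
func classify(n int) string {
	switch {
	case n < 0:
		return "negative"
	case n == 0:
		return "zero"
	default:
		return "positive"
	}
}

func main() {
	fmt.Println(classify(-3), classify(0), classify(7)) // negative zero positive
}

The rewrite is purely mechanical: evaluation order, short-circuiting, and the default branch behave the same as the original chain.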
12 changes: 9 additions & 3 deletions adapter/main.go
@@ -97,6 +97,13 @@ func getWatchNamespace() (string, error) {
 }
 
 func main() {
+	var err error
+	defer func() {
+		if err != nil {
+			logger.Error(err, "unable to run external metrics adapter")
+		}
+	}()
+
 	defer klog.Flush()
 
 	printVersion()
@@ -112,8 +119,7 @@ func main() {
 	cmd.WithExternalMetrics(kedaProvider)
 
 	logger.Info(cmd.Message)
-	if err := cmd.Run(wait.NeverStop); err != nil {
-		logger.Error(err, "unable to run external metrics adapter")
-		os.Exit(1)
+	if err = cmd.Run(wait.NeverStop); err != nil {
+		return
 	}
 }
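
The adapter change is more than a switch-case cleanup: os.Exit terminates the process immediately, skipping deferred calls such as the klog.Flush() registered above, so the error log itself could be lost. Hoisting err into main's scope and logging it from a defer lets the function return normally and every defer run. A self-contained sketch of the pattern (run and its error are stand-ins, not the adapter's real command):

package main

import (
	"errors"
	"log"
)

// run stands in for cmd.Run in the real adapter.
func run() error { return errors.New("metrics server failed") }

func main() {
	var err error
	// Defers run last-in-first-out on return, but never after os.Exit,
	// which is why the commit replaces os.Exit(1) with a plain return.
	defer func() {
		if err != nil {
			log.Println("unable to run external metrics adapter:", err)
		}
	}()
	defer log.Println("flushing buffered logs (klog.Flush in the adapter)")

	if err = run(); err != nil {
		return
	}
}

One trade-off worth noting: without os.Exit(1), a failed run now ends with exit status 0 unless the exit code is set some other way.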
6 changes: 3 additions & 3 deletions controllers/suite_test.go
@@ -23,13 +23,13 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 
-	//"k8s.io/client-go/kubernetes/scheme"
+	// "k8s.io/client-go/kubernetes/scheme"
 
 	"sigs.k8s.io/controller-runtime/pkg/envtest"
 	"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
-	//kedav1alpha1 "github.com/kedacore/keda/v2/api/v1alpha1"
+	// kedav1alpha1 "github.com/kedacore/keda/v2/api/v1alpha1"
 	// +kubebuilder:scaffold:imports
 )

@@ -56,7 +56,7 @@ var _ = BeforeSuite(func(done Done) {
 		CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")},
 	}
 
-	//var err error
+	// var err error
 	// cfg, err = testEnv.Start()
 	// Expect(err).ToNot(HaveOccurred())
 	// Expect(cfg).ToNot(BeNil())
8 changes: 4 additions & 4 deletions pkg/provider/provider.go
@@ -62,7 +62,7 @@ func (p *KedaProvider) GetExternalMetric(namespace string, metricSelector labels
 		return nil, err
 	}
 
-	//get the scaled objects matching namespace and labels
+	// get the scaled objects matching namespace and labels
 	scaledObjects := &kedav1alpha1.ScaledObjectList{}
 	opts := []client.ListOption{
 		client.InNamespace(namespace),
@@ -88,7 +88,7 @@ func (p *KedaProvider) GetExternalMetric(namespace string, metricSelector labels
 		scalerName := strings.Replace(fmt.Sprintf("%T", scaler), "*scalers.", "", 1)
 
 		for _, metricSpec := range metricSpecs {
-			//skip cpu/memory resource scaler
+			// skip cpu/memory resource scaler
 			if metricSpec.External == nil {
 				continue
 			}
@@ -111,7 +111,7 @@ func (p *KedaProvider) GetExternalMetric(namespace string, metricSelector labels
 		scaler.Close()
 	}
 
-	if len(matchingMetrics) <= 0 {
+	if len(matchingMetrics) == 0 {
 		return nil, fmt.Errorf("No matching metrics found for " + info.Metric)
 	}
 
@@ -124,7 +124,7 @@ func (p *KedaProvider) GetExternalMetric(namespace string, metricSelector labels
 func (p *KedaProvider) ListAllExternalMetrics() []provider.ExternalMetricInfo {
 	externalMetricsInfo := []provider.ExternalMetricInfo{}
 
-	//get all ScaledObjects in namespace(s) watched by the operator
+	// get all ScaledObjects in namespace(s) watched by the operator
 	scaledObjects := &kedav1alpha1.ScaledObjectList{}
 	opts := []client.ListOption{
 		client.InNamespace(p.watchedNamespace),
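
The len(matchingMetrics) == 0 change above is gocritic's sloppyLen fix: len never returns a negative number in Go, so <= 0 and == 0 are equivalent, and the equality form states the intent directly. A throwaway illustration:

package main

import "fmt"

func main() {
	var matchingMetrics []string // nil slice: len is 0, never negative
	// gocritic's sloppyLen check rewrites "len(matchingMetrics) <= 0" to:
	if len(matchingMetrics) == 0 {
		fmt.Println("no matching metrics")
	}
}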
2 changes: 1 addition & 1 deletion pkg/scalers/artemis_scaler.go
@@ -211,7 +211,7 @@ func (s *artemisScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec {
 	return []v2beta2.MetricSpec{metricSpec}
 }
 
-//GetMetrics returns value for a supported metric and an error if there is a problem getting the metric
+// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric
 func (s *artemisScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
 	messages, err := s.getQueueMessageCount()
 
7 changes: 4 additions & 3 deletions pkg/scalers/aws_iam_authorization.go
@@ -18,15 +18,16 @@ func getAwsAuthorization(authParams, metadata, resolvedEnv map[string]string) (a
 		meta.podIdentityOwner = false
 	} else if metadata["identityOwner"] == "" || metadata["identityOwner"] == "pod" {
 		meta.podIdentityOwner = true
-		if authParams["awsRoleArn"] != "" {
+		switch {
+		case authParams["awsRoleArn"] != "":
 			meta.awsRoleArn = authParams["awsRoleArn"]
-		} else if (authParams["awsAccessKeyID"] != "" || authParams["awsAccessKeyId"] != "") && authParams["awsSecretAccessKey"] != "" {
+		case (authParams["awsAccessKeyID"] != "" || authParams["awsAccessKeyId"] != "") && authParams["awsSecretAccessKey"] != "":
 			meta.awsAccessKeyID = authParams["awsAccessKeyID"]
 			if meta.awsAccessKeyID == "" {
 				meta.awsAccessKeyID = authParams["awsAccessKeyId"]
 			}
 			meta.awsSecretAccessKey = authParams["awsSecretAccessKey"]
-		} else {
+		default:
 			if metadata["awsAccessKeyID"] != "" {
 				meta.awsAccessKeyID = metadata["awsAccessKeyID"]
 			} else if metadata["awsAccessKeyIDFromEnv"] != "" {
2 changes: 1 addition & 1 deletion pkg/scalers/aws_kinesis_stream_scaler.go
@@ -116,7 +116,7 @@ func (s *awsKinesisStreamScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec
 	return []v2beta2.MetricSpec{metricSpec}
 }
 
-//GetMetrics returns value for a supported metric and an error if there is a problem getting the metric
+// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric
 func (s *awsKinesisStreamScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
 	shardCount, err := s.GetAwsKinesisOpenShardCount()
 
4 changes: 2 additions & 2 deletions pkg/scalers/aws_sqs_queue_scaler.go
@@ -81,7 +81,7 @@ func parseAwsSqsQueueMetadata(config *ScalerConfig) (*awsSqsQueueMetadata, error
 
 	queueURLPath := queueURL.Path
 	queueURLPathParts := strings.Split(queueURLPath, "/")
-	if len(queueURLPathParts) != 3 || len(queueURLPathParts[2]) <= 0 {
+	if len(queueURLPathParts) != 3 || len(queueURLPathParts[2]) == 0 {
 		return nil, fmt.Errorf("cannot get queueName from queueURL")
 	}
 
@@ -133,7 +133,7 @@ func (s *awsSqsQueueScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec {
 	return []v2beta2.MetricSpec{metricSpec}
 }
 
-//GetMetrics returns value for a supported metric and an error if there is a problem getting the metric
+// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric
 func (s *awsSqsQueueScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
 	queuelen, err := s.GetAwsSqsQueueLength()
 
13 changes: 7 additions & 6 deletions pkg/scalers/azure/azure_monitor.go
@@ -207,17 +207,18 @@ func formatTimeSpan(timeSpan string) (string, error) {
 
 func verifyAggregationTypeIsSupported(aggregationType string, data []insights.MetricValue) (*float64, error) {
 	var valuePtr *float64
-	if strings.EqualFold(string(insights.Average), aggregationType) && data[len(data)-1].Average != nil {
+	switch {
+	case strings.EqualFold(string(insights.Average), aggregationType) && data[len(data)-1].Average != nil:
 		valuePtr = data[len(data)-1].Average
-	} else if strings.EqualFold(string(insights.Total), aggregationType) && data[len(data)-1].Total != nil {
+	case strings.EqualFold(string(insights.Total), aggregationType) && data[len(data)-1].Total != nil:
 		valuePtr = data[len(data)-1].Total
-	} else if strings.EqualFold(string(insights.Maximum), aggregationType) && data[len(data)-1].Maximum != nil {
+	case strings.EqualFold(string(insights.Maximum), aggregationType) && data[len(data)-1].Maximum != nil:
 		valuePtr = data[len(data)-1].Maximum
-	} else if strings.EqualFold(string(insights.Minimum), aggregationType) && data[len(data)-1].Minimum != nil {
+	case strings.EqualFold(string(insights.Minimum), aggregationType) && data[len(data)-1].Minimum != nil:
 		valuePtr = data[len(data)-1].Minimum
-	} else if strings.EqualFold(string(insights.Count), aggregationType) && data[len(data)-1].Count != nil {
+	case strings.EqualFold(string(insights.Count), aggregationType) && data[len(data)-1].Count != nil:
 		valuePtr = data[len(data)-1].Count
-	} else {
+	default:
 		err := fmt.Errorf("unsupported aggregation type %s", insights.AggregationType(strings.ToTitle(aggregationType)))
 		return nil, err
 	}
17 changes: 9 additions & 8 deletions pkg/scalers/azure/azure_storage.go
@@ -120,21 +120,22 @@ func parseAzureStorageConnectionString(connectionString string, endpointType Sto
 
 	var endpointProtocol, name, key, endpointSuffix, endpoint string
 	for _, v := range parts {
-		if strings.HasPrefix(v, "DefaultEndpointsProtocol") {
+		switch {
+		case strings.HasPrefix(v, "DefaultEndpointsProtocol"):
 			endpointProtocol = getValue(v)
-		} else if strings.HasPrefix(v, "AccountName") {
+		case strings.HasPrefix(v, "AccountName"):
 			name = getValue(v)
-		} else if strings.HasPrefix(v, "AccountKey") {
+		case strings.HasPrefix(v, "AccountKey"):
 			key = getValue(v)
-		} else if strings.HasPrefix(v, "EndpointSuffix") {
+		case strings.HasPrefix(v, "EndpointSuffix"):
 			endpointSuffix = getValue(v)
-		} else if endpointType == BlobEndpoint && strings.HasPrefix(v, endpointType.Prefix()) {
+		case endpointType == BlobEndpoint && strings.HasPrefix(v, endpointType.Prefix()):
 			endpoint = getValue(v)
-		} else if endpointType == QueueEndpoint && strings.HasPrefix(v, endpointType.Prefix()) {
+		case endpointType == QueueEndpoint && strings.HasPrefix(v, endpointType.Prefix()):
 			endpoint = getValue(v)
-		} else if endpointType == TableEndpoint && strings.HasPrefix(v, endpointType.Prefix()) {
+		case endpointType == TableEndpoint && strings.HasPrefix(v, endpointType.Prefix()):
 			endpoint = getValue(v)
-		} else if endpointType == FileEndpoint && strings.HasPrefix(v, endpointType.Prefix()) {
+		case endpointType == FileEndpoint && strings.HasPrefix(v, endpointType.Prefix()):
 			endpoint = getValue(v)
 		}
 	}
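
For context on the loop above: an Azure storage connection string is a semicolon-separated list of Key=Value parts, and the switch dispatches on each part's key prefix. A standalone sketch of the same shape, with getValue simplified from KEDA's helper and the endpoint cases omitted:

package main

import (
	"fmt"
	"strings"
)

// getValue returns everything after the first '=' in a "Key=Value" part.
func getValue(part string) string {
	if i := strings.Index(part, "="); i >= 0 {
		return part[i+1:]
	}
	return ""
}

func main() {
	connectionString := "DefaultEndpointsProtocol=https;AccountName=demo;AccountKey=c2VjcmV0"

	var endpointProtocol, name, key string
	for _, v := range strings.Split(connectionString, ";") {
		switch {
		case strings.HasPrefix(v, "DefaultEndpointsProtocol"):
			endpointProtocol = getValue(v)
		case strings.HasPrefix(v, "AccountName"):
			name = getValue(v)
		case strings.HasPrefix(v, "AccountKey"):
			key = getValue(v)
		}
	}
	fmt.Println(endpointProtocol, name, key) // https demo c2VjcmV0
}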
9 changes: 5 additions & 4 deletions pkg/scalers/azure_blob_scaler.go
@@ -91,7 +91,8 @@ func parseAzureBlobMetadata(config *ScalerConfig) (*azureBlobMetadata, kedav1alp
 
 	// If the Use AAD Pod Identity is not present, or set to "none"
 	// then check for connection string
-	if config.PodIdentity == "" || config.PodIdentity == kedav1alpha1.PodIdentityProviderNone {
+	switch config.PodIdentity {
+	case "", kedav1alpha1.PodIdentityProviderNone:
 		// Azure Blob Scaler expects a "connection" parameter in the metadata
 		// of the scaler or in a TriggerAuthentication object
 		if config.AuthParams["connection"] != "" {
@@ -103,14 +104,14 @@ func parseAzureBlobMetadata(config *ScalerConfig) (*azureBlobMetadata, kedav1alp
 		if len(meta.connection) == 0 {
 			return nil, "", fmt.Errorf("no connection setting given")
 		}
-	} else if config.PodIdentity == kedav1alpha1.PodIdentityProviderAzure {
+	case kedav1alpha1.PodIdentityProviderAzure:
 		// If the Use AAD Pod Identity is present then check account name
 		if val, ok := config.TriggerMetadata["accountName"]; ok && val != "" {
 			meta.accountName = val
 		} else {
 			return nil, "", fmt.Errorf("no accountName given")
 		}
-	} else {
+	default:
 		return nil, "", fmt.Errorf("pod identity %s not supported for azure storage blobs", config.PodIdentity)
 	}
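
Unlike the tagless switches elsewhere in the commit, parseAzureBlobMetadata switches on a value, so a single case can match several of them: case "", kedav1alpha1.PodIdentityProviderNone:. A self-contained sketch of that shape, with illustrative types and values rather than KEDA's:

package main

import "fmt"

type PodIdentityProvider string

const (
	ProviderNone  PodIdentityProvider = "none"
	ProviderAzure PodIdentityProvider = "azure"
)

// describe mirrors the shape of the blob scaler change: an expression
// switch where one case lists two values ("" and "none").
func describe(p PodIdentityProvider) string {
	switch p {
	case "", ProviderNone:
		return "use a connection string"
	case ProviderAzure:
		return "use AAD pod identity"
	default:
		return fmt.Sprintf("pod identity %s not supported", p)
	}
}

func main() {
	fmt.Println(describe(""))            // use a connection string
	fmt.Println(describe(ProviderAzure)) // use AAD pod identity
}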

@@ -156,7 +157,7 @@ func (s *azureBlobScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec {
 	return []v2beta2.MetricSpec{metricSpec}
 }
 
-//GetMetrics returns value for a supported metric and an error if there is a problem getting the metric
+// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric
 func (s *azureBlobScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
 	bloblen, err := azure.GetAzureBlobListLength(
 		ctx,
6 changes: 3 additions & 3 deletions pkg/scalers/azure_eventhub_scaler.go
@@ -130,9 +130,9 @@ func parseAzureEventHubMetadata(config *ScalerConfig) (*eventHubMetadata, error)
 	return &meta, nil
 }
 
-//GetUnprocessedEventCountInPartition gets number of unprocessed events in a given partition
+// GetUnprocessedEventCountInPartition gets number of unprocessed events in a given partition
 func (scaler *azureEventHubScaler) GetUnprocessedEventCountInPartition(ctx context.Context, partitionInfo *eventhub.HubPartitionRuntimeInformation) (newEventCount int64, checkpoint azure.Checkpoint, err error) {
-	//if partitionInfo.LastEnqueuedOffset = -1, that means event hub partition is empty
+	// if partitionInfo.LastEnqueuedOffset = -1, that means event hub partition is empty
 	if partitionInfo != nil && partitionInfo.LastEnqueuedOffset == "-1" {
 		return 0, azure.Checkpoint{}, nil
 	}
@@ -151,7 +151,7 @@ func (scaler *azureEventHubScaler) GetUnprocessedEventCountInPartition(ctx conte
 
 	unprocessedEventCountInPartition := int64(0)
 
-	//If checkpoint.Offset is empty that means no messages has been processed from an event hub partition
+	// If checkpoint.Offset is empty that means no messages has been processed from an event hub partition
 	// And since partitionInfo.LastSequenceNumber = 0 for the very first message hence
 	// total unprocessed message will be partitionInfo.LastSequenceNumber + 1
 	if checkpoint.Offset == "" {
4 changes: 2 additions & 2 deletions pkg/scalers/azure_eventhub_scaler_test.go
@@ -257,7 +257,7 @@ func TestGetUnprocessedEventCountIfNoCheckpointExists(t *testing.T) {
 }
 
 func TestGetUnprocessedEventCountWithoutCheckpointReturning1Message(t *testing.T) {
-	//After the first message the lastsequencenumber init to 0
+	// After the first message the lastsequencenumber init to 0
 	partitionInfo := eventhub.HubPartitionRuntimeInformation{
 		PartitionID:        "0",
 		LastSequenceNumber: 0,
@@ -272,7 +272,7 @@ func TestGetUnprocessedEventCountWithoutCheckpointReturning1Message(t *testing.T
 }
 
 func TestGetUnprocessedEventCountWithoutCheckpointReturning0Message(t *testing.T) {
-	//An empty partition starts with an equal value on last-/beginning-sequencenumber other than 0
+	// An empty partition starts with an equal value on last-/beginning-sequencenumber other than 0
 	partitionInfo := eventhub.HubPartitionRuntimeInformation{
 		PartitionID:        "0",
 		LastSequenceNumber: 255,
27 changes: 14 additions & 13 deletions pkg/scalers/azure_log_analytics_scaler.go
@@ -104,8 +104,8 @@ func NewAzureLogAnalyticsScaler(config *ScalerConfig) (Scaler, error) {
 
 func parseAzureLogAnalyticsMetadata(config *ScalerConfig) (*azureLogAnalyticsMetadata, error) {
 	meta := azureLogAnalyticsMetadata{}
-
-	if config.PodIdentity == "" || config.PodIdentity == kedav1alpha1.PodIdentityProviderNone {
+	switch config.PodIdentity {
+	case "", kedav1alpha1.PodIdentityProviderNone:
 		// Getting tenantId
 		tenantID, err := getParameterFromConfig(config, "tenantId", true)
 		if err != nil {
@@ -128,9 +128,9 @@ func parseAzureLogAnalyticsMetadata(config *ScalerConfig) (*azureLogAnalyticsMet
 		meta.clientSecret = clientSecret
 
 		meta.podIdentity = ""
-	} else if config.PodIdentity == kedav1alpha1.PodIdentityProviderAzure {
+	case kedav1alpha1.PodIdentityProviderAzure:
 		meta.podIdentity = string(config.PodIdentity)
-	} else {
+	default:
 		return nil, fmt.Errorf("error parsing metadata. Details: Log Analytics Scaler doesn't support pod identity %s", config.PodIdentity)
 	}
 
@@ -207,7 +207,7 @@ func (s *azureLogAnalyticsScaler) GetMetricSpecForScaling() []v2beta2.MetricSpec
 	return []v2beta2.MetricSpec{metricSpec}
 }
 
-//GetMetrics returns value for a supported metric and an error if there is a problem getting the metric
+// GetMetrics returns value for a supported metric and an error if there is a problem getting the metric
 func (s *azureLogAnalyticsScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
 	receivedMetric, err := s.getMetricData()
 
@@ -265,7 +265,7 @@ func (s *azureLogAnalyticsScaler) getMetricData() (metricsData, error) {
 }
 
 func (s *azureLogAnalyticsScaler) getAccessToken() (tokenData, error) {
-	//if there is no token yet or it will be expired in less, that 30 secs
+	// if there is no token yet or it will be expired in less, that 30 secs
 	currentTimeSec := time.Now().Unix()
 	tokenInfo := tokenData{}
 
@@ -302,7 +302,7 @@ func (s *azureLogAnalyticsScaler) executeQuery(query string, tokenInfo tokenData
 
 	body, statusCode, err = s.executeLogAnalyticsREST(query, tokenInfo)
 
-	//Handle expired token
+	// Handle expired token
 	if statusCode == 403 || (len(body) > 0 && strings.Contains(string(body), "TokenExpired")) {
 		tokenInfo, err = s.refreshAccessToken()
 		if err != nil {
@@ -346,12 +346,13 @@ func (s *azureLogAnalyticsScaler) executeQuery(query string, tokenInfo tokenData
 	metricsInfo.threshold = s.metadata.threshold
 	metricsInfo.value = 0
 
-	//Pre-validation of query result:
-	if len(queryData.Tables) == 0 || len(queryData.Tables[0].Columns) == 0 || len(queryData.Tables[0].Rows) == 0 {
+	// Pre-validation of query result:
+	switch {
+	case len(queryData.Tables) == 0 || len(queryData.Tables[0].Columns) == 0 || len(queryData.Tables[0].Rows) == 0:
 		return metricsData{}, fmt.Errorf("error validating Log Analytics request. Details: there is no results after running your query. HTTP code: %d. Body: %s", statusCode, string(body))
-	} else if len(queryData.Tables) > 1 {
+	case len(queryData.Tables) > 1:
 		return metricsData{}, fmt.Errorf("error validating Log Analytics request. Details: too many tables in query result: %d, expected: 1. HTTP code: %d. Body: %s", len(queryData.Tables), statusCode, string(body))
-	} else if len(queryData.Tables[0].Rows) > 1 {
+	case len(queryData.Tables[0].Rows) > 1:
 		return metricsData{}, fmt.Errorf("error validating Log Analytics request. Details: too many rows in query result: %d, expected: 1. HTTP code: %d. Body: %s", len(queryData.Tables[0].Rows), statusCode, string(body))
 	}
 
@@ -385,7 +386,7 @@ func (s *azureLogAnalyticsScaler) executeQuery(query string, tokenInfo tokenData
 
 func parseTableValueToInt64(value interface{}, dataType string) (int64, error) {
 	if value != nil {
-		//type can be: real, int, long
+		// type can be: real, int, long
 		if dataType == "real" || dataType == "int" || dataType == "long" {
 			convertedValue, isConverted := value.(float64)
 			if !isConverted {
@@ -408,7 +409,7 @@ func (s *azureLogAnalyticsScaler) refreshAccessToken() (tokenData, error) {
 		return tokenData{}, err
 	}
 
-	//Now, let's check we can use this token. If no, wait until we can use it
+	// Now, let's check we can use this token. If no, wait until we can use it
 	currentTimeSec := time.Now().Unix()
 	if currentTimeSec < tokenInfo.NotBefore {
 		if currentTimeSec < tokenInfo.NotBefore+10 {
8 changes: 4 additions & 4 deletions pkg/scalers/azure_log_analytics_scaler_test.go
@@ -27,7 +27,7 @@ var (
 	query = "let x = 10; let y = 1; print MetricValue = x, Threshold = y;"
 )
 
-//Faked parameters
+// Faked parameters
 var sampleLogAnalyticsResolvedEnv = map[string]string{
 	tenantID: "d248da64-0e1e-4f79-b8c6-72ab7aa055eb",
 	clientID: "41826dd4-9e0a-4357-a5bd-a88ad771ea7d",
@@ -66,9 +66,9 @@ var testLogAnalyticsMetadata = []parseLogAnalyticsMetadataTestData{
 	{map[string]string{"tenantId": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientId": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecret": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceId": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": "", "threshold": "1900000000"}, true},
 	// Missing threshold, should fail
 	{map[string]string{"tenantId": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientId": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecret": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceId": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": ""}, true},
-	//All parameters set, should succeed
+	// All parameters set, should succeed
 	{map[string]string{"tenantId": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientId": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecret": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceId": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000"}, false},
-	//All parameters set, should succeed
+	// All parameters set, should succeed
 	{map[string]string{"tenantIdFromEnv": "d248da64-0e1e-4f79-b8c6-72ab7aa055eb", "clientIdFromEnv": "41826dd4-9e0a-4357-a5bd-a88ad771ea7d", "clientSecretFromEnv": "U6DtAX5r6RPZxd~l12Ri3X8J9urt5Q-xs", "workspaceIdFromEnv": "074dd9f8-c368-4220-9400-acb6e80fc325", "query": query, "threshold": "1900000000"}, false},
 }
 
@@ -83,7 +83,7 @@ var testLogAnalyticsMetadataWithEmptyAuthParams = []parseLogAnalyticsMetadataTes
 	{map[string]string{"query": "", "threshold": "1900000000"}, true},
 	// Missing threshold, should fail
 	{map[string]string{"query": query, "threshold": ""}, true},
-	//All parameters set, should succeed
+	// All parameters set, should succeed
 	{map[string]string{"query": query, "threshold": "1900000000"}, true},
 }