diff --git a/README.md b/README.md index f5ef9e1..e897d33 100644 --- a/README.md +++ b/README.md @@ -39,17 +39,27 @@ path as your binary, it will be autodetected and you could run by just calling i >> Default values don't count for precedence. ``` - --config-file string config file (default "config.yaml") - --duration int Duration of each individual run in minutes. (default 1) - --cooldown int Cooldown time between tests in seconds. (default 10 s) - --gateway-url string Gateway url to perform the test against (default "https://api.integration.openshift.com") - -h, --help help for ocm-api-load - --ocm-token string OCM Authorization token - --ocm-token-url string Token URL (default "https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token") - --output-path string Output directory for result and report files (default "results") - --rate string Rate of the attack. Format example 5/s. (Available units 'ns', 'us', 'ms', 's', 'm', 'h') (default "1/s") - --test-id string Unique ID to identify the test run. UUID is recommended (default "dc049b1d-92b4-420c-9eb7-34f30229ef46") - --test-names strings Names for the tests to be run. (default [all]) + --aws-access-key string AWS access key + --aws-access-secret string AWS access secret + --aws-account-id string AWS Account ID, is the 12-digit account number. + --aws-region string AWS region (default "us-west-1") + --config-file string config file (default "config.yaml") + --cooldown int Cooldown time between tests in seconds. (default 10) + --duration int Duration of each individual run in minutes. (default 1) + --end-rate int Ending request per second rate. 
(E.g.: 5 would be 5 req/s) + --gateway-url string Gateway url to perform the test against (default "https://api.integration.openshift.com") + -h, --help help for ocm-api-load + --ocm-token string OCM Authorization token + --ocm-token-url string Token URL (default "https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token") + --output-path string Output directory for result and report files (default "results") + --ramp-duration int Duration of ramp in minutes, before normal execution. (default 0) + --ramp-steps int Number of stepts to get from start rate to end rate. (Minimum 2 steps) + --ramp-type string Type of ramp to use for all tests. (linear, exponential) + --rate string Rate of the attack. Format example 5/s. (Available units 'ns', 'us', 'ms', 's', 'm', 'h') (default "1/s") + --start-rate int Starting request per second rate. (E.g.: 5 would be 5 req/s) + --test-id string Unique ID to identify the test run. UUID is recommended (default "c160dab1-7fa3-4965-9797-47da16e5c1b9") + --test-names strings Names for the tests to be run. + -v, --verbose set this flag to activate verbose logging. 
``` ## Tests @@ -60,12 +70,15 @@ path as your binary, it will be autodetected and you could run by just calling i | list-subscriptions | /api/accounts_mgmt/v1/subscriptions | GET | | access-review | /api/authorizations/v1/access_review | POST | | register-new-cluster | /api/accounts_mgmt/v1/cluster_registrations | POST | +| register-existing-cluster | /api/accounts_mgmt/v1/cluster_registrations | POST | | create-cluster | /api/clusters_mgmt/v1/clusters | POST | | list-clusters | /api/clusters_mgmt/v1/clusters | GET | | get-current-account | /api/accounts_mgmt/v1/current_account | GET | | quota-cost | /api/accounts_mgmt/v1/organizations/{orgId}/quota_cost | GET | | resource-review | /api/authorizations/v1/resource_review | POST | | cluster-authorizations | /api/accounts_mgmt/v1/cluster_authorizations | POST | +| self-terms-review | /api/authorizations/v1/self_terms_review | POST | +| certificates | /api/accounts_mgmt/v1/certificates | POST | |--|--|--| ## Config file @@ -83,6 +96,11 @@ path as your binary, it will be autodetected and you could run by just calling i - cooldown: Cooldown time between tests in seconds. (default 10 s) - rate: Rate of the attack. Format example 5/s. (Available units 'ns', 'us', 'ms', 's', 'm', 'h') (default "1/s") - test-id: Unique ID to identify the test run. UUID is recommended (default "dc049b1d-92b4-420c-9eb7-34f30229ef46") +- ramp-type: Type of ramp to use for all tests. (linear, exponential) +- ramp-duration: Duration of ramp in minutes, before normal execution. (default 0) +- start-rate: Starting request per second rate. (E.g.: 5 would be 5 req/s) +- end-rate: Ending request per second rate. (E.g.: 5 would be 5 req/s) +- ramp-steps: Number of stepts to get from start rate to end rate. (Minimum 2 steps) - tests: List of the tests to run. Empty list means all. ### Test options @@ -92,6 +110,31 @@ Each test can contain this options: - rate: Rate of the attack. Format example 5/s. 
(Available units 'ns', 'us', 'ms', 's', 'm', 'h') (default "1/s") - duration: Override duration for the test. (A positive integer accompanied of a valid unit) +#### Ramping functionality + +Each test can have a specific configuration for ramping up the rate, in this case the following options must be provided. + +- duration: in minutes +- ramp-type: Type of ramp to use for all tests. (linear, exponential) +- ramp-duration: Duration of ramp in minutes, before normal execution. (default 0) +- start-rate: Starting request per second rate. (E.g.: 5 would be 5 req/s) +- end-rate: Ending request per second rate. (E.g.: 5 would be 5 req/s) +- ramp-steps: Number of steps to get from start rate to end rate. (Minimum 2 steps) + +> `rate` option is not needed for this. + +##### Example + +```yaml + cluster-authorizations: + duration: 30 + ramp-type: exponential + ramp-duration: 10 + start-rate: 1 + end-rate: 50 + ramp-steps: 6 +``` + ### Obligatory options - ocm-token @@ -169,3 +212,47 @@ Steps: - Be sure you are in the latest version of `main` branch and have bumped the version - Now you are ready to run `make release` this will build the binary and generate the tarfiles that contain all the needed files + +## Ramping Up Theory + +The test will run a number of steps, each step running for `ramp-duration / ramp-steps` minutes (rounded); this can sometimes make the test last more or less than the expected duration, but we want to have an even distribution of times. + +As each step finishes it will increase the rate according to a delta that is calculated with the parameters: + +For both types of ramps we have common behaviour: + +- First rate: is always `start-rate` +- Last rate: is always `end-rate` +- Since we cannot use float values for rates, we round all the rates to their closest integer. 
+ +### For a linear ramp it will use this formula + +`delta = ( end-rate - start-rate ) / ( ramp-steps - 1 )` +> `ramp-steps` always has to be greater than 1 + +So the new rate will be: + +`newRate = oldRate + delta` + +### For an exponential distribution + +We are using the exponential formula `f(t) = x * coeff ^ t` + +The `coeff` is calculated with this formula + +`coeff = (end-rate / start-rate) ^ (1 / ramp-steps)` + +So the new rate will be: + +`newRate = start-rate * coeff ^ <# of step>` + +### `duration` vs `ramp-duration` + +The `duration` is the number of minutes the test is going to run. The `ramp-duration` is the number of minutes the ramp is going to last. + +- If `ramp-duration` is not set, the ramp will take the whole `duration`. +- If `ramp-duration` is set, it will run the ramp for that long and then run the remainder of the `duration` at the `end-rate`. + - E.g.: if `duration` is 30 minutes and `ramp-duration` is 20 minutes. The test will run a ramp for 20 minutes and keep running at `end-rate` for the remaining 10 minutes. So it will run `end-rate` for `duration` - `ramp-duration` minutes. +- If `ramp-duration` is greater than `duration` it will just perform a ramp for `ramp-duration` minutes. + +Overrides for the values work the same, localized test values take priority over global values. 
diff --git a/automation.py b/automation.py index 6b9ef2b..fce7c33 100755 --- a/automation.py +++ b/automation.py @@ -189,6 +189,127 @@ def show_graphs(directory, filename): validate=False) +def cma_graph(directory, filename): + regex = re.compile(r'(.*/)?[\w-]+_([\w-]+).(\w.+)') + matches = regex.match(filename) + if regex.match(filename) and matches.group(3) == 'json': + # Initializes database for current file in current directory + # Read by 20000 chunks + disk_engine = create_engine( + 'sqlite:///{}.db'.format(matches.group(2))) + + j = 0 + index_start = 1 + chunk = 20000 + for df in pd.read_json(os.path.join(directory, filename), + lines=True, + chunksize=chunk): + df.index += index_start + + columns = ['timestamp', 'latency'] + + for c in df.columns: + if c not in columns: + df = df.drop(c, axis=1) + + j += 1 + logger.info(f'completed {j*chunk} rows') + + df.to_sql('data', disk_engine, if_exists='append') + index_start = df.index[-1] + 1 + + df = pd.read_sql_query('SELECT * FROM data', disk_engine) + df_t = pd.DataFrame(df.iloc[:, -1]) + df_t.index = df.timestamp + + df_t['cma'] = df_t.expanding().mean() + + data = [{ + 'type': 'line', + 'x': df_t.index, + 'y': df_t['cma']/1000000, + }] + + layout = { + 'title': 'Cumulative AVG Latency: {}'.format( + matches.group(2)), + 'xaxis': {'title': 'Time', + 'showgrid': 'true', + 'ticklabelmode': "period"}, + 'yaxis': {'title': 'Milliseconds (log)', 'type': 'linear'}, + } + + fig_dict = {'data': data, 'layout': layout} + + os.remove('{}.db'.format(matches.group(2))) + + pio.show(fig_dict, + engine="kaleido", + width=1600, + height=900, + validate=False) + + +def count_graph(directory, filename): + regex = re.compile(r'(.*/)?[\w-]+_([\w-]+).(\w.+)') + matches = regex.match(filename) + if regex.match(filename) and matches.group(3) == 'json': + # Initializes database for current file in current directory + # Read by 20000 chunks + disk_engine = create_engine( + 'sqlite:///{}.db'.format(matches.group(2))) + + j = 0 + 
index_start = 1 + chunk = 20000 + for df in pd.read_json(os.path.join(directory, filename), + lines=True, + chunksize=chunk): + df.index += index_start + + columns = ['timestamp', 'latency'] + + for c in df.columns: + if c not in columns: + df = df.drop(c, axis=1) + + j += 1 + logger.info(f'completed {j*chunk} rows') + + df.to_sql('data', disk_engine, if_exists='append') + index_start = df.index[-1] + 1 + + df = pd.read_sql_query('SELECT * FROM data', disk_engine) + df_t = pd.DataFrame(df.iloc[:, -1]) + df_t.index = df.timestamp + + df_t['count'] = df_t.expanding().count() + + data = [{ + 'type': 'line', + 'x': df_t.index, + 'y': df_t['count'], + }] + + layout = { + 'title': 'Request count : {}'.format(matches.group(2)), + 'xaxis': {'title': 'Time', + 'showgrid': 'true', + 'ticklabelmode': "period"}, + 'yaxis': {'title': 'Number of requests', 'type': 'linear'}, + } + + fig_dict = {'data': data, 'layout': layout} + + os.remove('{}.db'.format(matches.group(2))) + + pio.show(fig_dict, + engine="kaleido", + width=1600, + height=900, + validate=False) + + def generate_summaries(directory): try: os.stat('{}/summaries'.format(directory)) @@ -466,6 +587,26 @@ def main(): help='filename of a result to display the graph. 
\ (Overrides generating all graphs.)') + cma_parser = action_subparsers.add_parser("cma", + help="generate cummulative average graph \ + for the results file", + parents=[parent_parser]) + + cma_parser.add_argument('--filename', + dest="filename", + help='filename of a result to display the graph', + required=True) + + count_parser = action_subparsers.add_parser("count", + help="generate cummulative count of requests graph \ + for the results file", + parents=[parent_parser]) + + count_parser.add_argument('--filename', + dest="filename", + help='filename of a result to display the graph', + required=True) + action_subparsers.add_parser("summary", help="generates vegeta \ summary for results", @@ -478,8 +619,7 @@ def main(): report_parser.add_argument('--filename', dest='filename', default='report-{}.docx'.format( - date.strftime("%Y-%m-%d") - ), + date.strftime("%Y-%m-%d")), help='name for the report file.') upload_parser = action_subparsers.add_parser("upload", @@ -516,6 +656,10 @@ def main(): show_graphs(args.directory, args.filename) else: generate_graphs(args.directory) + elif args.action_command == 'cma': + cma_graph(args.directory, args.filename) + elif args.action_command == 'count': + count_graph(args.directory, args.filename) elif args.action_command == 'summary': generate_summaries(args.directory) elif args.action_command == 'report': diff --git a/cmd/ocm-load-test.go b/cmd/ocm-load-test.go index bb24a49..298c4e0 100644 --- a/cmd/ocm-load-test.go +++ b/cmd/ocm-load-test.go @@ -4,13 +4,11 @@ import ( "context" "fmt" "os" - "time" "github.com/cloud-bulldozer/ocm-api-load/pkg/cmd" "github.com/cloud-bulldozer/ocm-api-load/pkg/helpers" "github.com/cloud-bulldozer/ocm-api-load/pkg/logging" "github.com/cloud-bulldozer/ocm-api-load/pkg/tests" - "github.com/cloud-bulldozer/ocm-api-load/pkg/types" uuid "github.com/satori/go.uuid" "github.com/spf13/cobra" @@ -43,19 +41,28 @@ var rootCmd = &cobra.Command{ func 
init() { cobra.OnInitialize(initConfig) + //Flags with defaults rootCmd.Flags().StringVar(&configFile, "config-file", "config.yaml", "config file") rootCmd.Flags().String("ocm-token-url", "https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token", "Token URL") - rootCmd.Flags().String("ocm-token", "", "OCM Authorization token") rootCmd.Flags().String("gateway-url", "https://api.integration.openshift.com", "Gateway url to perform the test against") rootCmd.Flags().String("test-id", uuid.NewV4().String(), "Unique ID to identify the test run. UUID is recommended") rootCmd.Flags().String("output-path", "results", "Output directory for result and report files") rootCmd.Flags().Int("duration", 1, "Duration of each individual run in minutes.") rootCmd.Flags().String("rate", "1/s", "Rate of the attack. Format example 5/s. (Available units 'ns', 'us', 'ms', 's', 'm', 'h')") - rootCmd.Flags().StringSlice("test-names", []string{}, "Names for the tests to be run.") rootCmd.Flags().BoolP("verbose", "v", false, "set this flag to activate verbose logging.") rootCmd.Flags().Int("cooldown", 10, "Cooldown time between tests in seconds.") + rootCmd.Flags().StringSlice("test-names", []string{}, "Names for the tests to be run.") + //Ramping Flags + rootCmd.Flags().String("ramp-type", "", "Type of ramp to use for all tests. (linear, exponential)") + rootCmd.Flags().Int("start-rate", 0, "Starting request per second rate. (E.g.: 5 would be 5 req/s)") + rootCmd.Flags().Int("end-rate", 0, "Ending request per second rate. (E.g.: 5 would be 5 req/s)") + rootCmd.Flags().Int("ramp-steps", 0, "Number of stepts to get from start rate to end rate. 
(Minimum 2 steps)") + rootCmd.Flags().Int("ramp-duration", 0, "Duration of ramp in minutes, before normal execution") + + //Required flags + rootCmd.Flags().String("ocm-token", "", "OCM Authorization token") // AWS config - // If needed to use multiple AWS account, use the config file + // If multiple AWS account are needed use the config file rootCmd.Flags().String("aws-region", "us-west-1", "AWS region") rootCmd.Flags().String("aws-access-key", "", "AWS access key") rootCmd.Flags().String("aws-access-secret", "", "AWS access secret") @@ -141,44 +148,35 @@ func run(cmd *cobra.Command, args []string) error { if viper.GetString("ocm-token") == "" { logger.Fatal(cmd.Context(), "ocm-token is a necessary configuration") } - err = helpers.CreateFolder(viper.GetString("output-path"), logger) + err = helpers.CreateFolder(cmd.Context(), viper.GetString("output-path"), logger) if err != nil { logger.Fatal(cmd.Context(), "creating api connection: %v", err) } logger.Info(cmd.Context(), "Using output directory: %s", viper.GetString("output-path")) - connection, err := helpers.BuildConnection(viper.GetString("gateway-url"), + connection, err := helpers.BuildConnection(cmd.Context(), viper.GetString("gateway-url"), viper.GetString("client.id"), viper.GetString("client.secret"), viper.GetString("ocm-token"), logger, - cmd.Context()) + ) if err != nil { logger.Fatal(cmd.Context(), "creating api connection: %v", err) } - defer helpers.Cleanup(connection) - - vegetaRate, err := helpers.ParseRate(viper.GetString("rate")) - if err != nil { - logger.Fatal(cmd.Context(), "parsing rate: %v", err) - } + defer helpers.Cleanup(cmd.Context(), connection) configTests() configAWS(cmd.Context(), logger) - testConfig := types.TestConfiguration{ - TestID: viper.GetString("test-id"), - OutputDirectory: viper.GetString("output-path"), - Duration: time.Duration(viper.GetInt("duration")) * time.Minute, - Cooldown: time.Duration(viper.GetInt("cooldown")) * time.Second, - Rate: vegetaRate, - 
Connection: connection, - Logger: logger, - Ctx: cmd.Context(), - } + runner := tests.NewRunner( + viper.GetString("test-id"), + viper.GetString("output-path"), + logger, + connection, + ) - if err := tests.Run(testConfig); err != nil { + if err := runner.Run(cmd.Context()); err != nil { logger.Fatal(cmd.Context(), "running load test: %v", err) } diff --git a/config.example.yaml b/config.example.yaml index b9b2737..08cb90a 100644 --- a/config.example.yaml +++ b/config.example.yaml @@ -19,6 +19,10 @@ cooldown: 10 output-path: "./results" rate: "5/s" test-id: new-test +ramp-type: exponential +start-rate: 1 +end-rate: 120 +ramp-steps: 6 tests: self-access-token: rate: "1000/h" @@ -51,11 +55,17 @@ tests: rate: "2000/h" duration: 1 cluster-authorizations: - rate: "2000/h" - duration: 1 + duration: 30 + ramp-type: linear + start-rate: 1 + end-rate: 50 + ramp-steps: 6 self-terms-review: - rate: "10/s" - duration: 1 + duration: 30 + ramp-type: exponential + start-rate: 1 + end-rate: 50 + ramp-steps: 6 certificates: rate: "2/s" duration: 1 diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100644 index 0000000..0abd461 --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,62 @@ +package config + +import ( + "context" + + "github.com/cloud-bulldozer/ocm-api-load/pkg/logging" + "github.com/spf13/viper" +) + +type ConfigHelper struct { + logger logging.Logger + conf *viper.Viper +} + +func NewConfigHelper(logger logging.Logger, conf *viper.Viper) *ConfigHelper { + return &ConfigHelper{ + logger: logger, + conf: conf, + } +} + +func (c *ConfigHelper) ResolveStringConfig(ctx context.Context, def, key string) string { + s := c.conf.GetString(key) + if s == "" { + c.logger.Info(ctx, "no value for %s. Using default value.", key) + return def + } + return s +} + +func (c *ConfigHelper) ResolveIntConfig(ctx context.Context, def int, key string) int { + i := c.conf.GetInt(key) + if i == 0 { + c.logger.Info(ctx, "no value for %s. 
Using default value.", key) + return def + } + return i +} + +func (c *ConfigHelper) ValidateRampConfig(ctx context.Context, max, min, steps int) bool { + if steps < 2 { + c.logger.Warn(ctx, + "steps must be always 2 or more. Ignoring ramping configuration.") + return false + } + if min < 1 { + c.logger.Warn(ctx, + "min rate must be always 1 or more. Ignoring ramping configuration.") + return false + } + if max < 1 { + c.logger.Warn(ctx, + "max rate must be always 1 or more. Ignoring ramping configuration.") + return false + } + if max <= min { + c.logger.Warn(ctx, + "max rate must be bigger than min rate. Ignoring ramping configuration.") + return false + } + return true +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go new file mode 100644 index 0000000..6409692 --- /dev/null +++ b/pkg/config/config_test.go @@ -0,0 +1,100 @@ +package config + +import ( + "context" + "testing" + + "github.com/cloud-bulldozer/ocm-api-load/pkg/logging" + "github.com/spf13/viper" +) + +func TestConfigHelper_ResolveStringConfig(t *testing.T) { + tests := []struct { + name string + def string + key string + want string + }{ + {"1", "NoName", "testName", "NoName"}, + {"2", "NoName", "test_name", "MyTest"}, + {"3", "5/s", "rate", "5/s"}, + {"4", "5/s", "MyTest.rate", "2/s"}, + } + conf := viper.New() + conf.Set("test_name", "MyTest") + conf.Set("MyTest", map[string]string{}) + conf.Set("MyTest.rate", "2/s") + logBuilder := logging.NewGoLoggerBuilder() + log, _ := logBuilder.Build() + c := NewConfigHelper(log, conf) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := c.ResolveStringConfig(context.TODO(), tt.def, tt.key); got != tt.want { + t.Errorf("ConfigHelper.ResolveStringConfig() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestConfigHelper_ResolveIntConfig(t *testing.T) { + tests := []struct { + name string + def int + key string + want int + }{ + {"1", 15, "testDuration", 15}, + {"2", 15, "test_duration", 33}, + 
{"3", 4, "duration", 4}, + {"4", 4, "MyTest.duration", 99}, + } + conf := viper.New() + conf.Set("test_duration", 33) + conf.Set("MyTest", map[string]interface{}{}) + conf.Set("MyTest.duration", 99) + logBuilder := logging.NewGoLoggerBuilder() + log, _ := logBuilder.Build() + c := NewConfigHelper(log, conf) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := c.ResolveIntConfig(context.TODO(), tt.def, tt.key); got != tt.want { + t.Errorf("ConfigHelper.ResolveIntConfig() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestConfigHelper_ValidateRampConfig(t *testing.T) { + tests := []struct { + name string + min int + max int + steps int + want bool + }{ + {"1", 1, 5, 2, true}, + {"2", 1, 5, 1, false}, + {"3", 1, 5, 0, false}, + {"4", 0, 5, 2, false}, + {"4", 1, 0, 2, false}, + {"4", 5, 5, 2, false}, + {"4", 5, 2, 2, false}, + } + conf := viper.New() + conf.Set("test_duration", 33) + conf.Set("MyTest", map[string]interface{}{}) + conf.Set("MyTest.duration", 99) + logBuilder := logging.NewGoLoggerBuilder() + log, _ := logBuilder.Build() + c := &ConfigHelper{ + logger: log, + conf: conf, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := c.ValidateRampConfig(context.TODO(), tt.max, tt.min, tt.steps); got != tt.want { + t.Errorf("ConfigHelper.ValidateRampConfig() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/helpers/clean_clusters_transport.go b/pkg/helpers/clean_clusters_transport.go index ff037f6..c6991f4 100644 --- a/pkg/helpers/clean_clusters_transport.go +++ b/pkg/helpers/clean_clusters_transport.go @@ -15,16 +15,16 @@ import ( type CleanClustersTransport struct { Wrapped http.RoundTripper Logger logging.Logger - Context context.Context } func (t *CleanClustersTransport) RoundTrip(request *http.Request) (*http.Response, error) { var err error manipulated := false + ctx := request.Context() if t.isCreateCluster(request) { request, manipulated, err = t.manipulateRequest(request) if err != 
nil { - t.Logger.Error(t.Context, "Failed to manipulate request for cleanup: %v", err) + t.Logger.Error(ctx, "Failed to manipulate request for cleanup: %v", err) } } response, err := t.Wrapped.RoundTrip(request) @@ -41,54 +41,57 @@ func (t *CleanClustersTransport) RoundTrip(request *http.Request) (*http.Respons } func (t *CleanClustersTransport) addToCleanup(request *http.Request, response *http.Response) *http.Response { + ctx := request.Context() body, err := ioutil.ReadAll(response.Body) if err != nil { - t.Logger.Error(t.Context, "Failed to read body of response for request %s %s: %v", request.Method, + t.Logger.Error(ctx, "Failed to read body of response for request %s %s: %v", request.Method, request.URL.String(), err) return response } var cluster map[string]interface{} err = json.Unmarshal(body, &cluster) if err != nil { - t.Logger.Error(t.Context, "Failed to unmarshal body of response for request %s %s: %v", request.Method, + t.Logger.Error(ctx, "Failed to unmarshal body of response for request %s %s: %v", request.Method, request.URL.String(), err) return response } clusterID, ok := cluster["id"] if !ok { - t.Logger.Error(t.Context, "Failed to get cluster ID from body of response for request %s %s: %v", request.Method, + t.Logger.Error(ctx, "Failed to get cluster ID from body of response for request %s %s: %v", request.Method, request.URL.String(), err) return response } - markClusterForCleanup(clusterID.(string), true, t.Logger, t.Context) + markClusterForCleanup(ctx, clusterID.(string), true, t.Logger) response.Body = ioutil.NopCloser(strings.NewReader(string(body))) return response } func (t *CleanClustersTransport) removeFromCleanup(request *http.Request) { + ctx := request.Context() urlParts := strings.Split(request.URL.String(), "?") url := urlParts[0] parts := strings.Split(url, "/") clusterID := parts[len(parts)-1] - t.Logger.Info(t.Context, "Removing cluster '%s' from cleanup", clusterID) + t.Logger.Info(ctx, "Removing cluster '%s' from cleanup", 
clusterID) delete(createdClusterIDs, clusterID) } func (t *CleanClustersTransport) manipulateRequest(request *http.Request) (*http.Request, bool, error) { + ctx := request.Context() body, err := ioutil.ReadAll(request.Body) if err != nil { - t.Logger.Error(t.Context, "Failed to read body of cluster for request %s %s: %v", + t.Logger.Error(ctx, "Failed to read body of cluster for request %s %s: %v", request.Method, request.URL.String(), err) return request, false, err } newBody, err := addTestProperties(string(body)) if err != nil { - t.Logger.Error(t.Context, "Failed to add test properties to cluster for request %s %s: %v", + t.Logger.Error(ctx, "Failed to add test properties to cluster for request %s %s: %v", request.Method, request.URL.String(), err) return request, false, err } - t.Logger.Info(t.Context, "%s %s: %s", request.Method, request.URL.String(), newBody) + t.Logger.Info(ctx, "%s %s: %s", request.Method, request.URL.String(), newBody) request.Body = ioutil.NopCloser(strings.NewReader(newBody)) request.ContentLength = int64(len(newBody)) return request, true, nil @@ -104,7 +107,7 @@ func (t *CleanClustersTransport) isDeleteCluster(request *http.Request) bool { return parts[len(parts)-2] == "clusters" && request.Method == "DELETE" } -func markClusterForCleanup(clusterID string, deprovision bool, logger logging.Logger, ctx context.Context) { +func markClusterForCleanup(ctx context.Context, clusterID string, deprovision bool, logger logging.Logger) { logger.Info(ctx, "Marking cluster '%s' for cleanup with 'deprovision'=%v", clusterID, deprovision) createdClusterIDs[clusterID] = deprovision } diff --git a/pkg/helpers/connection.go b/pkg/helpers/connection.go index 9ff8530..14726e5 100644 --- a/pkg/helpers/connection.go +++ b/pkg/helpers/connection.go @@ -4,13 +4,13 @@ import ( "context" "net/http" - sdk "github.com/openshift-online/ocm-sdk-go" "github.com/cloud-bulldozer/ocm-api-load/pkg/logging" + sdk 
"github.com/openshift-online/ocm-sdk-go" ) // BuildConnection build the vegeta connection // that is going to be used for testing -func BuildConnection(gateway, clientID, clientSecret, token string, logger logging.Logger, ctx context.Context) (*sdk.Connection, error) { +func BuildConnection(ctx context.Context, gateway, clientID, clientSecret, token string, logger logging.Logger) (*sdk.Connection, error) { conn, err := sdk.NewConnectionBuilder(). Insecure(true). URL(gateway). @@ -18,7 +18,7 @@ func BuildConnection(gateway, clientID, clientSecret, token string, logger loggi Tokens(token). Logger(logger). TransportWrapper(func(wrapped http.RoundTripper) http.RoundTripper { - return &CleanClustersTransport{Wrapped: wrapped, Logger: logger, Context: ctx} + return &CleanClustersTransport{Wrapped: wrapped, Logger: logger} }). BuildContext(ctx) if err != nil { diff --git a/pkg/helpers/file_system.go b/pkg/helpers/file_system.go index 9dd158c..2675d3d 100644 --- a/pkg/helpers/file_system.go +++ b/pkg/helpers/file_system.go @@ -10,8 +10,8 @@ import ( ) // CreateFolder creates folder in the system -func CreateFolder(path string, logger logging.Logger) error { - logger.Info(context.Background(), "Creating '%s' directory", path) +func CreateFolder(ctx context.Context, path string, logger logging.Logger) error { + logger.Info(ctx, "Creating '%s' directory", path) folder, err := filepath.Abs(path) if err != nil { return err diff --git a/pkg/helpers/helpers.go b/pkg/helpers/helpers.go index 43227ef..d3ed080 100644 --- a/pkg/helpers/helpers.go +++ b/pkg/helpers/helpers.go @@ -19,17 +19,17 @@ var createdClusterIDs = map[string]bool{} var validateDeletedClusterIDs = make([]string, 0) var failedCleanupClusterIDs = make([]string, 0) -func Cleanup(connection *sdk.Connection) { +func Cleanup(ctx context.Context, connection *sdk.Connection) { if len(createdClusterIDs) == 0 { return } - connection.Logger().Info(context.TODO(), "About to clean up the following clusters:") + 
connection.Logger().Info(ctx, "About to clean up the following clusters:") for clusterID, deprovision := range createdClusterIDs { - connection.Logger().Info(context.TODO(), "Cluster ID: %s, deprovision: %v", clusterID, deprovision) - DeleteCluster(clusterID, deprovision, connection) + connection.Logger().Info(ctx, "Cluster ID: %s, deprovision: %v", clusterID, deprovision) + DeleteCluster(ctx, clusterID, deprovision, connection) } for _, clusterID := range validateDeletedClusterIDs { - err := verifyClusterDeleted(clusterID, connection) + err := verifyClusterDeleted(ctx, clusterID, connection) if err != nil { markFailedCleanup(clusterID) } else { @@ -37,32 +37,32 @@ func Cleanup(connection *sdk.Connection) { } } if len(failedCleanupClusterIDs) > 0 { - connection.Logger().Warn(context.TODO(), "The following clusters failed deletion: %v", failedCleanupClusterIDs) + connection.Logger().Warn(ctx, "The following clusters failed deletion: %v", failedCleanupClusterIDs) } createdClusterIDs = make(map[string]bool) failedCleanupClusterIDs = make([]string, 0) } -func DeleteCluster(id string, deprovision bool, connection *sdk.Connection) { - connection.Logger().Info(context.TODO(), "Deleting cluster '%s'", id) +func DeleteCluster(ctx context.Context, id string, deprovision bool, connection *sdk.Connection) { + connection.Logger().Info(ctx, "Deleting cluster '%s'", id) // Send the request to delete the cluster response, err := connection.Delete(). Path(ClustersEndpoint+id). Parameter("deprovision", deprovision). 
Send() if err != nil { - connection.Logger().Error(context.TODO(), "Failed to delete cluster '%s', got error: %v", id, err) + connection.Logger().Error(ctx, "Failed to delete cluster '%s', got error: %v", id, err) markFailedCleanup(id) } else if response.Status() != 204 { - connection.Logger().Error(context.TODO(), "Failed to delete cluster '%s', got http status %d", id, response.Status()) + connection.Logger().Error(ctx, "Failed to delete cluster '%s', got http status %d", id, response.Status()) markFailedCleanup(id) } else { validateDeletedClusterIDs = append(validateDeletedClusterIDs, id) - connection.Logger().Info(context.TODO(), "Cluster '%s' deleted", id) + connection.Logger().Info(ctx, "Cluster '%s' deleted", id) } } -func CreateCluster(body string, gatewayConnection *sdk.Connection) (string, map[string]interface{}, error) { +func CreateCluster(ctx context.Context, body string, gatewayConnection *sdk.Connection) (string, map[string]interface{}, error) { postResponse, err := gatewayConnection.Post(). Path(ClustersEndpoint). String(body). 
@@ -80,14 +80,14 @@ func CreateCluster(body string, gatewayConnection *sdk.Connection) (string, map[ } clusterID, ok := data["id"] if !ok { - gatewayConnection.Logger().Error(context.TODO(), "ClusterID not present") + gatewayConnection.Logger().Error(ctx, "ClusterID not present") } - gatewayConnection.Logger().Info(context.TODO(), "Cluster '%s' created", clusterID.(string)) + gatewayConnection.Logger().Info(ctx, "Cluster '%s' created", clusterID.(string)) return clusterID.(string), data, nil } -func verifyClusterDeleted(clusterID string, connection *sdk.Connection) error { - connection.Logger().Info(context.TODO(), "verifying deleted cluster '%s'", clusterID) +func verifyClusterDeleted(ctx context.Context, clusterID string, connection *sdk.Connection) error { + connection.Logger().Info(ctx, "verifying deleted cluster '%s'", clusterID) var forcedErr error var getStatus int err := retry.Retry(func(attempt uint) error { @@ -107,7 +107,7 @@ func verifyClusterDeleted(clusterID string, connection *sdk.Connection) error { strategy.Wait(1*time.Second), strategy.Limit(300)) if err != nil { - connection.Logger().Error(context.TODO(), "failed to delete cluster '%s': %v", clusterID, err) + connection.Logger().Error(ctx, "failed to delete cluster '%s': %v", clusterID, err) return err } if forcedErr != nil { @@ -116,6 +116,6 @@ func verifyClusterDeleted(clusterID string, connection *sdk.Connection) error { if getStatus != 404 { return fmt.Errorf("failed to wait for cluster '%s' to be archived", clusterID) } - connection.Logger().Info(context.TODO(), "Cluster '%s' deleted successfully", clusterID) + connection.Logger().Info(ctx, "Cluster '%s' deleted successfully", clusterID) return nil } diff --git a/pkg/ramping/exponential.go b/pkg/ramping/exponential.go new file mode 100644 index 0000000..9033a81 --- /dev/null +++ b/pkg/ramping/exponential.go @@ -0,0 +1,41 @@ +package ramping + +import "math" + +type Exponential struct { + startRate int + endRate int + steps int + currentRate 
float64 + currentStep int + delta float64 +} + +func NewExponentialRamp(startRate, endRate, steps int) *Exponential { + d := math.Pow((float64(endRate) / float64(startRate)), (1 / float64(steps))) + return &Exponential{ + startRate: startRate, + endRate: endRate, + steps: steps, + currentStep: 1, + currentRate: float64(startRate), + delta: d, + } +} + +func (e *Exponential) NextRate() int { + if e.currentStep == e.steps { + return int(e.endRate) + } + e.currentRate = float64(e.startRate) * math.Pow(e.delta, float64(e.currentStep)) + e.currentStep += 1 + return int(math.Round(e.currentRate)) +} + +func (e *Exponential) GetSteps() int { + return int(e.steps) +} + +func (e *Exponential) GetType() string { + return "Exponential ramp" +} diff --git a/pkg/ramping/exponential_test.go b/pkg/ramping/exponential_test.go new file mode 100644 index 0000000..98ac96c --- /dev/null +++ b/pkg/ramping/exponential_test.go @@ -0,0 +1,71 @@ +package ramping + +import ( + "testing" +) + +func TestExponential_NextRate_scenario1(t *testing.T) { + e := NewExponentialRamp(2, 20, 8) + tests := []struct { + name string + want int + }{ + {"step1", 3}, + {"step2", 4}, + {"step3", 5}, + {"step4", 6}, + {"step5", 8}, + {"step6", 11}, + {"step7", 15}, + {"step8", 20}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := e.NextRate(); got != tt.want { + t.Errorf("Exponential.NextRate() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestExponential_NextRate_scenario2(t *testing.T) { + e := NewExponentialRamp(1, 100, 8) + tests := []struct { + name string + want int + }{ + {"step1", 2}, + {"step2", 3}, + {"step3", 6}, + {"step4", 10}, + {"step5", 18}, + {"step6", 32}, + {"step7", 56}, + {"step8", 100}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := e.NextRate(); got != tt.want { + t.Errorf("Exponential.NextRate() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestExponential_GetSteps(t *testing.T) { + t.Run("testing 
GetSteps", func(t *testing.T) { + e := NewExponentialRamp(1, 10, 5) + if got := e.GetSteps(); got != 5 { + t.Errorf("Exponential.GetSteps() = %v, want %v", got, 5) + } + }) +} + +func TestExponential_GetType(t *testing.T) { + t.Run("testing GetType", func(t *testing.T) { + e := NewExponentialRamp(1, 10, 5) + if got := e.GetType(); got != "Exponential ramp" { + t.Errorf("Exponential.GetType() = %v, want %v", got, "Exponential ramp") + } + }) +} diff --git a/pkg/ramping/linear.go b/pkg/ramping/linear.go new file mode 100644 index 0000000..24896bf --- /dev/null +++ b/pkg/ramping/linear.go @@ -0,0 +1,45 @@ +package ramping + +import ( + "math" +) + +type Linear struct { + startRate int + endRate int + steps int + currentRate float64 + currentStep int + delta float64 +} + +func NewLinearRamp(startRate, endRate, steps int) *Linear { + d := float64(endRate-startRate) / float64(steps-1) + return &Linear{ + startRate: startRate, + endRate: endRate, + steps: steps, + currentStep: 1, + currentRate: float64(startRate), + delta: d, + } +} + +func (l *Linear) NextRate() int { + if l.currentStep == l.steps { + return int(l.endRate) + } + if l.currentStep != 1 { + l.currentRate = l.currentRate + l.delta + } + l.currentStep += 1 + return int(math.Round(l.currentRate)) +} + +func (l *Linear) GetSteps() int { + return int(l.steps) +} + +func (l *Linear) GetType() string { + return "Linear ramp" +} diff --git a/pkg/ramping/linear_test.go b/pkg/ramping/linear_test.go new file mode 100644 index 0000000..3cd2e59 --- /dev/null +++ b/pkg/ramping/linear_test.go @@ -0,0 +1,96 @@ +package ramping + +import ( + "testing" +) + +func TestLinear_NextRate_scenario1(t *testing.T) { + l := NewLinearRamp(2, 50, 6) + tests := []struct { + name string + want int + }{ + {"step1", 2}, + {"step2", 12}, + {"step3", 21}, + {"step4", 31}, + {"step5", 40}, + {"step6", 50}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := l.NextRate(); got != tt.want { + 
t.Errorf("Linear.NextRate() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestLinear_NextRate_scenario2(t *testing.T) { + l := NewLinearRamp(1, 15, 9) + tests := []struct { + name string + want int + }{ + {"step1", 1}, + {"step2", 3}, + {"step3", 5}, + {"step4", 6}, + {"step5", 8}, + {"step6", 10}, + {"step7", 12}, + {"step8", 13}, + {"step9", 15}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := l.NextRate(); got != tt.want { + t.Errorf("Linear.NextRate() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestLinear_NextRate_scenario3(t *testing.T) { + l := NewLinearRamp(1, 10, 10) + tests := []struct { + name string + want int + }{ + {"step1", 1}, + {"step2", 2}, + {"step3", 3}, + {"step4", 4}, + {"step5", 5}, + {"step6", 6}, + {"step7", 7}, + {"step8", 8}, + {"step9", 9}, + {"step10", 10}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := l.NextRate(); got != tt.want { + t.Errorf("Linear.NextRate() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestLinear_GetSteps(t *testing.T) { + t.Run("testing GetSteps", func(t *testing.T) { + l := NewLinearRamp(1, 10, 7) + if got := l.GetSteps(); got != 7 { + t.Errorf("Linear.GetSteps() = %v, want %v", got, 7) + } + }) +} + +func TestLinear_GetType(t *testing.T) { + t.Run("testing GetType", func(t *testing.T) { + l := NewLinearRamp(1, 10, 7) + if got := l.GetType(); got != "Linear ramp" { + t.Errorf("Linear.GetType() = %v, want %v", got, "Linear ramp") + } + }) +} diff --git a/pkg/ramping/ramper.go b/pkg/ramping/ramper.go new file mode 100644 index 0000000..90b7261 --- /dev/null +++ b/pkg/ramping/ramper.go @@ -0,0 +1,27 @@ +package ramping + +type RampType int64 + +const ( + LinearRamp RampType = iota + ExponentialRamp +) + +type Ramper interface { + NextRate() int + GetSteps() int + GetType() string +} + +// NewRampingService when using None ramping +// send the rate in the minRate it will be the used +// to initialize NoneRamp +func 
NewRampingService(rampType RampType, startRate, endRate, steps int) Ramper { + switch rampType { + case LinearRamp: + return NewLinearRamp(startRate, endRate, steps) + case ExponentialRamp: + return NewExponentialRamp(startRate, endRate, steps) + } + return nil +} diff --git a/pkg/ramping/ramper_test.go b/pkg/ramping/ramper_test.go new file mode 100644 index 0000000..ab0805f --- /dev/null +++ b/pkg/ramping/ramper_test.go @@ -0,0 +1,28 @@ +package ramping + +import ( + "reflect" + "testing" +) + +func TestNewRampingService(t *testing.T) { + tests := []struct { + name string + rampType RampType + startRate int + endRate int + steps int + want Ramper + }{ + {"Linear", LinearRamp, 2, 10, 4, NewLinearRamp(2, 10, 4)}, + {"Exponential", ExponentialRamp, 2, 10, 4, NewExponentialRamp(2, 10, 4)}, + {"Error", 2, 2, 10, 4, nil}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NewRampingService(tt.rampType, tt.startRate, tt.endRate, tt.steps); !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewRampingService() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/tests/handlers/accounts.go b/pkg/tests/handlers/accounts.go index f915425..8202cae 100644 --- a/pkg/tests/handlers/accounts.go +++ b/pkg/tests/handlers/accounts.go @@ -2,6 +2,7 @@ package handlers import ( "bytes" + "context" "fmt" "net/http" "strings" @@ -17,12 +18,12 @@ import ( // TestRegisterNewCluster performs a load test on the endpoint responsible for // handling Registering New Clusters. This endpoint is typically used by Hive // and not directly accessed by most clients. 
-func TestRegisterNewCluster(options *types.TestOptions) error { +func TestRegisterNewCluster(ctx context.Context, options *types.TestOptions) error { testName := options.TestName // Fetch the authorization token and create a dynamic Target generator for // building valid HTTP Requests - targeter := generateClusterRegistrationTargeter(options) + targeter := generateClusterRegistrationTargeter(ctx, options) for res := range options.Attacker.Attack(targeter, options.Rate, options.Duration, testName) { options.Encoder.Encode(res) @@ -31,11 +32,11 @@ func TestRegisterNewCluster(options *types.TestOptions) error { return nil } -func TestRegisterExistingCluster(options *types.TestOptions) error { +func TestRegisterExistingCluster(ctx context.Context, options *types.TestOptions) error { testName := options.TestName quantity := options.Rate.Freq - targeter := generateClusterReRegistrationTargeter(quantity, options) + targeter := generateClusterReRegistrationTargeter(ctx, quantity, options) for res := range options.Attacker.Attack(targeter, options.Rate, options.Duration, testName) { options.Encoder.Encode(res) @@ -46,24 +47,24 @@ func TestRegisterExistingCluster(options *types.TestOptions) error { // getAuthorizationToken will fetch and return the current user's Authorization //Token which is required by certain endpoints such as Cluster Registration. -func getAuthorizationToken(options *types.TestOptions) string { +func getAuthorizationToken(ctx context.Context, options *types.TestOptions) string { result, err := options.Connection.AccountsMgmt().V1().AccessToken().Post().Send() if err != nil { - options.Logger.Error(options.Context, "Unable to retrieve authorization token: %s", err) + options.Logger.Error(ctx, "Unable to retrieve authorization token: %s", err) } body := result.Body().Auths() token := body["cloud.openshift.com"].Auth() if len(token) == 0 { - options.Logger.Warn(options.Context, "Authorization token appears to be empty. 
Other requests may not succeed.") + options.Logger.Warn(ctx, "Authorization token appears to be empty. Other requests may not succeed.") } else { - options.Logger.Info(options.Context, "Successfully fetched Authorization Token") + options.Logger.Info(ctx, "Successfully fetched Authorization Token") } return token } // generateClusterReRegistrationTargeter registers fake clusters and then // returns a targeter which uses those fake clusters. -func generateClusterReRegistrationTargeter(qty int, options *types.TestOptions) vegeta.Targeter { +func generateClusterReRegistrationTargeter(ctx context.Context, qty int, options *types.TestOptions) vegeta.Targeter { clusterIds := make([]string, qty) var currentTarget = 0 @@ -71,32 +72,32 @@ func generateClusterReRegistrationTargeter(qty int, options *types.TestOptions) // Cache the Authorization Token to avoid retrieving it with every request var authorizationToken = "" if len(authorizationToken) == 0 { - authorizationToken = getAuthorizationToken(options) + authorizationToken = getAuthorizationToken(ctx, options) } // Register multiple mock clusters and store their IDs - options.Logger.Info(options.Context, "Registering %d clusters to use for re-registration test", qty) + options.Logger.Info(ctx, "Registering %d clusters to use for re-registration test", qty) for i := range clusterIds { clusterID := uuid.NewV4().String() body, err := v1.NewClusterRegistrationRequest().AuthorizationToken(authorizationToken).ClusterID(clusterID).Build() if err != nil { - options.Logger.Fatal(options.Context, "Unable to build cluster registration request: %v", err) + options.Logger.Fatal(ctx, "Unable to build cluster registration request: %v", err) } var rawBody bytes.Buffer err = v1.MarshalClusterRegistrationRequest(body, &rawBody) if err != nil { - options.Logger.Fatal(options.Context, "Unable to serialize cluster registration request body: ", err) + options.Logger.Fatal(ctx, "Unable to serialize cluster registration request body: ", err) } 
resp, err := options.Connection.AccountsMgmt().V1().ClusterRegistrations().Post().Request(body).Send() if err != nil { - options.Logger.Fatal(options.Context, "Unable to register cluster: ", err) + options.Logger.Fatal(ctx, "Unable to register cluster: ", err) } - options.Logger.Info(options.Context, "[%d/%d] Registered Cluster: '%s'. Response: %d\n", i, len(clusterIds), clusterID, resp.Status()) + options.Logger.Info(ctx, "[%d/%d] Registered Cluster: '%s'. Response: %d\n", i, len(clusterIds), clusterID, resp.Status()) clusterIds[i] = clusterID // Avoid hitting rate limiting @@ -139,12 +140,12 @@ func generateClusterReRegistrationTargeter(qty int, options *types.TestOptions) // generateClusterRegistrationTargeter returns a targeter which will create a // unique Cluster Registration request body each time using a valid auth token // and a UUID for the Cluster's ID to ensure uniqueness. -func generateClusterRegistrationTargeter(options *types.TestOptions) vegeta.Targeter { +func generateClusterRegistrationTargeter(ctx context.Context, options *types.TestOptions) vegeta.Targeter { // Cache the Authorization Token to avoid retrieving it with every request var authorizationToken = "" if len(authorizationToken) == 0 { - authorizationToken = getAuthorizationToken(options) + authorizationToken = getAuthorizationToken(ctx, options) } targeter := func(t *vegeta.Target) error { @@ -173,7 +174,7 @@ func generateClusterRegistrationTargeter(options *types.TestOptions) vegeta.Targ } // Test quota cost -func TestQuotaCost(options *types.TestOptions) error { +func TestQuotaCost(ctx context.Context, options *types.TestOptions) error { conn := options.Connection @@ -192,15 +193,15 @@ func TestQuotaCost(options *types.TestOptions) error { return fmt.Errorf("no organizations where found for this account") } - options.Logger.Info(options.Context, "Using Organization id: %s.", orgID) + options.Logger.Info(ctx, "Using Organization id: %s.", orgID) options.Path = 
strings.Replace(options.Path, "{orgId}", orgID, 1) - return TestStaticEndpoint(options) + return TestStaticEndpoint(ctx, options) } // Test Cluster Authorizations -func TestClusterAuthorizations(options *types.TestOptions) error { +func TestClusterAuthorizations(ctx context.Context, options *types.TestOptions) error { targeter := func(t *vegeta.Target) error { @@ -208,7 +209,7 @@ func TestClusterAuthorizations(options *types.TestOptions) error { clusterId := uuid.NewV4().String() t.Method = http.MethodPost t.URL = options.Path - t.Body = clusterAuthorizationsBody(clusterId, options) + t.Body = clusterAuthorizationsBody(ctx, clusterId, options) return nil } @@ -221,7 +222,7 @@ func TestClusterAuthorizations(options *types.TestOptions) error { return nil } -func clusterAuthorizationsBody(clusterID string, options *types.TestOptions) []byte { +func clusterAuthorizationsBody(ctx context.Context, clusterID string, options *types.TestOptions) []byte { buff := &bytes.Buffer{} reservedResource := v1.NewReservedResource(). ResourceName(helpers.M5XLargeResource). @@ -241,13 +242,13 @@ func clusterAuthorizationsBody(clusterID string, options *types.TestOptions) []b Resources(reservedResource). 
Build() if err != nil { - options.Logger.Info(options.Context, "building `cluster-authorizations` request: %s", err) + options.Logger.Info(ctx, "building `cluster-authorizations` request: %s", err) return buff.Bytes() } err = v1.MarshalClusterAuthorizationRequest(clusterAuthReq, buff) if err != nil { - options.Logger.Error(options.Context, "marshaling `cluster-authorizations` request: %s", err) + options.Logger.Error(ctx, "marshaling `cluster-authorizations` request: %s", err) } return buff.Bytes() } diff --git a/pkg/tests/handlers/clusters.go b/pkg/tests/handlers/clusters.go index cffe201..73d8ab6 100644 --- a/pkg/tests/handlers/clusters.go +++ b/pkg/tests/handlers/clusters.go @@ -13,10 +13,10 @@ import ( vegeta "github.com/tsenart/vegeta/v12/lib" ) -func TestCreateCluster(options *types.TestOptions) error { +func TestCreateCluster(ctx context.Context, options *types.TestOptions) error { testName := options.TestName - targeter := generateCreateClusterTargeter(options.ID, options.Method, options.Path, options.Context, options.Logger) + targeter := generateCreateClusterTargeter(ctx, options.ID, options.Method, options.Path, options.Logger) for res := range options.Attacker.Attack(targeter, options.Rate, options.Duration, testName) { options.Encoder.Encode(res) @@ -28,7 +28,7 @@ func TestCreateCluster(options *types.TestOptions) error { // Generates a targeter for the "POST /api/clusters_mgmt/v1/clusters" endpoint // with monotonic increasing indexes. // The clusters created are "fake clusters", that is, do not consume any cloud-provider infrastructure. 
-func generateCreateClusterTargeter(ID, method, url string, ctx context.Context, log logging.Logger) vegeta.Targeter { +func generateCreateClusterTargeter(ctx context.Context, ID, method, url string, log logging.Logger) vegeta.Targeter { idx := 0 // This will take the first 4 characters of the UUID diff --git a/pkg/tests/handlers/static.go b/pkg/tests/handlers/static.go index 8f9aefc..134903d 100644 --- a/pkg/tests/handlers/static.go +++ b/pkg/tests/handlers/static.go @@ -1,11 +1,13 @@ package handlers import ( + "context" + "github.com/cloud-bulldozer/ocm-api-load/pkg/types" vegeta "github.com/tsenart/vegeta/v12/lib" ) -func TestStaticEndpoint(options *types.TestOptions) error { +func TestStaticEndpoint(ctx context.Context, options *types.TestOptions) error { // Specify the HTTP request(s) that will be executed target := vegeta.Target{ diff --git a/pkg/tests/run.go b/pkg/tests/run.go index 0060b22..ffb4c15 100644 --- a/pkg/tests/run.go +++ b/pkg/tests/run.go @@ -1,19 +1,50 @@ package tests import ( + "context" "fmt" + "math" "net/http" "time" + "github.com/cloud-bulldozer/ocm-api-load/pkg/config" "github.com/cloud-bulldozer/ocm-api-load/pkg/helpers" - "github.com/cloud-bulldozer/ocm-api-load/pkg/types" + "github.com/cloud-bulldozer/ocm-api-load/pkg/logging" + ramp "github.com/cloud-bulldozer/ocm-api-load/pkg/ramping" + sdk "github.com/openshift-online/ocm-sdk-go" "github.com/spf13/viper" vegeta "github.com/tsenart/vegeta/v12/lib" ) -func Run(tc types.TestConfiguration) error { - logger := tc.Logger +// Runner prepares config and runs tests +type Runner struct { + connection *sdk.Connection + logger logging.Logger + outputDirectory string + testID string +} + +func NewRunner(testID, outputDirectory string, logger logging.Logger, conn *sdk.Connection) *Runner { + return &Runner{ + connection: conn, + logger: logger, + outputDirectory: outputDirectory, + testID: testID, + } +} + +func (r 
*Runner) Run(ctx context.Context) error { + duration := viper.GetInt("duration") + cooldown := viper.GetInt("cooldown") + rate := viper.GetString("rate") + rampType := viper.GetString("ramp-type") + startRate := viper.GetInt("start-rate") + endRate := viper.GetInt("end-rate") + rampSteps := viper.GetInt("ramp-steps") + rampDuration := viper.GetInt("ramp-duration") + tests_conf := viper.Sub("tests") + confHelper := config.NewConfigHelper(r.logger, tests_conf) for i, t := range tests { // Check if the test is set to run if !tests_conf.InConfig(t.TestName) && !tests_conf.InConfig("all") { @@ -23,72 +54,110 @@ func Run(tc types.TestConfiguration) error { // Create an Attacker for each individual test. This is due to the // fact that vegeta (and compatible parsers, such as benchmark-wrapper) // expect the sequence to start at 0 for each result file. (Possibly a bug?) - connAttacker := vegeta.Client(&http.Client{Transport: tc.Connection}) + connAttacker := vegeta.Client(&http.Client{Transport: r.connection}) attacker := vegeta.NewAttacker(connAttacker) // Open a file and create an encoder that will be used to store the // results for each test. - fileName := fmt.Sprintf("%s_%s.json", tc.TestID, t.TestName) - resultsFile, err := helpers.CreateFile(fileName, tc.OutputDirectory) + fileName := fmt.Sprintf("%s_%s.json", r.testID, t.TestName) + resultsFile, err := helpers.CreateFile(fileName, r.outputDirectory) if err != nil { return err } encoder := vegeta.NewJSONEncoder(resultsFile) // Bind "Test Harness" - t.ID = tc.TestID + t.ID = r.testID t.Attacker = attacker - t.Connection = tc.Connection + t.Connection = r.connection t.Encoder = &encoder - t.Logger = logger - t.Context = tc.Ctx + t.Logger = r.logger // Create the vegeta rate with the config values - current_test_rate := tests_conf.GetString(fmt.Sprintf("%s.rate", t.TestName)) - if current_test_rate == "" { - logger.Info(tc.Ctx, "no specific rate for test %s. 
Using default", t.TestName) - t.Rate = tc.Rate - } else { - r, err := helpers.ParseRate(current_test_rate) - if err != nil { - logger.Warn(tc.Ctx, - "error parsing rate for test %s: %s. Using default", - t.TestName, - current_test_rate) - t.Rate = tc.Rate - } else { - t.Rate = r - } + currentTestRate := confHelper.ResolveStringConfig(ctx, rate, fmt.Sprintf("%s.rate", t.TestName)) + rate, err := helpers.ParseRate(currentTestRate) + if err != nil { + r.logger.Warn(ctx, + "error parsing rate for test %s: %s. Using default", + t.TestName, + currentTestRate) } + t.Rate = rate // Check for an override on the test duration - dur := tests_conf.GetInt(fmt.Sprintf("%s.duration", t.TestName)) - if dur == 0 { - // Using default - t.Duration = tc.Duration - } else { - t.Duration = time.Duration(dur) * time.Minute + currentTestDuration := confHelper.ResolveIntConfig(ctx, duration, fmt.Sprintf("%s.duration", t.TestName)) + t.Duration = time.Duration(currentTestDuration) * time.Minute + + var ramper ramp.Ramper + currentRampDuration := 0 + remainingDuration := 0 + currentTestRamp := confHelper.ResolveStringConfig(ctx, rampType, fmt.Sprintf("%s.ramp-type", t.TestName)) + if currentTestRamp != "" { + currentStartRate := confHelper.ResolveIntConfig(ctx, startRate, fmt.Sprintf("%s.start-rate", t.TestName)) + currentEndRate := confHelper.ResolveIntConfig(ctx, endRate, fmt.Sprintf("%s.end-rate", t.TestName)) + currentSteps := confHelper.ResolveIntConfig(ctx, rampSteps, fmt.Sprintf("%s.ramp-steps", t.TestName)) + currentRampDuration = confHelper.ResolveIntConfig(ctx, rampDuration, fmt.Sprintf("%s.ramp-duration", t.TestName)) + r.logger.Info(ctx, "Validating Ramp configuration for test %s", t.TestName) + if !confHelper.ValidateRampConfig(ctx, currentStartRate, currentEndRate, currentSteps) { + currentTestRamp = "" + } + switch currentTestRamp { + case "linear": + ramper = ramp.NewRampingService(ramp.LinearRamp, currentStartRate, currentEndRate, currentSteps) + case "exponential": + ramper 
= ramp.NewRampingService(ramp.ExponentialRamp, currentStartRate, currentEndRate, currentSteps) + } } - logger.Info(tc.Ctx, "Executing Test: %s", t.TestName) - logger.Info(tc.Ctx, "Rate: %s", t.Rate.String()) - logger.Info(tc.Ctx, "Duration: %s", t.Duration.String()) - logger.Info(tc.Ctx, "Endpoint: %s", t.Path) - err = t.Handler(&t) - if err != nil { - return err + if ramper == nil { + r.logger.Info(ctx, "Executing Test: %s", t.TestName) + r.logger.Info(ctx, "Rate: %s", t.Rate.String()) + r.logger.Info(ctx, "Duration: %s", t.Duration.String()) + r.logger.Info(ctx, "Endpoint: %s", t.Path) + err = t.Handler(ctx, &t) + if err != nil { + return err + } + } else { + r.logger.Info(ctx, "Executing Test: %s", t.TestName) + r.logger.Info(ctx, "Ramp type: %s", ramper.GetType()) + r.logger.Info(ctx, "Endpoint: %s", t.Path) + if currentRampDuration == 0 { + duration := math.Round(t.Duration.Minutes() / float64(ramper.GetSteps())) + t.Duration = time.Duration(duration) * time.Minute + } else { + remainingDuration = int(t.Duration.Minutes()) - currentRampDuration + duration := math.Round(float64(currentRampDuration) / float64(ramper.GetSteps())) + t.Duration = time.Duration(duration) * time.Minute + } + + for i := 0; i < ramper.GetSteps(); i++ { + r.logger.Info(ctx, "Ramping up... 
step %v", i+1) + rateInt := ramper.NextRate() + newRate, _ := helpers.ParseRate(fmt.Sprint(rateInt)) + t.Rate = newRate + if i+1 == ramper.GetSteps() && remainingDuration > 0 { + t.Duration = t.Duration + (time.Duration(remainingDuration) * time.Minute) + } + r.logger.Info(ctx, "Rate: %s", t.Rate.String()) + r.logger.Info(ctx, "Duration: %s", t.Duration.String()) + err = t.Handler(ctx, &t) + if err != nil { + return err + } + } } // Cleanup (cannot defer as it must happen for each test in the loop) - logger.Info(tc.Ctx, "Results written to: %s", fileName) + r.logger.Info(ctx, "Results written to: %s", fileName) err = resultsFile.Close() if err != nil { return err } - if i+1 < len(tests_conf.AllSettings()) { - logger.Info(tc.Ctx, "Cooling down for next test for: %v s", tc.Cooldown.Seconds()) - time.Sleep(tc.Cooldown) + if i < len(tests_conf.AllSettings()) { + r.logger.Info(ctx, "Cooling down for next test for: %v s", cooldown) + time.Sleep(time.Duration(cooldown) * time.Second) } } return nil diff --git a/pkg/types/test_config.go b/pkg/types/test_config.go index 67e1d45..2b53874 100644 --- a/pkg/types/test_config.go +++ b/pkg/types/test_config.go @@ -1,22 +1,14 @@ package types import ( - "context" "time" - "github.com/cloud-bulldozer/ocm-api-load/pkg/logging" - sdk "github.com/openshift-online/ocm-sdk-go" vegeta "github.com/tsenart/vegeta/v12/lib" ) // TestConfiguration type TestConfiguration struct { - TestID string - OutputDirectory string - Duration time.Duration - Cooldown time.Duration - Rate vegeta.Rate - Connection *sdk.Connection - Logger logging.Logger - Ctx context.Context + Duration time.Duration + Cooldown time.Duration + Rate vegeta.Rate } diff --git a/pkg/types/test_options.go b/pkg/types/test_options.go index 3184f4f..8135c9e 100644 --- a/pkg/types/test_options.go +++ b/pkg/types/test_options.go @@ -24,11 +24,10 @@ type TestOptions struct { Duration time.Duration // Test "Infrastructure" - ID string // Unique UUID of a given 
test-suite execution. - Handler func(*TestOptions) (err error) // Function which tests the given endpoint + ID string // Unique UUID of a given test-suite execution. + Handler func(context.Context, *TestOptions) (err error) // Function which tests the given endpoint Attacker *vegeta.Attacker Connection *sdk.Connection Encoder *vegeta.Encoder // Encodes results and writes them to a File Logger logging.Logger - Context context.Context }