['EndpointActions']['data'], 'output'>;
+
const getOutputDataIfNeeded = (action: ActionDetails): ResponseOutput => {
const commentUppercase = (action?.comment ?? '').toUpperCase();
diff --git a/x-pack/plugins/security_solution/scripts/openapi/bundle_detections.js b/x-pack/plugins/security_solution/scripts/openapi/bundle_detections.js
index e2df0d47f5b47..f79437c33222c 100644
--- a/x-pack/plugins/security_solution/scripts/openapi/bundle_detections.js
+++ b/x-pack/plugins/security_solution/scripts/openapi/bundle_detections.js
@@ -11,34 +11,36 @@ const { join, resolve } = require('path');
const ROOT = resolve(__dirname, '../..');
-bundle({
- sourceGlob: join(ROOT, 'common/api/detection_engine/**/*.schema.yaml'),
- outputFilePath: join(
- ROOT,
- 'docs/openapi/serverless/security_solution_detections_api_{version}.bundled.schema.yaml'
- ),
- options: {
- includeLabels: ['serverless'],
- specInfo: {
- title: 'Security Solution Detections API (Elastic Cloud Serverless)',
- description:
- 'You can create rules that automatically turn events and external alerts sent to Elastic Security into detection alerts. These alerts are displayed on the Detections page.',
+(async () => {
+ await bundle({
+ sourceGlob: join(ROOT, 'common/api/detection_engine/**/*.schema.yaml'),
+ outputFilePath: join(
+ ROOT,
+ 'docs/openapi/serverless/security_solution_detections_api_{version}.bundled.schema.yaml'
+ ),
+ options: {
+ includeLabels: ['serverless'],
+ specInfo: {
+ title: 'Security Solution Detections API (Elastic Cloud Serverless)',
+ description:
+ 'You can create rules that automatically turn events and external alerts sent to Elastic Security into detection alerts. These alerts are displayed on the Detections page.',
+ },
},
- },
-});
+ });
-bundle({
- sourceGlob: join(ROOT, 'common/api/detection_engine/**/*.schema.yaml'),
- outputFilePath: join(
- ROOT,
- 'docs/openapi/ess/security_solution_detections_api_{version}.bundled.schema.yaml'
- ),
- options: {
- includeLabels: ['ess'],
- specInfo: {
- title: 'Security Solution Detections API (Elastic Cloud and self-hosted)',
- description:
- 'You can create rules that automatically turn events and external alerts sent to Elastic Security into detection alerts. These alerts are displayed on the Detections page.',
+ await bundle({
+ sourceGlob: join(ROOT, 'common/api/detection_engine/**/*.schema.yaml'),
+ outputFilePath: join(
+ ROOT,
+ 'docs/openapi/ess/security_solution_detections_api_{version}.bundled.schema.yaml'
+ ),
+ options: {
+ includeLabels: ['ess'],
+ specInfo: {
+ title: 'Security Solution Detections API (Elastic Cloud and self-hosted)',
+ description:
+ 'You can create rules that automatically turn events and external alerts sent to Elastic Security into detection alerts. These alerts are displayed on the Detections page.',
+ },
},
- },
-});
+ });
+})();
diff --git a/x-pack/plugins/security_solution/server/endpoint/mocks/mocks.ts b/x-pack/plugins/security_solution/server/endpoint/mocks/mocks.ts
index 9b6f001934910..141a5ebb440f6 100644
--- a/x-pack/plugins/security_solution/server/endpoint/mocks/mocks.ts
+++ b/x-pack/plugins/security_solution/server/endpoint/mocks/mocks.ts
@@ -267,6 +267,8 @@ export interface HttpApiTestSetupMock {
getRegisteredRouteHandler: (method: RouterMethod, path: string) => RequestHandler;
/** Retrieves the route handler configuration that was registered with the router */
getRegisteredRouteConfig: (method: RouterMethod, path: string) => RouteConfig;
+ /** Sets endpoint authz overrides on the data returned by `EndpointAppContext.services.getEndpointAuthz()` */
+ setEndpointAuthz: (overrides: Partial<EndpointAuthz>) => void;
/** Get a registered versioned route */
getRegisteredVersionedRoute: (
method: RouterMethod,
@@ -287,8 +289,9 @@ export const createHttpApiTestSetupMock = (): HttpApiTestSetupMock => {
const endpointAppContextMock = createMockEndpointAppContext();
const scopedEsClusterClientMock = elasticsearchServiceMock.createScopedClusterClient();
const savedObjectClientMock = savedObjectsClientMock.create();
+ const endpointAuthz = getEndpointAuthzInitialStateMock();
const httpHandlerContextMock = requestContextMock.convertContext(
- createRouteHandlerContext(scopedEsClusterClientMock, savedObjectClientMock)
+ createRouteHandlerContext(scopedEsClusterClientMock, savedObjectClientMock, { endpointAuthz })
);
const httpResponseMock = httpServerMock.createResponseFactory();
const getRegisteredRouteHandler: HttpApiTestSetupMock['getRegisteredRouteHandler'] = (
@@ -321,6 +324,11 @@ export const createHttpApiTestSetupMock = (): HttpApiTestSetupMock => {
return handler[0];
};
+ const setEndpointAuthz = (overrides: Partial<EndpointAuthz>) => {
+ Object.assign(endpointAuthz, overrides);
+ };
+
+ (endpointAppContextMock.service.getEndpointAuthz as jest.Mock).mockResolvedValue(endpointAuthz);
return {
routerMock,
@@ -348,6 +356,7 @@ export const createHttpApiTestSetupMock = (): HttpApiTestSetupMock => {
getRegisteredRouteHandler,
getRegisteredRouteConfig,
+ setEndpointAuthz,
getRegisteredVersionedRoute: getRegisteredVersionedRouteMock.bind(null, routerMock),
};
diff --git a/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_download_handler.test.ts b/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_download_handler.test.ts
index 2cc6d8efd199e..050de9019f21e 100644
--- a/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_download_handler.test.ts
+++ b/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_download_handler.test.ts
@@ -46,6 +46,7 @@ describe('Response Actions file download API', () => {
const actionRequestEsSearchResponse = createActionRequestsEsSearchResultsMock();
actionRequestEsSearchResponse.hits.hits[0]._source!.EndpointActions.action_id = '321-654';
+ actionRequestEsSearchResponse.hits.hits[0]._source!.EndpointActions.data.command = 'get-file';
applyEsClientSearchMock({
esClientMock,
diff --git a/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_download_handler.ts b/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_download_handler.ts
index 7095b7d87a50c..2e16c57886f7d 100644
--- a/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_download_handler.ts
+++ b/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_download_handler.ts
@@ -6,6 +6,7 @@
*/
import type { RequestHandler } from '@kbn/core/server';
+import { ensureUserHasAuthzToFilesForAction } from './utils';
import type { EndpointActionFileDownloadParams } from '../../../../common/api/endpoint';
import { EndpointActionFileDownloadSchema } from '../../../../common/api/endpoint';
import type { ResponseActionsClient } from '../../services';
@@ -47,9 +48,10 @@ export const registerActionFileDownloadRoutes = (
},
},
withEndpointAuthz(
- { any: ['canWriteFileOperations', 'canWriteExecuteOperations'] },
+ { any: ['canWriteFileOperations', 'canWriteExecuteOperations', 'canGetRunningProcesses'] },
logger,
- getActionFileDownloadRouteHandler(endpointContext)
+ getActionFileDownloadRouteHandler(endpointContext),
+ ensureUserHasAuthzToFilesForAction
)
);
};
diff --git a/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_info_handler.test.ts b/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_info_handler.test.ts
index e9914dc4232d9..b2866f7cca263 100644
--- a/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_info_handler.test.ts
+++ b/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_info_handler.test.ts
@@ -42,6 +42,7 @@ describe('Response Action file info API', () => {
const actionRequestEsSearchResponse = createActionRequestsEsSearchResultsMock();
actionRequestEsSearchResponse.hits.hits[0]._source!.EndpointActions.action_id = '321-654';
+ actionRequestEsSearchResponse.hits.hits[0]._source!.EndpointActions.data.command = 'get-file';
applyEsClientSearchMock({
esClientMock,
diff --git a/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_info_handler.ts b/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_info_handler.ts
index a84f3b3a8bf6f..1cb4e95e1eaf1 100644
--- a/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_info_handler.ts
+++ b/x-pack/plugins/security_solution/server/endpoint/routes/actions/file_info_handler.ts
@@ -6,6 +6,7 @@
*/
import type { RequestHandler } from '@kbn/core/server';
+import { ensureUserHasAuthzToFilesForAction } from './utils';
import { stringify } from '../../utils/stringify';
import type { EndpointActionFileInfoParams } from '../../../../common/api/endpoint';
import { EndpointActionFileInfoSchema } from '../../../../common/api/endpoint';
@@ -83,9 +84,10 @@ export const registerActionFileInfoRoute = (
},
},
withEndpointAuthz(
- { any: ['canWriteFileOperations', 'canWriteExecuteOperations'] },
+ { any: ['canWriteFileOperations', 'canWriteExecuteOperations', 'canGetRunningProcesses'] },
endpointContext.logFactory.get('actionFileInfo'),
- getActionFileInfoRouteHandler(endpointContext)
+ getActionFileInfoRouteHandler(endpointContext),
+ ensureUserHasAuthzToFilesForAction
)
);
};
diff --git a/x-pack/plugins/security_solution/server/endpoint/routes/actions/utils.test.ts b/x-pack/plugins/security_solution/server/endpoint/routes/actions/utils.test.ts
new file mode 100644
index 0000000000000..eaf05e972943c
--- /dev/null
+++ b/x-pack/plugins/security_solution/server/endpoint/routes/actions/utils.test.ts
@@ -0,0 +1,80 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import type { HttpApiTestSetupMock } from '../../mocks';
+import { createHttpApiTestSetupMock } from '../../mocks';
+import type { LogsEndpointAction } from '../../../../common/endpoint/types';
+import { EndpointActionGenerator } from '../../../../common/endpoint/data_generators/endpoint_action_generator';
+import { applyEsClientSearchMock } from '../../mocks/utils.mock';
+import { ENDPOINT_ACTIONS_INDEX } from '../../../../common/endpoint/constants';
+import { ensureUserHasAuthzToFilesForAction } from './utils';
+import type { Mutable } from 'utility-types';
+import type { KibanaRequest } from '@kbn/core-http-server';
+
+describe('Route utilities', () => {
+ describe('#ensureUserHasAuthzToFilesForAction()', () => {
+ let testSetupMock: HttpApiTestSetupMock;
+ let actionRequestMock: LogsEndpointAction;
+ let httpRequestMock: Mutable<KibanaRequest<{ action_id: string }>>;
+
+ beforeEach(() => {
+ const endpointGenerator = new EndpointActionGenerator('seed');
+
+ actionRequestMock = endpointGenerator.generate();
+ testSetupMock = createHttpApiTestSetupMock();
+
+ httpRequestMock = testSetupMock.createRequestMock({
+ params: { action_id: actionRequestMock.EndpointActions.action_id },
+ });
+
+ applyEsClientSearchMock({
+ esClientMock: testSetupMock.getEsClientMock(),
+ index: ENDPOINT_ACTIONS_INDEX,
+ response: endpointGenerator.toEsSearchResponse([
+ endpointGenerator.toEsSearchHit(actionRequestMock),
+ ]),
+ });
+ });
+
+ it.each`
+ command | authzKey | agentType
+ ${'get-file'} | ${'canWriteFileOperations'} | ${'endpoint'}
+ ${'execute'} | ${'canWriteExecuteOperations'} | ${'endpoint'}
+ ${'running-processes'} | ${'canGetRunningProcesses'} | ${'sentinel_one'}
+ `(
+ 'should throw when user is not authorized to `$command` for $agentType',
+ async ({ command, authzKey, agentType }) => {
+ testSetupMock.setEndpointAuthz({ [authzKey]: false });
+ actionRequestMock.EndpointActions.data.command = command;
+ actionRequestMock.EndpointActions.input_type = agentType;
+
+ await expect(() =>
+ ensureUserHasAuthzToFilesForAction(testSetupMock.httpHandlerContextMock, httpRequestMock)
+ ).rejects.toThrow('Endpoint authorization failure');
+ }
+ );
+
+ it('should throw when response action is not supported by agent type', async () => {
+ actionRequestMock.EndpointActions.input_type = 'sentinel_one';
+ actionRequestMock.EndpointActions.data.command = 'execute';
+
+ await expect(() =>
+ ensureUserHasAuthzToFilesForAction(testSetupMock.httpHandlerContextMock, httpRequestMock)
+ ).rejects.toThrow('Response action [execute] not supported for agent type [sentinel_one]');
+ });
+
+ it('should throw when response action does not support access to files', async () => {
+ actionRequestMock.EndpointActions.data.command = 'running-processes';
+
+ await expect(() =>
+ ensureUserHasAuthzToFilesForAction(testSetupMock.httpHandlerContextMock, httpRequestMock)
+ ).rejects.toThrow(
+ 'Response action [running-processes] for agent type [endpoint] does not support file downloads'
+ );
+ });
+ });
+});
diff --git a/x-pack/plugins/security_solution/server/endpoint/routes/actions/utils.ts b/x-pack/plugins/security_solution/server/endpoint/routes/actions/utils.ts
new file mode 100644
index 0000000000000..92033801e71b6
--- /dev/null
+++ b/x-pack/plugins/security_solution/server/endpoint/routes/actions/utils.ts
@@ -0,0 +1,135 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import type { KibanaRequest } from '@kbn/core-http-server';
+import { deepFreeze } from '@kbn/std';
+import { get } from 'lodash';
+import { CustomHttpRequestError } from '../../../utils/custom_http_request_error';
+import { isActionSupportedByAgentType } from '../../../../common/endpoint/service/response_actions/is_response_action_supported';
+import { EndpointAuthorizationError } from '../../errors';
+import { fetchActionRequestById } from '../../services/actions/utils/fetch_action_request_by_id';
+import type { SecuritySolutionRequestHandlerContext } from '../../../types';
+import type {
+ ResponseActionAgentType,
+ ResponseActionsApiCommandNames,
+} from '../../../../common/endpoint/service/response_actions/constants';
+
+type CommandsWithFileAccess = Readonly<
+ Record<ResponseActionsApiCommandNames, Record<ResponseActionAgentType, boolean>>
+>;
+
+// FYI: this object here should help to quickly catch instances where we might forget to update the
+// authz on the file info/download apis when a response action needs to support file downloads.
+const COMMANDS_WITH_ACCESS_TO_FILES: CommandsWithFileAccess = deepFreeze({
+ 'get-file': {
+ endpoint: true,
+ sentinel_one: true,
+ crowdstrike: false,
+ },
+ execute: {
+ endpoint: true,
+ sentinel_one: false,
+ crowdstrike: false,
+ },
+ 'running-processes': {
+ endpoint: false,
+ sentinel_one: true,
+ crowdstrike: false,
+ },
+ upload: {
+ endpoint: false,
+ sentinel_one: false,
+ crowdstrike: false,
+ },
+ scan: {
+ endpoint: false,
+ sentinel_one: false,
+ crowdstrike: false,
+ },
+ isolate: {
+ endpoint: false,
+ sentinel_one: false,
+ crowdstrike: false,
+ },
+ unisolate: {
+ endpoint: false,
+ sentinel_one: false,
+ crowdstrike: false,
+ },
+ 'kill-process': {
+ endpoint: false,
+ sentinel_one: false,
+ crowdstrike: false,
+ },
+ 'suspend-process': {
+ endpoint: false,
+ sentinel_one: false,
+ crowdstrike: false,
+ },
+});
+
+/**
+ * Checks to ensure that the user has the correct authz for the response action associated with the action id.
+ *
+ * FYI: Additional check is needed because the File info and download APIs are used by multiple response actions,
+ * thus we want to ensure that we don't allow access to file associated with response actions the user does
+ * not have authz to.
+ *
+ * @param context
+ * @param request
+ */
+export const ensureUserHasAuthzToFilesForAction = async (
+ context: SecuritySolutionRequestHandlerContext,
+ request: KibanaRequest
+): Promise<void> => {
+ const userAuthz = await (await context.securitySolution).getEndpointAuthz();
+ const coreContext = await context.core;
+ const esClient = coreContext.elasticsearch.client.asInternalUser;
+ const { action_id: actionId } = request.params as { action_id: string };
+ const {
+ EndpointActions: {
+ data: { command },
+ input_type: agentType,
+ },
+ } = await fetchActionRequestById(esClient, actionId);
+
+ // Check if command is supported by the agent type
+ if (!isActionSupportedByAgentType(agentType, command, 'manual')) {
+ throw new CustomHttpRequestError(
+ `Response action [${command}] not supported for agent type [${agentType}]`,
+ 400
+ );
+ }
+
+ // Check if the command is marked as having access to files
+ if (!get(COMMANDS_WITH_ACCESS_TO_FILES, `${command}.${agentType}`, false)) {
+ throw new CustomHttpRequestError(
+ `Response action [${command}] for agent type [${agentType}] does not support file downloads`,
+ 400
+ );
+ }
+
+ let hasAuthzToCommand = false;
+
+ switch (command) {
+ case 'get-file':
+ hasAuthzToCommand = userAuthz.canWriteFileOperations;
+ break;
+
+ case 'execute':
+ hasAuthzToCommand = userAuthz.canWriteExecuteOperations;
+ break;
+
+ case 'running-processes':
+ hasAuthzToCommand = userAuthz.canGetRunningProcesses;
+ break;
+ }
+
+ if (!hasAuthzToCommand) {
+ throw new EndpointAuthorizationError();
+ }
+};
diff --git a/x-pack/plugins/security_solution/server/endpoint/routes/error_handler.ts b/x-pack/plugins/security_solution/server/endpoint/routes/error_handler.ts
index a14303e0004ee..ca8602e0969d1 100644
--- a/x-pack/plugins/security_solution/server/endpoint/routes/error_handler.ts
+++ b/x-pack/plugins/security_solution/server/endpoint/routes/error_handler.ts
@@ -8,7 +8,7 @@
import type { IKibanaResponse, KibanaResponseFactory, Logger } from '@kbn/core/server';
import { FleetFileNotFound } from '@kbn/fleet-plugin/server/errors';
import { CustomHttpRequestError } from '../../utils/custom_http_request_error';
-import { NotFoundError } from '../errors';
+import { EndpointAuthorizationError, NotFoundError } from '../errors';
import { EndpointHostUnEnrolledError, EndpointHostNotFoundError } from '../services/metadata';
/**
@@ -51,6 +51,10 @@ export const errorHandler = (
return res.notFound({ body: error });
}
+ if (error instanceof EndpointAuthorizationError) {
+ return res.forbidden({ body: error });
+ }
+
// Kibana CORE will take care of `500` errors when the handler `throw`'s, including logging the error
throw error;
};
diff --git a/x-pack/plugins/security_solution/server/endpoint/routes/with_endpoint_authz.test.ts b/x-pack/plugins/security_solution/server/endpoint/routes/with_endpoint_authz.test.ts
index 573b8dc9cbae5..d5cccedf7bb95 100644
--- a/x-pack/plugins/security_solution/server/endpoint/routes/with_endpoint_authz.test.ts
+++ b/x-pack/plugins/security_solution/server/endpoint/routes/with_endpoint_authz.test.ts
@@ -11,7 +11,7 @@ import { requestContextMock } from '../../lib/detection_engine/routes/__mocks__'
import type { EndpointApiNeededAuthz } from './with_endpoint_authz';
import { withEndpointAuthz } from './with_endpoint_authz';
import type { EndpointAuthz } from '../../../common/endpoint/types/authz';
-import { EndpointAuthorizationError } from '../errors';
+import { EndpointAuthorizationError, NotFoundError } from '../errors';
import { getEndpointAuthzInitialStateMock } from '../../../common/endpoint/service/authz/mocks';
describe('When using `withEndpointAuthz()`', () => {
@@ -105,4 +105,37 @@ describe('When using `withEndpointAuthz()`', () => {
body: expect.any(EndpointAuthorizationError),
});
});
+
+ it('should call additionalChecks callback if defined', async () => {
+ const additionalChecks = jest.fn();
+ const routeContextMock = coreMock.createCustomRequestHandlerContext(mockContext);
+ await withEndpointAuthz(
+ { any: ['canGetRunningProcesses'] },
+ logger,
+ mockRequestHandler,
+ additionalChecks
+ )(routeContextMock, mockRequest, mockResponse);
+
+ expect(additionalChecks).toHaveBeenCalledWith(routeContextMock, mockRequest);
+ expect(mockRequestHandler).toHaveBeenCalled();
+ });
+
+ it('should deny access if additionalChecks callback throws an error', async () => {
+ const error = new NotFoundError('something happen');
+ const additionalChecks = jest.fn(async () => {
+ throw error;
+ });
+ const routeContextMock = coreMock.createCustomRequestHandlerContext(mockContext);
+ await withEndpointAuthz(
+ { any: ['canGetRunningProcesses'] },
+ logger,
+ mockRequestHandler,
+ additionalChecks
+ )(routeContextMock, mockRequest, mockResponse);
+
+ expect(mockRequestHandler).not.toHaveBeenCalled();
+ expect(mockResponse.notFound).toHaveBeenCalledWith({
+ body: error,
+ });
+ });
});
diff --git a/x-pack/plugins/security_solution/server/endpoint/routes/with_endpoint_authz.ts b/x-pack/plugins/security_solution/server/endpoint/routes/with_endpoint_authz.ts
index a241148c7b714..e42064488aa59 100644
--- a/x-pack/plugins/security_solution/server/endpoint/routes/with_endpoint_authz.ts
+++ b/x-pack/plugins/security_solution/server/endpoint/routes/with_endpoint_authz.ts
@@ -5,7 +5,8 @@
* 2.0.
*/
-import type { RequestHandler, Logger } from '@kbn/core/server';
+import type { RequestHandler, KibanaRequest, Logger } from '@kbn/core/server';
+import { errorHandler } from './error_handler';
import { stringify } from '../utils/stringify';
import type { EndpointAuthzKeyList } from '../../../common/endpoint/types/authz';
import type { SecuritySolutionRequestHandlerContext } from '../../types';
@@ -29,11 +30,16 @@ export interface EndpointApiNeededAuthz {
* @param neededAuthz
* @param routeHandler
* @param logger
+ * @param additionalChecks
*/
export const withEndpointAuthz = <T>(
neededAuthz: EndpointApiNeededAuthz,
logger: Logger,
- routeHandler: T
+ routeHandler: T,
+ additionalChecks?: (
+ context: SecuritySolutionRequestHandlerContext,
+ request: KibanaRequest
+ ) => void | Promise<void>
): T => {
const needAll: EndpointAuthzKeyList = neededAuthz.all ?? [];
const needAny: EndpointAuthzKeyList = neededAuthz.any ?? [];
@@ -104,6 +110,16 @@ export const withEndpointAuthz = (
}
}
+ if (additionalChecks) {
+ try {
+ await additionalChecks(context, request);
+ } catch (err) {
+ logger.debug(() => stringify(err));
+
+ return errorHandler(logger, response, err);
+ }
+ }
+
// Authz is good call the route handler
return (routeHandler as unknown as RequestHandler)(context, request, response);
};
diff --git a/x-pack/plugins/stack_alerts/common/constants.ts b/x-pack/plugins/stack_alerts/common/constants.ts
index b8f15a1e3c1b1..37d428ac4bce6 100644
--- a/x-pack/plugins/stack_alerts/common/constants.ts
+++ b/x-pack/plugins/stack_alerts/common/constants.ts
@@ -7,6 +7,7 @@
export const MAX_SELECTABLE_GROUP_BY_TERMS = 4;
export const MAX_SELECTABLE_SOURCE_FIELDS = 5;
+export const MAX_HITS_FOR_GROUP_BY = 100;
const HOST_NAME = 'host.name';
const HOST_HOSTNAME = 'host.hostname';
diff --git a/x-pack/plugins/stack_alerts/public/rule_types/es_query/validation.test.ts b/x-pack/plugins/stack_alerts/public/rule_types/es_query/validation.test.ts
index 6b3668a023292..80aa0c7352b97 100644
--- a/x-pack/plugins/stack_alerts/public/rule_types/es_query/validation.test.ts
+++ b/x-pack/plugins/stack_alerts/public/rule_types/es_query/validation.test.ts
@@ -419,4 +419,25 @@ describe('expression params validation', () => {
'Cannot select more than 5 fields'
);
});
+
+ test('if groupBy is defined and size is greater than max allowed, should return proper error message', () => {
+ const initialParams: EsQueryRuleParams = {
+ index: ['test'],
+ esQuery: `{\n \"query\":{\n \"match_all\" : {}\n }\n}`,
+ size: 101,
+ timeWindowSize: 1,
+ timeWindowUnit: 's',
+ threshold: [0],
+ timeField: '',
+ excludeHitsFromPreviousRun: true,
+ aggType: 'count',
+ groupBy: 'top',
+ termSize: 5,
+ termField: ['term'],
+ };
+ expect(validateExpression(initialParams).errors.size.length).toBeGreaterThan(0);
+ expect(validateExpression(initialParams).errors.size[0]).toBe(
+ 'Size cannot exceed 100 when using a group by field.'
+ );
+ });
});
diff --git a/x-pack/plugins/stack_alerts/public/rule_types/es_query/validation.ts b/x-pack/plugins/stack_alerts/public/rule_types/es_query/validation.ts
index c8119110d76a2..8d68d7711e765 100644
--- a/x-pack/plugins/stack_alerts/public/rule_types/es_query/validation.ts
+++ b/x-pack/plugins/stack_alerts/public/rule_types/es_query/validation.ts
@@ -19,6 +19,7 @@ import {
MAX_SELECTABLE_GROUP_BY_TERMS,
ES_QUERY_MAX_HITS_PER_EXECUTION_SERVERLESS,
ES_QUERY_MAX_HITS_PER_EXECUTION,
+ MAX_HITS_FOR_GROUP_BY,
} from '../../../common/constants';
import { EsQueryRuleParams, SearchType } from './types';
import { isEsqlQueryRule, isSearchSourceRule } from './util';
@@ -75,6 +76,21 @@ const validateCommonParams = (ruleParams: EsQueryRuleParams, isServerless?: bool
);
}
+ if (
+ groupBy &&
+ builtInGroupByTypes[groupBy] &&
+ builtInGroupByTypes[groupBy].sizeRequired &&
+ size &&
+ size > MAX_HITS_FOR_GROUP_BY
+ ) {
+ errors.size.push(
+ i18n.translate('xpack.stackAlerts.esQuery.ui.validation.error.sizeTooLargeForGroupByText', {
+ defaultMessage: 'Size cannot exceed {max} when using a group by field.',
+ values: { max: MAX_HITS_FOR_GROUP_BY },
+ })
+ );
+ }
+
if (
groupBy &&
builtInGroupByTypes[groupBy].validNormalizedTypes &&
diff --git a/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_es_query.test.ts b/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_es_query.test.ts
index 46b3bb2ff495f..6a97832c1ae57 100644
--- a/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_es_query.test.ts
+++ b/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_es_query.test.ts
@@ -377,4 +377,106 @@ describe('fetchEsQuery', () => {
{ meta: true }
);
});
+
+ it('should log if group by and top hits size is too large', async () => {
+ const params = {
+ ...defaultParams,
+ groupBy: 'top',
+ termField: 'host.name',
+ termSize: 10,
+ size: 200,
+ };
+ const date = new Date().toISOString();
+
+ await fetchEsQuery({
+ ruleId: 'abc',
+ name: 'test-rule',
+ params,
+ timestamp: undefined,
+ services,
+ spacePrefix: '',
+ publicBaseUrl: '',
+ dateStart: date,
+ dateEnd: date,
+ });
+ expect(logger.warn).toHaveBeenCalledWith(`Top hits size is capped at 100`);
+ expect(scopedClusterClientMock.asCurrentUser.search).toHaveBeenCalledWith(
+ {
+ allow_no_indices: true,
+ body: {
+ aggs: {
+ groupAgg: {
+ aggs: {
+ conditionSelector: {
+ bucket_selector: {
+ buckets_path: {
+ compareValue: '_count',
+ },
+ script: 'params.compareValue < 0L',
+ },
+ },
+ topHitsAgg: {
+ top_hits: {
+ size: 100,
+ },
+ },
+ },
+ terms: {
+ field: 'host.name',
+ size: 10,
+ },
+ },
+ groupAggCount: {
+ stats_bucket: {
+ buckets_path: 'groupAgg._count',
+ },
+ },
+ },
+ docvalue_fields: [
+ {
+ field: '@timestamp',
+ format: 'strict_date_optional_time',
+ },
+ ],
+ query: {
+ bool: {
+ filter: [
+ {
+ match_all: {},
+ },
+ {
+ bool: {
+ filter: [
+ {
+ range: {
+ '@timestamp': {
+ format: 'strict_date_optional_time',
+ gte: date,
+ lte: date,
+ },
+ },
+ },
+ ],
+ },
+ },
+ ],
+ },
+ },
+ sort: [
+ {
+ '@timestamp': {
+ format: 'strict_date_optional_time||epoch_millis',
+ order: 'desc',
+ },
+ },
+ ],
+ },
+ ignore_unavailable: true,
+ index: ['test-index'],
+ size: 0,
+ track_total_hits: true,
+ },
+ { meta: true }
+ );
+ });
});
diff --git a/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_es_query.ts b/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_es_query.ts
index 9a3aba68039b9..0a27a4e2c373a 100644
--- a/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_es_query.ts
+++ b/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_es_query.ts
@@ -120,6 +120,7 @@ export async function fetchEsQuery({
),
},
...(isGroupAgg ? { topHitsSize: params.size } : {}),
+ loggerCb: (message: string) => logger.warn(message),
}),
});
diff --git a/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_search_source_query.test.ts b/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_search_source_query.test.ts
index e64cd443fadf6..cdc053aac565b 100644
--- a/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_search_source_query.test.ts
+++ b/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_search_source_query.test.ts
@@ -7,6 +7,7 @@
import { OnlySearchSourceRuleParams } from '../types';
import { createSearchSourceMock } from '@kbn/data-plugin/common/search/search_source/mocks';
+import { loggerMock } from '@kbn/logging-mocks';
import {
updateSearchSource,
generateLink,
@@ -63,6 +64,8 @@ const defaultParams: OnlySearchSourceRuleParams = {
timeField: 'timeFieldNotFromDataView',
};
+const logger = loggerMock.create();
+
describe('fetchSearchSourceQuery', () => {
const dataViewMock = createDataView();
@@ -90,7 +93,8 @@ describe('fetchSearchSourceQuery', () => {
params,
undefined,
dateStart,
- dateEnd
+ dateEnd,
+ logger
);
const searchRequest = searchSource.getSearchRequestBody();
expect(filterToExcludeHitsFromPreviousRun).toBe(null);
@@ -130,7 +134,8 @@ describe('fetchSearchSourceQuery', () => {
params,
'2020-02-09T23:12:41.941Z',
dateStart,
- dateEnd
+ dateEnd,
+ logger
);
const searchRequest = searchSource.getSearchRequestBody();
expect(searchRequest.track_total_hits).toBe(true);
@@ -195,7 +200,8 @@ describe('fetchSearchSourceQuery', () => {
params,
'2020-01-09T22:12:41.941Z',
dateStart,
- dateEnd
+ dateEnd,
+ logger
);
const searchRequest = searchSource.getSearchRequestBody();
expect(filterToExcludeHitsFromPreviousRun).toBe(null);
@@ -235,7 +241,8 @@ describe('fetchSearchSourceQuery', () => {
params,
'2020-02-09T23:12:41.941Z',
dateStart,
- dateEnd
+ dateEnd,
+ logger
);
const searchRequest = searchSource.getSearchRequestBody();
expect(filterToExcludeHitsFromPreviousRun).toBe(null);
@@ -281,7 +288,8 @@ describe('fetchSearchSourceQuery', () => {
params,
'2020-02-09T23:12:41.941Z',
dateStart,
- dateEnd
+ dateEnd,
+ logger
);
const searchRequest = searchSource.getSearchRequestBody();
expect(searchRequest.track_total_hits).toBeUndefined();
@@ -337,6 +345,84 @@ describe('fetchSearchSourceQuery', () => {
}
`);
});
+
+ it('should log if group by and top hits size is too large', async () => {
+ const params = {
+ ...defaultParams,
+ excludeHitsFromPreviousRun: false,
+ groupBy: 'top',
+ termField: 'host.name',
+ termSize: 10,
+ size: 200,
+ };
+
+ const searchSourceInstance = createSearchSourceMock({ index: dataViewMock });
+
+ const { dateStart, dateEnd } = getTimeRange();
+ const { searchSource } = await updateSearchSource(
+ searchSourceInstance,
+ dataViewMock,
+ params,
+ '2020-02-09T23:12:41.941Z',
+ dateStart,
+ dateEnd,
+ logger
+ );
+ const searchRequest = searchSource.getSearchRequestBody();
+ expect(searchRequest.track_total_hits).toBeUndefined();
+ expect(searchRequest.size).toMatchInlineSnapshot(`0`);
+ expect(searchRequest.query).toMatchInlineSnapshot(`
+ Object {
+ "bool": Object {
+ "filter": Array [
+ Object {
+ "range": Object {
+ "time": Object {
+ "format": "strict_date_optional_time",
+ "gte": "2020-02-09T23:10:41.941Z",
+ "lte": "2020-02-09T23:15:41.941Z",
+ },
+ },
+ },
+ ],
+ "must": Array [],
+ "must_not": Array [],
+ "should": Array [],
+ },
+ }
+ `);
+ expect(searchRequest.aggs).toMatchInlineSnapshot(`
+ Object {
+ "groupAgg": Object {
+ "aggs": Object {
+ "conditionSelector": Object {
+ "bucket_selector": Object {
+ "buckets_path": Object {
+ "compareValue": "_count",
+ },
+ "script": "params.compareValue < 0L",
+ },
+ },
+ "topHitsAgg": Object {
+ "top_hits": Object {
+ "size": 100,
+ },
+ },
+ },
+ "terms": Object {
+ "field": "host.name",
+ "size": 10,
+ },
+ },
+ "groupAggCount": Object {
+ "stats_bucket": Object {
+ "buckets_path": "groupAgg._count",
+ },
+ },
+ }
+ `);
+ expect(logger.warn).toHaveBeenCalledWith('Top hits size is capped at 100');
+ });
});
describe('generateLink', () => {
@@ -352,7 +438,8 @@ describe('fetchSearchSourceQuery', () => {
params,
'2020-02-09T23:12:41.941Z',
dateStart,
- dateEnd
+ dateEnd,
+ logger
);
expect(filterToExcludeHitsFromPreviousRun).toMatchInlineSnapshot(`
diff --git a/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_search_source_query.ts b/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_search_source_query.ts
index bc281b3a08f0d..6290186653426 100644
--- a/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_search_source_query.ts
+++ b/x-pack/plugins/stack_alerts/server/rule_types/es_query/lib/fetch_search_source_query.ts
@@ -66,6 +66,7 @@ export async function fetchSearchSourceQuery({
latestTimestamp,
dateStart,
dateEnd,
+ logger,
alertLimit
);
@@ -108,6 +109,7 @@ export async function updateSearchSource(
latestTimestamp: string | undefined,
dateStart: string,
dateEnd: string,
+ logger: Logger,
alertLimit?: number
): Promise<{ searchSource: ISearchSource; filterToExcludeHitsFromPreviousRun: Filter | null }> {
const isGroupAgg = isGroupAggregation(params.termField);
@@ -171,6 +173,7 @@ export async function updateSearchSource(
),
},
...(isGroupAgg ? { topHitsSize: params.size } : {}),
+ loggerCb: (message: string) => logger.warn(message),
})
);
return {
diff --git a/x-pack/plugins/stack_connectors/common/thehive/constants.ts b/x-pack/plugins/stack_connectors/common/thehive/constants.ts
new file mode 100644
index 0000000000000..62afe84b801f4
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/common/thehive/constants.ts
@@ -0,0 +1,34 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { i18n } from '@kbn/i18n';
+
+export const THEHIVE_TITLE = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.connectorTypeTitle',
+ {
+ defaultMessage: 'TheHive',
+ }
+);
+export const THEHIVE_CONNECTOR_ID = '.thehive';
+
+export enum SUB_ACTION {
+ PUSH_TO_SERVICE = 'pushToService',
+ CREATE_ALERT = 'createAlert',
+}
+export enum TheHiveSeverity {
+ LOW = 1,
+ MEDIUM = 2,
+ HIGH = 3,
+ CRITICAL = 4,
+}
+export enum TheHiveTLP {
+ CLEAR = 0,
+ GREEN = 1,
+ AMBER = 2,
+ AMBER_STRICT = 3,
+ RED = 4,
+}
diff --git a/x-pack/plugins/stack_connectors/common/thehive/schema.ts b/x-pack/plugins/stack_connectors/common/thehive/schema.ts
new file mode 100644
index 0000000000000..e880ca900591a
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/common/thehive/schema.ts
@@ -0,0 +1,186 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { schema } from '@kbn/config-schema';
+import { TheHiveSeverity, TheHiveTLP, SUB_ACTION } from './constants';
+
+export const TheHiveConfigSchema = schema.object({
+ url: schema.string(),
+ organisation: schema.nullable(schema.string()),
+});
+
+export const TheHiveSecretsSchema = schema.object({
+ apiKey: schema.string(),
+});
+
+export const ExecutorSubActionPushParamsSchema = schema.object({
+ incident: schema.object({
+ title: schema.string(),
+ description: schema.string(),
+ externalId: schema.nullable(schema.string()),
+ severity: schema.nullable(schema.number({ defaultValue: TheHiveSeverity.MEDIUM })),
+ tlp: schema.nullable(schema.number({ defaultValue: TheHiveTLP.AMBER })),
+ tags: schema.nullable(schema.arrayOf(schema.string())),
+ }),
+ comments: schema.nullable(
+ schema.arrayOf(
+ schema.object({
+ comment: schema.string(),
+ commentId: schema.string(),
+ })
+ )
+ ),
+});
+
+export const PushToServiceIncidentSchema = {
+ title: schema.string(),
+ description: schema.string(),
+ severity: schema.nullable(schema.number()),
+ tlp: schema.nullable(schema.number()),
+ tags: schema.nullable(schema.arrayOf(schema.string())),
+};
+
+export const ExecutorSubActionGetIncidentParamsSchema = schema.object({
+ externalId: schema.string(),
+});
+
+export const ExecutorSubActionCreateAlertParamsSchema = schema.object({
+ title: schema.string(),
+ description: schema.string(),
+ type: schema.string(),
+ source: schema.string(),
+ sourceRef: schema.string(),
+ severity: schema.nullable(schema.number({ defaultValue: TheHiveSeverity.MEDIUM })),
+ tlp: schema.nullable(schema.number({ defaultValue: TheHiveTLP.AMBER })),
+ tags: schema.nullable(schema.arrayOf(schema.string())),
+});
+
+export const ExecutorParamsSchema = schema.oneOf([
+ schema.object({
+ subAction: schema.literal(SUB_ACTION.PUSH_TO_SERVICE),
+ subActionParams: ExecutorSubActionPushParamsSchema,
+ }),
+ schema.object({
+ subAction: schema.literal(SUB_ACTION.CREATE_ALERT),
+ subActionParams: ExecutorSubActionCreateAlertParamsSchema,
+ }),
+]);
+
+export const TheHiveIncidentResponseSchema = schema.object(
+ {
+ _id: schema.string(),
+ _type: schema.string(),
+ _createdBy: schema.string(),
+ _updatedBy: schema.nullable(schema.string()),
+ _createdAt: schema.number(),
+ _updatedAt: schema.nullable(schema.number()),
+ number: schema.number(),
+ title: schema.string(),
+ description: schema.string(),
+ severity: schema.number(),
+ severityLabel: schema.string(),
+ startDate: schema.number(),
+ endDate: schema.nullable(schema.number()),
+ tags: schema.nullable(schema.arrayOf(schema.string())),
+ flag: schema.boolean(),
+ tlp: schema.number(),
+ tlpLabel: schema.string(),
+ pap: schema.number(),
+ papLabel: schema.string(),
+ status: schema.string(),
+ stage: schema.string(),
+ summary: schema.nullable(schema.string()),
+ impactStatus: schema.nullable(schema.string()),
+ assignee: schema.nullable(schema.string()),
+ customFields: schema.nullable(schema.arrayOf(schema.recordOf(schema.string(), schema.any()))),
+ userPermissions: schema.nullable(schema.arrayOf(schema.string())),
+ extraData: schema.object({}, { unknowns: 'allow' }),
+ newDate: schema.number(),
+ inProgressDate: schema.nullable(schema.number()),
+ closedDate: schema.nullable(schema.number()),
+ alertDate: schema.nullable(schema.number()),
+ alertNewDate: schema.nullable(schema.number()),
+ alertInProgressDate: schema.nullable(schema.number()),
+ alertImportedDate: schema.nullable(schema.number()),
+ timeToDetect: schema.number(),
+ timeToTriage: schema.nullable(schema.number()),
+ timeToQualify: schema.nullable(schema.number()),
+ timeToAcknowledge: schema.nullable(schema.number()),
+ timeToResolve: schema.nullable(schema.number()),
+ handlingDuration: schema.nullable(schema.number()),
+ },
+ { unknowns: 'ignore' }
+);
+
+export const TheHiveUpdateIncidentResponseSchema = schema.any();
+
+export const TheHiveAddCommentResponseSchema = schema.object(
+ {
+ _id: schema.string(),
+ _type: schema.string(),
+ createdBy: schema.string(),
+ createdAt: schema.number(),
+ updatedAt: schema.nullable(schema.number()),
+ updatedBy: schema.nullable(schema.string()),
+ message: schema.string(),
+ isEdited: schema.boolean(),
+ extraData: schema.object({}, { unknowns: 'allow' }),
+ },
+ { unknowns: 'ignore' }
+);
+
+export const TheHiveCreateAlertResponseSchema = schema.object(
+ {
+ _id: schema.string(),
+ _type: schema.string(),
+ _createdBy: schema.string(),
+ _updatedBy: schema.nullable(schema.string()),
+ _createdAt: schema.number(),
+ _updatedAt: schema.nullable(schema.number()),
+ type: schema.string(),
+ source: schema.string(),
+ sourceRef: schema.string(),
+ externalLink: schema.nullable(schema.string()),
+ title: schema.string(),
+ description: schema.string(),
+ severity: schema.number(),
+ severityLabel: schema.string(),
+ date: schema.number(),
+ tags: schema.nullable(schema.arrayOf(schema.string())),
+ tlp: schema.number(),
+ tlpLabel: schema.string(),
+ pap: schema.number(),
+ papLabel: schema.string(),
+ follow: schema.nullable(schema.boolean()),
+ customFields: schema.nullable(schema.arrayOf(schema.object({}, { unknowns: 'allow' }))),
+ caseTemplate: schema.nullable(schema.string()),
+ observableCount: schema.number(),
+ caseId: schema.nullable(schema.string()),
+ status: schema.string(),
+ stage: schema.string(),
+ assignee: schema.nullable(schema.string()),
+ summary: schema.nullable(schema.string()),
+ extraData: schema.object({}, { unknowns: 'allow' }),
+ newDate: schema.number(),
+ inProgressDate: schema.nullable(schema.number()),
+ closedDate: schema.nullable(schema.number()),
+ importedDate: schema.nullable(schema.number()),
+ timeToDetect: schema.number(),
+ timeToTriage: schema.nullable(schema.number()),
+ timeToQualify: schema.nullable(schema.number()),
+ timeToAcknowledge: schema.nullable(schema.number()),
+ },
+ { unknowns: 'ignore' }
+);
+
+export const TheHiveFailureResponseSchema = schema.object(
+ {
+ type: schema.number(),
+ message: schema.string(),
+ },
+ { unknowns: 'ignore' }
+);
diff --git a/x-pack/plugins/stack_connectors/common/thehive/types.ts b/x-pack/plugins/stack_connectors/common/thehive/types.ts
new file mode 100644
index 0000000000000..b67820ac77e5e
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/common/thehive/types.ts
@@ -0,0 +1,39 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { TypeOf } from '@kbn/config-schema';
+import {
+ TheHiveConfigSchema,
+ TheHiveSecretsSchema,
+ ExecutorParamsSchema,
+ ExecutorSubActionPushParamsSchema,
+ ExecutorSubActionCreateAlertParamsSchema,
+ TheHiveFailureResponseSchema,
+ TheHiveIncidentResponseSchema,
+} from './schema';
+
+export type TheHiveConfig = TypeOf<typeof TheHiveConfigSchema>;
+export type TheHiveSecrets = TypeOf<typeof TheHiveSecretsSchema>;
+
+export type ExecutorParams = TypeOf<typeof ExecutorParamsSchema>;
+export type ExecutorSubActionPushParams = TypeOf<typeof ExecutorSubActionPushParamsSchema>;
+export type ExecutorSubActionCreateAlertParams = TypeOf<
+ typeof ExecutorSubActionCreateAlertParamsSchema
+>;
+
+export type TheHiveFailureResponse = TypeOf<typeof TheHiveFailureResponseSchema>;
+
+export interface ExternalServiceIncidentResponse {
+ id: string;
+ title: string;
+ url: string;
+ pushedDate: string;
+}
+
+export type Incident = Omit<ExecutorSubActionPushParams['incident'], 'externalId'>;
+
+export type GetIncidentResponse = TypeOf<typeof TheHiveIncidentResponseSchema>;
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/index.ts b/x-pack/plugins/stack_connectors/public/connector_types/index.ts
index 893b756338dcb..dd1c5e5c63a2a 100644
--- a/x-pack/plugins/stack_connectors/public/connector_types/index.ts
+++ b/x-pack/plugins/stack_connectors/public/connector_types/index.ts
@@ -32,6 +32,7 @@ import { getXmattersConnectorType } from './xmatters';
import { getD3SecurityConnectorType } from './d3security';
import { ExperimentalFeaturesService } from '../common/experimental_features_service';
import { getSentinelOneConnectorType } from './sentinelone';
+import { getTheHiveConnectorType } from './thehive';
import { getCrowdStrikeConnectorType } from './crowdstrike';
export interface RegistrationServices {
@@ -71,6 +72,7 @@ export function registerConnectorTypes({
connectorTypeRegistry.register(getTorqConnectorType());
connectorTypeRegistry.register(getTinesConnectorType());
connectorTypeRegistry.register(getD3SecurityConnectorType());
+ connectorTypeRegistry.register(getTheHiveConnectorType());
if (ExperimentalFeaturesService.get().sentinelOneConnectorOn) {
connectorTypeRegistry.register(getSentinelOneConnectorType());
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/connector.test.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/connector.test.tsx
new file mode 100644
index 0000000000000..7b61456b093d7
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/connector.test.tsx
@@ -0,0 +1,113 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React from 'react';
+import TheHiveConnectorFields from './connector';
+import { ConnectorFormTestProvider } from '../lib/test_utils';
+import { act, render, waitFor } from '@testing-library/react';
+import userEvent from '@testing-library/user-event';
+
+jest.mock('@kbn/triggers-actions-ui-plugin/public/common/lib/kibana');
+
+describe('TheHiveActionConnectorFields renders', () => {
+ const actionConnector = {
+ actionTypeId: '.thehive',
+ name: 'thehive',
+ config: {
+ url: 'https://test.com',
+ },
+ secrets: {
+ apiKey: 'apiKey',
+ },
+ isDeprecated: false,
+ };
+
+ it('TheHive connector fields are rendered', () => {
+ const { getByTestId } = render(
+      <ConnectorFormTestProvider connector={actionConnector}>
+        <TheHiveConnectorFields
+          readOnly={false}
+          isEdit={false}
+          registerPreSubmitValidator={() => {}}
+        />
+      </ConnectorFormTestProvider>
+ );
+
+ expect(getByTestId('config.url-input')).toBeInTheDocument();
+ expect(getByTestId('secrets.apiKey-input')).toBeInTheDocument();
+ });
+
+ describe('Validation', () => {
+ const onSubmit = jest.fn();
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ const tests: Array<[string, string]> = [
+ ['config.url-input', 'not-valid'],
+ ['secrets.apiKey-input', ''],
+ ];
+
+ it('connector validation succeeds when connector config is valid', async () => {
+ const { getByTestId } = render(
+        <ConnectorFormTestProvider connector={actionConnector} onSubmit={onSubmit}>
+          <TheHiveConnectorFields
+            readOnly={false}
+            isEdit={false}
+            registerPreSubmitValidator={() => {}}
+          />
+        </ConnectorFormTestProvider>
+ );
+
+ await act(async () => {
+ userEvent.click(getByTestId('form-test-provide-submit'));
+ });
+
+ waitFor(() => {
+ expect(onSubmit).toBeCalledWith({
+ data: {
+ actionTypeId: '.thehive',
+ name: 'thehive',
+ config: {
+ url: 'https://test.com',
+ },
+ secrets: {
+ apiKey: 'apiKey',
+ },
+ isDeprecated: false,
+ },
+ isValid: true,
+ });
+ });
+ });
+
+ it.each(tests)('validates correctly %p', async (field, value) => {
+ const res = render(
+        <ConnectorFormTestProvider connector={actionConnector} onSubmit={onSubmit}>
+          <TheHiveConnectorFields
+            readOnly={false}
+            isEdit={false}
+            registerPreSubmitValidator={() => {}}
+          />
+        </ConnectorFormTestProvider>
+ );
+
+ await act(async () => {
+ await userEvent.type(res.getByTestId(field), `{selectall}{backspace}${value}`, {
+ delay: 10,
+ });
+ });
+
+ await act(async () => {
+ userEvent.click(res.getByTestId('form-test-provide-submit'));
+ });
+
+ expect(onSubmit).toHaveBeenCalledWith({ data: {}, isValid: false });
+ });
+ });
+});
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/connector.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/connector.tsx
new file mode 100644
index 0000000000000..01ab1803c00ea
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/connector.tsx
@@ -0,0 +1,51 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React from 'react';
+import { ActionConnectorFieldsProps } from '@kbn/triggers-actions-ui-plugin/public';
+import {
+ ConfigFieldSchema,
+ SimpleConnectorForm,
+ SecretsFieldSchema,
+} from '@kbn/triggers-actions-ui-plugin/public';
+
+import {
+ URL_LABEL,
+ API_KEY_LABEL,
+ ORGANISATION_LABEL,
+ ORGANISATION_HELP_TEXT,
+} from './translations';
+
+const configFormSchema: ConfigFieldSchema[] = [
+ {
+ id: 'organisation',
+ label: ORGANISATION_LABEL,
+ isRequired: false,
+ helpText: ORGANISATION_HELP_TEXT,
+ },
+ { id: 'url', label: URL_LABEL, isUrlField: true },
+];
+
+const secretsFormSchema: SecretsFieldSchema[] = [
+ { id: 'apiKey', label: API_KEY_LABEL, isPasswordField: true },
+];
+
+const TheHiveConnectorFields: React.FC<ActionConnectorFieldsProps> = ({ readOnly, isEdit }) => {
+  return (
+    <>
+      <SimpleConnectorForm
+        isEdit={isEdit}
+        readOnly={readOnly}
+        configFormSchema={configFormSchema}
+        secretsFormSchema={secretsFormSchema}
+      />
+    </>
+  );
+};
+
+// eslint-disable-next-line import/no-default-export
+export { TheHiveConnectorFields as default };
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/constants.ts b/x-pack/plugins/stack_connectors/public/connector_types/thehive/constants.ts
new file mode 100644
index 0000000000000..b94fd5e4ad4be
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/constants.ts
@@ -0,0 +1,114 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { i18n } from '@kbn/i18n';
+import { TheHiveSeverity, TheHiveTLP, SUB_ACTION } from '../../../common/thehive/constants';
+
+export const eventActionOptions = [
+ {
+ value: SUB_ACTION.PUSH_TO_SERVICE,
+ text: i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventSelectCreateCaseOptionLabel',
+ {
+ defaultMessage: 'Create Case',
+ }
+ ),
+ },
+ {
+ value: SUB_ACTION.CREATE_ALERT,
+ text: i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventSelectCreateAlertOptionLabel',
+ {
+ defaultMessage: 'Create Alert',
+ }
+ ),
+ },
+];
+
+export const severityOptions = [
+ {
+ value: TheHiveSeverity.LOW,
+ text: i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventSelectSeverityLowOptionLabel',
+ {
+ defaultMessage: 'LOW',
+ }
+ ),
+ },
+ {
+ value: TheHiveSeverity.MEDIUM,
+ text: i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventSelectSeverityMediumOptionLabel',
+ {
+ defaultMessage: 'MEDIUM',
+ }
+ ),
+ },
+ {
+ value: TheHiveSeverity.HIGH,
+ text: i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventSelectSeverityHighOptionLabel',
+ {
+ defaultMessage: 'HIGH',
+ }
+ ),
+ },
+ {
+ value: TheHiveSeverity.CRITICAL,
+ text: i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventSelectSeverityCriticalOptionLabel',
+ {
+ defaultMessage: 'CRITICAL',
+ }
+ ),
+ },
+];
+
+export const tlpOptions = [
+ {
+ value: TheHiveTLP.CLEAR,
+ text: i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventSelectTlpClearOptionLabel',
+ {
+ defaultMessage: 'CLEAR',
+ }
+ ),
+ },
+ {
+ value: TheHiveTLP.GREEN,
+ text: i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventSelectTlpGreenOptionLabel',
+ {
+ defaultMessage: 'GREEN',
+ }
+ ),
+ },
+ {
+ value: TheHiveTLP.AMBER,
+ text: i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventSelectTlpAmberOptionLabel',
+ {
+ defaultMessage: 'AMBER',
+ }
+ ),
+ },
+ {
+ value: TheHiveTLP.AMBER_STRICT,
+ text: i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventSelectTlpAmberStrictOptionLabel',
+ {
+ defaultMessage: 'AMBER+STRICT',
+ }
+ ),
+ },
+ {
+ value: TheHiveTLP.RED,
+ text: i18n.translate('xpack.stackConnectors.components.thehive.eventSelectTlpRedOptionLabel', {
+ defaultMessage: 'RED',
+ }),
+ },
+];
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/index.ts b/x-pack/plugins/stack_connectors/public/connector_types/thehive/index.ts
new file mode 100644
index 0000000000000..9a98fce201e58
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/index.ts
@@ -0,0 +1,8 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+export { getConnectorType as getTheHiveConnectorType } from './thehive';
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/logo.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/logo.tsx
new file mode 100644
index 0000000000000..45025e1f24041
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/logo.tsx
@@ -0,0 +1,40 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React from 'react';
+import { LogoProps } from '../types';
+
+const Logo = (props: LogoProps) => (
+
+
+
+
+
+
+);
+
+// eslint-disable-next-line import/no-default-export
+export { Logo as default };
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/params.test.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params.test.tsx
new file mode 100644
index 0000000000000..d69080938fc26
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params.test.tsx
@@ -0,0 +1,105 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React from 'react';
+import { fireEvent, render } from '@testing-library/react';
+import { ActionConnector } from '@kbn/triggers-actions-ui-plugin/public/types';
+import TheHiveParamsFields from './params';
+import { SUB_ACTION } from '../../../common/thehive/constants';
+import { ExecutorParams, ExecutorSubActionPushParams } from '../../../common/thehive/types';
+
+describe('TheHiveParamsFields renders', () => {
+ const subActionParams: ExecutorSubActionPushParams = {
+ incident: {
+ title: 'title {test}',
+ description: 'test description',
+ tlp: 2,
+ severity: 2,
+ tags: ['test1'],
+ externalId: null,
+ },
+ comments: [],
+ };
+ const actionParams: ExecutorParams = {
+ subAction: SUB_ACTION.PUSH_TO_SERVICE,
+ subActionParams,
+ };
+ const connector: ActionConnector = {
+ secrets: {},
+ config: {},
+ id: 'test',
+ actionTypeId: '.test',
+ name: 'Test',
+ isPreconfigured: false,
+ isDeprecated: false,
+ isSystemAction: false as const,
+ };
+
+ const editAction = jest.fn();
+ const defaultProps = {
+ actionConnector: connector,
+ actionParams,
+ editAction,
+ errors: { 'subActionParams.incident.title': [] },
+ index: 0,
+ messageVariables: [],
+ };
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ it('all Params fields is rendered', () => {
+    const { getByTestId } = render(<TheHiveParamsFields {...defaultProps} />);
+
+ expect(getByTestId('eventActionSelect')).toBeInTheDocument();
+ expect(getByTestId('eventActionSelect')).toHaveValue(SUB_ACTION.PUSH_TO_SERVICE);
+ });
+
+ it('calls editAction function with the correct arguments', () => {
+    const { getByTestId } = render(<TheHiveParamsFields {...defaultProps} />);
+ const eventActionEl = getByTestId('eventActionSelect');
+
+ fireEvent.change(eventActionEl, { target: { value: SUB_ACTION.CREATE_ALERT } });
+ expect(editAction).toHaveBeenCalledWith(
+ 'subActionParams',
+ {
+ tlp: 2,
+ severity: 2,
+ tags: [],
+ sourceRef: '{{alert.uuid}}',
+ },
+ 0
+ );
+
+ fireEvent.change(eventActionEl, { target: { value: SUB_ACTION.PUSH_TO_SERVICE } });
+ expect(editAction).toHaveBeenCalledWith(
+ 'subActionParams',
+ {
+ incident: {
+ tlp: 2,
+ severity: 2,
+ tags: [],
+ },
+ comments: [],
+ },
+ 0
+ );
+ });
+
+ it('handles the case when subAction is undefined', () => {
+ const newProps = {
+ ...defaultProps,
+ actionParams: {
+ ...actionParams,
+ subAction: undefined,
+ },
+ };
+    render(<TheHiveParamsFields {...newProps} />);
+ expect(editAction).toHaveBeenCalledWith('subAction', SUB_ACTION.PUSH_TO_SERVICE, 0);
+ });
+});
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/params.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params.tsx
new file mode 100644
index 0000000000000..f0221ce7a460b
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params.tsx
@@ -0,0 +1,133 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React, { useState, useEffect, useRef, useMemo } from 'react';
+import { ActionParamsProps, ActionConnectorMode } from '@kbn/triggers-actions-ui-plugin/public';
+import { EuiFormRow, EuiSelect } from '@elastic/eui';
+import { eventActionOptions } from './constants';
+import { SUB_ACTION } from '../../../common/thehive/constants';
+import { ExecutorParams } from '../../../common/thehive/types';
+import { TheHiveParamsAlertFields } from './params_alert';
+import { TheHiveParamsCaseFields } from './params_case';
+import * as translations from './translations';
+
+const TheHiveParamsFields: React.FunctionComponent<ActionParamsProps<ExecutorParams>> = ({
+ actionConnector,
+ actionParams,
+ editAction,
+ index,
+ errors,
+ messageVariables,
+ executionMode,
+}) => {
+  const [eventAction, setEventAction] = useState<SUB_ACTION>(
+ actionParams.subAction ?? SUB_ACTION.PUSH_TO_SERVICE
+ );
+ const actionConnectorRef = useRef(actionConnector?.id ?? '');
+ const isTest = useMemo(() => executionMode === ActionConnectorMode.Test, [executionMode]);
+
+ useEffect(() => {
+ if (actionConnector != null && actionConnectorRef.current !== actionConnector.id) {
+ actionConnectorRef.current = actionConnector.id;
+ editAction(
+ 'subActionParams',
+ {
+ incident: {
+ tlp: 2,
+ severity: 2,
+ tags: [],
+ },
+ comments: [],
+ },
+ index
+ );
+ }
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [actionConnector]);
+
+ useEffect(() => {
+ if (!actionParams.subAction) {
+ editAction('subAction', SUB_ACTION.PUSH_TO_SERVICE, index);
+ }
+ if (!actionParams.subActionParams) {
+ editAction(
+ 'subActionParams',
+ {
+ incident: {
+ tlp: 2,
+ severity: 2,
+ tags: [],
+ },
+ comments: [],
+ },
+ index
+ );
+ }
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [actionParams]);
+
+ useEffect(() => {
+ editAction('subAction', eventAction, index);
+ // eslint-disable-next-line react-hooks/exhaustive-deps
+ }, [eventAction]);
+
+ const setEventActionType = (eventActionType: SUB_ACTION) => {
+ const subActionParams =
+ eventActionType === SUB_ACTION.CREATE_ALERT
+ ? {
+ tlp: 2,
+ severity: 2,
+ tags: [],
+ sourceRef: isTest ? undefined : '{{alert.uuid}}',
+ }
+ : {
+ incident: {
+ tlp: 2,
+ severity: 2,
+ tags: [],
+ },
+ comments: [],
+ };
+
+ setEventAction(eventActionType);
+ editAction('subActionParams', subActionParams, index);
+ };
+
+ return (
+ <>
+      <EuiFormRow fullWidth label={translations.EVENT_ACTION_LABEL}>
+        <EuiSelect
+          fullWidth
+          data-test-subj="eventActionSelect"
+          options={eventActionOptions}
+          value={eventAction}
+          onChange={(e) => setEventActionType(e.target.value as SUB_ACTION)}
+        />
+      </EuiFormRow>
+      {eventAction === SUB_ACTION.PUSH_TO_SERVICE ? (
+        <TheHiveParamsCaseFields
+          actionParams={actionParams}
+          editAction={editAction}
+          index={index}
+          errors={errors}
+          messageVariables={messageVariables}
+        />
+      ) : (
+        <TheHiveParamsAlertFields
+          actionParams={actionParams}
+          editAction={editAction}
+          index={index}
+          errors={errors}
+          messageVariables={messageVariables}
+        />
+      )}
+    </>
+ );
+};
+
+// eslint-disable-next-line import/no-default-export
+export { TheHiveParamsFields as default };
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_alert.test.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_alert.test.tsx
new file mode 100644
index 0000000000000..138595bd52690
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_alert.test.tsx
@@ -0,0 +1,70 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React from 'react';
+import { render } from '@testing-library/react';
+import { ActionConnector } from '@kbn/triggers-actions-ui-plugin/public/types';
+import { TheHiveParamsAlertFields } from './params_alert';
+import { SUB_ACTION } from '../../../common/thehive/constants';
+import { ExecutorParams, ExecutorSubActionCreateAlertParams } from '../../../common/thehive/types';
+
+describe('TheHiveParamsFields renders', () => {
+ const subActionParams: ExecutorSubActionCreateAlertParams = {
+ title: 'title {test}',
+ description: 'description test',
+ tlp: 2,
+ severity: 2,
+ tags: ['test1'],
+ source: 'source test',
+ type: 'sourceType test',
+ sourceRef: 'sourceRef test',
+ };
+ const actionParams: ExecutorParams = {
+ subAction: SUB_ACTION.CREATE_ALERT,
+ subActionParams,
+ };
+ const connector: ActionConnector = {
+ secrets: {},
+ config: {},
+ id: 'test',
+ actionTypeId: '.test',
+ name: 'Test',
+ isPreconfigured: false,
+ isDeprecated: false,
+ isSystemAction: false as const,
+ };
+
+ const editAction = jest.fn();
+ const defaultProps = {
+ actionConnector: connector,
+ actionParams,
+ editAction,
+ errors: { 'subActionParams.incident.title': [] },
+ index: 0,
+ messageVariables: [],
+ };
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ it('all Params fields is rendered', () => {
+    const { getByTestId } = render(<TheHiveParamsAlertFields {...defaultProps} />);
+
+ expect(getByTestId('titleInput')).toBeInTheDocument();
+ expect(getByTestId('descriptionTextArea')).toBeInTheDocument();
+ expect(getByTestId('tagsInput')).toBeInTheDocument();
+ expect(getByTestId('severitySelectInput')).toBeInTheDocument();
+ expect(getByTestId('tlpSelectInput')).toBeInTheDocument();
+ expect(getByTestId('typeInput')).toBeInTheDocument();
+ expect(getByTestId('sourceInput')).toBeInTheDocument();
+ expect(getByTestId('sourceRefInput')).toBeInTheDocument();
+
+ expect(getByTestId('severitySelectInput')).toHaveValue('2');
+ expect(getByTestId('tlpSelectInput')).toHaveValue('2');
+ });
+});
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_alert.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_alert.tsx
new file mode 100644
index 0000000000000..868298ef98a7c
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_alert.tsx
@@ -0,0 +1,192 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React, { useState, useMemo } from 'react';
+import {
+ TextFieldWithMessageVariables,
+ TextAreaWithMessageVariables,
+ ActionParamsProps,
+} from '@kbn/triggers-actions-ui-plugin/public';
+import { EuiFormRow, EuiSelect, EuiComboBox } from '@elastic/eui';
+import { ExecutorParams, ExecutorSubActionCreateAlertParams } from '../../../common/thehive/types';
+import { severityOptions, tlpOptions } from './constants';
+import * as translations from './translations';
+
+export const TheHiveParamsAlertFields: React.FC<ActionParamsProps<ExecutorParams>> = ({
+ actionParams,
+ editAction,
+ index,
+ errors,
+ messageVariables,
+}) => {
+ const alert = useMemo(
+ () =>
+ (actionParams.subActionParams as ExecutorSubActionCreateAlertParams) ??
+ ({
+ tlp: 2,
+ severity: 2,
+ tags: [],
+ } as unknown as ExecutorSubActionCreateAlertParams),
+ [actionParams.subActionParams]
+ );
+
+ const [severity, setSeverity] = useState(alert.severity ?? severityOptions[1].value);
+ const [tlp, setTlp] = useState(alert.tlp ?? tlpOptions[2].value);
+  const [selectedOptions, setSelected] = useState<Array<{ label: string }>>(
+ alert.tags?.map((tag) => ({ label: tag })) ?? []
+ );
+
+ const onCreateOption = (searchValue: string) => {
+ setSelected([...selectedOptions, { label: searchValue }]);
+ editAction('subActionParams', { ...alert, tags: [...(alert.tags ?? []), searchValue] }, index);
+ };
+
+ const onChange = (selectedOptionList: Array<{ label: string }>) => {
+ setSelected(selectedOptionList);
+ editAction(
+ 'subActionParams',
+ { ...alert, tags: selectedOptionList.map((option) => option.label) },
+ index
+ );
+ };
+
+ return (
+ <>
+ {
+ editAction('subActionParams', { ...alert, [key]: value }, index);
+ }}
+ messageVariables={messageVariables}
+ paramsProperty={'title'}
+ inputTargetValue={alert.title ?? undefined}
+ wrapField={true}
+ formRowProps={{
+ label: translations.TITLE_LABEL,
+ fullWidth: true,
+ helpText: '',
+ isInvalid:
+ errors['createAlertParam.title'] !== undefined &&
+ Number(errors['createAlertParam.title'].length) > 0 &&
+ alert.title !== undefined,
+ error: errors['createAlertParam.title'] as string,
+ }}
+ errors={errors['createAlertParam.title'] as string[]}
+ />
+ {
+ editAction('subActionParams', { ...alert, [key]: value }, index);
+ }}
+ messageVariables={messageVariables}
+ paramsProperty={'description'}
+ inputTargetValue={alert.description ?? undefined}
+ errors={errors['createAlertParam.description'] as string[]}
+ />
+ {
+ editAction('subActionParams', { ...alert, [key]: value }, index);
+ }}
+ paramsProperty={'type'}
+ inputTargetValue={alert.type ?? undefined}
+ wrapField={true}
+ formRowProps={{
+ label: translations.TYPE_LABEL,
+ fullWidth: true,
+ helpText: '',
+ isInvalid:
+ errors['createAlertParam.type'] !== undefined &&
+ Number(errors['createAlertParam.type'].length) > 0 &&
+ alert.type !== undefined,
+ error: errors['createAlertParam.type'] as string,
+ }}
+ errors={errors['createAlertParam.type'] as string[]}
+ />
+ {
+ editAction('subActionParams', { ...alert, [key]: value }, index);
+ }}
+ paramsProperty={'source'}
+ inputTargetValue={alert.source ?? undefined}
+ wrapField={true}
+ formRowProps={{
+ label: translations.SOURCE_LABEL,
+ fullWidth: true,
+ helpText: '',
+ isInvalid:
+ errors['createAlertParam.source'] !== undefined &&
+ Number(errors['createAlertParam.source'].length) > 0 &&
+ alert.source !== undefined,
+ error: errors['createAlertParam.source'] as string,
+ }}
+ errors={errors['createAlertParam.source'] as string[]}
+ />
+ {
+ editAction('subActionParams', { ...alert, [key]: value }, index);
+ }}
+ messageVariables={messageVariables}
+ paramsProperty={'sourceRef'}
+ inputTargetValue={alert.sourceRef ?? undefined}
+ wrapField={true}
+ formRowProps={{
+ label: translations.SOURCE_REF_LABEL,
+ fullWidth: true,
+ helpText: '',
+ isInvalid:
+ errors['createAlertParam.sourceRef'] !== undefined &&
+ Number(errors['createAlertParam.sourceRef'].length) > 0 &&
+ alert.sourceRef !== undefined,
+ error: errors['createAlertParam.sourceRef'] as string,
+ }}
+ errors={errors['createAlertParam.sourceRef'] as string[]}
+ />
+
+ {
+ editAction(
+ 'subActionParams',
+ { ...alert, severity: parseInt(e.target.value, 10) },
+ index
+ );
+ setSeverity(parseInt(e.target.value, 10));
+ }}
+ />
+
+
+ {
+ editAction('subActionParams', { ...alert, tlp: parseInt(e.target.value, 10) }, index);
+ setTlp(parseInt(e.target.value, 10));
+ }}
+ />
+
+
+
+
+ >
+ );
+};
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_case.test.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_case.test.tsx
new file mode 100644
index 0000000000000..f76d9fe8aece0
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_case.test.tsx
@@ -0,0 +1,69 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React from 'react';
+import { render } from '@testing-library/react';
+import { ActionConnector } from '@kbn/triggers-actions-ui-plugin/public/types';
+import TheHiveParamsFields from './params';
+import { SUB_ACTION } from '../../../common/thehive/constants';
+import { ExecutorParams, ExecutorSubActionPushParams } from '../../../common/thehive/types';
+
+describe('TheHiveParamsFields renders', () => {
+ const subActionParams: ExecutorSubActionPushParams = {
+ incident: {
+ title: 'title {test}',
+ description: 'test description',
+ tlp: 2,
+ severity: 2,
+ tags: ['test1'],
+ externalId: null,
+ },
+ comments: [],
+ };
+ const actionParams: ExecutorParams = {
+ subAction: SUB_ACTION.PUSH_TO_SERVICE,
+ subActionParams,
+ };
+ const connector: ActionConnector = {
+ secrets: {},
+ config: {},
+ id: 'test',
+ actionTypeId: '.test',
+ name: 'Test',
+ isPreconfigured: false,
+ isDeprecated: false,
+ isSystemAction: false as const,
+ };
+
+ const editAction = jest.fn();
+ const defaultProps = {
+ actionConnector: connector,
+ actionParams,
+ editAction,
+ errors: { 'subActionParams.incident.title': [] },
+ index: 0,
+ messageVariables: [],
+ };
+
+ beforeEach(() => {
+ jest.clearAllMocks();
+ });
+
+ it('all Params fields is rendered', () => {
+ const { getByTestId } = render( );
+
+ expect(getByTestId('titleInput')).toBeInTheDocument();
+ expect(getByTestId('descriptionTextArea')).toBeInTheDocument();
+ expect(getByTestId('tagsInput')).toBeInTheDocument();
+ expect(getByTestId('severitySelectInput')).toBeInTheDocument();
+ expect(getByTestId('tlpSelectInput')).toBeInTheDocument();
+ expect(getByTestId('commentsTextArea')).toBeInTheDocument();
+
+ expect(getByTestId('severitySelectInput')).toHaveValue('2');
+ expect(getByTestId('tlpSelectInput')).toHaveValue('2');
+ });
+});
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_case.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_case.tsx
new file mode 100644
index 0000000000000..7ee7f36efd90c
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/params_case.tsx
@@ -0,0 +1,154 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import React, { useState, useMemo, useCallback } from 'react';
+import {
+ TextFieldWithMessageVariables,
+ TextAreaWithMessageVariables,
+ ActionParamsProps,
+} from '@kbn/triggers-actions-ui-plugin/public';
+import { EuiFormRow, EuiSelect, EuiComboBox } from '@elastic/eui';
+import { ExecutorParams, ExecutorSubActionPushParams } from '../../../common/thehive/types';
+import { severityOptions, tlpOptions } from './constants';
+import * as translations from './translations';
+
+export const TheHiveParamsCaseFields: React.FC<ActionParamsProps<ExecutorParams>> = ({
+ actionParams,
+ editAction,
+ index,
+ errors,
+ messageVariables,
+}) => {
+ const { incident, comments } = useMemo(
+ () =>
+ (actionParams.subActionParams as ExecutorSubActionPushParams) ??
+ ({
+ incident: {
+ tlp: 2,
+ severity: 2,
+ tags: [],
+ },
+ comments: [],
+ } as unknown as ExecutorSubActionPushParams),
+ [actionParams.subActionParams]
+ );
+
+ const [severity, setSeverity] = useState(incident.severity ?? severityOptions[1].value);
+ const [tlp, setTlp] = useState(incident.tlp ?? tlpOptions[2].value);
+  const [selectedOptions, setSelected] = useState<Array<{ label: string }>>(
+ incident.tags?.map((tag) => ({ label: tag })) ?? []
+ );
+
+ const editSubActionProperty = useCallback(
+ (key: string, value: any) => {
+ const newProps =
+ key !== 'comments'
+ ? {
+ incident: { ...incident, [key]: value },
+ comments,
+ }
+ : { incident, [key]: value };
+ editAction('subActionParams', newProps, index);
+ },
+ [comments, editAction, incident, index]
+ );
+
+ const editComment = useCallback(
+    (key: string, value: string) => {
+ editSubActionProperty(key, [{ commentId: '1', comment: value }]);
+ },
+ [editSubActionProperty]
+ );
+
+ const onCreateOption = (searchValue: string) => {
+ setSelected([...selectedOptions, { label: searchValue }]);
+ editSubActionProperty('tags', [...(incident.tags ?? []), searchValue]);
+ };
+
+ const onChange = (selectedOptionList: Array<{ label: string }>) => {
+ setSelected(selectedOptionList);
+ editSubActionProperty(
+ 'tags',
+ selectedOptionList.map((option) => option.label)
+ );
+ };
+
+ return (
+ <>
+ 0 &&
+ incident.title !== undefined,
+ error: errors['pushToServiceParam.incident.title'] as string,
+ }}
+ errors={errors['pushToServiceParam.incident.title'] as string[]}
+ />
+
+
+ {
+ editSubActionProperty('severity', parseInt(e.target.value, 10));
+ setSeverity(parseInt(e.target.value, 10));
+ }}
+ />
+
+
+ {
+ editSubActionProperty('tlp', parseInt(e.target.value, 10));
+ setTlp(parseInt(e.target.value, 10));
+ }}
+ />
+
+
+
+
+ 0 ? comments[0].comment : undefined}
+ label={translations.COMMENTS_LABEL}
+ />
+ >
+ );
+};
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/thehive.test.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/thehive.test.tsx
new file mode 100644
index 0000000000000..3a6788a8bf55d
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/thehive.test.tsx
@@ -0,0 +1,137 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { TypeRegistry } from '@kbn/triggers-actions-ui-plugin/public/application/type_registry';
+import { registerConnectorTypes } from '..';
+import { ActionTypeModel as ConnectorTypeModel } from '@kbn/triggers-actions-ui-plugin/public/types';
+import { experimentalFeaturesMock, registrationServicesMock } from '../../mocks';
+import { SUB_ACTION } from '../../../common/thehive/constants';
+import { ExperimentalFeaturesService } from '../../common/experimental_features_service';
+
+const CONNECTOR_TYPE_ID = '.thehive';
+let connectorTypeModel: ConnectorTypeModel;
+beforeAll(() => {
+ const connectorTypeRegistry = new TypeRegistry();
+ ExperimentalFeaturesService.init({ experimentalFeatures: experimentalFeaturesMock });
+ registerConnectorTypes({ connectorTypeRegistry, services: registrationServicesMock });
+ const getResult = connectorTypeRegistry.get(CONNECTOR_TYPE_ID);
+ if (getResult !== null) {
+ connectorTypeModel = getResult;
+ }
+});
+
+describe('actionTypeRegistry.get() works', () => {
+ test('action type static data is as expected', () => {
+ expect(connectorTypeModel.id).toEqual(CONNECTOR_TYPE_ID);
+ });
+});
+
+describe('thehive pushToService action params validation', () => {
+ test('pushToService action params validation succeeds when action params is valid', async () => {
+ const actionParams = {
+ subAction: SUB_ACTION.PUSH_TO_SERVICE,
+ subActionParams: {
+ incident: {
+ title: 'title {test}',
+ description: 'test description',
+ },
+ },
+ comments: [],
+ };
+
+ expect(await connectorTypeModel.validateParams(actionParams)).toEqual({
+ errors: {
+ 'pushToServiceParam.incident.title': [],
+ 'pushToServiceParam.incident.description': [],
+ 'createAlertParam.title': [],
+ 'createAlertParam.description': [],
+ 'createAlertParam.type': [],
+ 'createAlertParam.source': [],
+ 'createAlertParam.sourceRef': [],
+ },
+ });
+ });
+
+ test('pushToService action params validation fails when Required fields is not valid', async () => {
+ const actionParams = {
+ subAction: SUB_ACTION.PUSH_TO_SERVICE,
+ subActionParams: {
+ incident: {
+ title: '',
+ description: '',
+ },
+ },
+ comments: [],
+ };
+
+ expect(await connectorTypeModel.validateParams(actionParams)).toEqual({
+ errors: {
+ 'pushToServiceParam.incident.title': ['Title is required.'],
+ 'pushToServiceParam.incident.description': ['Description is required.'],
+ 'createAlertParam.title': [],
+ 'createAlertParam.description': [],
+ 'createAlertParam.type': [],
+ 'createAlertParam.source': [],
+ 'createAlertParam.sourceRef': [],
+ },
+ });
+ });
+});
+
+describe('thehive createAlert action params validation', () => {
+ test('createAlert action params validation succeeds when action params is valid', async () => {
+ const actionParams = {
+ subAction: SUB_ACTION.CREATE_ALERT,
+ subActionParams: {
+ title: 'some title {test}',
+ description: 'some description {test}',
+ type: 'type test',
+ source: 'source test',
+ sourceRef: 'source reference test',
+ },
+ comments: [],
+ };
+
+ expect(await connectorTypeModel.validateParams(actionParams)).toEqual({
+ errors: {
+ 'pushToServiceParam.incident.title': [],
+ 'pushToServiceParam.incident.description': [],
+ 'createAlertParam.title': [],
+ 'createAlertParam.description': [],
+ 'createAlertParam.type': [],
+ 'createAlertParam.source': [],
+ 'createAlertParam.sourceRef': [],
+ },
+ });
+ });
+
+ test('params validation fails when Required fields is not valid', async () => {
+ const actionParams = {
+ subAction: SUB_ACTION.CREATE_ALERT,
+ subActionParams: {
+ title: '',
+ description: '',
+ type: '',
+ source: '',
+ sourceRef: '',
+ },
+ comments: [],
+ };
+
+ expect(await connectorTypeModel.validateParams(actionParams)).toEqual({
+ errors: {
+ 'pushToServiceParam.incident.title': [],
+ 'pushToServiceParam.incident.description': [],
+ 'createAlertParam.title': ['Title is required.'],
+ 'createAlertParam.description': ['Description is required.'],
+ 'createAlertParam.type': ['Type is required.'],
+ 'createAlertParam.source': ['Source is required.'],
+ 'createAlertParam.sourceRef': ['Source Reference is required.'],
+ },
+ });
+ });
+});
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/thehive.tsx b/x-pack/plugins/stack_connectors/public/connector_types/thehive/thehive.tsx
new file mode 100644
index 0000000000000..5523a24e05d50
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/thehive.tsx
@@ -0,0 +1,86 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { lazy } from 'react';
+import { i18n } from '@kbn/i18n';
+import { GenericValidationResult } from '@kbn/triggers-actions-ui-plugin/public/types';
+import { TheHiveConnector } from './types';
+import { THEHIVE_CONNECTOR_ID, SUB_ACTION, THEHIVE_TITLE } from '../../../common/thehive/constants';
+import {
+ ExecutorParams,
+ ExecutorSubActionPushParams,
+ ExecutorSubActionCreateAlertParams,
+} from '../../../common/thehive/types';
+
+export function getConnectorType(): TheHiveConnector {
+ return {
+ id: THEHIVE_CONNECTOR_ID,
+ iconClass: lazy(() => import('./logo')),
+ selectMessage: i18n.translate('xpack.stackConnectors.components.thehive.descriptionText', {
+ defaultMessage: 'Create cases and alerts in TheHive',
+ }),
+ actionTypeTitle: THEHIVE_TITLE,
+ hideInUi: true,
+ validateParams: async (
+ actionParams: ExecutorParams
+    ): Promise<GenericValidationResult<unknown>> => {
+ const translations = await import('./translations');
+
+ const errors = {
+      'pushToServiceParam.incident.title': new Array<string>(),
+      'pushToServiceParam.incident.description': new Array<string>(),
+      'createAlertParam.title': new Array<string>(),
+      'createAlertParam.description': new Array<string>(),
+      'createAlertParam.type': new Array<string>(),
+      'createAlertParam.source': new Array<string>(),
+      'createAlertParam.sourceRef': new Array<string>(),
+ };
+
+ const validationResult = {
+ errors,
+ };
+
+ const { subAction, subActionParams } = actionParams;
+ if (subAction === SUB_ACTION.PUSH_TO_SERVICE) {
+ const pushToServiceParam = subActionParams as ExecutorSubActionPushParams;
+ if (pushToServiceParam && pushToServiceParam.incident) {
+ if (!pushToServiceParam.incident.title?.length) {
+ errors['pushToServiceParam.incident.title'].push(translations.TITLE_REQUIRED);
+ }
+ if (!pushToServiceParam.incident.description?.length) {
+ errors['pushToServiceParam.incident.description'].push(
+ translations.DESCRIPTION_REQUIRED
+ );
+ }
+ }
+ } else if (subAction === SUB_ACTION.CREATE_ALERT) {
+ const createAlertParam = subActionParams as ExecutorSubActionCreateAlertParams;
+ if (createAlertParam) {
+ if (!createAlertParam.title?.length) {
+ errors['createAlertParam.title'].push(translations.TITLE_REQUIRED);
+ }
+ if (!createAlertParam.description?.length) {
+ errors['createAlertParam.description'].push(translations.DESCRIPTION_REQUIRED);
+ }
+ if (!createAlertParam.type?.length) {
+ errors['createAlertParam.type'].push(translations.TYPE_REQUIRED);
+ }
+ if (!createAlertParam.source?.length) {
+ errors['createAlertParam.source'].push(translations.SOURCE_REQUIRED);
+ }
+ if (!createAlertParam.sourceRef?.length) {
+ errors['createAlertParam.sourceRef'].push(translations.SOURCE_REF_REQUIRED);
+ }
+ }
+ }
+
+ return validationResult;
+ },
+ actionConnectorFields: lazy(() => import('./connector')),
+ actionParamsFields: lazy(() => import('./params')),
+ };
+}
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/translations.ts b/x-pack/plugins/stack_connectors/public/connector_types/thehive/translations.ts
new file mode 100644
index 0000000000000..fa2c7b822019a
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/translations.ts
@@ -0,0 +1,138 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { i18n } from '@kbn/i18n';
+
+export const URL_LABEL = i18n.translate('xpack.stackConnectors.components.thehive.urlFieldLabel', {
+ defaultMessage: 'URL',
+});
+
+export const ORGANISATION_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.organisationFieldLabel',
+ {
+ defaultMessage: 'Organisation',
+ }
+);
+
+export const ORGANISATION_HELP_TEXT = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.organisationFieldHelpText',
+ {
+ defaultMessage: `By default, the user's default organization will be considered.`,
+ }
+);
+
+export const API_KEY_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.apiKeyFieldLabel',
+ {
+ defaultMessage: 'API Key',
+ }
+);
+
+export const EVENT_ACTION_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.eventActionSelectFieldLabel',
+ {
+ defaultMessage: 'Event Action',
+ }
+);
+
+export const TITLE_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.titleFieldLabel',
+ {
+ defaultMessage: 'Title*',
+ }
+);
+
+export const DESCRIPTION_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.descriptionFieldLabel',
+ {
+ defaultMessage: 'Description*',
+ }
+);
+
+export const TLP_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.tlpSelectFieldLabel',
+ {
+ defaultMessage: 'TLP',
+ }
+);
+
+export const SEVERITY_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.severitySelectFieldLabel',
+ {
+ defaultMessage: 'Severity',
+ }
+);
+
+export const TAGS_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.TagsMultiSelectFieldLabel',
+ {
+ defaultMessage: 'Tags',
+ }
+);
+
+export const COMMENTS_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.commentsTextAreaFieldLabel',
+ {
+ defaultMessage: 'Additional comments',
+ }
+);
+
+export const TYPE_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.typeFieldLabel',
+ {
+ defaultMessage: 'Type*',
+ }
+);
+
+export const SOURCE_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.sourceFieldLabel',
+ {
+ defaultMessage: 'Source*',
+ }
+);
+
+export const SOURCE_REF_LABEL = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.sourceRefFieldLabel',
+ {
+ defaultMessage: 'Source Reference*',
+ }
+);
+
+export const TITLE_REQUIRED = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.requiredTitleText',
+ {
+ defaultMessage: 'Title is required.',
+ }
+);
+
+export const DESCRIPTION_REQUIRED = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.requiredDescriptionText',
+ {
+ defaultMessage: 'Description is required.',
+ }
+);
+
+export const TYPE_REQUIRED = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.requiredTypeText',
+ {
+ defaultMessage: 'Type is required.',
+ }
+);
+
+export const SOURCE_REQUIRED = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.requiredSourceText',
+ {
+ defaultMessage: 'Source is required.',
+ }
+);
+
+export const SOURCE_REF_REQUIRED = i18n.translate(
+ 'xpack.stackConnectors.components.thehive.requiredSourceRefText',
+ {
+ defaultMessage: 'Source Reference is required.',
+ }
+);
diff --git a/x-pack/plugins/stack_connectors/public/connector_types/thehive/types.ts b/x-pack/plugins/stack_connectors/public/connector_types/thehive/types.ts
new file mode 100644
index 0000000000000..0724b5bf2b9d3
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/public/connector_types/thehive/types.ts
@@ -0,0 +1,11 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { ActionTypeModel as ConnectorTypeModel } from '@kbn/triggers-actions-ui-plugin/public';
+import { TheHiveConfig, TheHiveSecrets, ExecutorParams } from '../../../common/thehive/types';
+
+export type TheHiveConnector = ConnectorTypeModel<TheHiveConfig, TheHiveSecrets, ExecutorParams>;
diff --git a/x-pack/plugins/stack_connectors/server/connector_types/index.ts b/x-pack/plugins/stack_connectors/server/connector_types/index.ts
index 6364fed0e193f..2c905471761ed 100644
--- a/x-pack/plugins/stack_connectors/server/connector_types/index.ts
+++ b/x-pack/plugins/stack_connectors/server/connector_types/index.ts
@@ -29,6 +29,7 @@ import { getConnectorType as getWebhookConnectorType } from './webhook';
import { getConnectorType as getXmattersConnectorType } from './xmatters';
import { getConnectorType as getTeamsConnectorType } from './teams';
import { getConnectorType as getD3SecurityConnectorType } from './d3security';
+import { getConnectorType as getTheHiveConnectorType } from './thehive';
import { getOpsgenieConnectorType } from './opsgenie';
import type { ActionParamsType as ServiceNowITSMActionParams } from './servicenow_itsm';
import type { ActionParamsType as ServiceNowSIRActionParams } from './servicenow_sir';
@@ -109,6 +110,7 @@ export function registerConnectorTypes({
actions.registerSubActionConnectorType(getGeminiConnectorType());
actions.registerSubActionConnectorType(getD3SecurityConnectorType());
actions.registerSubActionConnectorType(getResilientConnectorType());
+ actions.registerSubActionConnectorType(getTheHiveConnectorType());
if (experimentalFeatures.sentinelOneConnectorOn) {
actions.registerSubActionConnectorType(getSentinelOneConnectorType());
diff --git a/x-pack/plugins/stack_connectors/server/connector_types/thehive/index.test.ts b/x-pack/plugins/stack_connectors/server/connector_types/thehive/index.test.ts
new file mode 100644
index 0000000000000..86176462ab6d2
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/server/connector_types/thehive/index.test.ts
@@ -0,0 +1,20 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { TheHiveConnectorType, getConnectorType } from '.';
+
+let connectorType: TheHiveConnectorType;
+
+describe('TheHive Connector', () => {
+ beforeEach(() => {
+ connectorType = getConnectorType();
+ });
+ test('exposes the connector as `TheHive` with id `.thehive`', () => {
+ expect(connectorType.id).toEqual('.thehive');
+ expect(connectorType.name).toEqual('TheHive');
+ });
+});
diff --git a/x-pack/plugins/stack_connectors/server/connector_types/thehive/index.ts b/x-pack/plugins/stack_connectors/server/connector_types/thehive/index.ts
new file mode 100644
index 0000000000000..d39849adb4490
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/server/connector_types/thehive/index.ts
@@ -0,0 +1,46 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import {
+ SubActionConnectorType,
+ ValidatorType,
+} from '@kbn/actions-plugin/server/sub_action_framework/types';
+import {
+ AlertingConnectorFeatureId,
+ SecurityConnectorFeatureId,
+ UptimeConnectorFeatureId,
+} from '@kbn/actions-plugin/common/types';
+import { urlAllowListValidator } from '@kbn/actions-plugin/server';
+import { TheHiveConnector } from './thehive';
+import {
+ TheHiveConfigSchema,
+ TheHiveSecretsSchema,
+ PushToServiceIncidentSchema,
+} from '../../../common/thehive/schema';
+import { THEHIVE_CONNECTOR_ID, THEHIVE_TITLE } from '../../../common/thehive/constants';
+import { TheHiveConfig, TheHiveSecrets } from '../../../common/thehive/types';
+
+export type TheHiveConnectorType = SubActionConnectorType<TheHiveConfig, TheHiveSecrets>;
+
+export function getConnectorType(): TheHiveConnectorType {
+ return {
+ id: THEHIVE_CONNECTOR_ID,
+ minimumLicenseRequired: 'platinum',
+ name: THEHIVE_TITLE,
+ getService: (params) => new TheHiveConnector(params, PushToServiceIncidentSchema),
+ supportedFeatureIds: [
+ AlertingConnectorFeatureId,
+ SecurityConnectorFeatureId,
+ UptimeConnectorFeatureId,
+ ],
+ schema: {
+ config: TheHiveConfigSchema,
+ secrets: TheHiveSecretsSchema,
+ },
+ validators: [{ type: ValidatorType.CONFIG, validator: urlAllowListValidator('url') }],
+ };
+}
diff --git a/x-pack/plugins/stack_connectors/server/connector_types/thehive/thehive.test.ts b/x-pack/plugins/stack_connectors/server/connector_types/thehive/thehive.test.ts
new file mode 100644
index 0000000000000..6218d48ae33fa
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/server/connector_types/thehive/thehive.test.ts
@@ -0,0 +1,409 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { TheHiveConnector } from './thehive';
+import { actionsConfigMock } from '@kbn/actions-plugin/server/actions_config.mock';
+import { THEHIVE_CONNECTOR_ID } from '../../../common/thehive/constants';
+import { loggingSystemMock } from '@kbn/core-logging-server-mocks';
+import { actionsMock } from '@kbn/actions-plugin/server/mocks';
+import {
+ TheHiveIncidentResponseSchema,
+ TheHiveUpdateIncidentResponseSchema,
+ TheHiveAddCommentResponseSchema,
+ TheHiveCreateAlertResponseSchema,
+ PushToServiceIncidentSchema,
+} from '../../../common/thehive/schema';
+import type { ExecutorSubActionCreateAlertParams, Incident } from '../../../common/thehive/types';
+
+const mockTime = new Date('2024-04-03T09:10:30.000');
+
+describe('TheHiveConnector', () => {
+ const connector = new TheHiveConnector(
+ {
+ configurationUtilities: actionsConfigMock.create(),
+ connector: { id: '1', type: THEHIVE_CONNECTOR_ID },
+ config: { url: 'https://example.com', organisation: null },
+ secrets: { apiKey: 'test123' },
+ logger: loggingSystemMock.createLogger(),
+ services: actionsMock.createServices(),
+ },
+ PushToServiceIncidentSchema
+ );
+
+ let mockRequest: jest.Mock;
+ let mockError: jest.Mock;
+
+ beforeAll(() => {
+ jest.useFakeTimers();
+ jest.setSystemTime(mockTime);
+ });
+
+ afterAll(() => {
+ jest.useRealTimers();
+ });
+
+ beforeEach(() => {
+ mockError = jest.fn().mockImplementation(() => {
+ throw new Error('API Error');
+ });
+ jest.clearAllMocks();
+ });
+
+ describe('createIncident', () => {
+ const mockResponse = {
+ data: {
+ _id: '~172064',
+ _type: 'Case',
+ _createdBy: 'user1@thehive.local',
+ _createdAt: 1712128153041,
+ number: 67,
+ title: 'title',
+ description: 'description',
+ severity: 1,
+ severityLabel: 'LOW',
+ startDate: 1712128153029,
+ tags: ['tag1', 'tag2'],
+ flag: false,
+ tlp: 2,
+ tlpLabel: 'AMBER',
+ pap: 2,
+ papLabel: 'AMBER',
+ status: 'New',
+ stage: 'New',
+ assignee: 'user1@thehive.local',
+ customFields: [],
+ userPermissions: [
+ 'manageCase/create',
+ 'manageAlert/update',
+ 'manageProcedure',
+ 'managePage',
+ 'manageObservable',
+ 'manageCase/delete',
+ 'manageAlert/create',
+ 'manageCaseReport',
+ 'manageAlert/delete',
+ 'accessTheHiveFS',
+ 'manageKnowledgeBase',
+ 'manageAction',
+ 'manageShare',
+ 'manageAnalyse',
+ 'manageFunction/invoke',
+ 'manageTask',
+ 'manageCase/merge',
+ 'manageCustomEvent',
+ 'manageAlert/import',
+ 'manageCase/changeOwnership',
+ 'manageComment',
+ 'manageAlert/reopen',
+ 'manageCase/update',
+ 'manageCase/reopen',
+ ],
+ extraData: {},
+ newDate: 1712128153029,
+ timeToDetect: 0,
+ },
+ };
+
+ beforeEach(() => {
+ mockRequest = jest.fn().mockResolvedValue(mockResponse);
+ // @ts-ignore
+ connector.request = mockRequest;
+ jest.clearAllMocks();
+ });
+
+ const incident: Incident = {
+ title: 'title',
+ description: 'description',
+ severity: 1,
+ tlp: 2,
+ tags: ['tag1', 'tag2'],
+ };
+
+ it('TheHive API call is successful with correct parameters', async () => {
+ const response = await connector.createIncident(incident);
+ expect(mockRequest).toBeCalledTimes(1);
+ expect(mockRequest).toHaveBeenCalledWith({
+ url: 'https://example.com/api/v1/case',
+ method: 'post',
+ responseSchema: TheHiveIncidentResponseSchema,
+ data: incident,
+ headers: {
+ Authorization: 'Bearer test123',
+ 'X-Organisation': null,
+ },
+ });
+ expect(response).toEqual({
+ id: '~172064',
+ url: 'https://example.com/cases/~172064/details',
+ title: 'title',
+ pushedDate: '2024-04-03T07:09:13.041Z',
+ });
+ });
+
+ it('errors during API calls are properly handled', async () => {
+ // @ts-ignore
+ connector.request = mockError;
+
+ await expect(connector.createIncident(incident)).rejects.toThrow('API Error');
+ });
+ });
+
+ describe('updateIncident', () => {
+ const mockResponse = {
+ data: null,
+ };
+
+ beforeEach(() => {
+ mockRequest = jest.fn().mockResolvedValue(mockResponse);
+ // @ts-ignore
+ connector.request = mockRequest;
+ jest.clearAllMocks();
+ });
+
+ const incident: Incident = {
+ title: 'new title',
+ description: 'new description',
+ severity: 3,
+ tlp: 1,
+ tags: ['tag3'],
+ };
+
+ it('TheHive API call is successful with correct parameters', async () => {
+ const response = await connector.updateIncident({ incidentId: '~172064', incident });
+ expect(mockRequest).toBeCalledTimes(1);
+ expect(mockRequest).toHaveBeenCalledWith({
+ url: 'https://example.com/api/v1/case/~172064',
+ method: 'patch',
+ responseSchema: TheHiveUpdateIncidentResponseSchema,
+ data: incident,
+ headers: {
+ Authorization: 'Bearer test123',
+ 'X-Organisation': null,
+ },
+ });
+ expect(response).toEqual({
+ id: '~172064',
+ url: 'https://example.com/cases/~172064/details',
+ title: 'new title',
+ pushedDate: mockTime.toISOString(),
+ });
+ });
+
+ it('errors during API calls are properly handled', async () => {
+ // @ts-ignore
+ connector.request = mockError;
+
+ await expect(connector.updateIncident({ incidentId: '~172064', incident })).rejects.toThrow(
+ 'API Error'
+ );
+ });
+ });
+
+ describe('addComment', () => {
+ const mockResponse = {
+ data: {
+ _id: '~41156688',
+ _type: 'Comment',
+ createdBy: 'user1@thehive.local',
+ createdAt: 1712158280100,
+ message: 'test comment',
+ isEdited: false,
+ extraData: {},
+ },
+ };
+
+ beforeEach(() => {
+ mockRequest = jest.fn().mockResolvedValue(mockResponse);
+ // @ts-ignore
+ connector.request = mockRequest;
+ jest.clearAllMocks();
+ });
+
+ it('TheHive API call is successful with correct parameters', async () => {
+ await connector.addComment({
+ incidentId: '~172064',
+ comment: 'test comment',
+ });
+ expect(mockRequest).toBeCalledTimes(1);
+ expect(mockRequest).toHaveBeenCalledWith({
+ url: 'https://example.com/api/v1/case/~172064/comment',
+ method: 'post',
+ responseSchema: TheHiveAddCommentResponseSchema,
+ data: { message: 'test comment' },
+ headers: {
+ Authorization: 'Bearer test123',
+ 'X-Organisation': null,
+ },
+ });
+ });
+
+ it('errors during API calls are properly handled', async () => {
+ // @ts-ignore
+ connector.request = mockError;
+
+ await expect(
+ connector.addComment({ incidentId: '~172064', comment: 'test comment' })
+ ).rejects.toThrow('API Error');
+ });
+ });
+
+ describe('getIncident', () => {
+ const mockResponse = {
+ data: {
+ _id: '~172064',
+ _type: 'Case',
+ _createdBy: 'user1@thehive.local',
+ _createdAt: 1712128153041,
+ number: 67,
+ title: 'title',
+ description: 'description',
+ severity: 1,
+ severityLabel: 'LOW',
+ startDate: 1712128153029,
+ tags: ['tag1', 'tag2'],
+ flag: false,
+ tlp: 2,
+ tlpLabel: 'AMBER',
+ pap: 2,
+ papLabel: 'AMBER',
+ status: 'New',
+ stage: 'New',
+ assignee: 'user1@thehive.local',
+ customFields: [],
+ userPermissions: [
+ 'manageCase/create',
+ 'manageAlert/update',
+ 'manageProcedure',
+ 'managePage',
+ 'manageObservable',
+ 'manageCase/delete',
+ 'manageAlert/create',
+ 'manageCaseReport',
+ 'manageAlert/delete',
+ 'accessTheHiveFS',
+ 'manageKnowledgeBase',
+ 'manageAction',
+ 'manageShare',
+ 'manageAnalyse',
+ 'manageFunction/invoke',
+ 'manageTask',
+ 'manageCase/merge',
+ 'manageCustomEvent',
+ 'manageAlert/import',
+ 'manageCase/changeOwnership',
+ 'manageComment',
+ 'manageAlert/reopen',
+ 'manageCase/update',
+ 'manageCase/reopen',
+ ],
+ extraData: {},
+ newDate: 1712128153029,
+ timeToDetect: 0,
+ },
+ };
+
+ beforeEach(() => {
+ mockRequest = jest.fn().mockResolvedValue(mockResponse);
+ // @ts-ignore
+ connector.request = mockRequest;
+ jest.clearAllMocks();
+ });
+
+ it('TheHive API call is successful with correct parameters', async () => {
+ const response = await connector.getIncident({ id: '~172064' });
+ expect(mockRequest).toBeCalledTimes(1);
+ expect(mockRequest).toHaveBeenCalledWith({
+ url: 'https://example.com/api/v1/case/~172064',
+ responseSchema: TheHiveIncidentResponseSchema,
+ headers: {
+ Authorization: 'Bearer test123',
+ 'X-Organisation': null,
+ },
+ });
+ expect(response).toEqual(mockResponse.data);
+ });
+
+ it('errors during API calls are properly handled', async () => {
+ // @ts-ignore
+ connector.request = mockError;
+
+ await expect(connector.getIncident({ id: '~172064' })).rejects.toThrow('API Error');
+ });
+ });
+
+ describe('createAlert', () => {
+ const mockResponse = {
+ data: {
+ _id: '~41128088',
+ _type: 'Alert',
+ _createdBy: 'user1@thehive.local',
+ _createdAt: 1712161128982,
+ type: 'alert type',
+ source: 'alert source',
+ sourceRef: 'test123',
+ title: 'title',
+ description: 'description',
+ severity: 1,
+ severityLabel: 'LOW',
+ date: 1712161128964,
+ tags: ['tag1', 'tag2'],
+ tlp: 2,
+ tlpLabel: 'AMBER',
+ pap: 2,
+ papLabel: 'AMBER',
+ follow: true,
+ customFields: [],
+ observableCount: 0,
+ status: 'New',
+ stage: 'New',
+ extraData: {},
+ newDate: 1712161128967,
+ timeToDetect: 0,
+ },
+ };
+
+ beforeEach(() => {
+ mockRequest = jest.fn().mockResolvedValue(mockResponse);
+ // @ts-ignore
+ connector.request = mockRequest;
+ jest.clearAllMocks();
+ });
+
+ const alert: ExecutorSubActionCreateAlertParams = {
+ title: 'title',
+ description: 'description',
+ type: 'alert type',
+ source: 'alert source',
+ sourceRef: 'test123',
+ severity: 1,
+ tlp: 2,
+ tags: ['tag1', 'tag2'],
+ };
+
+ it('TheHive API call is successful with correct parameters', async () => {
+ await connector.createAlert(alert);
+ expect(mockRequest).toBeCalledTimes(1);
+ expect(mockRequest).toHaveBeenCalledWith({
+ url: 'https://example.com/api/v1/alert',
+ method: 'post',
+ responseSchema: TheHiveCreateAlertResponseSchema,
+ data: alert,
+ headers: {
+ Authorization: 'Bearer test123',
+ 'X-Organisation': null,
+ },
+ });
+ });
+
+ it('errors during API calls are properly handled', async () => {
+ // @ts-ignore
+ connector.request = mockError;
+
+ await expect(connector.createAlert(alert)).rejects.toThrow('API Error');
+ });
+ });
+});
diff --git a/x-pack/plugins/stack_connectors/server/connector_types/thehive/thehive.ts b/x-pack/plugins/stack_connectors/server/connector_types/thehive/thehive.ts
new file mode 100644
index 0000000000000..fe0caf8788f28
--- /dev/null
+++ b/x-pack/plugins/stack_connectors/server/connector_types/thehive/thehive.ts
@@ -0,0 +1,140 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+import { ServiceParams, CaseConnector } from '@kbn/actions-plugin/server';
+import type { AxiosError } from 'axios';
+import { Type } from '@kbn/config-schema';
+import { SUB_ACTION } from '../../../common/thehive/constants';
+import {
+ TheHiveIncidentResponseSchema,
+ TheHiveUpdateIncidentResponseSchema,
+ TheHiveAddCommentResponseSchema,
+ TheHiveCreateAlertResponseSchema,
+ ExecutorSubActionCreateAlertParamsSchema,
+} from '../../../common/thehive/schema';
+import type {
+ TheHiveConfig,
+ TheHiveSecrets,
+ ExecutorSubActionCreateAlertParams,
+ TheHiveFailureResponse,
+ ExternalServiceIncidentResponse,
+ Incident,
+ GetIncidentResponse,
+} from '../../../common/thehive/types';
+
+export const API_VERSION = 'v1';
+
+export class TheHiveConnector extends CaseConnector<
+ TheHiveConfig,
+ TheHiveSecrets,
+ Incident,
+ GetIncidentResponse
+> {
+ private url: string;
+ private apiKey: string;
+ private organisation: string | null;
+ private urlWithoutTrailingSlash: string;
+
+ constructor(
+    params: ServiceParams<TheHiveConfig, TheHiveSecrets>,
+    pushToServiceParamsExtendedSchema: Record<string, Type<unknown>>
+ ) {
+ super(params, pushToServiceParamsExtendedSchema);
+
+ this.registerSubAction({
+ name: SUB_ACTION.CREATE_ALERT,
+ method: 'createAlert',
+ schema: ExecutorSubActionCreateAlertParamsSchema,
+ });
+
+ this.url = this.config.url;
+ this.organisation = this.config.organisation;
+ this.apiKey = this.secrets.apiKey;
+ this.urlWithoutTrailingSlash = this.url?.endsWith('/') ? this.url.slice(0, -1) : this.url;
+ }
+
+ private getAuthHeaders() {
+ return { Authorization: `Bearer ${this.apiKey}`, 'X-Organisation': this.organisation };
+ }
+
+  protected getResponseErrorMessage(error: AxiosError<TheHiveFailureResponse>): string {
+ if (!error.response?.status) {
+ return 'Unknown API Error';
+ }
+ return `API Error: ${error.response?.data?.type} - ${error.response?.data?.message}`;
+ }
+
+  public async createIncident(incident: Incident): Promise<ExternalServiceIncidentResponse> {
+ const res = await this.request({
+ method: 'post',
+ url: `${this.url}/api/${API_VERSION}/case`,
+ data: incident,
+ headers: this.getAuthHeaders(),
+ responseSchema: TheHiveIncidentResponseSchema,
+ });
+
+ return {
+ id: res.data._id,
+ title: res.data.title,
+ url: `${this.urlWithoutTrailingSlash}/cases/${res.data._id}/details`,
+ pushedDate: new Date(res.data._createdAt).toISOString(),
+ };
+ }
+
+ public async addComment({ incidentId, comment }: { incidentId: string; comment: string }) {
+ await this.request({
+ method: 'post',
+ url: `${this.url}/api/${API_VERSION}/case/${incidentId}/comment`,
+ data: { message: comment },
+ headers: this.getAuthHeaders(),
+ responseSchema: TheHiveAddCommentResponseSchema,
+ });
+ }
+
+ public async updateIncident({
+ incidentId,
+ incident,
+ }: {
+ incidentId: string;
+ incident: Incident;
+  }): Promise<ExternalServiceIncidentResponse> {
+ await this.request({
+ method: 'patch',
+ url: `${this.url}/api/${API_VERSION}/case/${incidentId}`,
+ data: incident,
+ headers: this.getAuthHeaders(),
+ responseSchema: TheHiveUpdateIncidentResponseSchema,
+ });
+
+ return {
+ id: incidentId,
+ title: incident.title,
+ url: `${this.urlWithoutTrailingSlash}/cases/${incidentId}/details`,
+ pushedDate: new Date().toISOString(),
+ };
+ }
+
+  public async getIncident({ id }: { id: string }): Promise<GetIncidentResponse> {
+ const res = await this.request({
+ url: `${this.url}/api/${API_VERSION}/case/${id}`,
+ headers: this.getAuthHeaders(),
+ responseSchema: TheHiveIncidentResponseSchema,
+ });
+
+ return res.data;
+ }
+
+ public async createAlert(alert: ExecutorSubActionCreateAlertParams) {
+ await this.request({
+ method: 'post',
+ url: `${this.url}/api/${API_VERSION}/alert`,
+ data: alert,
+ headers: this.getAuthHeaders(),
+ responseSchema: TheHiveCreateAlertResponseSchema,
+ });
+ }
+}
diff --git a/x-pack/plugins/stack_connectors/server/plugin.test.ts b/x-pack/plugins/stack_connectors/server/plugin.test.ts
index ee99a4e4be297..0c9551f787ca0 100644
--- a/x-pack/plugins/stack_connectors/server/plugin.test.ts
+++ b/x-pack/plugins/stack_connectors/server/plugin.test.ts
@@ -131,7 +131,7 @@ describe('Stack Connectors Plugin', () => {
name: 'Torq',
})
);
- expect(actionsSetup.registerSubActionConnectorType).toHaveBeenCalledTimes(9);
+ expect(actionsSetup.registerSubActionConnectorType).toHaveBeenCalledTimes(10);
expect(actionsSetup.registerSubActionConnectorType).toHaveBeenNthCalledWith(
1,
expect.objectContaining({
@@ -183,13 +183,20 @@ describe('Stack Connectors Plugin', () => {
);
expect(actionsSetup.registerSubActionConnectorType).toHaveBeenNthCalledWith(
8,
+ expect.objectContaining({
+ id: '.thehive',
+ name: 'TheHive',
+ })
+ );
+ expect(actionsSetup.registerSubActionConnectorType).toHaveBeenNthCalledWith(
+ 9,
expect.objectContaining({
id: '.sentinelone',
name: 'Sentinel One',
})
);
expect(actionsSetup.registerSubActionConnectorType).toHaveBeenNthCalledWith(
- 9,
+ 10,
expect.objectContaining({
id: '.crowdstrike',
name: 'CrowdStrike',
diff --git a/x-pack/plugins/task_manager/kibana.jsonc b/x-pack/plugins/task_manager/kibana.jsonc
index 9d1a9e216bec7..e1141bbc58377 100644
--- a/x-pack/plugins/task_manager/kibana.jsonc
+++ b/x-pack/plugins/task_manager/kibana.jsonc
@@ -11,8 +11,6 @@
"task_manager"
],
"optionalPlugins": [
- "cloud",
- "serverless",
"usageCollection"
]
}
diff --git a/x-pack/plugins/task_manager/server/MONITORING.md b/x-pack/plugins/task_manager/server/MONITORING.md
index c4e66ab92bad5..02946b9b3e53f 100644
--- a/x-pack/plugins/task_manager/server/MONITORING.md
+++ b/x-pack/plugins/task_manager/server/MONITORING.md
@@ -50,7 +50,7 @@ The root `timestamp` is the time in which the summary was exposed (either to the
Follow this step-by-step guide to make sense of the stats: https://www.elastic.co/guide/en/kibana/master/task-manager-troubleshooting.html#task-manager-diagnosing-root-cause
#### The Configuration Section
-The `configuration` section summarizes Task Manager's current configuration, including dynamic configurations which change over time, such as `poll_interval` and `capacity` which adjust in reaction to changing load on the system.
+The `configuration` section summarizes Task Manager's current configuration, including dynamic configurations which change over time, such as `poll_interval` and `max_workers` which adjust in reaction to changing load on the system.
These are "Hot" stats which are updated whenever a change happens in the configuration.
@@ -69,8 +69,8 @@ The `runtime` tracks Task Manager's performance as it runs, making note of task
These include:
- The time it takes a task to run (p50, p90, p95 & p99, using a configurable running average window, `50` by default)
- The average _drift_ that tasks experience (p50, p90, p95 & p99, using the same configurable running average window as above). Drift tells us how long after a task's scheduled a task typically executes.
- - The average _load_ (p50, p90, p95 & p99, using the same configurable running average window as above). Load tells us what percentage of capacity is in use at the end of each polling cycle.
- - The polling rate (the timestamp of the last time a polling cycle completed), the polling health stats (number of version clashes and mismatches) and the result [`No tasks | Filled task pool | Unexpectedly ran out of capacity`] frequency the past 50 polling cycles (using the same window size as the one used for running averages)
+ - The average _load_ (p50, p90, p95 & p99, using the same configurable running average window as above). Load tells us what percentage of workers is in use at the end of each polling cycle.
+ - The polling rate (the timestamp of the last time a polling cycle completed), the polling health stats (number of version clashes and mismatches) and the result [`No tasks | Filled task pool | Unexpectedly ran out of workers`] frequency the past 50 polling cycles (using the same window size as the one used for running averages)
- The `Success | Retry | Failure ratio` by task type. This is different than the workload stats which tell you what's in the queue, but ca't keep track of retries and of non recurring tasks as they're wiped off the index when completed.
These are "Hot" stats which are updated reactively as Tasks are executed and interacted with.
diff --git a/x-pack/plugins/task_manager/server/config.test.ts b/x-pack/plugins/task_manager/server/config.test.ts
index 81e9e24ea4586..bb59a73a305d6 100644
--- a/x-pack/plugins/task_manager/server/config.test.ts
+++ b/x-pack/plugins/task_manager/server/config.test.ts
@@ -23,6 +23,7 @@ describe('config validation', () => {
"warn_threshold": 5000,
},
"max_attempts": 3,
+ "max_workers": 10,
"metrics_reset_interval": 30000,
"monitored_aggregated_stats_refresh_rate": 60000,
"monitored_stats_health_verbose_log": Object {
@@ -80,6 +81,7 @@ describe('config validation', () => {
"warn_threshold": 5000,
},
"max_attempts": 3,
+ "max_workers": 10,
"metrics_reset_interval": 30000,
"monitored_aggregated_stats_refresh_rate": 60000,
"monitored_stats_health_verbose_log": Object {
@@ -135,6 +137,7 @@ describe('config validation', () => {
"warn_threshold": 5000,
},
"max_attempts": 3,
+ "max_workers": 10,
"metrics_reset_interval": 30000,
"monitored_aggregated_stats_refresh_rate": 60000,
"monitored_stats_health_verbose_log": Object {
diff --git a/x-pack/plugins/task_manager/server/config.ts b/x-pack/plugins/task_manager/server/config.ts
index f0f4031a4c8ac..eec63c5be489c 100644
--- a/x-pack/plugins/task_manager/server/config.ts
+++ b/x-pack/plugins/task_manager/server/config.ts
@@ -8,9 +8,6 @@
import { schema, TypeOf } from '@kbn/config-schema';
export const MAX_WORKERS_LIMIT = 100;
-export const DEFAULT_CAPACITY = 10;
-export const MAX_CAPACITY = 50;
-export const MIN_CAPACITY = 5;
export const DEFAULT_MAX_WORKERS = 10;
export const DEFAULT_POLL_INTERVAL = 3000;
export const DEFAULT_VERSION_CONFLICT_THRESHOLD = 80;
@@ -67,8 +64,6 @@ const requestTimeoutsConfig = schema.object({
export const configSchema = schema.object(
{
allow_reading_invalid_state: schema.boolean({ defaultValue: true }),
- /* The number of normal cost tasks that this Kibana instance will run simultaneously */
- capacity: schema.maybe(schema.number({ min: MIN_CAPACITY, max: MAX_CAPACITY })),
ephemeral_tasks: schema.object({
enabled: schema.boolean({ defaultValue: false }),
/* How many requests can Task Manager buffer before it rejects new requests. */
@@ -86,12 +81,11 @@ export const configSchema = schema.object(
min: 1,
}),
/* The maximum number of tasks that this Kibana instance will run simultaneously. */
- max_workers: schema.maybe(
- schema.number({
- // disable the task manager rather than trying to specify it with 0 workers
- min: 1,
- })
- ),
+ max_workers: schema.number({
+ defaultValue: DEFAULT_MAX_WORKERS,
+ // disable the task manager rather than trying to specify it with 0 workers
+ min: 1,
+ }),
/* The interval at which monotonically increasing metrics counters will reset */
metrics_reset_interval: schema.number({
defaultValue: DEFAULT_METRICS_RESET_INTERVAL,
diff --git a/x-pack/plugins/task_manager/server/ephemeral_task_lifecycle.test.ts b/x-pack/plugins/task_manager/server/ephemeral_task_lifecycle.test.ts
index 2a6f1bf8c33b8..19cfa2943502c 100644
--- a/x-pack/plugins/task_manager/server/ephemeral_task_lifecycle.test.ts
+++ b/x-pack/plugins/task_manager/server/ephemeral_task_lifecycle.test.ts
@@ -18,7 +18,7 @@ import { v4 as uuidv4 } from 'uuid';
import { asTaskPollingCycleEvent, asTaskRunEvent, TaskPersistence } from './task_events';
import { TaskRunResult } from './task_running';
import { TaskPoolRunResult } from './task_pool';
-import { TaskPoolMock } from './task_pool/task_pool.mock';
+import { TaskPoolMock } from './task_pool.mock';
import { executionContextServiceMock } from '@kbn/core/server/mocks';
import { taskManagerMock } from './mocks';
@@ -45,6 +45,7 @@ describe('EphemeralTaskLifecycle', () => {
definitions: new TaskTypeDictionary(taskManagerLogger),
executionContext,
config: {
+ max_workers: 10,
max_attempts: 9,
poll_interval: 6000000,
version_conflict_threshold: 80,
@@ -155,7 +156,7 @@ describe('EphemeralTaskLifecycle', () => {
expect(ephemeralTaskLifecycle.attemptToRun(task)).toMatchObject(asOk(task));
poolCapacity.mockReturnValue({
- availableCapacity: 10,
+ availableWorkers: 10,
});
lifecycleEvent$.next(
@@ -178,7 +179,7 @@ describe('EphemeralTaskLifecycle', () => {
expect(ephemeralTaskLifecycle.attemptToRun(task)).toMatchObject(asOk(task));
poolCapacity.mockReturnValue({
- availableCapacity: 10,
+ availableWorkers: 10,
});
lifecycleEvent$.next(
@@ -215,7 +216,7 @@ describe('EphemeralTaskLifecycle', () => {
expect(ephemeralTaskLifecycle.attemptToRun(tasks[2])).toMatchObject(asOk(tasks[2]));
poolCapacity.mockReturnValue({
- availableCapacity: 2,
+ availableWorkers: 2,
});
lifecycleEvent$.next(
@@ -255,9 +256,9 @@ describe('EphemeralTaskLifecycle', () => {
// pool has capacity for both
poolCapacity.mockReturnValue({
- availableCapacity: 10,
+ availableWorkers: 10,
});
- pool.getUsedCapacityByType.mockReturnValue(0);
+ pool.getOccupiedWorkersByType.mockReturnValue(0);
lifecycleEvent$.next(
asTaskPollingCycleEvent(asOk({ result: FillPoolResult.NoTasksClaimed }))
@@ -295,10 +296,10 @@ describe('EphemeralTaskLifecycle', () => {
// pool has capacity in general
poolCapacity.mockReturnValue({
- availableCapacity: 2,
+ availableWorkers: 2,
});
// but when we ask how many it has occupied by type - wee always have one worker already occupied by that type
- pool.getUsedCapacityByType.mockReturnValue(1);
+ pool.getOccupiedWorkersByType.mockReturnValue(1);
lifecycleEvent$.next(
asTaskPollingCycleEvent(asOk({ result: FillPoolResult.NoTasksClaimed }))
@@ -307,7 +308,7 @@ describe('EphemeralTaskLifecycle', () => {
expect(pool.run).toHaveBeenCalledTimes(0);
// now we release the worker in the pool and cause another cycle in the epheemral queue
- pool.getUsedCapacityByType.mockReturnValue(0);
+ pool.getOccupiedWorkersByType.mockReturnValue(0);
lifecycleEvent$.next(
asTaskPollingCycleEvent(asOk({ result: FillPoolResult.NoTasksClaimed }))
);
@@ -355,9 +356,9 @@ describe('EphemeralTaskLifecycle', () => {
// pool has capacity for all
poolCapacity.mockReturnValue({
- availableCapacity: 10,
+ availableWorkers: 10,
});
- pool.getUsedCapacityByType.mockReturnValue(0);
+ pool.getOccupiedWorkersByType.mockReturnValue(0);
lifecycleEvent$.next(asTaskPollingCycleEvent(asOk({ result: FillPoolResult.NoTasksClaimed })));
@@ -388,19 +389,19 @@ describe('EphemeralTaskLifecycle', () => {
expect(ephemeralTaskLifecycle.queuedTasks).toBe(3);
poolCapacity.mockReturnValue({
- availableCapacity: 1,
+ availableWorkers: 1,
});
lifecycleEvent$.next(asTaskPollingCycleEvent(asOk({ result: FillPoolResult.NoTasksClaimed })));
expect(ephemeralTaskLifecycle.queuedTasks).toBe(2);
poolCapacity.mockReturnValue({
- availableCapacity: 1,
+ availableWorkers: 1,
});
lifecycleEvent$.next(asTaskPollingCycleEvent(asOk({ result: FillPoolResult.NoTasksClaimed })));
expect(ephemeralTaskLifecycle.queuedTasks).toBe(1);
poolCapacity.mockReturnValue({
- availableCapacity: 1,
+ availableWorkers: 1,
});
lifecycleEvent$.next(asTaskPollingCycleEvent(asOk({ result: FillPoolResult.NoTasksClaimed })));
expect(ephemeralTaskLifecycle.queuedTasks).toBe(0);
diff --git a/x-pack/plugins/task_manager/server/ephemeral_task_lifecycle.ts b/x-pack/plugins/task_manager/server/ephemeral_task_lifecycle.ts
index c7ee267b848e5..37cc166ece211 100644
--- a/x-pack/plugins/task_manager/server/ephemeral_task_lifecycle.ts
+++ b/x-pack/plugins/task_manager/server/ephemeral_task_lifecycle.ts
@@ -143,13 +143,13 @@ export class EphemeralTaskLifecycle {
taskType && this.definitions.get(taskType)?.maxConcurrency
? Math.max(
Math.min(
- this.pool.availableCapacity(),
+ this.pool.availableWorkers,
this.definitions.get(taskType)!.maxConcurrency! -
- this.pool.getUsedCapacityByType(taskType)
+ this.pool.getOccupiedWorkersByType(taskType)
),
0
)
- : this.pool.availableCapacity();
+ : this.pool.availableWorkers;
private emitEvent = (event: TaskLifecycleEvent) => {
this.events$.next(event);
diff --git a/x-pack/plugins/task_manager/server/index.ts b/x-pack/plugins/task_manager/server/index.ts
index 965df090911fd..8d50c37adda0b 100644
--- a/x-pack/plugins/task_manager/server/index.ts
+++ b/x-pack/plugins/task_manager/server/index.ts
@@ -55,6 +55,9 @@ export type {
 export const config: PluginConfigDescriptor<TaskManagerConfig> = {
schema: configSchema,
+ exposeToUsage: {
+ max_workers: true,
+ },
deprecations: ({ deprecate }) => {
return [
deprecate('ephemeral_tasks.enabled', 'a future version', {
@@ -65,10 +68,6 @@ export const config: PluginConfigDescriptor = {
level: 'warning',
message: `Configuring "xpack.task_manager.ephemeral_tasks.request_capacity" is deprecated and will be removed in a future version. Remove this setting to increase task execution resiliency.`,
}),
- deprecate('max_workers', 'a future version', {
- level: 'warning',
- message: `Configuring "xpack.task_manager.max_workers" is deprecated and will be removed in a future version. Remove this setting and use "xpack.task_manager.capacity" instead.`,
- }),
(settings, fromPath, addDeprecation) => {
const taskManager = get(settings, fromPath);
if (taskManager?.index) {
diff --git a/x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap b/x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap
deleted file mode 100644
index e59912ed91905..0000000000000
--- a/x-pack/plugins/task_manager/server/integration_tests/__snapshots__/task_cost_check.test.ts.snap
+++ /dev/null
@@ -1,10 +0,0 @@
-// Jest Snapshot v1, https://goo.gl/fbAQLP
-
-exports[`Task cost checks detects tasks with cost definitions 1`] = `
-Array [
- Object {
- "cost": 10,
- "taskType": "alerting:siem.indicatorRule",
- },
-]
-`;
diff --git a/x-pack/plugins/task_manager/server/integration_tests/managed_configuration.test.ts b/x-pack/plugins/task_manager/server/integration_tests/managed_configuration.test.ts
index cc16b8d0544cf..c0939b5b31667 100644
--- a/x-pack/plugins/task_manager/server/integration_tests/managed_configuration.test.ts
+++ b/x-pack/plugins/task_manager/server/integration_tests/managed_configuration.test.ts
@@ -35,362 +35,164 @@ describe('managed configuration', () => {
},
};
- afterEach(() => clock.restore());
-
- describe('managed poll interval', () => {
- beforeEach(async () => {
- jest.resetAllMocks();
- clock = sinon.useFakeTimers();
-
- const context = coreMock.createPluginInitializerContext({
- capacity: 10,
- max_attempts: 9,
- poll_interval: 3000,
- allow_reading_invalid_state: false,
- version_conflict_threshold: 80,
- monitored_aggregated_stats_refresh_rate: 60000,
- monitored_stats_health_verbose_log: {
- enabled: false,
- level: 'debug' as const,
- warn_delayed_task_start_in_seconds: 60,
- },
- monitored_stats_required_freshness: 4000,
- monitored_stats_running_average_window: 50,
- request_capacity: 1000,
- monitored_task_execution_thresholds: {
- default: {
- error_threshold: 90,
- warn_threshold: 80,
- },
- custom: {},
- },
- ephemeral_tasks: {
- enabled: true,
- request_capacity: 10,
- },
- unsafe: {
- exclude_task_types: [],
- authenticate_background_task_utilization: true,
- },
- event_loop_delay: {
- monitor: true,
- warn_threshold: 5000,
- },
- worker_utilization_running_average_window: 5,
- metrics_reset_interval: 3000,
- claim_strategy: 'default',
- request_timeouts: {
- update_by_query: 1000,
- },
- });
- logger = context.logger.get('taskManager');
-
- const taskManager = new TaskManagerPlugin(context);
- (
- await taskManager.setup(coreMock.createSetup(), { usageCollection: undefined })
- ).registerTaskDefinitions({
- foo: {
- title: 'Foo',
- createTaskRunner: jest.fn(),
- },
- });
-
- const coreStart = coreMock.createStart();
- coreStart.elasticsearch = esStart;
- esStart.client.asInternalUser.child.mockReturnValue(
- esStart.client.asInternalUser as unknown as Client
- );
- coreStart.savedObjects.createInternalRepository.mockReturnValue(savedObjectsClient);
- taskManagerStart = await taskManager.start(coreStart, {});
-
- // force rxjs timers to fire when they are scheduled for setTimeout(0) as the
- // sinon fake timers cause them to stall
- clock.tick(0);
+ beforeEach(async () => {
+ jest.resetAllMocks();
+ clock = sinon.useFakeTimers();
+
+ const context = coreMock.createPluginInitializerContext({
+ max_workers: 10,
+ max_attempts: 9,
+ poll_interval: 3000,
+ allow_reading_invalid_state: false,
+ version_conflict_threshold: 80,
+ monitored_aggregated_stats_refresh_rate: 60000,
+ monitored_stats_health_verbose_log: {
+ enabled: false,
+ level: 'debug' as const,
+ warn_delayed_task_start_in_seconds: 60,
+ },
+ monitored_stats_required_freshness: 4000,
+ monitored_stats_running_average_window: 50,
+ request_capacity: 1000,
+ monitored_task_execution_thresholds: {
+ default: {
+ error_threshold: 90,
+ warn_threshold: 80,
+ },
+ custom: {},
+ },
+ ephemeral_tasks: {
+ enabled: true,
+ request_capacity: 10,
+ },
+ unsafe: {
+ exclude_task_types: [],
+ authenticate_background_task_utilization: true,
+ },
+ event_loop_delay: {
+ monitor: true,
+ warn_threshold: 5000,
+ },
+ worker_utilization_running_average_window: 5,
+ metrics_reset_interval: 3000,
+ claim_strategy: 'default',
+ request_timeouts: {
+ update_by_query: 1000,
+ },
});
-
- test('should increase poll interval when Elasticsearch returns 429 error', async () => {
- savedObjectsClient.create.mockRejectedValueOnce(
- SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b')
- );
-
- // Cause "too many requests" error to be thrown
- await expect(
- taskManagerStart.schedule({
- taskType: 'foo',
- state: {},
- params: {},
- })
- ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many Requests"`);
- clock.tick(ADJUST_THROUGHPUT_INTERVAL);
-
- expect(logger.warn).toHaveBeenCalledWith(
- 'Poll interval configuration is temporarily increased after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
- );
- expect(logger.debug).toHaveBeenCalledWith(
- 'Poll interval configuration changing from 3000 to 3600 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
- );
- expect(logger.debug).toHaveBeenCalledWith('Task poller now using interval of 3600ms');
+ logger = context.logger.get('taskManager');
+
+ const taskManager = new TaskManagerPlugin(context);
+ (
+ await taskManager.setup(coreMock.createSetup(), { usageCollection: undefined })
+ ).registerTaskDefinitions({
+ foo: {
+ title: 'Foo',
+ createTaskRunner: jest.fn(),
+ },
});
- test('should increase poll interval when Elasticsearch returns "cannot execute [inline] scripts" error', async () => {
-      const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked<Client>;
- childEsClient.search.mockImplementationOnce(async () => {
- throw inlineScriptError;
- });
-
- await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot(
- `"cannot execute [inline] scripts\\" error"`
- );
-
- clock.tick(ADJUST_THROUGHPUT_INTERVAL);
-
- expect(logger.warn).toHaveBeenCalledWith(
- 'Poll interval configuration is temporarily increased after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
- );
- expect(logger.debug).toHaveBeenCalledWith(
- 'Poll interval configuration changing from 3000 to 3600 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
- );
- expect(logger.debug).toHaveBeenCalledWith('Task poller now using interval of 3600ms');
- });
+ const coreStart = coreMock.createStart();
+ coreStart.elasticsearch = esStart;
+ esStart.client.asInternalUser.child.mockReturnValue(
+ esStart.client.asInternalUser as unknown as Client
+ );
+ coreStart.savedObjects.createInternalRepository.mockReturnValue(savedObjectsClient);
+ taskManagerStart = await taskManager.start(coreStart);
+
+ // force rxjs timers to fire when they are scheduled for setTimeout(0) as the
+ // sinon fake timers cause them to stall
+ clock.tick(0);
});
- describe('managed capacity with default claim strategy', () => {
- beforeEach(async () => {
- jest.resetAllMocks();
- clock = sinon.useFakeTimers();
-
- const context = coreMock.createPluginInitializerContext({
- capacity: 10,
- max_attempts: 9,
- poll_interval: 3000,
- allow_reading_invalid_state: false,
- version_conflict_threshold: 80,
- monitored_aggregated_stats_refresh_rate: 60000,
- monitored_stats_health_verbose_log: {
- enabled: false,
- level: 'debug' as const,
- warn_delayed_task_start_in_seconds: 60,
- },
- monitored_stats_required_freshness: 4000,
- monitored_stats_running_average_window: 50,
- request_capacity: 1000,
- monitored_task_execution_thresholds: {
- default: {
- error_threshold: 90,
- warn_threshold: 80,
- },
- custom: {},
- },
- ephemeral_tasks: {
- enabled: true,
- request_capacity: 10,
- },
- unsafe: {
- exclude_task_types: [],
- authenticate_background_task_utilization: true,
- },
- event_loop_delay: {
- monitor: true,
- warn_threshold: 5000,
- },
- worker_utilization_running_average_window: 5,
- metrics_reset_interval: 3000,
- claim_strategy: 'default',
- request_timeouts: {
- update_by_query: 1000,
- },
- });
- logger = context.logger.get('taskManager');
-
- const taskManager = new TaskManagerPlugin(context);
- (
- await taskManager.setup(coreMock.createSetup(), { usageCollection: undefined })
- ).registerTaskDefinitions({
- foo: {
- title: 'Foo',
- createTaskRunner: jest.fn(),
- },
- });
-
- const coreStart = coreMock.createStart();
- coreStart.elasticsearch = esStart;
- esStart.client.asInternalUser.child.mockReturnValue(
- esStart.client.asInternalUser as unknown as Client
- );
- coreStart.savedObjects.createInternalRepository.mockReturnValue(savedObjectsClient);
- taskManagerStart = await taskManager.start(coreStart, {});
-
- // force rxjs timers to fire when they are scheduled for setTimeout(0) as the
- // sinon fake timers cause them to stall
- clock.tick(0);
- });
-
- test('should lower capacity when Elasticsearch returns 429 error', async () => {
- savedObjectsClient.create.mockRejectedValueOnce(
- SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b')
- );
-
- // Cause "too many requests" error to be thrown
- await expect(
- taskManagerStart.schedule({
- taskType: 'foo',
- state: {},
- params: {},
- })
- ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many Requests"`);
- clock.tick(ADJUST_THROUGHPUT_INTERVAL);
-
- expect(logger.warn).toHaveBeenCalledWith(
- 'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
- );
- expect(logger.debug).toHaveBeenCalledWith(
- 'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
- );
- expect(logger.debug).toHaveBeenCalledWith(
- 'Task pool now using 10 as the max worker value which is based on a capacity of 10'
- );
- });
-
- test('should lower capacity when Elasticsearch returns "cannot execute [inline] scripts" error', async () => {
-      const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked<Client>;
- childEsClient.search.mockImplementationOnce(async () => {
- throw inlineScriptError;
- });
-
- await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot(
- `"cannot execute [inline] scripts\\" error"`
- );
- clock.tick(ADJUST_THROUGHPUT_INTERVAL);
+ afterEach(() => clock.restore());
- expect(logger.warn).toHaveBeenCalledWith(
- 'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
- );
- expect(logger.debug).toHaveBeenCalledWith(
- 'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
- );
- expect(logger.debug).toHaveBeenCalledWith(
- 'Task pool now using 10 as the max worker value which is based on a capacity of 10'
- );
- });
+ test('should lower max workers when Elasticsearch returns 429 error', async () => {
+ savedObjectsClient.create.mockRejectedValueOnce(
+ SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b')
+ );
+
+ // Cause "too many requests" error to be thrown
+ await expect(
+ taskManagerStart.schedule({
+ taskType: 'foo',
+ state: {},
+ params: {},
+ })
+ ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many Requests"`);
+ clock.tick(ADJUST_THROUGHPUT_INTERVAL);
+
+ expect(logger.warn).toHaveBeenCalledWith(
+ 'Max workers configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
+ );
+ expect(logger.debug).toHaveBeenCalledWith(
+ 'Max workers configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
+ );
+ expect(logger.debug).toHaveBeenCalledWith('Task pool now using 10 as the max worker value');
});
- describe('managed capacity with mget claim strategy', () => {
- beforeEach(async () => {
- jest.resetAllMocks();
- clock = sinon.useFakeTimers();
-
- const context = coreMock.createPluginInitializerContext({
- capacity: 10,
- max_attempts: 9,
- poll_interval: 3000,
- allow_reading_invalid_state: false,
- version_conflict_threshold: 80,
- monitored_aggregated_stats_refresh_rate: 60000,
- monitored_stats_health_verbose_log: {
- enabled: false,
- level: 'debug' as const,
- warn_delayed_task_start_in_seconds: 60,
- },
- monitored_stats_required_freshness: 4000,
- monitored_stats_running_average_window: 50,
- request_capacity: 1000,
- monitored_task_execution_thresholds: {
- default: {
- error_threshold: 90,
- warn_threshold: 80,
- },
- custom: {},
- },
- ephemeral_tasks: {
- enabled: true,
- request_capacity: 10,
- },
- unsafe: {
- exclude_task_types: [],
- authenticate_background_task_utilization: true,
- },
- event_loop_delay: {
- monitor: true,
- warn_threshold: 5000,
- },
- worker_utilization_running_average_window: 5,
- metrics_reset_interval: 3000,
- claim_strategy: 'unsafe_mget',
- request_timeouts: {
- update_by_query: 1000,
- },
- });
- logger = context.logger.get('taskManager');
-
- const taskManager = new TaskManagerPlugin(context);
- (
- await taskManager.setup(coreMock.createSetup(), { usageCollection: undefined })
- ).registerTaskDefinitions({
- foo: {
- title: 'Foo',
- createTaskRunner: jest.fn(),
- },
- });
-
- const coreStart = coreMock.createStart();
- coreStart.elasticsearch = esStart;
- esStart.client.asInternalUser.child.mockReturnValue(
- esStart.client.asInternalUser as unknown as Client
- );
- coreStart.savedObjects.createInternalRepository.mockReturnValue(savedObjectsClient);
- taskManagerStart = await taskManager.start(coreStart, {});
+ test('should increase poll interval when Elasticsearch returns 429 error', async () => {
+ savedObjectsClient.create.mockRejectedValueOnce(
+ SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b')
+ );
+
+ // Cause "too many requests" error to be thrown
+ await expect(
+ taskManagerStart.schedule({
+ taskType: 'foo',
+ state: {},
+ params: {},
+ })
+ ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many Requests"`);
+ clock.tick(ADJUST_THROUGHPUT_INTERVAL);
+
+ expect(logger.warn).toHaveBeenCalledWith(
+ 'Poll interval configuration is temporarily increased after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
+ );
+ expect(logger.debug).toHaveBeenCalledWith(
+ 'Poll interval configuration changing from 3000 to 3600 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
+ );
+ expect(logger.debug).toHaveBeenCalledWith('Task poller now using interval of 3600ms');
+ });
- // force rxjs timers to fire when they are scheduled for setTimeout(0) as the
- // sinon fake timers cause them to stall
- clock.tick(0);
+ test('should lower max workers when Elasticsearch returns "cannot execute [inline] scripts" error', async () => {
+ const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked;
+ childEsClient.search.mockImplementationOnce(async () => {
+ throw inlineScriptError;
});
- test('should lower capacity when Elasticsearch returns 429 error', async () => {
- savedObjectsClient.create.mockRejectedValueOnce(
- SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b')
- );
-
- // Cause "too many requests" error to be thrown
- await expect(
- taskManagerStart.schedule({
- taskType: 'foo',
- state: {},
- params: {},
- })
- ).rejects.toThrowErrorMatchingInlineSnapshot(`"Too Many Requests"`);
- clock.tick(ADJUST_THROUGHPUT_INTERVAL);
+ await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot(
+ `"cannot execute [inline] scripts\\" error"`
+ );
+ clock.tick(ADJUST_THROUGHPUT_INTERVAL);
+
+ expect(logger.warn).toHaveBeenCalledWith(
+ 'Max workers configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
+ );
+ expect(logger.debug).toHaveBeenCalledWith(
+ 'Max workers configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
+ );
+ expect(logger.debug).toHaveBeenCalledWith('Task pool now using 10 as the max worker value');
+ });
- expect(logger.warn).toHaveBeenCalledWith(
- 'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
- );
- expect(logger.debug).toHaveBeenCalledWith(
- 'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
- );
- expect(logger.debug).toHaveBeenCalledWith(
- 'Task pool now using 20 as the max allowed cost which is based on a capacity of 10'
- );
+ test('should increase poll interval when Elasticsearch returns "cannot execute [inline] scripts" error', async () => {
+ const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked;
+ childEsClient.search.mockImplementationOnce(async () => {
+ throw inlineScriptError;
});
- test('should lower capacity when Elasticsearch returns "cannot execute [inline] scripts" error', async () => {
- const childEsClient = esStart.client.asInternalUser.child({}) as jest.Mocked;
- childEsClient.search.mockImplementationOnce(async () => {
- throw inlineScriptError;
- });
+ await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot(
+ `"cannot execute [inline] scripts\\" error"`
+ );
- await expect(taskManagerStart.fetch({})).rejects.toThrowErrorMatchingInlineSnapshot(
- `"cannot execute [inline] scripts\\" error"`
- );
- clock.tick(ADJUST_THROUGHPUT_INTERVAL);
+ clock.tick(ADJUST_THROUGHPUT_INTERVAL);
- expect(logger.warn).toHaveBeenCalledWith(
- 'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
- );
- expect(logger.debug).toHaveBeenCalledWith(
- 'Capacity configuration changing from 10 to 8 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
- );
- expect(logger.debug).toHaveBeenCalledWith(
- 'Task pool now using 20 as the max allowed cost which is based on a capacity of 10'
- );
- });
+ expect(logger.warn).toHaveBeenCalledWith(
+ 'Poll interval configuration is temporarily increased after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
+ );
+ expect(logger.debug).toHaveBeenCalledWith(
+ 'Poll interval configuration changing from 3000 to 3600 after seeing 1 "too many request" and/or "execute [inline] script" error(s)'
+ );
+ expect(logger.debug).toHaveBeenCalledWith('Task poller now using interval of 3600ms');
});
});
diff --git a/x-pack/plugins/task_manager/server/integration_tests/task_cost_check.test.ts b/x-pack/plugins/task_manager/server/integration_tests/task_cost_check.test.ts
deleted file mode 100644
index 96678f714ac69..0000000000000
--- a/x-pack/plugins/task_manager/server/integration_tests/task_cost_check.test.ts
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import {
- type TestElasticsearchUtils,
- type TestKibanaUtils,
-} from '@kbn/core-test-helpers-kbn-server';
-import { TaskCost, TaskDefinition } from '../task';
-import { setupTestServers } from './lib';
-import { TaskTypeDictionary } from '../task_type_dictionary';
-
-jest.mock('../task_type_dictionary', () => {
- const actual = jest.requireActual('../task_type_dictionary');
- return {
- ...actual,
- TaskTypeDictionary: jest.fn().mockImplementation((opts) => {
- return new actual.TaskTypeDictionary(opts);
- }),
- };
-});
-
-// Notify response-ops if a task sets a cost to something other than `Normal`
-describe('Task cost checks', () => {
- let esServer: TestElasticsearchUtils;
- let kibanaServer: TestKibanaUtils;
- let taskTypeDictionary: TaskTypeDictionary;
-
- beforeAll(async () => {
- const setupResult = await setupTestServers();
- esServer = setupResult.esServer;
- kibanaServer = setupResult.kibanaServer;
-
- const mockedTaskTypeDictionary = jest.requireMock('../task_type_dictionary');
- expect(mockedTaskTypeDictionary.TaskTypeDictionary).toHaveBeenCalledTimes(1);
- taskTypeDictionary = mockedTaskTypeDictionary.TaskTypeDictionary.mock.results[0].value;
- });
-
- afterAll(async () => {
- if (kibanaServer) {
- await kibanaServer.stop();
- }
- if (esServer) {
- await esServer.stop();
- }
- });
-
- it('detects tasks with cost definitions', async () => {
- const taskTypes = taskTypeDictionary.getAllDefinitions();
- const taskTypesWithCost = taskTypes
- .map((taskType: TaskDefinition) =>
- !!taskType.cost ? { taskType: taskType.type, cost: taskType.cost } : null
- )
- .filter(
- (tt: { taskType: string; cost: TaskCost } | null) =>
- null != tt && tt.cost !== TaskCost.Normal
- );
- expect(taskTypesWithCost).toMatchSnapshot();
- });
-});
diff --git a/x-pack/plugins/task_manager/server/lib/calculate_health_status.test.ts b/x-pack/plugins/task_manager/server/lib/calculate_health_status.test.ts
index 49c68459982ba..fc2f34701e3c1 100644
--- a/x-pack/plugins/task_manager/server/lib/calculate_health_status.test.ts
+++ b/x-pack/plugins/task_manager/server/lib/calculate_health_status.test.ts
@@ -16,6 +16,7 @@ Date.now = jest.fn().mockReturnValue(new Date(now));
const logger = loggingSystemMock.create().get();
const config = {
enabled: true,
+ max_workers: 10,
index: 'foo',
max_attempts: 9,
poll_interval: 3000,
@@ -72,8 +73,6 @@ const getStatsWithTimestamp = ({
configuration: {
timestamp,
value: {
- capacity: { config: 10, as_cost: 20, as_workers: 10 },
- claim_strategy: 'default',
request_capacity: 1000,
monitored_aggregated_stats_refresh_rate: 5000,
monitored_stats_running_average_window: 50,
@@ -85,6 +84,7 @@ const getStatsWithTimestamp = ({
},
},
poll_interval: 3000,
+ max_workers: 10,
},
status: HealthStatus.OK,
},
@@ -213,29 +213,24 @@ const getStatsWithTimestamp = ({
timestamp,
value: {
count: 2,
- cost: 4,
task_types: {
taskType1: {
count: 1,
- cost: 2,
status: {
idle: 1,
},
},
taskType2: {
count: 1,
- cost: 2,
status: {
idle: 1,
},
},
},
non_recurring: 2,
- non_recurring_cost: 4,
owner_ids: 0,
schedule: [['5m', 2]],
overdue: 0,
- overdue_cost: 0,
overdue_non_recurring: 0,
estimated_schedule_density: [
0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
diff --git a/x-pack/plugins/task_manager/server/lib/create_managed_configuration.test.ts b/x-pack/plugins/task_manager/server/lib/create_managed_configuration.test.ts
index a93da63ae693a..b1d6ce92c323a 100644
--- a/x-pack/plugins/task_manager/server/lib/create_managed_configuration.test.ts
+++ b/x-pack/plugins/task_manager/server/lib/create_managed_configuration.test.ts
@@ -13,7 +13,6 @@ import {
ADJUST_THROUGHPUT_INTERVAL,
} from './create_managed_configuration';
import { mockLogger } from '../test_utils';
-import { CLAIM_STRATEGY_DEFAULT, CLAIM_STRATEGY_MGET, TaskManagerConfig } from '../config';
describe('createManagedConfiguration()', () => {
let clock: sinon.SinonFakeTimers;
@@ -27,141 +26,51 @@ describe('createManagedConfiguration()', () => {
afterEach(() => clock.restore());
test('returns observables with initialized values', async () => {
- const capacitySubscription = jest.fn();
+ const maxWorkersSubscription = jest.fn();
const pollIntervalSubscription = jest.fn();
- const { capacityConfiguration$, pollIntervalConfiguration$ } = createManagedConfiguration({
+ const { maxWorkersConfiguration$, pollIntervalConfiguration$ } = createManagedConfiguration({
logger,
errors$: new Subject(),
- config: {
- capacity: 20,
- poll_interval: 2,
- } as TaskManagerConfig,
+ startingMaxWorkers: 1,
+ startingPollInterval: 2,
});
- capacityConfiguration$.subscribe(capacitySubscription);
+ maxWorkersConfiguration$.subscribe(maxWorkersSubscription);
pollIntervalConfiguration$.subscribe(pollIntervalSubscription);
- expect(capacitySubscription).toHaveBeenCalledTimes(1);
- expect(capacitySubscription).toHaveBeenNthCalledWith(1, 20);
+ expect(maxWorkersSubscription).toHaveBeenCalledTimes(1);
+ expect(maxWorkersSubscription).toHaveBeenNthCalledWith(1, 1);
expect(pollIntervalSubscription).toHaveBeenCalledTimes(1);
expect(pollIntervalSubscription).toHaveBeenNthCalledWith(1, 2);
});
- test('uses max_workers config as capacity if only max workers is defined', async () => {
- const capacitySubscription = jest.fn();
- const pollIntervalSubscription = jest.fn();
- const { capacityConfiguration$, pollIntervalConfiguration$ } = createManagedConfiguration({
- logger,
- errors$: new Subject(),
- config: {
- max_workers: 10,
- poll_interval: 2,
- } as TaskManagerConfig,
- });
- capacityConfiguration$.subscribe(capacitySubscription);
- pollIntervalConfiguration$.subscribe(pollIntervalSubscription);
- expect(capacitySubscription).toHaveBeenCalledTimes(1);
- expect(capacitySubscription).toHaveBeenNthCalledWith(1, 10);
- expect(pollIntervalSubscription).toHaveBeenCalledTimes(1);
- expect(pollIntervalSubscription).toHaveBeenNthCalledWith(1, 2);
- });
-
- test('uses max_workers config as capacity but does not exceed MAX_CAPACITY', async () => {
- const capacitySubscription = jest.fn();
- const pollIntervalSubscription = jest.fn();
- const { capacityConfiguration$, pollIntervalConfiguration$ } = createManagedConfiguration({
- logger,
- errors$: new Subject(),
- config: {
- max_workers: 1000,
- poll_interval: 2,
- } as TaskManagerConfig,
- });
- capacityConfiguration$.subscribe(capacitySubscription);
- pollIntervalConfiguration$.subscribe(pollIntervalSubscription);
- expect(capacitySubscription).toHaveBeenCalledTimes(1);
- expect(capacitySubscription).toHaveBeenNthCalledWith(1, 50);
- expect(pollIntervalSubscription).toHaveBeenCalledTimes(1);
- expect(pollIntervalSubscription).toHaveBeenNthCalledWith(1, 2);
- });
-
- test('uses provided defaultCapacity if neither capacity nor max_workers is defined', async () => {
- const capacitySubscription = jest.fn();
- const pollIntervalSubscription = jest.fn();
- const { capacityConfiguration$, pollIntervalConfiguration$ } = createManagedConfiguration({
- defaultCapacity: 500,
- logger,
- errors$: new Subject(),
- config: {
- poll_interval: 2,
- } as TaskManagerConfig,
- });
- capacityConfiguration$.subscribe(capacitySubscription);
- pollIntervalConfiguration$.subscribe(pollIntervalSubscription);
- expect(capacitySubscription).toHaveBeenCalledTimes(1);
- expect(capacitySubscription).toHaveBeenNthCalledWith(1, 500);
- expect(pollIntervalSubscription).toHaveBeenCalledTimes(1);
- expect(pollIntervalSubscription).toHaveBeenNthCalledWith(1, 2);
- });
-
- test('logs warning and uses capacity config if both capacity and max_workers is defined', async () => {
- const capacitySubscription = jest.fn();
- const pollIntervalSubscription = jest.fn();
- const { capacityConfiguration$, pollIntervalConfiguration$ } = createManagedConfiguration({
- logger,
- errors$: new Subject(),
- config: {
- capacity: 30,
- max_workers: 10,
- poll_interval: 2,
- } as TaskManagerConfig,
- });
- capacityConfiguration$.subscribe(capacitySubscription);
- pollIntervalConfiguration$.subscribe(pollIntervalSubscription);
- expect(capacitySubscription).toHaveBeenCalledTimes(1);
- expect(capacitySubscription).toHaveBeenNthCalledWith(1, 30);
- expect(pollIntervalSubscription).toHaveBeenCalledTimes(1);
- expect(pollIntervalSubscription).toHaveBeenNthCalledWith(1, 2);
- expect(logger.warn).toHaveBeenCalledWith(
- `Both \"xpack.task_manager.capacity\" and \"xpack.task_manager.max_workers\" configs are set, max_workers will be ignored in favor of capacity and the setting should be removed.`
- );
- });
-
test(`skips errors that aren't about too many requests`, async () => {
- const capacitySubscription = jest.fn();
+ const maxWorkersSubscription = jest.fn();
const pollIntervalSubscription = jest.fn();
const errors$ = new Subject();
- const { capacityConfiguration$, pollIntervalConfiguration$ } = createManagedConfiguration({
+ const { maxWorkersConfiguration$, pollIntervalConfiguration$ } = createManagedConfiguration({
errors$,
logger,
- config: {
- capacity: 10,
- poll_interval: 100,
- } as TaskManagerConfig,
+ startingMaxWorkers: 100,
+ startingPollInterval: 100,
});
- capacityConfiguration$.subscribe(capacitySubscription);
+ maxWorkersConfiguration$.subscribe(maxWorkersSubscription);
pollIntervalConfiguration$.subscribe(pollIntervalSubscription);
errors$.next(new Error('foo'));
clock.tick(ADJUST_THROUGHPUT_INTERVAL);
- expect(capacitySubscription).toHaveBeenCalledTimes(1);
+ expect(maxWorkersSubscription).toHaveBeenCalledTimes(1);
expect(pollIntervalSubscription).toHaveBeenCalledTimes(1);
});
- describe('capacity configuration', () => {
- function setupScenario(
- startingCapacity: number,
- claimStrategy: string = CLAIM_STRATEGY_DEFAULT
- ) {
+ describe('maxWorker configuration', () => {
+ function setupScenario(startingMaxWorkers: number) {
const errors$ = new Subject();
const subscription = jest.fn();
- const { capacityConfiguration$ } = createManagedConfiguration({
+ const { maxWorkersConfiguration$ } = createManagedConfiguration({
errors$,
+ startingMaxWorkers,
logger,
- config: {
- capacity: startingCapacity,
- poll_interval: 1,
- claim_strategy: claimStrategy,
- } as TaskManagerConfig,
+ startingPollInterval: 1,
});
- capacityConfiguration$.subscribe(subscription);
+ maxWorkersConfiguration$.subscribe(subscription);
return { subscription, errors$ };
}
@@ -172,103 +81,66 @@ describe('createManagedConfiguration()', () => {
afterEach(() => clock.restore());
- describe('default claim strategy', () => {
- test('should decrease configuration at the next interval when an error is emitted', async () => {
- const { subscription, errors$ } = setupScenario(10);
- errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
- clock.tick(ADJUST_THROUGHPUT_INTERVAL - 1);
- expect(subscription).toHaveBeenCalledTimes(1);
- expect(subscription).toHaveBeenNthCalledWith(1, 10);
- clock.tick(1);
- expect(subscription).toHaveBeenCalledTimes(2);
- expect(subscription).toHaveBeenNthCalledWith(2, 8);
- });
-
- test('should log a warning when the configuration changes from the starting value', async () => {
- const { errors$ } = setupScenario(10);
- errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
- clock.tick(ADJUST_THROUGHPUT_INTERVAL);
- expect(logger.warn).toHaveBeenCalledWith(
- 'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
- );
- });
-
- test('should increase configuration back to normal incrementally after an error is emitted', async () => {
- const { subscription, errors$ } = setupScenario(10);
- errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
- clock.tick(ADJUST_THROUGHPUT_INTERVAL * 10);
- expect(subscription).toHaveBeenNthCalledWith(1, 10);
- expect(subscription).toHaveBeenNthCalledWith(2, 8);
- expect(subscription).toHaveBeenNthCalledWith(3, 9);
- expect(subscription).toHaveBeenNthCalledWith(4, 10);
- // No new calls due to value not changing and usage of distinctUntilChanged()
- expect(subscription).toHaveBeenCalledTimes(4);
- });
+ test('should decrease configuration at the next interval when an error is emitted', async () => {
+ const { subscription, errors$ } = setupScenario(100);
+ errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
+ clock.tick(ADJUST_THROUGHPUT_INTERVAL - 1);
+ expect(subscription).toHaveBeenCalledTimes(1);
+ clock.tick(1);
+ expect(subscription).toHaveBeenCalledTimes(2);
+ expect(subscription).toHaveBeenNthCalledWith(2, 80);
+ });
- test('should keep reducing configuration when errors keep emitting until it reaches minimum', async () => {
- const { subscription, errors$ } = setupScenario(10);
- for (let i = 0; i < 20; i++) {
- errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
- clock.tick(ADJUST_THROUGHPUT_INTERVAL);
- }
- expect(subscription).toHaveBeenNthCalledWith(1, 10);
- expect(subscription).toHaveBeenNthCalledWith(2, 8);
- expect(subscription).toHaveBeenNthCalledWith(3, 6);
- expect(subscription).toHaveBeenNthCalledWith(4, 4);
- expect(subscription).toHaveBeenNthCalledWith(5, 3);
- expect(subscription).toHaveBeenNthCalledWith(6, 2);
- expect(subscription).toHaveBeenNthCalledWith(7, 1);
- // No new calls due to value not changing and usage of distinctUntilChanged()
- expect(subscription).toHaveBeenCalledTimes(7);
- });
+ test('should log a warning when the configuration changes from the starting value', async () => {
+ const { errors$ } = setupScenario(100);
+ errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
+ clock.tick(ADJUST_THROUGHPUT_INTERVAL);
+ expect(logger.warn).toHaveBeenCalledWith(
+ 'Max workers configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
+ );
});
- describe('mget claim strategy', () => {
- test('should decrease configuration at the next interval when an error is emitted', async () => {
- const { subscription, errors$ } = setupScenario(10, CLAIM_STRATEGY_MGET);
- errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
- clock.tick(ADJUST_THROUGHPUT_INTERVAL - 1);
- expect(subscription).toHaveBeenCalledTimes(1);
- expect(subscription).toHaveBeenNthCalledWith(1, 10);
- clock.tick(1);
- expect(subscription).toHaveBeenCalledTimes(2);
- expect(subscription).toHaveBeenNthCalledWith(2, 8);
- });
+ test('should increase configuration back to normal incrementally after an error is emitted', async () => {
+ const { subscription, errors$ } = setupScenario(100);
+ errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
+ clock.tick(ADJUST_THROUGHPUT_INTERVAL * 10);
+ expect(subscription).toHaveBeenNthCalledWith(2, 80);
+ expect(subscription).toHaveBeenNthCalledWith(3, 84);
+ // 88.2- > 89 from Math.ceil
+ expect(subscription).toHaveBeenNthCalledWith(4, 89);
+ expect(subscription).toHaveBeenNthCalledWith(5, 94);
+ expect(subscription).toHaveBeenNthCalledWith(6, 99);
+ // 103.95 -> 100 from Math.min with starting value
+ expect(subscription).toHaveBeenNthCalledWith(7, 100);
+ // No new calls due to value not changing and usage of distinctUntilChanged()
+ expect(subscription).toHaveBeenCalledTimes(7);
+ });
- test('should log a warning when the configuration changes from the starting value', async () => {
- const { errors$ } = setupScenario(10, CLAIM_STRATEGY_MGET);
+ test('should keep reducing configuration when errors keep emitting', async () => {
+ const { subscription, errors$ } = setupScenario(100);
+ for (let i = 0; i < 20; i++) {
errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
clock.tick(ADJUST_THROUGHPUT_INTERVAL);
- expect(logger.warn).toHaveBeenCalledWith(
- 'Capacity configuration is temporarily reduced after Elasticsearch returned 1 "too many request" and/or "execute [inline] script" error(s).'
- );
- });
-
- test('should increase configuration back to normal incrementally after an error is emitted', async () => {
- const { subscription, errors$ } = setupScenario(10, CLAIM_STRATEGY_MGET);
- errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
- clock.tick(ADJUST_THROUGHPUT_INTERVAL * 10);
- expect(subscription).toHaveBeenNthCalledWith(1, 10);
- expect(subscription).toHaveBeenNthCalledWith(2, 8);
- expect(subscription).toHaveBeenNthCalledWith(3, 9);
- expect(subscription).toHaveBeenNthCalledWith(4, 10);
- // No new calls due to value not changing and usage of distinctUntilChanged()
- expect(subscription).toHaveBeenCalledTimes(4);
- });
-
- test('should keep reducing configuration when errors keep emitting until it reaches minimum', async () => {
- const { subscription, errors$ } = setupScenario(10, CLAIM_STRATEGY_MGET);
- for (let i = 0; i < 20; i++) {
- errors$.next(SavedObjectsErrorHelpers.createTooManyRequestsError('a', 'b'));
- clock.tick(ADJUST_THROUGHPUT_INTERVAL);
- }
- expect(subscription).toHaveBeenNthCalledWith(1, 10);
- expect(subscription).toHaveBeenNthCalledWith(2, 8);
- expect(subscription).toHaveBeenNthCalledWith(3, 6);
- expect(subscription).toHaveBeenNthCalledWith(4, 5);
- // No new calls due to value not changing and usage of distinctUntilChanged()
- expect(subscription).toHaveBeenCalledTimes(4);
- });
+ }
+ expect(subscription).toHaveBeenNthCalledWith(2, 80);
+ expect(subscription).toHaveBeenNthCalledWith(3, 64);
+ // 51.2 -> 51 from Math.floor
+ expect(subscription).toHaveBeenNthCalledWith(4, 51);
+ expect(subscription).toHaveBeenNthCalledWith(5, 40);
+ expect(subscription).toHaveBeenNthCalledWith(6, 32);
+ expect(subscription).toHaveBeenNthCalledWith(7, 25);
+ expect(subscription).toHaveBeenNthCalledWith(8, 20);
+ expect(subscription).toHaveBeenNthCalledWith(9, 16);
+ expect(subscription).toHaveBeenNthCalledWith(10, 12);
+ expect(subscription).toHaveBeenNthCalledWith(11, 9);
+ expect(subscription).toHaveBeenNthCalledWith(12, 7);
+ expect(subscription).toHaveBeenNthCalledWith(13, 5);
+ expect(subscription).toHaveBeenNthCalledWith(14, 4);
+ expect(subscription).toHaveBeenNthCalledWith(15, 3);
+ expect(subscription).toHaveBeenNthCalledWith(16, 2);
+ expect(subscription).toHaveBeenNthCalledWith(17, 1);
+ // No new calls due to value not changing and usage of distinctUntilChanged()
+ expect(subscription).toHaveBeenCalledTimes(17);
});
});
@@ -279,10 +151,8 @@ describe('createManagedConfiguration()', () => {
const { pollIntervalConfiguration$ } = createManagedConfiguration({
logger,
errors$,
- config: {
- poll_interval: startingPollInterval,
- capacity: 20,
- } as TaskManagerConfig,
+ startingPollInterval,
+ startingMaxWorkers: 1,
});
pollIntervalConfiguration$.subscribe(subscription);
return { subscription, errors$ };
diff --git a/x-pack/plugins/task_manager/server/lib/create_managed_configuration.ts b/x-pack/plugins/task_manager/server/lib/create_managed_configuration.ts
index 3036eb2008de6..5c7b1a16a4308 100644
--- a/x-pack/plugins/task_manager/server/lib/create_managed_configuration.ts
+++ b/x-pack/plugins/task_manager/server/lib/create_managed_configuration.ts
@@ -10,26 +10,17 @@ import { filter, mergeScan, map, scan, distinctUntilChanged, startWith } from 'r
import { SavedObjectsErrorHelpers } from '@kbn/core/server';
import { Logger } from '@kbn/core/server';
import { isEsCannotExecuteScriptError } from './identify_es_error';
-import { CLAIM_STRATEGY_MGET, DEFAULT_CAPACITY, MAX_CAPACITY, TaskManagerConfig } from '../config';
-import { TaskCost } from '../task';
const FLUSH_MARKER = Symbol('flush');
export const ADJUST_THROUGHPUT_INTERVAL = 10 * 1000;
export const PREFERRED_MAX_POLL_INTERVAL = 60 * 1000;
-
-// Capacity is measured in number of normal cost tasks that can be run
-// At a minimum, we need to be able to run a single task with the greatest cost
-// so we should convert the greatest cost to normal cost
-export const MIN_COST = TaskCost.ExtraLarge / TaskCost.Normal;
-
-// For default claim strategy
export const MIN_WORKERS = 1;
-// When errors occur, reduce capacity by CAPACITY_DECREASE_PERCENTAGE
-// When errors no longer occur, start increasing capacity by CAPACITY_INCREASE_PERCENTAGE
+// When errors occur, reduce maxWorkers by MAX_WORKERS_DECREASE_PERCENTAGE
+// When errors no longer occur, start increasing maxWorkers by MAX_WORKERS_INCREASE_PERCENTAGE
// until starting value is reached
-const CAPACITY_DECREASE_PERCENTAGE = 0.8;
-const CAPACITY_INCREASE_PERCENTAGE = 1.05;
+const MAX_WORKERS_DECREASE_PERCENTAGE = 0.8;
+const MAX_WORKERS_INCREASE_PERCENTAGE = 1.05;
// When errors occur, increase pollInterval by POLL_INTERVAL_INCREASE_PERCENTAGE
// When errors no longer occur, start decreasing pollInterval by POLL_INTERVAL_DECREASE_PERCENTAGE
@@ -38,32 +29,28 @@ const POLL_INTERVAL_DECREASE_PERCENTAGE = 0.95;
const POLL_INTERVAL_INCREASE_PERCENTAGE = 1.2;
interface ManagedConfigurationOpts {
- config: TaskManagerConfig;
- defaultCapacity?: number;
- errors$: Observable;
logger: Logger;
+ startingMaxWorkers: number;
+ startingPollInterval: number;
+ errors$: Observable;
}
export interface ManagedConfiguration {
- startingCapacity: number;
- capacityConfiguration$: Observable;
+ maxWorkersConfiguration$: Observable;
pollIntervalConfiguration$: Observable;
}
export function createManagedConfiguration({
- config,
- defaultCapacity = DEFAULT_CAPACITY,
logger,
+ startingMaxWorkers,
+ startingPollInterval,
errors$,
}: ManagedConfigurationOpts): ManagedConfiguration {
const errorCheck$ = countErrors(errors$, ADJUST_THROUGHPUT_INTERVAL);
- const startingCapacity = calculateStartingCapacity(config, logger, defaultCapacity);
- const startingPollInterval = config.poll_interval;
return {
- startingCapacity,
- capacityConfiguration$: errorCheck$.pipe(
- createCapacityScan(config, logger, startingCapacity),
- startWith(startingCapacity),
+ maxWorkersConfiguration$: errorCheck$.pipe(
+ createMaxWorkersScan(logger, startingMaxWorkers),
+ startWith(startingMaxWorkers),
distinctUntilChanged()
),
pollIntervalConfiguration$: errorCheck$.pipe(
@@ -74,39 +61,37 @@ export function createManagedConfiguration({
};
}
-function createCapacityScan(config: TaskManagerConfig, logger: Logger, startingCapacity: number) {
- return scan((previousCapacity: number, errorCount: number) => {
- let newCapacity: number;
+function createMaxWorkersScan(logger: Logger, startingMaxWorkers: number) {
+ return scan((previousMaxWorkers: number, errorCount: number) => {
+ let newMaxWorkers: number;
if (errorCount > 0) {
- const minCapacity = getMinCapacity(config);
- // Decrease capacity by CAPACITY_DECREASE_PERCENTAGE while making sure it doesn't go lower than minCapacity.
+ // Decrease max workers by MAX_WORKERS_DECREASE_PERCENTAGE while making sure it doesn't go lower than 1.
// Using Math.floor to make sure the number is different than previous while not being a decimal value.
- newCapacity = Math.max(
- Math.floor(previousCapacity * CAPACITY_DECREASE_PERCENTAGE),
- minCapacity
+ newMaxWorkers = Math.max(
+ Math.floor(previousMaxWorkers * MAX_WORKERS_DECREASE_PERCENTAGE),
+ MIN_WORKERS
);
} else {
- // Increase capacity by CAPACITY_INCREASE_PERCENTAGE while making sure it doesn't go
+ // Increase max workers by MAX_WORKERS_INCREASE_PERCENTAGE while making sure it doesn't go
// higher than the starting value. Using Math.ceil to make sure the number is different than
// previous while not being a decimal value
- newCapacity = Math.min(
- startingCapacity,
- Math.ceil(previousCapacity * CAPACITY_INCREASE_PERCENTAGE)
+ newMaxWorkers = Math.min(
+ startingMaxWorkers,
+ Math.ceil(previousMaxWorkers * MAX_WORKERS_INCREASE_PERCENTAGE)
);
}
-
- if (newCapacity !== previousCapacity) {
+ if (newMaxWorkers !== previousMaxWorkers) {
logger.debug(
- `Capacity configuration changing from ${previousCapacity} to ${newCapacity} after seeing ${errorCount} "too many request" and/or "execute [inline] script" error(s)`
+ `Max workers configuration changing from ${previousMaxWorkers} to ${newMaxWorkers} after seeing ${errorCount} "too many request" and/or "execute [inline] script" error(s)`
);
- if (previousCapacity === startingCapacity) {
+ if (previousMaxWorkers === startingMaxWorkers) {
logger.warn(
- `Capacity configuration is temporarily reduced after Elasticsearch returned ${errorCount} "too many request" and/or "execute [inline] script" error(s).`
+ `Max workers configuration is temporarily reduced after Elasticsearch returned ${errorCount} "too many request" and/or "execute [inline] script" error(s).`
);
}
}
- return newCapacity;
- }, startingCapacity);
+ return newMaxWorkers;
+ }, startingMaxWorkers);
}
function createPollIntervalScan(logger: Logger, startingPollInterval: number) {
@@ -201,36 +186,3 @@ function resetErrorCount() {
count: 0,
};
}
-
-function getMinCapacity(config: TaskManagerConfig) {
- switch (config.claim_strategy) {
- case CLAIM_STRATEGY_MGET:
- return MIN_COST;
-
- default:
- return MIN_WORKERS;
- }
-}
-
-export function calculateStartingCapacity(
- config: TaskManagerConfig,
- logger: Logger,
- defaultCapacity: number
-): number {
- if (config.capacity !== undefined && config.max_workers !== undefined) {
- logger.warn(
- `Both "xpack.task_manager.capacity" and "xpack.task_manager.max_workers" configs are set, max_workers will be ignored in favor of capacity and the setting should be removed.`
- );
- }
-
- if (config.capacity) {
- // Use capacity if explicitly set
- return config.capacity!;
- } else if (config.max_workers) {
- // Otherwise use max_worker value as capacity, capped at MAX_CAPACITY
- return Math.min(config.max_workers, MAX_CAPACITY);
- }
-
- // Neither are set, use the given default capacity
- return defaultCapacity;
-}
diff --git a/x-pack/plugins/task_manager/server/lib/fill_pool.test.ts b/x-pack/plugins/task_manager/server/lib/fill_pool.test.ts
index d3533ac058314..9fdb16fb5f677 100644
--- a/x-pack/plugins/task_manager/server/lib/fill_pool.test.ts
+++ b/x-pack/plugins/task_manager/server/lib/fill_pool.test.ts
@@ -30,6 +30,7 @@ describe('fillPool', () => {
tasksUpdated: tasks?.length ?? 0,
tasksConflicted: 0,
tasksClaimed: 0,
+ tasksRejected: 0,
},
docs: tasks,
})
diff --git a/x-pack/plugins/task_manager/server/lib/get_default_capacity.test.ts b/x-pack/plugins/task_manager/server/lib/get_default_capacity.test.ts
deleted file mode 100644
index fb68a3620e43c..0000000000000
--- a/x-pack/plugins/task_manager/server/lib/get_default_capacity.test.ts
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { CLAIM_STRATEGY_DEFAULT, CLAIM_STRATEGY_MGET, DEFAULT_CAPACITY } from '../config';
-import { getDefaultCapacity } from './get_default_capacity';
-
-describe('getDefaultCapacity', () => {
- it('returns default capacity when not in cloud', () => {
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: false,
- isServerless: false,
- isBackgroundTaskNodeOnly: false,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(DEFAULT_CAPACITY);
-
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: false,
- isServerless: true,
- isBackgroundTaskNodeOnly: false,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(DEFAULT_CAPACITY);
-
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: false,
- isServerless: false,
- isBackgroundTaskNodeOnly: true,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(DEFAULT_CAPACITY);
-
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: false,
- isServerless: true,
- isBackgroundTaskNodeOnly: true,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(DEFAULT_CAPACITY);
- });
-
- it('returns default capacity when default claim strategy', () => {
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: true,
- isServerless: false,
- isBackgroundTaskNodeOnly: false,
- claimStrategy: CLAIM_STRATEGY_DEFAULT,
- })
- ).toBe(DEFAULT_CAPACITY);
-
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: true,
- isServerless: false,
- isBackgroundTaskNodeOnly: true,
- claimStrategy: CLAIM_STRATEGY_DEFAULT,
- })
- ).toBe(DEFAULT_CAPACITY);
- });
-
- it('returns default capacity when serverless', () => {
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: false,
- isServerless: true,
- isBackgroundTaskNodeOnly: false,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(DEFAULT_CAPACITY);
-
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: false,
- isServerless: true,
- isBackgroundTaskNodeOnly: true,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(DEFAULT_CAPACITY);
-
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: true,
- isServerless: true,
- isBackgroundTaskNodeOnly: false,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(DEFAULT_CAPACITY);
-
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: true,
- isServerless: true,
- isBackgroundTaskNodeOnly: true,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(DEFAULT_CAPACITY);
- });
-
- it('returns capacity as expected when in cloud and claim strategy is mget', () => {
- // 1GB
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: true,
- isServerless: false,
- isBackgroundTaskNodeOnly: false,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(10);
-
- // 1GB but somehow background task node only is true
- expect(
- getDefaultCapacity({
- heapSizeLimit: 851443712,
- isCloud: true,
- isServerless: false,
- isBackgroundTaskNodeOnly: true,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(10);
-
- // 2GB
- expect(
- getDefaultCapacity({
- heapSizeLimit: 1702887424,
- isCloud: true,
- isServerless: false,
- isBackgroundTaskNodeOnly: false,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(15);
-
- // 2GB but somehow background task node only is true
- expect(
- getDefaultCapacity({
- heapSizeLimit: 1702887424,
- isCloud: true,
- isServerless: false,
- isBackgroundTaskNodeOnly: true,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(15);
-
- // 4GB
- expect(
- getDefaultCapacity({
- heapSizeLimit: 3405774848,
- isCloud: true,
- isServerless: false,
- isBackgroundTaskNodeOnly: false,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(25);
-
- // 4GB background task only
- expect(
- getDefaultCapacity({
- heapSizeLimit: 3405774848,
- isCloud: true,
- isServerless: false,
- isBackgroundTaskNodeOnly: true,
- claimStrategy: CLAIM_STRATEGY_MGET,
- })
- ).toBe(50);
- });
-});
diff --git a/x-pack/plugins/task_manager/server/lib/get_default_capacity.ts b/x-pack/plugins/task_manager/server/lib/get_default_capacity.ts
deleted file mode 100644
index aeafa0f63c4d7..0000000000000
--- a/x-pack/plugins/task_manager/server/lib/get_default_capacity.ts
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { CLAIM_STRATEGY_MGET, DEFAULT_CAPACITY } from '../config';
-
-interface GetDefaultCapacityOpts {
- claimStrategy?: string;
- heapSizeLimit: number;
- isCloud: boolean;
- isServerless: boolean;
- isBackgroundTaskNodeOnly: boolean;
-}
-
-// Map instance size to desired capacity
-const HEAP_TO_CAPACITY_MAP = [
- { minHeap: 0, maxHeap: 1, capacity: 10 },
- { minHeap: 1, maxHeap: 2, capacity: 15 },
- { minHeap: 2, maxHeap: 4, capacity: 25, backgroundTaskNodeOnly: false },
- { minHeap: 2, maxHeap: 4, capacity: 50, backgroundTaskNodeOnly: true },
-];
-
-export function getDefaultCapacity({
- claimStrategy,
- heapSizeLimit: heapSizeLimitInBytes,
- isCloud,
- isServerless,
- isBackgroundTaskNodeOnly,
-}: GetDefaultCapacityOpts) {
- // perform heap size based calculations only in cloud
- if (isCloud && !isServerless && claimStrategy === CLAIM_STRATEGY_MGET) {
- // convert bytes to GB
- const heapSizeLimitInGB = heapSizeLimitInBytes / 1e9;
-
- const config = HEAP_TO_CAPACITY_MAP.find((map) => {
- return (
- heapSizeLimitInGB > map.minHeap &&
- heapSizeLimitInGB <= map.maxHeap &&
- (map.backgroundTaskNodeOnly === undefined ||
- isBackgroundTaskNodeOnly === map.backgroundTaskNodeOnly)
- );
- });
-
- return config?.capacity ?? DEFAULT_CAPACITY;
- }
-
- return DEFAULT_CAPACITY;
-}
diff --git a/x-pack/plugins/task_manager/server/lib/log_health_metrics.test.ts b/x-pack/plugins/task_manager/server/lib/log_health_metrics.test.ts
index a39568df5fdd2..ea0793b60266b 100644
--- a/x-pack/plugins/task_manager/server/lib/log_health_metrics.test.ts
+++ b/x-pack/plugins/task_manager/server/lib/log_health_metrics.test.ts
@@ -435,8 +435,7 @@ function getMockMonitoredHealth(overrides = {}): MonitoredHealth {
timestamp: new Date().toISOString(),
status: HealthStatus.OK,
value: {
- capacity: { config: 10, as_cost: 20, as_workers: 10 },
- claim_strategy: 'default',
+ max_workers: 10,
poll_interval: 3000,
request_capacity: 1000,
monitored_aggregated_stats_refresh_rate: 5000,
@@ -455,19 +454,16 @@ function getMockMonitoredHealth(overrides = {}): MonitoredHealth {
status: HealthStatus.OK,
value: {
count: 4,
- cost: 8,
task_types: {
- actions_telemetry: { count: 2, cost: 4, status: { idle: 2 } },
- alerting_telemetry: { count: 1, cost: 2, status: { idle: 1 } },
- session_cleanup: { count: 1, cost: 2, status: { idle: 1 } },
+ actions_telemetry: { count: 2, status: { idle: 2 } },
+ alerting_telemetry: { count: 1, status: { idle: 1 } },
+ session_cleanup: { count: 1, status: { idle: 1 } },
},
schedule: [],
overdue: 0,
- overdue_cost: 0,
overdue_non_recurring: 0,
estimatedScheduleDensity: [],
non_recurring: 20,
- non_recurring_cost: 40,
owner_ids: 2,
estimated_schedule_density: [],
capacity_requirements: {
diff --git a/x-pack/plugins/task_manager/server/metrics/create_aggregator.test.ts b/x-pack/plugins/task_manager/server/metrics/create_aggregator.test.ts
index b1cf9a90b6cb6..309617a8e4cc3 100644
--- a/x-pack/plugins/task_manager/server/metrics/create_aggregator.test.ts
+++ b/x-pack/plugins/task_manager/server/metrics/create_aggregator.test.ts
@@ -45,6 +45,7 @@ const config: TaskManagerConfig = {
warn_threshold: 5000,
},
max_attempts: 9,
+ max_workers: 10,
metrics_reset_interval: 30000,
monitored_aggregated_stats_refresh_rate: 5000,
monitored_stats_health_verbose_log: {
diff --git a/x-pack/plugins/task_manager/server/monitoring/background_task_utilization_statistics.ts b/x-pack/plugins/task_manager/server/monitoring/background_task_utilization_statistics.ts
index 5a9a9e07aadf7..837f29c83f108 100644
--- a/x-pack/plugins/task_manager/server/monitoring/background_task_utilization_statistics.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/background_task_utilization_statistics.ts
@@ -21,7 +21,7 @@ import {
} from '../task_events';
import { MonitoredStat } from './monitoring_stats_stream';
import { AggregatedStat, AggregatedStatProvider } from '../lib/runtime_statistics_aggregator';
-import { createRunningAveragedStat } from './task_run_calculators';
+import { createRunningAveragedStat } from './task_run_calcultors';
import { DEFAULT_WORKER_UTILIZATION_RUNNING_AVERAGE_WINDOW } from '../config';
export interface PublicBackgroundTaskUtilizationStat extends JsonObject {
diff --git a/x-pack/plugins/task_manager/server/monitoring/capacity_estimation.test.ts b/x-pack/plugins/task_manager/server/monitoring/capacity_estimation.test.ts
index 9791ac805e500..263f2e9987b7c 100644
--- a/x-pack/plugins/task_manager/server/monitoring/capacity_estimation.test.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/capacity_estimation.test.ts
@@ -21,7 +21,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: 1,
overdue_non_recurring: 0,
@@ -77,7 +77,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: 1,
overdue_non_recurring: 0,
@@ -135,7 +135,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: 1,
overdue_non_recurring: 0,
@@ -172,7 +172,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: 1,
overdue_non_recurring: 0,
@@ -228,7 +228,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
// 0 active tasks at this moment in time, so no owners identifiable
owner_ids: 0,
@@ -285,7 +285,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: 3,
overdue_non_recurring: 0,
@@ -347,7 +347,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: provisionedKibanaInstances,
overdue_non_recurring: 0,
@@ -428,7 +428,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: provisionedKibanaInstances,
overdue_non_recurring: 0,
@@ -510,7 +510,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: 1,
overdue_non_recurring: 0,
@@ -578,7 +578,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: 1,
overdue_non_recurring: 0,
@@ -643,7 +643,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: 1,
overdue_non_recurring: 0,
@@ -708,7 +708,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: 1,
overdue_non_recurring: 0,
@@ -784,7 +784,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
owner_ids: 1,
overdue_non_recurring: 0,
@@ -862,7 +862,7 @@ describe('estimateCapacity', () => {
estimateCapacity(
logger,
mockStats(
- { capacity: { config: 10, as_cost: 20, as_workers: 10 }, poll_interval: 3000 },
+ { max_workers: 10, poll_interval: 3000 },
{
overdue: undefined,
owner_ids: 1,
@@ -949,8 +949,7 @@ function mockStats(
status: HealthStatus.OK,
timestamp: new Date().toISOString(),
value: {
- capacity: { config: 10, as_cost: 20, as_workers: 10 },
- claim_strategy: 'default',
+ max_workers: 0,
poll_interval: 0,
request_capacity: 1000,
monitored_aggregated_stats_refresh_rate: 5000,
@@ -970,19 +969,16 @@ function mockStats(
timestamp: new Date().toISOString(),
value: {
count: 4,
- cost: 8,
task_types: {
- actions_telemetry: { count: 2, cost: 4, status: { idle: 2 } },
- alerting_telemetry: { count: 1, cost: 2, status: { idle: 1 } },
- session_cleanup: { count: 1, cost: 2, status: { idle: 1 } },
+ actions_telemetry: { count: 2, status: { idle: 2 } },
+ alerting_telemetry: { count: 1, status: { idle: 1 } },
+ session_cleanup: { count: 1, status: { idle: 1 } },
},
schedule: [],
overdue: 0,
- overdue_cost: 0,
overdue_non_recurring: 0,
estimated_schedule_density: [],
non_recurring: 20,
- non_recurring_cost: 40,
owner_ids: 2,
capacity_requirements: {
per_minute: 150,
diff --git a/x-pack/plugins/task_manager/server/monitoring/capacity_estimation.ts b/x-pack/plugins/task_manager/server/monitoring/capacity_estimation.ts
index d1c2f3591ea22..b12382f16e27b 100644
--- a/x-pack/plugins/task_manager/server/monitoring/capacity_estimation.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/capacity_estimation.ts
@@ -10,7 +10,7 @@ import stats from 'stats-lite';
import { JsonObject } from '@kbn/utility-types';
import { Logger } from '@kbn/core/server';
import { RawMonitoringStats, RawMonitoredStat, HealthStatus } from './monitoring_stats_stream';
-import { AveragedStat } from './task_run_calculators';
+import { AveragedStat } from './task_run_calcultors';
import { TaskPersistenceTypes } from './task_run_statistics';
import { asErr, asOk, map, Result } from '../lib/result_type';
@@ -61,10 +61,8 @@ export function estimateCapacity(
non_recurring: percentageOfExecutionsUsedByNonRecurringTasks,
} = capacityStats.runtime.value.execution.persistence;
const { overdue, capacity_requirements: capacityRequirements } = workload;
- const {
- poll_interval: pollInterval,
- capacity: { config: configuredCapacity },
- } = capacityStats.configuration.value;
+ const { poll_interval: pollInterval, max_workers: maxWorkers } =
+ capacityStats.configuration.value;
/**
* On average, how many polling cycles does it take to execute a task?
@@ -80,10 +78,10 @@ export function estimateCapacity(
);
/**
- * Given the current configuration how much capacity do we have to run normal cost tasks?
+ * Given the current configuration how much task capacity do we have?
*/
const capacityPerMinutePerKibana = Math.round(
- ((60 * 1000) / (averagePollIntervalsPerExecution * pollInterval)) * configuredCapacity
+ ((60 * 1000) / (averagePollIntervalsPerExecution * pollInterval)) * maxWorkers
);
/**
diff --git a/x-pack/plugins/task_manager/server/monitoring/configuration_statistics.test.ts b/x-pack/plugins/task_manager/server/monitoring/configuration_statistics.test.ts
index 0b5387b66dece..822356e2d6534 100644
--- a/x-pack/plugins/task_manager/server/monitoring/configuration_statistics.test.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/configuration_statistics.test.ts
@@ -13,6 +13,7 @@ import { TaskManagerConfig } from '../config';
describe('Configuration Statistics Aggregator', () => {
test('merges the static config with the merged configs', async () => {
const configuration: TaskManagerConfig = {
+ max_workers: 10,
max_attempts: 9,
poll_interval: 6000000,
allow_reading_invalid_state: false,
@@ -54,8 +55,7 @@ describe('Configuration Statistics Aggregator', () => {
};
const managedConfig = {
- startingCapacity: 10,
- capacityConfiguration$: new Subject(),
+ maxWorkersConfiguration$: new Subject(),
pollIntervalConfiguration$: new Subject(),
};
@@ -65,12 +65,7 @@ describe('Configuration Statistics Aggregator', () => {
.pipe(take(3), bufferCount(3))
.subscribe(([initial, updatedWorkers, updatedInterval]) => {
expect(initial.value).toEqual({
- capacity: {
- config: 10,
- as_workers: 10,
- as_cost: 20,
- },
- claim_strategy: 'default',
+ max_workers: 10,
poll_interval: 6000000,
request_capacity: 1000,
monitored_aggregated_stats_refresh_rate: 5000,
@@ -84,12 +79,7 @@ describe('Configuration Statistics Aggregator', () => {
},
});
expect(updatedWorkers.value).toEqual({
- capacity: {
- config: 8,
- as_workers: 8,
- as_cost: 16,
- },
- claim_strategy: 'default',
+ max_workers: 8,
poll_interval: 6000000,
request_capacity: 1000,
monitored_aggregated_stats_refresh_rate: 5000,
@@ -103,12 +93,7 @@ describe('Configuration Statistics Aggregator', () => {
},
});
expect(updatedInterval.value).toEqual({
- capacity: {
- config: 8,
- as_workers: 8,
- as_cost: 16,
- },
- claim_strategy: 'default',
+ max_workers: 8,
poll_interval: 3000,
request_capacity: 1000,
monitored_aggregated_stats_refresh_rate: 5000,
@@ -123,7 +108,7 @@ describe('Configuration Statistics Aggregator', () => {
});
resolve();
}, reject);
- managedConfig.capacityConfiguration$.next(8);
+ managedConfig.maxWorkersConfiguration$.next(8);
managedConfig.pollIntervalConfiguration$.next(3000);
} catch (error) {
reject(error);
diff --git a/x-pack/plugins/task_manager/server/monitoring/configuration_statistics.ts b/x-pack/plugins/task_manager/server/monitoring/configuration_statistics.ts
index c606b63694b0f..dc3221351a33e 100644
--- a/x-pack/plugins/task_manager/server/monitoring/configuration_statistics.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/configuration_statistics.ts
@@ -8,11 +8,9 @@
import { combineLatest, of } from 'rxjs';
import { pick, merge } from 'lodash';
import { map, startWith } from 'rxjs';
-import { JsonObject } from '@kbn/utility-types';
import { AggregatedStatProvider } from '../lib/runtime_statistics_aggregator';
-import { CLAIM_STRATEGY_DEFAULT, TaskManagerConfig } from '../config';
+import { TaskManagerConfig } from '../config';
import { ManagedConfiguration } from '../lib/create_managed_configuration';
-import { getCapacityInCost, getCapacityInWorkers } from '../task_pool';
const CONFIG_FIELDS_TO_EXPOSE = [
'request_capacity',
@@ -21,19 +19,10 @@ const CONFIG_FIELDS_TO_EXPOSE = [
'monitored_task_execution_thresholds',
] as const;
-interface CapacityConfig extends JsonObject {
- capacity: {
- config: number;
- as_workers: number;
- as_cost: number;
- };
-}
-
export type ConfigStat = Pick<
TaskManagerConfig,
- 'poll_interval' | 'claim_strategy' | (typeof CONFIG_FIELDS_TO_EXPOSE)[number]
-> &
- CapacityConfig;
+ 'max_workers' | 'poll_interval' | (typeof CONFIG_FIELDS_TO_EXPOSE)[number]
+>;
export function createConfigurationAggregator(
config: TaskManagerConfig,
@@ -41,21 +30,16 @@ export function createConfigurationAggregator(
): AggregatedStatProvider {
return combineLatest([
of(pick(config, ...CONFIG_FIELDS_TO_EXPOSE)),
- of({ claim_strategy: config.claim_strategy ?? CLAIM_STRATEGY_DEFAULT }),
managedConfig.pollIntervalConfiguration$.pipe(
startWith(config.poll_interval),
map>((pollInterval) => ({
poll_interval: pollInterval,
}))
),
- managedConfig.capacityConfiguration$.pipe(
- startWith(managedConfig.startingCapacity),
- map((capacity) => ({
- capacity: {
- config: capacity,
- as_workers: getCapacityInWorkers(capacity),
- as_cost: getCapacityInCost(capacity),
- },
+ managedConfig.maxWorkersConfiguration$.pipe(
+ startWith(config.max_workers),
+ map>((maxWorkers) => ({
+ max_workers: maxWorkers,
}))
),
]).pipe(
diff --git a/x-pack/plugins/task_manager/server/monitoring/ephemeral_task_statistics.test.ts b/x-pack/plugins/task_manager/server/monitoring/ephemeral_task_statistics.test.ts
index ac16070d7c131..d7135837e052e 100644
--- a/x-pack/plugins/task_manager/server/monitoring/ephemeral_task_statistics.test.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/ephemeral_task_statistics.test.ts
@@ -176,11 +176,11 @@ describe('Ephemeral Task Statistics', () => {
});
const runningAverageWindowSize = 5;
- const capacity = 10;
+ const maxWorkers = 10;
const ephemeralTaskAggregator = createEphemeralTaskAggregator(
ephemeralTaskLifecycle,
runningAverageWindowSize,
- capacity
+ maxWorkers
);
function expectWindowEqualsUpdate(
@@ -229,7 +229,7 @@ describe('Ephemeral Task Statistics', () => {
});
});
-test('returns the average load added per polling cycle cycle by ephemeral tasks when load exceeds capacity', async () => {
+test('returns the average load added per polling cycle cycle by ephemeral tasks when load exceeds max workers', async () => {
const tasksExecuted = [0, 5, 10, 20, 15, 10, 5, 0, 0, 0, 0, 0];
const expectedLoad = [0, 50, 100, 200, 150, 100, 50, 0, 0, 0, 0, 0];
@@ -241,11 +241,11 @@ test('returns the average load added per polling cycle cycle by ephemeral tasks
});
const runningAverageWindowSize = 5;
- const capacity = 10;
+ const maxWorkers = 10;
const ephemeralTaskAggregator = createEphemeralTaskAggregator(
ephemeralTaskLifecycle,
runningAverageWindowSize,
- capacity
+ maxWorkers
);
function expectWindowEqualsUpdate(
diff --git a/x-pack/plugins/task_manager/server/monitoring/ephemeral_task_statistics.ts b/x-pack/plugins/task_manager/server/monitoring/ephemeral_task_statistics.ts
index d02080a56a1aa..b77eae1080fbc 100644
--- a/x-pack/plugins/task_manager/server/monitoring/ephemeral_task_statistics.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/ephemeral_task_statistics.ts
@@ -17,7 +17,7 @@ import {
AveragedStat,
calculateRunningAverage,
createRunningAveragedStat,
-} from './task_run_calculators';
+} from './task_run_calcultors';
import { HealthStatus } from './monitoring_stats_stream';
export interface EphemeralTaskStat extends JsonObject {
@@ -35,7 +35,7 @@ export interface SummarizedEphemeralTaskStat extends JsonObject {
export function createEphemeralTaskAggregator(
ephemeralTaskLifecycle: EphemeralTaskLifecycle,
runningAverageWindowSize: number,
- capacity: number
+ maxWorkers: number
): AggregatedStatProvider {
const ephemeralTaskRunEvents$ = ephemeralTaskLifecycle.events.pipe(
filter((taskEvent: TaskLifecycleEvent) => isTaskRunEvent(taskEvent))
@@ -70,7 +70,7 @@ export function createEphemeralTaskAggregator(
map(([tasksRanSincePreviousQueueSize, ephemeralQueueSize]) => ({
queuedTasks: ephemeralQueuedTasksQueue(ephemeralQueueSize),
executionsPerCycle: ephemeralQueueExecutionsPerCycleQueue(tasksRanSincePreviousQueueSize),
- load: ephemeralTaskLoadQueue(calculateWorkerLoad(capacity, tasksRanSincePreviousQueueSize)),
+ load: ephemeralTaskLoadQueue(calculateWorkerLoad(maxWorkers, tasksRanSincePreviousQueueSize)),
})),
startWith({
queuedTasks: [],
diff --git a/x-pack/plugins/task_manager/server/monitoring/index.ts b/x-pack/plugins/task_manager/server/monitoring/index.ts
index 5dc024b53de10..9ee32e97d7758 100644
--- a/x-pack/plugins/task_manager/server/monitoring/index.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/index.ts
@@ -18,7 +18,6 @@ import { TaskPollingLifecycle } from '../polling_lifecycle';
import { ManagedConfiguration } from '../lib/create_managed_configuration';
import { EphemeralTaskLifecycle } from '../ephemeral_task_lifecycle';
import { AdHocTaskCounter } from '../lib/adhoc_task_counter';
-import { TaskTypeDictionary } from '../task_type_dictionary';
export type { MonitoringStats, RawMonitoringStats } from './monitoring_stats_stream';
export {
@@ -28,20 +27,27 @@ export {
createMonitoringStatsStream,
} from './monitoring_stats_stream';
-export interface CreateMonitoringStatsOpts {
- taskStore: TaskStore;
- elasticsearchAndSOAvailability$: Observable;
- config: TaskManagerConfig;
- managedConfig: ManagedConfiguration;
- logger: Logger;
- adHocTaskCounter: AdHocTaskCounter;
- taskDefinitions: TaskTypeDictionary;
- taskPollingLifecycle?: TaskPollingLifecycle;
- ephemeralTaskLifecycle?: EphemeralTaskLifecycle;
-}
-
export function createMonitoringStats(
- opts: CreateMonitoringStatsOpts
+ taskStore: TaskStore,
+ elasticsearchAndSOAvailability$: Observable,
+ config: TaskManagerConfig,
+ managedConfig: ManagedConfiguration,
+ logger: Logger,
+ adHocTaskCounter: AdHocTaskCounter,
+ taskPollingLifecycle?: TaskPollingLifecycle,
+ ephemeralTaskLifecycle?: EphemeralTaskLifecycle
): Observable {
- return createMonitoringStatsStream(createAggregators(opts));
+ return createMonitoringStatsStream(
+ createAggregators(
+ taskStore,
+ elasticsearchAndSOAvailability$,
+ config,
+ managedConfig,
+ logger,
+ adHocTaskCounter,
+ taskPollingLifecycle,
+ ephemeralTaskLifecycle
+ ),
+ config
+ );
}
diff --git a/x-pack/plugins/task_manager/server/monitoring/monitoring_stats_stream.test.ts b/x-pack/plugins/task_manager/server/monitoring/monitoring_stats_stream.test.ts
index 075b663e4ce83..f4da53871ffa3 100644
--- a/x-pack/plugins/task_manager/server/monitoring/monitoring_stats_stream.test.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/monitoring_stats_stream.test.ts
@@ -5,6 +5,7 @@
* 2.0.
*/
+import { TaskManagerConfig } from '../config';
import { of, Subject } from 'rxjs';
import { take, bufferCount } from 'rxjs';
import { createMonitoringStatsStream } from './monitoring_stats_stream';
@@ -16,9 +17,51 @@ beforeEach(() => {
});
describe('createMonitoringStatsStream', () => {
+ const configuration: TaskManagerConfig = {
+ max_workers: 10,
+ max_attempts: 9,
+ poll_interval: 6000000,
+ allow_reading_invalid_state: false,
+ version_conflict_threshold: 80,
+ monitored_stats_required_freshness: 6000000,
+ request_capacity: 1000,
+ monitored_aggregated_stats_refresh_rate: 5000,
+ monitored_stats_health_verbose_log: {
+ enabled: false,
+ level: 'debug' as const,
+ warn_delayed_task_start_in_seconds: 60,
+ },
+ monitored_stats_running_average_window: 50,
+ monitored_task_execution_thresholds: {
+ default: {
+ error_threshold: 90,
+ warn_threshold: 80,
+ },
+ custom: {},
+ },
+ ephemeral_tasks: {
+ enabled: true,
+ request_capacity: 10,
+ },
+ unsafe: {
+ exclude_task_types: [],
+ authenticate_background_task_utilization: true,
+ },
+ event_loop_delay: {
+ monitor: true,
+ warn_threshold: 5000,
+ },
+ worker_utilization_running_average_window: 5,
+ metrics_reset_interval: 3000,
+ claim_strategy: 'default',
+ request_timeouts: {
+ update_by_query: 1000,
+ },
+ };
+
it('returns the initial config used to configure Task Manager', async () => {
return new Promise((resolve) => {
- createMonitoringStatsStream(of())
+ createMonitoringStatsStream(of(), configuration)
.pipe(take(1))
.subscribe((firstValue) => {
expect(firstValue.stats).toEqual({});
@@ -31,7 +74,7 @@ describe('createMonitoringStatsStream', () => {
const aggregatedStats$ = new Subject();
return new Promise((resolve) => {
- createMonitoringStatsStream(aggregatedStats$)
+ createMonitoringStatsStream(aggregatedStats$, configuration)
.pipe(take(3), bufferCount(3))
.subscribe(([initialValue, secondValue, thirdValue]) => {
expect(initialValue.stats).toMatchObject({
@@ -39,7 +82,7 @@ describe('createMonitoringStatsStream', () => {
stats: {
configuration: {
value: {
- capacity: 10,
+ max_workers: 10,
poll_interval: 6000000,
request_capacity: 1000,
monitored_aggregated_stats_refresh_rate: 5000,
@@ -72,7 +115,7 @@ describe('createMonitoringStatsStream', () => {
configuration: {
timestamp: expect.any(String),
value: {
- capacity: 10,
+ max_workers: 10,
poll_interval: 6000000,
request_capacity: 1000,
monitored_aggregated_stats_refresh_rate: 5000,
@@ -105,7 +148,7 @@ describe('createMonitoringStatsStream', () => {
configuration: {
timestamp: expect.any(String),
value: {
- capacity: 10,
+ max_workers: 10,
poll_interval: 6000000,
request_capacity: 1000,
monitored_aggregated_stats_refresh_rate: 5000,
diff --git a/x-pack/plugins/task_manager/server/monitoring/monitoring_stats_stream.ts b/x-pack/plugins/task_manager/server/monitoring/monitoring_stats_stream.ts
index e1bffb55d54fa..5ee6465dae0eb 100644
--- a/x-pack/plugins/task_manager/server/monitoring/monitoring_stats_stream.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/monitoring_stats_stream.ts
@@ -10,6 +10,8 @@ import { map, scan } from 'rxjs';
import { set } from '@kbn/safer-lodash-set';
import { Logger } from '@kbn/core/server';
import { JsonObject } from '@kbn/utility-types';
+import { TaskStore } from '../task_store';
+import { TaskPollingLifecycle } from '../polling_lifecycle';
import {
createWorkloadAggregator,
summarizeWorkloadStat,
@@ -35,9 +37,11 @@ import {
import { ConfigStat, createConfigurationAggregator } from './configuration_statistics';
import { TaskManagerConfig } from '../config';
+import { ManagedConfiguration } from '../lib/create_managed_configuration';
+import { EphemeralTaskLifecycle } from '../ephemeral_task_lifecycle';
import { CapacityEstimationStat, withCapacityEstimate } from './capacity_estimation';
+import { AdHocTaskCounter } from '../lib/adhoc_task_counter';
import { AggregatedStatProvider } from '../lib/runtime_statistics_aggregator';
-import { CreateMonitoringStatsOpts } from '.';
export interface MonitoringStats {
last_update: string;
@@ -77,28 +81,26 @@ export interface RawMonitoringStats {
};
}
-export function createAggregators({
- taskStore,
- elasticsearchAndSOAvailability$,
- config,
- managedConfig,
- logger,
- taskDefinitions,
- adHocTaskCounter,
- taskPollingLifecycle,
- ephemeralTaskLifecycle,
-}: CreateMonitoringStatsOpts): AggregatedStatProvider {
+export function createAggregators(
+ taskStore: TaskStore,
+ elasticsearchAndSOAvailability$: Observable,
+ config: TaskManagerConfig,
+ managedConfig: ManagedConfiguration,
+ logger: Logger,
+ adHocTaskCounter: AdHocTaskCounter,
+ taskPollingLifecycle?: TaskPollingLifecycle,
+ ephemeralTaskLifecycle?: EphemeralTaskLifecycle
+): AggregatedStatProvider {
const aggregators: AggregatedStatProvider[] = [
createConfigurationAggregator(config, managedConfig),
- createWorkloadAggregator({
+ createWorkloadAggregator(
taskStore,
elasticsearchAndSOAvailability$,
- refreshInterval: config.monitored_aggregated_stats_refresh_rate,
- pollInterval: config.poll_interval,
- logger,
- taskDefinitions,
- }),
+ config.monitored_aggregated_stats_refresh_rate,
+ config.poll_interval,
+ logger
+ ),
];
if (taskPollingLifecycle) {
aggregators.push(
@@ -116,7 +118,7 @@ export function createAggregators({
createEphemeralTaskAggregator(
ephemeralTaskLifecycle,
config.monitored_stats_running_average_window,
- managedConfig.startingCapacity
+ config.max_workers
)
);
}
@@ -124,7 +126,8 @@ export function createAggregators({
}
export function createMonitoringStatsStream(
- provider$: AggregatedStatProvider
+ provider$: AggregatedStatProvider,
+ config: TaskManagerConfig
): Observable {
const initialStats = {
last_update: new Date().toISOString(),
diff --git a/x-pack/plugins/task_manager/server/monitoring/task_run_calculators.test.ts b/x-pack/plugins/task_manager/server/monitoring/task_run_calcultors.test.ts
similarity index 98%
rename from x-pack/plugins/task_manager/server/monitoring/task_run_calculators.test.ts
rename to x-pack/plugins/task_manager/server/monitoring/task_run_calcultors.test.ts
index 46df2b1b21d42..b5f6be8b7524d 100644
--- a/x-pack/plugins/task_manager/server/monitoring/task_run_calculators.test.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/task_run_calcultors.test.ts
@@ -12,7 +12,7 @@ import {
calculateFrequency,
createRunningAveragedStat,
createMapOfRunningAveragedStats,
-} from './task_run_calculators';
+} from './task_run_calcultors';
describe('calculateRunningAverage', () => {
test('calculates the running average and median of a window of values', async () => {
diff --git a/x-pack/plugins/task_manager/server/monitoring/task_run_calculators.ts b/x-pack/plugins/task_manager/server/monitoring/task_run_calcultors.ts
similarity index 100%
rename from x-pack/plugins/task_manager/server/monitoring/task_run_calculators.ts
rename to x-pack/plugins/task_manager/server/monitoring/task_run_calcultors.ts
diff --git a/x-pack/plugins/task_manager/server/monitoring/task_run_statistics.ts b/x-pack/plugins/task_manager/server/monitoring/task_run_statistics.ts
index 517b29a54cd64..6a7f10b7e75b6 100644
--- a/x-pack/plugins/task_manager/server/monitoring/task_run_statistics.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/task_run_statistics.ts
@@ -35,7 +35,7 @@ import {
calculateFrequency,
createRunningAveragedStat,
createMapOfRunningAveragedStats,
-} from './task_run_calculators';
+} from './task_run_calcultors';
import { HealthStatus } from './monitoring_stats_stream';
import { TaskPollingLifecycle } from '../polling_lifecycle';
import { TaskExecutionFailureThreshold, TaskManagerConfig } from '../config';
diff --git a/x-pack/plugins/task_manager/server/monitoring/workload_statistics.test.ts b/x-pack/plugins/task_manager/server/monitoring/workload_statistics.test.ts
index 2289c00b6405e..7ef860efa783a 100644
--- a/x-pack/plugins/task_manager/server/monitoring/workload_statistics.test.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/workload_statistics.test.ts
@@ -15,14 +15,13 @@ import {
padBuckets,
estimateRecurringTaskScheduling,
} from './workload_statistics';
-import { ConcreteTaskInstance, TaskCost } from '../task';
+import { ConcreteTaskInstance } from '../task';
import { times } from 'lodash';
import { taskStoreMock } from '../task_store.mock';
import { of, Subject } from 'rxjs';
import { sleep } from '../test_utils';
import type * as estypes from '@elastic/elasticsearch/lib/api/typesWithBodyKey';
-import { TaskTypeDictionary } from '../task_type_dictionary';
type ResponseWithAggs = Omit, 'aggregations'> & {
aggregations: WorkloadAggregationResponse;
@@ -33,98 +32,52 @@ const asApiResponse = (body: ResponseWithAggs) =>
.createSuccessTransportRequestPromise(body as estypes.SearchResponse)
.then((res) => res.body as ResponseWithAggs);
-const logger = loggingSystemMock.create().get();
-
-const definitions = new TaskTypeDictionary(logger);
-definitions.registerTaskDefinitions({
- report: {
- title: 'report',
- cost: TaskCost.ExtraLarge,
- createTaskRunner: jest.fn(),
- },
- foo: {
- title: 'foo',
- createTaskRunner: jest.fn(),
- },
- bar: {
- title: 'bar',
- cost: TaskCost.Tiny,
- createTaskRunner: jest.fn(),
- },
-});
describe('Workload Statistics Aggregator', () => {
- beforeEach(() => {
- jest.resetAllMocks();
- });
-
test('queries the Task Store at a fixed interval for the current workload', async () => {
const taskStore = taskStoreMock.create({});
taskStore.aggregate.mockResolvedValue(
asApiResponse({
- hits: { hits: [], max_score: 0, total: { value: 3, relation: 'eq' } },
+ hits: {
+ hits: [],
+ max_score: 0,
+ total: { value: 0, relation: 'eq' },
+ },
took: 1,
timed_out: false,
- _shards: { total: 1, successful: 1, skipped: 1, failed: 0 },
+ _shards: {
+ total: 1,
+ successful: 1,
+ skipped: 1,
+ failed: 0,
+ },
aggregations: {
taskType: {
- buckets: [
- {
- key: 'foo',
- doc_count: 1,
- status: {
- doc_count_error_upper_bound: 0,
- sum_other_doc_count: 0,
- buckets: [{ key: 'idle', doc_count: 1 }],
- },
- },
- {
- key: 'bar',
- doc_count: 1,
- status: {
- doc_count_error_upper_bound: 0,
- sum_other_doc_count: 0,
- buckets: [{ key: 'claiming', doc_count: 1 }],
- },
- },
- {
- key: 'report',
- doc_count: 1,
- status: {
- doc_count_error_upper_bound: 0,
- sum_other_doc_count: 0,
- buckets: [{ key: 'idle', doc_count: 1 }],
- },
- },
- ],
+ buckets: [],
doc_count_error_upper_bound: 0,
sum_other_doc_count: 0,
},
schedule: {
- buckets: [{ key: '1m', doc_count: 8 }],
+ buckets: [],
doc_count_error_upper_bound: 0,
sum_other_doc_count: 0,
},
nonRecurringTasks: {
- doc_count: 1,
- taskType: {
- buckets: [{ key: 'report', doc_count: 1 }],
- doc_count_error_upper_bound: 0,
- sum_other_doc_count: 0,
+ doc_count: 13,
+ },
+ ownerIds: {
+ ownerIds: {
+ value: 1,
},
},
- ownerIds: { ownerIds: { value: 1 } },
// The `FiltersAggregate` doesn't cover the case of a nested `AggregationsAggregationContainer`, in which `FiltersAggregate`
// would not have a `buckets` property, but rather a keyed property that's inferred from the request.
// @ts-expect-error
idleTasks: {
doc_count: 0,
overdue: {
- doc_count: 1,
- nonRecurring: { doc_count: 0 },
- taskTypes: {
- buckets: [{ key: 'foo', doc_count: 1 }],
- doc_count_error_upper_bound: 0,
- sum_other_doc_count: 0,
+ doc_count: 0,
+ nonRecurring: {
+ doc_count: 0,
},
},
scheduleDensity: {
@@ -136,7 +89,9 @@ describe('Workload Statistics Aggregator', () => {
to: 1.601651976274e12,
to_as_string: '2020-10-02T15:19:36.274Z',
doc_count: 0,
- histogram: { buckets: [] },
+ histogram: {
+ buckets: [],
+ },
},
],
},
@@ -145,51 +100,87 @@ describe('Workload Statistics Aggregator', () => {
})
);
- const workloadAggregator = createWorkloadAggregator({
+ const workloadAggregator = createWorkloadAggregator(
taskStore,
- elasticsearchAndSOAvailability$: of(true),
- refreshInterval: 10,
- pollInterval: 3000,
- logger,
- taskDefinitions: definitions,
- });
+ of(true),
+ 10,
+ 3000,
+ loggingSystemMock.create().get()
+ );
return new Promise((resolve) => {
workloadAggregator.pipe(first()).subscribe(() => {
expect(taskStore.aggregate).toHaveBeenCalledWith({
aggs: {
taskType: {
- terms: { size: 3, field: 'task.taskType' },
- aggs: { status: { terms: { field: 'task.status' } } },
+ terms: { size: 100, field: 'task.taskType' },
+ aggs: {
+ status: {
+ terms: { field: 'task.status' },
+ },
+ },
},
schedule: {
- terms: { field: 'task.schedule.interval', size: 100 },
+ terms: {
+ field: 'task.schedule.interval',
+ size: 100,
+ },
},
nonRecurringTasks: {
- missing: { field: 'task.schedule.interval' },
- aggs: { taskType: { terms: { size: 3, field: 'task.taskType' } } },
+ missing: { field: 'task.schedule' },
},
ownerIds: {
- filter: { range: { 'task.startedAt': { gte: 'now-1w/w' } } },
- aggs: { ownerIds: { cardinality: { field: 'task.ownerId' } } },
+ filter: {
+ range: {
+ 'task.startedAt': {
+ gte: 'now-1w/w',
+ },
+ },
+ },
+ aggs: {
+ ownerIds: {
+ cardinality: {
+ field: 'task.ownerId',
+ },
+ },
+ },
},
idleTasks: {
- filter: { term: { 'task.status': 'idle' } },
+ filter: {
+ term: { 'task.status': 'idle' },
+ },
aggs: {
scheduleDensity: {
- range: { field: 'task.runAt', ranges: [{ from: 'now', to: 'now+1m' }] },
+ range: {
+ field: 'task.runAt',
+ ranges: [{ from: 'now', to: 'now+1m' }],
+ },
aggs: {
histogram: {
- date_histogram: { field: 'task.runAt', fixed_interval: '3s' },
- aggs: { interval: { terms: { field: 'task.schedule.interval' } } },
+ date_histogram: {
+ field: 'task.runAt',
+ fixed_interval: '3s',
+ },
+ aggs: {
+ interval: {
+ terms: {
+ field: 'task.schedule.interval',
+ },
+ },
+ },
},
},
},
overdue: {
- filter: { range: { 'task.runAt': { lt: 'now' } } },
+ filter: {
+ range: {
+ 'task.runAt': { lt: 'now' },
+ },
+ },
aggs: {
- nonRecurring: { missing: { field: 'task.schedule.interval' } },
- taskTypes: { terms: { size: 3, field: 'task.taskType' } },
+ nonRecurring: {
+ missing: { field: 'task.schedule' },
+ },
},
},
},
@@ -203,18 +194,36 @@ describe('Workload Statistics Aggregator', () => {
const mockAggregatedResult = () =>
asApiResponse({
- hits: { hits: [], max_score: 0, total: { value: 4, relation: 'eq' } },
+ hits: {
+ hits: [],
+ max_score: 0,
+ total: { value: 4, relation: 'eq' },
+ },
took: 1,
timed_out: false,
- _shards: { total: 1, successful: 1, skipped: 1, failed: 0 },
+ _shards: {
+ total: 1,
+ successful: 1,
+ skipped: 1,
+ failed: 0,
+ },
aggregations: {
schedule: {
doc_count_error_upper_bound: 0,
sum_other_doc_count: 0,
buckets: [
- { key: '3600s', doc_count: 1 },
- { key: '60s', doc_count: 1 },
- { key: '720m', doc_count: 1 },
+ {
+ key: '3600s',
+ doc_count: 1,
+ },
+ {
+ key: '60s',
+ doc_count: 1,
+ },
+ {
+ key: '720m',
+ doc_count: 1,
+ },
],
},
taskType: {
@@ -222,55 +231,66 @@ describe('Workload Statistics Aggregator', () => {
sum_other_doc_count: 0,
buckets: [
{
- key: 'foo',
+ key: 'actions_telemetry',
doc_count: 2,
status: {
doc_count_error_upper_bound: 0,
sum_other_doc_count: 0,
- buckets: [{ key: 'idle', doc_count: 2 }],
+ buckets: [
+ {
+ key: 'idle',
+ doc_count: 2,
+ },
+ ],
},
},
{
- key: 'bar',
+ key: 'alerting_telemetry',
doc_count: 1,
status: {
doc_count_error_upper_bound: 0,
sum_other_doc_count: 0,
- buckets: [{ key: 'idle', doc_count: 1 }],
+ buckets: [
+ {
+ key: 'idle',
+ doc_count: 1,
+ },
+ ],
},
},
{
- key: 'report',
+ key: 'session_cleanup',
doc_count: 1,
status: {
doc_count_error_upper_bound: 0,
sum_other_doc_count: 0,
- buckets: [{ key: 'idle', doc_count: 1 }],
+ buckets: [
+ {
+ key: 'idle',
+ doc_count: 1,
+ },
+ ],
},
},
],
},
nonRecurringTasks: {
- doc_count: 1,
- taskType: {
- buckets: [{ key: 'report', doc_count: 1 }],
- doc_count_error_upper_bound: 0,
- sum_other_doc_count: 0,
+ doc_count: 13,
+ },
+ ownerIds: {
+ ownerIds: {
+ value: 1,
},
},
- ownerIds: { ownerIds: { value: 1 } },
// The `FiltersAggregate` doesn't cover the case of a nested `AggregationsAggregationContainer`, in which `FiltersAggregate`
// would not have a `buckets` property, but rather a keyed property that's inferred from the request.
// @ts-expect-error
idleTasks: {
- doc_count: 3,
+ doc_count: 13,
overdue: {
- doc_count: 2,
- nonRecurring: { doc_count: 1 },
- taskTypes: {
- buckets: [{ key: 'foo', doc_count: 1 }],
- doc_count_error_upper_bound: 0,
- sum_other_doc_count: 0,
+ doc_count: 6,
+ nonRecurring: {
+ doc_count: 6,
},
},
scheduleDensity: {
@@ -286,25 +306,23 @@ describe('Workload Statistics Aggregator', () => {
const taskStore = taskStoreMock.create({});
taskStore.aggregate.mockResolvedValue(mockAggregatedResult());
- const workloadAggregator = createWorkloadAggregator({
+ const workloadAggregator = createWorkloadAggregator(
taskStore,
- elasticsearchAndSOAvailability$: of(true),
- refreshInterval: 10,
- pollInterval: 3000,
- logger,
- taskDefinitions: definitions,
- });
+ of(true),
+ 10,
+ 3000,
+ loggingSystemMock.create().get()
+ );
return new Promise((resolve) => {
workloadAggregator.pipe(first()).subscribe((result) => {
expect(result.key).toEqual('workload');
expect(result.value).toMatchObject({
count: 4,
- cost: 15,
task_types: {
- foo: { count: 2, cost: 4, status: { idle: 2 } },
- bar: { count: 1, cost: 1, status: { idle: 1 } },
- report: { count: 1, cost: 10, status: { idle: 1 } },
+ actions_telemetry: { count: 2, status: { idle: 2 } },
+ alerting_telemetry: { count: 1, status: { idle: 1 } },
+ session_cleanup: { count: 1, status: { idle: 1 } },
},
});
resolve();
@@ -318,14 +336,13 @@ describe('Workload Statistics Aggregator', () => {
const availability$ = new Subject();
- const workloadAggregator = createWorkloadAggregator({
+ const workloadAggregator = createWorkloadAggregator(
taskStore,
- elasticsearchAndSOAvailability$: of(true),
- refreshInterval: 10,
- pollInterval: 3000,
- logger,
- taskDefinitions: definitions,
- });
+ availability$,
+ 10,
+ 3000,
+ loggingSystemMock.create().get()
+ );
return new Promise(async (resolve, reject) => {
try {
@@ -333,11 +350,25 @@ describe('Workload Statistics Aggregator', () => {
expect(result.key).toEqual('workload');
expect(result.value).toMatchObject({
count: 4,
- cost: 15,
task_types: {
- foo: { count: 2, cost: 4, status: { idle: 2 } },
- bar: { count: 1, cost: 1, status: { idle: 1 } },
- report: { count: 1, cost: 10, status: { idle: 1 } },
+ actions_telemetry: {
+ count: 2,
+ status: {
+ idle: 2,
+ },
+ },
+ alerting_telemetry: {
+ count: 1,
+ status: {
+ idle: 1,
+ },
+ },
+ session_cleanup: {
+ count: 1,
+ status: {
+ idle: 1,
+ },
+ },
},
});
resolve();
@@ -358,22 +389,19 @@ describe('Workload Statistics Aggregator', () => {
const taskStore = taskStoreMock.create({});
taskStore.aggregate.mockResolvedValue(mockAggregatedResult());
- const workloadAggregator = createWorkloadAggregator({
+ const workloadAggregator = createWorkloadAggregator(
taskStore,
- elasticsearchAndSOAvailability$: of(true),
- refreshInterval: 10,
- pollInterval: 3000,
- logger,
- taskDefinitions: definitions,
- });
+ of(true),
+ 10,
+ 3000,
+ loggingSystemMock.create().get()
+ );
return new Promise((resolve) => {
workloadAggregator.pipe(first()).subscribe((result) => {
expect(result.key).toEqual('workload');
expect(result.value).toMatchObject({
- overdue: 2,
- overdue_cost: 2,
- overdue_non_recurring: 1,
+ overdue: 6,
});
resolve();
});
@@ -384,14 +412,13 @@ describe('Workload Statistics Aggregator', () => {
const taskStore = taskStoreMock.create({});
taskStore.aggregate.mockResolvedValue(mockAggregatedResult());
- const workloadAggregator = createWorkloadAggregator({
+ const workloadAggregator = createWorkloadAggregator(
taskStore,
- elasticsearchAndSOAvailability$: of(true),
- refreshInterval: 10,
- pollInterval: 3000,
- logger,
- taskDefinitions: definitions,
- });
+ of(true),
+ 10,
+ 3000,
+ loggingSystemMock.create().get()
+ );
return new Promise((resolve) => {
workloadAggregator.pipe(first()).subscribe((result) => {
@@ -413,14 +440,13 @@ describe('Workload Statistics Aggregator', () => {
const taskStore = taskStoreMock.create({});
taskStore.aggregate.mockResolvedValue(mockAggregatedResult());
- const workloadAggregator = createWorkloadAggregator({
+ const workloadAggregator = createWorkloadAggregator(
taskStore,
- elasticsearchAndSOAvailability$: of(true),
- refreshInterval: 60 * 1000,
- pollInterval: 3000,
- logger,
- taskDefinitions: definitions,
- });
+ of(true),
+ 60 * 1000,
+ 3000,
+ loggingSystemMock.create().get()
+ );
return new Promise((resolve) => {
workloadAggregator.pipe(first()).subscribe(() => {
@@ -452,14 +478,13 @@ describe('Workload Statistics Aggregator', () => {
const taskStore = taskStoreMock.create({});
taskStore.aggregate.mockResolvedValue(mockAggregatedResult());
- const workloadAggregator = createWorkloadAggregator({
+ const workloadAggregator = createWorkloadAggregator(
taskStore,
- elasticsearchAndSOAvailability$: of(true),
- refreshInterval: 15 * 60 * 1000,
- pollInterval: 3000,
- logger,
- taskDefinitions: definitions,
- });
+ of(true),
+ 15 * 60 * 1000,
+ 3000,
+ loggingSystemMock.create().get()
+ );
return new Promise((resolve) => {
workloadAggregator.pipe(first()).subscribe((result) => {
@@ -492,41 +517,42 @@ describe('Workload Statistics Aggregator', () => {
const taskStore = taskStoreMock.create({});
taskStore.aggregate
.mockResolvedValueOnce(
- mockAggregatedResult().then((res) => setTaskTypeCount(res, 'foo', { idle: 2 }))
+ mockAggregatedResult().then((res) =>
+ setTaskTypeCount(res, 'alerting_telemetry', {
+ idle: 2,
+ })
+ )
)
.mockRejectedValueOnce(new Error('Elasticsearch has gone poof'))
.mockResolvedValueOnce(
- mockAggregatedResult().then((res) => setTaskTypeCount(res, 'foo', { idle: 1, failed: 1 }))
+ mockAggregatedResult().then((res) =>
+ setTaskTypeCount(res, 'alerting_telemetry', {
+ idle: 1,
+ failed: 1,
+ })
+ )
);
- const workloadAggregator = createWorkloadAggregator({
- taskStore,
- elasticsearchAndSOAvailability$: of(true),
- refreshInterval: 10,
- pollInterval: 3000,
- logger,
- taskDefinitions: definitions,
- });
+ const logger = loggingSystemMock.create().get();
+ const workloadAggregator = createWorkloadAggregator(taskStore, of(true), 10, 3000, logger);
return new Promise((resolve, reject) => {
workloadAggregator.pipe(take(2), bufferCount(2)).subscribe((results) => {
expect(results[0].key).toEqual('workload');
expect(results[0].value).toMatchObject({
- count: 4,
- cost: 15,
+ count: 5,
task_types: {
- bar: { count: 1, cost: 1, status: { idle: 1 } },
- report: { count: 1, cost: 10, status: { idle: 1 } },
- foo: { count: 2, cost: 4, status: { idle: 2 } },
+ actions_telemetry: { count: 2, status: { idle: 2 } },
+ alerting_telemetry: { count: 2, status: { idle: 2 } },
+ session_cleanup: { count: 1, status: { idle: 1 } },
},
});
expect(results[1].key).toEqual('workload');
expect(results[1].value).toMatchObject({
- count: 4,
- cost: 15,
+ count: 5,
task_types: {
- bar: { count: 1, cost: 1, status: { idle: 1 } },
- report: { count: 1, cost: 10, status: { idle: 1 } },
- foo: { count: 2, cost: 4, status: { idle: 1, failed: 1 } },
+ actions_telemetry: { count: 2, status: { idle: 2 } },
+ alerting_telemetry: { count: 2, status: { idle: 1, failed: 1 } },
+ session_cleanup: { count: 1, status: { idle: 1 } },
},
});
resolve();
@@ -541,27 +567,49 @@ describe('Workload Statistics Aggregator', () => {
const taskStore = taskStoreMock.create({});
taskStore.aggregate.mockResolvedValue(
asApiResponse({
- hits: { hits: [], max_score: 0, total: { value: 4, relation: 'eq' } },
+ hits: {
+ hits: [],
+ max_score: 0,
+ total: { value: 4, relation: 'eq' },
+ },
took: 1,
timed_out: false,
- _shards: { total: 1, successful: 1, skipped: 1, failed: 0 },
+ _shards: {
+ total: 1,
+ successful: 1,
+ skipped: 1,
+ failed: 0,
+ },
aggregations: {
schedule: {
doc_count_error_upper_bound: 0,
sum_other_doc_count: 0,
buckets: [
// repeats each cycle
- { key: `${pollingIntervalInSeconds}s`, doc_count: 1 },
- // 6 times per minute
- { key: `10s`, doc_count: 20 },
- // 1 times per minute
- { key: `60s`, doc_count: 10 },
- // 4 times per hour
- { key: '15m', doc_count: 90 },
- // 2 times per day
- { key: '720m', doc_count: 10 },
- // 8 times per day
- { key: '3h', doc_count: 100 },
+ {
+ key: `${pollingIntervalInSeconds}s`,
+ doc_count: 1,
+ },
+ {
+ key: `10s`, // 6 times per minute
+ doc_count: 20,
+ },
+ {
+ key: `60s`, // 1 times per minute
+ doc_count: 10,
+ },
+ {
+ key: '15m', // 4 times per hour
+ doc_count: 90,
+ },
+ {
+ key: '720m', // 2 times per day
+ doc_count: 10,
+ },
+ {
+ key: '3h', // 8 times per day
+ doc_count: 100,
+ },
],
},
taskType: {
@@ -571,13 +619,12 @@ describe('Workload Statistics Aggregator', () => {
},
nonRecurringTasks: {
doc_count: 13,
- taskType: {
- buckets: [{ key: 'report', doc_count: 13 }],
- doc_count_error_upper_bound: 0,
- sum_other_doc_count: 0,
+ },
+ ownerIds: {
+ ownerIds: {
+ value: 3,
},
},
- ownerIds: { ownerIds: { value: 3 } },
// The `FiltersAggregate` doesn't cover the case of a nested `AggregationContainer`, in which `FiltersAggregate`
// would not have a `buckets` property, but rather a keyed property that's inferred from the request.
// @ts-expect-error
@@ -585,11 +632,8 @@ describe('Workload Statistics Aggregator', () => {
doc_count: 13,
overdue: {
doc_count: 6,
- nonRecurring: { doc_count: 0 },
- taskTypes: {
- buckets: [{ key: 'foo', doc_count: 6 }],
- doc_count_error_upper_bound: 0,
- sum_other_doc_count: 0,
+ nonRecurring: {
+ doc_count: 0,
},
},
scheduleDensity: {
@@ -602,14 +646,13 @@ describe('Workload Statistics Aggregator', () => {
})
);
- const workloadAggregator = createWorkloadAggregator({
+ const workloadAggregator = createWorkloadAggregator(
taskStore,
- elasticsearchAndSOAvailability$: of(true),
- refreshInterval: 10,
- pollInterval: pollingIntervalInSeconds * 1000,
- logger,
- taskDefinitions: definitions,
- });
+ of(true),
+ 10,
+ pollingIntervalInSeconds * 1000,
+ loggingSystemMock.create().get()
+ );
return new Promise((resolve) => {
workloadAggregator.pipe(first()).subscribe((result) => {
@@ -617,7 +660,7 @@ describe('Workload Statistics Aggregator', () => {
expect(result.value).toMatchObject({
capacity_requirements: {
- // these are buckets of required capacity, rather than aggregated requirements.
+ // these are buckets of required capacity, rather than aggregated requirements.
per_minute: 150,
per_hour: 360,
per_day: 820,
@@ -632,14 +675,14 @@ describe('Workload Statistics Aggregator', () => {
const refreshInterval = 1000;
const taskStore = taskStoreMock.create({});
- const workloadAggregator = createWorkloadAggregator({
+ const logger = loggingSystemMock.create().get();
+ const workloadAggregator = createWorkloadAggregator(
taskStore,
- elasticsearchAndSOAvailability$: of(true),
+ of(true),
refreshInterval,
- pollInterval: 3000,
- logger,
- taskDefinitions: definitions,
- });
+ 3000,
+ logger
+ );
return new Promise((resolve, reject) => {
let errorWasThrowAt = 0;
@@ -651,7 +694,9 @@ describe('Workload Statistics Aggregator', () => {
reject(new Error(`Elasticsearch is still poof`));
}
- return setTaskTypeCount(await mockAggregatedResult(), 'foo', { idle: 2 });
+ return setTaskTypeCount(await mockAggregatedResult(), 'alerting_telemetry', {
+ idle: 2,
+ });
});
workloadAggregator.pipe(take(2), bufferCount(2)).subscribe((results) => {
@@ -754,7 +799,7 @@ describe('estimateRecurringTaskScheduling', () => {
});
describe('padBuckets', () => {
- test('returns zeroed out buckets when there are no buckets in the histogram', async () => {
+ test('returns zeroed out buckets when there are no buckets in the histogram', async () => {
expect(
padBuckets(10, 3000, {
key: '2020-10-02T19:47:28.128Z-2020-10-02T19:48:28.128Z',
diff --git a/x-pack/plugins/task_manager/server/monitoring/workload_statistics.ts b/x-pack/plugins/task_manager/server/monitoring/workload_statistics.ts
index e437b420c04f5..6c372ce0fc453 100644
--- a/x-pack/plugins/task_manager/server/monitoring/workload_statistics.ts
+++ b/x-pack/plugins/task_manager/server/monitoring/workload_statistics.ts
@@ -16,9 +16,7 @@ import { AggregatedStatProvider } from '../lib/runtime_statistics_aggregator';
import { parseIntervalAsSecond, asInterval, parseIntervalAsMillisecond } from '../lib/intervals';
import { HealthStatus } from './monitoring_stats_stream';
import { TaskStore } from '../task_store';
-import { createRunningAveragedStat } from './task_run_calculators';
-import { TaskTypeDictionary } from '../task_type_dictionary';
-import { TaskCost } from '../task';
+import { createRunningAveragedStat } from './task_run_calcultors';
interface StatusStat extends JsonObject {
[status: string]: number;
@@ -26,20 +24,16 @@ interface StatusStat extends JsonObject {
interface TaskTypeStat extends JsonObject {
[taskType: string]: {
count: number;
- cost: number;
status: StatusStat;
};
}
interface RawWorkloadStat extends JsonObject {
count: number;
- cost: number;
task_types: TaskTypeStat;
schedule: Array<[string, number]>;
non_recurring: number;
- non_recurring_cost: number;
overdue: number;
- overdue_cost: number;
overdue_non_recurring: number;
estimated_schedule_density: number[];
capacity_requirements: CapacityRequirements;
@@ -115,34 +109,22 @@ type ScheduleDensityResult = AggregationResultOf<
type ScheduledIntervals = ScheduleDensityResult['histogram']['buckets'][0];
// Set an upper bound just in case a customer sets a really high refresh rate
-const MAX_SCHEDULE_DENSITY_BUCKETS = 50;
-
-interface CreateWorkloadAggregatorOpts {
- taskStore: TaskStore;
- elasticsearchAndSOAvailability$: Observable;
- refreshInterval: number;
- pollInterval: number;
- logger: Logger;
- taskDefinitions: TaskTypeDictionary;
-}
+const MAX_SCHEDULE_DENSITY_BUCKETS = 50;
-export function createWorkloadAggregator({
- taskStore,
- elasticsearchAndSOAvailability$,
- refreshInterval,
- pollInterval,
- logger,
- taskDefinitions,
-}: CreateWorkloadAggregatorOpts): AggregatedStatProvider {
+export function createWorkloadAggregator(
+ taskStore: TaskStore,
+ elasticsearchAndSOAvailability$: Observable,
+ refreshInterval: number,
+ pollInterval: number,
+ logger: Logger
+): AggregatedStatProvider {
// calculate scheduleDensity going two refreshIntervals or 1 minute into into the future
// (the longer of the two)
const scheduleDensityBuckets = Math.min(
Math.max(Math.round(60000 / pollInterval), Math.round((refreshInterval * 2) / pollInterval)),
- MAX_SCHEDULE_DENSITY_BUCKETS
+ MAX_SCHEDULE_DENSITY_BUCKETS
);
- const totalNumTaskDefinitions = taskDefinitions.getAllTypes().length;
- const taskTypeTermAggSize = Math.min(totalNumTaskDefinitions, 10000);
const ownerIdsQueue = createRunningAveragedStat(scheduleDensityBuckets);
return combineLatest([timer(0, refreshInterval), elasticsearchAndSOAvailability$]).pipe(
@@ -151,24 +133,39 @@ export function createWorkloadAggregator({
taskStore.aggregate({
aggs: {
taskType: {
- terms: { size: taskTypeTermAggSize, field: 'task.taskType' },
- aggs: { status: { terms: { field: 'task.status' } } },
+ terms: { size: 100, field: 'task.taskType' },
+ aggs: {
+ status: {
+ terms: { field: 'task.status' },
+ },
+ },
},
schedule: {
terms: { field: 'task.schedule.interval', size: 100 },
},
nonRecurringTasks: {
- missing: { field: 'task.schedule.interval' },
- aggs: {
- taskType: { terms: { size: taskTypeTermAggSize, field: 'task.taskType' } },
- },
+ missing: { field: 'task.schedule' },
},
ownerIds: {
- filter: { range: { 'task.startedAt': { gte: 'now-1w/w' } } },
- aggs: { ownerIds: { cardinality: { field: 'task.ownerId' } } },
+ filter: {
+ range: {
+ 'task.startedAt': {
+ gte: 'now-1w/w',
+ },
+ },
+ },
+ aggs: {
+ ownerIds: {
+ cardinality: {
+ field: 'task.ownerId',
+ },
+ },
+ },
},
idleTasks: {
- filter: { term: { 'task.status': 'idle' } },
+ filter: {
+ term: { 'task.status': 'idle' },
+ },
aggs: {
scheduleDensity: {
// create a window of upcoming tasks
@@ -190,7 +187,7 @@ export function createWorkloadAggregator({
field: 'task.runAt',
fixed_interval: asInterval(pollInterval),
},
- // break down each bucket in the histogram by schedule
+ // break down each bucket in the histogram by schedule
aggs: {
interval: {
terms: { field: 'task.schedule.interval' },
@@ -200,10 +197,15 @@ export function createWorkloadAggregator({
},
},
overdue: {
- filter: { range: { 'task.runAt': { lt: 'now' } } },
+ filter: {
+ range: {
+ 'task.runAt': { lt: 'now' },
+ },
+ },
aggs: {
- taskTypes: { terms: { size: taskTypeTermAggSize, field: 'task.taskType' } },
- nonRecurring: { missing: { field: 'task.schedule.interval' } },
+ nonRecurring: {
+ missing: { field: 'task.schedule' },
+ },
},
},
},
@@ -224,13 +226,11 @@ export function createWorkloadAggregator({
const taskTypes = aggregations.taskType.buckets;
const nonRecurring = aggregations.nonRecurringTasks.doc_count;
- const nonRecurringTaskTypes = aggregations.nonRecurringTasks.taskType.buckets;
const ownerIds = aggregations.ownerIds.ownerIds.value;
const {
overdue: {
doc_count: overdue,
- taskTypes: { buckets: taskTypesOverdue = [] } = {},
nonRecurring: { doc_count: overdueNonRecurring },
},
scheduleDensity: { buckets: [scheduleDensity] = [] } = {},
@@ -243,7 +243,6 @@ export function createWorkloadAggregator({
asSeconds: parseIntervalAsSecond(schedule.key as string),
count: schedule.doc_count,
};
-
accm.schedules.push(parsedSchedule);
if (parsedSchedule.asSeconds <= 60) {
accm.cadence.perMinute +=
@@ -258,7 +257,11 @@ export function createWorkloadAggregator({
return accm;
},
{
- cadence: { perMinute: 0, perHour: 0, perDay: 0 },
+ cadence: {
+ perMinute: 0,
+ perHour: 0,
+ perDay: 0,
+ },
schedules: [] as Array<{
interval: string;
asSeconds: number;
@@ -267,36 +270,20 @@ export function createWorkloadAggregator({
}
);
- const totalNonRecurringCost = getTotalCost(nonRecurringTaskTypes, taskDefinitions);
- const totalOverdueCost = getTotalCost(taskTypesOverdue, taskDefinitions);
-
- let totalCost = 0;
- const taskTypeSummary = taskTypes.reduce((acc, bucket) => {
- const value = bucket as TaskTypeWithStatusBucket;
- const cost =
- value.doc_count * taskDefinitions.get(value.key as string)?.cost ?? TaskCost.Normal;
- totalCost += cost;
- return Object.assign(acc, {
- [value.key as string]: {
- count: value.doc_count,
- cost,
- status: mapValues(keyBy(value.status.buckets, 'key'), 'doc_count'),
- },
- });
- }, {});
-
const summary: WorkloadStat = {
count,
- cost: totalCost,
- task_types: taskTypeSummary,
+ task_types: mapValues(keyBy(taskTypes, 'key'), ({ doc_count: docCount, status }) => {
+ return {
+ count: docCount,
+ status: mapValues(keyBy(status.buckets, 'key'), 'doc_count'),
+ };
+ }),
non_recurring: nonRecurring,
- non_recurring_cost: totalNonRecurringCost,
owner_ids: ownerIdsQueue(ownerIds),
schedule: schedules
.sort((scheduleLeft, scheduleRight) => scheduleLeft.asSeconds - scheduleRight.asSeconds)
.map((schedule) => [schedule.interval, schedule.count]),
overdue,
- overdue_cost: totalOverdueCost,
overdue_non_recurring: overdueNonRecurring,
estimated_schedule_density: padBuckets(
scheduleDensityBuckets,
@@ -470,37 +457,40 @@ export interface WorkloadAggregationResponse {
taskType: TaskTypeAggregation;
schedule: ScheduleAggregation;
idleTasks: IdleTasksAggregation;
- nonRecurringTasks: { doc_count: number; taskType: TaskTypeAggregation };
- ownerIds: { ownerIds: { value: number } };
- [otherAggs: string]: estypes.AggregationsAggregate;
-}
-
-export type TaskTypeWithStatusBucket = TaskTypeBucket & {
- status: {
- buckets: Array<{
- doc_count: number;
- key: string | number;
- }>;
- doc_count_error_upper_bound?: number | undefined;
- sum_other_doc_count?: number | undefined;
+ nonRecurringTasks: {
+ doc_count: number;
};
-};
-
-export interface TaskTypeBucket {
- doc_count: number;
- key: string | number;
+ ownerIds: {
+ ownerIds: {
+ value: number;
+ };
+ };
+ [otherAggs: string]: estypes.AggregationsAggregate;
}
-
// @ts-expect-error key doesn't accept a string
export interface TaskTypeAggregation extends estypes.AggregationsFiltersAggregate {
- buckets: Array;
+ buckets: Array<{
+ doc_count: number;
+ key: string | number;
+ status: {
+ buckets: Array<{
+ doc_count: number;
+ key: string | number;
+ }>;
+ doc_count_error_upper_bound?: number | undefined;
+ sum_other_doc_count?: number | undefined;
+ };
+ }>;
doc_count_error_upper_bound?: number | undefined;
sum_other_doc_count?: number | undefined;
}
// @ts-expect-error key doesn't accept a string
export interface ScheduleAggregation extends estypes.AggregationsFiltersAggregate {
- buckets: Array<{ doc_count: number; key: string | number }>;
+ buckets: Array<{
+ doc_count: number;
+ key: string | number;
+ }>;
doc_count_error_upper_bound?: number | undefined;
sum_other_doc_count?: number | undefined;
}
@@ -528,8 +518,9 @@ export interface IdleTasksAggregation extends estypes.AggregationsFiltersAggrega
};
overdue: {
doc_count: number;
- nonRecurring: { doc_count: number };
- taskTypes: TaskTypeAggregation;
+ nonRecurring: {
+ doc_count: number;
+ };
};
}
@@ -546,11 +537,3 @@ interface DateRangeBucket {
from_as_string?: string;
doc_count: number;
}
-
-function getTotalCost(taskTypeBuckets: TaskTypeBucket[], definitions: TaskTypeDictionary): number {
- let cost = 0;
- for (const bucket of taskTypeBuckets) {
- cost += bucket.doc_count * definitions.get(bucket.key as string)?.cost ?? TaskCost.Normal;
- }
- return cost;
-}
diff --git a/x-pack/plugins/task_manager/server/plugin.test.ts b/x-pack/plugins/task_manager/server/plugin.test.ts
index a1589504bb364..7b80920a57559 100644
--- a/x-pack/plugins/task_manager/server/plugin.test.ts
+++ b/x-pack/plugins/task_manager/server/plugin.test.ts
@@ -11,8 +11,6 @@ import { TaskManagerConfig } from './config';
import { Subject } from 'rxjs';
import { bufferCount, take } from 'rxjs';
import { CoreStatus, ServiceStatusLevels } from '@kbn/core/server';
-import { serverlessPluginMock } from '@kbn/serverless/server/mocks';
-import { cloudMock } from '@kbn/cloud-plugin/public/mocks';
import { taskPollingLifecycleMock } from './polling_lifecycle.mock';
import { TaskPollingLifecycle } from './polling_lifecycle';
import type { TaskPollingLifecycle as TaskPollingLifecycleClass } from './polling_lifecycle';
@@ -40,6 +38,7 @@ jest.mock('./ephemeral_task_lifecycle', () => {
const coreStart = coreMock.createStart();
const pluginInitializerContextParams = {
+ max_workers: 10,
max_attempts: 9,
poll_interval: 3000,
version_conflict_threshold: 80,
@@ -149,10 +148,7 @@ describe('TaskManagerPlugin', () => {
pluginInitializerContext.node.roles.backgroundTasks = true;
const taskManagerPlugin = new TaskManagerPlugin(pluginInitializerContext);
taskManagerPlugin.setup(coreMock.createSetup(), { usageCollection: undefined });
- taskManagerPlugin.start(coreStart, {
- serverless: serverlessPluginMock.createStartContract(),
- cloud: cloudMock.createStart(),
- });
+ taskManagerPlugin.start(coreStart);
expect(TaskPollingLifecycle as jest.Mock).toHaveBeenCalledTimes(1);
expect(
@@ -167,10 +163,7 @@ describe('TaskManagerPlugin', () => {
pluginInitializerContext.node.roles.backgroundTasks = false;
const taskManagerPlugin = new TaskManagerPlugin(pluginInitializerContext);
taskManagerPlugin.setup(coreMock.createSetup(), { usageCollection: undefined });
- taskManagerPlugin.start(coreStart, {
- serverless: serverlessPluginMock.createStartContract(),
- cloud: cloudMock.createStart(),
- });
+ taskManagerPlugin.start(coreStart);
expect(TaskPollingLifecycle as jest.Mock).not.toHaveBeenCalled();
expect(
diff --git a/x-pack/plugins/task_manager/server/plugin.ts b/x-pack/plugins/task_manager/server/plugin.ts
index a3a295169057d..1926b48b31ea6 100644
--- a/x-pack/plugins/task_manager/server/plugin.ts
+++ b/x-pack/plugins/task_manager/server/plugin.ts
@@ -18,8 +18,6 @@ import {
ServiceStatusLevels,
CoreStatus,
} from '@kbn/core/server';
-import { ServerlessPluginStart } from '@kbn/serverless/server';
-import type { CloudStart } from '@kbn/cloud-plugin/server';
import {
registerDeleteInactiveNodesTaskDefinition,
scheduleDeleteInactiveNodesTaskDefinition,
@@ -45,7 +43,6 @@ import { setupIntervalLogging } from './lib/log_health_metrics';
import { metricsStream, Metrics } from './metrics';
import { TaskManagerMetricsCollector } from './metrics/task_metrics_collector';
import { TaskPartitioner } from './lib/task_partitioner';
-import { getDefaultCapacity } from './lib/get_default_capacity';
export interface TaskManagerSetupContract {
/**
@@ -79,11 +76,6 @@ export type TaskManagerStartContract = Pick<
getRegisteredTypes: () => string[];
};
-export interface TaskManagerPluginStart {
- cloud?: CloudStart;
- serverless?: ServerlessPluginStart;
-}
-
const LogHealthForBackgroundTasksOnlyMinutes = 60;
export class TaskManagerPlugin
@@ -107,7 +99,6 @@ export class TaskManagerPlugin
private taskManagerMetricsCollector?: TaskManagerMetricsCollector;
private nodeRoles: PluginInitializerContext['node']['roles'];
private kibanaDiscoveryService?: KibanaDiscoveryService;
- private heapSizeLimit: number = 0;
constructor(private readonly initContext: PluginInitializerContext) {
this.initContext = initContext;
@@ -131,13 +122,6 @@ export class TaskManagerPlugin
): TaskManagerSetupContract {
this.elasticsearchAndSOAvailability$ = getElasticsearchAndSOAvailability(core.status.core$);
- core.metrics
- .getOpsMetrics$()
- .pipe(distinctUntilChanged())
- .subscribe((metrics) => {
- this.heapSizeLimit = metrics.process.memory.heap.size_limit;
- });
-
setupSavedObjects(core.savedObjects, this.config);
this.taskManagerId = this.initContext.env.instanceUuid;
@@ -248,10 +232,12 @@ export class TaskManagerPlugin
};
}
- public start(
- { savedObjects, elasticsearch, executionContext, docLinks }: CoreStart,
- { cloud, serverless }: TaskManagerPluginStart
- ): TaskManagerStartContract {
+ public start({
+ savedObjects,
+ elasticsearch,
+ executionContext,
+ docLinks,
+ }: CoreStart): TaskManagerStartContract {
const savedObjectsRepository = savedObjects.createInternalRepository([
TASK_SO_NAME,
BACKGROUND_TASK_NODE_SO_NAME,
@@ -281,29 +267,11 @@ export class TaskManagerPlugin
requestTimeouts: this.config.request_timeouts,
});
- const defaultCapacity = getDefaultCapacity({
- claimStrategy: this.config?.claim_strategy,
- heapSizeLimit: this.heapSizeLimit,
- isCloud: cloud?.isCloudEnabled ?? false,
- isServerless: !!serverless,
- isBackgroundTaskNodeOnly: this.isNodeBackgroundTasksOnly(),
- });
-
- this.logger.info(
- `Task manager isCloud=${
- cloud?.isCloudEnabled ?? false
- } isServerless=${!!serverless} claimStrategy=${
- this.config!.claim_strategy
- } isBackgroundTaskNodeOnly=${this.isNodeBackgroundTasksOnly()} heapSizeLimit=${
- this.heapSizeLimit
- } defaultCapacity=${defaultCapacity}`
- );
-
const managedConfiguration = createManagedConfiguration({
- config: this.config!,
- errors$: taskStore.errors$,
- defaultCapacity,
logger: this.logger,
+ errors$: taskStore.errors$,
+ startingMaxWorkers: this.config!.max_workers,
+ startingPollInterval: this.config!.poll_interval,
});
// Only poll for tasks if configured to run tasks
@@ -342,17 +310,16 @@ export class TaskManagerPlugin
});
}
- createMonitoringStats({
+ createMonitoringStats(
taskStore,
- elasticsearchAndSOAvailability$: this.elasticsearchAndSOAvailability$!,
- config: this.config!,
- managedConfig: managedConfiguration,
- logger: this.logger,
- adHocTaskCounter: this.adHocTaskCounter,
- taskDefinitions: this.definitions,
- taskPollingLifecycle: this.taskPollingLifecycle,
- ephemeralTaskLifecycle: this.ephemeralTaskLifecycle,
- }).subscribe((stat) => this.monitoringStats$.next(stat));
+ this.elasticsearchAndSOAvailability$!,
+ this.config!,
+ managedConfiguration,
+ this.logger,
+ this.adHocTaskCounter,
+ this.taskPollingLifecycle,
+ this.ephemeralTaskLifecycle
+ ).subscribe((stat) => this.monitoringStats$.next(stat));
metricsStream({
config: this.config!,
diff --git a/x-pack/plugins/task_manager/server/polling/delay_on_claim_conflicts.test.ts b/x-pack/plugins/task_manager/server/polling/delay_on_claim_conflicts.test.ts
index 11741aeadcf2d..f06c43bc15587 100644
--- a/x-pack/plugins/task_manager/server/polling/delay_on_claim_conflicts.test.ts
+++ b/x-pack/plugins/task_manager/server/polling/delay_on_claim_conflicts.test.ts
@@ -22,10 +22,10 @@ describe('delayOnClaimConflicts', () => {
'initializes with a delay of 0',
fakeSchedulers(async () => {
const pollInterval = 100;
- const capacity = 10;
+ const maxWorkers = 10;
const taskLifecycleEvents$ = new Subject();
const delays = delayOnClaimConflicts(
- of(capacity),
+ of(maxWorkers),
of(pollInterval),
taskLifecycleEvents$,
80,
@@ -42,11 +42,11 @@ describe('delayOnClaimConflicts', () => {
'emits a random delay whenever p50 of claim clashes exceed 80% of available max_workers',
fakeSchedulers(async () => {
const pollInterval = 100;
- const capacity = 10;
+ const maxWorkers = 10;
const taskLifecycleEvents$ = new Subject();
const delays$ = firstValueFrom(
- delayOnClaimConflicts(of(capacity), of(pollInterval), taskLifecycleEvents$, 80, 2).pipe(
+ delayOnClaimConflicts(of(maxWorkers), of(pollInterval), taskLifecycleEvents$, 80, 2).pipe(
take(2),
bufferCount(2)
)
@@ -60,6 +60,7 @@ describe('delayOnClaimConflicts', () => {
tasksUpdated: 0,
tasksConflicted: 8,
tasksClaimed: 0,
+ tasksRejected: 0,
},
docs: [],
})
@@ -93,6 +94,7 @@ describe('delayOnClaimConflicts', () => {
tasksUpdated: 0,
tasksConflicted: 8,
tasksClaimed: 0,
+ tasksRejected: 0,
},
docs: [],
})
@@ -109,6 +111,7 @@ describe('delayOnClaimConflicts', () => {
tasksUpdated: 0,
tasksConflicted: 10,
tasksClaimed: 0,
+ tasksRejected: 0,
},
docs: [],
})
@@ -134,14 +137,18 @@ describe('delayOnClaimConflicts', () => {
'doesnt emit a new delay when conflicts have reduced',
fakeSchedulers(async () => {
const pollInterval = 100;
- const capacity = 10;
+ const maxWorkers = 10;
const taskLifecycleEvents$ = new Subject();
const handler = jest.fn();
- delayOnClaimConflicts(of(capacity), of(pollInterval), taskLifecycleEvents$, 80, 2).subscribe(
- handler
- );
+ delayOnClaimConflicts(
+ of(maxWorkers),
+ of(pollInterval),
+ taskLifecycleEvents$,
+ 80,
+ 2
+ ).subscribe(handler);
await sleep(0);
expect(handler).toHaveBeenCalledWith(0);
@@ -154,6 +161,7 @@ describe('delayOnClaimConflicts', () => {
tasksUpdated: 0,
tasksConflicted: 8,
tasksClaimed: 0,
+ tasksRejected: 0,
},
docs: [],
})
@@ -174,6 +182,7 @@ describe('delayOnClaimConflicts', () => {
tasksUpdated: 0,
tasksConflicted: 7,
tasksClaimed: 0,
+ tasksRejected: 0,
},
docs: [],
})
@@ -192,6 +201,7 @@ describe('delayOnClaimConflicts', () => {
tasksUpdated: 0,
tasksConflicted: 9,
tasksClaimed: 0,
+ tasksRejected: 0,
},
docs: [],
})
diff --git a/x-pack/plugins/task_manager/server/polling/delay_on_claim_conflicts.ts b/x-pack/plugins/task_manager/server/polling/delay_on_claim_conflicts.ts
index 21b16b1a8d5c5..f491d58fc59ee 100644
--- a/x-pack/plugins/task_manager/server/polling/delay_on_claim_conflicts.ts
+++ b/x-pack/plugins/task_manager/server/polling/delay_on_claim_conflicts.ts
@@ -19,14 +19,13 @@ import { ManagedConfiguration } from '../lib/create_managed_configuration';
import { TaskLifecycleEvent } from '../polling_lifecycle';
import { isTaskPollingCycleEvent } from '../task_events';
import { ClaimAndFillPoolResult } from '../lib/fill_pool';
-import { createRunningAveragedStat } from '../monitoring/task_run_calculators';
-import { getCapacityInWorkers } from '../task_pool';
+import { createRunningAveragedStat } from '../monitoring/task_run_calcultors';
/**
* Emits a delay amount in ms to apply to polling whenever the task store exceeds a threshold of claim claimClashes
*/
export function delayOnClaimConflicts(
- capacityConfiguration$: ManagedConfiguration['capacityConfiguration$'],
+ maxWorkersConfiguration$: ManagedConfiguration['maxWorkersConfiguration$'],
pollIntervalConfiguration$: ManagedConfiguration['pollIntervalConfiguration$'],
taskLifecycleEvents$: Observable,
claimClashesPercentageThreshold: number,
@@ -38,7 +37,7 @@ export function delayOnClaimConflicts(
merge(
of(0),
combineLatest([
- capacityConfiguration$,
+ maxWorkersConfiguration$,
pollIntervalConfiguration$,
taskLifecycleEvents$.pipe(
map>((taskEvent: TaskLifecycleEvent) =>
@@ -52,10 +51,7 @@ export function delayOnClaimConflicts(
map((claimClashes: Option) => (claimClashes as Some).value)
),
]).pipe(
- map(([capacity, pollInterval, latestClaimConflicts]) => {
- // convert capacity to maxWorkers
- const maxWorkers = getCapacityInWorkers(capacity);
-
+ map(([maxWorkers, pollInterval, latestClaimConflicts]) => {
// add latest claimConflict count to queue
claimConflictQueue(latestClaimConflicts);
diff --git a/x-pack/plugins/task_manager/server/polling_lifecycle.test.ts b/x-pack/plugins/task_manager/server/polling_lifecycle.test.ts
index e804f1c166cee..baf45cb65ea1e 100644
--- a/x-pack/plugins/task_manager/server/polling_lifecycle.test.ts
+++ b/x-pack/plugins/task_manager/server/polling_lifecycle.test.ts
@@ -20,8 +20,6 @@ import { asOk, Err, isErr, isOk, Result } from './lib/result_type';
import { FillPoolResult } from './lib/fill_pool';
import { ElasticsearchResponseError } from './lib/identify_es_error';
import { executionContextServiceMock } from '@kbn/core/server/mocks';
-import { TaskCost } from './task';
-import { CLAIM_STRATEGY_MGET } from './config';
import { TaskPartitioner } from './lib/task_partitioner';
import { KibanaDiscoveryService } from './kibana_discovery_service';
@@ -46,6 +44,7 @@ describe('TaskPollingLifecycle', () => {
const taskManagerOpts = {
config: {
enabled: true,
+ max_workers: 10,
index: 'foo',
max_attempts: 9,
poll_interval: 6000000,
@@ -91,8 +90,7 @@ describe('TaskPollingLifecycle', () => {
unusedTypes: [],
definitions: new TaskTypeDictionary(taskManagerLogger),
middleware: createInitialMiddleware(),
- startingCapacity: 20,
- capacityConfiguration$: of(20),
+ maxWorkersConfiguration$: of(100),
pollIntervalConfiguration$: of(100),
executionContext,
taskPartitioner: new TaskPartitioner('test', {} as KibanaDiscoveryService),
@@ -107,23 +105,12 @@ describe('TaskPollingLifecycle', () => {
afterEach(() => clock.restore());
describe('start', () => {
- taskManagerOpts.definitions.registerTaskDefinitions({
- report: {
- title: 'report',
- maxConcurrency: 1,
- cost: TaskCost.ExtraLarge,
- createTaskRunner: jest.fn(),
- },
- quickReport: {
- title: 'quickReport',
- maxConcurrency: 5,
- createTaskRunner: jest.fn(),
- },
- });
-
test('begins polling once the ES and SavedObjects services are available', () => {
const elasticsearchAndSOAvailability$ = new Subject();
- new TaskPollingLifecycle({ ...taskManagerOpts, elasticsearchAndSOAvailability$ });
+ new TaskPollingLifecycle({
+ ...taskManagerOpts,
+ elasticsearchAndSOAvailability$,
+ });
clock.tick(150);
expect(mockTaskClaiming.claimAvailableTasksIfCapacityIsAvailable).not.toHaveBeenCalled();
@@ -134,70 +121,55 @@ describe('TaskPollingLifecycle', () => {
expect(mockTaskClaiming.claimAvailableTasksIfCapacityIsAvailable).toHaveBeenCalled();
});
- test('provides TaskClaiming with the capacity available when strategy = CLAIM_STRATEGY_DEFAULT', () => {
+ test('provides TaskClaiming with the capacity available', () => {
const elasticsearchAndSOAvailability$ = new Subject();
- const capacity$ = new Subject();
+ const maxWorkers$ = new Subject();
+ taskManagerOpts.definitions.registerTaskDefinitions({
+ report: {
+ title: 'report',
+ maxConcurrency: 1,
+ createTaskRunner: jest.fn(),
+ },
+ quickReport: {
+ title: 'quickReport',
+ maxConcurrency: 5,
+ createTaskRunner: jest.fn(),
+ },
+ });
new TaskPollingLifecycle({
...taskManagerOpts,
elasticsearchAndSOAvailability$,
- capacityConfiguration$: capacity$,
+ maxWorkersConfiguration$: maxWorkers$,
});
const taskClaimingGetCapacity = (TaskClaiming as jest.Mock).mock
- .calls[0][0].getAvailableCapacity;
+ .calls[0][0].getCapacity;
- capacity$.next(40);
- expect(taskClaimingGetCapacity()).toEqual(40);
+ maxWorkers$.next(20);
+ expect(taskClaimingGetCapacity()).toEqual(20);
expect(taskClaimingGetCapacity('report')).toEqual(1);
expect(taskClaimingGetCapacity('quickReport')).toEqual(5);
- capacity$.next(60);
- expect(taskClaimingGetCapacity()).toEqual(60);
+ maxWorkers$.next(30);
+ expect(taskClaimingGetCapacity()).toEqual(30);
expect(taskClaimingGetCapacity('report')).toEqual(1);
expect(taskClaimingGetCapacity('quickReport')).toEqual(5);
- capacity$.next(4);
- expect(taskClaimingGetCapacity()).toEqual(4);
+ maxWorkers$.next(2);
+ expect(taskClaimingGetCapacity()).toEqual(2);
expect(taskClaimingGetCapacity('report')).toEqual(1);
- expect(taskClaimingGetCapacity('quickReport')).toEqual(4);
- });
-
- test('provides TaskClaiming with the capacity available when strategy = CLAIM_STRATEGY_MGET', () => {
- const elasticsearchAndSOAvailability$ = new Subject();
- const capacity$ = new Subject();
-
- new TaskPollingLifecycle({
- ...taskManagerOpts,
- config: { ...taskManagerOpts.config, claim_strategy: CLAIM_STRATEGY_MGET },
- elasticsearchAndSOAvailability$,
- capacityConfiguration$: capacity$,
- });
-
- const taskClaimingGetCapacity = (TaskClaiming as jest.Mock).mock
- .calls[0][0].getAvailableCapacity;
-
- capacity$.next(40);
- expect(taskClaimingGetCapacity()).toEqual(80);
- expect(taskClaimingGetCapacity('report')).toEqual(10);
- expect(taskClaimingGetCapacity('quickReport')).toEqual(10);
-
- capacity$.next(60);
- expect(taskClaimingGetCapacity()).toEqual(120);
- expect(taskClaimingGetCapacity('report')).toEqual(10);
- expect(taskClaimingGetCapacity('quickReport')).toEqual(10);
-
- capacity$.next(4);
- expect(taskClaimingGetCapacity()).toEqual(8);
- expect(taskClaimingGetCapacity('report')).toEqual(8);
- expect(taskClaimingGetCapacity('quickReport')).toEqual(8);
+ expect(taskClaimingGetCapacity('quickReport')).toEqual(2);
});
});
describe('stop', () => {
test('stops polling once the ES and SavedObjects services become unavailable', () => {
const elasticsearchAndSOAvailability$ = new Subject();
- new TaskPollingLifecycle({ elasticsearchAndSOAvailability$, ...taskManagerOpts });
+ new TaskPollingLifecycle({
+ elasticsearchAndSOAvailability$,
+ ...taskManagerOpts,
+ });
elasticsearchAndSOAvailability$.next(true);
@@ -244,7 +216,7 @@ describe('TaskPollingLifecycle', () => {
of(
asOk({
docs: [],
- stats: { tasksUpdated: 0, tasksConflicted: 0, tasksClaimed: 0 },
+ stats: { tasksUpdated: 0, tasksConflicted: 0, tasksClaimed: 0, tasksRejected: 0 },
})
)
);
@@ -326,47 +298,7 @@ describe('TaskPollingLifecycle', () => {
of(
asOk({
docs: [],
- stats: { tasksUpdated: 0, tasksConflicted: 0, tasksClaimed: 0 },
- })
- )
- );
- const elasticsearchAndSOAvailability$ = new Subject();
- const taskPollingLifecycle = new TaskPollingLifecycle({
- ...taskManagerOpts,
- elasticsearchAndSOAvailability$,
- });
-
- const emittedEvents: TaskLifecycleEvent[] = [];
-
- taskPollingLifecycle.events.subscribe((event: TaskLifecycleEvent) =>
- emittedEvents.push(event)
- );
-
- elasticsearchAndSOAvailability$.next(true);
- expect(mockTaskClaiming.claimAvailableTasksIfCapacityIsAvailable).toHaveBeenCalled();
- await retryUntil('workerUtilizationEvent emitted', () => {
- return !!emittedEvents.find(
- (event: TaskLifecycleEvent) => event.id === 'workerUtilization'
- );
- });
-
- const workerUtilizationEvent = emittedEvents.find(
- (event: TaskLifecycleEvent) => event.id === 'workerUtilization'
- );
- expect(workerUtilizationEvent).toEqual({
- id: 'workerUtilization',
- type: 'TASK_MANAGER_STAT',
- event: { tag: 'ok', value: 0 },
- });
- });
-
- test('should set utilization to max when capacity is not fully reached but there are tasks left unclaimed', async () => {
- clock.restore();
- mockTaskClaiming.claimAvailableTasksIfCapacityIsAvailable.mockImplementation(() =>
- of(
- asOk({
- docs: [],
- stats: { tasksUpdated: 0, tasksConflicted: 0, tasksClaimed: 0, tasksLeftUnclaimed: 2 },
+ stats: { tasksUpdated: 0, tasksConflicted: 0, tasksClaimed: 0, tasksRejected: 0 },
})
)
);
@@ -389,15 +321,6 @@ describe('TaskPollingLifecycle', () => {
(event: TaskLifecycleEvent) => event.id === 'workerUtilization'
);
});
-
- const workerUtilizationEvent = emittedEvents.find(
- (event: TaskLifecycleEvent) => event.id === 'workerUtilization'
- );
- expect(workerUtilizationEvent).toEqual({
- id: 'workerUtilization',
- type: 'TASK_MANAGER_STAT',
- event: { tag: 'ok', value: 100 },
- });
});
test('should emit event when polling error occurs', async () => {
diff --git a/x-pack/plugins/task_manager/server/polling_lifecycle.ts b/x-pack/plugins/task_manager/server/polling_lifecycle.ts
index f13a7ad20806c..3b9c5621da0b9 100644
--- a/x-pack/plugins/task_manager/server/polling_lifecycle.ts
+++ b/x-pack/plugins/task_manager/server/polling_lifecycle.ts
@@ -45,8 +45,6 @@ import { TaskClaiming } from './queries/task_claiming';
import { ClaimOwnershipResult } from './task_claimers';
import { TaskPartitioner } from './lib/task_partitioner';
-const MAX_BUFFER_OPERATIONS = 100;
-
export interface ITaskEventEmitter {
get events(): Observable;
}
@@ -103,7 +101,7 @@ export class TaskPollingLifecycle implements ITaskEventEmitter this.events$.next(event);
this.bufferedStore = new BufferedTaskStore(this.store, {
- bufferMaxOperations: MAX_BUFFER_OPERATIONS,
+ bufferMaxOperations: config.max_workers,
logger,
});
this.pool = new TaskPool({
logger,
- strategy: config.claim_strategy,
- capacity$: capacityConfiguration$,
- definitions: this.definitions,
+ maxWorkers$: maxWorkersConfiguration$,
});
this.pool.load.subscribe(emitEvent);
@@ -146,7 +142,17 @@ export class TaskPollingLifecycle implements ITaskEventEmitter this.pool.availableCapacity(taskType),
+ getCapacity: (taskType?: string) =>
+ taskType && this.definitions.get(taskType)?.maxConcurrency
+ ? Math.max(
+ Math.min(
+ this.pool.availableWorkers,
+ this.definitions.get(taskType)!.maxConcurrency! -
+ this.pool.getOccupiedWorkersByType(taskType)
+ ),
+ 0
+ )
+ : this.pool.availableWorkers,
taskPartitioner,
});
// pipe taskClaiming events into the lifecycle event stream
@@ -157,7 +163,7 @@ export class TaskPollingLifecycle implements ITaskEventEmitter | undefined;
if (claimStrategy === CLAIM_STRATEGY_DEFAULT) {
pollIntervalDelay$ = delayOnClaimConflicts(
- capacityConfiguration$,
+ maxWorkersConfiguration$,
pollIntervalConfiguration$,
this.events$,
config.version_conflict_threshold,
@@ -171,22 +177,19 @@ export class TaskPollingLifecycle implements ITaskEventEmitter {
- const capacity = this.pool.availableCapacity();
+ const capacity = this.pool.availableWorkers;
if (!capacity) {
- const usedCapacityPercentage = this.pool.usedCapacityPercentage;
-
// if there isn't capacity, emit a load event so that we can expose how often
// high load causes the poller to skip work (work isn't called when there is no capacity)
- this.emitEvent(asTaskManagerStatEvent('load', asOk(usedCapacityPercentage)));
+ this.emitEvent(asTaskManagerStatEvent('load', asOk(this.pool.workerLoad)));
// Emit event indicating task manager utilization
- this.emitEvent(asTaskManagerStatEvent('workerUtilization', asOk(usedCapacityPercentage)));
+ this.emitEvent(asTaskManagerStatEvent('workerUtilization', asOk(this.pool.workerLoad)));
}
return capacity;
},
work: this.pollForWork,
});
-
this.subscribeToPoller(poller.events$);
elasticsearchAndSOAvailability$.subscribe((areESAndSOAvailable) => {
@@ -259,7 +262,7 @@ export class TaskPollingLifecycle implements ITaskEventEmitter {
+ mapOk(() => {
// Emit event indicating task manager utilization % at the end of a polling cycle
-
- // Get the actual utilization as a percentage
- let tmUtilization = this.pool.usedCapacityPercentage;
-
- // Check whether there are any tasks left unclaimed
- // If we're not at capacity and there are unclaimed tasks, then
- // there must be high cost tasks that need to be claimed
- // Artificially inflate the utilization to represent the unclaimed load
- if (tmUtilization < 100 && (results.stats?.tasksLeftUnclaimed ?? 0) > 0) {
- tmUtilization = 100;
- }
-
- this.emitEvent(asTaskManagerStatEvent('workerUtilization', asOk(tmUtilization)));
+ // This represents the number of workers busy + number of tasks claimed in this cycle
+ this.emitEvent(asTaskManagerStatEvent('workerUtilization', asOk(this.pool.workerLoad)));
})
)
)
diff --git a/x-pack/plugins/task_manager/server/queries/task_claiming.test.ts b/x-pack/plugins/task_manager/server/queries/task_claiming.test.ts
index de57a73f80533..bc4adb71dd4a1 100644
--- a/x-pack/plugins/task_manager/server/queries/task_claiming.test.ts
+++ b/x-pack/plugins/task_manager/server/queries/task_claiming.test.ts
@@ -80,7 +80,7 @@ describe('TaskClaiming', () => {
unusedTypes: [],
taskStore: taskStoreMock.create({ taskManagerId: '' }),
maxAttempts: 2,
- getAvailableCapacity: () => 10,
+ getCapacity: () => 10,
taskPartitioner,
});
@@ -130,7 +130,7 @@ describe('TaskClaiming', () => {
unusedTypes: [],
taskStore: taskStoreMock.create({ taskManagerId: '' }),
maxAttempts: 2,
- getAvailableCapacity: () => 10,
+ getCapacity: () => 10,
taskPartitioner,
});
diff --git a/x-pack/plugins/task_manager/server/queries/task_claiming.ts b/x-pack/plugins/task_manager/server/queries/task_claiming.ts
index f5ef18452509b..188f47b0d2d2f 100644
--- a/x-pack/plugins/task_manager/server/queries/task_claiming.ts
+++ b/x-pack/plugins/task_manager/server/queries/task_claiming.ts
@@ -38,7 +38,7 @@ export interface TaskClaimingOpts {
taskStore: TaskStore;
maxAttempts: number;
excludedTaskTypes: string[];
- getAvailableCapacity: (taskType?: string) => number;
+ getCapacity: (taskType?: string) => number;
taskPartitioner: TaskPartitioner;
}
@@ -87,7 +87,7 @@ export class TaskClaiming {
private definitions: TaskTypeDictionary;
private events$: Subject;
private taskStore: TaskStore;
- private getAvailableCapacity: (taskType?: string) => number;
+ private getCapacity: (taskType?: string) => number;
private logger: Logger;
private readonly taskClaimingBatchesByType: TaskClaimingBatches;
private readonly taskMaxAttempts: Record;
@@ -106,7 +106,7 @@ export class TaskClaiming {
this.definitions = opts.definitions;
this.maxAttempts = opts.maxAttempts;
this.taskStore = opts.taskStore;
- this.getAvailableCapacity = opts.getAvailableCapacity;
+ this.getCapacity = opts.getCapacity;
this.logger = opts.logger.get('taskClaiming');
this.taskClaimingBatchesByType = this.partitionIntoClaimingBatches(this.definitions);
this.taskMaxAttempts = Object.fromEntries(this.normalizeMaxAttempts(this.definitions));
@@ -170,13 +170,13 @@ export class TaskClaiming {
public claimAvailableTasksIfCapacityIsAvailable(
claimingOptions: Omit
): Observable> {
- if (this.getAvailableCapacity()) {
+ if (this.getCapacity()) {
const opts: TaskClaimerOpts = {
batches: this.getClaimingBatches(),
claimOwnershipUntil: claimingOptions.claimOwnershipUntil,
taskStore: this.taskStore,
events$: this.events$,
- getCapacity: this.getAvailableCapacity,
+ getCapacity: this.getCapacity,
unusedTypes: this.unusedTypes,
definitions: this.definitions,
taskMaxAttempts: this.taskMaxAttempts,
diff --git a/x-pack/plugins/task_manager/server/routes/health.test.ts b/x-pack/plugins/task_manager/server/routes/health.test.ts
index 9c08c5b5fb4c4..a97d99079bc58 100644
--- a/x-pack/plugins/task_manager/server/routes/health.test.ts
+++ b/x-pack/plugins/task_manager/server/routes/health.test.ts
@@ -823,8 +823,7 @@ function mockHealthStats(overrides = {}) {
configuration: {
timestamp: new Date().toISOString(),
value: {
- capacity: { config: 10, as_cost: 20, as_workers: 10 },
- claim_strategy: 'default',
+ max_workers: 10,
poll_interval: 3000,
request_capacity: 1000,
monitored_aggregated_stats_refresh_rate: 5000,
@@ -842,19 +841,16 @@ function mockHealthStats(overrides = {}) {
timestamp: new Date().toISOString(),
value: {
count: 4,
- cost: 8,
task_types: {
- actions_telemetry: { count: 2, cost: 4, status: { idle: 2 } },
- alerting_telemetry: { count: 1, cost: 2, status: { idle: 1 } },
- session_cleanup: { count: 1, cost: 2, status: { idle: 1 } },
+ actions_telemetry: { count: 2, status: { idle: 2 } },
+ alerting_telemetry: { count: 1, status: { idle: 1 } },
+ session_cleanup: { count: 1, status: { idle: 1 } },
},
schedule: [],
overdue: 0,
- overdue_cost: 2,
overdue_non_recurring: 0,
estimatedScheduleDensity: [],
non_recurring: 20,
- non_recurring_cost: 40,
owner_ids: [0, 0, 0, 1, 2, 0, 0, 2, 2, 2, 1, 2, 1, 1],
estimated_schedule_density: [],
capacity_requirements: {
diff --git a/x-pack/plugins/task_manager/server/task.ts b/x-pack/plugins/task_manager/server/task.ts
index 96df4a703c5f7..fae99bb8f1f5b 100644
--- a/x-pack/plugins/task_manager/server/task.ts
+++ b/x-pack/plugins/task_manager/server/task.ts
@@ -16,12 +16,6 @@ export enum TaskPriority {
Normal = 50,
}
-export enum TaskCost {
- Tiny = 1,
- Normal = 2,
- ExtraLarge = 10,
-}
-
/*
* Type definitions and validations for tasks.
*/
@@ -133,10 +127,6 @@ export const taskDefinitionSchema = schema.object(
* Priority of this task type. Defaults to "NORMAL" if not defined
*/
priority: schema.maybe(schema.number()),
- /**
- * Cost to run this task type. Defaults to "Normal".
- */
- cost: schema.number({ defaultValue: TaskCost.Normal }),
/**
* An optional more detailed description of what this task does.
*/
@@ -182,7 +172,7 @@ export const taskDefinitionSchema = schema.object(
paramsSchema: schema.maybe(schema.any()),
},
{
- validate({ timeout, priority, cost }) {
+ validate({ timeout, priority }) {
if (!isInterval(timeout) || isErr(tryAsResult(() => parseIntervalAsMillisecond(timeout)))) {
return `Invalid timeout "${timeout}". Timeout must be of the form "{number}{cadance}" where number is an integer. Example: 5m.`;
}
@@ -192,12 +182,6 @@ export const taskDefinitionSchema = schema.object(
.filter((key) => isNaN(Number(key)))
.map((key) => `${key} => ${TaskPriority[key as keyof typeof TaskPriority]}`)}`;
}
-
- if (cost && (!isNumber(cost) || !(cost in TaskCost))) {
- return `Invalid cost "${cost}". Cost must be one of ${Object.keys(TaskCost)
- .filter((key) => isNaN(Number(key)))
- .map((key) => `${key} => ${TaskCost[key as keyof typeof TaskCost]}`)}`;
- }
},
}
);
diff --git a/x-pack/plugins/task_manager/server/task_claimers/index.ts b/x-pack/plugins/task_manager/server/task_claimers/index.ts
index 134c72041f96f..1caa6e2addb0f 100644
--- a/x-pack/plugins/task_manager/server/task_claimers/index.ts
+++ b/x-pack/plugins/task_manager/server/task_claimers/index.ts
@@ -37,7 +37,6 @@ export interface ClaimOwnershipResult {
tasksUpdated: number;
tasksConflicted: number;
tasksClaimed: number;
- tasksLeftUnclaimed?: number;
};
docs: ConcreteTaskInstance[];
timing?: TaskTiming;
@@ -62,12 +61,13 @@ export function getTaskClaimer(logger: Logger, strategy: string): TaskClaimerFn
return claimAvailableTasksDefault;
}
-export function getEmptyClaimOwnershipResult(): ClaimOwnershipResult {
+export function getEmptyClaimOwnershipResult() {
return {
stats: {
tasksUpdated: 0,
tasksConflicted: 0,
tasksClaimed: 0,
+ tasksRejected: 0,
},
docs: [],
};
diff --git a/x-pack/plugins/task_manager/server/task_claimers/strategy_default.test.ts b/x-pack/plugins/task_manager/server/task_claimers/strategy_default.test.ts
index d58fd83486efa..8aa206bbe1872 100644
--- a/x-pack/plugins/task_manager/server/task_claimers/strategy_default.test.ts
+++ b/x-pack/plugins/task_manager/server/task_claimers/strategy_default.test.ts
@@ -133,7 +133,7 @@ describe('TaskClaiming', () => {
excludedTaskTypes,
unusedTypes: unusedTaskTypes,
maxAttempts: taskClaimingOpts.maxAttempts ?? 2,
- getAvailableCapacity: taskClaimingOpts.getAvailableCapacity ?? (() => 10),
+ getCapacity: taskClaimingOpts.getCapacity ?? (() => 10),
taskPartitioner,
...taskClaimingOpts,
});
@@ -158,7 +158,7 @@ describe('TaskClaiming', () => {
excludedTaskTypes?: string[];
unusedTaskTypes?: string[];
}) {
- const getCapacity = taskClaimingOpts.getAvailableCapacity ?? (() => 10);
+ const getCapacity = taskClaimingOpts.getCapacity ?? (() => 10);
const { taskClaiming, store } = initialiseTestClaiming({
storeOpts,
taskClaimingOpts,
@@ -447,7 +447,7 @@ if (doc['task.runAt'].size()!=0) {
},
taskClaimingOpts: {
maxAttempts,
- getAvailableCapacity: (type) => {
+ getCapacity: (type) => {
switch (type) {
case 'limitedToOne':
case 'anotherLimitedToOne':
@@ -577,7 +577,7 @@ if (doc['task.runAt'].size()!=0) {
},
taskClaimingOpts: {
maxAttempts,
- getAvailableCapacity: (type) => {
+ getCapacity: (type) => {
switch (type) {
case 'limitedToTwo':
return 2;
@@ -686,7 +686,7 @@ if (doc['task.runAt'].size()!=0) {
},
taskClaimingOpts: {
maxAttempts,
- getAvailableCapacity: (type) => {
+ getCapacity: (type) => {
switch (type) {
case 'limitedToOne':
case 'anotherLimitedToOne':
@@ -1139,7 +1139,7 @@ if (doc['task.runAt'].size()!=0) {
storeOpts: {
taskManagerId,
},
- taskClaimingOpts: { getAvailableCapacity: () => maxDocs },
+ taskClaimingOpts: { getCapacity: () => maxDocs },
claimingOpts: {
claimOwnershipUntil,
},
@@ -1219,9 +1219,9 @@ if (doc['task.runAt'].size()!=0) {
function instantiateStoreWithMockedApiResponses({
taskManagerId = uuidv4(),
definitions = taskDefinitions,
- getAvailableCapacity = () => 10,
+ getCapacity = () => 10,
tasksClaimed,
- }: Partial> & {
+ }: Partial> & {
taskManagerId?: string;
tasksClaimed?: ConcreteTaskInstance[][];
} = {}) {
@@ -1254,7 +1254,7 @@ if (doc['task.runAt'].size()!=0) {
unusedTypes: [],
taskStore,
maxAttempts: 2,
- getAvailableCapacity,
+ getCapacity,
taskPartitioner,
});
diff --git a/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.test.ts b/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.test.ts
index 2c4b5fd6a96c6..b58ea02893c10 100644
--- a/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.test.ts
+++ b/x-pack/plugins/task_manager/server/task_claimers/strategy_mget.test.ts
@@ -15,11 +15,10 @@ import {
ConcreteTaskInstance,
ConcreteTaskInstanceVersion,
TaskPriority,
- TaskCost,
} from '../task';
import { SearchOpts, StoreOpts } from '../task_store';
import { asTaskClaimEvent, TaskEvent } from '../task_events';
-import { asOk, asErr, isOk, unwrap } from '../lib/result_type';
+import { asOk, isOk, unwrap } from '../lib/result_type';
import { TaskTypeDictionary } from '../task_type_dictionary';
import { mockLogger } from '../test_utils';
import {
@@ -34,7 +33,6 @@ import apm from 'elastic-apm-node';
import { TASK_MANAGER_TRANSACTION_TYPE } from '../task_running';
import { ClaimOwnershipResult } from '.';
import { FillPoolResult } from '../lib/fill_pool';
-import { SavedObjectsErrorHelpers } from '@kbn/core/server';
import { TaskPartitioner } from '../lib/task_partitioner';
import type { MustNotCondition } from '../queries/query_clauses';
import {
@@ -54,7 +52,6 @@ jest.mock('../constants', () => ({
'anotherLimitedToOne',
'limitedToTwo',
'limitedToFive',
- 'yawn',
],
}));
@@ -77,18 +74,14 @@ const taskDefinitions = new TaskTypeDictionary(taskManagerLogger);
taskDefinitions.registerTaskDefinitions({
report: {
title: 'report',
- cost: TaskCost.Normal,
createTaskRunner: jest.fn(),
},
dernstraight: {
title: 'dernstraight',
- cost: TaskCost.ExtraLarge,
createTaskRunner: jest.fn(),
},
yawn: {
title: 'yawn',
- cost: TaskCost.Tiny,
- maxConcurrency: 1,
createTaskRunner: jest.fn(),
},
});
@@ -117,17 +110,6 @@ describe('TaskClaiming', () => {
});
describe('claimAvailableTasks', () => {
- function getVersionMapsFromTasks(tasks: ConcreteTaskInstance[]) {
- const versionMap = new Map();
- const docLatestVersions = new Map();
- for (const task of tasks) {
- versionMap.set(task.id, { esId: task.id, seqNo: 32, primaryTerm: 32 });
- docLatestVersions.set(`task:${task.id}`, { esId: task.id, seqNo: 32, primaryTerm: 32 });
- }
-
- return { versionMap, docLatestVersions };
- }
-
function initialiseTestClaiming({
storeOpts = {},
taskClaimingOpts = {},
@@ -148,27 +130,20 @@ describe('TaskClaiming', () => {
store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
if (hits == null) hits = [generateFakeTasks(1)];
-
- const docVersion = [];
if (versionMaps == null) {
- versionMaps = [];
+ versionMaps = [new Map()];
for (const oneHit of hits) {
const map = new Map();
- const mapWithTaskPrefix = new Map();
+ versionMaps.push(map);
for (const task of oneHit) {
map.set(task.id, { esId: task.id, seqNo: 32, primaryTerm: 32 });
- mapWithTaskPrefix.set(`task:${task.id}`, { esId: task.id, seqNo: 32, primaryTerm: 32 });
}
- versionMaps.push(map);
- docVersion.push(mapWithTaskPrefix);
}
}
for (let i = 0; i < hits.length; i++) {
store.fetch.mockResolvedValueOnce({ docs: hits[i], versionMap: versionMaps[i] });
- store.getDocVersions.mockResolvedValueOnce(docVersion[i]);
- const oneBulkGetResult = hits[i].map((hit) => asOk(hit));
- store.bulkGet.mockResolvedValueOnce(oneBulkGetResult);
+ store.getDocVersions.mockResolvedValueOnce(versionMaps[i]);
const oneBulkResult = hits[i].map((hit) => asOk(hit));
store.bulkUpdate.mockResolvedValueOnce(oneBulkResult);
}
@@ -181,7 +156,7 @@ describe('TaskClaiming', () => {
excludedTaskTypes,
unusedTypes: unusedTaskTypes,
maxAttempts: taskClaimingOpts.maxAttempts ?? 2,
- getAvailableCapacity: taskClaimingOpts.getAvailableCapacity ?? (() => 10),
+ getCapacity: taskClaimingOpts.getCapacity ?? (() => 10),
taskPartitioner,
...taskClaimingOpts,
});
@@ -228,14 +203,6 @@ describe('TaskClaiming', () => {
return unwrap(resultOrErr) as ClaimOwnershipResult;
});
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(store.fetch.mock.calls).toMatchObject({});
- expect(store.getDocVersions.mock.calls).toMatchObject({});
return results.map((result, index) => ({
result,
args: {
@@ -322,1250 +289,8 @@ describe('TaskClaiming', () => {
expect(result).toMatchObject({});
});
- test('should limit claimed tasks based on task cost and available capacity', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }), // total cost = 2
- mockInstance({ id: `id-2`, taskType: 'report' }), // total cost = 4
- mockInstance({ id: `id-3`, taskType: 'yawn' }), // total cost = 5
- mockInstance({ id: `id-4`, taskType: 'dernstraight' }), // claiming this will exceed the available capacity
- mockInstance({ id: `id-5`, taskType: 'report' }),
- mockInstance({ id: `id-6`, taskType: 'report' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
-
- store.bulkGet.mockResolvedValueOnce(
- [fetchedTasks[0], fetchedTasks[1], fetchedTasks[2]].map(asOk)
- );
- store.bulkUpdate.mockResolvedValueOnce(
- [fetchedTasks[0], fetchedTasks[1], fetchedTasks[2]].map(asOk)
- );
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: [],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.debug).toHaveBeenCalledWith(
- 'task claimer claimed: 3; stale: 0; conflicts: 0; missing: 0; capacity reached: 3; updateErrors: 0; removed: 0;',
- { tags: ['claimAvailableTasksMget'] }
- );
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).toHaveBeenCalledWith([
- 'task:id-1',
- 'task:id-2',
- 'task:id-3',
- 'task:id-4',
- 'task:id-5',
- 'task:id-6',
- ]);
- expect(store.bulkUpdate).toHaveBeenCalledTimes(1);
- expect(store.bulkUpdate).toHaveBeenCalledWith(
- [
- {
- ...fetchedTasks[0],
- ownerId: 'test-test',
- retryAt: fetchedTasks[0].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[1],
- ownerId: 'test-test',
- retryAt: fetchedTasks[1].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[2],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkGet).toHaveBeenCalledWith(['id-1', 'id-2', 'id-3']);
-
- expect(result.stats).toEqual({
- tasksClaimed: 3,
- tasksConflicted: 0,
- tasksUpdated: 3,
- tasksLeftUnclaimed: 3,
- });
- expect(result.docs.length).toEqual(3);
- });
-
- test('should not claim tasks of removed type', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }),
- mockInstance({ id: `id-2`, taskType: 'report' }),
- mockInstance({ id: `id-3`, taskType: 'yawn' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
-
- store.bulkGet.mockResolvedValueOnce([fetchedTasks[2]].map(asOk));
- store.bulkUpdate.mockResolvedValueOnce([fetchedTasks[2]].map(asOk));
- store.bulkUpdate.mockResolvedValueOnce([fetchedTasks[0], fetchedTasks[1]].map(asOk));
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: ['report'],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.debug).toHaveBeenCalledWith(
- 'task claimer claimed: 1; stale: 0; conflicts: 0; missing: 0; capacity reached: 0; updateErrors: 0; removed: 2;',
- { tags: ['claimAvailableTasksMget'] }
- );
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).toHaveBeenCalledWith(['task:id-1', 'task:id-2', 'task:id-3']);
- expect(store.bulkUpdate).toHaveBeenCalledTimes(2);
- expect(store.bulkUpdate).toHaveBeenNthCalledWith(
- 1,
- [
- {
- ...fetchedTasks[2],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkUpdate).toHaveBeenNthCalledWith(
- 2,
- [
- {
- ...fetchedTasks[0],
- status: 'unrecognized',
- },
- {
- ...fetchedTasks[1],
- status: 'unrecognized',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkGet).toHaveBeenCalledWith(['id-3']);
-
- expect(result.stats).toEqual({
- tasksClaimed: 1,
- tasksConflicted: 0,
- tasksUpdated: 1,
- tasksLeftUnclaimed: 0,
- });
- expect(result.docs.length).toEqual(1);
- });
-
- test('should log warning if error updating single removed task as unrecognized', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }),
- mockInstance({ id: `id-2`, taskType: 'report' }),
- mockInstance({ id: `id-3`, taskType: 'yawn' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
-
- store.bulkGet.mockResolvedValueOnce([fetchedTasks[2]].map(asOk));
- store.bulkUpdate.mockResolvedValueOnce([fetchedTasks[2]].map(asOk));
- store.bulkUpdate.mockResolvedValueOnce([
- asOk(fetchedTasks[0]),
- // @ts-expect-error
- asErr({
- type: 'task',
- id: fetchedTasks[1].id,
- error: SavedObjectsErrorHelpers.createBadRequestError(),
- }),
- ]);
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: ['report'],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.warn).toHaveBeenCalledWith(
- 'Error updating task id-2:task to mark as unrecognized during claim: Bad Request',
- { tags: ['claimAvailableTasksMget'] }
- );
- expect(taskManagerLogger.debug).toHaveBeenCalledWith(
- 'task claimer claimed: 1; stale: 0; conflicts: 0; missing: 0; capacity reached: 0; updateErrors: 0; removed: 1;',
- { tags: ['claimAvailableTasksMget'] }
- );
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).toHaveBeenCalledWith(['task:id-1', 'task:id-2', 'task:id-3']);
- expect(store.bulkUpdate).toHaveBeenCalledTimes(2);
- expect(store.bulkUpdate).toHaveBeenNthCalledWith(
- 1,
- [
- {
- ...fetchedTasks[2],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkUpdate).toHaveBeenNthCalledWith(
- 2,
- [
- {
- ...fetchedTasks[0],
- status: 'unrecognized',
- },
- {
- ...fetchedTasks[1],
- status: 'unrecognized',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkGet).toHaveBeenCalledWith(['id-3']);
-
- expect(result.stats).toEqual({
- tasksClaimed: 1,
- tasksConflicted: 0,
- tasksUpdated: 1,
- tasksLeftUnclaimed: 0,
- });
- expect(result.docs.length).toEqual(1);
- });
-
- test('should log warning if error updating all removed tasks as unrecognized', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }),
- mockInstance({ id: `id-2`, taskType: 'report' }),
- mockInstance({ id: `id-3`, taskType: 'yawn' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
-
- store.bulkGet.mockResolvedValueOnce([fetchedTasks[2]].map(asOk));
- store.bulkUpdate.mockResolvedValueOnce([fetchedTasks[2]].map(asOk));
- store.bulkUpdate.mockRejectedValueOnce(new Error('Oh no'));
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: ['report'],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.warn).toHaveBeenCalledWith(
- 'Error updating tasks to mark as unrecognized during claim: Error: Oh no',
- { tags: ['claimAvailableTasksMget'] }
- );
- expect(taskManagerLogger.debug).toHaveBeenCalledWith(
- 'task claimer claimed: 1; stale: 0; conflicts: 0; missing: 0; capacity reached: 0; updateErrors: 0; removed: 0;',
- { tags: ['claimAvailableTasksMget'] }
- );
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).toHaveBeenCalledWith(['task:id-1', 'task:id-2', 'task:id-3']);
- expect(store.bulkGet).toHaveBeenCalledWith(['id-3']);
- expect(store.bulkUpdate).toHaveBeenCalledTimes(2);
- expect(store.bulkUpdate).toHaveBeenNthCalledWith(
- 1,
- [
- {
- ...fetchedTasks[2],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkUpdate).toHaveBeenNthCalledWith(
- 2,
- [
- {
- ...fetchedTasks[0],
- status: 'unrecognized',
- },
- {
- ...fetchedTasks[1],
- status: 'unrecognized',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
-
- expect(result.stats).toEqual({
- tasksClaimed: 1,
- tasksConflicted: 0,
- tasksUpdated: 1,
- tasksLeftUnclaimed: 0,
- });
- expect(result.docs.length).toEqual(1);
- });
-
- test('should handle no tasks to claim', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks: ConcreteTaskInstance[] = [];
-
- const { versionMap } = getVersionMapsFromTasks(fetchedTasks);
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: [],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.debug).not.toHaveBeenCalled();
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).not.toHaveBeenCalled();
- expect(store.bulkGet).not.toHaveBeenCalled();
- expect(store.bulkUpdate).not.toHaveBeenCalled();
-
- expect(result.stats).toEqual({
- tasksClaimed: 0,
- tasksConflicted: 0,
- tasksUpdated: 0,
- });
- expect(result.docs.length).toEqual(0);
- });
-
- test('should handle tasks with no search version', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }),
- mockInstance({ id: `id-2`, taskType: 'report' }),
- mockInstance({ id: `id-3`, taskType: 'yawn' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- versionMap.delete('id-1');
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
-
- store.bulkGet.mockResolvedValueOnce([fetchedTasks[1], fetchedTasks[2]].map(asOk));
- store.bulkUpdate.mockResolvedValueOnce([fetchedTasks[1], fetchedTasks[2]].map(asOk));
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: [],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.debug).toHaveBeenCalledWith(
- 'task claimer claimed: 2; stale: 0; conflicts: 0; missing: 1; capacity reached: 0; updateErrors: 0; removed: 0;',
- { tags: ['claimAvailableTasksMget'] }
- );
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).toHaveBeenCalledWith(['task:id-1', 'task:id-2', 'task:id-3']);
- expect(store.bulkUpdate).toHaveBeenCalledTimes(1);
- expect(store.bulkUpdate).toHaveBeenCalledWith(
- [
- {
- ...fetchedTasks[1],
- ownerId: 'test-test',
- retryAt: fetchedTasks[1].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[2],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkGet).toHaveBeenCalledWith(['id-2', 'id-3']);
-
- expect(result.stats).toEqual({
- tasksClaimed: 2,
- tasksConflicted: 0,
- tasksUpdated: 2,
- tasksLeftUnclaimed: 0,
- });
- expect(result.docs.length).toEqual(2);
- });
-
- test('should handle tasks with no latest version', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }),
- mockInstance({ id: `id-2`, taskType: 'report' }),
- mockInstance({ id: `id-3`, taskType: 'yawn' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- docLatestVersions.delete('task:id-1');
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
-
- store.bulkGet.mockResolvedValueOnce([fetchedTasks[1], fetchedTasks[2]].map(asOk));
- store.bulkUpdate.mockResolvedValueOnce([fetchedTasks[1], fetchedTasks[2]].map(asOk));
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: [],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.debug).toHaveBeenCalledWith(
- 'task claimer claimed: 2; stale: 0; conflicts: 0; missing: 1; capacity reached: 0; updateErrors: 0; removed: 0;',
- { tags: ['claimAvailableTasksMget'] }
- );
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).toHaveBeenCalledWith(['task:id-1', 'task:id-2', 'task:id-3']);
- expect(store.bulkUpdate).toHaveBeenCalledTimes(1);
- expect(store.bulkUpdate).toHaveBeenCalledWith(
- [
- {
- ...fetchedTasks[1],
- ownerId: 'test-test',
- retryAt: fetchedTasks[1].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[2],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkGet).toHaveBeenCalledWith(['id-2', 'id-3']);
-
- expect(result.stats).toEqual({
- tasksClaimed: 2,
- tasksConflicted: 0,
- tasksUpdated: 2,
- tasksLeftUnclaimed: 0,
- });
- expect(result.docs.length).toEqual(2);
- });
-
- test('should handle stale tasks', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }),
- mockInstance({ id: `id-2`, taskType: 'report' }),
- mockInstance({ id: `id-3`, taskType: 'yawn' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- docLatestVersions.set('task:id-1', { esId: 'task:id-1', seqNo: 33, primaryTerm: 33 });
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
-
- store.bulkGet.mockResolvedValueOnce([fetchedTasks[1], fetchedTasks[2]].map(asOk));
- store.bulkUpdate.mockResolvedValueOnce([fetchedTasks[1], fetchedTasks[2]].map(asOk));
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: [],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.debug).toHaveBeenCalledWith(
- 'task claimer claimed: 2; stale: 1; conflicts: 1; missing: 0; capacity reached: 0; updateErrors: 0; removed: 0;',
- { tags: ['claimAvailableTasksMget'] }
- );
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).toHaveBeenCalledWith(['task:id-1', 'task:id-2', 'task:id-3']);
- expect(store.bulkUpdate).toHaveBeenCalledTimes(1);
- expect(store.bulkUpdate).toHaveBeenCalledWith(
- [
- {
- ...fetchedTasks[1],
- ownerId: 'test-test',
- retryAt: fetchedTasks[1].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[2],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkGet).toHaveBeenCalledWith(['id-2', 'id-3']);
-
- expect(result.stats).toEqual({
- tasksClaimed: 2,
- tasksConflicted: 1,
- tasksUpdated: 2,
- tasksLeftUnclaimed: 0,
- });
- expect(result.docs.length).toEqual(2);
- });
-
- test('should correctly handle limited concurrency tasks', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }),
- mockInstance({ id: `id-2`, taskType: 'report' }),
- mockInstance({ id: `id-3`, taskType: 'yawn' }),
- mockInstance({ id: `id-4`, taskType: 'yawn' }),
- mockInstance({ id: `id-5`, taskType: 'report' }),
- mockInstance({ id: `id-6`, taskType: 'yawn' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
-
- store.bulkGet.mockResolvedValueOnce(
- [fetchedTasks[0], fetchedTasks[1], fetchedTasks[2], fetchedTasks[4]].map(asOk)
- );
- store.bulkUpdate.mockResolvedValueOnce(
- [fetchedTasks[0], fetchedTasks[1], fetchedTasks[2], fetchedTasks[4]].map(asOk)
- );
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: [],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.debug).toHaveBeenCalledWith(
- 'task claimer claimed: 4; stale: 0; conflicts: 0; missing: 0; capacity reached: 0; updateErrors: 0; removed: 0;',
- { tags: ['claimAvailableTasksMget'] }
- );
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).toHaveBeenCalledWith([
- 'task:id-1',
- 'task:id-2',
- 'task:id-3',
- 'task:id-4',
- 'task:id-5',
- 'task:id-6',
- ]);
- expect(store.bulkUpdate).toHaveBeenCalledTimes(1);
- expect(store.bulkUpdate).toHaveBeenCalledWith(
- [
- {
- ...fetchedTasks[0],
- ownerId: 'test-test',
- retryAt: fetchedTasks[1].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[1],
- ownerId: 'test-test',
- retryAt: fetchedTasks[1].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[2],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[4],
- ownerId: 'test-test',
- retryAt: fetchedTasks[1].runAt,
- status: 'claiming',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkGet).toHaveBeenCalledWith(['id-1', 'id-2', 'id-3', 'id-5']);
-
- expect(result.stats).toEqual({
- tasksClaimed: 4,
- tasksConflicted: 0,
- tasksUpdated: 4,
- tasksLeftUnclaimed: 0,
- });
- expect(result.docs.length).toEqual(4);
- });
-
- test('should handle individual errors when bulk getting the full task doc', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }),
- mockInstance({ id: `id-2`, taskType: 'report' }),
- mockInstance({ id: `id-3`, taskType: 'yawn' }),
- mockInstance({ id: `id-4`, taskType: 'report' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
- store.bulkUpdate.mockResolvedValueOnce(
- [fetchedTasks[0], fetchedTasks[1], fetchedTasks[2], fetchedTasks[3]].map(asOk)
- );
- store.bulkGet.mockResolvedValueOnce([
- asOk(fetchedTasks[0]),
- // @ts-expect-error
- asErr({
- type: 'task',
- id: fetchedTasks[1].id,
- error: new Error('Oh no'),
- }),
- asOk(fetchedTasks[2]),
- asOk(fetchedTasks[3]),
- ]);
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: [],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.debug).toHaveBeenCalledWith(
- 'task claimer claimed: 3; stale: 0; conflicts: 0; missing: 0; capacity reached: 0; updateErrors: 0; removed: 0;',
- { tags: ['claimAvailableTasksMget'] }
- );
- expect(taskManagerLogger.warn).toHaveBeenCalledWith(
- 'Error getting full task id-2:task during claim: Oh no',
- { tags: ['claimAvailableTasksMget'] }
- );
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).toHaveBeenCalledWith([
- 'task:id-1',
- 'task:id-2',
- 'task:id-3',
- 'task:id-4',
- ]);
- expect(store.bulkUpdate).toHaveBeenCalledTimes(1);
- expect(store.bulkUpdate).toHaveBeenCalledWith(
- [
- {
- ...fetchedTasks[0],
- ownerId: 'test-test',
- retryAt: fetchedTasks[0].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[1],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[2],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[3],
- ownerId: 'test-test',
- retryAt: fetchedTasks[3].runAt,
- status: 'claiming',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkGet).toHaveBeenCalledWith(['id-1', 'id-2', 'id-3', 'id-4']);
-
- expect(result.stats).toEqual({
- tasksClaimed: 3,
- tasksConflicted: 0,
- tasksUpdated: 3,
- tasksLeftUnclaimed: 0,
- });
- expect(result.docs.length).toEqual(3);
- });
-
- test('should handle error when bulk getting all full task docs', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }),
- mockInstance({ id: `id-2`, taskType: 'report' }),
- mockInstance({ id: `id-3`, taskType: 'yawn' }),
- mockInstance({ id: `id-4`, taskType: 'report' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
- store.bulkUpdate.mockResolvedValueOnce(
- [fetchedTasks[0], fetchedTasks[1], fetchedTasks[2], fetchedTasks[3]].map(asOk)
- );
- store.bulkGet.mockRejectedValueOnce(new Error('oh no'));
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: [],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk(resultOrErr)) {
- expect(resultOrErr).toBe(undefined);
- }
-
- const result = unwrap(resultOrErr) as ClaimOwnershipResult;
-
- expect(apm.startTransaction).toHaveBeenCalledWith(
- TASK_MANAGER_MARK_AS_CLAIMED,
- TASK_MANAGER_TRANSACTION_TYPE
- );
- expect(mockApmTrans.end).toHaveBeenCalledWith('success');
-
- expect(taskManagerLogger.debug).toHaveBeenCalledWith(
- 'task claimer claimed: 0; stale: 0; conflicts: 0; missing: 0; capacity reached: 0; updateErrors: 0; removed: 0;',
- { tags: ['claimAvailableTasksMget'] }
- );
- expect(taskManagerLogger.warn).toHaveBeenCalledWith(
- 'Error getting full task documents during claim: Error: oh no',
- { tags: ['claimAvailableTasksMget'] }
- );
-
- expect(store.fetch.mock.calls[0][0]).toMatchObject({ size: 40, seq_no_primary_term: true });
- expect(store.getDocVersions).toHaveBeenCalledWith([
- 'task:id-1',
- 'task:id-2',
- 'task:id-3',
- 'task:id-4',
- ]);
- expect(store.bulkUpdate).toHaveBeenCalledTimes(1);
- expect(store.bulkUpdate).toHaveBeenCalledWith(
- [
- {
- ...fetchedTasks[0],
- ownerId: 'test-test',
- retryAt: fetchedTasks[0].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[1],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[2],
- ownerId: 'test-test',
- retryAt: fetchedTasks[2].runAt,
- status: 'claiming',
- },
- {
- ...fetchedTasks[3],
- ownerId: 'test-test',
- retryAt: fetchedTasks[3].runAt,
- status: 'claiming',
- },
- ],
- { validate: false, excludeLargeFields: true }
- );
- expect(store.bulkGet).toHaveBeenCalledWith(['id-1', 'id-2', 'id-3', 'id-4']);
-
- expect(result.stats).toEqual({
- tasksClaimed: 0,
- tasksConflicted: 0,
- tasksUpdated: 0,
- tasksLeftUnclaimed: 0,
- });
- expect(result.docs.length).toEqual(0);
- });
-
- test('should handle individual errors when bulk updating the task doc', async () => {
- const store = taskStoreMock.create({ taskManagerId: 'test-test' });
- store.convertToSavedObjectIds.mockImplementation((ids) => ids.map((id) => `task:${id}`));
-
- const fetchedTasks = [
- mockInstance({ id: `id-1`, taskType: 'report' }),
- mockInstance({ id: `id-2`, taskType: 'report' }),
- mockInstance({ id: `id-3`, taskType: 'yawn' }),
- mockInstance({ id: `id-4`, taskType: 'report' }),
- ];
-
- const { versionMap, docLatestVersions } = getVersionMapsFromTasks(fetchedTasks);
- store.fetch.mockResolvedValueOnce({ docs: fetchedTasks, versionMap });
- store.getDocVersions.mockResolvedValueOnce(docLatestVersions);
- store.bulkUpdate.mockResolvedValueOnce([
- asOk(fetchedTasks[0]),
- // @ts-expect-error
- asErr({
- type: 'task',
- id: fetchedTasks[1].id,
- error: new Error('Oh no'),
- }),
- asOk(fetchedTasks[2]),
- asOk(fetchedTasks[3]),
- ]);
- store.bulkGet.mockResolvedValueOnce([
- asOk(fetchedTasks[0]),
- asOk(fetchedTasks[2]),
- asOk(fetchedTasks[3]),
- ]);
-
- const taskClaiming = new TaskClaiming({
- logger: taskManagerLogger,
- strategy: CLAIM_STRATEGY_MGET,
- definitions: taskDefinitions,
- taskStore: store,
- excludedTaskTypes: [],
- unusedTypes: [],
- maxAttempts: 2,
- getAvailableCapacity: () => 10,
- taskPartitioner,
- });
-
- const [resultOrErr] = await getAllAsPromise(
- taskClaiming.claimAvailableTasksIfCapacityIsAvailable({ claimOwnershipUntil: new Date() })
- );
-
- if (!isOk