diff --git a/api/console-sm/console-sm-openapi.yaml b/api/administration-sm/administration-sm-openapi.yaml
similarity index 98%
rename from api/console-sm/console-sm-openapi.yaml
rename to api/administration-sm/administration-sm-openapi.yaml
index dd81c4cb7d1..4e5c73fef9f 100644
--- a/api/console-sm/console-sm-openapi.yaml
+++ b/api/administration-sm/administration-sm-openapi.yaml
@@ -155,8 +155,8 @@ components:
scheme: bearer
bearerFormat: JWT
info:
- title: Console SM Admin API
- description: Access the administration API of Console SM.
+ title: Administration API (Self-Managed)
+ description: Access the administration API of Console Self-Managed.
version: 1.0.0
contact:
url: https://www.camunda.com
diff --git a/api/console-sm/generation-strategy.js b/api/administration-sm/generation-strategy.js
similarity index 56%
rename from api/console-sm/generation-strategy.js
rename to api/administration-sm/generation-strategy.js
index 2ebe0da279d..3d0908043a2 100644
--- a/api/console-sm/generation-strategy.js
+++ b/api/administration-sm/generation-strategy.js
@@ -1,15 +1,17 @@
const { makeServerDynamic } = require("../make-server-dynamic");
const removeDuplicateVersionBadge = require("../remove-duplicate-version-badge");
-const outputDir = "docs/apis-tools/console-sm-api/specifications";
-const specFile = "api/console-sm/console-sm-openapi.yaml";
+const outputDir = "docs/apis-tools/administration-sm-api/specifications";
+const specFile = "api/administration-sm/administration-sm-openapi.yaml";
function preGenerateDocs() {
makeServerDynamic(specFile);
}
function postGenerateDocs() {
- removeDuplicateVersionBadge(`${outputDir}/console-sm-admin-api.info.mdx`);
+ removeDuplicateVersionBadge(
+ `${outputDir}/administration-api-self-managed.info.mdx`
+ );
}
module.exports = {
diff --git a/api/camunda/camunda-openapi.yaml b/api/camunda/camunda-openapi.yaml
index 8c6e3c1dc14..18390c8a3f3 100644
--- a/api/camunda/camunda-openapi.yaml
+++ b/api/camunda/camunda-openapi.yaml
@@ -49,6 +49,7 @@ tags:
- name: Signal
- name: Tenant
- name: User
+ - name: Usage metrics
- name: User task
- name: Variable
@@ -100,6 +101,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/CamundaUser"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/CamundaUserNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/CamundaUser"
"401":
$ref: "#/components/responses/Unauthorized"
"500":
@@ -126,6 +133,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/JobActivationResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/JobActivationResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/JobActivationResponse"
"400":
description: >
The provided data is not valid.
@@ -385,6 +398,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/TenantCreateResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/TenantCreateResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/TenantCreateResponse"
"400":
description: The provided data is not valid.
content:
@@ -430,6 +449,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/TenantUpdateResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/TenantUpdateResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/TenantUpdateResponse"
"400":
description: The provided data is not valid.
content:
@@ -468,6 +493,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/TenantItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/TenantItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/TenantItem"
"400":
description: The provided data is not valid.
content:
@@ -790,6 +821,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/TenantSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/TenantSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/TenantSearchQueryResponse"
"400":
description: The provided data is not valid.
content:
@@ -932,6 +969,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/UserTaskItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/UserTaskItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/UserTaskItem"
"400":
description: >
The provided data is not valid.
@@ -1027,6 +1070,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/FormItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/FormItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/FormItem"
"204":
description: >
The user task was found, but no form is associated with it.
@@ -1113,6 +1162,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/UserTaskSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/UserTaskSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/UserTaskSearchQueryResponse"
"400":
description: >
The user task search query failed.
@@ -1158,6 +1213,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/VariableSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/VariableSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/VariableSearchQueryResponse"
"400":
description: >
The user task variables search query failed.
@@ -1191,6 +1252,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/VariableSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/VariableSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/VariableSearchQueryResponse"
"400":
description: >
The user task search query failed.
@@ -1230,6 +1297,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/VariableItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/VariableItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/VariableItem"
"400":
description: "Bad request"
content:
@@ -1326,6 +1399,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/ProcessDefinitionSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/ProcessDefinitionSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/ProcessDefinitionSearchQueryResponse"
"400":
description: >
The process definition search query failed.
@@ -1365,6 +1444,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/ProcessDefinitionItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/ProcessDefinitionItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/ProcessDefinitionItem"
"400":
description: >
The process definition request failed.
@@ -1468,6 +1553,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/FormItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/FormItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/FormItem"
"204":
description: >
The process was found, but no form is associated with it.
@@ -1528,6 +1619,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/CreateProcessInstanceResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/CreateProcessInstanceResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/CreateProcessInstanceResponse"
"400":
description: The provided data is not valid.
"500":
@@ -1556,6 +1653,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/ProcessInstanceItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/ProcessInstanceItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/ProcessInstanceItem"
"400":
description: The provided data is not valid.
content:
@@ -1596,6 +1699,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/ProcessInstanceSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/ProcessInstanceSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/ProcessInstanceSearchQueryResponse"
"400":
description: >
The process instance search query failed.
@@ -1763,6 +1872,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/FlowNodeInstanceSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/FlowNodeInstanceSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/FlowNodeInstanceSearchQueryResponse"
"400":
description: >
The Flow node instance search query failed.
@@ -1802,6 +1917,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/FlowNodeInstanceItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/FlowNodeInstanceItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/FlowNodeInstanceItem"
"400":
description: >
The flow node instance request failed.
@@ -1847,6 +1968,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/DecisionDefinitionSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/DecisionDefinitionSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/DecisionDefinitionSearchQueryResponse"
"400":
description: >
The decision definition search query failed.
@@ -1886,6 +2013,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/DecisionDefinitionItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/DecisionDefinitionItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/DecisionDefinitionItem"
"400":
description: >
The decision definition request failed.
@@ -1978,6 +2111,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/DecisionRequirementsSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/DecisionRequirementsSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/DecisionRequirementsSearchQueryResponse"
"400":
description: >
The search query failed.
@@ -2017,6 +2156,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/DecisionRequirementsItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/DecisionRequirementsItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/DecisionRequirementsItem"
"400":
description: >
The decision requirements request failed.
@@ -2109,6 +2254,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/DecisionInstanceSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/DecisionInstanceSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/DecisionInstanceSearchQueryResponse"
"400":
description: >
The decision instance search query failed.
@@ -2205,6 +2356,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/EvaluateDecisionResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/EvaluateDecisionResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/EvaluateDecisionResponse"
"400":
description: The provided data is not valid.
content:
@@ -2291,6 +2448,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/AuthorizationSearchResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/AuthorizationSearchResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/AuthorizationSearchResponse"
"400":
description: >
The authorization search query failed.
@@ -2327,6 +2490,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/RoleCreateResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/RoleCreateResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/RoleCreateResponse"
"400":
description: |
The role could not be created.
@@ -2364,6 +2533,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/RoleItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/RoleItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/RoleItem"
"401":
$ref: "#/components/responses/Unauthorized"
"403":
@@ -2464,6 +2639,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/RoleSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/RoleSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/RoleSearchQueryResponse"
"400":
description: >
The role search query failed.
@@ -2501,6 +2682,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/GroupCreateResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/GroupCreateResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/GroupCreateResponse"
"400":
description: |
The group could not be created.
@@ -2538,6 +2725,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/GroupItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/GroupItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/GroupItem"
"401":
$ref: "#/components/responses/Unauthorized"
"403":
@@ -2731,6 +2924,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/GroupSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/GroupSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/GroupSearchQueryResponse"
"400":
description: >
The group search query failed.
@@ -2770,6 +2969,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/MappingRuleCreateResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/MappingRuleCreateResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/MappingRuleCreateResponse"
"400":
description: The mapping rule could not be created.
content:
@@ -2841,7 +3046,13 @@ paths:
content:
application/json:
schema:
- $ref: "#/components/schemas/MappingSearchResponse"
+ $ref: "#/components/schemas/MappingSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/MappingSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/MappingSearchQueryResponse"
"400":
description: >
The mapping rule search query failed.
@@ -2881,6 +3092,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/MessagePublicationResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/MessagePublicationResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/MessagePublicationResponse"
"400":
description: The provided data is not valid.
content:
@@ -2912,6 +3129,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/MessageCorrelationResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/MessageCorrelationResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/MessageCorrelationResponse"
"400":
description: The provided data is not valid
content:
@@ -3146,6 +3369,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/UserCreateResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/UserCreateResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/UserCreateResponse"
"400":
description: |
Unable to create the user.
@@ -3189,6 +3418,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/UserSearchResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/UserSearchResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/UserSearchResponse"
"400":
description: >
The user search query failed.
@@ -3233,6 +3468,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/AuthorizationSearchResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/AuthorizationSearchResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/AuthorizationSearchResponse"
"400":
description: >
The user authorization search query failed.
@@ -3304,6 +3545,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/IncidentSearchQueryResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/IncidentSearchQueryResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/IncidentSearchQueryResponse"
"400":
description: >
The incident search query failed.
@@ -3339,6 +3586,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/IncidentItem"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/IncidentItemNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/IncidentItem"
"400":
description: >
The incident request failed.
@@ -3361,7 +3614,50 @@ paths:
$ref: "#/components/schemas/ProblemDetail"
"500":
$ref: "#/components/responses/InternalServerError"
-
+ /usage-metrics:
+ get:
+ tags:
+ - Usage metrics
+ operationId: getUsageMetrics
+ summary: Get usage metrics
+ description: Retrieve the usage metrics for a given start and end date.
+ parameters:
+ - name: startTime
+ in: query
+ required: true
+ description: The start date for usage metrics (inclusive).
+ schema:
+ type: string
+ format: date-time
+ - name: endTime
+ in: query
+ required: true
+ description: The end date for usage metrics (inclusive).
+ schema:
+ type: string
+ format: date-time
+ responses:
+ "200":
+ description: >
+ The usage metrics search result.
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/UsageMetricsResponse"
+ "400":
+ description: >
+ The usage metrics request failed.
+ More details are provided in the response body.
+ content:
+ application/problem+json:
+ schema:
+ $ref: "#/components/schemas/ProblemDetail"
+ "401":
+ $ref: "#/components/responses/Unauthorized"
+ "403":
+ $ref: "#/components/responses/Forbidden"
+ "500":
+ $ref: "#/components/responses/InternalServerError"
/deployments:
post:
tags:
@@ -3397,6 +3693,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/DeploymentResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/DeploymentResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/DeploymentResponse"
"400":
description: >
The document upload failed. More details are provided in the response body.
@@ -3507,6 +3809,12 @@ paths:
application/json:
schema:
$ref: "#/components/schemas/SignalBroadcastResponse"
+ application/vnd.camunda.api.keys.number+json:
+ schema:
+ $ref: "#/components/schemas/SignalBroadcastResponseNumberKeys"
+ application/vnd.camunda.api.keys.string+json:
+ schema:
+ $ref: "#/components/schemas/SignalBroadcastResponse"
"400":
description: The provided data is not valid.
content:
@@ -3536,13 +3844,20 @@ components:
required:
- tenantId
+ TenantCreateResponseNumberKeys:
+ deprecated: true
+ type: object
+ properties:
+ tenantKey:
+ description: The external key of the created tenant
+ type: integer
+ format: int64
TenantCreateResponse:
- type: "object"
+ type: object
properties:
tenantKey:
description: The external key of the created tenant
- type: "integer"
- format: "int64"
+ type: string
TenantUpdateRequest:
type: object
@@ -3553,40 +3868,76 @@ components:
required:
- name
- TenantUpdateResponse:
+ TenantUpdateResponseBase:
+ description: Base properties for TenantUpdateResponse.
type: object
properties:
- tenantKey:
- type: integer
- description: The unique system-generated internal tenant ID
- format: int64
tenantId:
type: string
description: The unique external tenant ID
name:
type: string
description: The name of the tenant.
-
- TenantItem:
- description: Tenant search response item.
+ TenantUpdateResponseNumberKeys:
+ deprecated: true
type: object
+ allOf:
+ - $ref: "#/components/schemas/TenantUpdateResponseBase"
properties:
tenantKey:
type: integer
description: The unique system-generated internal tenant ID.
format: int64
+ TenantUpdateResponse:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/TenantUpdateResponseBase"
+ properties:
+ tenantKey:
+ type: string
+ description: The unique system-generated internal tenant ID.
+
+ TenantItemBase:
+ description: Base properties for TenantItem.
+ type: object
+ properties:
name:
type: string
description: The tenant name.
tenantId:
type: string
description: The unique external tenant ID.
+ TenantItemNumberKeys:
+ description: Tenant search response item. Key attributes as numeric values.
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/TenantItemBase"
+ properties:
+ tenantKey:
+ type: integer
+ description: The unique system-generated internal tenant ID.
+ format: int64
assignedMemberKeys:
type: array
description: The set of keys of members assigned to the tenant.
items:
type: integer
format: int64
+ TenantItem:
+ description: Tenant search response item.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/TenantItemBase"
+ properties:
+ tenantKey:
+ type: string
+ description: The unique system-generated internal tenant ID.
+ assignedMemberKeys:
+ type: array
+ description: The set of keys of members assigned to the tenant.
+ items:
+ type: string
TenantSearchQueryRequest:
description: Tenant search request
@@ -3610,44 +3961,29 @@ components:
type: string
description: The name of the tenant.
- TenantSearchQueryResponse:
- description: Tenant search response.
+ TenantSearchQueryResponseNumberKeys:
+ description: Tenant search response. Key attributes as numeric values.
+ deprecated: true
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
- type: object
properties:
items:
description: The matching tenants.
type: array
items:
- $ref: "#/components/schemas/TenantItem"
-
- MappingItem:
- description: Mapping rule search response item.
+ $ref: "#/components/schemas/TenantItemNumberKeys"
+ TenantSearchQueryResponse:
+ description: Tenant search response.
type: object
- properties:
- mappingKey:
- type: integer
- description: The unique system-generated internal mapping ID.
- format: int64
- claimName:
- type: string
- description: The claim name to match against a token.
- claimValue:
- type: string
- description: The value of the claim to match.
-
- MappingSearchQueryResponse:
- description: Mapping rule search response.
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
- type: object
properties:
items:
- description: The matching mapping rules.
+ description: The matching tenants.
type: array
items:
- $ref: "#/components/schemas/MappingItem"
+ $ref: "#/components/schemas/TenantItem"
UserTaskSearchQueryRequest:
allOf:
@@ -3664,11 +4000,23 @@ components:
- $ref: "#/components/schemas/SearchQueryRequest"
description: User task search query request.
type: object
- UserTaskSearchQueryResponse:
+ UserTaskSearchQueryResponseNumberKeys:
+ description: User task search query response. Key attributes as numeric values.
+ deprecated: true
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching user tasks.
+ type: array
+ items:
+ $ref: "#/components/schemas/UserTaskItemNumberKeys"
+ UserTaskSearchQueryResponse:
description: User task search query response.
type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
properties:
items:
description: The matching user tasks.
@@ -3747,13 +4095,10 @@ components:
value:
type: string
description: The value of the variable.
- UserTaskItem:
+ UserTaskItemBase:
+ description: Base properties for UserTaskItem.
type: object
properties:
- userTaskKey:
- description: The key of the user task.
- type: integer
- format: int64
name:
type: string
description: The name for this user task.
@@ -3771,10 +4116,6 @@ components:
elementId:
type: string
description: The element ID of the user task.
- elementInstanceKey:
- type: integer
- description: The key of the element instance.
- format: int64
candidateGroups:
type: array
description: The candidate groups for this user task.
@@ -3788,18 +4129,6 @@ components:
processDefinitionId:
type: string
description: The ID of the process definition.
- processDefinitionKey:
- type: integer
- description: The key of the process definition.
- format: int64
- processInstanceKey:
- type: integer
- description: The key of the process instance.
- format: int64
- formKey:
- type: integer
- description: The key of the form.
- format: int64
creationDate:
type: string
description: The creation date of a user task.
@@ -3837,6 +4166,52 @@ components:
minimum: 0
maximum: 100
default: 50
+ UserTaskItemNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/UserTaskItemBase"
+ properties:
+ userTaskKey:
+ description: The key of the user task.
+ type: integer
+ format: int64
+ elementInstanceKey:
+ type: integer
+ description: The key of the element instance.
+ format: int64
+ processDefinitionKey:
+ type: integer
+ description: The key of the process definition.
+ format: int64
+ processInstanceKey:
+ type: integer
+ description: The key of the process instance.
+ format: int64
+ formKey:
+ type: integer
+ description: The key of the form.
+ format: int64
+ UserTaskItem:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/UserTaskItemBase"
+ properties:
+ userTaskKey:
+ description: The key of the user task.
+ type: string
+ elementInstanceKey:
+ type: string
+ description: The key of the element instance.
+ processDefinitionKey:
+ type: string
+ description: The key of the process definition.
+ processInstanceKey:
+ type: string
+ description: The key of the process instance.
+ formKey:
+ type: string
+ description: The key of the form.
VariableSearchQueryRequest:
allOf:
- $ref: "#/components/schemas/SearchQueryRequest"
@@ -3877,25 +4252,33 @@ components:
isTruncated:
description: Whether the value is truncated or not.
type: boolean
- VariableSearchQueryResponse:
+ VariableSearchQueryResponseNumberKeys:
+ description: Variable search query response. Key attributes as numeric values.
+ deprecated: true
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching variables.
+ type: array
+ items:
+ $ref: "#/components/schemas/VariableItemNumberKeys"
+ VariableSearchQueryResponse:
description: Variable search query response.
type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
properties:
items:
description: The matching variables.
type: array
items:
$ref: "#/components/schemas/VariableItem"
- VariableItem:
- description: Variable search response item.
+ VariableItemBase:
+ description: Base properties for VariableItem.
type: object
properties:
- variableKey:
- description: The key for this variable.
- type: integer
- format: int64
name:
description: Name of this variable.
type: string
@@ -3905,6 +4288,23 @@ components:
fullValue:
description: Full value of this variable.
type: string
+ tenantId:
+ description: Tenant ID of this variable.
+ type: string
+ isTruncated:
+ description: Whether the value is truncated or not.
+ type: boolean
+ VariableItemNumberKeys:
+ description: Variable search response item. Key attributes as numeric values.
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/VariableItemBase"
+ properties:
+ variableKey:
+ description: The key for this variable.
+ type: integer
+ format: int64
scopeKey:
description: The key of the scope of this variable.
type: integer
@@ -3913,12 +4313,21 @@ components:
description: The key of the process instance of this variable.
type: integer
format: int64
- tenantId:
- description: Tenant ID of this variable.
+ VariableItem:
+ description: Variable search response item.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/VariableItemBase"
+ properties:
+ variableKey:
+ description: The key for this variable.
+ type: string
+ scopeKey:
+ description: The key of the scope of this variable.
+ type: string
+ processInstanceKey:
+ description: The key of the process instance of this variable.
type: string
- isTruncated:
- description: Whether the value is truncated or not.
- type: boolean
ProcessDefinitionSearchQueryRequest:
allOf:
- $ref: "#/components/schemas/SearchQueryRequest"
@@ -3955,23 +4364,31 @@ components:
tenantId:
description: Tenant ID of this process definition.
type: string
- ProcessDefinitionSearchQueryResponse:
+ ProcessDefinitionSearchQueryResponseNumberKeys:
+ deprecated: true
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching process definitions.
+ type: array
+ items:
+ $ref: "#/components/schemas/ProcessDefinitionItemNumberKeys"
+ ProcessDefinitionSearchQueryResponse:
type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
properties:
items:
description: The matching process definitions.
type: array
items:
$ref: "#/components/schemas/ProcessDefinitionItem"
- ProcessDefinitionItem:
+ ProcessDefinitionItemBase:
+ description: Base properties for ProcessDefinitionItem.
type: object
properties:
- processDefinitionKey:
- description: The key for this process definition.
- type: integer
- format: int64
name:
description: Name of this process definition.
type: string
@@ -3991,6 +4408,24 @@ components:
tenantId:
description: Tenant ID of this process definition.
type: string
+ ProcessDefinitionItemNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/ProcessDefinitionItemBase"
+ properties:
+ processDefinitionKey:
+ description: The key for this process definition.
+ type: integer
+ format: int64
+ ProcessDefinitionItem:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/ProcessDefinitionItemBase"
+ properties:
+ processDefinitionKey:
+ description: The key for this process definition.
+ type: string
ProcessInstanceSearchQueryRequest:
description: Process instance search request.
allOf:
@@ -4276,25 +4711,33 @@ components:
value:
description: The value of the variable.
type: string
+ ProcessInstanceSearchQueryResponseNumberKeys:
+ description: Process instance search response. Key attributes as numeric values.
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching process instances.
+ type: array
+ items:
+ $ref: "#/components/schemas/ProcessInstanceItemNumberKeys"
ProcessInstanceSearchQueryResponse:
description: Process instance search response.
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
- type: object
properties:
items:
description: The matching process instances.
type: array
items:
$ref: "#/components/schemas/ProcessInstanceItem"
- ProcessInstanceItem:
- description: Process instance search response item.
+ ProcessInstanceItemBase:
+ description: Base properties for ProcessInstanceItem.
type: object
properties:
- processInstanceKey:
- type: integer
- description: The key of this process instance.
- format: int64
processDefinitionId:
type: string
description: The process definition ID.
@@ -4308,18 +4751,6 @@ components:
processDefinitionVersionTag:
type: string
description: The process definition version tag.
- processDefinitionKey:
- type: integer
- description: The process definition key.
- format: int64
- parentProcessInstanceKey:
- type: integer
- description: The parent process instance key.
- format: int64
- parentFlowNodeInstanceKey:
- type: integer
- description: The parent flow node instance key.
- format: int64
startDate:
type: string
description: The start date.
@@ -4336,6 +4767,47 @@ components:
tenantId:
type: string
description: The tenant ID.
+ ProcessInstanceItemNumberKeys:
+ description: Process instance search response item. Key attributes as numeric values.
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/ProcessInstanceItemBase"
+ properties:
+ processInstanceKey:
+ type: integer
+ description: The key of this process instance.
+ format: int64
+ processDefinitionKey:
+ type: integer
+ description: The process definition key.
+ format: int64
+ parentProcessInstanceKey:
+ type: integer
+ description: The parent process instance key.
+ format: int64
+ parentFlowNodeInstanceKey:
+ type: integer
+ description: The parent flow node instance key.
+ format: int64
+ ProcessInstanceItem:
+ description: Process instance search response item.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/ProcessInstanceItemBase"
+ properties:
+ processInstanceKey:
+ type: string
+ description: The key of this process instance.
+ processDefinitionKey:
+ type: string
+ description: The process definition key.
+ parentProcessInstanceKey:
+ type: string
+ description: The parent process instance key.
+ parentFlowNodeInstanceKey:
+ type: string
+ description: The parent flow node instance key.
ProcessInstanceStateEnum:
description: The state, one of ACTIVE, COMPLETED, CANCELED.
enum:
@@ -4435,31 +4907,31 @@ components:
tenantId:
description: The tenant ID.
type: string
- FlowNodeInstanceSearchQueryResponse:
+ FlowNodeInstanceSearchQueryResponseNumberKeys:
+ deprecated: true
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching flow node instances.
+ type: array
+ items:
+ $ref: "#/components/schemas/FlowNodeInstanceItemNumberKeys"
+ FlowNodeInstanceSearchQueryResponse:
type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
properties:
items:
description: The matching flow node instances.
type: array
items:
$ref: "#/components/schemas/FlowNodeInstanceItem"
- FlowNodeInstanceItem:
+ FlowNodeInstanceItemBase:
+ description: Base properties for FlowNodeInstanceItem.
type: object
properties:
- flowNodeInstanceKey:
- type: integer
- description: The assigned key, which acts as a unique identifier for this flow node instance.
- format: int64
- processInstanceKey:
- description: The process instance key associated to this flow node instance.
- type: integer
- format: int64
- processDefinitionKey:
- description: The process definition key associated to this flow node instance.
- type: integer
- format: int64
processDefinitionId:
description: The process definition ID associated to this flow node instance.
type: string
@@ -4516,12 +4988,47 @@ components:
hasIncident:
description: Shows whether this flow node instance has an incident. If true also an incidentKey is provided.
type: boolean
+ tenantId:
+ description: The tenant ID of the flow node instance.
+ type: string
+ FlowNodeInstanceItemNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/FlowNodeInstanceItemBase"
+ properties:
+ flowNodeInstanceKey:
+ type: integer
+ description: The assigned key, which acts as a unique identifier for this flow node instance.
+ format: int64
+ processInstanceKey:
+ description: The process instance key associated to this flow node instance.
+ type: integer
+ format: int64
+ processDefinitionKey:
+ description: The process definition key associated to this flow node instance.
+ type: integer
+ format: int64
incidentKey:
description: Incident key associated with this flow node instance.
type: integer
format: int64
- tenantId:
- description: The tenant ID of the incident.
+ FlowNodeInstanceItem:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/FlowNodeInstanceItemBase"
+ properties:
+ flowNodeInstanceKey:
+ type: string
+ description: The assigned key, which acts as a unique identifier for this flow node instance.
+ processInstanceKey:
+ description: The process instance key associated to this flow node instance.
+ type: string
+ processDefinitionKey:
+ description: The process definition key associated to this flow node instance.
+ type: string
+ incidentKey:
+ description: Incident key associated with this flow node instance.
type: string
DecisionDefinitionSearchQueryRequest:
allOf:
@@ -4633,34 +5140,34 @@ components:
tenantId:
description: The tenant ID of the incident.
type: string
- IncidentSearchQueryResponse:
+ IncidentSearchQueryResponseNumberKeys:
+ deprecated: true
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching incidents.
+ type: array
+ items:
+ $ref: "#/components/schemas/IncidentItemNumberKeys"
+ IncidentSearchQueryResponse:
type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
properties:
items:
description: The matching incidents.
type: array
items:
$ref: "#/components/schemas/IncidentItem"
- IncidentItem:
+ IncidentItemBase:
+ description: Base properties for IncidentItem.
type: object
properties:
- incidentKey:
- type: integer
- format: int64
- description: The assigned key, which acts as a unique identifier for this incident.
- processDefinitionKey:
- type: integer
- format: int64
- description: The process definition key associated to this incident.
processDefinitionId:
type: string
description: The process definition ID associated to this incident.
- processInstanceKey:
- type: integer
- format: int64
- description: The process instance key associated to this incident.
errorType:
type: string
description: Incident error type with a defined set of values.
@@ -4684,10 +5191,6 @@ components:
flowNodeId:
type: string
description: The flow node ID associated to this incident.
- flowNodeInstanceKey:
- type: integer
- format: int64
- description: The flow node instance key associated to this incident.
creationTime:
type: string
description: Date of incident creation.
@@ -4700,30 +5203,80 @@ components:
- MIGRATED
- RESOLVED
- PENDING
+ tenantId:
+ description: The tenant ID of the incident.
+ type: string
+ IncidentItemNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/IncidentItemBase"
+ properties:
+ incidentKey:
+ type: integer
+ format: int64
+ description: The assigned key, which acts as a unique identifier for this incident.
+ processDefinitionKey:
+ type: integer
+ format: int64
+ description: The process definition key associated to this incident.
+ processInstanceKey:
+ type: integer
+ format: int64
+ description: The process instance key associated to this incident.
+ flowNodeInstanceKey:
+ type: integer
+ format: int64
+ description: The flow node instance key associated to this incident.
jobKey:
type: integer
description: The job key, if exists, associated with this incident.
format: int64
- tenantId:
- description: The tenant ID of the incident.
+ IncidentItem:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/IncidentItemBase"
+ properties:
+ incidentKey:
type: string
- DecisionDefinitionSearchQueryResponse:
+ description: The assigned key, which acts as a unique identifier for this incident.
+ processDefinitionKey:
+ type: string
+ description: The process definition key associated to this incident.
+ processInstanceKey:
+ type: string
+ description: The process instance key associated to this incident.
+ flowNodeInstanceKey:
+ type: string
+ description: The flow node instance key associated to this incident.
+ jobKey:
+ type: string
+ description: The job key, if exists, associated with this incident.
+ DecisionDefinitionSearchQueryResponseNumberKeys:
+ deprecated: true
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching decision definitions.
+ type: array
+ items:
+ $ref: "#/components/schemas/DecisionDefinitionItemNumberKeys"
+ DecisionDefinitionSearchQueryResponse:
type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
properties:
items:
description: The matching decision definitions.
type: array
items:
$ref: "#/components/schemas/DecisionDefinitionItem"
- DecisionDefinitionItem:
+ DecisionDefinitionItemBase:
+ description: Base properties for DecisionDefinitionItem.
type: object
properties:
- decisionDefinitionKey:
- type: integer
- format: int64
- description: The assigned key, which acts as a unique identifier for this decision definition.
decisionDefinitionId:
type: string
description: The DMN ID of the decision definition.
@@ -4737,13 +5290,49 @@ components:
decisionRequirementsId:
type: string
description: the DMN ID of the decision requirements graph that the decision definition is part of.
+ tenantId:
+ type: string
+ description: The tenant ID of the decision definition.
+ DecisionDefinitionItemNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DecisionDefinitionItemBase"
+ properties:
+ decisionDefinitionKey:
+ type: integer
+ format: int64
+ description: The assigned key, which acts as a unique identifier for this decision definition.
decisionRequirementsKey:
type: integer
format: int64
description: The assigned key of the decision requirements graph that the decision definition is part of.
- tenantId:
+ DecisionDefinitionItem:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DecisionDefinitionItemBase"
+ properties:
+ decisionDefinitionKey:
type: string
- description: The tenant ID of the decision definition.
+ description: The assigned key, which acts as a unique identifier for this decision definition.
+ decisionRequirementsKey:
+ type: string
+ description: The assigned key of the decision requirements graph that the decision definition is part of.
+ UsageMetricsResponse:
+ type: object
+ properties:
+ assignees:
+ description: The number of unique active users.
+ type: integer
+ format: int64
+ processInstances:
+ description: The number of created root process instances.
+ type: integer
+ format: int64
+ decisionInstances:
+ description: The number of executed decision instances.
+ type: integer
+ format: int64
PermissionTypeEnum:
description: Specifies the type of permissions.
enum:
@@ -4843,13 +5432,9 @@ components:
description: The id of the owner of permissions.
type: integer
format: int64
- AuthorizationResponse:
+ AuthorizationResponseBase:
type: "object"
properties:
- ownerKey:
- description: The id of the owner of permissions.
- type: integer
- format: int64
ownerType:
description: The type of the owner of permissions.
type: object
@@ -4865,6 +5450,34 @@ components:
description: The permissions.
items:
$ref: "#/components/schemas/PermissionDTO"
+ AuthorizationResponseNumberKeys:
+ type: "object"
+ allOf:
+ - $ref: "#/components/schemas/AuthorizationResponseBase"
+ properties:
+ ownerKey:
+ description: The id of the owner of permissions.
+ type: integer
+ format: int64
+ AuthorizationResponse:
+ type: "object"
+ allOf:
+ - $ref: "#/components/schemas/AuthorizationResponseBase"
+ properties:
+ ownerKey:
+ description: The id of the owner of permissions.
+ type: string
+ AuthorizationSearchResponseNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching authorizations.
+ type: array
+ items:
+ $ref: "#/components/schemas/AuthorizationResponseNumberKeys"
AuthorizationSearchResponse:
type: object
allOf:
@@ -4890,13 +5503,20 @@ components:
email:
description: The email of the user.
type: "string"
+ UserCreateResponseNumberKeys:
+ deprecated: true
+ type: object
+ properties:
+ userKey:
+ description: The key of the created user
+ type: integer
+ format: int64
UserCreateResponse:
- type: "object"
+ type: object
properties:
userKey:
description: The key of the created user
- type: "integer"
- format: "int64"
+ type: string
UserSearchQueryRequest:
allOf:
- $ref: "#/components/schemas/SearchQueryRequest"
@@ -4938,60 +5558,60 @@ components:
claimValue:
type: string
description: The value of the claim to match.
- CamundaUser:
- type: "object"
+ name:
+ type: string
+ description: The name of the mapping.
+ CamundaUserBase:
+ description: Base properties for CamundaUser.
+ type: object
properties:
userId:
description: The ID of the user.
- type: "string"
- userKey:
- description: The system generated key of the user.
- type: "integer"
- format: "int64"
+ type: string
displayName:
description: The display name of the user.
- type: "string"
+ type: string
authorizedApplications:
description: The applications the user is authorized to use.
type: array
items:
- type: "string"
+ type: string
tenants:
description: The tenants the user is a member of.
type: array
items:
- type: "object"
+ type: object
properties:
tenantId:
- type: "string"
+ type: string
description: The ID of the tenant.
name:
- type: "string"
+ type: string
description: The name of the tenant.
groups:
description: The groups assigned to the user.
type: array
items:
- type: "string"
+ type: string
roles:
description: The roles assigned to the user.
type: array
items:
- type: "string"
+ type: string
salesPlanType:
description: The plan of the user.
- type: "string"
+ type: string
c8Links:
description: The links to the components in the C8 stack.
type: array
items:
- type: "object"
+ type: object
properties:
name:
- type: "string"
+ type: string
description: The name of the component.
link:
- type: "string"
+ type: string
description: A link to the component.
canLogout:
description: Flag for understanding if the user is able to perform logout.
@@ -4999,17 +5619,31 @@ components:
apiUser:
description: Flag for understanding if the user is an API user.
type: boolean
- UserResponse:
+ CamundaUserNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/CamundaUserBase"
+ properties:
+ userKey:
+ description: The system generated key of the user.
+ type: integer
+ format: int64
+ CamundaUser:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/CamundaUserBase"
+ properties:
+ userKey:
+ description: The system generated key of the user.
+ type: string
+ UserResponseBase:
type: "object"
properties:
id:
description: The ID of the user.
type: "integer"
format: "int64"
- key:
- description: The key of the user.
- type: "integer"
- format: "int64"
username:
description: The username of the user.
type: "string"
@@ -5019,6 +5653,34 @@ components:
email:
description: The email of the user.
type: "string"
+ UserResponseNumberKeys:
+ type: "object"
+ allOf:
+ - $ref: "#/components/schemas/UserResponseBase"
+ properties:
+ key:
+ description: The key of the user.
+ type: "integer"
+ format: "int64"
+ UserResponse:
+ type: "object"
+ allOf:
+ - $ref: "#/components/schemas/UserResponseBase"
+ properties:
+ key:
+ description: The key of the user.
+ type: "string"
+ UserSearchResponseNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching users.
+ type: array
+ items:
+ $ref: "#/components/schemas/UserResponseNumberKeys"
UserSearchResponse:
type: object
allOf:
@@ -5054,13 +5716,20 @@ components:
name:
type: "string"
description: The display name of the new role.
+ RoleCreateResponseNumberKeys:
+ deprecated: true
+ type: object
+ properties:
+ roleKey:
+ description: The key of the created role.
+ type: integer
+ format: int64
RoleCreateResponse:
- type: "object"
+ type: object
properties:
roleKey:
description: The key of the created role.
- type: "integer"
- format: "int64"
+ type: string
RoleUpdateRequest:
type: object
properties:
@@ -5077,33 +5746,66 @@ components:
name:
type: string
description: The updated display name of the role.
- RoleItem:
- description: Role search response item.
+ RoleItemBase:
+ description: Base properties for RoleItem.
+ type: object
+ properties:
+ name:
+ type: string
+ description: The role name.
+ RoleItemNumberKeys:
+ description: Role search response item. Key attributes as numeric values.
+ deprecated: true
type: object
+ allOf:
+ - $ref: "#/components/schemas/RoleItemBase"
properties:
key:
type: integer
description: The role key.
format: int64
- name:
- type: string
- description: The role name.
assignedMemberKeys:
type: array
description: The set of keys of members assigned to the role.
items:
type: integer
format: int64
+ RoleItem:
+ description: Role search response item.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/RoleItemBase"
+ properties:
+ key:
+ type: string
+ description: The role key.
+ assignedMemberKeys:
+ type: array
+ description: The set of keys of members assigned to the role.
+ items:
+ type: string
RoleSearchQueryRequest:
description: Role search request.
allOf:
- $ref: "#/components/schemas/SearchQueryRequest"
type: object
+ RoleSearchQueryResponseNumberKeys:
+ description: Role search response. Key attributes as numeric values.
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching roles.
+ type: array
+ items:
+ $ref: "#/components/schemas/RoleItemNumberKeys"
RoleSearchQueryResponse:
description: Role search response.
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
- type: object
properties:
items:
description: The matching roles.
@@ -5117,13 +5819,20 @@ components:
name:
type: "string"
description: The display name of the new group.
+ GroupCreateResponseNumberKeys:
+ deprecated: true
+ type: object
+ properties:
+ groupKey:
+ description: The key of the created group.
+ type: integer
+ format: int64
GroupCreateResponse:
- type: "object"
+ type: object
properties:
groupKey:
description: The key of the created group.
- type: "integer"
- format: "int64"
+ type: string
GroupUpdateRequest:
type: object
properties:
@@ -5140,23 +5849,44 @@ components:
name:
type: string
description: The updated display name of the group.
- GroupItem:
- description: Group search response item.
+ GroupItemBase:
+ description: Base properties for GroupItem.
+ type: object
+ properties:
+ name:
+ type: string
+ description: The group name.
+ GroupItemNumberKeys:
+ description: Group search response item. Key attributes as numeric values.
+ deprecated: true
type: object
+ allOf:
+ - $ref: "#/components/schemas/GroupItemBase"
properties:
groupKey:
type: integer
description: The group key.
format: int64
- name:
- type: string
- description: The group name.
assignedMemberKeys:
type: array
description: The set of keys of members assigned to the group.
items:
type: integer
format: int64
+ GroupItem:
+ description: Group search response item.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/GroupItemBase"
+ properties:
+ groupKey:
+ type: string
+ description: The group key.
+ assignedMemberKeys:
+ type: array
+ description: The set of keys of members assigned to the group.
+ items:
+ type: string
GroupSearchQueryRequest:
description: Group search request.
allOf:
@@ -5174,11 +5904,23 @@ components:
name:
type: string
description: The name of the group.
+ GroupSearchQueryResponseNumberKeys:
+ description: Group search response. Key attributes as numeric values.
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching groups.
+ type: array
+ items:
+ $ref: "#/components/schemas/GroupItemNumberKeys"
GroupSearchQueryResponse:
description: Group search response.
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
- type: object
properties:
items:
description: The matching groups.
@@ -5195,23 +5937,54 @@ components:
claimValue:
type: string
description: The value of the claim to map.
+ name:
+ type: string
+ description: The name of the mapping.
required:
- claimName
- claimValue
- MappingRuleCreateResponse:
+ - name
+ MappingRuleCreateResponseBase:
type: object
properties:
- mappingKey:
- description: The key of the created mapping rule.
- type: integer
- format: int64
claimName:
type: string
description: The name of the claim to map.
claimValue:
type: string
- description: The value of the claim to map.
- MappingSearchResponse:
+ description: The value of the claim to map.
+ name:
+ type: string
+ description: The name of the mapping.
+ MappingRuleCreateResponseNumberKeys:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/MappingRuleCreateResponseBase"
+ properties:
+ mappingKey:
+ description: The key of the created mapping rule.
+ type: integer
+ format: int64
+ MappingRuleCreateResponse:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/MappingRuleCreateResponseBase"
+ properties:
+ mappingKey:
+ description: The key of the created mapping rule.
+ type: string
+ MappingSearchQueryResponseNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching mapping rules.
+ type: array
+ items:
+ $ref: "#/components/schemas/MappingItemNumberKeys"
+ MappingSearchQueryResponse:
type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
@@ -5220,20 +5993,36 @@ components:
description: The matching mapping rules.
type: array
items:
- $ref: "#/components/schemas/MappingResponse"
- MappingResponse:
+ $ref: "#/components/schemas/MappingItem"
+ MappingItemBase:
type: "object"
properties:
- mappingKey:
- description: The key of the created mapping rule.
- type: integer
- format: int64
claimName:
type: string
description: The name of the claim to map.
claimValue:
type: string
description: The value of the claim to map.
+ name:
+ type: string
+ description: The name of the mapping.
+ MappingItemNumberKeys:
+ type: "object"
+ allOf:
+ - $ref: "#/components/schemas/MappingItemBase"
+ properties:
+ mappingKey:
+ description: The key of the created mapping rule.
+ type: integer
+ format: int64
+ MappingItem:
+ type: "object"
+ allOf:
+ - $ref: "#/components/schemas/MappingItemBase"
+ properties:
+ mappingKey:
+ description: The key of the created mapping rule.
+ type: string
TopologyResponse:
description: The response of a topology request.
@@ -5485,6 +6274,15 @@ components:
- type
- timeout
- maxJobsToActivate
+ JobActivationResponseNumberKeys:
+ description: The list of activated jobs
+ type: object
+ properties:
+ jobs:
+ description: The activated jobs.
+ type: array
+ items:
+ $ref: "#/components/schemas/ActivatedJobNumberKeys"
JobActivationResponse:
description: The list of activated jobs
type: object
@@ -5494,20 +6292,12 @@ components:
type: array
items:
$ref: "#/components/schemas/ActivatedJob"
- ActivatedJob:
+ ActivatedJobBase:
type: object
properties:
- jobKey:
- description: the key, a unique identifier for the job
- type: integer
- format: int64
type:
description: the type of the job (should match what was requested)
type: string
- processInstanceKey:
- description: the job's process instance key
- type: integer
- format: int64
processDefinitionId:
description: the bpmn process ID of the job's process definition
type: string
@@ -5515,19 +6305,9 @@ components:
description: the version of the job's process definition
type: integer
format: int32
- processDefinitionKey:
- description: the key of the job's process definition
- type: integer
- format: int64
elementId:
description: the associated task element ID
type: string
- elementInstanceKey:
- description: >
- the unique key identifying the associated task, unique within the scope of the
- process instance
- type: integer
- format: int64
customHeaders:
description: a set of custom headers defined during modelling; returned as a serialized JSON document
type: object
@@ -5550,6 +6330,48 @@ components:
tenantId:
description: The ID of the tenant that owns the job
type: string
+ ActivatedJobNumberKeys:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/ActivatedJobBase"
+ properties:
+ jobKey:
+ description: the key, a unique identifier for the job
+ type: integer
+ format: int64
+ processInstanceKey:
+ description: the job's process instance key
+ type: integer
+ format: int64
+ processDefinitionKey:
+ description: the key of the job's process definition
+ type: integer
+ format: int64
+ elementInstanceKey:
+ description: >
+ the unique key identifying the associated task, unique within the scope of the
+ process instance
+ type: integer
+ format: int64
+ ActivatedJob:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/ActivatedJobBase"
+ properties:
+ jobKey:
+ description: the key, a unique identifier for the job
+ type: string
+ processInstanceKey:
+ description: the job's process instance key
+ type: string
+ processDefinitionKey:
+ description: the key of the job's process definition
+ type: string
+ elementInstanceKey:
+ description: >
+ the unique key identifying the associated task, unique within the scope of the
+ process instance
+ type: string
JobFailRequest:
type: object
properties:
@@ -5623,6 +6445,59 @@ components:
As a result, the completion request is rejected and the task remains active.
Defaults to false.
nullable: true
+ corrections:
+ $ref: "#/components/schemas/JobResultCorrections"
+ JobResultCorrections:
+ type: object
+ description: |
+ JSON object with attributes that were corrected by the worker.
+
+ The following attributes can be corrected, additional attributes will be ignored:
+
+ * `assignee` - reset by providing an empty String
+ * `dueDate` - reset by providing an empty String
+ * `followUpDate` - reset by providing an empty String
+ * `candidateGroups` - reset by providing an empty list
+ * `candidateUsers` - reset by providing an empty list
+ * `priority` - minimum 0, maximum 100, default 50
+
+ Providing any of those attributes with a `null` value or omitting it preserves
+ the persisted attribute's value.
+ nullable: true
+ properties:
+ assignee:
+ type: string
+ description: Assignee of the task.
+ nullable: true
+ dueDate:
+ type: string
+ format: date-time
+ description: The due date of the task.
+ nullable: true
+ followUpDate:
+ type: string
+ format: date-time
+ description: The follow-up date of the task.
+ nullable: true
+ candidateUsers:
+ type: array
+ description: The list of candidate users of the task.
+ items:
+ type: string
+ nullable: true
+ candidateGroups:
+ type: array
+ description: The list of candidate groups of the task.
+ items:
+ type: string
+ nullable: true
+ priority:
+ type: integer
+ format: int32
+ description: The priority of the task.
+ minimum: 0
+ maximum: 100
+ nullable: true
JobUpdateRequest:
type: object
properties:
@@ -5783,23 +6658,31 @@ components:
tenantId:
type: string
description: The tenant ID of the decision requirements.
- DecisionRequirementsSearchQueryResponse:
+ DecisionRequirementsSearchQueryResponseNumberKeys:
+ deprecated: true
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching decision requirements.
+ type: array
+ items:
+ $ref: "#/components/schemas/DecisionRequirementsItemNumberKeys"
+ DecisionRequirementsSearchQueryResponse:
type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
properties:
items:
description: The matching decision requirements.
type: array
items:
$ref: "#/components/schemas/DecisionRequirementsItem"
- DecisionRequirementsItem:
+ DecisionRequirementsItemBase:
+ description: Base properties for DecisionRequirementsItem.
type: object
properties:
- decisionRequirementsKey:
- type: integer
- format: int64
- description: The assigned key, which acts as a unique identifier for this decision requirements.
name:
type: string
description: The DMN name of the decision requirements.
@@ -5816,6 +6699,24 @@ components:
tenantId:
type: string
description: The tenant ID of the decision requirements.
+ DecisionRequirementsItemNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DecisionRequirementsItemBase"
+ properties:
+ decisionRequirementsKey:
+ type: integer
+ format: int64
+ description: The assigned key, which acts as a unique identifier for this decision requirements.
+ DecisionRequirementsItem:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DecisionRequirementsItemBase"
+ properties:
+ decisionRequirementsKey:
+ type: string
+ description: The assigned key, which acts as a unique identifier for this decision requirements.
EvaluateDecisionRequest:
type: object
oneOf:
@@ -5853,13 +6754,10 @@ components:
tenantId:
description: The tenant ID of the decision.
type: string
- EvaluateDecisionResponse:
+ EvaluateDecisionResponseBase:
+ description: Base properties for EvaluateDecisionResponse.
type: object
properties:
- decisionDefinitionKey:
- description: The unique key identifying the decision which was evaluated.
- type: integer
- format: int64
decisionDefinitionId:
description: The ID of the decision which was evaluated.
type: string
@@ -5873,10 +6771,6 @@ components:
decisionRequirementsId:
description: The ID of the decision requirements graph that the decision which was evaluated is part of.
type: string
- decisionRequirementsKey:
- description: The unique key identifying the decision requirements graph that the decision which was evaluated is part of.
- type: integer
- format: int64
output:
description: |
JSON document that will instantiate the result of the decision which was evaluated.
@@ -5890,15 +6784,43 @@ components:
tenantId:
description: The tenant ID of the evaluated decision.
type: string
- decisionInstanceKey:
- description: The unique key identifying this decision evaluation.
- type: integer
- format: int64
evaluatedDecisions:
description: Decisions that were evaluated within the requested decision evaluation.
type: array
items:
$ref: "#/components/schemas/EvaluatedDecisionItem"
+ EvaluateDecisionResponseNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/EvaluateDecisionResponseBase"
+ properties:
+ decisionDefinitionKey:
+ description: The unique key identifying the decision which was evaluated.
+ type: integer
+ format: int64
+ decisionRequirementsKey:
+ description: The unique key identifying the decision requirements graph that the decision which was evaluated is part of.
+ type: integer
+ format: int64
+ decisionInstanceKey:
+ description: The unique key identifying this decision evaluation.
+ type: integer
+ format: int64
+ EvaluateDecisionResponse:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/EvaluateDecisionResponseBase"
+ properties:
+ decisionDefinitionKey:
+ description: The unique key identifying the decision which was evaluated.
+ type: string
+ decisionRequirementsKey:
+ description: The unique key identifying the decision requirements graph that the decision which was evaluated is part of.
+ type: string
+ decisionInstanceKey:
+ description: The unique key identifying this decision evaluation.
+ type: string
EvaluatedDecisionItem:
type: object
description: A decision that was evaluated.
@@ -6036,10 +6958,21 @@ components:
tenantId:
type: string
description: The tenant ID of the decision instance.
- DecisionInstanceSearchQueryResponse:
+ DecisionInstanceSearchQueryResponseNumberKeys:
+ deprecated: true
+ type: object
allOf:
- $ref: "#/components/schemas/SearchQueryResponse"
+ properties:
+ items:
+ description: The matching decision instances.
+ type: array
+ items:
+ $ref: "#/components/schemas/DecisionInstanceItemNumberKeys"
+ DecisionInstanceSearchQueryResponse:
type: object
+ allOf:
+ - $ref: "#/components/schemas/SearchQueryResponse"
properties:
items:
description: The matching decision instances.
@@ -6047,13 +6980,9 @@ components:
items:
$ref: "#/components/schemas/DecisionInstanceItem"
- DecisionInstanceItem:
+ DecisionInstanceItemBase:
type: object
properties:
- decisionInstanceKey:
- type: integer
- format: int64
- description: The key of the decision instance. Note that this is not the unique identifier of the entity itself; the `decisionInstanceId` serves as the primary identifier.
decisionInstanceId:
type: string
description: The ID of the decision instance.
@@ -6066,6 +6995,33 @@ components:
evaluationFailure:
type: string
description: The evaluation failure of the decision instance.
+ decisionDefinitionId:
+ type: string
+ description: The ID of the DMN decision.
+ decisionDefinitionName:
+ type: string
+ description: The name of the DMN decision.
+ decisionDefinitionVersion:
+ type: integer
+ format: int32
+ description: The version of the decision.
+ decisionDefinitionType:
+ $ref: "#/components/schemas/DecisionDefinitionTypeEnum"
+ result:
+ type: string
+ description: The result of the decision instance.
+ tenantId:
+ type: string
+ description: The tenant ID of the decision instance.
+ DecisionInstanceItemNumberKeys:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DecisionInstanceItemBase"
+ properties:
+ decisionInstanceKey:
+ type: integer
+ format: int64
+ description: The key of the decision instance. Note that this is not the unique identifier of the entity itself; the `decisionInstanceId` serves as the primary identifier.
processDefinitionKey:
type: integer
format: int64
@@ -6078,24 +7034,23 @@ components:
type: integer
format: int64
description: The key of the decision.
- decisionDefinitionId:
+ DecisionInstanceItem:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DecisionInstanceItemBase"
+ properties:
+ decisionInstanceKey:
type: string
- description: The ID of the DMN decision.
- decisionDefinitionName:
+ description: The key of the decision instance. Note that this is not the unique identifier of the entity itself; the `decisionInstanceId` serves as the primary identifier.
+ processDefinitionKey:
type: string
- description: The name of the DMN decision.
- decisionDefinitionVersion:
- type: integer
- format: int32
- description: The version of the decision.
- decisionDefinitionType:
- $ref: "#/components/schemas/DecisionDefinitionTypeEnum"
- result:
+ description: The key of the process definition.
+ processInstanceKey:
type: string
- description: The result of the decision instance.
- tenantId:
+ description: The key of the process instance.
+ decisionDefinitionKey:
type: string
- description: The tenant ID of the decision instance.
+ description: The key of the decision.
DecisionInstanceGetQueryResponse:
allOf:
@@ -6156,23 +7111,44 @@ components:
description: the tenant for which the message is published
type: string
nullable: true
- MessageCorrelationResponse:
+ MessageCorrelationResponseBase:
+ description: Base properties for MessageCorrelationResponse.
+ type: object
+ properties:
+ tenantId:
+ description: The tenant ID of the correlated message
+ type: string
+ MessageCorrelationResponseNumberKeys:
description: |
The message key of the correlated message, as well as the first process instance key it
- correlated with.
+ correlated with. Key attributes as numeric values.
+ deprecated: true
type: object
+ allOf:
+ - $ref: "#/components/schemas/MessageCorrelationResponseBase"
properties:
messageKey:
description: The key of the correlated message
type: integer
format: int64
- tenantId:
- description: The tenant ID of the correlated message
- type: string
processInstanceKey:
description: The key of the first process instance the message correlated with
type: integer
format: int64
+ MessageCorrelationResponse:
+ description: |
+ The message key of the correlated message, as well as the first process instance key it
+ correlated with.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/MessageCorrelationResponseBase"
+ properties:
+ messageKey:
+ description: The key of the correlated message
+ type: string
+ processInstanceKey:
+ description: The key of the first process instance the message correlated with
+ type: string
MessagePublicationRequest:
type: object
properties:
@@ -6206,16 +7182,32 @@ components:
required:
- name
- correlationKey
- MessagePublicationResponse:
- description: The message key of the published message.
+ MessagePublicationResponseBase:
+ description: Base properties for MessagePublicationResponse.
+ type: object
+ properties:
+ tenantId:
+ description: The tenant ID of the message.
+ type: string
+ MessagePublicationResponseNumberKeys:
+ description: The message key of the published message. Key attributes as numeric values.
+ deprecated: true
type: object
+ allOf:
+ - $ref: "#/components/schemas/MessagePublicationResponseBase"
properties:
messageKey:
description: The key of the message
type: integer
format: int64
- tenantId:
- description: The tenant ID of the message.
+ MessagePublicationResponse:
+ description: The message key of the published message.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/MessagePublicationResponseBase"
+ properties:
+ messageKey:
+ description: The key of the message
type: string
DocumentReference:
@@ -6276,8 +7268,18 @@ components:
format: date-time
description: The date and time when the link expires.
- DeploymentResponse:
+ DeploymentResponseBase:
+ description: Base properties for DeploymentResponse.
+ type: object
+ properties:
+ tenantId:
+ description: The tenant ID associated with the deployment.
+ type: string
+ DeploymentResponseNumberKeys:
+ deprecated: true
type: object
+ allOf:
+ - $ref: "#/components/schemas/DeploymentResponseBase"
properties:
deploymentKey:
type: integer
@@ -6287,10 +7289,20 @@ components:
description: Items deployed by the request.
type: array
items:
- $ref: "#/components/schemas/DeploymentMetadata"
- tenantId:
- description: The tenant ID associated with the deployment.
+ $ref: "#/components/schemas/DeploymentMetadataNumberKeys"
+ DeploymentResponse:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DeploymentResponseBase"
+ properties:
+ deploymentKey:
type: string
+ description: The unique key identifying the deployment.
+ deployments:
+ description: Items deployed by the request.
+ type: array
+ items:
+ $ref: "#/components/schemas/DeploymentMetadata"
DeploymentMetadata:
type: object
properties:
@@ -6302,8 +7314,19 @@ components:
$ref: "#/components/schemas/DeploymentDecisionRequirements"
form:
$ref: "#/components/schemas/DeploymentForm"
- DeploymentProcess:
- description: A deployed process.
+ DeploymentMetadataNumberKeys:
+ type: object
+ properties:
+ processDefinition:
+ $ref: "#/components/schemas/DeploymentProcessNumberKeys"
+ decisionDefinition:
+ $ref: "#/components/schemas/DeploymentDecisionNumberKeys"
+ decisionRequirements:
+ $ref: "#/components/schemas/DeploymentDecisionRequirementsNumberKeys"
+ form:
+ $ref: "#/components/schemas/DeploymentFormNumberKeys"
+ DeploymentProcessBase:
+ description: Base properties for DeploymentProcess.
type: object
properties:
processDefinitionId:
@@ -6315,18 +7338,33 @@ components:
type: integer
format: int32
description: The assigned process version.
- processDefinitionKey:
- type: integer
- format: int64
- description: The assigned key, which acts as a unique identifier for this process.
resourceName:
type: string
description: The resource name from which this process was parsed.
tenantId:
type: string
description: The tenant ID of the deployed process.
- DeploymentDecision:
- description: A deployed decision.
+ DeploymentProcessNumberKeys:
+ description: A deployed process.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DeploymentProcessBase"
+ properties:
+ processDefinitionKey:
+ type: integer
+ format: int64
+ description: The assigned key, which acts as a unique identifier for this process.
+ DeploymentProcess:
+ description: A deployed process.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DeploymentProcessBase"
+ properties:
+ processDefinitionKey:
+ type: string
+ description: The assigned key, which acts as a unique identifier for this process.
+ DeploymentDecisionBase:
+ description: Base properties for DeploymentDecision.
type: object
properties:
decisionDefinitionId:
@@ -6338,11 +7376,6 @@ components:
type: integer
format: int32
description: The assigned decision version.
- decisionDefinitionKey:
- type: integer
- format: int64
- description: |
- The assigned decision key, which acts as a unique identifier for this decision.
name:
type: string
description: The DMN name of the decision, as parsed during deployment.
@@ -6353,13 +7386,38 @@ components:
type: string
description: |
The dmn ID of the decision requirements graph that this decision is part of, as parsed during deployment.
+ DeploymentDecisionNumberKeys:
+ description: A deployed decision.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DeploymentDecisionBase"
+ properties:
+ decisionDefinitionKey:
+ type: integer
+ format: int64
+ description: |
+ The assigned decision key, which acts as a unique identifier for this decision.
decisionRequirementsKey:
type: integer
format: int64
description: |
The assigned key of the decision requirements graph that this decision is part of.
- DeploymentDecisionRequirements:
- description: Deployed decision requirements.
+ DeploymentDecision:
+ description: A deployed decision.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DeploymentDecisionBase"
+ properties:
+ decisionDefinitionKey:
+ type: string
+ description: |
+ The assigned decision key, which acts as a unique identifier for this decision.
+ decisionRequirementsKey:
+ type: string
+ description: |
+ The assigned key of the decision requirements graph that this decision is part of.
+ DeploymentDecisionRequirementsBase:
+ description: Base properties for DeploymentDecisionRequirements.
type: object
properties:
decisionRequirementsId:
@@ -6376,16 +7434,32 @@ components:
tenantId:
type: string
description: The tenant ID of the deployed decision requirements.
+ resourceName:
+ type: string
+ description: The resource name from which this decision requirements was parsed.
+ DeploymentDecisionRequirementsNumberKeys:
+ description: Deployed decision requirements.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DeploymentDecisionRequirementsBase"
+ properties:
decisionRequirementsKey:
type: integer
format: int64
description: |
The assigned decision requirements key, which acts as a unique identifier for this decision requirements.
- resourceName:
+ DeploymentDecisionRequirements:
+ description: Deployed decision requirements.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DeploymentDecisionRequirementsBase"
+ properties:
+ decisionRequirementsKey:
type: string
- description: The resource name from which this decision requirements was parsed.
- DeploymentForm:
- description: A deployed form.
+ description: |
+ The assigned decision requirements key, which acts as a unique identifier for this decision requirements.
+ DeploymentFormBase:
+ description: Base properties for DeploymentForm.
type: object
properties:
formId:
@@ -6397,16 +7471,31 @@ components:
type: integer
format: int32
description: The assigned form version.
- formKey:
- type: integer
- format: int64
- description: The assigned key, which acts as a unique identifier for this form.
resourceName:
type: string
description: The resource name from which this form was parsed.
tenantId:
type: string
description: The tenant ID of the deployed form.
+ DeploymentFormNumberKeys:
+ description: A deployed form.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DeploymentFormBase"
+ properties:
+ formKey:
+ type: integer
+ format: int64
+ description: The assigned key, which acts as a unique identifier for this form.
+ DeploymentForm:
+ description: A deployed form.
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/DeploymentFormBase"
+ properties:
+ formKey:
+ type: string
+ description: The assigned key, which acts as a unique identifier for this form.
CreateProcessInstanceRequest:
type: object
@@ -6498,14 +7587,10 @@ components:
For now, however, the start instruction is implicitly a "startBeforeElement" instruction
type: string
- CreateProcessInstanceResponse:
+ CreateProcessInstanceResponseBase:
+ description: Base properties for CreateProcessInstanceResponse.
type: object
properties:
- processDefinitionKey:
- description: |
- The key of the process definition which was used to create the process instance.
- type: integer
- format: int64
processDefinitionId:
description: |
The BPMN process ID of the process definition which was used to create the process.
@@ -6516,12 +7601,6 @@ components:
The version of the process definition which was used to create the process instance.
type: integer
format: int32
- processInstanceKey:
- description: |
- The unique identifier of the created process instance; to be used wherever a request
- needs a process instance key (e.g. CancelProcessInstanceRequest).
- type: integer
- format: int64
tenantId:
description: The tenant ID of the created process instance.
type: string
@@ -6529,6 +7608,37 @@ components:
additionalProperties: true
description: All the variables visible in the root scope.
type: object
+ CreateProcessInstanceResponseNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/CreateProcessInstanceResponseBase"
+ properties:
+ processDefinitionKey:
+ description: |
+ The key of the process definition which was used to create the process instance.
+ type: integer
+ format: int64
+ processInstanceKey:
+ description: |
+ The unique identifier of the created process instance; to be used wherever a request
+ needs a process instance key (e.g. CancelProcessInstanceRequest).
+ type: integer
+ format: int64
+ CreateProcessInstanceResponse:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/CreateProcessInstanceResponseBase"
+ properties:
+ processDefinitionKey:
+ description: |
+ The key of the process definition which was used to create the process instance.
+ type: string
+ processInstanceKey:
+ description: |
+ The unique identifier of the created process instance; to be used wherever a request
+ needs a process instance key (e.g. CancelProcessInstanceRequest).
+ type: string
MigrateProcessInstanceRequest:
type: object
properties:
@@ -6700,23 +7810,35 @@ components:
type: string
required:
- signalName
- SignalBroadcastResponse:
+ SignalBroadcastResponseBase:
+ description: Base properties for SignalBroadcastResponse.
type: object
properties:
- signalKey:
- description: The unique ID of the signal that was broadcast.
- type: integer
- format: int64
tenantId:
description: The tenant ID of the signal that was broadcast.
type: string
- FormItem:
+ SignalBroadcastResponseNumberKeys:
+ deprecated: true
type: object
+ allOf:
+ - $ref: "#/components/schemas/SignalBroadcastResponseBase"
properties:
- formKey:
- description: The key of the form.
+ signalKey:
+ description: The unique ID of the signal that was broadcast.
type: integer
format: int64
+ SignalBroadcastResponse:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/SignalBroadcastResponseBase"
+ properties:
+ signalKey:
+ description: The unique ID of the signal that was broadcast.
+ type: string
+ FormItemBase:
+ description: Base properties for FormItem.
+ type: object
+ properties:
tenantId:
description: The tenant ID of the form.
type: string
@@ -6730,6 +7852,24 @@ components:
description: The version of the form.
type: integer
format: int64
+ FormItemNumberKeys:
+ deprecated: true
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/FormItemBase"
+ properties:
+ formKey:
+ description: The key of the form.
+ type: integer
+ format: int64
+ FormItem:
+ type: object
+ allOf:
+ - $ref: "#/components/schemas/FormItemBase"
+ properties:
+ formKey:
+ description: The key of the form.
+ type: string
responses:
InternalServerError:
description: >
diff --git a/api/generate-api-docs.js b/api/generate-api-docs.js
index 6c6f2de56a4..e8dd7335ebd 100644
--- a/api/generate-api-docs.js
+++ b/api/generate-api-docs.js
@@ -3,12 +3,12 @@ const { execSync } = require("child_process");
// More strategies to come, for other APIs.
const operate = require("./operate/generation-strategy");
const tasklist = require("./tasklist/generation-strategy");
-const consolesm = require("./console-sm/generation-strategy");
+const adminsm = require("./administration-sm/generation-strategy");
const camunda = require("./camunda/generation-strategy");
const apiStrategies = {
operate,
tasklist,
- consolesm,
+ adminsm,
camunda,
};
@@ -21,7 +21,7 @@ function runCommand(command) {
// API name must be passed in as an arg.
const api = process.argv[2];
if (api === undefined) {
- const validAPIs = string.join(apiStrategies.join, ", ");
+ const validAPIs = Object.keys(apiStrategies).join(", ");
console.log(`Please specify an API name. Valid names: ${validAPIs}`);
process.exit();
}
@@ -29,7 +29,7 @@ if (api === undefined) {
// The API name must be recognized.
const strategy = apiStrategies[api];
if (strategy === undefined) {
- const validAPIs = string.join(apiStrategies.join, ", ");
+ const validAPIs = Object.keys(apiStrategies).join(", ");
console.error(`Invalid API name ${api}. Valid names: ${validAPIs}`);
process.exit();
}
diff --git a/docs/apis-tools/administration-sm-api/administration-sm-api-overview.md b/docs/apis-tools/administration-sm-api/administration-sm-api-overview.md
index 932693c07f7..45bfb7b3056 100644
--- a/docs/apis-tools/administration-sm-api/administration-sm-api-overview.md
+++ b/docs/apis-tools/administration-sm-api/administration-sm-api-overview.md
@@ -15,4 +15,4 @@ See [the interactive Administration API Self-Managed Explorer][administration-ap
An Open API UI is also available within a running instance of Console Self-Managed at `https://${base-url}/admin-api/openapi/docs`.
-[administration-api-explorer]: ./specifications/sm-administration-api.info.mdx
+[administration-api-explorer]: ./specifications/administration-api-self-managed.info.mdx
diff --git a/docs/apis-tools/administration-sm-api/specifications/sm-administration-api.info.mdx b/docs/apis-tools/administration-sm-api/specifications/administration-api-self-managed.info.mdx
similarity index 97%
rename from docs/apis-tools/administration-sm-api/specifications/sm-administration-api.info.mdx
rename to docs/apis-tools/administration-sm-api/specifications/administration-api-self-managed.info.mdx
index 5d84abb5475..357adbd146d 100644
--- a/docs/apis-tools/administration-sm-api/specifications/sm-administration-api.info.mdx
+++ b/docs/apis-tools/administration-sm-api/specifications/administration-api-self-managed.info.mdx
@@ -1,5 +1,5 @@
---
-id: sm-admin-api
+id: administration-api-self-managed
title: "Administration API (Self-Managed)"
description: "Access the administration API of Console Self-Managed."
sidebar_label: Introduction
diff --git a/docs/apis-tools/administration-sm-api/specifications/get-clusters.api.mdx b/docs/apis-tools/administration-sm-api/specifications/get-clusters.api.mdx
index 43b170c8562..cca9bf58fdd 100644
--- a/docs/apis-tools/administration-sm-api/specifications/get-clusters.api.mdx
+++ b/docs/apis-tools/administration-sm-api/specifications/get-clusters.api.mdx
@@ -5,9 +5,9 @@ description: "Returns a list of all automation and management clusters. Each clu
sidebar_label: "Get current clusters"
hide_title: true
hide_table_of_contents: true
-api: eJzFV21v2zYQ/iuEvuyLIntvQGEMA7ygKzI0a9E4KLAgH2jpbLGmSJWk6rmG/vvujrIlvyQxsDcgiCXy7uHdcy88bRNbg5NBWXNTJJNkCeFaNz6A80maOPC1NR58Mtkm343H9FOAz52qSQPl361QLLcmgAm0K+taq5zxRp88iWwTn5dQSXpSASrGqh0dG1REbhpV0G/Y1ICYPjhlloh7eNS9UZ8bEKrAo9RCgRN2IUIJIo8GJ22aGFnBy0i/o9QTyr6W+YUILDrEEMoL1xiDCkKZjCB9kKHxp7TdmIJYAi/UQsghQAlSh3IjrBPGBjz42BAwTZVMHpJOEFcaM3xeGbs2ySMqqqBJ8xojaDXc3U6LSplprbK7aBXatwTTRf9ln2clmod/7HHjHIZBfME8wd0dmTvvB6RGzGP/P8I3DkShfEDxRvmStOYQ1gBGyCbYio0S0hSikkYuoaLjOlyfidvTReQcHwsQgxwka2UQMg9iqe1car1BMYTF/xgbraEQnL1/hlRotQLRsUX839oCNLhMTHuD9odJF2P/B8Acjm0gbw6sYOh3XGmQipn0K42+s3vvkJNKfYXsmVj3jOBiz8fzUe7qeEaYGAi0xz9Tg0/EidnrPTlM8WHyd/QxHempq6nIsuc8/Ep6V3NnV5g2afe6RIi1pLSOTQoIoAPkxcgccRJDFXsR8YBPsVGEi6tiWtc7rv5WOxrQdXkLMMcs/7d9oHH6kgYAom7maKW4//D2CXf/pY5ydIoDiUUG3l9m9V78GcMrQP38QsBO+Ek4NvFzoxwUFCMG5LTa50Pk/ICvx32Q7fwT5BR0WRSK9qR+P6jWhdQeXkjlvvcm0jkuolM3uAmh/Qe96nx9n/jEl3Z34w7vzoGHA9/SHQfchv45T6+Pb5rO27Yle38Yf39aeL9aN1cFli0p/XhuqLkDh2l45bG2BThno/OHMh8gNA7Zkj2JWl90db2Webm/8XHLbfgKksr445T3jIKLyonIKvdQufQUgf2ghnR6wAKiZjd52CZzwNvJ4eNji1u1dBgcFsQVTvTSdrNeQtuhxJeRJEqvZK1GeT8AembCMyr3iGQbp7l2MhptS+tDO9nW1oUWhb9Ip+Rcx/uE9iKzC9lofEy0zaXm5XOpSBtmMJdN39+IeDoPUnTGIdyr8avx+RaFok+g9JNoj1OGUJ/FicJnkdp2QPkdyUWnd8TvW0gHzlD03gmk3QOmImYLrv/2ccY5pszCsvphwou7W8EpT2acGDvNc2pt3IRICPMxVh0bjfb3KJQ+XYdFxW+zcTbu5ncckXgU5yCT2R4jvF6vs1xWjSlkltuKKMQ+AfhFQLJx2E7edivpkXJhc7/XVpbfRw4WgH0+h1EH5EfseI3hx2oZwL6BsL8UBhl54Pq2//T4XwsyBpumyFGtUW9wpcbywhFuV17E96BwY508JNvtXHq4d7ptaRlHC7eJ9borK67nlO77got7m6xg04W/Dlx/uuEOePwN1g6r/s3rGbXbhsza5+lRXjL6rp+azQD7p19YQMxwUDM/40a0IdArnoOV8RcY7wHV
+api: eJzFV9uO2zYQ/RViXtoCXNu9AYFQFHAXSbBFtgmyXgSo4QeaGluMKVJLUus4hv69GFKW5cvuGujtyRI1PJw5M3M43oKt0ImgrLnJIYMlhmtd+4DOAweHvrLGo4dsCz+MRvSTo5dOVbQDMni/Ag7SmoAm0FdRVVrJiDf87MlkC14WWAp6UgHLiFU5OjaohFzXKqffsKkQMvDBKbMEfnTUvVEPNTKVowlqodAxu2ChQCaTw9BwMKLEl5H+ECU+sdlXQl6IEE37GEx55mpjlFkyZQYE6YMItT+l7cbkxBJ6phZM9AEKFDoUG2YdMzYAP3EETV1CNoXWEDjUpv+8MnZtYMYhqKBp57U13mq8ux3npTLjSg3uklcNhyWaNvsvxzwplCcPY8S1c2gCe0TnlTU7MnfR90hNmMfxf8JvHLJc+aDMsla+oF1zDGtEw0QdbBmdYsLkrBRGLLGk41pcP2C3p4tMGanrHFmvBslbEZiQgS21nQutN0wRLFPGB6E15ixW75fAmVYrZC1bxP+tzVGjG7Dx3qHuMOFS7v9EnOOxDxTNgRcR+n3sNORsIvxKKx9ieO+roEr1FQfP5HrPCHDY8/F8lts+nhBmw6kz/TM9+ESeInv7SA5LvF/8LX2RDn4aKmeDwXMRfqV9V3NnV+iAt69LEXAtqKyTSCEBtIBxMTFHnKRUJS0iHoBDEopwcVeMq2rH1d+Sox5dl0uAOWb5v9WB2ulLBABZVc+1kuz+47snwv2XFOXoFIciVwa9v8zrzvwZx0sMTskLAVvjJ+Giiw+1cphTjiJgLKuuHhLnB3zNuiTb+WeUlHSR54q+Cf2h160LoT2+UMp77QXhXGyi0zCiCNnFoVad7++TmOKl3d64/buzF2EvNr7jIMrQPxfp9fFN00bbNOTvT6MfTxvvjXVzlecYs/7zuaHmDt0juiuvcmTonE3BH9p8xFA745nYk6j1RVfXayGL7sZHE9wmXkFCGX9c8j6ihAKVY4nVqKFi6SkD3aA24+BR1o7ELptuYY7CoYNsOmtmHCrhRInRMJvOYqEXtp31gD6HAjIYCqL0SlRqKPcDoI9M+IgaNQK2aZprsuFwW1gfmmxbWRca4PAonBJzne4T+paYXYhaB8hAWyl0XD5XivTB9Oay8Ycblk6PgxSdcQj3avRqdF6irAtPoOwn0T1OEUJ1FicZn0Vqmh7ld2SXgt4R30lICx6h6L014O3DG+tKQU78/mkSa0yZhY3b24KPda58SF0Unfj2DvXiKs0/+Xcnno+lJJ2LinS62S66AacPQ1XVCi9k8P1gNBi1Y72QkfaUe4rGZ8Pher0eSFHWJhcDaUtiViuJxscJIs3g8K5d4Uebcyt9t1vZ+D50uECHRuKwBfLDyEdlfSiF6cG+xdDdFb1CPSBhu/9H8r/2aaoBGi6HlRbK9G7a1HVT6LqO+O71c2qfKWy3c+Hx3ummoeWHGt0mtfGu22KbcxoD8tjzW1jhpi2EKsS21HUUxuO/Zk1fDN6+npAK1+RWV75H5RrRdzJrNj3sX36LBmxiV2h+Bd76EOgVmlnTNH8B6kAKmA==
sidebar_class_name: "get api-method"
-info_path: docs/apis-tools/administration-sm-api/specifications/sm-administration-api
+info_path: docs/apis-tools/administration-sm-api/specifications/administration-api-self-managed
custom_edit_url: null
hide_send_button: true
---
diff --git a/docs/apis-tools/administration-sm-api/specifications/get-usage-metrics.api.mdx b/docs/apis-tools/administration-sm-api/specifications/get-usage-metrics.api.mdx
index 1de2f6077bd..6b108eaf1ed 100644
--- a/docs/apis-tools/administration-sm-api/specifications/get-usage-metrics.api.mdx
+++ b/docs/apis-tools/administration-sm-api/specifications/get-usage-metrics.api.mdx
@@ -5,9 +5,9 @@ description: "Returns usage metrics for a specific cluster for a given time rang
sidebar_label: "Get usage metrics for clusters"
hide_title: true
hide_table_of_contents: true
-api: eJzlV9tu4zYQ/RVCTy3g2O4NWARFgXTRXaRo2sXGQQsEeRhTY5kbidSSlLOGoH/vDClZsi0b2zZBH/YlkcThmTO3Q7pOTIkWvDL6Ok0ukwz9nYMMb9BbJV0ySSy60miHLrmsk2/nc/6XopNWlbyL9vzxSGbSaI/a8yqUZa5kwJx9cGxSJ06usQB+Ki179CoCqpT/+m2JBOTIp84IbB9/sUahUgJXK4VWmJXw9EXmlfNop0kzYUyJzl1r50HLiHyMUXFgooiRiZWxgsytx1S0+4XqAKbJ5ICoNx7yAVddFUu0ZEY4BVDcSWqqZY6j7KMxMz/mAMKVKCk0KRia4mk46R8rZZGSc996fph0ns3yA0pPfiBNFTuB/N2A6gpyh2SsfM7Wr6l2Jsfbm6u0UPqqVNNhffuMNcxbKseN8E/TiJ9QVpzHDuELT6QH93jn0H52AsE5lWlKIO+kddr6vyaOUhIZtUPqsXDHc9rsMgnWwnbUb4iF5hV8iM2JNWxQLBH1IGhzqlZDHi9Tt8WuVIcMSJlGhGVsSoYFfxmWb4x9HeWOaDLR7+ffHfcWWS1VSkLJlflhTKlv0W7QXjhSU4HWGhui3rd5j76y2p3vk1Z828+Z2lA9vSpQWNAZTsVxm4NFAVlmMQNWCkM8goz3uwTolISDoNNhv57U6Mk53ZkEtBNz5SFzXOKQYtEddVQ6R3hW+S0t1skSibOlx4eGlkqwQLGEob4fnWqtPlZnDiryq9iWjCzPiiY4eg1t1vedtxV1RX9aHkxcMxlzHVIUMklPRdk5HqbWibvfr/8a2CgtCpXnikI2OnUn2AXkswSPxWdfnk5QRqrOSxAm3P9Il2pNdV6b9jbEIgB+TS8z4NG8gFLNQmtfFLs7kgtzFTujsiTSSR1dNpezWb02zjeXdWmsb8h4A1YB+Q6KymtxTldQ5UwsNxLy8HlMT3mBQ+1SdvXuWkTv8SJEPvbhXs1fzUeR2PQESp+uHmftfTmKE41HkUIyu5m6ZbsYdDdZu/ZuwQMUv7cGk/bhTVe0X/9cBMVSemXC9n31FLc3Iugn0zgieyWDfjDLUElFQxUuqYE08e9RWCS4oHHjN9P5dN5ecUGG/MYiM21HFX56eppKKCqdwlSaglNIF2CkSzPbtp35W/tlcrA5NdLtdisT3mcWV2iRhGzWArlZCLyk8hegB7Bv0Y9Idas57jAJdX9P/yKEPraXx09+VuagwskY0l+3U32f7KaaK7M319S8cTzvk7pegsM7mzcNf46iE84B5XiW092BfjLdX71vRelr8ZnHxSj7R9x2p8YG8ooNkiCx/5bI8xweZ7h2Z8jz0H2Og+MM2Xh+9FQfer0ONwFqCYQ0XAvqdgvrSjkM8Oj3796Z8vaXBV8KK+6+nQAeCF5A7+7WejvA/vHnYCAW5hH1T0lH2/Mr+SHJ/Rs/aYl5
+api: eJzlV99v2zYQ/leIe2oBxvZ+AYUwDMiKpciwbEWTYAMMP5zJs81GIhWSShsI+t+HoyRbsWWvGxLsoS+JJR8/fvzu7ju6BleSx2icvdSQwZribcA1XVH0RgWQ4CmUzgYKkNXw7WzG/zQF5U3JqyCDP+5AgnI2ko38LZZlblTCnH4MHFJDUBsqkD+VnneMpgU0mv/Gx5IggxC9sWuQe/g3GxJGk41mZcgLtxJxQ0LlVYjkJ9BIxlQUwqUNEa1qkQ8xKj6YKNqTiZXzIkT0kbTo1gvTA0xA7hGNLmI+4GqrYkkeJKycLzBCBtpVy5xG2bfBzPyQA4pQkjIrowRDT6BpWPT7ynjSkM27nRey39ktP5KKIAG1NrwJ5u8HVFeYB5IQTcw5+q2zweV0fXWuC2PPSzMZ5nenWMO8lQlcCP9WRvpMqmIde4SvXMiI4e42kP9iATEEs7akBa8UFS/9X4WT0DHqmjRSEQ77tNkqid7j4+i+6SwibjCmswWxwQcSSyI7OLQ7lqshj5fJ2802VfsMjIYRYxnrkmHCX4blhfNvW7uDpmGi38++O6ytC+eXRmuynJkfxpz6mvwD+bNgNAny3vl06qcxHyhW3obTddKZb/d6bR7IimgKEh7tmibisMzRk8D12tMa2SncA/lk47tVAq0Wxqq80sN6PerR8pTvyIR2pK8irgOnOEks+lG3kBBIVd7ER8jmNSwJPXnI5otmIaFEjwXF1NTz0a625r46MahAguHY+4o894rFghOfymxXd9FXJAfTcq/jGjm2dZIoKRkiFmW/8VDaIG5/v/xrEGOsKEyem0DKWR2OsEvIJwkems9TezpCmax+EcJkT+v5z3QXEgqKG9fdhtgEMG4ggylya55haaaptM+K7R0ppL5qK6PyOWRQt1s22XRab1yITVaXzscGJDygN7jMW2vl79o+XWGVM7HcKczT6zE/5S/4qL1k5+8vRbt7exFyfg/uzezNbBSJQ4+g7OTa4WxiLEdx2uBRpCRm31PXHNceuu+sbXl34AmKn7sA2X246JP26583ybGMXbm0vHPPZJomxPYim0i8uqZ8dXaFFtekXx8wP1fJTJgyHi52K9EZshjCsHdwnluIbyazyay7+aJKsre559OEbDr99OnTRGFRWY0T5QpWNjeKbCCO7Qr2t+6N3FusnQrb1cal56mnFXmyiqYdUJgmPUoXYoF2APuO4oiDd1YU9uWod9f3r8L/26qL9DlOyxxNGphJ/rpr9jlsm50z86TdF7Lr2jnU9RID3fq8afh160VpPJjALa63c/6o3K8+dF71WnzhFBllf0eP/TB5wLziAEjO+1+JPM9MOcG1Hy3PQ/c55skJsu1Y2VFd7Gw8XRAkbAh1ui3U3RJ2mHJ4wIOfxU9GzbtfbviuWHH1bX1xzwcTen/lto8D7B9/TgHixt2R/Ql62pEfoVk0TfM3+WOSPA==
sidebar_class_name: "get api-method"
-info_path: docs/apis-tools/administration-sm-api/specifications/sm-administration-api
+info_path: docs/apis-tools/administration-sm-api/specifications/administration-api-self-managed
custom_edit_url: null
hide_send_button: true
---
diff --git a/docs/apis-tools/administration-sm-api/specifications/sidebar.js b/docs/apis-tools/administration-sm-api/specifications/sidebar.js
index 5c1953863bc..3d3722141b7 100644
--- a/docs/apis-tools/administration-sm-api/specifications/sidebar.js
+++ b/docs/apis-tools/administration-sm-api/specifications/sidebar.js
@@ -1,7 +1,7 @@
module.exports = [
{
type: "doc",
- id: "apis-tools/administration-sm-api/specifications/sm-admin-api",
+ id: "apis-tools/administration-sm-api/specifications/administration-api-self-managed",
},
{
type: "category",
diff --git a/docs/apis-tools/camunda-api-rest/specifications/activate-jobs.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/activate-jobs.api.mdx
index 888d6b44c49..59492ca42e9 100644
--- a/docs/apis-tools/camunda-api-rest/specifications/activate-jobs.api.mdx
+++ b/docs/apis-tools/camunda-api-rest/specifications/activate-jobs.api.mdx
@@ -5,7 +5,7 @@ description: "Iterate through all known partitions and activate jobs up to the r
sidebar_label: "Activate jobs"
hide_title: true
hide_table_of_contents: true
-api: eJztWVtv4zYW/isHfNkJVrE93Wl36rYLuMl069k2DRJPu0CSB0o6tpihSA1J2XEN//fFIXWzLSce7O7bBAgSiYfn+p0LqQ1zfGHZ+I691zF7iJgu0HAntJqmbMx44sSSO3yvY8silqJNjChomY3Z1BEpgsuMLhcZcCnho9IrBQU3ThCVBa5SqLnAo44tlAU4DS5DMPipROswhZw/ibzMB/eKRax6/aNO12y88Y/CYMrGzpQYsUQrh8rREi8KKRKv7vDRklIbZpMMc07/uXWBbMx0/IiJYxErDBnnBNp2dbNnE6n1qGOg1Qi4hRTnQmEKQnmVf7z+9QoKoxO0Fl7hYDGA7/9EjHHsuP14ScTecM/gh3tW8HWOyp1bNEuR4D2D4T/OvJWVdtYZoRZsG7GVNh/R9KukeI6g516FQFc7VagFVErbCHJtnVxDaTGFuTYg9WJBFEVpCm3RHsiNmCql5LHE4N5txJzIUZfuUBHuPWPQlYY8wucODbhMWEgo9CshJSjtIMYm4inEa+BKuwxNoCqVE9JrXMmBV0JBbs8g4xZiRAUGeZJh2vWSUA4XaFjE5trk3IVX37whv+X8ieA505NKaL8LK4gFDDrdgjJeByMq2L0k9G9fkdA5uiT7nRsRXHfoKimso4gtKxov0+8iVNUwa1a/AzEHzAu3jnweLYUVscTOdu4arxHfOvxa+UDTkk100aCEuPuIxNiErOtRbgxfH4Q/YsJh3smPBp/bJi9nx/Axa1O6EZ3ovJBIQFhlqMgIiZw8o4KKwnawok0DqobTbBclA5jO+5Z/gFEEnLKVl9I12BLW58KxXd/TLqkpQbSUlCjCQiosOST1pauzidZac0SeYyq4Q7mOAJeogoFKH5g1OAnIVFy97mw86ktKVFy5aWqfw9r00vrwe1rrcbHKRJLtwP0xlPI9GBwL+4EqFQ5CQb4L5G3N6MvGB1p2xIB6zKTB7U2Vb9ttYGoLrWyozV+NRv3wqi1tMUPmDNhndIVTmbY+OtJAPFEvw0PtXvD2cQn/wnV/OfuIVCmgVOJTiSBSVE7MBZqmGjzq+LT6ebwR0kq3nryymS4ldWsqY6uMO1hx27bxs762VjXLqbKOqwSPGvSo47/YprWKipzsPM2MamfbgmmA6RMUF7lq5EwvOwZ25KcNm2dsamX9jsaKgLRDgcuweLqg5xrPgejnEPLfigyeRYk0wxzzJ7dWJ8LDnWYgqMhhetnnuprZS3CogE1GVOBe16POnsCopl0Jl1Vj2k4r3MfUqYNFUlqn85+Rp2h6665FXzcCHWSBsJkY05Jshlyn6HvLd53RyQLtNoJL8Sem8P72tytIdVKSa3oKD09THy8urzsFomoNnzk3ho7Qlig/++wUizZYBp2palFP3HNdKu+Aigwkzl2Y7QPLpmBwueJrmu6g0FY4scSz09CeIk+lUD31yXfbujAlXO1OnXzBhYrAEg69sz9cTf8NWGhqhiJH63henIaCZgI71GFCg1ozoNUjW3W48bngcRj5saH0ernu4EaafG6060Ggv/O05SzQgaMirVfKHvaEzmRX9+e6Z6d0GNx2V/Y6d+jVVet+c6xbF0YvRUqpwB2nmYhOB0suRTUTHenZhdGxxPyvL/XuCVwHSkjRcSEheDCc2ogwDue2u5ufLuDbN1///eFV5lxhx8PharUamHlyjqlw2gy0WQzNPKFfojsbwCxDQ2eGtcdVExBoGzTYAhMxF0kd8Ept3zV3Zr4XDqD7B7IGgqURB2fuCXy4mR5UxB3R3VGS8ViXbhxLrj6yNpyHQvel2DLPuVl3SmhHwDZi1nFXdkeYo1m8z5uA8fNsdg2BBSQ6xWpoEbYWREbkQtGBjY3fjEZ+rgxP34xGvjBQxE+wRAE+FZKrkHF75tCZQhus8OMNq5vE/ygy2oiF2Jc72Em6CsSXwaKQUl/3pdREAXnZEA7RGG1AJ0lpjD9cCdk0ulp2NZd9ybUvufYl147lGl0gocs03XYW2l//FNxlbMyGdHgbtu2aRYxu8fwseLdhpZFszDYhYbbj4XCTaeu2402hjdsOlxSNndmBlkNi1YCROuEyC0IPA0cL3fntguelSjm8hZt3tzP4J3e44mvvSBK5y/rt6O2olyuRHuE4uZ5CsDDArlMKaraU071sA/EpjLfbB3JkUhrh1re0LbgnRm7QTEryfQOHSp7nTs+BiEXVPz/VIHn/x8zHmcrYTXt9/O6J031NH77qqbl909x7jnrvFEcHl3539d6Hw7ux0c6VTUvpUT/XXqMKk4fOIuTUB0o2Grw+xP/11KdxovO8VL6Wq4U//wDvOD+RpXXk9IhJkSBNbOMNI0x1xP4SVqA6wsLrAQEnoLsu4QvhsjIeJDofJmFb8zeWOh7mXKhhJcIOLya/fri6nJz/Mr14d3X77vz1YDRwT84HiFIs56qjx2TvYmrH0k3bt/5vnxsqZDh8csNCcqEI/N7+TVUK7lilW6cYPERVQt+xzSbmFj8Yud3S608lmjUb3z20+U9P24iFE6KvHnStMWYXwbjzWbhFW3JZ+mui/QusbVTvmCQJFu5Z2odORbv+7XZG2VJ9SqGzKBszw1f0mYWv2Jjds3vG6LOP95//3kLvN0xytSj5gugDX/r5Dxt+IOU=
+api: eJztWm1vGzcS/isEv1yMW6+UXtrLbdsDFDtt5baOYSu9A2x/4O6OtLS55IbkSlYF/ffDkPsmaWUrRQ5oAQUIEomz8/rMPFyRK2rZzNDoll6omN4HVBWgmeVKjlMaUZZYPmcWLlRsaEBTMInmBS7TiI4tigKxmVblLCNMCPIo1UKSgmnLUcoQJlNSayEPKjakLIhVxGZANHwqwVhISc6eeF7m4Z2kAa2+fqfSJY1W7iPXkNLI6hICmihpQVpcYkUheOLcHTwYdGpFTZJBzvB/dlkAjaiKHyCxNKCFxuAsB9OurrZiQrceVExwNSDMkBSmXEJKuHQuv7v69ZIUWiVgDHkF4Swk3/0OEENkmXk8R2EXuFPw/R0t2DIHaU8N6DlP4I6Swb9PXJSVd8ZqLmd0HdCF0o+g+12SLAeips4FL1cnlcsZqZw2AcmVsWJJSgMpmSpNhJrNUKIodaEMmB27AZWlECwW4NO7DqjlOajS7jrCXGY02FJjRtjUgiY244YkWPoFF4JIZUkMTcVTEi8Jk8pmoL1UKS0XzuPKDnnFJcnNCcmYITGAJBpYkkHazRKXFmagaUCnSufM+q++eYN5y9kTwnOiRpXR/hRWEPMYtKoFZbz0QVSwe8noP75Co1OwSfYb09ynbjdVghuLFZtXMs6mewpRVcOsWf2W8CmBvLDLwPXRnBseC+g8zmyTNdRbl19JV2hcMokqGpSgdleRGJqSdTPKtGbLnfIHlFvIO/3R4HPd9OVkHz4mbUs3phOVFwIQCIsMJAYhgGFmpHeRmw5WlG5A1WiabKIkJONp3/L3ZBgQht3KSmEbbHHjemHfU9/hU0JhgyghsFG4ISk3mJDUja7OQ7jWhsPzHFLOLIhlQGAO0gco1U5Y4UFAxuHqfKfRsK8pQTJpx6l5Dmvjc+PK72SNw8Ui40m2AfcHP8q3YLCv7DuuVDjwA/nWi7czo68b73HZogLkmFGD2+uq39Zrr9QUSho/m78aDvvhVUfaYgbDCelnsMKhStsc7SEQJ9SrcNe7F7LdWGBCfJjS6PaPkxeudGfAK5OpUiDD4uhZZMySBTMt9Z70UVFFcC2d4Wagz1pc5LKhw/F5x/DfTPN92qg5yNZvoA33Vds1OPeLhxt6boiDACTnfcExY1TCXR2R3EklTsbnfXEkpbEq/wlYCrq3Sw04lHk5knnBZn+RlqiI5CoFN4m+7RCtIfi05kzw3yElFzcfLkmqkhK96YEpS1OXBCauOqCpBsln7jL8/GgB7ZjyQcV9GdBgdYXPnmTmqpQuAZUYETC1fifoVTZQZWLBlrgXIIUy3PI5nBxWzxRYKrjs6Qw3m+uWSJjc3KOwGeMyIAaL65L98XL8XwKFwtHJczCW5cVhm5GGr3d9GCGtN3ReE3y1FXYAcwQeOJIpnV+2S/PoyedWu6aN/jnVNqyXIxbHg1rIZoOyW+Z1O83rCZ9eqPgdM0DX9z0z8mdY9iPiEXCvQ0rJP5VAeArS8ikH3exn9sCs6vaxNJbJBPbq35wNvBJHsweNoef8/iNzrh42L7ld5QOtVDlZ1nv8rYEU1LILbrPq/WRjD7gde997x5560nV3ZYu3PVO7Z7tcO5dpmLC8lCkLWcHDR1iaUJZ5DPrvRx4+8vCRh488fOThvzgPP5f0L8nMB9j50lz9nMk/GXvvd3UPLi4dDf8MS3MAs28IH8DxHpNHjj9y/JHjjxx/5Pi/Oscf37X/hO/aKPNm3+/ihVZznuJoY5bh6QOew82Z4NXpw55fxwutYgH5i8w9IldekqRgGRfEd4Q/H0XB2J+Q3l7/cEb+9ebrf96/yqwtTDQYLBaLUE+TU0i5VTpUejbQ0wT/otxJSCYZaDydW7o50TQYacFNTAEJn/KkbuDKbce/G6crLzD49tFnM1JKzXdOt0fk4/V4BxYbpruHNpTFqrRRLJh8pG05d41uWzFlnjO97OCoY2AdUGOZLbsbmL1TeVs3AuOnyeSKeBUkUSlUDc9NbQiDyLnEo1EavRkO3QmO//TNcOgGPVb8gEgkgadCMOkn6FY4eHqnNFT4cYHVnfKFKqM0n/Ftu+FG01UgPvcR+Zb6uq+lRpJgljXiELRWmqgkKbV2x5hcNN1e2652eMdeO/basdf29Rpe1QCbKbxXVCh30aJgNqMRHeCr26DdftGA4n0Zt7e/XdFSCxrRlW+YdTQYrDJl7DpaFUrb9WCO1djYC+Kyb6waMEIlTGTe6G7hcKG7Hz/zL7jkLbl+fzMhPzILC7Z0iUSTm6rfDt8Oe7Wi6B6No6sx8RF62HVGQa0We7pXrRc+RPEa94gGklJzu7zBx3x6YmAa9KjE3DdwqOw57fjZC9Gg+s8PNUgu/jNxdcYxdt1e1Hr/xPBmRB++6reg9pvmhtGw9/bOcOd6zW397P3uLZThxuWIVtKhfqqcRxUmd5OFyKlfgekwfL2L/6uxa+NE5Xkp3SyXM7cJJKyT/ESUxmLSAyp4Arhji1YUMdUx+4tfIdVLN3kdInA8uusRPuM2K+MwUfmg+pml+TcWKh7kjMtBZcIMzka/frw8H53+Mj57f3nz/vR1OAztk3UFwhbLmez4Mdq6ArIR6arlrf/bxb4KGRae7KAQjEsEv4t/VY2CW1r51hkG90HV0Ld0tYqZgY9arNf49acS9JJGt/dt/+OndUD9G7+bHvgOEtEzH9zpxN9XmTNRuh+Jtq+KrIP6iVGSQGGflb3vTLSrDzcT7Jbq0iL+tkAjqtkCLzSyBY3oHb2jFC9Yuvy5m434/YoKJmclm6G814t//gffQZhf
sidebar_class_name: "post api-method"
info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api
custom_edit_url: null
@@ -41,15 +41,15 @@ The list of activated jobs.
The activated jobs.
-
The provided data is not valid.
diff --git a/docs/apis-tools/camunda-api-rest/specifications/update-user-task.api.mdx b/docs/apis-tools/camunda-api-rest/specifications/update-user-task.api.mdx
index 455e45a6940..f8240c2d514 100644
--- a/docs/apis-tools/camunda-api-rest/specifications/update-user-task.api.mdx
+++ b/docs/apis-tools/camunda-api-rest/specifications/update-user-task.api.mdx
@@ -5,7 +5,7 @@ description: "Update a user task with the given key."
sidebar_label: "Update user task"
hide_title: true
hide_table_of_contents: true
-api: eJztWW1v2zYQ/isHflm3ybKSOF2rb17abdlLFyTOBiwJUFo622wlUuNLHMPQfx+OlGz5JU3Wdd9cIKhj3hvv7nnIHJfM8qlh6Q27NqjBcvOR3UVMVai5FUqe5yxlrsq5RRIY0XrEcjSZFhUJsJRd+2Xg4FoTMBd2BnaGMBX3KOEjLmIWsYprXqJFTQ6XTPISyXpj9xdcsIgJslhxO2MR0/i3ExpzllrtcNvtaIZkGNTEe1o7twpCxOTTZDMsOUuXzC4qciekxSlqFrGJ0iW34auXA1bXd8ElGvu9yheks45gwguDEcuUtCgtrfGqKkTms9T/YCik5a43Nf6AmaW9a8qpFWhoNZtxOUWD3tDmtn6++v0dBLWQxiCbh71xa7UYO4twzwuHJr6Vt5IyMVFFoeZCTtciBjIuYYzA8w/OWMzbsggDKPNKCWkj4HkuyDUvOpq3ci6KgnTFVCqNeUp+voH3GZe5oNz+qJWrzHvogaZ9wHgBlVb3IvchSMCysgsohLGbetRFz1fLHb7hFp+Sv7JayKnXCHm4rv6lWqWF0sIuSKUUUpSuhCSCkj/4j0dJEkGOE+4KC6cJJeOiY69pQmWwm32fbQ7vpSuK96FeoDSoUlhLesJCReHpe0o49XCF2ghfqZWZr0zQXBWaGyOmEpGKK5V9sr7OoMfH0OuFLmpXYzLppY3T1C9Ka+o7vEdpAUthjFASJkp33IbWjW8li3a6nLbKxwW2iF0310Wn/8PaJiKaSnegY3x1ujil/ulZUeIOBVFicofgiaghBNppDJdP1j/eibsmn+s2+g8xBTM9V325yDaR1ImNa80Xe6MgOJHvlaonS/P8cMgABSMslmY3HfWnwgxE8ZlxTr3y/xpoi/wnj4iT471Bt/obQbKINSzC0oS0PHOw9DSJWMMpLCVS2Y6njpgVln5lZ6szoo4Yz4LL7dNiCJkzVpUQBBqWsTNOh0dgcJ5laIwYFwgTrcrOQelhbogiXeEZya9v8AcIea/CIRfD+QSIcULeMY+IwbyXlhmtgtvmtnDLNhhiBZzH99veL8J14jIcxKyuSUajqZQ0gSqOk8FuIkYbV4A5N80dIAfjfAImrigWMeVykCRP6u/cXzp02xiO4TelEXK0XBQGuMZVZkBIr92GDWOVL0I+HrlAVFqNCyy/3b1IbNf7Ikg2ftubAjcQBMfB+83lD2fwenD63d2LmbWVSfv9+Xwe60nWw1xYpWOlp309yeiH5L6OYTRDjVDyRThTVteCNVGDqTATE5FRpf2B1QRDVd5/ImyyfFj9BJ06LXZQNoTry3MQOUorJgvq0x3XrIMxxsfK2XRccPmRrftr1+m2F+PKkusVkDcd1BEzlltnPpcnfhqNLiCYgEzl6E9Vj7XG0QZpDJIuU7xMkppsUsWfsRMJ+FAVXPrW2t6OkFCu+9ZvTEhjucy+VGWUFlOx7TdmXaw3Tfwm7ChgfPAMWO/CkpBOuJwoJ/P4ALADwA4AexRgrz8DYMK0p9lcKzn1GUbInNYobbE4nIIHkB5A+uVAerrvcjqUQFnW1IeoNQ0RMo/AHOYzUXjz/pLf+G6mWAesHbB2wNpjWKsjVqKdqTwMnLOZn1DbGUtZn07EHp2Ipr/szKdrGijTvK6ZYDtdsJQtA3TqtN9fzpSxdbqslLZ1/57qcs+1oD94fRlpOUCsbZ1CZbzwX+8rIS3QmLzd2hkvncw5vILLt1cj+JFbnPPwVy253DT9KnmV7B9ZKG0fsTi8OIeww9CAHVJozRK695oNws8x7EftBjNHg5MrUgvpGSPXqIeOirBqjMaft06/ByEWNR9+aNvl5z9HvuJEaJfrMf7bB15WAY6bU/d24MiOk+NB7+i4d3QyOjpJj16ng+N4cHLyF9seA35Kcnssd9M29N2eWVh3cT1/Ok06g57OyErIifL5aAdDO5mlNqPRsVdM4qNd2Fyce/Rnqiyd9EeAnLYD6rW9rKA5siZeKESG0vi0Ne80rdivYQX+CB7hKKYuC1BomX8q7MyN40yV/Syorf4fF2rcL7mQ/caF6Z8Nf7t+92bY+/X87O27q7e9oziJ7YP11ayUsSWXnTia56bVpXV7s8v1iff8p6mm1yw+2H5VcCEp8X5Py4YUbtiaFFjE0u6z1V3UIPuGLZdjbvBaF3VNX//tUC9YenO3JgLPHLkw9Hn9sPToFl5cNm9QX8PzXrz2bqWdvcqFp6TC0W8sYh9xsfUEV9/VEZshz1H7SIPEWYinNyI7aws7b2B11GoMswwr+4jsxuWDyGDFxBfD0dlPBO7mBa5UOSlrPqcHQT5nKbtltxS68snyvOG/X7KCy6njU5IPhunfP2aHBSg=
+api: eJztWW1v2zYQ/isHftmbLCtZ0jb65qVdm67rgsTZgCUBSktnm61EqnyJYxj678ORki2/pMm67psLBHXMe+PdPQ+Z44JZPjEsvWZXBjVYbj6x24ipCjW3QsmznKXMVTm3SAJDWo9YjibToiIBlrIrvwwcXGsCZsJOwU4RJuIOJXzCecwiVnHNS7SoyeGCSV4iWW/s/oZzFjFBFitupyxiGj87oTFnqdUON90Op0iGQY29p5VzqyBETD5NNsWSs3TB7Lwid0JanKBmERsrXXIbvnp2xOr6NrhEY39R+Zx0VhGMeWEwYpmSFqWlNV5Vhch8lvofDYW02PamRh8xs7R3TTm1Ag2tZlMuJ2jQG1rf1tvLP95DUAtpDLJ52Bu3VouRswh3vHBo4ht5IykTY1UUaibkZCViIOMSRgg8/+iMxbwtizCAMq+UkDYCnueCXPOio3kjZ6IoSFdMpNKYp+TnR/iQcZkLyu1rrVxlPkAPNO0DRnOotLoTuQ9BApaVnUMhjF3Xoy56ulru8CW3+Jj8pdVCTrxGyMNV9S/VKi2UFnZOKqWQonQlJBGU/N5/PEiSCHIcc1dYOE4oGecde00TKoPd7Ptsc/ggXVF8CPUCpUGVwlrSExYqCk/fUcKphyvURvhKLc18Z4LmstDcGDGRiFRcqeyj9XUGPT4GXi90Ubsak0kvbZymflFaU9/hHUoLWApjhJIwVrrjNrRufCNZtNXltFU+KrBF7Kq5zjv9H9bWEdFUugMd46vTxSn1T8+KErcoiBKTOwRPRA0h0E5juHi0/vFW3DX5XLXRf4gpmOm56ttFto6kTmxcaz7fGQXBiXwvVT1ZmqeHQwYoGGGxNNvpqL8UZiCKr4xz4pX/10Bb5D96RPx8uDPoVn8tSBaxhkVYmpCWZw6WHicRaziFpUQqm/HUEbPC0q/sdHlG1BHjWXC5eVoMIHPGqhKCQMMydsrp8AgMzrMMjRGjAmGsVdk5KD3MDVGkKzwj+fU1/gAh71Q45GI4GwMxTsg75hExmPfSMqNVcNPcFm7YGkMsgfPwftv7RbhOXISDmNU1yWg0lZImUMVhcrSdiOHaFWDGTXMHyME4n4CxK4p5TLk8SpJH9bfuLx26bQzH8LvSCDlaLgoDXOMyMyCk127DhpHK5yEfD1wgKq1GBZY/bV8kNut9HiQbv+1NgRsIgqPg/fri11M4OTp+fvv91NrKpP3+bDaL9TjrYS6s0rHSk74eZ/RDcj/EMJyiRij5PJwpy2vBiqjBVJiJscio0v7AaoKhKu8+EdZZPqx+gU6dFlsoG8DVxRmIHKUV4zn16ZZr1sEY4yPlbDoquPzEVv217XTTi3FlyfUSyOsO6ogZy60zX8sTb4bDcwgmIFM5+lPVY61xtEYaR0mXKZ4lSU02qeJP2IkEvK8KLn1rbW5HSChXfes3JqSxXGbfqjJKi4nY9BuzLtabJn4ZdhQwfvQEWG/DkpBOuBwrJ/N4D7A9wPYAexBgJ18BMGHa02ymlZz4DCNkTmuUtpjvT8E9SPcg/XYgPd51OR1IoCxr6kPUmoYImUdgDrOpKLx5f8lvfDdTrD3W9ljbY+0hrNURK9FOVR4GztnUT6jtlKWsTydij05E01905tM1DZRpXtdMsJ0uWMoWATp12u8vpsrYOl1UStu6f0d1ueNa0B+8voy0HCDWtk6hMl74r3eVkBZoTN5u7ZSXTuYcXsDFq8shvOYWZzz8VUsu102/SF4ku0cWStsHLA7OzyDsMDRghxRas4TunWaD8FMM+1G7wczR4OSS1EJ6Rsg16oGjIiwbo/HnrdPvQYhFzYdf23Z5+9fQV5wI7WI1xn91z8sqwHF96t4OHNlhcnjUOzjsHZwMD47T5Hn680l88vzZ32xzDPglyc2x3HXb0Lc7ZmHdxdX86TjpDHo6Iyshx8rnox0MbWWW2oxGx14xiQ+2YXN+5tGfqbJ00h8BctIOqFf2soLmyJp4oRAZSuPT1rzTtGLvwgr8GTzCQUxdFqDQMv9E2KkbxZkq+1lQW/4/KtSoX3Ih+40L0z8d/H71/uWg9+7s9NX7y1e9gziJ7b311ayUsSWXnTia56blpXVzs4vViff0p6mm1yze235VcCEp8X5Pi4YUrtmKFFjE0u6z1W3UIPuaLRYjbvBKF3VNX392qOcsvb5dEYFnjlwY+rx6WHpwC99fNG9QP8DTXrx2bqWdvcq5p6TC0W8sYp9wvvEEV9/WEZsiz1H7SIPEaYinNyQ7Kwtbb2B11GoMsgwr+4Ds2uWDyGDJxOeD4ekbAnfzAleqnJQ1n9GDIJ+xlN2wGwpd+WR53vDfL1jB5cTxCckHw/TvHxGiBVY=
sidebar_class_name: "patch api-method"
info_path: docs/apis-tools/camunda-api-rest/specifications/camunda-8-rest-api
custom_edit_url: null
diff --git a/docs/apis-tools/node-js-sdk.md b/docs/apis-tools/node-js-sdk.md
index 583c837affb..b11e5f327d7 100644
--- a/docs/apis-tools/node-js-sdk.md
+++ b/docs/apis-tools/node-js-sdk.md
@@ -1,7 +1,7 @@
---
id: node-js-sdk
title: Node.js
-description: Get started with the official Camunda 8 JavaScript SDK for Node.js, available via npm.
+description: Get started with the official Camunda 8 JavaScript SDK for Node.js.
---
As of 8.5.0, the official [Camunda 8 JavaScript SDK for Node.js](https://github.com/camunda/camunda-8-js-sdk) is available via [npm](https://www.npmjs.com/package/@camunda8/sdk).
diff --git a/docs/apis-tools/spring-zeebe-sdk/configuration.md b/docs/apis-tools/spring-zeebe-sdk/configuration.md
index 6cff515bf51..5c644b42928 100644
--- a/docs/apis-tools/spring-zeebe-sdk/configuration.md
+++ b/docs/apis-tools/spring-zeebe-sdk/configuration.md
@@ -275,7 +275,7 @@ A custom maxMessageSize allows the client to receive larger or smaller responses
camunda:
client:
zeebe:
- max-message-size: 3
+ max-message-size: 4194304
```
### Request timeout
diff --git a/docs/apis-tools/working-with-apis-tools.md b/docs/apis-tools/working-with-apis-tools.md
index 10907399b5d..0f75b9bb805 100644
--- a/docs/apis-tools/working-with-apis-tools.md
+++ b/docs/apis-tools/working-with-apis-tools.md
@@ -72,6 +72,9 @@ Additionally, visit our documentation on [Operate](../self-managed/operate-deplo
### SDKs
### Postman
diff --git a/docs/components/modeler/bpmn/user-tasks/user-tasks.md b/docs/components/modeler/bpmn/user-tasks/user-tasks.md
index 90a64a6191c..b1a4919be5d 100644
--- a/docs/components/modeler/bpmn/user-tasks/user-tasks.md
+++ b/docs/components/modeler/bpmn/user-tasks/user-tasks.md
@@ -36,6 +36,11 @@ attributes can be specified simultaneously:
- `candidateUsers`: Specifies the users that the task can be assigned to.
- `candidateGroups`: Specifies the groups of users that the task can be assigned to.
+:::info
+The `assignee` attribute must adhere to the `userId` field’s case-sensitivity requirements.
+Note that in SaaS, all user IDs are converted to lowercase by default, as they are based on email addresses.
+:::
+
:::info
Assignment resources can also be used for set user task restrictions ([SaaS](/components/concepts/access-control/user-task-access-restrictions.md)/[Self-Managed](docs/self-managed/concepts/access-control/user-task-access-restrictions.md)), where users will see only the tasks they have authorization to work on.
:::
diff --git a/docs/components/modeler/desktop-modeler/use-connectors.md b/docs/components/modeler/desktop-modeler/use-connectors.md
index 921b5c0f693..6a0f28ee837 100644
--- a/docs/components/modeler/desktop-modeler/use-connectors.md
+++ b/docs/components/modeler/desktop-modeler/use-connectors.md
@@ -12,7 +12,7 @@ Desktop Modeler automatically fetches and updates [element templates](./element-
## Automatic Connector template fetching
-Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors.
+Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors. The fetch is triggered whenever you start the application, or every 24 hours if the application is not closed.
After an update check has concluded, a notification indicates if the templates are up to date or have been updated:
diff --git a/docs/self-managed/console-deployment/configuration/configuration.md b/docs/self-managed/console-deployment/configuration/configuration.md
index 1c09cea48e1..69365322378 100644
--- a/docs/self-managed/console-deployment/configuration/configuration.md
+++ b/docs/self-managed/console-deployment/configuration/configuration.md
@@ -41,6 +41,20 @@ Console environment variables could be set in Helm via the `console.env` key. Fo
Camunda 8 components without a valid license may display **Non-Production License** in the navigation bar and issue warnings in the logs. These warnings have no impact on Console startup or functionality. To obtain a license, visit the [Camunda Enterprise page](https://camunda.com/platform/camunda-platform-enterprise-contact/).
:::
+### Proxy
+
+These settings are useful when the application needs to make outgoing network requests in environments that require traffic to pass through a proxy server.
+
+| Environment variable | Description | Example value | Default value |
+| -------------------- | ---------------------------------------------------------------------------------------------- | ------------------------------------- | ------------- |
+| `http_proxy` | Specifies the proxy server to be used for outgoing HTTP requests. | `http://proxy.example.com:8080` | - |
+| `https_proxy` | Specifies the proxy server to be used for outgoing HTTPS requests. | `https://secureproxy.example.com:443` | - |
+| `no_proxy` | A comma-separated list of domain names or IP addresses for which the proxy should be bypassed. | `localhost,127.0.0.1,.example.com` | - |
+
+:::note
+The proxy-related environment variables are lowercase because they follow a widely accepted convention used in many system environments and tools.
+:::
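+
+As an illustrative sketch only (assuming the `console.env` Helm key mentioned above accepts a standard Kubernetes-style list of name/value pairs), the proxy variables from the table could be passed to Console like this:
+
+```yaml
+# Hypothetical Helm values snippet: forwarding proxy settings to Console via console.env.
+# Variable names and example endpoints mirror the table above; adjust them to your environment.
+console:
+  env:
+    - name: http_proxy
+      value: "http://proxy.example.com:8080"
+    - name: https_proxy
+      value: "https://secureproxy.example.com:443"
+    - name: no_proxy
+      value: "localhost,127.0.0.1,.example.com"
+```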
+
## Telemetry
You can enable telemetry and usage collection to help us improve our product by sending several telemetry metrics to Camunda. The information we collect will contribute to continuous product enhancement and help us understand how Camunda is used. We do not collect sensitive information and limit data points to several metrics. For more information, you can download collected data set metrics from the telemetry page at anytime.
diff --git a/docs/self-managed/modeler/web-modeler/configuration/configuration.md b/docs/self-managed/modeler/web-modeler/configuration/configuration.md
index 0fa011b7f38..5585791bf52 100644
--- a/docs/self-managed/modeler/web-modeler/configuration/configuration.md
+++ b/docs/self-managed/modeler/web-modeler/configuration/configuration.md
@@ -20,7 +20,9 @@ import Licensing from '../../../../self-managed/react-components/licensing.md'
### Clusters
-Clusters configured using the following options can be selected when deploying from Web Modeler. If no clusters are configured, you will not be able to preform any actions that require a cluster (for example, deploy, start an instance, or Play a process). The Camunda 8 [Helm](/self-managed/setup/install.md) and [Docker Compose](/self-managed/setup/deploy/local/docker-compose.md) distributions provide a local Zeebe cluster configured by default.
+Clusters must be configured using the following options to be accessible from within Web Modeler. If no clusters are configured, you will not be able to perform any actions that require a cluster (for example, deploy, start an instance, or Play a process).
+
+The Camunda 8 [Helm](/self-managed/setup/install.md) and [Docker Compose](/self-managed/setup/deploy/local/docker-compose.md) distributions provide a local Zeebe cluster configured by default.
To add additional clusters, increment the `0` value for each variable (`CAMUNDA_MODELER_CLUSTERS_1_NAME`).
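+
+For example, a second cluster could be declared by repeating the variables with the next index. This is an illustrative sketch only, shown as a Docker Compose-style environment map; only the `_NAME` variable is spelled out, and the remaining per-cluster variables follow the same indexed `CAMUNDA_MODELER_CLUSTERS_<index>_...` naming pattern:
+
+```yaml
+# Illustrative only: two clusters, distinguished by the index in each variable name.
+CAMUNDA_MODELER_CLUSTERS_0_NAME: "Local Zeebe cluster"
+CAMUNDA_MODELER_CLUSTERS_1_NAME: "Production cluster"
+```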
diff --git a/docs/self-managed/operational-guides/update-guide/860-to-870.md b/docs/self-managed/operational-guides/update-guide/860-to-870.md
index 205beda3075..d8604906a77 100644
--- a/docs/self-managed/operational-guides/update-guide/860-to-870.md
+++ b/docs/self-managed/operational-guides/update-guide/860-to-870.md
@@ -12,3 +12,50 @@ The following sections explain which adjustments must be made to migrate from Ca
Configuring a non-existing bucket for backups will not prevent Zeebe to start up anymore and will only result
in logs (at WARN) in the startup phase.
+
+## Exported records
+
+### `USER_TASK` records
+
+To support User Task Listeners, some backward-incompatible changes were necessary to the exported `USER_TASK` records.
+
+#### `assignee` no longer provided in `CREATING/CREATED` events
+
+Previously, when a user task was activating with a specified `assignee`,
+we appended the following events of the `USER_TASK` value type:
+
+- `CREATING` with `assignee` property as provided
+- `CREATED` with `assignee` property as provided
+
+The `ASSIGNING` and `ASSIGNED` events were not appended in this case.
+
+To support the new User Task Listeners feature, the `assignee` value will not be filled in the `CREATING` and `CREATED` events anymore.
+
+With 8.7, the following events are now appended:
+
+- `CREATING` with `assignee` always `""` (empty string)
+- `CREATED` with `assignee` always `""` (empty string)
+- `ASSIGNING` with `assignee` property as provided
+- `ASSIGNED` with `assignee` property as provided
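+
+For illustration, an abbreviated sketch (field names and structure simplified, not the full exported record format) of the 8.7 records for a user task created with assignee `demo`:
+
+```yaml
+# Simplified view of the USER_TASK records appended in 8.7 for a task created with an assignee.
+- intent: CREATING
+  value: { assignee: "" }
+- intent: CREATED
+  value: { assignee: "" }
+- intent: ASSIGNING
+  value: { assignee: "demo" }
+- intent: ASSIGNED
+  value: { assignee: "demo" }
+```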
+
+#### `ASSIGNING` has become `CLAIMING` for `CLAIM` operation
+
+When claiming a user task, we previously appended the following records of the `USER_TASK` value type:
+
+- `CLAIM`
+- `ASSIGNING`
+- `ASSIGNED`
+
+A new `CLAIMING` intent was introduced to distinguish between claiming and regular assigning.
+We now append the following records when claiming a user task:
+
+- `CLAIM`
+- `CLAIMING`
+- `ASSIGNED`
+
+The `ASSIGNING` event is still appended for assigning a user task.
+In that case, we append the following records:
+
+- `ASSIGN`
+- `ASSIGNING`
+- `ASSIGNED`
diff --git a/docs/self-managed/reference-architecture/img/management-cluster.jpg b/docs/self-managed/reference-architecture/img/management-cluster.jpg
new file mode 100644
index 00000000000..026ecfcdb9f
Binary files /dev/null and b/docs/self-managed/reference-architecture/img/management-cluster.jpg differ
diff --git a/docs/self-managed/reference-architecture/img/orchestration-cluster.jpg b/docs/self-managed/reference-architecture/img/orchestration-cluster.jpg
new file mode 100644
index 00000000000..276984d34ae
Binary files /dev/null and b/docs/self-managed/reference-architecture/img/orchestration-cluster.jpg differ
diff --git a/docs/self-managed/reference-architecture/manual/img/manual-ha.jpg b/docs/self-managed/reference-architecture/manual/img/manual-ha.jpg
new file mode 100644
index 00000000000..5d06e9a1146
Binary files /dev/null and b/docs/self-managed/reference-architecture/manual/img/manual-ha.jpg differ
diff --git a/docs/self-managed/reference-architecture/manual/img/manual-single.jpg b/docs/self-managed/reference-architecture/manual/img/manual-single.jpg
new file mode 100644
index 00000000000..2d54eb33b3f
Binary files /dev/null and b/docs/self-managed/reference-architecture/manual/img/manual-single.jpg differ
diff --git a/docs/self-managed/reference-architecture/manual/manual.md b/docs/self-managed/reference-architecture/manual/manual.md
new file mode 100644
index 00000000000..f82e348be42
--- /dev/null
+++ b/docs/self-managed/reference-architecture/manual/manual.md
@@ -0,0 +1,123 @@
+---
+id: manual
+title: "Manual JAR deployment overview"
+sidebar_label: Manual JAR
+description: "Camunda 8 Manual (Java) deployment Reference architecture home "
+---
+
+
+
+This reference architecture provides guidance on deploying Camunda 8 Self-Managed as a standalone Java application. This deployment method is ideal for users who prefer manual deployment on bare metal servers or virtual machines (VMs), offering full control over the environment and configuration. It is particularly suited for scenarios with specific infrastructure requirements or highly customized setups.
+
+:::note
+This method of deployment requires a solid understanding of infrastructure, networking, and application management. Consider evaluating your [deployment platform options](../reference-architecture.md) based on your familiarity and need. If you prefer a simpler and managed solution, [Camunda 8 SaaS](https://camunda.com/platform/) can significantly reduce maintenance efforts, allowing you to focus on your core business needs.
+:::
+
+## Key features
+
+- **Single application JAR**: Starting from Camunda 8.7, all core components (Zeebe, Tasklist, Operate, Optimize, and Identity) are bundled into a single JAR file. This simplifies deployment by reducing the number of artifacts to manage.
+- **Full control**: Users are responsible for all aspects of deployment, including installation, configuration, scaling, and maintenance. This offers maximum flexibility for custom environments.
+
+Other deployment options, such as containerized deployments or managed services, might offer more convenience and automation. However, VM-based deployment gives you the flexibility to tailor the deployment to your exact needs, which can be beneficial for regulated or highly customized environments.
+
+For documentation on the orchestration cluster, Web Modeler and Console separation, refer to the [reference architecture overview](/self-managed/reference-architecture/reference-architecture.md#orchestration-cluster-vs-web-modeler-and-console).
+
+## Reference implementations
+
+This section includes deployment reference architectures for manual setups:
+
+- [Amazon EC2 deployment](/self-managed/setup/deploy/amazon/aws-ec2.md) - a standard production setup with support for high availability.
+
+## Considerations
+
+- This overview page focuses on deploying the [orchestration cluster](/self-managed/reference-architecture/reference-architecture.md#orchestration-cluster), the single JAR composed of Identity, Operate, Optimize, Tasklist, and Zeebe, as well as the Connectors runtime. Web Modeler and Console deployments are not included.
+- General guidance and examples focus on **Unix** users, but can be adapted by Windows users with options like [WSL](https://learn.microsoft.com/en-us/windows/wsl/install) or the included `batch` files.
+- The Optimize importer is not highly available and must only run once within the whole setup.
+
+## Architecture
+
+![Single JAR](./img/manual-single.jpg)
+
+The above diagram illustrates a single-machine deployment using the single JAR package. While simple and effective for lightweight setups, scaling to multiple machines requires careful planning.
+
+Compared to the generalized architecture depicted in the [reference architecture](/self-managed/reference-architecture/reference-architecture.md#architecture), the `Optimize importer` can be enabled as part of the single JAR.
+
+### High availability (HA)
+
+:::caution Non-HA Optimize importer
+When scaling from a single machine to multiple machines, ensure that the `Optimize importer` is enabled on only one machine and disabled on the others. Enabling it on multiple machines will cause data inconsistencies. This limitation is known and will be addressed in future updates.
+:::
+
+![HA JAR](./img/manual-ha.jpg)
+
+For high availability, a minimum of three machines is recommended to ensure fault tolerance and enable master election in case of failures. Refer to the [clustering documentation](/components/zeebe/technical-concepts/clustering.md) to learn more about the raft protocol and clustering concepts.
+
+### Components
+
+The orchestration core is packaged as a single JAR file and includes the following components:
+
+- **Zeebe**
+- **Operate**
+- **Tasklist**
+- **Optimize**
+- **Identity**
+
+The core facilitates:
+
+1. **gRPC communication**: For client workers.
+2. **HTTP endpoints**: Used by the REST API and Web UI.
+
+Both types of endpoints can be routed through a load balancer to maintain availability, ensuring that the system remains accessible even if a machine becomes unavailable. While using a load balancer is optional, it is recommended for enhanced availability and security. Alternatively, you can expose static machines, ports, and IPs directly. However, direct exposure is generally discouraged due to security concerns.
+
+Connectors expose additional HTTP(S) endpoints for handling incoming webhooks, which can also be routed through the same HTTP load balancer.
+
+The orchestration components rely on **Elasticsearch** or **OpenSearch** as their data store.
+
+Components within the orchestration core communicate seamlessly, particularly:
+
+- **Zeebe brokers** exchange data over gRPC endpoints for efficient inter-broker communication.
+
+## Requirements
+
+Before implementing a reference architecture, review the requirements and guidance outlined below. We are differentiating between `Infrastructure` and `Application` requirements.
+
+### Infrastructure
+
+The following are suggestions for a minimum viable setup; sizing heavily depends on your use cases and usage. It is recommended to read the documentation on [sizing your environment](/components/best-practices/architecture/sizing-your-environment.md) and run benchmarks to confirm your required needs.
+
+#### Minimum requirements per host
+
+- Modern CPU: 4 cores
+- Memory: 8 GB RAM
+- Storage: 32 GB SSD (**1,000** IOPS recommended; avoid burstable disk types)
+
+Suggested instance types from cloud providers:
+
+- AWS: [m7i](https://aws.amazon.com/ec2/instance-types/m7i/) series
+- GCP: [n1](https://cloud.google.com/compute/docs/general-purpose-machines#n1_machines) series
+
+#### Networking
+
+- Stable and high-speed network connection
+- Configured firewall rules to allow necessary traffic (see the example after this list):
+ - **8080**: Web UI / REST endpoint.
+ - **9090**: Connector port.
+ - **9600**: Metrics endpoint.
+ - **26500**: gRPC endpoint.
+ - **26501**: Gateway-to-broker communication.
+ - **26502**: Inter-broker communication.
+- Load balancer for distributing traffic (if required)
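+
+For example, on a host using `ufw`, the rules might look like the following sketch (`ufw` and the open-to-all rules are assumptions for illustration; adapt the tooling and source ranges to your environment):
+
+```sh
+# Allow Camunda 8 traffic; restrict sources to your internal networks where possible
+sudo ufw allow 8080/tcp   # Web UI / REST endpoint
+sudo ufw allow 9090/tcp   # Connector port
+sudo ufw allow 9600/tcp   # Metrics endpoint
+sudo ufw allow 26500/tcp  # gRPC endpoint
+sudo ufw allow 26501/tcp  # Gateway-to-broker communication
+sudo ufw allow 26502/tcp  # Inter-broker communication
+```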
+
+:::info Customizing ports
+Some ports can be overridden and are not definitive. Consult the documentation of each component if you want to use a different port. For example, Connectors and the Web UIs both default to port 8080, which is why Connectors is moved to a different port in this setup.
+:::
+
+### Application
+
+- Java Virtual Machine, see [supported environments](/reference/supported-environments.md) for version details.
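+
+For example, you can verify the Java installation on each host before proceeding:
+
+```sh
+# The reported version should match the supported environments page
+java -version
+```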
+
+### Database
+
+- Elasticsearch / OpenSearch, see [supported environments](/reference/supported-environments.md) for version details.
+
+Our recommendation is to use an externally managed offering, as this guide does not go into detail on how to manage and maintain your database.
diff --git a/docs/self-managed/reference-architecture/reference-architecture.md b/docs/self-managed/reference-architecture/reference-architecture.md
new file mode 100644
index 00000000000..da8b632d49c
--- /dev/null
+++ b/docs/self-managed/reference-architecture/reference-architecture.md
@@ -0,0 +1,144 @@
+---
+id: reference-architecture
+title: "Camunda 8 reference architectures"
+sidebar_label: "Overview"
+description: "Learn about the Self-Managed reference architectures and how they can help you get started."
+---
+
+Reference architectures provide a comprehensive blueprint for designing and implementing scalable, robust, and adaptable systems. The reference architectures published here offer guidance to help enterprise architects, developers, and IT managers streamline deployments and improve system reliability.
+
+## Overview
+
+Reference architectures are not a one-size-fits-all solution, and each organization has unique requirements and constraints that may necessitate modifications to the provided blueprints.
+
+While these reference architectures offer a solid foundation and best practices, they should be adapted to fit the specific needs of your project. Use them as a starting point for your Camunda 8 implementation, but be prepared to make adjustments to ensure they align with your goals and infrastructure.
+
+### Target users
+
+- **Enterprise architects**: To design and plan the overall system structure.
+- **Developers**: To understand the components and their interactions.
+- **IT managers**: To ensure the system meets business requirements and is maintainable.
+
+### Key benefits
+
+- **Accelerated deployment**: Predefined best practices and guidelines simplify the deployment process, reducing the time and effort required to set up a reliable workflow automation solution.
+- **Consistency**: Ensures consistency across deployments by standardizing system components and their configurations, which helps reduce the risk of errors and simplifies maintenance.
+- **Enhanced security**: Reference architectures incorporate best practices for securing Camunda 8 deployments, ensuring that sensitive data and processes are protected through standard security measures like encryption, authentication, and access controls.
+
+### Support considerations
+
+Deviations from the reference architecture are unavoidable. However, such changes will introduce additional complexity, making troubleshooting more difficult. When modifications are required, ensure they are well documented to simplify future maintenance and troubleshooting. Camunda publishes [supported environment](/reference/supported-environments.md) information to help you navigate supported configurations.
+
+## Architecture
+
+### Orchestration cluster vs Web Modeler and Console
+
+When designing a reference architecture, it's essential to understand the differences between an orchestration cluster and Web Modeler and Console Self-Managed. Both play crucial roles in the deployment and operation of processes, but they serve different purposes and consist of distinct components.
+
+#### Orchestration Cluster
+
+![Orchestration Cluster](./img/orchestration-cluster.jpg)
+
+The orchestration cluster is the core of Camunda.
+
+The included components are:
+
+- [Zeebe](/components/zeebe/zeebe-overview.md): A workflow engine for orchestrating microservices and managing stateful, long-running business processes.
+- [Operate](/components/operate/operate-introduction.md): A monitoring tool for visualizing and troubleshooting workflows running in Zeebe.
+- [Tasklist](/components/tasklist/introduction-to-tasklist.md): A user interface for managing and completing human tasks within workflows.
+- [Optimize]($optimize$/components/what-is-optimize/): An analytics tool for generating reports and insights based on workflow data.
+- [Identity](/self-managed/identity/what-is-identity.md): A service for managing user authentication and authorization.
+- [Connectors](/components/connectors/introduction.md): Pre-built integrations for connecting Zeebe with external systems and services.
+
+Each component within the orchestration cluster is part of an integrated system that works together to provide end-to-end process orchestration. These components form a unified cluster that is tightly integrated to ensure seamless communication and data flow.
+
+This design ensures that all components are in sync, working collectively to maintain consistent state management, data integrity, and smooth process orchestration across the entire cluster. This architecture ensures reliable process execution with clear boundaries between each workflow engine's operation.
+
+#### Web Modeler and Console
+
+![Web Modeler and Console](./img/management-cluster.jpg)
+
+Web Modeler and Console are designed to interact with multiple orchestration clusters. Console offers tools and interfaces for administrators to monitor clusters, and Web Modeler allows developers to create and deploy BPMN models.
+
+- [Console](/components/console/introduction-to-console.md): A central management interface for monitoring and managing multiple orchestration clusters.
+- [Web Modeler](/self-managed/modeler/web-modeler/installation.md): A web-based tool for designing and deploying workflow models to any available orchestration cluster.
+
+Additionally, Web Modeler and Console require the following:
+
+- [Identity](/self-managed/identity/what-is-identity.md): A service for managing user authentication and authorization.
+
+Unlike the orchestration cluster, Web Modeler and Console run a separate and dedicated Identity deployment. For production environments, using an external [identity provider](/self-managed/setup/guides/connect-to-an-oidc-provider.md) is recommended.
+
+### Databases
+
+Databases can be deployed as part of the Camunda clusters, but using external databases or managed services offers several advantages:
+
+- **Flexibility**: Allows you to choose the database technology that best fits your needs and existing infrastructure while choosing one of the [supported environments](/reference/supported-environments.md#component-requirements).
+- **Scalability**: External databases can be scaled independently of the Camunda components, providing better performance and resource management.
+- **Maintenance**: Simplifies the maintenance and upgrade processes, as database management can be handled separately.
+- **Compliance**: Ensures that you can adhere to specific data governance and compliance requirements.
+
+While some guides go into detail on how to deploy databases together with Camunda, the recommendation is to manage them outside of Camunda.
+
+By decoupling databases from Camunda, you gain greater control and customization over your data storage and management strategies.
+
+### High availability (HA)
+
+High availability (HA) ensures that a system remains operational and accessible even in the event of component failures. While all components can run in a highly available manner, some need extra consideration when run in HA mode.
+
+
+
+While high availability is one part of the increased fault tolerance and resilience, you should also consider regional or zonal placement of your workloads.
+
+If you run infrastructure on cloud providers, you are often met with different regions and zones. For ideal high availability, consider a minimum setup of three zones within a region, as this guarantees that in case of a zonal failure the remaining two workloads can still process data. For more information on how Zeebe handles fault tolerance, have a look at the [raft consensus chapter](/components/zeebe/technical-concepts/clustering.md#raft-consensus-and-replication-protocol).
+
+If running a single instance is preferred, make sure to implement [regular backups](/self-managed/operational-guides/backup-restore/backup-and-restore.md) since resilience will be limited.
+
+## Available reference architectures
+
+:::note Documentation Update in Progress
+This is a work in progress as the existing documentation is updated to provide better general guidance on the topic. The Kubernetes and Docker documentation may point to older guides.
+:::
+
+Choosing the right reference architecture depends on various factors such as the organization's goals, existing infrastructure, and specific requirements. The following guides are available to help choose and guide deployments:
+
+### Kubernetes
+
+Kubernetes is a powerful orchestration platform for containerized applications. Using a reference architecture for Kubernetes can help organizations deploy and manage their applications more effectively. It provides guidelines for setting up clusters, managing workloads, and ensuring high availability and scalability. This approach is ideal for organizations looking to leverage the benefits of containerization and self-healing capabilities.
+
+- Ideal for organizations adopting containerization and microservices (see the [Cloud Native Computing Foundation](https://www.cncf.io/)).
+- Suitable for dynamic scaling and high availability.
+- Best for teams with experience in managing containerized environments.
+- A steeper learning curve, but offers a scalable and highly resilient platform.
+
+For more information and guides, see the reference for [Kubernetes](/self-managed/setup/install.md).
+
+
+
+### Containers
+
+Containers, such as Docker, offer a middle ground between the manual JAR and Kubernetes approaches. They provide a lightweight, portable, and consistent runtime environment, making it easier to develop, test, and deploy applications across different environments. Containers encapsulate an application and its dependencies, ensuring that it runs reliably regardless of where it is deployed.
+
+- Advisable as a middle ground between the manual JAR and Kubernetes: benefit from containerization without the full overhead of Kubernetes.
+- Containers can run on any system that supports the container runtime, ensuring consistency across development, testing, and production environments.
+- Each container runs in its own isolated environment, which helps prevent conflicts between applications and improves security.
+- Containers can be easily scaled up or down to handle varying workloads, providing flexibility in resource management.
+
+For more information and guides, see the reference for [containers](/self-managed/setup/deploy/other/docker.md).
+
+### Manual JAR (bare metal/virtual machines)
+
+For organizations that prefer traditional infrastructure, reference architectures for bare metal or virtual machines (VMs) offer a structured approach to system deployment. These architectures provide best practices for setting up physical servers or VMs, configuring networks, and managing storage using Infrastructure as a Service (IaaS) cloud providers. They are suitable for environments where containerization or use of Kubernetes services may not be feasible.
+
+- Suitable for organizations requiring use of IaaS, bare metal, and other traditional infrastructures.
+- Ideal for traditional setups needing highly customized security, strict data residency, or industry-specific regulatory compliance.
+- Applicable for high availability but requires more detailed planning.
+- Best for teams with expertise in managing physical servers or virtual machines.
+
+For more information and guides, see the reference for [manual setups](./manual/manual.md).
+
+
+
+### Local development
+
+While the above options are suitable for trying out Camunda 8 locally, [Camunda 8 Run](/self-managed/setup/deploy/local/c8run.md) provides a simplified, developer-focused experience.
diff --git a/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md b/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md
index d196001567e..bc70916d34f 100644
--- a/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md
+++ b/docs/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md
@@ -4,6 +4,8 @@ title: "Install Camunda 8 on an EKS cluster"
description: "Set up the Camunda 8 environment with Helm and an optional Ingress setup on Amazon EKS."
---
+
+
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
@@ -30,6 +32,7 @@ Multi-tenancy is disabled by default and is not covered further in this guide. I
:::caution Optimize compatibility with OpenSearch
**Migration:** The migration step will be disabled during the installation. For more information, refer to [using Amazon OpenSearch Service](/self-managed/setup/guides/using-existing-opensearch.md).
+
:::
## Architecture
@@ -427,7 +430,6 @@ https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.7/
Use these environment variables in the `kubectl` command to create the secret.
-- The values for `postgres-password` and `password` are not required if you are using an external database. If you choose not to use an external database, you must provide those values.
- The `smtp-password` should be replaced with the appropriate external value ([see how it's used by Web Modeler](/self-managed/modeler/web-modeler/configuration/configuration.md#smtp--email)).
```bash reference
@@ -569,7 +571,7 @@ Below is a summary of the necessary instructions:
1. Open Identity in your browser at `https://${DOMAIN_NAME}/identity`. You will be redirected to Keycloak and prompted to log in with a username and password.
2. Use `demo` as both the username and password.
3. Select **Add application** and select **M2M** as the type. Assign a name like "test."
-4. Select the newly created application. Then, select **Access to APIs > Assign permissions**, and select the **Zeebe API** with "write" permission.
+4. Select the newly created application. Then, select **Access to APIs > Assign permissions**, and select the **Core API** with "read" and "write" permission.
5. Retrieve the `client-id` and `client-secret` values from the application details
```shell
@@ -591,7 +593,7 @@ kubectl port-forward services/camunda-keycloak 18080:80 --namespace camunda
1. Open Identity in your browser at `http://localhost:8080`. You will be redirected to Keycloak and prompted to log in with a username and password.
2. Use `demo` as both the username and password.
3. Select **Add application** and select **M2M** as the type. Assign a name like "test."
-4. Select the newly created application. Then, select **Access to APIs > Assign permissions**, and select the **Zeebe API** with "write" permission.
+4. Select the newly created application. Then, select **Access to APIs > Assign permissions**, and select the **Core API** with "read" and "write" permission.
5. Retrieve the `client-id` and `client-secret` values from the application details
```shell
diff --git a/docs/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md b/docs/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md
index 7a440c0e3b0..cad212c143a 100644
--- a/docs/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md
+++ b/docs/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md
@@ -96,25 +96,25 @@ Advanced users may want to handle this part differently and use a different back
#### Set up AWS authentication
The [AWS Terraform provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) is required to create resources in AWS. Before you can use the provider, you must authenticate it using your AWS credentials.
-You can further change the region and other preferences and explore different [authentication](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) methods.
-We recommend using the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html). If you have configured your AWS CLI, Terraform will automatically detect and use those credentials.
+:::caution Ownership of the created resources
-To configure the AWS CLI:
+A user who creates resources in AWS will always retain administrative access to those resources, including any Kubernetes clusters created. It is recommended to create a dedicated [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) for Terraform purposes, ensuring that the resources are managed and owned by that user.
-```bash
-aws configure
-```
+:::
-Enter your `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, region, and output format. These can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html).
+You can further change the region and other preferences and explore different [authentication](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) methods:
-:::caution Ownership of the created resources
+- For development or testing purposes you can use the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html). If you have configured your AWS CLI, Terraform will automatically detect and use those credentials.
+ To configure the AWS CLI:
-A user who creates resources in AWS will always retain administrative access to those resources, including any Kubernetes clusters created. It is recommended to create a dedicated [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) for Terraform purposes, ensuring that the resources are managed and owned by that user.
+ ```bash
+ aws configure
+ ```
-[Create access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` variables to use with the AWS CLI and `eksctl`
+ Enter your `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, region, and output format. These can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html).
-:::
+- For production environments, we recommend the use of a dedicated IAM user. Create [access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console, and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
#### Create an S3 bucket for Terraform state management
diff --git a/docs/self-managed/setup/deploy/amazon/assets/aws-ec2-arch.jpg b/docs/self-managed/setup/deploy/amazon/assets/aws-ec2-arch.jpg
new file mode 100644
index 00000000000..0ef72f02ab6
Binary files /dev/null and b/docs/self-managed/setup/deploy/amazon/assets/aws-ec2-arch.jpg differ
diff --git a/docs/self-managed/setup/deploy/amazon/assets/aws-ec2-arch.pdf b/docs/self-managed/setup/deploy/amazon/assets/aws-ec2-arch.pdf
new file mode 100644
index 00000000000..f53db5315da
Binary files /dev/null and b/docs/self-managed/setup/deploy/amazon/assets/aws-ec2-arch.pdf differ
diff --git a/docs/self-managed/setup/deploy/amazon/aws-ec2.md b/docs/self-managed/setup/deploy/amazon/aws-ec2.md
new file mode 100644
index 00000000000..8ffb1beba7c
--- /dev/null
+++ b/docs/self-managed/setup/deploy/amazon/aws-ec2.md
@@ -0,0 +1,344 @@
+---
+id: aws-ec2
+title: "Amazon EC2"
+description: "Learn how to install Camunda 8 on AWS EC2 instances."
+---
+
+This guide provides a detailed walkthrough for installing the Camunda 8 single JAR on AWS EC2 instances. It focuses on managed services by AWS and their cloud offering. Finally, you will verify that the connection to your Self-Managed Camunda 8 environment is working.
+
+This guide focuses on setting up the [orchestration cluster](/self-managed/reference-architecture/reference-architecture.md#orchestration-cluster-vs-web-modeler-and-console) for Camunda 8. The Web Modeler and Console are not covered in this manual deployment approach. These components are supported on Kubernetes and should be [deployed using Kubernetes](/self-managed/setup/install.md/#install-web-modeler).
+
+:::note Using other Cloud providers
+This guide is built around the available tools and services that AWS offers, but is not limited to AWS. The scripts and ideas included can be adjusted for any other cloud provider and use case.
+
+When using this guide with a different cloud provider, note that you will be responsible for configuring and maintaining the resulting infrastructure. Our support is limited to questions related to the guide itself, not to the specific tools and services of the chosen cloud provider.
+:::
+
+:::warning Cost management
+Following this guide will incur costs on your cloud provider account, namely for the EC2 instances and OpenSearch. More information can be found in the AWS [pricing calculator](https://calculator.aws/#/), as the total cost varies per region.
+
+To get an estimate, you can refer to this [example calculation](https://calculator.aws/#/estimate?id=8ce855e2d02d182c4910ec8b4ea2dbf42ea5fd1d), which can be further optimized to suit your specific use cases.
+:::
+
+## Architecture
+
+The architecture as depicted focuses on a standard deployment consisting of a three-node setup distributed over three [availability zones](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html) within an AWS region, as well as an OpenSearch domain spanning the same availability zones. The focus is on a highly available setup and redundancy in case a zone fails.
+
+
+
+
+_Infrastructure diagram for a 3 node EC2 architecture (click on the image to open the PDF version)_
+[![AWS EC2 Architecture](./assets/aws-ec2-arch.jpg)](./assets/aws-ec2-arch.pdf)
+
+The setup consists of:
+
+- [Virtual Private Cloud](https://docs.aws.amazon.com/vpc/latest/userguide/what-is-amazon-vpc.html) (VPC) is a logically isolated virtual network.
+ - a [Private Subnet](https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html), which does not have direct access to the internet and cannot be easily reached.
+ - three [EC2](https://aws.amazon.com/ec2/) instances using Ubuntu, one within each availability zone, which will run Camunda 8.
+ - a [managed OpenSearch](https://aws.amazon.com/what-is/opensearch/) cluster stretched over the three availability zones.
+ - a [Public Subnet](https://docs.aws.amazon.com/vpc/latest/userguide/configure-subnets.html), which allows direct access to the Internet via an [Internet Gateway](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Internet_Gateway.html).
+ - (optional) an [Application Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html) (ALB) is used to expose the WebUIs like Operate, Tasklist, and Connectors, as well as the REST API to the outside world. This is done using sticky sessions, as generally requests are distributed round-robin across all EC2 instances.
+ - (optional) a [Network Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/network/introduction.html) (NLB) is used to expose the gRPC endpoint of the Zeebe Gateway, in case external applications require it.
+ - (optional) a [Bastion Host](https://en.wikipedia.org/wiki/Bastion_host) to allow access to the private EC2 instances since they're not publicly exposed.
+ - Alternatively, utilize the [AWS Client VPN](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/what-is.html) instead to reach the private subnet within the VPC. The setup requires extra work and certificates, but can be set up by following the [getting started tutorial by AWS](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/cvpn-getting-started.html).
+ - a NAT Gateway that allows the private EC2 instances to reach the internet to download and update software packages. This cannot be used to access the EC2 instances.
+- [Security Groups](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html) to handle traffic flow to the VMs.
+- an [Internet Gateway](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Internet_Gateway.html) to allow traffic between the VPC and the Internet.
+
+Both types of subnets are distributed over three availability zones of a single AWS region, allowing for a highly available setup.
+
+:::note Single Deployment
+Alternatively, the same setup can run with a single AWS EC2 instance, but be aware that in case of a zone failure, the whole setup would be unreachable.
+:::
+
+## Requirements
+
+- An AWS account to create any resources within AWS.
+ - On a high level, permissions are required on the **ec2**, **iam**, **elasticloadbalancing**, **kms**, **logs**, and **es** level.
+ - For a more fine-grained view of the permissions, check this [example policy](https://github.com/camunda/camunda-deployment-references/blob/main/aws/ec2/example/policy.json).
+- Terraform (1.7+)
+- Unix-based operating system (OS) with SSH and SFTP
+ - Windows may be used with [Cygwin](https://www.cygwin.com/) or [Windows WSL](https://learn.microsoft.com/en-us/windows/wsl/install) but has not been tested
+
+### Considerations
+
+- The Optimize importer is not highly available and must only run once within the whole setup.
+
+### Outcome
+
+The outcome is a fully working Camunda orchestration cluster running in a high availability setup using AWS EC2 and utilizing a managed OpenSearch domain.
+The EC2 instances come with an extra disk meant for Camunda to ensure that the content is separated from the operating system.
+
+## 1. Create the required infrastructure
+
+:::note Terraform infrastructure example
+We do not recommend using the Terraform infrastructure below as a module, as we do not guarantee compatibility.
+Instead, we recommend extending or reusing elements of the Terraform example to ensure compatibility with your environment.
+:::
+
+### Download the reference architecture GitHub repository
+
+The provided reference architecture repository allows you to directly reuse and extend the existing Terraform example base. This sample implementation is flexible enough to extend to your own needs without the potential limitations of a Terraform module.
+
+```sh
+wget https://github.com/camunda/camunda-deployment-references/archive/refs/heads/main.zip
+```
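+
+Then extract the downloaded archive (this assumes the `unzip` utility is available; any zip extractor works):
+
+```sh
+unzip main.zip
+```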
+
+### Update the configuration files
+
+1. Navigate to the new directory:
+
+```sh
+cd camunda-deployment-references-main/aws/ec2/terraform
+```
+
+2. Edit the `variables.tf` file to customize the settings, such as the prefix for resource names and CIDR blocks:
+
+```hcl
+variable "prefix" {
+ default = "example"
+}
+
+variable "cidr_blocks" {
+ default = "10.0.1.0/24"
+}
+```
+
+3. In `config.tf`, configure a new Terraform backend by updating `backend "local"` to [AWS S3](https://developer.hashicorp.com/terraform/language/backend/s3) (or any other non-`local` backend that fits your organization).
+
+:::note
+`local` is meant for testing and development purposes. The state is saved locally and cannot easily be shared with colleagues. More information on alternatives can be found in the [Terraform documentation](https://developer.hashicorp.com/terraform/language/backend).
+:::
+
+### Configure the Terraform AWS provider
+
+1. Add the [Terraform AWS provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) in the `config.tf`:
+
+```hcl
+provider "aws" {}
+```
+
+This can be done via a simple script or manually:
+
+```sh
+echo 'provider "aws" {}' >> config.tf
+```
+
+:::note
+This is a current technical limitation, as the same files are used for testing. Terraform does not allow defining the provider twice.
+:::
+
+2. Configure authentication to allow the [AWS Terraform provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) to create resources in AWS. You must configure the provider with the proper credentials before using it. You can further change the region and other preferences and explore different authentication methods.
+
+There are several ways to authenticate the AWS provider:
+
+ - **Testing/development**: Use the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html) to configure access. Terraform will automatically default to AWS CLI configuration when present.
+ - **CI/CD**: Set the environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`, which can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html).
+ - **Enterprise grade security**: Use an [AWS IAM role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#assuming-an-iam-role).
+
+Ensure you have set the `AWS_REGION`, either as an environment variable or in the Terraform AWS provider, to deploy the infrastructure in your desired region. AWS resources are region-bound on creation.
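+
+For example, for the CI/CD option, the credentials and region might be provided as environment variables (the values below are placeholders):
+
+```sh
+# Placeholder values - use the credentials of your dedicated IAM user or role
+export AWS_ACCESS_KEY_ID=<your-access-key-id>
+export AWS_SECRET_ACCESS_KEY=<your-secret-access-key>
+export AWS_REGION=eu-central-1
+```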
+
+:::note Secret management
+We strongly recommend managing sensitive information using a secure secrets management solution like HashiCorp Vault. For details on how to inject secrets directly into Terraform via Vault, see the [Terraform Vault secrets injection guide](https://developer.hashicorp.com/terraform/tutorials/secrets/secrets-vault).
+:::
+
+### Initialize and deploy Terraform
+
+1. Initialize the Terraform working directory. This step downloads the necessary provider plugins:
+
+```sh
+terraform init
+```
+
+2. Plan the configuration files:
+
+```sh
+terraform plan -out infrastructure.plan # describe what will be created
+```
+
+3. After reviewing the plan, confirm and apply the changes:
+
+```sh
+terraform apply infrastructure.plan # apply the creation
+```
+
+The execution takes roughly 30 minutes. Around 25 minutes is solely for the creation of a managed highly available OpenSearch cluster.
+
+4. After the infrastructure is created, access the outputs defined in `outputs.tf` using `terraform output`.
+
+For example, to retrieve the OpenSearch endpoint:
+
+```sh
+terraform output aws_opensearch_domain
+```
+
+### Connect to remote machines via Bastion host (optional)
+
+The EC2 instances are not public and have to be reached via a Bastion host. Alternatively, utilize the [AWS Client VPN](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/what-is.html) to connect securely to a private VPC. This step is not described, as its setup requires specific manual user interaction.
+
+```sh
+export BASTION_HOST=$(terraform output -raw bastion_ip)
+# retrieves the first IP from the camunda_ips array
+export CAMUNDA_IP=$(terraform output -json camunda_ips | jq -r '.[0]')
+
+ssh -J admin@${BASTION_HOST} admin@${CAMUNDA_IP}
+```
+
+## 2. Deploy Camunda 8
+
+### Configure and run the installation script
+
+1. Navigate to the script directory:
+
+```sh
+cd camunda-deployment-references-main/aws/ec2/scripts
+```
+
+The script directory contains bash scripts that can be used to install and configure Camunda 8.
+
+2. Configure any script features using the following environment variables:
+
+   - `CLOUDWATCH_ENABLED`: The default is `false`. If set to `true`, the CloudWatch agent is installed on each EC2 instance, and Camunda logs and Prometheus metrics are exported to AWS CloudWatch.
+   - `SECURITY`: The default is `false`. If set to `true`, self-signed certificates are used to secure cluster communication, based on the procedure described in the [documentation](/self-managed/zeebe-deployment/security/secure-cluster-communication.md). This requires a manual prerequisite step, as described below in step 4.
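+
+   For example, to enable both features before running the installation scripts, you might export the variables as follows (a sketch; enable only what you need):
+
+   ```sh
+   export CLOUDWATCH_ENABLED=true
+   export SECURITY=true
+   ```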
+
+3. Configure any variables in the `camunda-install.sh` script to overwrite the default for Camunda and Java versions:
+
+ - `OPENJDK_VERSION`: The Temurin Java version.
+ - `CAMUNDA_VERSION`: The Camunda 8 version.
+ - `CAMUNDA_CONNECTORS_VERSION`: The Camunda 8 connectors version.
+
+ :::note
+   The above variables must be set in `camunda-install.sh`. They cannot be set as environment variables.
+ :::
+
+4. Execute the `SECURITY` script (optional):
+
+If `SECURITY` was enabled in step 2, execute the `generate-self-signed-cert-authority.sh` script to create a certificate authority.
+
+This certificate should be saved somewhere securely, as it will be required to upgrade or change configurations in an automated way. If the certificate is lost, recreate the certificate authority via the script, as well as all manually created client certificates.
+
+:::note Self-signed certificates for testing
+Self-signed certificates are recommended only for development and testing purposes. Check the [documentation](/self-managed/zeebe-deployment/security/secure-cluster-communication.md) on secure cluster communication to learn more about PEM certificates.
+:::
+
+5. Execute the `all-in-one-install.sh` script.
+
+This script installs all required dependencies. Additionally, it configures Camunda 8 to run in a highly available setup by using a managed OpenSearch instance.
+
+The script will pull all required IPs and other information from the Terraform state via Terraform outputs.
+
+During the first installation, you will be asked to confirm the connection to each EC2 instance by typing `yes`.
+
+### Connect and use Camunda 8
+
+The Application Load Balancer (ALB) and the Network Load Balancer (NLB) can be accessed via the following Terraform outputs:
+
+- `terraform output alb_endpoint`: Access Operate (or the Connectors instance on port `9090`). The ALB is designed for handling Web UIs, such as Operate, Tasklist, Optimize, and Connectors.
+- `terraform output nlb_endpoint`: Access the gRPC endpoint of the Zeebe Gateway. The NLB is intended for the gRPC endpoint because of the difference in protocols: the ALB focuses on HTTP, while the NLB handles TCP.
+
+The two endpoints above use the publicly assigned hostname of AWS. Add your domain via CNAME records or use [Route53](https://aws.amazon.com/route53/) to map to the load balancers, allowing you to easily enable SSL. This will require extra work in the Terraform blueprint, as it listens to HTTP by default.
+
+Alternatively, if you have decided not to expose your environment, you can use the jump host to access relevant services on your local machine via port-forwarding.
+
+For an enterprise grade solution, you can utilize the [AWS Client VPN](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/what-is.html) instead to reach the private subnet within the VPC. The setup requires extra work and certificates, described in the [getting started tutorial by AWS](https://docs.aws.amazon.com/vpn/latest/clientvpn-admin/cvpn-getting-started.html).
+
+The following can be executed from within the Terraform folder to bind the remote ports to your local machine:
+
+```sh
+export BASTION_HOST=$(terraform output -raw bastion_ip)
+# retrieves the first IP from the camunda_ips array
+export CAMUNDA_IP=$(terraform output -json camunda_ips | jq -r '.[0]')
+
+# 26500 - gRPC; 8080 - WebUI; 9090 - Connectors
+ssh -L 26500:${CAMUNDA_IP}:26500 -L 8080:${CAMUNDA_IP}:8080 -L 9090:${CAMUNDA_IP}:9090 admin@${BASTION_HOST}
+```
+
+### Turn off bastion host (optional)
+
+If you used the [bastion host](#turn-off-bastion-host-optional) for access, it can be turned off when no longer needed for direct access to the EC2 instances.
+
+To turn off the bastion host, set the `enable_jump_host` variable to `false` in the `variables.tf` file, and reapply Terraform.
+
+## 3. Verify connectivity to Camunda 8
+
+Using Terraform, you can obtain the HTTP endpoint of the Application Load Balancer and interact with Camunda through the [REST API](/apis-tools/camunda-api-rest/camunda-api-rest-overview.md).
+
+1. Navigate to the Terraform folder:
+
+```sh
+cd camunda-deployment-references-main/aws/ec2/terraform
+```
+
+2. Retrieve the Application Load Balancer output:
+
+```sh
+terraform output -raw alb_endpoint
+```
+
+3. Use the REST API to communicate with Camunda:
+
+Follow the example in the [REST API documentation](/apis-tools/camunda-api-rest/camunda-api-rest-authentication.md) to authenticate and retrieve the cluster topology.
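+
+For example, a minimal sketch that requests the cluster topology, assuming the REST API's `/v2/topology` endpoint is reachable through the ALB over plain HTTP and without additional authentication (add credentials and adjust the protocol if your setup requires it):
+
+```sh
+# Retrieve the ALB hostname from the Terraform outputs
+export ALB_ENDPOINT=$(terraform output -raw alb_endpoint)
+
+# Request the cluster topology
+curl "http://${ALB_ENDPOINT}/v2/topology"
+```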
+
+## Manage Camunda 8
+
+### Upgrade Camunda 8
+
+:::info Direct upgrade not supported
+Upgrading directly from a Camunda 8.6 release to 8.7 is not supported and cannot be performed.
+:::
+
+To update to a new patch release, the recommended approach is as follows (a minimal sketch follows the list):
+
+1. Remove the `jars` folder: This step ensures that outdated dependencies from previous versions are completely removed.
+2. Overwrite remaining files: Replace the existing files with those from the downloaded patch release package.
+3. Restart Camunda 8.
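+
+The following is a minimal sketch of these steps; `/opt/camunda` and `/tmp/camunda-patch` are hypothetical paths, so adjust them to your installation directory and extracted patch package:
+
+```sh
+# Hypothetical paths - adjust to your setup
+CAMUNDA_DIR=/opt/camunda
+PATCH_DIR=/tmp/camunda-patch
+
+rm -rf "${CAMUNDA_DIR}/jars"              # 1. remove outdated dependencies
+cp -a "${PATCH_DIR}/." "${CAMUNDA_DIR}/"  # 2. overwrite the remaining files
+# 3. restart Camunda 8, for example via your init system or service manager
+```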
+
+The update process can be automated using the `all-in-one-install.sh` script, which performs the following steps:
+
+- Detects an existing Camunda 8 installation.
+- Deletes the jars folder to clear outdated dependencies.
+- Overwrites the remaining files with the updated version.
+- Regenerates configuration files.
+- Restarts the application to apply the updates.
+
+### Monitoring
+
+Our default way of exposing metrics is in the Prometheus format. Consult the general [metrics documentation](/self-managed/zeebe-deployment/operations/metrics.md) to learn more about how to scrape Camunda 8.
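+
+As a quick check, you can query the metrics endpoint of an instance, for example from the bastion host or a port-forwarded session (a sketch; the `/actuator/prometheus` path is assumed from the default setup, and `CAMUNDA_IP` refers to the variable used in the connection steps above):
+
+```sh
+curl "http://${CAMUNDA_IP}:9600/actuator/prometheus"
+```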
+
+In an AWS environment, you can leverage CloudWatch not only for log collection but also for gathering [Prometheus metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ContainerInsights-Prometheus-metrics.html). It's important to note that while Camunda natively supports Grafana and Prometheus, integrating CloudWatch for metric visualization is possible but requires additional configuration.
+
+### Backups
+
+For general guidance on backups, consult the [documentation](/self-managed/operational-guides/backup-restore/backup-and-restore.md).
+
+With AWS as the chosen platform, you can use [S3](https://aws.amazon.com/s3/) for backups of both Zeebe and Elasticsearch.
+
+If you are using a managed OpenSearch domain instead, you should check out the [official documentation](https://docs.aws.amazon.com/opensearch-service/latest/developerguide/managedomains-snapshots.html) on creating backups and snapshots in OpenSearch.
+
+## Troubleshooting
+
+For general troubleshooting guidance, consult the [documentation](/self-managed/operational-guides/troubleshooting/troubleshooting.md).
+
+
+
+
diff --git a/docs/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.jpg b/docs/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.jpg
new file mode 100644
index 00000000000..862eacd4fe0
Binary files /dev/null and b/docs/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.jpg differ
diff --git a/docs/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.pdf b/docs/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.pdf
new file mode 100644
index 00000000000..5506e354c8c
Binary files /dev/null and b/docs/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.pdf differ
diff --git a/docs/self-managed/setup/deploy/amazon/openshift/terraform-setup.md b/docs/self-managed/setup/deploy/amazon/openshift/terraform-setup.md
new file mode 100644
index 00000000000..150827308ef
--- /dev/null
+++ b/docs/self-managed/setup/deploy/amazon/openshift/terraform-setup.md
@@ -0,0 +1,384 @@
+---
+id: terraform-setup
+title: "Deploy a ROSA HCP Cluster with Terraform"
+description: "Deploy Red Hat OpenShift on AWS using a Terraform module for a quick Camunda 8 setup."
+---
+
+
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+This guide provides a detailed tutorial for deploying a [Red Hat OpenShift on AWS (ROSA) cluster with Hosted Control Plane (HCP)](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4/html-single/architecture/index#architecture-overview) capabilities. It is specifically tailored for deploying Camunda 8 using Terraform, a widely-used Infrastructure as Code (IaC) tool.
+
+We recommend this guide for building a robust and sustainable infrastructure. However, if you are looking for a quicker trial or proof of concept, or if your needs aren't fully met by our module, consider following the official [ROSA Quickstart Guide](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4/html/getting_started/rosa-quickstart-guide-ui#rosa-quickstart-guide-ui).
+
+This guide aims to help you leverage IaC to streamline and reproduce your cloud infrastructure setup. While it covers the essentials for deploying a ROSA HCP cluster, for more advanced use cases, please refer to the official [Red Hat OpenShift on AWS Documentation](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4).
+
+:::tip
+
+If you are completely new to Terraform and the idea of IaC, read through the [Terraform IaC documentation](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code) and give their [interactive quick start](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code#quick-start) a try for a basic understanding.
+
+:::
+
+## Requirements
+
+- A [Red Hat Account](https://www.redhat.com/) to create the Red Hat OpenShift cluster.
+- An [AWS account](https://docs.aws.amazon.com/accounts/latest/reference/accounts-welcome.html) to create any resources within AWS.
+- [AWS CLI (2.17+)](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html), a CLI tool for creating AWS resources.
+- [Terraform (1.9+)](https://developer.hashicorp.com/terraform/downloads)
+- [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster.
+- [ROSA CLI](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4/html/getting_started/rosa-quickstart-guide-ui#rosa-getting-started-environment-setup_rosa-quickstart-guide-ui) to interact with the cluster.
+- [jq (1.7+)](https://jqlang.github.io/jq/download/) to interact with some Terraform variables.
+- This guide uses GNU/Bash for all the shell commands listed.
+
+### Considerations
+
+This setup provides a foundational starting point for working with Camunda 8, though it is not optimized for peak performance. It serves as a solid initial step in preparing a production environment by leveraging [Infrastructure as Code (IaC) tools](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code).
+
+Terraform can seem complex at first. If you're interested in understanding what each component does, consider trying out the [Red Hat OpenShift on AWS UI-based tutorial](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4/html/tutorials/getting-started-with-rosa#creating-account-wide-roles). This guide will show you what resources are created and how they interact with each other.
+
+If you require managed services for PostgreSQL Aurora or OpenSearch, you can refer to the definitions provided in the [EKS setup with Terraform](../amazon-eks/terraform-setup.md) guide. However, please note that these configurations may need adjustments to fit your specific requirements and have not been tested. By default, this guide assumes that the database services (PostgreSQL and Elasticsearch) integrated into the default chart will be used.
+
+For testing Camunda 8 or developing against it, you might consider signing up for our [SaaS offering](https://camunda.com/platform/). If you already have a Red Hat OpenShift cluster on AWS, you can skip ahead to the [Helm setup guide](/self-managed/setup/deploy/openshift/redhat-openshift.md).
+
+To keep this guide concise, we provide links to additional documentation covering best practices, allowing you to explore each topic in greater depth.
+
+:::warning Cost management
+
+Following this guide will incur costs on your cloud provider account and your Red Hat account, specifically for the managed OpenShift service, OpenShift worker nodes running in EC2, the hosted control plane, Elastic Block Storage (EBS), and Route 53. For more details, refer to [ROSA AWS pricing](https://aws.amazon.com/rosa/pricing/) and the [AWS Pricing Calculator](https://calculator.aws/#/) as total costs vary by region.
+
+:::
+
+### Variants
+
+Unlike the [EKS Terraform setup](../amazon-eks/terraform-setup.md), we currently support only one main variant of this setup:
+
+- The **standard installation** uses a username and password connection for Camunda components (or relies solely on network isolation for certain components). This option is straightforward and easier to implement, making it ideal for environments where simplicity and rapid deployment are priorities, or where network isolation provides adequate security.
+
+- The second variant, **IRSA** (IAM Roles for Service Accounts), may work but has not been tested. If you’re interested in setting it up, please refer to the EKS guide as a foundational resource.
+
+### Outcome
+
+
+
+
+_Infrastructure diagram for a single region ROSA setup (click on the image to open the PDF version)_
+[![Infrastructure Diagram ROSA Single-Region](./assets/rosa-single-region.jpg)](./assets/rosa-single-region.pdf)
+
+Following this tutorial and steps will result in:
+
+- A [Red Hat OpenShift with Hosted Control Plane](https://www.redhat.com/en/topics/containers/what-are-hosted-control-planes#rosa-with-hcp) cluster running the latest ROSA version with six nodes ready for Camunda 8 installation.
+- The [EBS CSI driver](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) is installed and configured, which is used by the Camunda 8 Helm chart to create [persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/).
+
+## 1. Configure AWS and initialize Terraform
+
+### Terraform prerequisites
+
+To manage the infrastructure for Camunda 8 on AWS using Terraform, we need to set up Terraform's backend to store the state file remotely in an S3 bucket. This ensures secure and persistent storage of the state file.
+
+:::note
+Advanced users may want to handle this part differently and use a different backend. The backend setup provided is an example for new users.
+:::
+
+#### Set up AWS authentication
+
+The [AWS Terraform provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) is required to create resources in AWS. Before you can use the provider, you must authenticate it using your AWS credentials.
+
+:::caution Ownership of the created resources
+
+A user who creates resources in AWS will always retain administrative access to those resources, including any Kubernetes clusters created. It is recommended to create a dedicated [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) for Terraform purposes, ensuring that the resources are managed and owned by that user.
+
+:::
+
+You can further change the region and other preferences and explore different [authentication](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) methods:
+
+- For development or testing purposes you can use the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html). If you have configured your AWS CLI, Terraform will automatically detect and use those credentials.
+ To configure the AWS CLI:
+
+ ```bash
+ aws configure
+ ```
+
+ Enter your `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, region, and output format. These can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html).
+
+- For production environments, we recommend the use of a dedicated IAM user. Create [access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console, and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
+
+#### Create an S3 bucket for Terraform state management
+
+Before setting up Terraform, you need to create an S3 bucket that will store the state file. This is important for collaboration and to prevent issues like state file corruption.
+
+To start, set the region as an environment variable upfront to avoid repeating it in each command:
+
+```bash
+export AWS_REGION=<your-region>
+```
+
+Replace `<your-region>` with your chosen AWS region (for example, `eu-central-1`).
+
+Now, follow these steps to create the S3 bucket with versioning enabled:
+
+1. Open your terminal and ensure the AWS CLI is installed and configured.
+
+1. Run the following command to create an S3 bucket for storing your Terraform state. Make sure to use a unique bucket name and set the `AWS_REGION` environment variable beforehand:
+
+ ```bash
+ # Replace "my-rosa-tf-state" with your unique bucket name
+ export S3_TF_BUCKET_NAME="my-rosa-tf-state"
+
+ aws s3api create-bucket --bucket "$S3_TF_BUCKET_NAME" --region "$AWS_REGION" \
+ --create-bucket-configuration LocationConstraint="$AWS_REGION"
+ ```
+
+1. Enable versioning on the S3 bucket to track changes and protect the state file from accidental deletions or overwrites:
+
+ ```bash
+ aws s3api put-bucket-versioning --bucket "$S3_TF_BUCKET_NAME" --versioning-configuration Status=Enabled --region "$AWS_REGION"
+ ```
+
+1. Secure the bucket by blocking public access:
+
+ ```bash
+ aws s3api put-public-access-block --bucket "$S3_TF_BUCKET_NAME" --public-access-block-configuration \
+ "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" --region "$AWS_REGION"
+ ```
+
+1. Verify versioning is enabled on the bucket:
+
+ ```bash
+ aws s3api get-bucket-versioning --bucket "$S3_TF_BUCKET_NAME" --region "$AWS_REGION"
+ ```
+
+This S3 bucket will now securely store your Terraform state files with versioning enabled.
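+
+Note that new S3 buckets are encrypted with SSE-S3 by default. If you want to set the encryption configuration explicitly, a minimal sketch looks like this:
+
+```bash
+aws s3api put-bucket-encryption --bucket "$S3_TF_BUCKET_NAME" --region "$AWS_REGION" \
+  --server-side-encryption-configuration '{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"AES256"}}]}'
+```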
+
+#### Create a `config.tf` with the following setup
+
+Once the S3 bucket is created, configure your `config.tf` file to use the S3 backend for managing the Terraform state:
+
+```hcl reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/config.tf
+```
+
+#### Initialize Terraform
+
+Once your `config.tf` and authentication are set up, you can initialize your Terraform project. The previous steps configured a dedicated S3 bucket (`S3_TF_BUCKET_NAME`) to store your state; the following step defines the bucket key under which the state file will be stored.
+
+Configure the backend and download the necessary provider plugins:
+
+```bash
+export S3_TF_BUCKET_KEY="camunda-terraform/terraform.tfstate"
+
+echo "Storing terraform state in s3://$S3_TF_BUCKET_NAME/$S3_TF_BUCKET_KEY"
+
+terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY"
+```
+
+Terraform will connect to the S3 bucket to manage the state file, ensuring remote and persistent storage.
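+
+After your first `terraform apply`, you can optionally confirm that the state object exists in the bucket, for example:
+
+```bash
+aws s3 ls "s3://$S3_TF_BUCKET_NAME/$S3_TF_BUCKET_KEY" --region "$AWS_REGION"
+```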
+
+### OpenShift cluster module setup
+
+This module sets up the foundational configuration for ROSA HCP and Terraform usage.
+
+We will leverage [Terraform modules](https://developer.hashicorp.com/terraform/language/modules), which allow us to abstract resources into reusable components, simplifying infrastructure management.
+
+The [Camunda-provided module](https://github.com/camunda/camunda-tf-rosa) is publicly available and serves as a robust starting point for deploying a Red Hat OpenShift cluster on AWS using a Hosted Control Plane. It is highly recommended to review this module before implementation to understand its structure and capabilities.
+
+Please note that this module is based on the official [ROSA HCP Terraform module documentation](https://docs.openshift.com/rosa/rosa_hcp/terraform/rosa-hcp-creating-a-cluster-quickly-terraform.html). It is presented as an example for running Camunda 8 in ROSA. For advanced use cases or custom setups, we encourage you to use the official module, which includes vendor-supported features.
+
+#### Set up ROSA authentication
+
+To set up a ROSA cluster, certain prerequisites must be configured on your AWS account. Below is an excerpt from the [official ROSA planning prerequisites checklist](https://docs.openshift.com/rosa/rosa_planning/rosa-cloud-expert-prereq-checklist.html):
+
+1. Verify that your AWS account is correctly configured:
+
+ ```bash
+ aws sts get-caller-identity
+ ```
+
+1. Check whether the ELB service role exists. If you have never created a load balancer in your AWS account, the role for Elastic Load Balancing (ELB) might not exist yet (a combined check-and-create sketch is also shown after this list):
+
+ ```bash
+ aws iam get-role --role-name "AWSServiceRoleForElasticLoadBalancing"
+ ```
+
+ If it doesn't exist, create it:
+
+ ```bash
+ aws iam create-service-linked-role --aws-service-name "elasticloadbalancing.amazonaws.com"
+ ```
+
+1. Create a Red Hat Hybrid Cloud Console account if you don’t already have one: [Red Hat Hybrid Cloud Console](https://console.redhat.com/).
+
+1. Enable ROSA on your AWS account via the [AWS Console](https://console.aws.amazon.com/rosa/).
+
+1. Enable HCP ROSA on [AWS Marketplace](https://docs.openshift.com/rosa/cloud_experts_tutorials/cloud-experts-rosa-hcp-activation-and-account-linking-tutorial.html):
+
+ - Navigate to the ROSA console: [AWS ROSA Console](https://console.aws.amazon.com/rosa).
+ - Choose **Get started**.
+ - On the **Verify ROSA prerequisites** page, select **I agree to share my contact information with Red Hat**.
+ - Choose **Enable ROSA**.
+
+ **Note**: Only a single AWS account can be associated with a Red Hat account for service billing.
+
+1. Install the ROSA CLI from the [OpenShift AWS Console](https://console.redhat.com/openshift/downloads#tool-rosa).
+
+1. To get an API token, go to the [OpenShift Cluster Management API Token](https://console.redhat.com/openshift/token/rosa) page, click **Load token**, and save it. Use the token to log in with the ROSA CLI:
+
+ ```bash
+ export RHCS_TOKEN=""
+ rosa login --token="$RHCS_TOKEN"
+
+ # Verify the login
+ rosa whoami
+ ```
+
+1. Verify your AWS quotas:
+
+ ```bash
+ rosa verify quota --region="$AWS_REGION"
+ ```
+
+ **Note**: This may fail due to organizational policies.
+
+1. Create the required account roles:
+
+ ```bash
+ rosa create account-roles --mode auto
+ ```
+
+1. If your AWS quotas are insufficient, consult the following:
+
+ - [Provisioned AWS Infrastructure](https://docs.openshift.com/rosa/rosa_planning/rosa-sts-aws-prereqs.html#rosa-aws-policy-provisioned_rosa-sts-aws-prereqs)
+ - [Required AWS Service Quotas](https://docs.openshift.com/rosa/rosa_planning/rosa-sts-required-aws-service-quotas.html#rosa-sts-required-aws-service-quotas)
+
+1. Ensure the `oc` CLI is installed. If it’s not already installed, follow the [official ROSA oc installation guide](https://docs.openshift.com/rosa/cli_reference/openshift_cli/getting-started-cli.html#cli-getting-started):
+
+ ```bash
+ rosa verify openshift-client
+ ```
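+
+As mentioned in step 2, the ELB service-linked role check and creation can be combined into one idempotent snippet; this is a sketch using the same AWS CLI calls as above:
+
+```bash
+# Create the ELB service-linked role only if it does not already exist
+if ! aws iam get-role --role-name "AWSServiceRoleForElasticLoadBalancing" >/dev/null 2>&1; then
+  aws iam create-service-linked-role --aws-service-name "elasticloadbalancing.amazonaws.com"
+fi
+```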
+
+#### Set up the ROSA cluster module
+
+1. Create a `cluster.tf` file in the same directory as your `config.tf` file.
+2. Add the following content to your newly created `cluster.tf` file to utilize the provided module:
+
+ :::note Configure your cluster
+
+   Customize the cluster name and availability zones with the values you previously retrieved from the Red Hat Console.
+   Additionally, provide a secure username and password for the cluster administrator.
+
+   Ensure that the environment variable `RHCS_TOKEN` is set with your [OpenShift Cluster Management API Token](https://console.redhat.com/openshift/token/rosa).
+
+ By default, this cluster will be accessible from the internet. If you prefer to restrict access, please refer to the official documentation of the module.
+
+ :::
+
+ ```hcl reference
+ https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/cluster.tf
+ ```
+
+ :::caution Camunda Terraform module
+
+ This ROSA module is based on the [official Red Hat Terraform module for ROSA HCP](https://registry.terraform.io/modules/terraform-redhat/rosa-hcp/rhcs/latest). Please be aware of potential differences and choices in implementation between this module and the official one.
+
+ We invite you to consult the [Camunda ROSA module documentation](https://github.com/camunda/camunda-tf-rosa/blob/v2.0.0/modules/rosa-hcp/README.md) for more information.
+
+ :::
+
+3. [Initialize](#initialize-terraform) Terraform for this module using the following Terraform command:
+
+ ```bash
+ terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY"
+ ```
+
+4. Configure user access to the cluster. By default, the user who creates the OpenShift cluster has administrative access. If you want to grant access to other users, follow the [Red Hat documentation for granting admin rights to users](https://docs.openshift.com/rosa/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin-rights.html) when the cluster is created.
+
+5. Customize the cluster setup. The module offers various input options that allow you to further customize the cluster configuration. For a comprehensive list of available options and detailed usage instructions, refer to the [ROSA module documentation](https://github.com/camunda/camunda-tf-rosa/blob/v2.0.0/modules/rosa-hcp/README.md).
+
+### Define outputs
+
+**Terraform** allows you to define outputs, which make it easier to retrieve important values generated during execution, such as cluster endpoints and other necessary configurations for Helm setup.
+
+Each module that you have previously set up contains an output definition at the end of the file. You can adjust them to your needs.
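+
+Once the infrastructure has been created (see the execution step below), you can print these values at any time with `terraform output`. This is a sketch; the exact output names depend on the definitions in your module files:
+
+```bash
+# List all outputs of the current state
+terraform output
+
+# Print a single output as a raw string (replace the name with one defined in your configuration)
+terraform output -raw cluster_name
+```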
+
+### Execution
+
+:::note Secret management
+
+We strongly recommend managing sensitive information (for example, the OpenSearch or Aurora username and password) using a secure secrets management solution like HashiCorp Vault. For details on how to inject secrets directly into Terraform via Vault, see the [Terraform Vault Secrets Injection Guide](https://developer.hashicorp.com/terraform/tutorials/secrets/secrets-vault).
+
+:::
+
+1. Open a terminal in the created Terraform folder where `config.tf` and other `.tf` files are.
+
+2. Plan the configuration files:
+
+ ```bash
+ terraform plan -out cluster.plan # describe what will be created
+ ```
+
+3. After reviewing the plan, you can confirm and apply the changes.
+
+ ```bash
+ terraform apply cluster.plan # apply the creation
+ ```
+
+Terraform will now create the OpenShift cluster with all the necessary configurations. The completion of this process may require approximately 20-30 minutes for each component.
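+
+Once the apply completes, you can optionally cross-check the cluster with the ROSA CLI. This is a sketch, assuming the cluster name you configured in `cluster.tf` is exported as `CLUSTER_NAME`:
+
+```bash
+# List the ROSA clusters visible to your Red Hat account
+rosa list clusters
+
+# Show the details and state of the newly created cluster
+rosa describe cluster --cluster "$CLUSTER_NAME"
+```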
+
+### Reference files
+
+Depending on the installation path you have chosen, you can find the reference files used on this page:
+
+- **Standard installation:** [Reference Files](https://github.com/camunda/camunda-deployment-references/tree/feature/openshift-ra-standard/aws/rosa-hcp/camunda-versions/8.7)
+
+## 2. Preparation for Camunda 8 installation
+
+### Access the created OpenShift cluster
+
+You can access the created OpenShift cluster using the following steps:
+
+Set up the required environment variables:
+
+```shell
+export CLUSTER_NAME="$(terraform console <<
+
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
Red Hat OpenShift, a Kubernetes distribution maintained by [Red Hat](https://www.redhat.com/en/technologies/cloud-computing/openshift), provides options for both managed and on-premises hosting.
-:::note
-Deploying Camunda 8 on Red Hat OpenShift is achievable using Helm, given the appropriate configurations. However, it's important to note that the [Security Context Constraints (SCCs)](#security-context-constraints-sccs) and [Routes](./redhat-openshift.md?current-ingress=openshift-routes#using-openshift-routes) configurations might require slight deviations from the guidelines provided in the [general Helm deployment guide](/self-managed/setup/install.md).
-:::
+Deploying Camunda 8 on Red Hat OpenShift is supported using Helm, given the appropriate configurations.
+
+However, it's important to note that the [Security Context Constraints (SCCs)](#security-context-constraints-sccs) and [Routes](./redhat-openshift.md?current-ingress=openshift-routes#using-openshift-routes) configurations might require slight deviations from the guidelines provided in the [general Helm deployment guide](/self-managed/setup/install.md).
## Cluster Specification
When deploying Camunda 8 on an OpenShift cluster, the cluster specification should align with your specific requirements and workload characteristics. Here's a suggested configuration to begin with:
-- **Instance type:** 4 vCPUs (x86_64, >3.1 GHz), 16 GiB Memory (for example, [m5.xlarge on AWS](https://aws.amazon.com/en/ebs/general-purpose/))
+- **Instance type:** 4 vCPUs (x86_64, >3.1 GHz), 16 GiB Memory (for example, [m7i.xlarge on AWS](https://aws.amazon.com/en/ebs/general-purpose/))
- **Number of dedicated nodes:** 4
- **Volume type:** SSD volumes (with between 1000 and 3000 IOPS per volume, and a throughput of 1,000 MB/s per volume, for instance, [gp3 on AWS](https://aws.amazon.com/en/ebs/general-purpose/))
+If you need to set up an OpenShift cluster on a cloud provider, we recommend our [guide to deploying a ROSA cluster](/self-managed/setup/deploy/amazon/openshift/terraform-setup.md).
+
### Supported Versions
We conduct testing and ensure compatibility against the following OpenShift versions:
| OpenShift Version | [End of Support Date](https://access.redhat.com/support/policy/updates/openshift) |
| ----------------- | --------------------------------------------------------------------------------- |
+| 4.17.x | June 27, 2025 |
| 4.16.x | December 27, 2025 |
| 4.15.x | August 27, 2025 |
| 4.14.x | May 1, 2025 |
-| 4.13.x | November 17, 2024 |
-:::caution
+:::caution Version compatibility
+
Camunda 8 supports OpenShift versions in the Red Hat General Availability, Full Support, and Maintenance Support life cycle phases. For more information, refer to the [Red Hat OpenShift Container Platform Life Cycle Policy](https://access.redhat.com/support/policy/updates/openshift).
+
:::
-## Deploying Camunda 8 in OpenShift
+## Requirements
-Depending on your OpenShift cluster's Security Context Constraints (SCCs) configuration, the deployment process may vary.
+- [Helm (3.16+)](https://helm.sh/docs/intro/install/)
+- [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster.
+- [jq (1.7+)](https://jqlang.github.io/jq/download/) to interact with some variables.
+- [GNU envsubst](https://www.gnu.org/software/gettext/manual/html_node/envsubst-Invocation.html) to generate manifests.
+- [oc (version supported by your OpenShift)](https://docs.openshift.com/container-platform/4.17/cli_reference/openshift_cli/getting-started-cli.html) to interact with OpenShift.
+- A namespace to host the Camunda Platform; in this guide, we will reference `camunda` as the target namespace.
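+
+To quickly confirm that the required tools are available and to create the target namespace, you can run something like the following (shown with `oc`; `kubectl create namespace camunda` is an equivalent alternative):
+
+```bash
+# Check the required CLI tools
+helm version
+kubectl version --client
+jq --version
+envsubst --version
+oc version --client
+
+# Create the namespace that will host the Camunda platform (skip if it already exists)
+oc new-project camunda
+```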
-
-
+## Deploy Camunda 8 via Helm charts
+
+### Configure your deployment
+
+Start by creating a `values.yml` file to store the configuration for your environment.
+This file will contain key-value pairs that will be substituted using `envsubst`.
+Throughout this guide, you will add and merge values into this file to configure your deployment to fit your needs.
+
+You can find a reference example of this file here:
+
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/base.yml
+```
+
+:::warning Merging YAML files
+
+This guide references multiple configuration files that need to be merged into a single YAML file. Be cautious to avoid duplicate keys when merging the files. Additionally, pay close attention when copying and pasting YAML content. Ensure that the separator notation `---` does not inadvertently split the configuration into multiple documents.
+
+We strongly recommend double-checking your YAML file before applying it. You can use tools like [yamllint.com](https://www.yamllint.com/) or the [YAML Lint CLI](https://github.com/adrienverge/yamllint) if you prefer not to share your information online.
+
+:::
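+
+For example, with the [YAML Lint CLI](https://github.com/adrienverge/yamllint) installed, a quick local check of the merged file could look like this:
+
+```bash
+# Report syntax problems and duplicate keys in the merged configuration
+yamllint values.yml
+```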
+
+#### Configuring the Ingress
+
+Before exposing services outside the cluster, we need an Ingress component. Here's how you can configure it:
+
+
+
+
+
+[Routes](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html) expose services externally by linking a URL to a service within the cluster. OpenShift supports both the [standard Kubernetes Ingress and routes](https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html), giving cluster users the flexibility to choose.
+
+Routes exist because their specification predates Ingress. Their functionality also differs from Ingress; for example, unlike Ingress, routes don't allow multiple services to be linked to a single route or the use of paths.
+
+To use these routes for the Zeebe Gateway, configure them through the chart's Ingress settings as well.
+
+#### Setting up the application domain for Camunda 8
+
+The route created by OpenShift will use a domain to provide access to the platform. By default, you can use the OpenShift applications domain, but any other domain supported by the router can also be used.
+
+To retrieve the OpenShift applications domain (used as an example here), run the following command:
+
+```bash
+export OPENSHIFT_APPS_DOMAIN=$(oc get ingresses.config.openshift.io cluster -o jsonpath='{.spec.domain}')
+```
+
+Next, define the route domain that will be used for the Camunda 8 deployment. For example:
+
+```bash
+export DOMAIN_NAME="camunda.$OPENSHIFT_APPS_DOMAIN"
+
+echo "Camunda 8 will be reachable from $DOMAIN_NAME"
+```
+
+If you choose to use a custom domain instead, ensure it is supported by your router configuration and replace the example domain with your desired domain. For more details on configuring custom domains in OpenShift, refer to the official [custom domain OpenShift documentation](https://docs.openshift.com/dedicated/applications/deployments/osd-config-custom-domains-applications.html).
+
+#### Checking if HTTP/2 is enabled
+
+As the Zeebe Gateway also uses `gRPC` (which relies on `HTTP/2`), [HTTP/2 Ingress Connectivity must be enabled](https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress).
+
+To check if HTTP/2 is already enabled on your OpenShift cluster, run the following command:
+
+```bash
+oc get ingresses.config/cluster -o json | jq '.metadata.annotations."ingress.operator.openshift.io/default-enable-http2"'
+```
+
+Alternatively, if you use a dedicated IngressController for the deployment:
+
+```bash
+# List your IngressControllers
+oc -n openshift-ingress-operator get ingresscontrollers
+
+# Replace <ingress-controller-name> with your IngressController name
+oc -n openshift-ingress-operator get ingresscontrollers/<ingress-controller-name> -o json | jq '.metadata.annotations."ingress.operator.openshift.io/default-enable-http2"'
+```
+
+- If the output is `"true"`, it means HTTP/2 is enabled.
+- If the output is `null` or empty, HTTP/2 is not enabled.
+
+
+ Enable HTTP/2
+
+If HTTP/2 is not enabled, you can enable it by running the following command:
+
+**IngressController configuration:**
+
+```bash
+oc -n openshift-ingress-operator annotate ingresscontrollers/<ingress-controller-name> ingress.operator.openshift.io/default-enable-http2=true
+```
+
+**Global cluster configuration:**
+
+```bash
+oc annotate ingresses.config/cluster ingress.operator.openshift.io/default-enable-http2=true
+```
+
+This will add the necessary annotation to [enable HTTP/2 for Ingress in your OpenShift cluster](https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress) globally on the cluster.
+
+
+
+#### Configure Route TLS
+
+Additionally, the Zeebe Gateway should be configured to use an encrypted connection with TLS. In OpenShift, the connection from HAProxy to the Zeebe Gateway service can use HTTP/2 only for re-encryption or pass-through routes, and not for edge-terminated or insecure routes.
-### With restrictive SCCs
+1. **Core Pod:** two [TLS secrets](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) for the Zeebe Gateway are required, one for the **service** and the other one for the **route**:
+  - The first TLS secret is issued to the Zeebe Gateway service name. It must use the [PKCS #8 syntax](https://en.wikipedia.org/wiki/PKCS_8) or [PKCS #1 syntax](https://en.wikipedia.org/wiki/PKCS_1), as Zeebe only supports these formats, and is referenced as `camunda-platform-internal-service-certificate`. This certificate is also used by other components, such as Operate and Tasklist.
+
+ In the example below, a TLS certificate is generated for the Zeebe Gateway service with an [annotation](https://docs.openshift.com/container-platform/latest/security/certificates/service-serving-certificate.html). The generated certificate will be in the form of a secret.
+
+ Another option is [Cert Manager](https://docs.openshift.com/container-platform/latest/security/cert_manager_operator/index.html). For more details, review the [OpenShift documentation](https://docs.openshift.com/container-platform/latest/networking/routes/secured-routes.html#nw-ingress-creating-a-reencrypt-route-with-a-custom-certificate_secured-routes).
+
+
+ PKCS #8, PKCS #1 syntax
+
+ > PKCS #1 private key encoding. PKCS #1 produces a PEM block that contains the private key algorithm in the header and the private key in the body. A key that uses this can be recognised by its BEGIN RSA PRIVATE KEY or BEGIN EC PRIVATE KEY header. NOTE: This encoding is not supported for Ed25519 keys. Attempting to use this encoding with an Ed25519 key will be ignored and default to PKCS #8.
+
+ > PKCS #8 private key encoding. PKCS #8 produces a PEM block with a static header and both the private key algorithm and the private key in the body. A key that uses this encoding can be recognised by its BEGIN PRIVATE KEY header.
+
+  [PKCS #1, PKCS #8 syntax definition from cert-manager](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.PrivateKeyEncoding)
+
+
+
+  - The second TLS secret is used on the exposed route and is referenced as `camunda-platform-external-certificate`. For example, this would be the same TLS secret used for Ingress (a sketch of how such a secret can be created is shown after this list). We also configure the Zeebe Gateway Ingress to create a [Re-encrypt Route](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html#nw-ingress-creating-a-route-via-an-ingress_route-configuration).
+
+ To configure a Zeebe cluster securely, it's essential to set up a secure communication configuration between pods:
+
+ - We enable gRPC ingress for the Core pod, which sets up a secure proxy that we'll use to communicate with the Zeebe cluster. To avoid conflicts with other services, we use a specific domain (`zeebe-$DOMAIN_NAME`) for the gRPC proxy, different from the one used by other services (`$DOMAIN_NAME`). We also note that the port used for gRPC is `443`.
+
+ - We mount the **Service Certificate Secret** (`camunda-platform-internal-service-certificate`) to the Core pod and configure a secure TLS connection.
+
+ Update your `values.yml` file with the following:
+
+ ```yaml reference
+ https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/core-route.yml
+ ```
+
+ The actual configuration properties can be reviewed:
+
+ - [in the Operate configuration documentation](/self-managed/operate-deployment/operate-configuration.md#zeebe-broker-connection),
+ - [in the Tasklist configuration documentation](/self-managed/tasklist-deployment/tasklist-configuration.md#zeebe-broker-connection),
+ - [in the Zeebe Gateway configuration documentation](/self-managed/zeebe-deployment/configuration/gateway.md).
+
+2. **Connectors:** update your `values.yml` file with the following:
+
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/connectors-route.yml
+```
+
+The actual configuration properties can be reviewed [in the Connectors configuration documentation](/self-managed/connectors-deployment/connectors-configuration.md#zeebe-broker-connection).
+
+3. Configure all other applications running inside the cluster and connecting to the Zeebe Gateway to also use TLS.
+
+4. Set up the global configuration to enable the single Ingress definition with the host. Update your configuration file as shown below:
+
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/domain.yml
+```
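+
+As referenced above, the route certificate (`camunda-platform-external-certificate`) is a regular TLS secret. If you manage the certificate files yourself, a hedged sketch of creating it looks like this (the file paths are placeholders); the service-serving certificate generated through the annotation can be inspected the same way:
+
+```bash
+# Example only: create the external route certificate from your own certificate files
+oc -n camunda create secret tls camunda-platform-external-certificate \
+  --cert=path/to/tls.crt --key=path/to/tls.key
+
+# Inspect the service-serving certificate created via the service annotation
+oc -n camunda get secret camunda-platform-internal-service-certificate
+```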
+
+
+
+
+
+
+
+
+[Routes](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html) serve as OpenShift's default Ingress implementation.
+
+If you find that its features aren't suitable for your needs, or if you prefer to use a Kubernetes-native Ingress controller, such as the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx), [you have that option](https://www.redhat.com/en/blog/a-guide-to-using-routes-ingress-and-gateway-apis-in-kubernetes-without-vendor-lock-in).
+
+For guidance on installing an Ingress controller, you can refer to the [Ingress Setup documentation](/self-managed/setup/guides/ingress-setup.md).
+
+:::note Difference between ingress-nginx and NGINX Ingress
+
+Do not confuse the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx) with the [NGINX Ingress Controller](https://www.redhat.com/en/blog/using-nginx-ingress-controller-red-hat-openshift) that is endorsed by Red Hat for usage with OpenShift. Despite very similar names, they are two different products.
+
+If you decide to use the Red Hat endorsed [NGINX Ingress Controller](https://www.redhat.com/en/blog/using-nginx-ingress-controller-red-hat-openshift), you will need additional adjustments to the Camunda 8 Ingress objects and to the NGINX Ingress Controller itself to make `gRPC` and `HTTP/2` connections work. In that case, refer to the [example and the prerequisites](https://github.com/nginxinc/kubernetes-ingress/blob/main/examples/ingress-resources/grpc-services/README.md).
+
+:::
+
+
+
+If you do not have a domain name or do not intend to use one for your Camunda 8 deployment, external access to Camunda 8 web endpoints from outside the OpenShift cluster will not be possible.
+
+However, you can use `kubectl port-forward` to access the Camunda platform without a domain name or Ingress configuration. For more information, refer to the [kubectl port-forward documentation](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_port-forward/).
+
+To make this work, you will need to configure the deployment to reference `localhost` with the forwarded port. Update your `values.yml` file with the following:
+
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/no-domain.yml
+```
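+
+As a sketch (service names and ports vary with your release name and chart version), forwarding a service locally could look like this:
+
+```bash
+# List the services deployed by the Helm chart
+kubectl get services -n camunda
+
+# Forward a chosen service to localhost; replace the service name and ports
+# with the values shown by the command above
+kubectl port-forward services/<service-name> 8080:8080 -n camunda
+```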
+
+
+
+
+#### Configuring the Security Context Constraints
+
+Depending on your OpenShift cluster's Security Context Constraints (SCCs) configuration, the deployment process may vary.
By default, OpenShift employs more restrictive SCCs. The Helm chart must assign `null` to the user running all components and dependencies.
+
+
+
+
The `global.compatibility.openshift.adaptSecurityContext` variable in your values.yaml can be used to set the following possible values:
- `force`: The `runAsUser` and `fsGroup` values will be null in all components.
- `disabled`: The `runAsUser` and `fsGroup` values will not be modified (default).
-To deploy Camunda 8 on OpenShift:
-
-1. Install [Helm and other CLI tools](/self-managed/setup/install.md#prerequisites).
-2. Install the [Camunda Helm chart repository](/self-managed/setup/install.md#helm-repository).
-3. Set `global.compatibility.openshift.adaptSecurityContext` to `force`
-
-```shell
-helm install camunda camunda/camunda-platform --skip-crds \
- --set global.compatibility.openshift.adaptSecurityContext=force
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/scc.yml
```
-### With permissive SCCs
-
To use permissive SCCs, simply install the charts as they are. Follow the [general Helm deployment guide](/self-managed/setup/install.md).
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/helm-values/no-scc.yml
+```
+
-## Available Configurations of OpenShift Components
+#### Enable Enterprise components
+
+Some components are not enabled by default in this deployment. For more information on how to configure and enable these components, refer to [configuring Enterprise components and Connectors](/self-managed/setup/install.md#configuring-enterprise-components-and-connectors).
+
+#### Fill your deployment with actual values
+
+Once you've prepared the `values.yml` file, run the following `envsubst` command to substitute the environment variables with their actual values:
+
+```bash
+# generate the final values
+envsubst < values.yml > generated-values.yml
+
+# print the result
+cat generated-values.yml
+```
+
+:::info Camunda Helm chart no longer automatically generates passwords
+
+Starting from **Camunda 8.6**, the Helm chart deprecated the automatic generation of secrets, and this feature has been fully removed in **Camunda 8.7**.
+
+:::
+
+Next, store various passwords in a Kubernetes secret, which will be used by the Helm chart. Below is an example of how to set up the required secret. You can use `openssl` to generate random secrets and store them in environment variables:
+
+```bash reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/generate-passwords.sh
+```
+
+Use these environment variables in the `kubectl` command to create the secret.
+
+- The `smtp-password` should be replaced with the appropriate external value ([see how it's used by Web Modeler](/self-managed/modeler/web-modeler/configuration/configuration.md#smtp--email)).
+
+```bash reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/create-identity-secret.sh
+```
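+
+You can then confirm that the secret exists in the target namespace (the secret name is defined in the script above):
+
+```bash
+kubectl get secrets -n camunda
+```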
+
+### Install Camunda 8 using Helm
+
+Now that the `generated-values.yml` is ready, you can install Camunda 8 using Helm.
+
+The following are the required environment variables with some example values:
+
+```bash reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/chart-env.sh
+```
+
+Then run the following command:
+
+```bash reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.7/procedure/install/install-chart.sh
+```
+
+This command:
+
+- Installs (or upgrades) the Camunda platform using the Helm chart.
+- Substitutes the appropriate version using the `$CAMUNDA_HELM_CHART_VERSION` environment variable.
+- Applies the configuration from `generated-values.yml`.
+
+:::note
+
+This guide uses `helm upgrade --install`, which installs the chart on the initial deployment and upgrades it on subsequent runs. This can simplify future [Camunda 8 Helm upgrades](/self-managed/setup/upgrade.md) or any other component upgrades.
+
+:::
+
+You can track the progress of the installation using the following command:
+
+```bash
+watch -n 5 '
+ kubectl get pods -n camunda --output=wide;
+ if [ $(kubectl get pods -n camunda --field-selector=status.phase!=Running -o name | wc -l) -eq 0 ] &&
+ [ $(kubectl get pods -n camunda -o json | jq -r ".items[] | select(.status.containerStatuses[]?.ready == false)" | wc -l) -eq 0 ];
+ then
+ echo "All pods are Running and Healthy - Installation completed!";
+ else
+ echo "Some pods are not Running or Healthy";
+ fi
+'
+```
+
+## Verify connectivity to Camunda 8
+
+Please follow our [guide to verify connectivity to Camunda 8](/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md#verify-connectivity-to-camunda-8).
+
+The username of the first user is `demo`; the password is the one generated previously and stored in the `FIRST_USER_PASSWORD` environment variable.
+
+:::caution Domain name for gRPC Zeebe
+
+In this setup, the domain used for gRPC communication with Zeebe is slightly different from the one in the guide. Instead of using `zeebe.$DOMAIN_NAME`, you need to use `zeebe-$DOMAIN_NAME`.
+
+:::
+
+## Pitfalls to avoid
+
+For general deployment pitfalls, visit the [deployment troubleshooting guide](/self-managed/operational-guides/troubleshooting/troubleshooting.md).
### Security Context Constraints (SCCs)
@@ -144,220 +440,3 @@ If you deploy Camunda 8 (and related infrastructure) with permissive SCCs out of
-
-## Ingress Configuration
-
-Before exposing services outside the cluster, we need an Ingress component. Here's how you can configure it:
-
-
-
-
-### Using Kubernetes Ingress
-
-[Routes](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html) serve as OpenShift's default Ingress implementation.
-
-If you find that its features aren't suitable for your needs, or if you prefer to use a Kubernetes-native Ingress controller, such as the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx), [you have that option](https://www.redhat.com/en/blog/a-guide-to-using-routes-ingress-and-gateway-apis-in-kubernetes-without-vendor-lock-in).
-
-For guidance on installing an Ingress controller, you can refer to the [Ingress Setup documentation](/self-managed/setup/guides/ingress-setup.md).
-
-:::note Difference between ingress-nginx and NGINX Ingress
-
-Do not confuse the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx) with the [NGINX Ingress Controller](https://www.redhat.com/en/blog/using-nginx-ingress-controller-red-hat-openshift) that is endorsed by Red Hat for usage with OpenShift. Despite very similar names, they are two different products.
-
-If you should decide to use the Red Hat endorsed [NGINX Ingress Controller](https://www.redhat.com/en/blog/using-nginx-ingress-controller-red-hat-openshift), you would require additional adjustments done to the Camunda 8 Ingress objects and the NGINX Ingress Controller itself to make `gRPC` and `HTTP/2` connections work. In that case, please refer to the [example and the prerequisites](https://github.com/nginxinc/kubernetes-ingress/blob/main/examples/ingress-resources/grpc-services/README.md).
-
-:::
-
-
-
-
-### Using OpenShift Routes
-
-[Routes](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html) expose services externally by linking a URL to a service within the cluster. [OpenShift supports both the standard Kubernetes Ingress and routes](https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html), giving cluster users the flexibility to choose.
-
-The presence of routes is rooted in their specification predating Ingress. It's worth noting that the functionality of routes differs from Ingress; for example, unlike Ingress, routes don't allow multiple services to be linked to a single route or the use of paths.
-
-To use these routes for the Zeebe Gateway, configure this through Ingress as well.
-
-#### Prerequisite
-
-As the Zeebe Gateway also uses `gRPC` (which relies on `HTTP/2`), [HTTP/2 Ingress Connectivity has to be enabled](https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress).
-
-Additionally, the Zeebe Gateway should be configured to use an encrypted connection with TLS. In OpenShift, the connection from HAProxy to the Zeebe Gateway service can use HTTP/2 only for re-encryption or pass-through routes, and not for edge-terminated or insecure routes.
-
-#### Required Steps
-
-1. Provide two [TLS secrets](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) for the Zeebe Gateway.
-
- - The first TLS secret is issued to the Zeebe Gateway Service Name. This must use the [PKCS #8 syntax](https://en.wikipedia.org/wiki/PKCS_8) or [PKCS #1 syntax](https://en.wikipedia.org/wiki/PKCS_1) as Zeebe only supports these, referenced as `camunda-platform-internal-service-certificate`.
-
- In the example below, a TLS certificate is generated for the Zeebe Gateway service with an [annotation](https://docs.openshift.com/container-platform/latest/security/certificates/service-serving-certificate.html). The generated certificate will be in the form of a secret.
-
- ```yaml
- zeebeGateway:
- service:
- annotations:
- service.beta.openshift.io/serving-cert-secret-name: camunda-platform-internal-service-certificate
- ```
-
- Another option is [Cert Manager](https://docs.openshift.com/container-platform/latest/security/cert_manager_operator/index.html). For more details, review the [OpenShift documentation](https://docs.openshift.com/container-platform/latest/networking/routes/secured-routes.html#nw-ingress-creating-a-reencrypt-route-with-a-custom-certificate_secured-routes).
-
-
- PKCS #8, PKCS #1 syntax
-
- > PKCS #1 private key encoding. PKCS #1 produces a PEM block that contains the private key algorithm in the header and the private key in the body. A key that uses this can be recognised by its BEGIN RSA PRIVATE KEY or BEGIN EC PRIVATE KEY header. NOTE: This encoding is not supported for Ed25519 keys. Attempting to use this encoding with an Ed25519 key will be ignored and default to PKCS #8.
-
- > PKCS #8 private key encoding. PKCS #8 produces a PEM block with a static header and both the private key algorithm and the private key in the body. A key that uses this encoding can be recognised by its BEGIN PRIVATE KEY header.
-
- [PKCS #1, PKCS #8 syntax definitionfrom cert-manager](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.PrivateKeyEncoding)
-
-
-
- - The second TLS secret is used on the exposed route, referenced as `camunda-platform-external-certificate`. For example, this would be the same TLS secret used for Ingress.
-
-1. Configure your Zeebe Gateway Ingress to create a [Re-encrypt Route](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html#nw-ingress-creating-a-route-via-an-ingress_route-configuration):
-
- ```yaml
- zeebeGateway:
- ingress:
- grpc:
- annotations:
- route.openshift.io/termination: reencrypt
- route.openshift.io/destination-ca-certificate-secret: camunda-platform-internal-service-certificate
- className: openshift-default
- tls:
- enabled: true
- secretName: camunda-platform-external-certificate
- ```
-
-1. Mount the **Service Certificate Secret** to the Zeebe Gateway Pod:
-
- ```yaml
- zeebeGateway:
- env:
- - name: ZEEBE_GATEWAY_SECURITY_ENABLED
- value: "true"
- - name: ZEEBE_GATEWAY_SECURITY_CERTIFICATECHAINPATH
- value: /usr/local/zeebe/config/tls.crt
- - name: ZEEBE_GATEWAY_SECURITY_PRIVATEKEYPATH
- value: /usr/local/zeebe/config/tls.key
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/zeebe/config/tls.crt
- subPath: tls.crt
- - name: key
- mountPath: /usr/local/zeebe/config/tls.key
- subPath: tls.key
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
- - name: key
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.key
- path: tls.key
- defaultMode: 420
- ```
-
-1. Mount the **Service Certificate Secret** to the Operate and Tasklist pods and configure the secure TLS connection. Here, only the `tls.crt` file is required.
-
- For Operate:
-
- ```yaml
- operate:
- env:
- - name: CAMUNDA_OPERATE_ZEEBE_SECURE
- value: "true"
- - name: CAMUNDA_OPERATE_ZEEBE_CERTIFICATEPATH
- value: /usr/local/operate/config/tls.crt
- - name: CAMUNDA_OPERATE_ZEEBE_BROKERCONTACTPOINT
- value: camunda-zeebe-gateway.camunda.svc.cluster.local:26500
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/operate/config/tls.crt
- subPath: tls.crt
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
- ```
-
- The actual configuration properties can be reviewed [in the Operate configuration documentation](/self-managed/operate-deployment/operate-configuration.md#zeebe-broker-connection).
-
- For Tasklist:
-
- ```yaml
- tasklist:
- env:
- - name: CAMUNDA_TASKLIST_ZEEBE_SECURE
- value: "true"
- - name: CAMUNDA_TASKLIST_ZEEBE_CERTIFICATEPATH
- value: /usr/local/tasklist/config/tls.crt
- - name: CAMUNDA_TASKLIST_ZEEBE_BROKERCONTACTPOINT
- value: camunda-zeebe-gateway.camunda.svc.cluster.local:26500
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/tasklist/config/tls.crt
- subPath: tls.crt
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
- ```
-
- The actual configuration properties can be reviewed [in the Tasklist configuration documentation](/self-managed/tasklist-deployment/tasklist-configuration.md#zeebe-broker-connection).
-
-1. Configure Connectors:
-
- ```yaml
- connectors:
- inbound:
- mode: oauth
- env:
- - name: ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS
- value: "camunda-zeebe-gateway.camunda.svc.cluster.local:26500"
- - name: ZEEBE_CLIENT_SECURITY_PLAINTEXT
- value: "false"
- - name: CAMUNDA_CLIENT_ZEEBE_CACERTIFICATEPATH
- value: /usr/local/certificates/tls.crt
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/certificates/tls.crt
- subPath: tls.crt
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
- ```
-
- The actual configuration properties can be reviewed [in the Connectors configuration documentation](/self-managed/connectors-deployment/connectors-configuration.md#zeebe-broker-connection).
-
-1. Configure all other applications running inside the cluster and connecting to the Zeebe Gateway to also use TLS.
-
-
-
-
-
-
-
-## Pitfalls to avoid
-
-For general deployment pitfalls, visit the [deployment troubleshooting guide](/self-managed/operational-guides/troubleshooting/troubleshooting.md).
diff --git a/docusaurus.config.js b/docusaurus.config.js
index a90cf5de490..1280a40afbf 100644
--- a/docusaurus.config.js
+++ b/docusaurus.config.js
@@ -101,15 +101,15 @@ module.exports = {
},
],
[
- // Zeebe REST API docs generation
+ // Administration Self-Managed REST API docs generation
"docusaurus-plugin-openapi-docs",
{
- id: "api-consolesm-openapi",
+ id: "api-adminsm-openapi",
docsPluginId: "default",
config: {
- consolesm: {
- specPath: "api/console-sm/console-sm-openapi.yaml",
- outputDir: "docs/apis-tools/console-sm-api/specifications",
+ adminsm: {
+ specPath: "api/administration-sm/administration-sm-openapi.yaml",
+ outputDir: "docs/apis-tools/administration-sm-api/specifications",
sidebarOptions: {
groupPathsBy: "tag",
},
diff --git a/optimize_sidebars.js b/optimize_sidebars.js
index ab7552d80e3..a423fa57aa2 100644
--- a/optimize_sidebars.js
+++ b/optimize_sidebars.js
@@ -2075,12 +2075,1053 @@ function docsLink(label, href) {
),
],
},
+ {
+ "Clients & SDKs": [
+ {
+ SDKs: [
+ docsLink("Node.js", "apis-tools/node-js-sdk/"),
+
+ {
+ "Spring Zeebe": [
+ docsLink(
+ "Getting started",
+ "apis-tools/spring-zeebe-sdk/getting-started/"
+ ),
+ docsLink(
+ "Configuration",
+ "apis-tools/spring-zeebe-sdk/configuration/"
+ ),
+ ],
+ },
+ ],
+ },
+ {
+ Clients: [
+ {
+ "Java client": [
+ docsLink("Quick reference", "apis-tools/java-client/"),
+ docsLink("Job worker", "apis-tools/java-client/job-worker/"),
+ docsLink("Logging", "apis-tools/java-client/logging/"),
+ docsLink(
+ "Zeebe Process Test",
+ "apis-tools/java-client/zeebe-process-test/"
+ ),
+
+ {
+ Examples: [
+ docsLink("Overview", "apis-tools/java-client-examples/"),
+ docsLink(
+ "Deploy a process",
+ "apis-tools/java-client-examples/process-deploy/"
+ ),
+ docsLink(
+ "Create a process instance",
+ "apis-tools/java-client-examples/process-instance-create/"
+ ),
+ docsLink(
+ "Create non-blocking process instances",
+ "apis-tools/java-client-examples/process-instance-create-nonblocking/"
+ ),
+ docsLink(
+ "Create a process instance with results",
+ "apis-tools/java-client-examples/process-instance-create-with-result/"
+ ),
+ docsLink(
+ "Evaluate a decision",
+ "apis-tools/java-client-examples/decision-evaluate/"
+ ),
+ docsLink(
+ "Open a job worker",
+ "apis-tools/java-client-examples/job-worker-open/"
+ ),
+ docsLink(
+ "Handle variables as POJO",
+ "apis-tools/java-client-examples/data-pojo/"
+ ),
+ docsLink(
+ "Request cluster topology",
+ "apis-tools/java-client-examples/cluster-topology-request/"
+ ),
+ ],
+ },
+ ],
+ },
+
+ {
+ "Community clients": [
+ docsLink("Component clients", "apis-tools/community-clients/"),
+
+ {
+ "Zeebe clients": [
+ docsLink(
+ "JavaScript/Node.js",
+ "apis-tools/community-clients/javascript/"
+ ),
+ docsLink("Spring", "apis-tools/community-clients/spring/"),
+ {
+ "CLI client": [
+ docsLink("Quick reference", "apis-tools/cli-client/"),
+ docsLink(
+ "Getting started with the CLI client",
+ "apis-tools/cli-client/cli-get-started/"
+ ),
+ ],
+ },
+
+ {
+ "Go client": [
+ docsLink("Quick reference", "apis-tools/go-client/"),
+ docsLink(
+ "Getting started with the Go client",
+ "apis-tools/go-client/go-get-started/"
+ ),
+ docsLink(
+ "Job worker",
+ "apis-tools/go-client/job-worker/"
+ ),
+ ],
+ },
+ ],
+ },
+ docsLink(
+ "Build your own client",
+ "apis-tools/build-your-own-client/"
+ ),
+ ],
+ },
+ ],
+ },
+ ],
+ },
+
+ {
+ "Frontend development": [
+ {
+ "Task applications": [
+ docsLink(
+ "Introduction to task applications",
+ "apis-tools/frontend-development/task-applications/introduction-to-task-applications/"
+ ),
+ docsLink(
+ "User task life cycle",
+ "apis-tools/frontend-development/task-applications/user-task-lifecycle/"
+ ),
+ docsLink(
+ "Task application architecture",
+ "apis-tools/frontend-development/task-applications/task-application-architecture/"
+ ),
+ ],
+ },
+
+ {
+ Forms: [
+ docsLink(
+ "Introduction to forms",
+ "apis-tools/frontend-development/forms/introduction-to-forms/"
+ ),
+
+ {
+ "Embed forms": [
+ docsLink(
+ "Concepts",
+ "apis-tools/frontend-development/forms/embed-forms/form-js-concepts/"
+ ),
+ docsLink(
+ "Embed forms in JavaScript",
+ "apis-tools/frontend-development/forms/embed-forms/embed-forms-in-javascript/"
+ ),
+ ],
+ },
+
+ {
+ "Customize & extend": [
+ docsLink(
+ "Styling",
+ "apis-tools/frontend-development/forms/customize-and-extend/form-styling/"
+ ),
+ docsLink(
+ "Custom components",
+ "apis-tools/frontend-development/forms/customize-and-extend/custom-components/"
+ ),
+ docsLink(
+ "Integrate API data",
+ "apis-tools/frontend-development/forms/customize-and-extend/integrate-api-data/"
+ ),
+ ],
+ },
+ ],
+ },
+ ],
+ },
+ ],
+
+ "Self-Managed": [
+ docsLink("Camunda 8 Self-Managed", "self-managed/about-self-managed/"),
+
+ {
+ Setup: [
+ docsLink("Overview", "self-managed/setup/overview/"),
+ docsLink("Install", "self-managed/setup/install/"),
+ docsLink("Upgrade", "self-managed/setup/upgrade/"),
+
+ {
+ Deploy: [
+ {
+ Local: [
+ docsLink(
+ "Camunda 8 Run",
+ "self-managed/setup/deploy/local/c8run/"
+ ),
+ docsLink(
+ "Local Kubernetes cluster",
+ "self-managed/setup/deploy/local/local-kubernetes-cluster/"
+ ),
+ docsLink(
+ "Docker Compose",
+ "self-managed/setup/deploy/local/docker-compose/"
+ ),
+ docsLink("Manual", "self-managed/setup/deploy/local/manual/"),
+ ],
+ },
+
+ {
+ "Amazon (AWS)": [
+ {
+ "Amazon EKS": [
+ docsLink(
+ "Deploy an EKS cluster with eksctl",
+ "self-managed/setup/deploy/amazon/amazon-eks/eks-eksctl/"
+ ),
+ docsLink(
+ "Deploy an EKS cluster with Terraform",
+ "self-managed/setup/deploy/amazon/amazon-eks/eks-terraform/"
+ ),
+ docsLink(
+ "Install Camunda 8 on an EKS cluster",
+ "self-managed/setup/deploy/amazon/amazon-eks/eks-helm/"
+ ),
+ docsLink(
+ "Dual-region setup (EKS)",
+ "self-managed/setup/deploy/amazon/amazon-eks/dual-region/"
+ ),
+ docsLink(
+ "IAM roles for service accounts",
+ "self-managed/setup/deploy/amazon/amazon-eks/irsa/"
+ ),
+ ],
+ ROSA: [
+ docsLink(
+ "Deploy a ROSA cluster with Terraform",
+ "self-managed/setup/deploy/amazon/openshift/terraform/"
+ ),
+ ],
+ },
+
+ docsLink(
+ "Install AWS Marketplace",
+ "self-managed/setup/deploy/amazon/aws-marketplace/"
+ ),
+ docsLink(
+ "Amazon EC2",
+ "self-managed/setup/deploy/amazon/aws-ec2/"
+ ),
+ ],
+ },
+
+ {
+ "Microsoft (Azure)": [
+ docsLink(
+ "Microsoft AKS",
+ "self-managed/setup/deploy/azure/microsoft-aks/"
+ ),
+ ],
+ },
+
+ {
+ "Google (GCP)": [
+ docsLink(
+ "Google GKE",
+ "self-managed/setup/deploy/gcp/google-gke/"
+ ),
+ ],
+ },
+
+ {
+ "Red Hat (OpenShift)": [
+ docsLink(
+ "Red Hat OpenShift",
+ "self-managed/setup/deploy/openshift/redhat-openshift/"
+ ),
+ ],
+ },
+
+ {
+ Other: [
+ docsLink("Docker", "self-managed/setup/deploy/other/docker/"),
+ docsLink("Manual", "self-managed/setup/deploy/local/manual/"),
+ ],
+ },
+ ],
+ },
+
+ {
+ Guides: [
+ docsLink(
+ "Accessing components without Ingress",
+ "self-managed/setup/guides/accessing-components-without-ingress/"
+ ),
+ docsLink(
+ "Ingress setup",
+ "self-managed/setup/guides/ingress-setup/"
+ ),
+ docsLink(
+ "Using existing Keycloak",
+ "self-managed/setup/guides/using-existing-keycloak/"
+ ),
+ docsLink(
+ "Using existing Elasticsearch",
+ "self-managed/setup/guides/using-existing-elasticsearch/"
+ ),
+ docsLink(
+ "Using Amazon OpenSearch Service",
+ "self-managed/setup/guides/using-existing-opensearch/"
+ ),
+ docsLink(
+ "Configure custom headers",
+ "self-managed/setup/guides/configure-db-custom-headers/"
+ ),
+ docsLink(
+ "Connect to an OpenID Connect provider",
+ "self-managed/setup/guides/connect-to-an-oidc-provider/"
+ ),
+ docsLink(
+ "Installing in an air-gapped environment",
+ "self-managed/setup/guides/air-gapped-installation/"
+ ),
+ docsLink(
+ "Running custom Connectors",
+ "self-managed/setup/guides/running-custom-connectors/"
+ ),
+ docsLink(
+ "Multi-namespace deployment",
+ "self-managed/setup/guides/multi-namespace-deployment/"
+ ),
+ docsLink(
+ "Verifying Camunda 8 installation with a demo app",
+ "self-managed/setup/guides/installing-payment-app-example/"
+ ),
+ ],
+ },
+ ],
+ },
+ {
+ "Reference Architecture": [
+ docsLink("Overview", "self-managed/reference-architecture/"),
+ docsLink("Manual JAR", "self-managed/reference-architecture/manual/"),
+ ],
+ },
+ {
+ "Operational guides": [
+ {
+ "Update guide": [
+ docsLink(
+ "Update 8.5 to 8.6",
+ "self-managed/operational-guides/update-guide/850-to-860/"
+ ),
+ docsLink(
+ "Update 8.4 to 8.5",
+ "self-managed/operational-guides/update-guide/840-to-850/"
+ ),
+ docsLink(
+ "Update 8.3 to 8.4",
+ "self-managed/operational-guides/update-guide/830-to-840/"
+ ),
+ docsLink(
+ "Update 8.2 to 8.3",
+ "self-managed/operational-guides/update-guide/820-to-830/"
+ ),
+
+ {
+ Elasticsearch: [
+ docsLink(
+ "Update 7 to 8",
+ "self-managed/operational-guides/update-guide/elasticsearch/7-to-8/"
+ ),
+ ],
+ },
+
+ {
+ Keycloak: [
+ docsLink(
+ "Update Keycloak",
+ "self-managed/operational-guides/update-guide/keycloak/keycloak-update/"
+ ),
+ ],
+ },
+ ],
+ },
+
+ docsLink(
+ "Configure multi-tenancy",
+ "self-managed/operational-guides/configure-multi-tenancy/"
+ ),
+
+ {
+ "Backup and restore": [
+ docsLink(
+ "Backup and restore Optimize data",
+ "self-managed/operational-guides/backup-restore/optimize-backup/"
+ ),
+ docsLink(
+ "Backup and restore Operate and Tasklist data",
+ "self-managed/operational-guides/backup-restore/operate-tasklist-backup/"
+ ),
+ docsLink(
+ "Backup and restore Zeebe data",
+ "self-managed/operational-guides/backup-restore/zeebe-backup-and-restore/"
+ ),
+ docsLink(
+ "Backup and restore Web Modeler data",
+ "self-managed/operational-guides/backup-restore/modeler-backup-and-restore/"
+ ),
+ ],
+ },
+
+ docsLink(
+ "Configure components",
+ "self-managed/operational-guides/application-configs/"
+ ),
+ docsLink(
+ "Configure flow control",
+ "self-managed/operational-guides/configure-flow-control/"
+ ),
+
+ {
+ "Multi-region": [
+ docsLink(
+ "Dual-region operational procedure",
+ "self-managed/operational-guides/multi-region/dual-region-operational-procedure/"
+ ),
+ ],
+ },
+
+ {
+ Troubleshooting: [
+ docsLink(
+ "Troubleshooting",
+ "self-managed/operational-guides/troubleshooting/"
+ ),
+ docsLink(
+ "Log levels",
+ "self-managed/operational-guides/troubleshooting/log-levels/"
+ ),
+ ],
+ },
+ ],
+ },
+
+ {
+ Concepts: [
+ {
+ "Access control": [
+ docsLink(
+ "Applications",
+ "self-managed/concepts/access-control/applications/"
+ ),
+ docsLink(
+ "Resource authorizations",
+ "self-managed/concepts/access-control/resource-authorizations/"
+ ),
+ docsLink(
+ "User task access restrictions",
+ "self-managed/concepts/access-control/user-task-access-restrictions/"
+ ),
+ ],
+ },
+
+ docsLink("Exporters", "self-managed/concepts/exporters/"),
+
+ {
+ "Multi-region": [
+ docsLink(
+ "Dual-region",
+ "self-managed/concepts/multi-region/dual-region/"
+ ),
+ ],
+ },
+
+ docsLink("Multi-tenancy", "self-managed/concepts/multi-tenancy/"),
+ docsLink("Mapping rules", "self-managed/concepts/mapping-rules/"),
+ docsLink(
+ "Elasticsearch privileges",
+ "self-managed/concepts/elasticsearch-privileges/"
+ ),
+ docsLink(
+ "OpenSearch privileges",
+ "self-managed/concepts/opensearch-privileges/"
+ ),
+ ],
+ },
+
+ {
+ Components: [
+ {
+ Console: [
+ docsLink("Overview", "self-managed/console-deployment/overview/"),
+ docsLink(
+ "Installation",
+ "self-managed/console-deployment/installation/"
+ ),
+ docsLink(
+ "Configuration",
+ "self-managed/console-deployment/configuration/"
+ ),
+ docsLink("Telemetry", "self-managed/console-deployment/telemetry/"),
+ ],
+ },
+
+ {
+ Zeebe: [
+ docsLink(
+ "Overview",
+ "self-managed/zeebe-deployment/zeebe-installation/"
+ ),
+
+ {
+ "Zeebe Gateway": [
+ docsLink(
+ "Overview",
+ "self-managed/zeebe-deployment/zeebe-gateway/overview/"
+ ),
+ docsLink(
+ "Interceptors",
+ "self-managed/zeebe-deployment/zeebe-gateway/interceptors/"
+ ),
+ docsLink(
+ "Filters",
+ "self-managed/zeebe-deployment/zeebe-gateway/filters/"
+ ),
+ docsLink(
+ "Job streaming",
+ "self-managed/zeebe-deployment/zeebe-gateway/job-streaming/"
+ ),
+ ],
+ },
+
+ {
+ Configuration: [
+ docsLink(
+ "Overview",
+ "self-managed/zeebe-deployment/configuration/"
+ ),
+ docsLink(
+ "Logging",
+ "self-managed/zeebe-deployment/configuration/logging/"
+ ),
+ docsLink(
+ "Gateway health probes",
+ "self-managed/zeebe-deployment/configuration/gateway-health-probes/"
+ ),
+ docsLink(
+ "Environment variables",
+ "self-managed/zeebe-deployment/configuration/environment-variables/"
+ ),
+ docsLink(
+ "Fixed partitioning",
+ "self-managed/zeebe-deployment/configuration/fixed-partitioning/"
+ ),
+ docsLink(
+ "Priority election",
+ "self-managed/zeebe-deployment/configuration/priority-election/"
+ ),
+ docsLink(
+ "Broker configuration",
+ "self-managed/zeebe-deployment/configuration/broker-config/"
+ ),
+ docsLink(
+ "Gateway configuration",
+ "self-managed/zeebe-deployment/configuration/gateway-config/"
+ ),
+ ],
+ },
+
+ {
+ Security: [
+ docsLink("Overview", "self-managed/zeebe-deployment/security/"),
+ docsLink(
+ "Client authorization",
+ "self-managed/zeebe-deployment/security/client-authorization/"
+ ),
+ docsLink(
+ "Secure client communication",
+ "self-managed/zeebe-deployment/security/secure-client-communication/"
+ ),
+ docsLink(
+ "Secure cluster communication",
+ "self-managed/zeebe-deployment/security/secure-cluster-communication/"
+ ),
+ ],
+ },
+
+ {
+ Operation: [
+ docsLink(
+ "Overview",
+ "self-managed/zeebe-deployment/operations/zeebe-in-production/"
+ ),
+ docsLink(
+ "Resource planning",
+ "self-managed/zeebe-deployment/operations/resource-planning/"
+ ),
+ docsLink(
+ "Network ports",
+ "self-managed/zeebe-deployment/operations/network-ports/"
+ ),
+ docsLink(
+ "Setting up a Zeebe cluster",
+ "self-managed/zeebe-deployment/operations/setting-up-a-cluster/"
+ ),
+ docsLink(
+ "Metrics",
+ "self-managed/zeebe-deployment/operations/metrics/"
+ ),
+ docsLink(
+ "Health status",
+ "self-managed/zeebe-deployment/operations/health/"
+ ),
+ docsLink(
+ "Backpressure",
+ "self-managed/zeebe-deployment/operations/backpressure/"
+ ),
+ docsLink(
+ "Disk space",
+ "self-managed/zeebe-deployment/operations/disk-space/"
+ ),
+ docsLink(
+ "Update Zeebe",
+ "self-managed/zeebe-deployment/operations/update-zeebe/"
+ ),
+ docsLink(
+ "Rebalancing",
+ "self-managed/zeebe-deployment/operations/rebalancing/"
+ ),
+ docsLink(
+ "Management API",
+ "self-managed/zeebe-deployment/operations/management-api/"
+ ),
+ docsLink(
+ "Backups",
+ "self-managed/zeebe-deployment/operations/backups/"
+ ),
+ docsLink(
+ "Cluster scaling",
+ "self-managed/zeebe-deployment/operations/cluster-scaling/"
+ ),
+ ],
+ },
+
+ {
+ Exporters: [
+ docsLink(
+ "Install Zeebe exporters",
+ "self-managed/zeebe-deployment/exporters/install-zeebe-exporters/"
+ ),
+ docsLink(
+ "Elasticsearch",
+ "self-managed/zeebe-deployment/exporters/elasticsearch-exporter/"
+ ),
+ docsLink(
+ "OpenSearch",
+ "self-managed/zeebe-deployment/exporters/opensearch-exporter/"
+ ),
+ ],
+ },
+ ],
+ },
+
+ {
+ Operate: [
+ docsLink(
+ "Installation",
+ "self-managed/operate-deployment/install-and-start/"
+ ),
+ docsLink(
+ "Configuration",
+ "self-managed/operate-deployment/operate-configuration/"
+ ),
+ docsLink(
+ "Data retention",
+ "self-managed/operate-deployment/data-retention/"
+ ),
+ docsLink(
+ "Schema and migration",
+ "self-managed/operate-deployment/schema-and-migration/"
+ ),
+ docsLink(
+ "Importer and archiver",
+ "self-managed/operate-deployment/importer-and-archiver/"
+ ),
+ docsLink(
+ "Authentication and authorization",
+ "self-managed/operate-deployment/operate-authentication/"
+ ),
+ docsLink(
+ "Usage metrics",
+ "self-managed/operate-deployment/usage-metrics/"
+ ),
+ ],
+ },
+
+ {
+ Tasklist: [
+ docsLink(
+ "Installation",
+ "self-managed/tasklist-deployment/install-and-start/"
+ ),
+ docsLink(
+ "Configuration",
+ "self-managed/tasklist-deployment/tasklist-configuration/"
+ ),
+ docsLink(
+ "Custom styling",
+ "self-managed/tasklist-deployment/tasklist-custom-styling/"
+ ),
+ docsLink(
+ "Data retention",
+ "self-managed/tasklist-deployment/data-retention/"
+ ),
+ docsLink(
+ "Importer and archiver",
+ "self-managed/tasklist-deployment/importer-and-archiver/"
+ ),
+ docsLink(
+ "Authentication",
+ "self-managed/tasklist-deployment/tasklist-authentication/"
+ ),
+ docsLink(
+ "Usage metrics",
+ "self-managed/tasklist-deployment/usage-metrics/"
+ ),
+ ],
+ },
+
+ {
+ Connectors: [
+ docsLink(
+ "Installation",
+ "self-managed/connectors-deployment/install-and-start/"
+ ),
+ docsLink(
+ "Configuration",
+ "self-managed/connectors-deployment/connectors-configuration/"
+ ),
+ ],
+ },
+
+ {
+ Optimize: [
+ "self-managed/optimize-deployment/install-and-start",
+ "self-managed/optimize-deployment/version-policy",
+ {
+ Configuration: [
+ "self-managed/optimize-deployment/configuration/getting-started",
+ {
+ "System configuration": [
+ "self-managed/optimize-deployment/configuration/system-configuration",
+ "self-managed/optimize-deployment/configuration/system-configuration-platform-8",
+ "self-managed/optimize-deployment/configuration/system-configuration-platform-7",
+ "self-managed/optimize-deployment/configuration/event-based-process-configuration",
+ ],
+ },
+ "self-managed/optimize-deployment/configuration/logging",
+ "self-managed/optimize-deployment/configuration/optimize-license",
+ "self-managed/optimize-deployment/configuration/security-instructions",
+ "self-managed/optimize-deployment/configuration/shared-elasticsearch-cluster",
+ "self-managed/optimize-deployment/configuration/history-cleanup",
+ "self-managed/optimize-deployment/configuration/localization",
+ "self-managed/optimize-deployment/configuration/object-variables",
+ "self-managed/optimize-deployment/configuration/clustering",
+ "self-managed/optimize-deployment/configuration/webhooks",
+ "self-managed/optimize-deployment/configuration/authorization-management",
+ "self-managed/optimize-deployment/configuration/user-management",
+ "self-managed/optimize-deployment/configuration/multi-tenancy",
+ "self-managed/optimize-deployment/configuration/multiple-engines",
+ "self-managed/optimize-deployment/configuration/setup-event-based-processes",
+ "self-managed/optimize-deployment/configuration/common-problems",
+ ],
+ },
+
+ {
+ Plugins: [
+ "self-managed/optimize-deployment/plugins/plugin-system",
+ "self-managed/optimize-deployment/plugins/businesskey-import-plugin",
+ "self-managed/optimize-deployment/plugins/decision-import-plugin",
+ "self-managed/optimize-deployment/plugins/elasticsearch-header",
+ "self-managed/optimize-deployment/plugins/engine-rest-filter-plugin",
+ "self-managed/optimize-deployment/plugins/single-sign-on",
+ "self-managed/optimize-deployment/plugins/variable-import-plugin",
+ ],
+ },
+ "self-managed/optimize-deployment/reimport",
+ {
+ "Migration & update": [
+ {
+ "Camunda 8": [
+ "self-managed/optimize-deployment/migration-update/camunda-8/instructions",
+ "self-managed/optimize-deployment/migration-update/camunda-8/3.13_8.5-to-8.6",
+ "self-managed/optimize-deployment/migration-update/camunda-8/3.12_8.4-to-3.13_8.5",
+ "self-managed/optimize-deployment/migration-update/camunda-8/3.11_8.3-to-3.12_8.4",
+ "self-managed/optimize-deployment/migration-update/camunda-8/3.10-to-3.11_8.3",
+ "self-managed/optimize-deployment/migration-update/camunda-8/3.9-to-3.10",
+ "self-managed/optimize-deployment/migration-update/camunda-8/3.9-preview-1-to-3.9",
+ "self-managed/optimize-deployment/migration-update/camunda-8/3.8-to-3.9-preview-1",
+ "self-managed/optimize-deployment/migration-update/camunda-8/3.7-to-3.8",
+ ],
+ "Camunda 7": [
+ "self-managed/optimize-deployment/migration-update/camunda-7/instructions",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.13-to-3.14",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.12-to-3.13",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.11-to-3.12",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.10-to-3.11",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.9-to-3.10",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.9-preview-1-to-3.9",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.8-to-3.9-preview-1",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.7-to-3.8",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.6-to-3.7",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.5-to-3.6",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.4-to-3.5",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.3-to-3.4",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.2-to-3.3",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.1-to-3.2",
+ "self-managed/optimize-deployment/migration-update/camunda-7/3.0-to-3.1",
+ "self-managed/optimize-deployment/migration-update/camunda-7/2.7-to-3.0",
+ "self-managed/optimize-deployment/migration-update/camunda-7/2.6-to-2.7",
+ "self-managed/optimize-deployment/migration-update/camunda-7/2.5-to-2.6",
+ "self-managed/optimize-deployment/migration-update/camunda-7/2.4-to-2.5",
+ "self-managed/optimize-deployment/migration-update/camunda-7/2.3-to-2.4",
+ "self-managed/optimize-deployment/migration-update/camunda-7/2.2-to-2.3",
+ "self-managed/optimize-deployment/migration-update/camunda-7/2.1-to-2.2",
+ ],
+ },
+ ],
+ },
+
+ {
+ "Advanced features": [
+ "self-managed/optimize-deployment/advanced-features/engine-data-deletion",
+ "self-managed/optimize-deployment/advanced-features/import-guide",
+ ],
+ },
+ ],
+ },
+
+ {
+ Identity: [
+ docsLink(
+ "What is Identity?",
+ "self-managed/identity/what-is-identity/"
+ ),
+ docsLink(
+ "Installation and first steps",
+ "self-managed/identity/getting-started/install-identity/"
+ ),
+
+ {
+ "User guide": [
+ {
+ Configuration: [
+ docsLink(
+ "Making Identity production ready",
+ "self-managed/identity/user-guide/configuration/making-identity-production-ready/"
+ ),
+ docsLink(
+ "Configuring an external identity provider",
+ "self-managed/identity/user-guide/configuration/configure-external-identity-provider/"
+ ),
+ docsLink(
+ "Configure logging",
+ "self-managed/identity/user-guide/configuration/configure-logging/"
+ ),
+ docsLink(
+ "Connect to an existing Keycloak instance",
+ "self-managed/identity/user-guide/configuration/connect-to-an-existing-keycloak/"
+ ),
+ ],
+ },
+
+ {
+ Roles: [
+ docsLink(
+ "Add and assign a role",
+ "self-managed/identity/user-guide/roles/add-assign-role/"
+ ),
+ docsLink(
+ "Add and assign a permission",
+ "self-managed/identity/user-guide/roles/add-assign-permission/"
+ ),
+ ],
+ },
+
+ {
+ Groups: [
+ docsLink(
+ "Create a group",
+ "self-managed/identity/user-guide/groups/create-group/"
+ ),
+ docsLink(
+ "Assign users and roles to a group",
+ "self-managed/identity/user-guide/groups/assign-users-roles-to-group/"
+ ),
+ ],
+ },
+
+ {
+ Authorizations: [
+ docsLink(
+ "Managing resource authorizations",
+ "self-managed/identity/user-guide/authorizations/managing-resource-authorizations/"
+ ),
+ docsLink(
+ "Managing user access",
+ "self-managed/identity/user-guide/authorizations/managing-user-access/"
+ ),
+ docsLink(
+ "Generating machine-to-machine (M2M) tokens",
+ "self-managed/identity/user-guide/authorizations/generating-m2m-tokens/"
+ ),
+ ],
+ },
+
+ {
+ Tenants: [
+ docsLink(
+ "Managing tenants",
+ "self-managed/identity/user-guide/tenants/managing-tenants/"
+ ),
+ ],
+ },
+
+ {
+ "Mapping rules": [
+ docsLink(
+ "Managing mapping rules",
+ "self-managed/identity/user-guide/mapping-rules/managing-mapping-rules/"
+ ),
+ ],
+ },
+
+ {
+ "Additional features": [
+ docsLink(
+ "Adding an API",
+ "self-managed/identity/user-guide/additional-features/adding-an-api/"
+ ),
+ docsLink(
+ "Incorporate applications",
+ "self-managed/identity/user-guide/additional-features/incorporate-applications/"
+ ),
+ ],
+ },
+ ],
+ },
+
+ {
+ Deployment: [
+ docsLink(
+ "Configuration variables",
+ "self-managed/identity/deployment/configuration-variables/"
+ ),
+ docsLink(
+ "Application monitoring",
+ "self-managed/identity/deployment/application-monitoring/"
+ ),
+ docsLink(
+ "Starting configuration",
+ "self-managed/identity/deployment/starting-configuration-for-identity/"
+ ),
+ docsLink(
+ "Resource management",
+ "self-managed/identity/deployment/resource-management/"
+ ),
+ ],
+ },
+
+ docsLink(
+ "Troubleshoot Identity",
+ "self-managed/identity/troubleshooting/troubleshoot-identity/"
+ ),
+ ],
+ },
+
+ {
+ Modeler: [
+ {
+ "Web Modeler": [
+ docsLink(
+ "Installation",
+ "self-managed/modeler/web-modeler/installation/"
+ ),
+
+ {
+ Configuration: [
+ docsLink(
+ "Overview",
+ "self-managed/modeler/web-modeler/configuration/"
+ ),
+ docsLink(
+ "Database",
+ "self-managed/modeler/web-modeler/configuration/database/"
+ ),
+ docsLink(
+ "Identity",
+ "self-managed/modeler/web-modeler/configuration/identity/"
+ ),
+ docsLink(
+ "Logging",
+ "self-managed/modeler/web-modeler/configuration/logging/"
+ ),
+ docsLink(
+ "SSL",
+ "self-managed/modeler/web-modeler/configuration/ssl/"
+ ),
+ ],
+ },
+
+ {
+ Troubleshooting: [
+ docsLink(
+ "Database connection",
+ "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-database-connection/"
+ ),
+ docsLink(
+ "Zeebe connection",
+ "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-zeebe-connection/"
+ ),
+ docsLink(
+ "Missing data",
+ "self-managed/modeler/web-modeler/troubleshooting/troubleshoot-missing-data/"
+ ),
+ ],
+ },
+ ],
+ },
+
+ {
+ "Desktop Modeler": [
+ docsLink(
+ "Deploy diagram",
+ "self-managed/modeler/desktop-modeler/deploy-to-self-managed/"
+ ),
+ ],
+ },
+ ],
+ },
],
},
],
}),
{
- "Clients & SDKs": [
+ 7& SDKs": [
{
SDKs: [
docsLink("Node.js", "apis-tools/node-js-sdk/"),
diff --git a/package.json b/package.json
index 0bc82be266a..1c85373daab 100644
--- a/package.json
+++ b/package.json
@@ -28,7 +28,7 @@
"api:generate:operate": "npm run api:generate operate",
"api:generate:tasklist": "npm run api:generate tasklist",
"api:generate:zeebe": "npm run api:generate zeebe",
- "api:generate:consolesm": "npm run api:generate consolesm",
+ "api:generate:adminsm": "npm run api:generate adminsm",
"api:generate:camunda": "npm run api:generate camunda"
},
"dependencies": {
diff --git a/sidebars.js b/sidebars.js
index 4add84d1d97..9b598667a66 100644
--- a/sidebars.js
+++ b/sidebars.js
@@ -965,7 +965,27 @@ module.exports = {
"self-managed/setup/deploy/amazon/amazon-eks/irsa",
],
},
- "self-managed/setup/deploy/amazon/aws-marketplace",
+ {
+ type: "category",
+ label: "ROSA",
+ link: {
+ type: "doc",
+ id: "self-managed/setup/deploy/amazon/openshift/terraform-setup",
+ },
+ items: [
+ "self-managed/setup/deploy/amazon/openshift/terraform-setup",
+ ],
+ },
+ {
+ type: "category",
+ label: "Amazon MarketPlace",
+ link: {
+ type: "doc",
+ id: "self-managed/setup/deploy/amazon/aws-marketplace",
+ },
+ items: [],
+ },
+ "self-managed/setup/deploy/amazon/aws-ec2",
],
"Microsoft (Azure)": [
"self-managed/setup/deploy/azure/microsoft-aks",
@@ -1003,6 +1023,14 @@ module.exports = {
],
},
],
+ },
+ {
+ "Reference architecture": [
+ "self-managed/reference-architecture/reference-architecture",
+ "self-managed/reference-architecture/manual/manual",
+ ],
+ },
+ {
"Operational guides": [
{
type: "category",
diff --git a/static/.htaccess b/static/.htaccess
index 99205f43616..20496bb89ff 100644
--- a/static/.htaccess
+++ b/static/.htaccess
@@ -98,6 +98,9 @@ RewriteRule ^docs/reference/bpmn-processes/?(.*)$ /docs/components/modeler/bpmn/
# Remove Tasklist GraphQL API
RewriteRule ^docs/next/apis-tools/tasklist-api/(.*)$ /docs/next/apis-tools/camunda-api-rest/camunda-api-rest-overview/ [R=301,L]
+# Finalize the renaming of Console SM API to Administration API (Self-Managed)
+RewriteRule ^docs/next/apis-tools/administration-sm-api/specifications/sm-administration-api/?$ /docs/next/apis-tools/administration-sm-api/specifications/administration-api-self-managed/ [R=301,L]
+
# Remove Zeebe REST API
RewriteRule ^docs/next/apis-tools/zeebe-api-rest/specifications/?$ /docs/next/apis-tools/camunda-api-rest/specifications/$1 [R=301,L]
RewriteRule ^docs/next/apis-tools/zeebe-api-rest/zeebe-api-rest-overview/?$ /docs/next/apis-tools/camunda-api-rest/camunda-api-rest-overview/$1 [R=301,L]
diff --git a/versioned_docs/version-8.4/components/modeler/bpmn/user-tasks/user-tasks.md b/versioned_docs/version-8.4/components/modeler/bpmn/user-tasks/user-tasks.md
index b475a31143c..6141163fc38 100644
--- a/versioned_docs/version-8.4/components/modeler/bpmn/user-tasks/user-tasks.md
+++ b/versioned_docs/version-8.4/components/modeler/bpmn/user-tasks/user-tasks.md
@@ -56,6 +56,11 @@ attributes can be specified simultaneously:
- `candidateUsers`: Specifies the users that the task can be assigned to.
- `candidateGroups`: Specifies the groups of users that the task can be assigned to.
+:::info
+The assignee attribute must adhere to the userId field’s case-sensitivity requirements.
+Note that in SaaS, all user IDs are converted to lowercase by default, as they are based on email addresses.
+:::
+
Typically, the assignee, candidate users, and candidate groups are defined as [static values](/docs/components/concepts/expressions.md#expressions-vs-static-values) (e.g. `some_username`, `some_username, another_username` and
`sales, operations`), but they can also be defined as
[expressions](/components/concepts/expressions.md) (e.g. `= book.author` and `= remove(reviewers, book.author)` and `= reviewer_roles`). The expressions are evaluated on activating the user task and must result in a
diff --git a/versioned_docs/version-8.5/apis-tools/node-js-sdk.md b/versioned_docs/version-8.5/apis-tools/node-js-sdk.md
index 3186f048f21..ffe800bbc5f 100644
--- a/versioned_docs/version-8.5/apis-tools/node-js-sdk.md
+++ b/versioned_docs/version-8.5/apis-tools/node-js-sdk.md
@@ -1,7 +1,7 @@
---
id: node-js-sdk
title: Node.js
-description: Get started with the official Camunda 8 JavaScript SDK for Node.js, available via npm.
+description: Get started with the official Camunda 8 JavaScript SDK for Node.js.
---
As of 8.5.0, the official [Camunda 8 JavaScript SDK for Node.js](https://github.com/camunda/camunda-8-js-sdk) is available via [npm](https://www.npmjs.com/package/@camunda8/sdk).
diff --git a/versioned_docs/version-8.5/apis-tools/working-with-apis-tools.md b/versioned_docs/version-8.5/apis-tools/working-with-apis-tools.md
index 9ee4f2daecb..57e5bbf0d50 100644
--- a/versioned_docs/version-8.5/apis-tools/working-with-apis-tools.md
+++ b/versioned_docs/version-8.5/apis-tools/working-with-apis-tools.md
@@ -84,6 +84,9 @@ Additionally, visit our documentation on [Operate](../self-managed/operate-deplo
### SDKs
### Postman
diff --git a/versioned_docs/version-8.5/components/modeler/bpmn/user-tasks/user-tasks.md b/versioned_docs/version-8.5/components/modeler/bpmn/user-tasks/user-tasks.md
index 7f7d2d48815..b00e97e7c75 100644
--- a/versioned_docs/version-8.5/components/modeler/bpmn/user-tasks/user-tasks.md
+++ b/versioned_docs/version-8.5/components/modeler/bpmn/user-tasks/user-tasks.md
@@ -35,6 +35,11 @@ attributes can be specified simultaneously:
- `candidateUsers`: Specifies the users that the task can be assigned to.
- `candidateGroups`: Specifies the groups of users that the task can be assigned to.
+:::info
+The assignee attribute must adhere to the userId field’s case-sensitivity requirements.
+Note that in SaaS, all user IDs are converted to lowercase by default, as they are based on email addresses.
+:::
+
:::info
Assignment resources can also be used for set user task restrictions ([SaaS](/components/concepts/access-control/user-task-access-restrictions.md)/[Self-Managed](docs/self-managed/concepts/access-control/user-task-access-restrictions.md)), where users will see only the tasks they have authorization to work on.
:::
diff --git a/versioned_docs/version-8.5/components/modeler/desktop-modeler/use-connectors.md b/versioned_docs/version-8.5/components/modeler/desktop-modeler/use-connectors.md
index 921b5c0f693..6a0f28ee837 100644
--- a/versioned_docs/version-8.5/components/modeler/desktop-modeler/use-connectors.md
+++ b/versioned_docs/version-8.5/components/modeler/desktop-modeler/use-connectors.md
@@ -12,7 +12,7 @@ Desktop Modeler automatically fetches and updates [element templates](./element-
## Automatic Connector template fetching
-Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors.
+Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors. The fetch is triggered whenever you start the application, or every 24 hours if the application is not closed.
After an update check has concluded, a notification indicates if the templates are up to date or have been updated:
diff --git a/versioned_docs/version-8.6/apis-tools/node-js-sdk.md b/versioned_docs/version-8.6/apis-tools/node-js-sdk.md
index 583c837affb..b11e5f327d7 100644
--- a/versioned_docs/version-8.6/apis-tools/node-js-sdk.md
+++ b/versioned_docs/version-8.6/apis-tools/node-js-sdk.md
@@ -1,7 +1,7 @@
---
id: node-js-sdk
title: Node.js
-description: Get started with the official Camunda 8 JavaScript SDK for Node.js, available via npm.
+description: Get started with the official Camunda 8 JavaScript SDK for Node.js.
---
As of 8.5.0, the official [Camunda 8 JavaScript SDK for Node.js](https://github.com/camunda/camunda-8-js-sdk) is available via [npm](https://www.npmjs.com/package/@camunda8/sdk).
diff --git a/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/configuration.md b/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/configuration.md
index 6cff515bf51..5c644b42928 100644
--- a/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/configuration.md
+++ b/versioned_docs/version-8.6/apis-tools/spring-zeebe-sdk/configuration.md
@@ -275,7 +275,7 @@ A custom maxMessageSize allows the client to receive larger or smaller responses
camunda:
client:
zeebe:
- max-message-size: 3
+ max-message-size: 4194304
```
### Request timeout
diff --git a/versioned_docs/version-8.6/apis-tools/working-with-apis-tools.md b/versioned_docs/version-8.6/apis-tools/working-with-apis-tools.md
index 766d9a434e5..37a4f546afe 100644
--- a/versioned_docs/version-8.6/apis-tools/working-with-apis-tools.md
+++ b/versioned_docs/version-8.6/apis-tools/working-with-apis-tools.md
@@ -72,6 +72,9 @@ Additionally, visit our documentation on [Operate](../self-managed/operate-deplo
### SDKs
### Postman
diff --git a/versioned_docs/version-8.6/components/modeler/bpmn/user-tasks/user-tasks.md b/versioned_docs/version-8.6/components/modeler/bpmn/user-tasks/user-tasks.md
index 90a64a6191c..b1a4919be5d 100644
--- a/versioned_docs/version-8.6/components/modeler/bpmn/user-tasks/user-tasks.md
+++ b/versioned_docs/version-8.6/components/modeler/bpmn/user-tasks/user-tasks.md
@@ -36,6 +36,11 @@ attributes can be specified simultaneously:
- `candidateUsers`: Specifies the users that the task can be assigned to.
- `candidateGroups`: Specifies the groups of users that the task can be assigned to.
+:::info
+The assignee attribute must adhere to the userId field’s case-sensitivity requirements.
+Note that in SaaS, all user IDs are converted to lowercase by default, as they are based on email addresses.
+:::
+
:::info
Assignment resources can also be used for set user task restrictions ([SaaS](/components/concepts/access-control/user-task-access-restrictions.md)/[Self-Managed](docs/self-managed/concepts/access-control/user-task-access-restrictions.md)), where users will see only the tasks they have authorization to work on.
:::
diff --git a/versioned_docs/version-8.6/components/modeler/desktop-modeler/use-connectors.md b/versioned_docs/version-8.6/components/modeler/desktop-modeler/use-connectors.md
index 921b5c0f693..6a0f28ee837 100644
--- a/versioned_docs/version-8.6/components/modeler/desktop-modeler/use-connectors.md
+++ b/versioned_docs/version-8.6/components/modeler/desktop-modeler/use-connectors.md
@@ -12,7 +12,7 @@ Desktop Modeler automatically fetches and updates [element templates](./element-
## Automatic Connector template fetching
-Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors.
+Automatic Connector template fetching is enabled by default, and notifies you of any updates or errors. The fetch is triggered whenever you start the application, or every 24 hours if the application is not closed.
After an update check has concluded, a notification indicates if the templates are up to date or have been updated:
diff --git a/versioned_docs/version-8.6/self-managed/console-deployment/configuration.md b/versioned_docs/version-8.6/self-managed/console-deployment/configuration.md
index 8bfa775bf15..9da243ab378 100644
--- a/versioned_docs/version-8.6/self-managed/console-deployment/configuration.md
+++ b/versioned_docs/version-8.6/self-managed/console-deployment/configuration.md
@@ -33,6 +33,20 @@ Console environment variables could be set in Helm via the `console.env` key. Fo
Camunda 8 components without a valid license may display **Non-Production License** in the navigation bar and issue warnings in the logs. These warnings have no impact on Console startup or functionality. To obtain a license, visit the [Camunda Enterprise page](https://camunda.com/platform/camunda-platform-enterprise-contact/).
:::
+### Proxy
+
+These settings are useful when the application needs to make outgoing network requests in environments that require traffic to pass through a proxy server.
+
+| Environment variable | Description | Example value | Default value |
+| -------------------- | ---------------------------------------------------------------------------------------------- | ------------------------------------- | ------------- |
+| `http_proxy` | Specifies the proxy server to be used for outgoing HTTP requests. | `http://proxy.example.com:8080` | - |
+| `https_proxy` | Specifies the proxy server to be used for outgoing HTTPS requests. | `https://secureproxy.example.com:443` | - |
+| `no_proxy` | A comma-separated list of domain names or IP addresses for which the proxy should be bypassed. | `localhost,127.0.0.1,.example.com` | - |
+
+:::note
+The proxy-related environment variables are lowercase because they follow a widely accepted convention used in many system environments and tools.
+:::
+
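+For example, when deploying Console through the Camunda 8 Helm chart, these variables could be supplied via the `console.env` key (a minimal sketch, assuming the chart forwards `console.env` entries as standard Kubernetes environment variables; values are taken from the examples above):
+
+```yaml
+console:
+  env:
+    - name: http_proxy
+      value: "http://proxy.example.com:8080"
+    - name: https_proxy
+      value: "https://secureproxy.example.com:443"
+    - name: no_proxy
+      value: "localhost,127.0.0.1,.example.com"
+```
+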
## Telemetry
You can enable telemetry and usage collection to help us improve our product by sending several telemetry metrics to Camunda. The information we collect will contribute to continuous product enhancement and help us understand how Camunda is used. We do not collect sensitive information and limit data points to several metrics. For more information, you can download collected data set metrics from the telemetry page at anytime.
diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md
index 1a46c925e7b..328387b3915 100644
--- a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md
+++ b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md
@@ -4,6 +4,8 @@ title: "Install Camunda 8 on an EKS cluster"
description: "Set up the Camunda 8 environment with Helm and an optional Ingress setup on Amazon EKS."
---
+
+
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
@@ -427,7 +429,6 @@ https://github.com/camunda/camunda-tf-eks-module/blob/main/examples/camunda-8.6/
Use these environment variables in the `kubectl` command to create the secret.
-- The values for `postgres-password` and `password` are not required if you are using an external database. If you choose not to use an external database, you must provide those values.
- The `smtp-password` should be replaced with the appropriate external value ([see how it's used by Web Modeler](/self-managed/modeler/web-modeler/configuration/configuration.md#smtp--email)).
```bash reference
diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md
index 40fe91e4dc8..73c9e6c48ce 100644
--- a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md
+++ b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/amazon-eks/terraform-setup.md
@@ -4,6 +4,8 @@ title: "Deploy an EKS cluster with Terraform (advanced)"
description: "Deploy an Amazon Kubernetes Cluster (EKS) with a Terraform module for a quick Camunda 8 setup."
---
+
+
import Tabs from "@theme/Tabs";
import TabItem from "@theme/TabItem";
@@ -96,25 +98,25 @@ Advanced users may want to handle this part differently and use a different back
#### Set up AWS authentication
The [AWS Terraform provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) is required to create resources in AWS. Before you can use the provider, you must authenticate it using your AWS credentials.
-You can further change the region and other preferences and explore different [authentication](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) methods.
-We recommend using the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html). If you have configured your AWS CLI, Terraform will automatically detect and use those credentials.
+:::caution Ownership of the created resources
-To configure the AWS CLI:
+A user who creates resources in AWS will always retain administrative access to those resources, including any Kubernetes clusters created. It is recommended to create a dedicated [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) for Terraform purposes, ensuring that the resources are managed and owned by that user.
-```bash
-aws configure
-```
+:::
-Enter your `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, region, and output format. These can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html).
+You can further change the region and other preferences and explore different [authentication](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) methods:
-:::caution Ownership of the created resources
+- For development or testing purposes you can use the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html). If you have configured your AWS CLI, Terraform will automatically detect and use those credentials.
+ To configure the AWS CLI:
-A user who creates resources in AWS will always retain administrative access to those resources, including any Kubernetes clusters created. It is recommended to create a dedicated [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) for Terraform purposes, ensuring that the resources are managed and owned by that user.
+ ```bash
+ aws configure
+ ```
-[Create access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` variables to use with the AWS CLI and `eksctl`
+ Enter your `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, region, and output format. These can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html).
-:::
+- For production environments, we recommend using a dedicated IAM user. [Create access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console, and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
#### Create an S3 bucket for Terraform state management
diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.jpg b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.jpg
new file mode 100644
index 00000000000..862eacd4fe0
Binary files /dev/null and b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.jpg differ
diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.pdf b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.pdf
new file mode 100644
index 00000000000..5506e354c8c
Binary files /dev/null and b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/openshift/assets/rosa-single-region.pdf differ
diff --git a/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/openshift/terraform-setup.md b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/openshift/terraform-setup.md
new file mode 100644
index 00000000000..eca6ea8c5eb
--- /dev/null
+++ b/versioned_docs/version-8.6/self-managed/setup/deploy/amazon/openshift/terraform-setup.md
@@ -0,0 +1,384 @@
+---
+id: terraform-setup
+title: "Deploy a ROSA HCP Cluster with Terraform"
+description: "Deploy Red Hat OpenShift on AWS using a Terraform module for a quick Camunda 8 setup."
+---
+
+
+
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+This guide provides a detailed tutorial for deploying a [Red Hat OpenShift on AWS (ROSA) cluster with Hosted Control Plane (HCP)](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4/html-single/architecture/index#architecture-overview) capabilities. It is specifically tailored for deploying Camunda 8 using Terraform, a widely-used Infrastructure as Code (IaC) tool.
+
+We recommend this guide for building a robust and sustainable infrastructure. However, if you are looking for a quicker trial or proof of concept, or if your needs aren't fully met by our module, consider following the official [ROSA Quickstart Guide](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4/html/getting_started/rosa-quickstart-guide-ui#rosa-quickstart-guide-ui).
+
+This guide aims to help you leverage IaC to streamline and reproduce your cloud infrastructure setup. While it covers the essentials for deploying a ROSA HCP cluster, for more advanced use cases, please refer to the official [Red Hat OpenShift on AWS Documentation](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4).
+
+:::tip
+
+If you are completely new to Terraform and the idea of IaC, read through the [Terraform IaC documentation](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code) and give their [interactive quick start](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code#quick-start) a try for a basic understanding.
+
+:::
+
+## Requirements
+
+- A [Red Hat Account](https://www.redhat.com/) to create the Red Hat OpenShift cluster.
+- An [AWS account](https://docs.aws.amazon.com/accounts/latest/reference/accounts-welcome.html) to create any resources within AWS.
+- [AWS CLI (2.17+)](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html), a CLI tool for creating AWS resources.
+- [Terraform (1.9+)](https://developer.hashicorp.com/terraform/downloads)
+- [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster.
+- [ROSA CLI](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4/html/getting_started/rosa-quickstart-guide-ui#rosa-getting-started-environment-setup_rosa-quickstart-guide-ui) to interact with the cluster.
+- [jq (1.7+)](https://jqlang.github.io/jq/download/) to interact with some Terraform variables.
+- This guide uses GNU/Bash for all the shell commands listed.
+
+### Considerations
+
+This setup provides a foundational starting point for working with Camunda 8, though it is not optimized for peak performance. It serves as a solid initial step in preparing a production environment by leveraging [Infrastructure as Code (IaC) tools](https://developer.hashicorp.com/terraform/tutorials/aws-get-started/infrastructure-as-code).
+
+Terraform can seem complex at first. If you're interested in understanding what each component does, consider trying out the [Red Hat OpenShift on AWS UI-based tutorial](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4/html/tutorials/getting-started-with-rosa#creating-account-wide-roles), which shows which resources are created and how they interact with each other.
+
+If you require managed services for PostgreSQL Aurora or OpenSearch, you can refer to the definitions provided in the [EKS setup with Terraform](../amazon-eks/terraform-setup.md) guide. However, please note that these configurations may need adjustments to fit your specific requirements and have not been tested. By default, this guide assumes that the database services (PostgreSQL and Elasticsearch) integrated into the default chart will be used.
+
+For testing Camunda 8 or developing against it, you might consider signing up for our [SaaS offering](https://camunda.com/platform/). If you already have a Red Hat OpenShift cluster on AWS, you can skip ahead to the [Helm setup guide](/self-managed/setup/deploy/openshift/redhat-openshift.md).
+
+To keep this guide concise, we provide links to additional documentation covering best practices, allowing you to explore each topic in greater depth.
+
+:::warning Cost management
+
+Following this guide will incur costs on your cloud provider account and your Red Hat account, specifically for the managed OpenShift service, OpenShift worker nodes running in EC2, the hosted control plane, Elastic Block Storage (EBS), and Route 53. For more details, refer to [ROSA AWS pricing](https://aws.amazon.com/rosa/pricing/) and the [AWS Pricing Calculator](https://calculator.aws/#/) as total costs vary by region.
+
+:::
+
+### Variants
+
+Unlike the [EKS Terraform setup](../amazon-eks/terraform-setup.md), we currently support only one main variant of this setup:
+
+- The **standard installation** uses a username and password connection for Camunda components (or relies solely on network isolation for certain components). This option is straightforward and easier to implement, making it ideal for environments where simplicity and rapid deployment are priorities, or where network isolation provides adequate security.
+
+- The second variant, **IRSA** (IAM Roles for Service Accounts), may work but has not been tested. If you’re interested in setting it up, please refer to the EKS guide as a foundational resource.
+
+### Outcome
+
+
+
+
+_Infrastructure diagram for a single region ROSA setup (click on the image to open the PDF version)_
+[![Infrastructure Diagram ROSA Single-Region](./assets/rosa-single-region.jpg)](./assets/rosa-single-region.pdf)
+
+Following this tutorial and steps will result in:
+
+- A [Red Hat OpenShift with Hosted Control Plane](https://www.redhat.com/en/topics/containers/what-are-hosted-control-planes#rosa-with-hcp) cluster running the latest ROSA version with six nodes ready for Camunda 8 installation.
+- The [EBS CSI driver](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) is installed and configured, which is used by the Camunda 8 Helm chart to create [persistent volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/).
+
+## 1. Configure AWS and initialize Terraform
+
+### Terraform prerequisites
+
+To manage the infrastructure for Camunda 8 on AWS using Terraform, we need to set up Terraform's backend to store the state file remotely in an S3 bucket. This ensures secure and persistent storage of the state file.
+
+:::note
+Advanced users may want to handle this part differently and use a different backend. The backend setup provided is an example for new users.
+:::
+
+#### Set up AWS authentication
+
+The [AWS Terraform provider](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) is required to create resources in AWS. Before you can use the provider, you must authenticate it using your AWS credentials.
+
+:::caution Ownership of the created resources
+
+A user who creates resources in AWS will always retain administrative access to those resources, including any Kubernetes clusters created. It is recommended to create a dedicated [AWS IAM user](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users.html) for Terraform purposes, ensuring that the resources are managed and owned by that user.
+
+:::
+
+You can further change the region and other preferences and explore different [authentication](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#authentication-and-configuration) methods:
+
+- For development or testing purposes you can use the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html). If you have configured your AWS CLI, Terraform will automatically detect and use those credentials.
+ To configure the AWS CLI:
+
+ ```bash
+ aws configure
+ ```
+
+ Enter your `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, region, and output format. These can be retrieved from the [AWS Console](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html).
+
+- For production environments, we recommend the use of a dedicated IAM user. Create [access keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html) for the new IAM user via the console, and export them as `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.
+
+#### Create an S3 bucket for Terraform state management
+
+Before setting up Terraform, you need to create an S3 bucket that will store the state file. This is important for collaboration and to prevent issues like state file corruption.
+
+To start, set the region as an environment variable upfront to avoid repeating it in each command:
+
+```bash
+export AWS_REGION=<your-region>
+```
+
+Replace `<your-region>` with your chosen AWS region (for example, `eu-central-1`).
+
+Now, follow these steps to create the S3 bucket with versioning enabled:
+
+1. Open your terminal and ensure the AWS CLI is installed and configured.
+
+1. Run the following command to create an S3 bucket for storing your Terraform state. Make sure to use a unique bucket name and set the `AWS_REGION` environment variable beforehand:
+
+ ```bash
+ # Replace "my-rosa-tf-state" with your unique bucket name
+ export S3_TF_BUCKET_NAME="my-rosa-tf-state"
+
+ aws s3api create-bucket --bucket "$S3_TF_BUCKET_NAME" --region "$AWS_REGION" \
+ --create-bucket-configuration LocationConstraint="$AWS_REGION"
+ ```
+
+1. Enable versioning on the S3 bucket to track changes and protect the state file from accidental deletions or overwrites:
+
+ ```bash
+ aws s3api put-bucket-versioning --bucket "$S3_TF_BUCKET_NAME" --versioning-configuration Status=Enabled --region "$AWS_REGION"
+ ```
+
+1. Secure the bucket by blocking public access:
+
+ ```bash
+ aws s3api put-public-access-block --bucket "$S3_TF_BUCKET_NAME" --public-access-block-configuration \
+ "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" --region "$AWS_REGION"
+ ```
+
+1. Verify versioning is enabled on the bucket:
+
+ ```bash
+ aws s3api get-bucket-versioning --bucket "$S3_TF_BUCKET_NAME" --region "$AWS_REGION"
+ ```
+
+This S3 bucket will now securely store your Terraform state files with versioning enabled.
+
+#### Create a `config.tf` with the following setup
+
+Once the S3 bucket is created, configure your `config.tf` file to use the S3 backend for managing the Terraform state:
+
+```hcl reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/config.tf
+```
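+
+In essence, the referenced file declares a partial S3 backend (the bucket and key are supplied at `terraform init` time) together with the required providers. The following is only a minimal sketch of that idea; the referenced `config.tf` remains authoritative, and the provider pinning shown here is an assumption:
+
+```hcl
+terraform {
+  # Partial backend configuration: the bucket and key are passed later via
+  # `terraform init -backend-config=...`.
+  backend "s3" {
+    encrypt = true
+  }
+
+  required_providers {
+    aws = {
+      source  = "hashicorp/aws"
+      version = ">= 5.0" # assumption; use the version pinned in the referenced file
+    }
+  }
+}
+
+provider "aws" {
+  # Region and credentials are taken from your AWS CLI or environment configuration.
+}
+```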
+
+#### Initialize Terraform
+
+Once your `config.tf` and authentication are set up, you can initialize your Terraform project. The previous steps configured a dedicated S3 Bucket (`S3_TF_BUCKET_NAME`) to store your state, and the following creates a bucket key that will be used by your configuration.
+
+Configure the backend and download the necessary provider plugins:
+
+```bash
+export S3_TF_BUCKET_KEY="camunda-terraform/terraform.tfstate"
+
+echo "Storing terraform state in s3://$S3_TF_BUCKET_NAME/$S3_TF_BUCKET_KEY"
+
+terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY"
+```
+
+Terraform will connect to the S3 bucket to manage the state file, ensuring remote and persistent storage.
+
+### OpenShift cluster module setup
+
+This module sets up the foundational configuration for ROSA HCP and Terraform usage.
+
+We will leverage [Terraform modules](https://developer.hashicorp.com/terraform/language/modules), which allow us to abstract resources into reusable components, simplifying infrastructure management.
+
+The [Camunda-provided module](https://github.com/camunda/camunda-tf-rosa) is publicly available and serves as a robust starting point for deploying a Red Hat OpenShift cluster on AWS using a Hosted Control Plane. It is highly recommended to review this module before implementation to understand its structure and capabilities.
+
+Please note that this module is based on the official [ROSA HCP Terraform module documentation](https://docs.openshift.com/rosa/rosa_hcp/terraform/rosa-hcp-creating-a-cluster-quickly-terraform.html). It is presented as an example for running Camunda 8 in ROSA. For advanced use cases or custom setups, we encourage you to use the official module, which includes vendor-supported features.
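+
+As with any Terraform module, it is consumed by declaring a `module` block that points at the module source and passes input variables. The snippet below is illustrative only: the source pin and input names are assumptions, and the `cluster.tf` file referenced later in this guide is the authoritative example:
+
+```hcl
+module "rosa_hcp" {
+  # Assumed source pin; verify it against the Camunda ROSA module documentation.
+  source = "git::https://github.com/camunda/camunda-tf-rosa//modules/rosa-hcp?ref=v2.0.0"
+
+  # Hypothetical inputs, shown only to illustrate how values are passed to the module.
+  cluster_name       = "my-rosa-cluster"
+  availability_zones = ["eu-central-1a", "eu-central-1b", "eu-central-1c"]
+}
+```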
+
+#### Set up ROSA authentication
+
+To set up a ROSA cluster, certain prerequisites must be configured on your AWS account. Below is an excerpt from the [official ROSA planning prerequisites checklist](https://docs.openshift.com/rosa/rosa_planning/rosa-cloud-expert-prereq-checklist.html):
+
+1. Verify that your AWS account is correctly configured:
+
+ ```bash
+ aws sts get-caller-identity
+ ```
+
+1. Check whether the ELB service-linked role exists. If you have never created a load balancer in your AWS account, the role for Elastic Load Balancing (ELB) might not exist yet:
+
+ ```bash
+ aws iam get-role --role-name "AWSServiceRoleForElasticLoadBalancing"
+ ```
+
+ If it doesn't exist, create it:
+
+ ```bash
+ aws iam create-service-linked-role --aws-service-name "elasticloadbalancing.amazonaws.com"
+ ```
+
+1. Create a Red Hat Hybrid Cloud Console account if you don’t already have one: [Red Hat Hybrid Cloud Console](https://console.redhat.com/).
+
+1. Enable ROSA on your AWS account via the [AWS Console](https://console.aws.amazon.com/rosa/).
+
+1. Enable HCP ROSA on [AWS Marketplace](https://docs.openshift.com/rosa/cloud_experts_tutorials/cloud-experts-rosa-hcp-activation-and-account-linking-tutorial.html):
+
+ - Navigate to the ROSA console: [AWS ROSA Console](https://console.aws.amazon.com/rosa).
+ - Choose **Get started**.
+ - On the **Verify ROSA prerequisites** page, select **I agree to share my contact information with Red Hat**.
+ - Choose **Enable ROSA**.
+
+ **Note**: Only a single AWS account can be associated with a Red Hat account for service billing.
+
+1. Install the ROSA CLI from the [OpenShift AWS Console](https://console.redhat.com/openshift/downloads#tool-rosa).
+
+1. Get an API token: go to the [OpenShift Cluster Management API Token](https://console.redhat.com/openshift/token/rosa) page, click **Load token**, and save it. Then use the token to log in with the ROSA CLI:
+
+ ```bash
+ export RHCS_TOKEN=""
+ rosa login --token="$RHCS_TOKEN"
+
+ # Verify the login
+ rosa whoami
+ ```
+
+1. Verify your AWS quotas:
+
+ ```bash
+ rosa verify quota --region="$AWS_REGION"
+ ```
+
+ **Note**: This may fail due to organizational policies.
+
+1. Create the required account roles:
+
+ ```bash
+ rosa create account-roles --mode auto
+ ```
+
+1. If your AWS quotas are insufficient, consult the following:
+
+ - [Provisioned AWS Infrastructure](https://docs.openshift.com/rosa/rosa_planning/rosa-sts-aws-prereqs.html#rosa-aws-policy-provisioned_rosa-sts-aws-prereqs)
+ - [Required AWS Service Quotas](https://docs.openshift.com/rosa/rosa_planning/rosa-sts-required-aws-service-quotas.html#rosa-sts-required-aws-service-quotas)
+
+1. Ensure the `oc` CLI is installed. If it’s not already installed, follow the [official ROSA oc installation guide](https://docs.openshift.com/rosa/cli_reference/openshift_cli/getting-started-cli.html#cli-getting-started):
+
+ ```bash
+ rosa verify openshift-client
+ ```
+
+#### Set up the ROSA cluster module
+
+1. Create a `cluster.tf` file in the same directory as your `config.tf` file.
+2. Add the following content to your newly created `cluster.tf` file to utilize the provided module:
+
+ :::note Configure your cluster
+
+ Please customize the cluster name and availability zones with the values you previously retrieved from the Red Hat Console.
+ Additionally, provide a secure username and password for the cluster administrator.
+
+ Ensure that the environment variable `RHCS_TOKEN` is set with your [OpenShift Cluster Management API Token](https://console.redhat.com/openshift/token/rosa).
+
+ By default, this cluster will be accessible from the internet. If you prefer to restrict access, please refer to the official documentation of the module.
+
+ :::
+
+ ```hcl reference
+ https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/cluster.tf
+ ```
+
+ :::caution Camunda Terraform module
+
+ This ROSA module is based on the [official Red Hat Terraform module for ROSA HCP](https://registry.terraform.io/modules/terraform-redhat/rosa-hcp/rhcs/latest). Please be aware of potential differences and choices in implementation between this module and the official one.
+
+ We invite you to consult the [Camunda ROSA module documentation](https://github.com/camunda/camunda-tf-rosa/blob/v2.0.0/modules/rosa-hcp/README.md) for more information.
+
+ :::
+
+3. [Initialize](#initialize-terraform) Terraform for this module using the following Terraform command:
+
+ ```bash
+ terraform init -backend-config="bucket=$S3_TF_BUCKET_NAME" -backend-config="key=$S3_TF_BUCKET_KEY"
+ ```
+
+4. Configure user access to the cluster. By default, the user who creates the OpenShift cluster has administrative access. To grant access to other users, follow the [Red Hat documentation for granting admin rights to users](https://docs.openshift.com/rosa/cloud_experts_tutorials/cloud-experts-getting-started/cloud-experts-getting-started-admin-rights.html) once the cluster is created.
+
+5. Customize the cluster setup. The module offers various input options that allow you to further customize the cluster configuration. For a comprehensive list of available options and detailed usage instructions, refer to the [ROSA module documentation](https://github.com/camunda/camunda-tf-rosa/blob/v2.0.0/modules/rosa-hcp/README.md).
+
+### Define outputs
+
+**Terraform** allows you to define outputs, which make it easier to retrieve important values generated during execution, such as cluster endpoints and other necessary configurations for Helm setup.
+
+Each module that you have previously set up contains an output definition at the end of the file. You can adjust them to your needs.
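+
+For instance, forwarding a value from the cluster module as a root-level output might look like the sketch below; the module attribute name is an assumption, so check the module's documented outputs for the real names:
+
+```hcl
+output "cluster_api_url" {
+  description = "API endpoint of the ROSA HCP cluster"
+  value       = module.rosa_hcp.openshift_api_url # attribute name is an assumption
+}
+```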
+
+### Execution
+
+:::note Secret management
+
+We strongly recommend managing sensitive information, such as the OpenSearch and Aurora usernames and passwords, with a secure secrets management solution like HashiCorp Vault. For details on how to inject secrets directly into Terraform via Vault, see the [Terraform Vault Secrets Injection Guide](https://developer.hashicorp.com/terraform/tutorials/secrets/secrets-vault).
+
+:::
+
+1. Open a terminal in the created Terraform folder where `config.tf` and other `.tf` files are.
+
+2. Plan the configuration files:
+
+ ```bash
+ terraform plan -out cluster.plan # describe what will be created
+ ```
+
+3. After reviewing the plan, you can confirm and apply the changes.
+
+ ```bash
+ terraform apply cluster.plan # apply the creation
+ ```
+
+Terraform will now create the OpenShift cluster with all the necessary configurations. The completion of this process may require approximately 20-30 minutes for each component.
+
+### Reference files
+
+Depending on the installation path you have chosen, you can find the reference files used on this page:
+
+- **Standard installation:** [Reference Files](https://github.com/camunda/camunda-deployment-references/tree/feature/openshift-ra-standard/aws/rosa-hcp/camunda-versions/8.6)
+
+## 2. Preparation for Camunda 8 installation
+
+### Access the created OpenShift cluster
+
+You can access the created OpenShift cluster using the following steps:
+
+Set up the required environment variables:
+
+```shell
+export CLUSTER_NAME="$(terraform console <<
+
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
Red Hat OpenShift, a Kubernetes distribution maintained by [Red Hat](https://www.redhat.com/en/technologies/cloud-computing/openshift), provides options for both managed and on-premises hosting.
-:::note
-Deploying Camunda 8 on Red Hat OpenShift is achievable using Helm, given the appropriate configurations. However, it's important to note that the [Security Context Constraints (SCCs)](#security-context-constraints-sccs) and [Routes](./redhat-openshift.md?current-ingress=openshift-routes#using-openshift-routes) configurations might require slight deviations from the guidelines provided in the [general Helm deployment guide](/self-managed/setup/install.md).
-:::
+Deploying Camunda 8 on Red Hat OpenShift is supported using Helm, given the appropriate configurations.
+
+However, it's important to note that the [Security Context Constraints (SCCs)](#security-context-constraints-sccs) and [Routes](./redhat-openshift.md?current-ingress=openshift-routes#using-openshift-routes) configurations might require slight deviations from the guidelines provided in the [general Helm deployment guide](/self-managed/setup/install.md).
## Cluster Specification
When deploying Camunda 8 on an OpenShift cluster, the cluster specification should align with your specific requirements and workload characteristics. Here's a suggested configuration to begin with:
-- **Instance type:** 4 vCPUs (x86_64, >3.1 GHz), 16 GiB Memory (for example, [m5.xlarge on AWS](https://aws.amazon.com/en/ebs/general-purpose/))
+- **Instance type:** 4 vCPUs (x86_64, >3.1 GHz), 16 GiB Memory (for example, [m7i.xlarge on AWS](https://aws.amazon.com/en/ebs/general-purpose/))
- **Number of dedicated nodes:** 4
- **Volume type:** SSD volumes (with between 1000 and 3000 IOPS per volume, and a throughput of 1,000 MB/s per volume, for instance, [gp3 on AWS](https://aws.amazon.com/en/ebs/general-purpose/))
+If you need to set up an OpenShift cluster on a cloud provider, we recommend our [guide to deploying a ROSA cluster](/self-managed/setup/deploy/amazon/openshift/terraform-setup.md).
+
### Supported Versions
We conduct testing and ensure compatibility against the following OpenShift versions:
| OpenShift Version | [End of Support Date](https://access.redhat.com/support/policy/updates/openshift) |
| ----------------- | --------------------------------------------------------------------------------- |
+| 4.17.x | June 27, 2025 |
| 4.16.x | December 27, 2025 |
| 4.15.x | August 27, 2025 |
| 4.14.x | May 1, 2025 |
-| 4.13.x | November 17, 2024 |
-:::caution
+:::caution Version compatibility
+
Camunda 8 supports OpenShift versions in the Red Hat General Availability, Full Support, and Maintenance Support life cycle phases. For more information, refer to the [Red Hat OpenShift Container Platform Life Cycle Policy](https://access.redhat.com/support/policy/updates/openshift).
+
:::
-## Deploying Camunda 8 in OpenShift
+## Requirements
-Depending on your OpenShift cluster's Security Context Constraints (SCCs) configuration, the deployment process may vary.
+- [Helm (3.16+)](https://helm.sh/docs/intro/install/)
+- [kubectl (1.30+)](https://kubernetes.io/docs/tasks/tools/#kubectl) to interact with the cluster.
+- [jq (1.7+)](https://jqlang.github.io/jq/download/) to interact with some variables.
+- [GNU envsubst](https://www.gnu.org/software/gettext/manual/html_node/envsubst-Invocation.html) to generate manifests.
+- [oc (version supported by your OpenShift)](https://docs.openshift.com/container-platform/4.17/cli_reference/openshift_cli/getting-started-cli.html) to interact with OpenShift.
+- A namespace to host the Camunda Platform, in this guide we will reference `camunda` as the target namespace.
-
-
+## Deploy Camunda 8 via Helm charts
+
+### Configure your deployment
+
+Start by creating a `values.yml` file to store the configuration for your environment.
+This file will contain key-value pairs that will be substituted using `envsubst`.
+Throughout this guide, you will add and merge values into this file to tailor the deployment to your needs (a short `envsubst` example follows the note below).
+
+You can find a reference example of this file here:
+
+```hcl reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/base.yml
+```
+
+:::warning Merging YAML files
+
+This guide references multiple configuration files that need to be merged into a single YAML file. Be cautious to avoid duplicate keys when merging the files. Additionally, pay close attention when copying and pasting YAML content. Ensure that the separator notation `---` does not inadvertently split the configuration into multiple documents.
+
+We strongly recommend double-checking your YAML file before applying it. You can use tools like [yamllint.com](https://www.yamllint.com/) or the [YAML Lint CLI](https://github.com/adrienverge/yamllint) if you prefer not to share your information online.
+
+:::
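+
+As an illustration of the `envsubst` substitution step mentioned above (the file names are examples only):
+
+```bash
+# Render the final Helm values by substituting exported environment variables
+# (for example, DOMAIN_NAME) into the template.
+envsubst < values.yml > generated-values.yml
+
+# Review the rendered file before installing the chart.
+cat generated-values.yml
+```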
+
+#### Configuring the Ingress
+
+Before exposing services outside the cluster, we need an Ingress component. Here's how you can configure it:
+
+
+
+
+
+[Routes](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html) expose services externally by linking a URL to a service within the cluster. OpenShift supports both the [standard Kubernetes Ingress and routes](https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html), giving cluster users the flexibility to choose.
+
+Routes exist because their specification predates Ingress, and their functionality differs from Ingress; for example, unlike Ingress, a route cannot link multiple services or use paths.
+
+To use these routes for the Zeebe Gateway, configure them through the Ingress configuration as well (see the illustrative Route manifest below).
+
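+For orientation, a minimal OpenShift Route manifest looks like the sketch below. The names and host are placeholders, and in practice the routes are created for you through the chart's Ingress configuration rather than applied by hand:
+
+```yaml
+apiVersion: route.openshift.io/v1
+kind: Route
+metadata:
+  name: camunda-zeebe-gateway # placeholder name
+spec:
+  host: zeebe.camunda.example.com # placeholder host
+  to:
+    kind: Service
+    name: camunda-zeebe-gateway # placeholder service name
+  port:
+    targetPort: 26500 # gRPC port of the Zeebe Gateway
+  tls:
+    termination: passthrough # gRPC (HTTP/2) requires passthrough or re-encrypt termination
+```
+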
+#### Setting up the application domain for Camunda 8
+
+The route created by OpenShift will use a domain to provide access to the platform. By default, you can use the OpenShift applications domain, but any other domain supported by the router can also be used.
+
+To retrieve the OpenShift applications domain (used as an example here), run the following command:
-### With restrictive SCCs
+```bash
+export OPENSHIFT_APPS_DOMAIN=$(oc get ingresses.config.openshift.io cluster -o jsonpath='{.spec.domain}')
+```
+
+Next, define the route domain that will be used for the Camunda 8 deployment. For example:
+
+```bash
+export DOMAIN_NAME="camunda.$OPENSHIFT_APPS_DOMAIN"
+
+echo "Camunda 8 will be reachable from $DOMAIN_NAME"
+```
+
+If you choose to use a custom domain instead, ensure it is supported by your router configuration and replace the example domain with your desired domain. For more details on configuring custom domains in OpenShift, refer to the official [custom domain OpenShift documentation](https://docs.openshift.com/dedicated/applications/deployments/osd-config-custom-domains-applications.html).
+
+#### Checking if HTTP/2 is enabled
+
+As the Zeebe Gateway also uses `gRPC` (which relies on `HTTP/2`), [HTTP/2 Ingress Connectivity must be enabled](https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress).
+
+To check if HTTP/2 is already enabled on your OpenShift cluster, run the following command:
+
+```bash
+oc get ingresses.config/cluster -o json | jq '.metadata.annotations."ingress.operator.openshift.io/default-enable-http2"'
+```
+
+Alternatively, if you use a dedicated IngressController for the deployment:
+
+```bash
+# List your IngressControllers
+oc -n openshift-ingress-operator get ingresscontrollers
+
+# Replace <ingresscontroller-name> with the name of your IngressController
+oc -n openshift-ingress-operator get ingresscontrollers/<ingresscontroller-name> -o json | jq '.metadata.annotations."ingress.operator.openshift.io/default-enable-http2"'
+```
+
+- If the output is `"true"`, it means HTTP/2 is enabled.
+- If the output is `null` or empty, HTTP/2 is not enabled.
+
+
+**Enable HTTP/2**
+
+If HTTP/2 is not enabled, you can enable it by running the following command:
+
+**IngressController configuration:**
+
+```bash
+# Replace <ingresscontroller-name> with the name of your IngressController
+oc -n openshift-ingress-operator annotate ingresscontrollers/<ingresscontroller-name> ingress.operator.openshift.io/default-enable-http2=true
+```
+
+**Global cluster configuration:**
+
+```bash
+oc annotate ingresses.config/cluster ingress.operator.openshift.io/default-enable-http2=true
+```
+
+This will add the necessary annotation to [enable HTTP/2 for Ingress in your OpenShift cluster](https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress) globally on the cluster.
+
+
+
+#### Configure Route TLS
+
+Additionally, the Zeebe Gateway should be configured to use an encrypted connection with TLS. In OpenShift, the connection from HAProxy to the Zeebe Gateway service can use HTTP/2 only for re-encryption or pass-through routes, and not for edge-terminated or insecure routes.
+
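+For orientation, the service certificate and the re-encrypt route described in the steps below are typically configured along these lines. This is a sketch only, using the secret names introduced in this section; the referenced `zeebe-gateway-route.yml` file is the authoritative configuration.
+
+```yaml
+zeebeGateway:
+  service:
+    annotations:
+      # Ask OpenShift to issue a serving certificate for the gateway service
+      service.beta.openshift.io/serving-cert-secret-name: camunda-platform-internal-service-certificate
+  ingress:
+    grpc:
+      enabled: true
+      className: openshift-default
+      host: "zeebe-$DOMAIN_NAME"
+      annotations:
+        # Create a re-encrypt route that trusts the internal service certificate
+        route.openshift.io/termination: reencrypt
+        route.openshift.io/destination-ca-certificate-secret: camunda-platform-internal-service-certificate
+      tls:
+        enabled: true
+        secretName: camunda-platform-external-certificate
+```
+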
+1. **Zeebe Gateway:** two [TLS secrets](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) are required for the Zeebe Gateway, one for the **service** and one for the **route**:
+
+   - The first TLS secret, referenced as `camunda-platform-internal-service-certificate`, is issued to the Zeebe Gateway service name. It must use the [PKCS #8 syntax](https://en.wikipedia.org/wiki/PKCS_8) or [PKCS #1 syntax](https://en.wikipedia.org/wiki/PKCS_1), as Zeebe only supports these formats.
+
+ In the example below, a TLS certificate is generated for the Zeebe Gateway service with an [annotation](https://docs.openshift.com/container-platform/latest/security/certificates/service-serving-certificate.html). The generated certificate will be in the form of a secret.
+
+ Another option is [Cert Manager](https://docs.openshift.com/container-platform/latest/security/cert_manager_operator/index.html). For more details, review the [OpenShift documentation](https://docs.openshift.com/container-platform/latest/networking/routes/secured-routes.html#nw-ingress-creating-a-reencrypt-route-with-a-custom-certificate_secured-routes).
+
+
+   **PKCS #8, PKCS #1 syntax**
+
+ > PKCS #1 private key encoding. PKCS #1 produces a PEM block that contains the private key algorithm in the header and the private key in the body. A key that uses this can be recognised by its BEGIN RSA PRIVATE KEY or BEGIN EC PRIVATE KEY header. NOTE: This encoding is not supported for Ed25519 keys. Attempting to use this encoding with an Ed25519 key will be ignored and default to PKCS #8.
+
+ > PKCS #8 private key encoding. PKCS #8 produces a PEM block with a static header and both the private key algorithm and the private key in the body. A key that uses this encoding can be recognised by its BEGIN PRIVATE KEY header.
+
+   [PKCS #1, PKCS #8 syntax definition from cert-manager](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.PrivateKeyEncoding)
+
+
+
+   - The second TLS secret, referenced as `camunda-platform-external-certificate`, is used on the exposed route; for example, this can be the same TLS secret used for the Ingress. We also configure the Zeebe Gateway Ingress to create a [Re-encrypt Route](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html#nw-ingress-creating-a-route-via-an-ingress_route-configuration).
+
+ Finally, we mount the **Service Certificate Secret** (`camunda-platform-internal-service-certificate`) to the Zeebe Gateway Pod.
+ Update your `values.yml` file with the following:
+
+ ```yaml reference
+ https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/zeebe-gateway-route.yml
+ ```
+
+   The domain used by the Zeebe Gateway for gRPC is `zeebe-$DOMAIN_NAME`, which differs from the domain used by the other components (`$DOMAIN_NAME`) to avoid conflicts. Also note that the port used for gRPC is `443`.
+
+2. **Operate:** mount the **Service Certificate Secret** to the Operate pod and configure the secure TLS connection. Here, only the `tls.crt` file is required.
+
+Update your `values.yml` file with the following:
+
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/operate-route.yml
+```
+
+The actual configuration properties can be reviewed [in the Operate configuration documentation](/self-managed/operate-deployment/operate-configuration.md#zeebe-broker-connection).
+
+3. **Tasklist:** mount the **Service Certificate Secret** to the Tasklist pod and configure the secure TLS connection. Here, only the `tls.crt` file is required.
+
+ Update your `values.yml` file with the following:
+
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/tasklist-route.yml
+```
+
+The actual configuration properties can be reviewed [in the Tasklist configuration documentation](/self-managed/tasklist-deployment/tasklist-configuration.md#zeebe-broker-connection).
+
+4. **Connectors:** update your `values.yml` file with the following:
+
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/connectors-route.yml
+```
+
+The actual configuration properties can be reviewed [in the Connectors configuration documentation](/self-managed/connectors-deployment/connectors-configuration.md#zeebe-broker-connection).
+
+5. Configure all other applications running inside the cluster and connecting to the Zeebe Gateway to also use TLS.
+
+6. Set up the global configuration to enable the single Ingress definition with the host. Update your configuration file as shown below:
+
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/domain.yml
+```
+
+**Using Kubernetes Ingress**
+
+[Routes](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html) serve as OpenShift's default Ingress implementation.
+
+If you find that its features aren't suitable for your needs, or if you prefer to use a Kubernetes-native Ingress controller, such as the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx), [you have that option](https://www.redhat.com/en/blog/a-guide-to-using-routes-ingress-and-gateway-apis-in-kubernetes-without-vendor-lock-in).
+
+For guidance on installing an Ingress controller, you can refer to the [Ingress Setup documentation](/self-managed/setup/guides/ingress-setup.md).
+
+:::note Difference between ingress-nginx and NGINX Ingress
+
+Do not confuse the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx) with the [NGINX Ingress Controller](https://www.redhat.com/en/blog/using-nginx-ingress-controller-red-hat-openshift) that is endorsed by Red Hat for usage with OpenShift. Despite very similar names, they are two different products.
+
+If you decide to use the Red Hat-endorsed [NGINX Ingress Controller](https://www.redhat.com/en/blog/using-nginx-ingress-controller-red-hat-openshift), additional adjustments to the Camunda 8 Ingress objects and to the NGINX Ingress Controller itself are required to make `gRPC` and `HTTP/2` connections work. In that case, refer to the [example and the prerequisites](https://github.com/nginxinc/kubernetes-ingress/blob/main/examples/ingress-resources/grpc-services/README.md).
+
+:::
+
+**Without a domain**
+
+If you do not have a domain name or do not intend to use one for your Camunda 8 deployment, external access to Camunda 8 web endpoints from outside the OpenShift cluster will not be possible.
+
+However, you can use `kubectl port-forward` to access the Camunda platform without a domain name or Ingress configuration. For more information, refer to the [kubectl port-forward documentation](https://kubernetes.io/docs/reference/kubectl/generated/kubectl_port-forward/).
+
+To make this work, you will need to configure the deployment to reference `localhost` with the forwarded port. Update your `values.yml` file with the following:
+
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/no-domain.yml
+```
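+
+For example, assuming the Helm release is named `camunda` and deployed in the `camunda` namespace (as elsewhere in this guide), the Zeebe Gateway gRPC endpoint could be forwarded like this; adjust the service name if your release name differs:
+
+```bash
+# Forward the Zeebe Gateway gRPC port to localhost:26500
+kubectl port-forward services/camunda-zeebe-gateway 26500:26500 -n camunda
+```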
+
+
+
+#### Configuring the Security Context Constraints
+
+Depending on your OpenShift cluster's Security Context Constraints (SCCs) configuration, the deployment process may vary.
By default, OpenShift employs more restrictive SCCs. The Helm chart must assign `null` to the user running all components and dependencies.
+
+
+**With restrictive SCCs**
+
The `global.compatibility.openshift.adaptSecurityContext` variable in your values.yaml can be used to set the following possible values:
- `force`: The `runAsUser` and `fsGroup` values will be null in all components.
- `disabled`: The `runAsUser` and `fsGroup` values will not be modified (default).
-To deploy Camunda 8 on OpenShift:
-
-1. Install [Helm and other CLI tools](/self-managed/setup/install.md#prerequisites).
-2. Install the [Camunda Helm chart repository](/self-managed/setup/install.md#helm-repository).
-3. Set `global.compatibility.openshift.adaptSecurityContext` to `force`
-
-```shell
-helm install camunda camunda/camunda-platform --skip-crds \
- --set global.compatibility.openshift.adaptSecurityContext=force
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/scc.yml
```
-### With permissive SCCs
-
To use permissive SCCs, simply install the charts as they are. Follow the [general Helm deployment guide](/self-managed/setup/install.md).
+```yaml reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/helm-values/no-scc.yml
+```
+
-## Available Configurations of OpenShift Components
+#### Enable Enterprise components
+
+Some components are not enabled by default in this deployment. For more information on how to configure and enable these components, refer to [configuring Enterprise components and Connectors](/self-managed/setup/install.md#configuring-enterprise-components-and-connectors).
+
+#### Fill your deployment with actual values
+
+Once you've prepared the `values.yml` file, run the following `envsubst` command to substitute the environment variables with their actual values:
+
+```bash
+# generate the final values
+envsubst < values.yml > generated-values.yml
+
+# print the result
+cat generated-values.yml
+```
+
+:::info Camunda Helm chart no longer automatically generates passwords
+
+Starting with **Camunda 8.6**, the Helm chart deprecated the automatic generation of secrets, and this feature has been fully removed in **Camunda 8.7**.
+
+:::
+
+Next, store the required passwords in a Kubernetes secret that will be used by the Helm chart. Below is an example of how to set up this secret. You can use `openssl` to generate random secrets and store them in environment variables:
+
+```bash reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/generate-passwords.sh
+```
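+
+The pattern used in that script looks roughly like the following; the variable name below is a placeholder for illustration, and the referenced script defines the actual ones:
+
+```bash
+# Placeholder example: generate a random password and keep it in the current shell session
+export EXAMPLE_COMPONENT_PASSWORD="$(openssl rand -hex 16)"
+```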
+
+Use these environment variables in the `kubectl` command to create the secret.
+
+- The `smtp-password` should be replaced with the appropriate external value ([see how it's used by Web Modeler](/self-managed/modeler/web-modeler/configuration/configuration.md#smtp--email)).
+
+```bash reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/create-identity-secret.sh
+```
+
+### Install Camunda 8 using Helm
+
+Now that the `generated-values.yml` is ready, you can install Camunda 8 using Helm.
+
+The following are the required environment variables with some example values:
+
+```bash reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/chart-env.sh
+```
+
+Then run the following command:
+
+```bash reference
+https://github.com/camunda/camunda-deployment-references/blob/main/aws/rosa-hcp/camunda-versions/8.6/procedure/install/install-chart.sh
+```
+
+This command:
+
+- Installs (or upgrades) the Camunda platform using the Helm chart.
+- Substitutes the appropriate version using the `$CAMUNDA_HELM_CHART_VERSION` environment variable.
+- Applies the configuration from `generated-values.yml`.
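+
+A minimal sketch of such a command, based on the values and variables used throughout this guide, looks like this; the referenced `install-chart.sh` remains the authoritative version:
+
+```bash
+helm upgrade --install camunda camunda/camunda-platform \
+  --version "$CAMUNDA_HELM_CHART_VERSION" \
+  --namespace camunda \
+  -f generated-values.yml
+```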
+
+:::note
+
+This guide uses `helm upgrade --install` because it installs the chart on the initial deployment and upgrades an existing release on subsequent runs. This simplifies future [Camunda 8 Helm upgrades](/self-managed/setup/upgrade.md) and other component upgrades.
+
+:::
+
+You can track the progress of the installation using the following command:
+
+```bash
+watch -n 5 '
+ kubectl get pods -n camunda --output=wide;
+ if [ $(kubectl get pods -n camunda --field-selector=status.phase!=Running -o name | wc -l) -eq 0 ] &&
+ [ $(kubectl get pods -n camunda -o json | jq -r ".items[] | select(.status.containerStatuses[]?.ready == false)" | wc -l) -eq 0 ];
+ then
+ echo "All pods are Running and Healthy - Installation completed!";
+ else
+ echo "Some pods are not Running or Healthy";
+ fi
+'
+```
+
+## Verify connectivity to Camunda 8
+
+Follow the [guide to verify connectivity to Camunda 8](/self-managed/setup/deploy/amazon/amazon-eks/eks-helm.md#verify-connectivity-to-camunda-8).
+
+:::caution Domain name for gRPC Zeebe
+
+In this setup, the domain used for gRPC communication with Zeebe is slightly different from the one in the guide. Instead of using `zeebe.$DOMAIN_NAME`, you need to use `zeebe-$DOMAIN_NAME`.
+
+:::
+
+## Pitfalls to avoid
+
+For general deployment pitfalls, visit the [deployment troubleshooting guide](/self-managed/operational-guides/troubleshooting/troubleshooting.md).
### Security Context Constraints (SCCs)
@@ -144,220 +449,3 @@ If you deploy Camunda 8 (and related infrastructure) with permissive SCCs out of
-
-## Ingress Configuration
-
-Before exposing services outside the cluster, we need an Ingress component. Here's how you can configure it:
-
-
-
-
-### Using Kubernetes Ingress
-
-[Routes](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html) serve as OpenShift's default Ingress implementation.
-
-If you find that its features aren't suitable for your needs, or if you prefer to use a Kubernetes-native Ingress controller, such as the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx), [you have that option](https://www.redhat.com/en/blog/a-guide-to-using-routes-ingress-and-gateway-apis-in-kubernetes-without-vendor-lock-in).
-
-For guidance on installing an Ingress controller, you can refer to the [Ingress Setup documentation](/self-managed/setup/guides/ingress-setup.md).
-
-:::note Difference between ingress-nginx and NGINX Ingress
-
-Do not confuse the [ingress-nginx controller](https://github.com/kubernetes/ingress-nginx) with the [NGINX Ingress Controller](https://www.redhat.com/en/blog/using-nginx-ingress-controller-red-hat-openshift) that is endorsed by Red Hat for usage with OpenShift. Despite very similar names, they are two different products.
-
-If you should decide to use the Red Hat endorsed [NGINX Ingress Controller](https://www.redhat.com/en/blog/using-nginx-ingress-controller-red-hat-openshift), you would require additional adjustments done to the Camunda 8 Ingress objects and the NGINX Ingress Controller itself to make `gRPC` and `HTTP/2` connections work. In that case, please refer to the [example and the prerequisites](https://github.com/nginxinc/kubernetes-ingress/blob/main/examples/ingress-resources/grpc-services/README.md).
-
-:::
-
-
-
-
-### Using OpenShift Routes
-
-[Routes](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html) expose services externally by linking a URL to a service within the cluster. [OpenShift supports both the standard Kubernetes Ingress and routes](https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html), giving cluster users the flexibility to choose.
-
-The presence of routes is rooted in their specification predating Ingress. It's worth noting that the functionality of routes differs from Ingress; for example, unlike Ingress, routes don't allow multiple services to be linked to a single route or the use of paths.
-
-To use these routes for the Zeebe Gateway, configure this through Ingress as well.
-
-#### Prerequisite
-
-As the Zeebe Gateway also uses `gRPC` (which relies on `HTTP/2`), [HTTP/2 Ingress Connectivity has to be enabled](https://docs.openshift.com/container-platform/latest/networking/ingress-operator.html#nw-http2-haproxy_configuring-ingress).
-
-Additionally, the Zeebe Gateway should be configured to use an encrypted connection with TLS. In OpenShift, the connection from HAProxy to the Zeebe Gateway service can use HTTP/2 only for re-encryption or pass-through routes, and not for edge-terminated or insecure routes.
-
-#### Required Steps
-
-1. Provide two [TLS secrets](https://kubernetes.io/docs/concepts/configuration/secret/#tls-secrets) for the Zeebe Gateway.
-
- - The first TLS secret is issued to the Zeebe Gateway Service Name. This must use the [PKCS #8 syntax](https://en.wikipedia.org/wiki/PKCS_8) or [PKCS #1 syntax](https://en.wikipedia.org/wiki/PKCS_1) as Zeebe only supports these, referenced as `camunda-platform-internal-service-certificate`.
-
- In the example below, a TLS certificate is generated for the Zeebe Gateway service with an [annotation](https://docs.openshift.com/container-platform/latest/security/certificates/service-serving-certificate.html). The generated certificate will be in the form of a secret.
-
- ```yaml
- zeebeGateway:
- service:
- annotations:
- service.beta.openshift.io/serving-cert-secret-name: camunda-platform-internal-service-certificate
- ```
-
- Another option is [Cert Manager](https://docs.openshift.com/container-platform/latest/security/cert_manager_operator/index.html). For more details, review the [OpenShift documentation](https://docs.openshift.com/container-platform/latest/networking/routes/secured-routes.html#nw-ingress-creating-a-reencrypt-route-with-a-custom-certificate_secured-routes).
-
-
- PKCS #8, PKCS #1 syntax
-
- > PKCS #1 private key encoding. PKCS #1 produces a PEM block that contains the private key algorithm in the header and the private key in the body. A key that uses this can be recognised by its BEGIN RSA PRIVATE KEY or BEGIN EC PRIVATE KEY header. NOTE: This encoding is not supported for Ed25519 keys. Attempting to use this encoding with an Ed25519 key will be ignored and default to PKCS #8.
-
- > PKCS #8 private key encoding. PKCS #8 produces a PEM block with a static header and both the private key algorithm and the private key in the body. A key that uses this encoding can be recognised by its BEGIN PRIVATE KEY header.
-
- [PKCS #1, PKCS #8 syntax definitionfrom cert-manager](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.PrivateKeyEncoding)
-
-
-
- - The second TLS secret is used on the exposed route, referenced as `camunda-platform-external-certificate`. For example, this would be the same TLS secret used for Ingress.
-
-1. Configure your Zeebe Gateway Ingress to create a [Re-encrypt Route](https://docs.openshift.com/container-platform/latest/networking/routes/route-configuration.html#nw-ingress-creating-a-route-via-an-ingress_route-configuration):
-
- ```yaml
- zeebeGateway:
- ingress:
- grpc:
- annotations:
- route.openshift.io/termination: reencrypt
- route.openshift.io/destination-ca-certificate-secret: camunda-platform-internal-service-certificate
- className: openshift-default
- tls:
- enabled: true
- secretName: camunda-platform-external-certificate
- ```
-
-1. Mount the **Service Certificate Secret** to the Zeebe Gateway Pod:
-
- ```yaml
- zeebeGateway:
- env:
- - name: ZEEBE_GATEWAY_SECURITY_ENABLED
- value: "true"
- - name: ZEEBE_GATEWAY_SECURITY_CERTIFICATECHAINPATH
- value: /usr/local/zeebe/config/tls.crt
- - name: ZEEBE_GATEWAY_SECURITY_PRIVATEKEYPATH
- value: /usr/local/zeebe/config/tls.key
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/zeebe/config/tls.crt
- subPath: tls.crt
- - name: key
- mountPath: /usr/local/zeebe/config/tls.key
- subPath: tls.key
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
- - name: key
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.key
- path: tls.key
- defaultMode: 420
- ```
-
-1. Mount the **Service Certificate Secret** to the Operate and Tasklist pods and configure the secure TLS connection. Here, only the `tls.crt` file is required.
-
- For Operate:
-
- ```yaml
- operate:
- env:
- - name: CAMUNDA_OPERATE_ZEEBE_SECURE
- value: "true"
- - name: CAMUNDA_OPERATE_ZEEBE_CERTIFICATEPATH
- value: /usr/local/operate/config/tls.crt
- - name: CAMUNDA_OPERATE_ZEEBE_BROKERCONTACTPOINT
- value: camunda-zeebe-gateway.camunda.svc.cluster.local:26500
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/operate/config/tls.crt
- subPath: tls.crt
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
- ```
-
- The actual configuration properties can be reviewed [in the Operate configuration documentation](/self-managed/operate-deployment/operate-configuration.md#zeebe-broker-connection).
-
- For Tasklist:
-
- ```yaml
- tasklist:
- env:
- - name: CAMUNDA_TASKLIST_ZEEBE_SECURE
- value: "true"
- - name: CAMUNDA_TASKLIST_ZEEBE_CERTIFICATEPATH
- value: /usr/local/tasklist/config/tls.crt
- - name: CAMUNDA_TASKLIST_ZEEBE_BROKERCONTACTPOINT
- value: camunda-zeebe-gateway.camunda.svc.cluster.local:26500
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/tasklist/config/tls.crt
- subPath: tls.crt
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
- ```
-
- The actual configuration properties can be reviewed [in the Tasklist configuration documentation](/self-managed/tasklist-deployment/tasklist-configuration.md#zeebe-broker-connection).
-
-1. Configure Connectors:
-
- ```yaml
- connectors:
- inbound:
- mode: oauth
- env:
- - name: ZEEBE_CLIENT_BROKER_GATEWAY-ADDRESS
- value: "camunda-zeebe-gateway.camunda.svc.cluster.local:26500"
- - name: ZEEBE_CLIENT_SECURITY_PLAINTEXT
- value: "false"
- - name: CAMUNDA_CLIENT_ZEEBE_CACERTIFICATEPATH
- value: /usr/local/certificates/tls.crt
- extraVolumeMounts:
- - name: certificate
- mountPath: /usr/local/certificates/tls.crt
- subPath: tls.crt
- extraVolumes:
- - name: certificate
- secret:
- secretName: camunda-platform-internal-service-certificate
- items:
- - key: tls.crt
- path: tls.crt
- defaultMode: 420
- ```
-
- The actual configuration properties can be reviewed [in the Connectors configuration documentation](/self-managed/connectors-deployment/connectors-configuration.md#zeebe-broker-connection).
-
-1. Configure all other applications running inside the cluster and connecting to the Zeebe Gateway to also use TLS.
-
-
-
-
-
-
-
-## Pitfalls to avoid
-
-For general deployment pitfalls, visit the [deployment troubleshooting guide](/self-managed/operational-guides/troubleshooting/troubleshooting.md).
diff --git a/versioned_sidebars/version-8.6-sidebars.json b/versioned_sidebars/version-8.6-sidebars.json
index 66f10535b88..cff47e1dc55 100644
--- a/versioned_sidebars/version-8.6-sidebars.json
+++ b/versioned_sidebars/version-8.6-sidebars.json
@@ -1834,6 +1834,17 @@
"self-managed/setup/deploy/amazon/amazon-eks/irsa"
]
},
+ {
+ "type": "category",
+ "label": "ROSA",
+ "link": {
+ "type": "doc",
+ "id": "self-managed/setup/deploy/amazon/openshift/terraform-setup"
+ },
+ "items": [
+ "self-managed/setup/deploy/amazon/openshift/terraform-setup"
+ ]
+ },
"self-managed/setup/deploy/amazon/aws-marketplace"
],
"Microsoft (Azure)": [