diff --git a/.ci/scripts/set_cloud_env_params.sh b/.ci/scripts/set_cloud_env_params.sh deleted file mode 100755 index f803b892d3..0000000000 --- a/.ci/scripts/set_cloud_env_params.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/bash - -KIBANA_URL="$(terraform output -raw kibana_url)" -echo "KIBANA_URL=$KIBANA_URL" >>"$GITHUB_ENV" -ES_URL="$(terraform output -raw elasticsearch_url)" -echo "ES_URL=$ES_URL" >>"$GITHUB_ENV" -ES_USER="$(terraform output -raw elasticsearch_username)" -echo "ES_USER=$ES_USER" >>"$GITHUB_ENV" - -ES_PASSWORD=$(terraform output -raw elasticsearch_password) -echo "::add-mask::$ES_PASSWORD" -echo "ES_PASSWORD=$ES_PASSWORD" >>"$GITHUB_ENV" - -# Remove 'https://' from the URLs -KIBANA_URL_STRIPPED="${KIBANA_URL//https:\/\//}" -ES_URL_STRIPPED="${ES_URL//https:\/\//}" - -# Create test URLs with credentials -TEST_KIBANA_URL="https://${ES_USER}:${ES_PASSWORD}@${KIBANA_URL_STRIPPED}" -echo "::add-mask::${TEST_KIBANA_URL}" -echo "TEST_KIBANA_URL=${TEST_KIBANA_URL}" >>"$GITHUB_ENV" - -TEST_ES_URL="https://${ES_USER}:${ES_PASSWORD}@${ES_URL_STRIPPED}" -echo "::add-mask::${TEST_ES_URL}" -echo "TEST_ES_URL=${TEST_ES_URL}" >>"$GITHUB_ENV" - -EC2_CSPM=$(terraform output -raw ec2_cspm_ssh_cmd) -echo "::add-mask::$EC2_CSPM" -echo "EC2_CSPM=$EC2_CSPM" >>"$GITHUB_ENV" - -EC2_KSPM=$(terraform output -raw ec2_kspm_ssh_cmd) -echo "::add-mask::$EC2_KSPM" -echo "EC2_KSPM=$EC2_KSPM" >>"$GITHUB_ENV" - -EC2_ASSET_INV=$(terraform output -raw ec2_asset_inventory_ssh_cmd) -echo "::add-mask::$EC2_ASSET_INV" -echo "EC2_ASSET_INV=$EC2_ASSET_INV" >>"$GITHUB_ENV" - -EC2_CSPM_KEY=$(terraform output -raw ec2_cspm_key) -echo "::add-mask::$EC2_CSPM_KEY" -echo "EC2_CSPM_KEY=$EC2_CSPM_KEY" >>"$GITHUB_ENV" - -EC2_KSPM_KEY=$(terraform output -raw ec2_kspm_key) -echo "::add-mask::$EC2_KSPM_KEY" -echo "EC2_KSPM_KEY=$EC2_KSPM_KEY" >>"$GITHUB_ENV" - -EC2_ASSET_INV_KEY=$(terraform output -raw ec2_asset_inventory_key) -echo "::add-mask::$EC2_ASSET_INV_KEY" -echo 
"EC2_ASSET_INV_KEY=$EC2_ASSET_INV_KEY" >>"$GITHUB_ENV" - -KSPM_PUBLIC_IP=$(terraform output -raw ec2_kspm_public_ip) -echo "::add-mask::$KSPM_PUBLIC_IP" -echo "KSPM_PUBLIC_IP=$KSPM_PUBLIC_IP" >>"$GITHUB_ENV" - -ASSET_INV_PUBLIC_IP=$(terraform output -raw ec2_asset_inventory_public_ip) -echo "::add-mask::$ASSET_INV_PUBLIC_IP" -echo "ASSET_INV_PUBLIC_IP=$ASSET_INV_PUBLIC_IP" >>"$GITHUB_ENV" - -CSPM_PUBLIC_IP=$(terraform output -raw ec2_cspm_public_ip) -echo "::add-mask::$CSPM_PUBLIC_IP" -echo "CSPM_PUBLIC_IP=$CSPM_PUBLIC_IP" >>"$GITHUB_ENV" - -if [[ ${TF_VAR_cdr_infra:-} == "true" ]]; then - ec2_cloudtrail_public_ip=$(terraform output -raw ec2_cloudtrail_public_ip) - echo "::add-mask::$ec2_cloudtrail_public_ip" - echo "CLOUDTRAIL_PUBLIC_IP=$ec2_cloudtrail_public_ip" >>"$GITHUB_ENV" - - ec2_cloudtrail_key=$(terraform output -raw ec2_cloudtrail_key) - echo "::add-mask::$ec2_cloudtrail_key" - echo "CLOUDTRAIL_KEY=$ec2_cloudtrail_key" >>"$GITHUB_ENV" - - az_vm_activity_logs_public_ip=$(terraform output -raw az_vm_activity_logs_public_ip) - echo "::add-mask::$az_vm_activity_logs_public_ip" - echo "ACTIVITY_LOGS_PUBLIC_IP=$az_vm_activity_logs_public_ip" >>"$GITHUB_ENV" - - az_vm_activity_logs_key=$(terraform output -raw az_vm_activity_logs_key) - echo "::add-mask::$az_vm_activity_logs_key" - echo "ACTIVITY_LOGS_KEY=$az_vm_activity_logs_key" >>"$GITHUB_ENV" - - gcp_audit_logs_public_ip=$(terraform output -raw gcp_audit_logs_public_ip) - echo "::add-mask::$gcp_audit_logs_public_ip" - echo "AUDIT_LOGS_PUBLIC_IP=$gcp_audit_logs_public_ip" >>"$GITHUB_ENV" - - gcp_audit_logs_key=$(terraform output -raw gcp_audit_logs_key) - echo "::add-mask::$gcp_audit_logs_key" - echo "AUDIT_LOGS_KEY=$gcp_audit_logs_key" >>"$GITHUB_ENV" -fi diff --git a/.github/actions/cdr/action.yml b/.github/actions/cdr/action.yml new file mode 100644 index 0000000000..087cc5c718 --- /dev/null +++ b/.github/actions/cdr/action.yml @@ -0,0 +1,293 @@ +name: 'CDR Integrations Installation' +description: 'Deploy 
CDR Integrations to Elastic Cloud' +inputs: + deployment-name: + description: | + Name with letters, numbers, hyphens; start with a letter. Max 20 chars. e.g., 'my-env-123' + required: true + type: string + env-s3-bucket: + description: "S3 bucket" + required: true + type: string + aws-region: + description: "AWS region" + default: "eu-west-1" + required: false + type: string + gcp-project-id: + description: "GCP project ID" + default: "default" + required: false + type: string + gcp-service-account-json: + description: "GCP Service Account JSON" + default: "default" + required: false + type: string + aws-cloudtrail-s3-bucket: + description: "AWS Cloudtrail S3 bucket" + default: "default" + required: false + type: string + azure-eventhub-connection-string: + description: "Azure EventHub connection string" + default: "default" + required: false + type: string + azure-storage-account-key: + description: "Azure Storage Account key" + default: "default" + required: false + type: string + es-user: + description: "Elasticsearch user" + default: "elastic" + required: false + type: string + es-password: + description: "Elasticsearch password" + default: "changeme" + required: false + type: string + elk-stack-version: + description: "ELK Stack version" + default: "8.16.0" + required: false + type: string + kibana-url: + description: "Kibana URL" + default: "default" + required: false + type: string + azure-tags: + description: "Azure default tags" + default: "Key=division,Value=engineering" + required: false + type: string + tag-project: + description: "Optional project resource tag" + default: "test-environments" + required: false + type: string + tag-owner: + description: "Optional owner tag" + default: "cloudbeat" + required: false + type: string + +runs: + using: composite + steps: + - name: Deploy CDR Infrastructure + id: deploy-cdr-infra + env: + TF_VAR_deployment_name: ${{ inputs.deployment-name }} + TF_VAR_region: ${{ inputs.aws-region }} + TF_VAR_gcp_project_id: 
${{ inputs.gcp-project-id }} + TF_VAR_gcp_service_account_json: ${{ inputs.gcp-service-account-json }} + TF_VAR_project: ${{ inputs.tag-project }} + TF_VAR_owner: ${{ inputs.tag-owner }} + shell: bash + working-directory: "deploy/test-environments/cdr" + run: | + terraform init + terraform validate + terraform apply -auto-approve + + - name: Get CDR Outputs + id: generate-data + if: success() + shell: bash + working-directory: "deploy/test-environments/cdr" + run: | + aws_ec2_cloudtrail_public_ip=$(terraform output -raw ec2_cloudtrail_public_ip) + echo "::add-mask::$aws_ec2_cloudtrail_public_ip" + echo "aws-ec2-cloudtrail-public-ip=$aws_ec2_cloudtrail_public_ip" >> "$GITHUB_OUTPUT" + + aws_ec2_cloudtrail_key=$(terraform output -raw ec2_cloudtrail_key) + echo "::add-mask::$aws_ec2_cloudtrail_key" + echo "aws-ec2-cloudtrail-key=$aws_ec2_cloudtrail_key" >>"$GITHUB_OUTPUT" + + az_vm_activity_logs_public_ip=$(terraform output -raw az_vm_activity_logs_public_ip) + echo "::add-mask::$az_vm_activity_logs_public_ip" + echo "az-vm-activity-logs-public-ip=$az_vm_activity_logs_public_ip" >> "$GITHUB_OUTPUT" + + az_vm_activity_logs_key=$(terraform output -raw az_vm_activity_logs_key) + echo "::add-mask::$az_vm_activity_logs_key" + echo "az-vm-activity-logs-key=$az_vm_activity_logs_key" >> "$GITHUB_OUTPUT" + + gcp_audit_logs_public_ip=$(terraform output -raw gcp_audit_logs_public_ip) + echo "::add-mask::$gcp_audit_logs_public_ip" + echo "gcp-audit-logs-public-ip=$gcp_audit_logs_public_ip" >> "$GITHUB_OUTPUT" + + gcp_audit_logs_key=$(terraform output -raw gcp_audit_logs_key) + echo "::add-mask::$gcp_audit_logs_key" + echo "gcp-audit-logs-key=$gcp_audit_logs_key" >> "$GITHUB_OUTPUT" + + ec2_asset_inv_key=$(terraform output -raw ec2_asset_inventory_key) + echo "::add-mask::$ec2_asset_inv_key" + echo "ec2-asset-inv-key=$ec2_asset_inv_key" >> "$GITHUB_OUTPUT" + + asset_inv_public_ip=$(terraform output -raw ec2_asset_inventory_public_ip) + echo "::add-mask::$asset_inv_public_ip" + 
echo "asset-inv-public-ip=$asset_inv_public_ip" >> "$GITHUB_OUTPUT" + + - name: Install AWS Cloudtrail integration + id: cloudtrail-integration + if: ${{ !cancelled() && steps.deploy-cdr-infra.outcome == 'success' }} + working-directory: tests/integrations_setup + shell: bash + env: + CLOUDTRAIL_S3: ${{ inputs.aws-cloudtrail-s3-bucket }} + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + KIBANA_URL: ${{ inputs.kibana-url }} + run: | + poetry run python ./install_cloudtrail_integration.py + + - name: Deploy AWS Cloudtrail agent + if: ${{ !cancelled() && steps.deploy-cdr-infra.outcome == 'success' && steps.cloudtrail-integration.outcome == 'success' }} + working-directory: deploy/test-environments/cdr + shell: bash + env: + CLOUDTRAIL_KEY: ${{ steps.generate-data.outputs.aws-ec2-cloudtrail-key }} + CLOUDTRAIL_PUBLIC_IP: ${{ steps.generate-data.outputs.aws-ec2-cloudtrail-public-ip }} + run: | + scriptname="cloudtrail-linux.sh" + src="../../../tests/integrations_setup/$scriptname" + cmd="chmod +x $scriptname && ./$scriptname" + ../remote_setup.sh -k "$CLOUDTRAIL_KEY" -s "$src" -h "$CLOUDTRAIL_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" + + - name: Install Azure Activity Logs integration + id: az-activity-logs-integration + if: ${{ !cancelled() && steps.deploy-cdr-infra.outcome == 'success' }} + working-directory: tests/integrations_setup + shell: bash + env: + EVENTHUB: "activity-logs" + CONNECTION_STRING: ${{ inputs.azure-eventhub-connection-string }} + STORAGE_ACCOUNT: "testenvsactivitylogs" + STORAGE_ACCOUNT_KEY: ${{ inputs.azure-storage-account-key }} + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + KIBANA_URL: ${{ inputs.kibana-url }} + run: | + poetry run python ./install_az_activity_logs_integration.py + + - name: Deploy Azure Activity Logs agent + if: ${{ !cancelled() && steps.deploy-cdr-infra.outcome == 'success' && steps.az-activity-logs-integration.outcome == 'success' }} + working-directory: 
deploy/test-environments/cdr + shell: bash + env: + ACTIVITY_LOGS_KEY: ${{ steps.generate-data.outputs.az-vm-activity-logs-key }} + ACTIVITY_LOGS_PUBLIC_IP: ${{ steps.generate-data.outputs.az-vm-activity-logs-public-ip }} + run: | + scriptname="az_activity_logs.sh" + src="../../../tests/integrations_setup/$scriptname" + cmd="chmod +x $scriptname && ./$scriptname" + ../remote_setup.sh -k "$ACTIVITY_LOGS_KEY" -s "$src" -h "$ACTIVITY_LOGS_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" + + - name: Install GCP Audit Logs integration + id: gcp-audit-logs-integration + if: ${{ !cancelled() && steps.deploy-cdr-infra.outcome == 'success' }} + working-directory: tests/integrations_setup + shell: bash + env: + GCP_TOPIC_NAME: "test-envs-topic" + GCP_SUBSCRIPTION_NAME: "test-envs-topic-sub-id" + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + KIBANA_URL: ${{ inputs.kibana-url }} + run: | + poetry run python ./install_gcp_audit_logs_integration.py + + - name: Deploy GCP Audit Logs agent + if: ${{ !cancelled() && steps.deploy-cdr-infra.outcome == 'success' && steps.gcp-audit-logs-integration.outcome == 'success' }} + working-directory: deploy/test-environments/cdr + shell: bash + env: + AUDIT_LOGS_KEY: ${{ steps.generate-data.outputs.gcp-audit-logs-key }} + AUDIT_LOGS_PUBLIC_IP: ${{ steps.generate-data.outputs.gcp-audit-logs-public-ip }} + run: | + scriptname="gcp_audit_logs.sh" + src="../../../tests/integrations_setup/$scriptname" + cmd="chmod +x $scriptname && ./$scriptname" + ../remote_setup.sh -k "$AUDIT_LOGS_KEY" -s "$src" -h "$AUDIT_LOGS_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" + + - name: Check Asset Inventory supported version + id: asset-inventory-version-check + if: ${{ !cancelled() && steps.deploy-cdr-infra.outcome == 'success' }} + shell: bash + env: + STACK_VERSION: ${{ inputs.elk-stack-version }} + run: | + MIN_VERSION="8.16.0" + if [[ "$(echo -e "$MIN_VERSION\n$STACK_VERSION" | sort -V | head -n 1)" == "$MIN_VERSION" ]]; then + echo "Stack 
version meets the requirement: $STACK_VERSION >= $MIN_VERSION." + echo "asset_inventory_supported=true" >> $GITHUB_OUTPUT + else + echo "Stack version is below the requirement: $STACK_VERSION < $MIN_VERSION." + echo "asset_inventory_supported=false" >> $GITHUB_OUTPUT + fi + + - name: Install Azure Asset Inventory integration + id: azure-asset-inventory-integration + working-directory: tests/integrations_setup + if: ${{ !cancelled() && steps.asset-inventory-version-check.outputs.asset_inventory_supported == 'true'}} + shell: bash + env: + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + KIBANA_URL: ${{ inputs.kibana-url }} + run: | + poetry run python ./install_azure_asset_inventory_integration.py + + - name: Deploy Azure Asset Inventory agent + id: azure-asset-inventory-agent + working-directory: deploy/azure + if: ${{ !cancelled() && steps.asset-inventory-version-check.outputs.asset_inventory_supported == 'true' }} + shell: bash + env: + AZURE_TAGS: ${{ inputs.azure-tags }} + DEPLOYMENT_NAME: "${{ inputs.deployment-name }}-inventory" + run: ./install_agent_az_cli.sh + + - name: Install AWS Asset Inventory integration + id: aws-asset-inventory + if: ${{ !cancelled() && steps.asset-inventory-version-check.outputs.asset_inventory_supported == 'true' }} + working-directory: tests/integrations_setup + shell: bash + env: + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + KIBANA_URL: ${{ inputs.kibana-url }} + run: | + poetry run python ./install_aws_asset_inventory_integration.py + + - name: Deploy AWS Asset Inventory agent + if: ${{ !cancelled() && steps.asset-inventory-version-check.outputs.asset_inventory_supported == 'true' }} + working-directory: deploy/test-environments/cdr + shell: bash + env: + EC2_ASSET_INV_KEY: ${{ steps.generate-data.outputs.ec2-asset-inv-key }} + ASSET_INV_PUBLIC_IP: ${{ steps.generate-data.outputs.asset-inv-public-ip }} + run: | + scriptname="aws-asset-inventory-linux.sh" + 
src="../../../tests/integrations_setup/$scriptname" + cmd="chmod +x $scriptname && ./$scriptname" + ../remote_setup.sh -k "$EC2_ASSET_INV_KEY" -s "$src" -h "$ASSET_INV_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" + + - name: Upload CDR state + id: upload-state-cdr + if: always() + working-directory: deploy/test-environments + shell: bash + env: + S3_BUCKET: ${{ inputs.env-s3-bucket }} + CLOUDTRAIL_KEY: ${{ steps.generate-data.outputs.aws-ec2-cloudtrail-key }} + ACTIVITY_LOGS_KEY: ${{ steps.generate-data.outputs.az-vm-activity-logs-key }} + AUDIT_LOGS_KEY: ${{ steps.generate-data.outputs.gcp-audit-logs-key }} + EC2_ASSET_INV_KEY: ${{ steps.generate-data.outputs.ec2-asset-inv-key }} + run: | + ./manage_infrastructure.sh "cdr" "upload-state" diff --git a/.github/actions/cis/action.yml b/.github/actions/cis/action.yml new file mode 100644 index 0000000000..e89338985f --- /dev/null +++ b/.github/actions/cis/action.yml @@ -0,0 +1,304 @@ +name: 'CIS Integrations Installation' +description: 'Deploy CIS Integrations to Elastic Cloud' +inputs: + deployment-name: + description: | + Name with letters, numbers, hyphens; start with a letter. Max 20 chars. 
e.g., 'my-env-123' + required: true + type: string + aws-region: + description: "AWS region" + required: false + default: "eu-west-1" + type: string + cnvm-stack-name: + description: "CNVM CloudFormation stack name" + required: true + type: string + cspm-gcp-zone: + description: "GCP zone for CSPM agent deployment" + required: true + type: string + cspm-azure-creds: + description: "Azure credentials for CSPM agent deployment" + required: true + type: string + cspm-azure-tags: + description: "Azure tags for CSPM agent deployment" + required: true + type: string + stack-enrollment-token: + description: "Stack enrollment token" + required: true + type: string + env-s3-bucket: + description: "S3 bucket" + required: true + type: string + test-agentless: + description: "Run agentless integrations" + type: boolean + default: false + es-user: + description: "Elasticsearch user" + default: "elastic" + required: false + type: string + es-password: + description: "Elasticsearch password" + default: "changeme" + required: false + type: string + kibana-url: + description: "Kibana URL" + default: "default" + required: false + type: string + docker-image-override: + required: false + description: "Provide the full Docker image path to override the default image (e.g. 
for testing BC/SNAPSHOT)" + type: string + tag-project: + description: "Optional project resource tag" + default: "test-environments" + required: false + type: string + tag-owner: + description: "Optional owner tag" + default: "cloudbeat" + required: false + type: string + +runs: + using: composite + steps: + - name: Deploy CIS Infrastructure + id: deploy-cis-infra + env: + TF_VAR_deployment_name: ${{ inputs.deployment-name }} + TF_VAR_project: ${{ inputs.tag-project }} + TF_VAR_owner: ${{ inputs.tag-owner }} + shell: bash + working-directory: "deploy/test-environments/cis" + run: | + terraform init + terraform validate + terraform apply -auto-approve + + - name: Get CIS Outputs + id: generate-data + if: success() + shell: bash + working-directory: "deploy/test-environments/cis" + run: | + ec2_cspm=$(terraform output -raw ec2_cspm_ssh_cmd) + echo "::add-mask::$ec2_cspm" + echo "ec2-cspm=$ec2_cspm" >> "$GITHUB_OUTPUT" + + ec2_kspm=$(terraform output -raw ec2_kspm_ssh_cmd) + echo "::add-mask::$ec2_kspm" + echo "ec2-kspm=$ec2_kspm" >> "$GITHUB_OUTPUT" + + ec2_cspm_key=$(terraform output -raw ec2_cspm_key) + echo "::add-mask::$ec2_cspm_key" + echo "ec2-cspm-key=$ec2_cspm_key" >> "$GITHUB_OUTPUT" + + ec2_kspm_key=$(terraform output -raw ec2_kspm_key) + echo "::add-mask::$ec2_kspm_key" + echo "ec2-kspm-key=$ec2_kspm_key" >> "$GITHUB_OUTPUT" + + kspm_public_ip=$(terraform output -raw ec2_kspm_public_ip) + echo "::add-mask::$kspm_public_ip" + echo "kspm-public-ip=$kspm_public_ip" >> "$GITHUB_OUTPUT" + + cspm_public_ip=$(terraform output -raw ec2_cspm_public_ip) + echo "::add-mask::$cspm_public_ip" + echo "cspm-public-ip=$cspm_public_ip" >> "$GITHUB_OUTPUT" + + - name: Install CNVM integration + id: cnvm + if: ${{ !cancelled() && steps.deploy-cis-infra.outcome == 'success' }} + shell: bash + working-directory: tests/integrations_setup + env: + # CNVM_STACK_NAME: "${{ inputs.cnvm-stack-name }}" + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + 
KIBANA_URL: ${{ inputs.kibana-url }} + run: | + poetry run python ./install_cnvm_integration.py + + - name: Deploy CNVM agent + if: ${{ !cancelled() && steps.cnvm.outcome == 'success' }} + shell: bash + env: + STACK_NAME: "${{ inputs.cnvm-stack-name }}" + ENROLLMENT_TOKEN: "${{ inputs.stack-enrollment-token }}" + run: | + unset ENROLLMENT_TOKEN + just deploy-cloudformation + + - name: Install CSPM GCP integration + id: cspm-gcp-integration + if: ${{ !cancelled() && steps.deploy-cis-infra.outcome == 'success' }} + shell: bash + working-directory: tests/integrations_setup + env: + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + KIBANA_URL: ${{ inputs.kibana-url }} + run: | + poetry run python ./install_cspm_gcp_integration.py + + - name: Deploy CSPM GCP agent + id: cspm-gcp-agent + if: ${{ !cancelled() && steps.cspm-gcp-integration.outcome == 'success' }} + shell: bash + working-directory: deploy/deployment-manager + env: + ACTOR: ${{ github.actor }} + DEPLOYMENT_NAME: "${{ inputs.deployment-name }}" + GCP_ZONE: "${{ inputs.cspm-gcp-zone }}" + run: | + # GCP labeling rules: + # Only hyphens (-), underscores (_), lowercase characters, and numbers are allowed. International characters are allowed. + # Convert github.actor to lowercase, replace disallowed characters + gcp_label=$(echo "$ACTOR" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9_-]/_/g') + gcp_default_tag="division=engineering,org=security,team=cloud-security-posture,project=test-environments,owner=$gcp_label" + . 
./set_env.sh && ./deploy.sh && gcloud compute instances update "${DEPLOYMENT_NAME}" --update-labels "${gcp_default_tag}" --zone="${GCP_ZONE}" + + - name: Install CSPM Azure integration + id: cspm-azure-integration + if: ${{ !cancelled() && steps.deploy-cis-infra.outcome == 'success' }} + working-directory: tests/integrations_setup + shell: bash + env: + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + KIBANA_URL: ${{ inputs.kibana-url }} + run: | + poetry run python ./install_cspm_azure_integration.py + + - name: Deploy CSPM Azure agent + id: cspm-azure-agent + if: ${{ !cancelled() && steps.cspm-azure-integration.outcome == 'success' }} + working-directory: deploy/azure + shell: bash + env: + AZURE_TAGS: "${{ inputs.cspm-azure-tags }}" + run: ./install_agent_az_cli.sh + + - name: Install D4C integration + id: kspm-d4c + if: ${{ !cancelled() && steps.deploy-cis-infra.outcome == 'success' }} + working-directory: tests/integrations_setup + shell: bash + env: + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + KIBANA_URL: ${{ inputs.kibana-url }} + DOCKER_IMAGE_OVERRIDE: ${{ inputs.docker-image-override }} + run: | + poetry run python ./install_d4c_integration.py + + - name: Install KSPM EKS integration + id: kspm-eks + if: ${{ !cancelled() && steps.deploy-cis-infra.outcome == 'success' }} + working-directory: tests/integrations_setup + shell: bash + env: + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + KIBANA_URL: ${{ inputs.kibana-url }} + DOCKER_IMAGE_OVERRIDE: ${{ inputs.docker-image-override }} + run: | + poetry run python ./install_kspm_eks_integration.py + + - name: Deploy KSPM EKS agent + if: ${{ !cancelled() && steps.kspm-eks.outcome == 'success' }} + shell: bash + env: + DEPLOYMENT_NAME: "${{ inputs.deployment-name }}" + S3_BUCKET: "${{ inputs.env-s3-bucket }}" + AWS_REGION: "${{ inputs.aws-region }}" + run: | + aws eks --region ${AWS_REGION} update-kubeconfig --name 
${DEPLOYMENT_NAME} --alias eks-config +          echo "KUBE_CONFIG_DATA=$(cat ~/.kube/config | base64 -w 0)" >> $GITHUB_ENV +          kubectl config use-context eks-config +          kubectl apply -f tests/integrations_setup/kspm_d4c.yaml + +    - name: Install KSPM Unmanaged integration +      id: kspm-unmanaged +      if: ${{ !cancelled() && steps.deploy-cis-infra.outcome == 'success' }} +      working-directory: tests/integrations_setup +      shell: bash +      env: +        ES_USER: ${{ inputs.es-user }} +        ES_PASSWORD: ${{ inputs.es-password }} +        KIBANA_URL: ${{ inputs.kibana-url }} +      run: | +        poetry run python ./install_kspm_unmanaged_integration.py + +    - name: Deploy KSPM Unmanaged agent +      if: ${{ !cancelled() && steps.kspm-unmanaged.outcome == 'success' }} +      working-directory: deploy/test-environments/cis +      shell: bash +      env: +        EC2_KSPM_KEY: ${{ steps.generate-data.outputs.ec2-kspm-key }} +        KSPM_PUBLIC_IP: ${{ steps.generate-data.outputs.kspm-public-ip }} +        DOCKER_IMAGE_OVERRIDE: ${{ inputs.docker-image-override }} +      run: | +        scriptname="kspm_unmanaged.yaml" +        src="../../../tests/integrations_setup/$scriptname" +        cmd="kubectl apply -f $scriptname" +        ../remote_setup.sh -k "$EC2_KSPM_KEY" -s "$src" -h "$KSPM_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" + +    - name: Install CSPM AWS integration +      id: cspm-aws-integration +      if: ${{ !cancelled() && steps.deploy-cis-infra.outcome == 'success' }} +      working-directory: tests/integrations_setup +      shell: bash +      env: +        ES_USER: ${{ inputs.es-user }} +        ES_PASSWORD: ${{ inputs.es-password }} +        KIBANA_URL: ${{ inputs.kibana-url }} +      run: | +        poetry run python ./install_cspm_integration.py + +    - name: Deploy CSPM agent +      if: ${{ !cancelled() && steps.cspm-aws-integration.outcome == 'success' }} +      working-directory: deploy/test-environments/cis +      shell: bash +      env: +        EC2_CSPM_KEY: ${{ steps.generate-data.outputs.ec2-cspm-key }} +        CSPM_PUBLIC_IP: ${{ steps.generate-data.outputs.cspm-public-ip }} +      run: | +        scriptname="cspm-linux.sh" +        src="../../../tests/integrations_setup/$scriptname" + 
cmd="chmod +x $scriptname && ./$scriptname" + ../remote_setup.sh -k "$EC2_CSPM_KEY" -s "$src" -h "$CSPM_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" + + - name: Install Agentless integrations + id: agentless + if: ${{ !cancelled() && inputs.test-agentless == 'true' }} + working-directory: tests/integrations_setup + shell: bash + env: + AZURE_CREDENTIALS: ${{ inputs.cspm-azure-creds }} + ES_USER: ${{ inputs.es-user }} + ES_PASSWORD: ${{ inputs.es-password }} + KIBANA_URL: ${{ inputs.kibana-url }} + run: | + poetry run python ./install_agentless_integrations.py + + - name: Upload tf state + id: upload-state-cis + if: always() + working-directory: deploy/test-environments + env: + S3_BUCKET: ${{ inputs.env-s3-bucket }} + EC2_CSPM_KEY: ${{ steps.generate-data.outputs.ec2-cspm-key }} + EC2_KSPM_KEY: ${{ steps.generate-data.outputs.ec2-kspm-key }} + shell: bash + run: | + ./manage_infrastructure.sh "cis" "upload-state" diff --git a/.github/actions/elk-stack/action.yml b/.github/actions/elk-stack/action.yml new file mode 100644 index 0000000000..0f9b864732 --- /dev/null +++ b/.github/actions/elk-stack/action.yml @@ -0,0 +1,126 @@ +name: 'ELK Cloud Stack Installation' +description: 'Install ELK Cloud Stack ESS or Serverless' +inputs: + ec-api-key: + description: "API key for authenticating with Elastic Cloud." + type: string + required: true + ess-region: + description: "Elastic Cloud deployment region" + default: "gcp-us-west2" + type: string + required: false + deployment-name: + description: | + Name with letters, numbers, hyphens; start with a letter. Max 20 chars. 
e.g., 'my-env-123' + required: true + type: string + serverless-mode: + description: "Deploy a serverless project instead of an ESS deployment" + type: boolean + default: false + required: false + elk-stack-version: + description: "Stack version: For released version use 8.x.y, for BC use version with hash 8.x.y-hash, for SNAPSHOT use 8.x.y-SNAPSHOT" + default: "latest" + type: string + required: false + docker-image-version-override: + description: "Optional Docker image version to override the default stack image. Accepts formats like 8.x.y, 8.x.y-hash, or 8.x.y-SNAPSHOT." + type: string + required: false + env-s3-bucket: + description: "S3 bucket" + required: true + type: string + tag-project: + description: "Optional project resource tag" + default: "test-environments" + required: false + type: string + tag-owner: + description: "Optional owner tag" + default: "cloudbeat" + required: false + type: string +outputs: + kibana-url: + description: "Kibana URL" + value: ${{ steps.generate-data.outputs.kibana-url }} + es-url: + description: "Elasticsearch URL" + value: ${{ steps.generate-data.outputs.es-url }} + es-user: + description: "Elasticsearch username" + value: ${{ steps.generate-data.outputs.es-user }} + es-password: + description: "Elasticsearch password" + value: ${{ steps.generate-data.outputs.es-password }} + test-kibana-url: + description: "Test Kibana URL" + value: ${{ steps.generate-data.outputs.test-kibana-url }} + test-es-url: + description: "Test Elasticsearch URL" + value: ${{ steps.generate-data.outputs.test-es-url }} + +runs: + using: composite + steps: + - name: Deploy ELK Cloud Stack + id: deploy-elk-cloud-stack + env: + TF_VAR_deployment_name: ${{ inputs.deployment-name }} + TF_VAR_serverless_mode: ${{ inputs.serverless-mode }} + TF_VAR_stack_version: ${{ inputs.elk-stack-version }} + TF_VAR_ess_region: ${{ inputs.ess-region }} + TF_VAR_pin_version: ${{ inputs.docker-image-version-override }} + TF_VAR_ec_api_key: ${{ inputs.ec-api-key }} + 
TF_VAR_project: ${{ inputs.tag-project }} +        TF_VAR_owner: ${{ inputs.tag-owner }} +      shell: bash +      working-directory: "deploy/test-environments/elk-stack" +      run: | +        terraform init +        terraform validate +        terraform apply -auto-approve + +    - name: Get ELK Cloud Stack Outputs +      id: generate-data +      if: success() +      shell: bash +      working-directory: "deploy/test-environments/elk-stack" +      run: | +        kibana_url="$(terraform output -raw kibana_url)" +        echo "kibana-url=$kibana_url" >> "$GITHUB_OUTPUT" + +        es_url="$(terraform output -raw elasticsearch_url)" +        echo "es-url=$es_url" >> "$GITHUB_OUTPUT" + +        es_user="$(terraform output -raw elasticsearch_username)" +        echo "es-user=$es_user" >> "$GITHUB_OUTPUT" + +        es_password=$(terraform output -raw elasticsearch_password) +        echo "::add-mask::$es_password" +        echo "es-password=$es_password" >>"$GITHUB_OUTPUT" + +        # Remove 'https://' from the URLs +        kibana_url_stripped="${kibana_url//https:\/\//}" +        es_url_stripped="${es_url//https:\/\//}" + +        # Create test URLs with credentials +        test_kibana_url="https://${es_user}:${es_password}@${kibana_url_stripped}" +        echo "::add-mask::${test_kibana_url}" +        echo "test-kibana-url=${test_kibana_url}" >> "$GITHUB_OUTPUT" + +        test_es_url="https://${es_user}:${es_password}@${es_url_stripped}" +        echo "::add-mask::${test_es_url}" +        echo "test-es-url=${test_es_url}" >> "$GITHUB_OUTPUT" + +    - name: Upload tf state +      id: upload-state-elk +      working-directory: deploy/test-environments +      env: +        S3_BUCKET: ${{ inputs.env-s3-bucket }} +      shell: bash +      run: | +        ./manage_infrastructure.sh "elk-stack" "upload-state" diff --git a/.github/workflows/cdr-infra.yml b/.github/workflows/cdr-infra.yml index 1ed52a64fd..78efccd963 100644 --- a/.github/workflows/cdr-infra.yml +++ b/.github/workflows/cdr-infra.yml @@ -20,13 +20,35 @@ on: description: "The version of the ELK stack: For BC use version without hash 8.x.y, for SNAPSHOT use 8.x.y-SNAPSHOT" default: "8.16.0" type: string + cis-infra: + required: false + description: 
"Deploy the CIS infrastructure" + type: boolean + default: false docker-image-override: required: false description: "Provide the full Docker image path to override the default image (e.g. for testing BC/SNAPSHOT)" type: string jobs: + init: + runs-on: ubuntu-latest + outputs: + infra-type: ${{ steps.set_infra_type.outputs.infra_type }} + steps: + - name: Set infra type + id: set_infra_type + env: + CIS_INFRA: ${{ fromJSON(inputs.cis-infra) }} + run: | + if [[ "$CIS_INFRA" == "true" ]]; then + echo "infra_type=all" >> $GITHUB_OUTPUT + else + echo "infra_type=cdr" >> $GITHUB_OUTPUT + fi + deploy: + needs: init uses: ./.github/workflows/test-environment.yml secrets: inherit # Required for the 'Deploy' job in the 'test-environment.yml' to authenticate with Google Cloud (gcloud). @@ -38,4 +60,4 @@ jobs: ess-region: 'gcp-us-west2' elk-stack-version: ${{ inputs.elk-stack-version }} serverless_mode: ${{ fromJSON(inputs.serverless_mode) }} - cdr-infra: true + infra-type: ${{ needs.init.outputs.infra-type }} diff --git a/.github/workflows/cloudformation-ci.yml b/.github/workflows/cloudformation-ci.yml index 8904f5fe06..fa23ecd7f6 100644 --- a/.github/workflows/cloudformation-ci.yml +++ b/.github/workflows/cloudformation-ci.yml @@ -76,22 +76,12 @@ jobs: TF_VAR_stack_version: ${{ env.ELK_VERSION }} TF_VAR_ess_region: "gcp-us-west2" run: | - terraform init - terraform validate - # To address the conditional statement outputting stack values in the deployment stack, - # we need to specify both targets: ec_project and ec_deployment. 
- terraform apply --auto-approve -target=module.ec_deployment -target="module.ec_project" + ./manage_infrastructure.sh "elk-stack" "apply" - name: Set Environment Output id: env-output run: | - echo "KIBANA_URL=$(terraform output -raw kibana_url)" >> $GITHUB_ENV - echo "ES_URL=$(terraform output -raw elasticsearch_url)" >> $GITHUB_ENV - echo "ES_USER=$(terraform output -raw elasticsearch_username)" >> $GITHUB_ENV - - export ES_PASSWORD=$(terraform output -raw elasticsearch_password) - echo "::add-mask::$ES_PASSWORD" - echo "ES_PASSWORD=$ES_PASSWORD" >> $GITHUB_ENV + ./manage_infrastructure.sh "elk-stack" "output" - name: Install integrations working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} @@ -121,7 +111,7 @@ jobs: - name: Cleanup Environment if: always() run: | - terraform destroy --auto-approve -target="module.ec_deployment" -target="module.ec_project" + ./manage_infrastructure.sh "elk-stack" "destroy" aws cloudformation delete-stack --stack-name ${{ env.CNVM_STACK_NAME }} aws cloudformation wait stack-delete-complete --stack-name ${{ env.CNVM_STACK_NAME }} diff --git a/.github/workflows/test-environment.yml b/.github/workflows/test-environment.yml index b48d63c3cb..8283165f95 100644 --- a/.github/workflows/test-environment.yml +++ b/.github/workflows/test-environment.yml @@ -45,7 +45,7 @@ on: expiration_days: description: "Number of days until environment expiration" required: false - default: 14 + default: 5 type: string ec-api-key: type: string @@ -92,17 +92,17 @@ on: expiration_days: description: "Number of days until environment expiration" required: false - default: 14 + default: 5 type: string ec-api-key: type: string description: "**Optional** By default, the environment will be created in our Cloud Security Organization. If you want to use your own cloud account, enter your Elastic Cloud API key." 
required: false - cdr-infra: - description: "Flag to indicate that the CDR infrastructure is being created" - type: boolean + infra-type: + description: "Type of infrastructure to create" + type: string required: false - default: false + default: "cis" outputs: s3-bucket: description: "Terraform state s3 bucket folder" @@ -216,15 +216,15 @@ jobs: echo "::add-mask::$enrollment_token" echo "ENROLLMENT_TOKEN=$enrollment_token" >> $GITHUB_ENV - - name: Init CDR Infra - id: init-cdr-infra + - name: Init Infra Type + id: init-infra-type env: - CDR_INFRA: ${{ inputs.cdr-infra }} + INPUT_INFRA_TYPE: ${{ inputs.infra-type }} run: | - if [[ "${CDR_INFRA:-}" == "true" ]]; then - echo "TF_VAR_cdr_infra=true" >> $GITHUB_ENV + if [[ -z "${INPUT_INFRA_TYPE}" ]]; then + echo "INFRA_TYPE=cis" >> $GITHUB_ENV else - echo "TF_VAR_cdr_infra=false" >> $GITHUB_ENV + echo "INFRA_TYPE=$INPUT_INFRA_TYPE" >> $GITHUB_ENV fi - name: Set up Python @@ -268,279 +268,98 @@ jobs: echo "TF_VAR_gcp_project_id=$GCP_PROJECT" >> $GITHUB_ENV echo "TF_STATE_FOLDER=$(date +'%Y-%m-%d_%H-%M-%S')" >> $GITHUB_ENV - - name: Terraform Init - run: terraform init - - - name: Terraform Validate - run: terraform validate - - - name: Provision Test Environment (EC + EC2 K8s + EC2 CSPM) - id: apply - if: success() - run: | - terraform apply --auto-approve \ - -var="deployment_name=${{ env.DEPLOYMENT_NAME }}" \ - -var="region=${{ env.AWS_REGION }}" \ - -var="project=${{ github.actor }}" \ - -var="owner=${{ github.actor }}" - - - name: Set Environment Output - id: env-output - run: ../../.ci/scripts/set_cloud_env_params.sh - - - name: Upload tf state + - name: Deploy ELK Cloud Stack + id: elk-stack + uses: ./.github/actions/elk-stack + with: + deployment-name: ${{ env.DEPLOYMENT_NAME }} + serverless-mode: ${{ env.TEST_AGENTLESS }} + elk-stack-version: ${{ env.STACK_VERSION }} + ess-region: ${{ env.TF_VAR_ess_region }} + ec-api-key: ${{ env.TF_VAR_ec_api_key }} + docker-image-version-override: ${{ env.TF_VAR_pin_version 
}} + env-s3-bucket: "${{ env.S3_BASE_BUCKET }}/${{ env.DEPLOYMENT_NAME }}_${{ env.TF_STATE_FOLDER }}" + tag-project: ${{ github.actor }} + tag-owner: ${{ github.actor }} + + - name: Upload environment info id: upload-state if: always() env: S3_BUCKET: "${{ env.S3_BASE_BUCKET }}/${{ env.DEPLOYMENT_NAME }}_${{ env.TF_STATE_FOLDER }}" EXPIRATION_DAYS: ${{ inputs.expiration_days }} run: | - aws s3 cp "./terraform.tfstate" "${S3_BUCKET}/terraform.tfstate" - aws s3 cp "${EC2_CSPM_KEY}" "${S3_BUCKET}/cspm.pem" - aws s3 cp "${EC2_KSPM_KEY}" "${S3_BUCKET}/kspm.pem" - aws s3 cp "${EC2_ASSET_INV_KEY}" "${S3_BUCKET}/asset_inv.pem" echo "s3-bucket-folder=${S3_BUCKET}" >> $GITHUB_OUTPUT echo "aws-cnvm-stack=${CNVM_STACK_NAME}" >> $GITHUB_OUTPUT python3 ../../.ci/scripts/create_env_config.py aws s3 cp "./env_config.json" "${S3_BUCKET}/env_config.json" - if [[ ${TF_VAR_cdr_infra:-} == "true" ]]; then - aws s3 cp "${CLOUDTRAIL_KEY}" "${S3_BUCKET}/cloudtrail.pem" - aws s3 cp "${ACTIVITY_LOGS_KEY}" "${S3_BUCKET}/az_activity_logs.pem" - aws s3 cp "${AUDIT_LOGS_KEY}" "${S3_BUCKET}/gcp_audit_logs.pem" - fi + + - name: Update Stack Vars + env: + STACK_ES_USER: ${{ steps.elk-stack.outputs.es-user }} + STACK_ES_PASSWORD: ${{ steps.elk-stack.outputs.es-password }} + STACK_KIBANA_URL: ${{ steps.elk-stack.outputs.kibana-url }} + STACK_ES_URL: ${{ steps.elk-stack.outputs.es-url }} + run: | + echo "ES_USER=$STACK_ES_USER" >> $GITHUB_ENV + echo "ES_PASSWORD=$STACK_ES_PASSWORD" >> $GITHUB_ENV + echo "KIBANA_URL=$STACK_KIBANA_URL" >> $GITHUB_ENV + echo "ES_URL=$STACK_ES_URL" >> $GITHUB_ENV - name: Summary if: success() run: | - kibana_url=$(terraform output -raw kibana_url) - summary="Kibana URL: $kibana_url" - bucket_name="${{ env.S3_BASE_BUCKET }}" + summary="Kibana URL: $KIBANA_URL" + bucket_name="$S3_BASE_BUCKET" bucket_name="${bucket_name#s3://}" s3_bucket_link="[creds and keys](https://s3.console.aws.amazon.com/s3/buckets/$bucket_name)" summary=$(cat <<-EOF - Kibana URL: 
[kibana]($kibana_url) + Kibana URL: [kibana]($KIBANA_URL) Environment Details: $s3_bucket_link EOF ) echo "$summary" >> $GITHUB_STEP_SUMMARY echo "$summary" # Print the summary to the workflow log - - name: Install AWS Cloudtrail integration - id: cloudtrail-integration - if: env.TF_VAR_cdr_infra == 'true' - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - env: - CLOUDTRAIL_S3: ${{ secrets.CLOUDTRAIL_S3 }} - run: | - poetry run python ./install_cloudtrail_integration.py - - - name: Deploy AWS Cloudtrail agent - if: env.TF_VAR_cdr_infra == 'true' - run: | - scriptname="cloudtrail-linux.sh" - src="../../$INTEGRATIONS_SETUP_DIR/$scriptname" - cmd="chmod +x $scriptname && ./$scriptname" - ../../.ci/scripts/remote_setup.sh -k "$CLOUDTRAIL_KEY" -s "$src" -h "$CLOUDTRAIL_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" - - - name: Install Azure Activity Logs integration - id: az-activity-logs-integration - if: env.TF_VAR_cdr_infra == 'true' - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - env: - EVENTHUB: "activity-logs" - CONNECTION_STRING: ${{ secrets.AZURE_EVENTHUB_CONNECTION_STRING }} - STORAGE_ACCOUNT: "testenvsactivitylogs" - STORAGE_ACCOUNT_KEY: ${{ secrets.AZURE_STORAGE_ACCOUNT_KEY }} - run: | - poetry run python ./install_az_activity_logs_integration.py - - - name: Deploy Azure Activity Logs agent - if: env.TF_VAR_cdr_infra == 'true' - run: | - scriptname="az_activity_logs.sh" - src="../../$INTEGRATIONS_SETUP_DIR/$scriptname" - cmd="chmod +x $scriptname && ./$scriptname" - ../../.ci/scripts/remote_setup.sh -k "$ACTIVITY_LOGS_KEY" -s "$src" -h "$ACTIVITY_LOGS_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" - - - name: Install GCP Audit Logs integration - id: gcp-audit-logs-integration - if: env.TF_VAR_cdr_infra == 'true' - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - env: - GCP_TOPIC_NAME: "test-envs-topic" - GCP_SUBSCRIPTION_NAME: "test-envs-topic-sub-id" - run: | - poetry run python ./install_gcp_audit_logs_integration.py - - - name: Deploy GCP Audit Logs 
agent - if: env.TF_VAR_cdr_infra == 'true' - run: | - scriptname="gcp_audit_logs.sh" - src="../../$INTEGRATIONS_SETUP_DIR/$scriptname" - cmd="chmod +x $scriptname && ./$scriptname" - ../../.ci/scripts/remote_setup.sh -k "$AUDIT_LOGS_KEY" -s "$src" -h "$AUDIT_LOGS_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" - - - name: Install CNVM integration - id: cnvm - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - run: | - poetry run python ./install_cnvm_integration.py - - - name: Deploy CNVM agent - env: - STACK_NAME: "${{ env.CNVM_STACK_NAME}}" - run: | - unset ENROLLMENT_TOKEN - just deploy-cloudformation - - - name: Install CSPM GCP integration - id: cspm-gcp-integration - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - run: | - poetry run python ./install_cspm_gcp_integration.py - - - name: Deploy CSPM GCP agent - id: cspm-gcp-agent - working-directory: deploy/deployment-manager - env: - ACTOR: ${{ github.actor }} - run: | - # GCP labeling rules: - # Only hyphens (-), underscores (_), lowercase characters, and numbers are allowed. International characters are allowed. - # Convert github.actor to lowercase, replace disallowed characters - GCP_LABEL=$(echo "$ACTOR" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-z0-9_-]/_/g') - GCP_DEFAULT_TAGS="division=engineering,org=security,team=cloud-security-posture,project=test-environments,owner=$GCP_LABEL" - . 
./set_env.sh && ./deploy.sh && gcloud compute instances update "${DEPLOYMENT_NAME}" --update-labels "${GCP_DEFAULT_TAGS}" --zone="${GCP_ZONE}" - - - name: Install CSPM Azure integration - id: cspm-azure-integration - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - run: | - poetry run python ./install_cspm_azure_integration.py - - - name: Deploy CSPM Azure agent - id: cspm-azure-agent - working-directory: deploy/azure - env: - AZURE_TAGS: ${{ env.AZURE_DEFAULT_TAGS }} - run: ./install_agent_az_cli.sh - - - name: Check Asset Inventory supported version - id: asset-inventory-version-check - run: | - MIN_VERSION="8.16.0" - if [[ "$(echo -e "$MIN_VERSION\n$STACK_VERSION" | sort -V | head -n 1)" == "$MIN_VERSION" ]]; then - echo "Stack version meets the requirement: $STACK_VERSION >= $MIN_VERSION." - echo "asset_inventory_supported=true" >> $GITHUB_ENV - else - echo "Stack version is below the requirement: $STACK_VERSION < $MIN_VERSION." - echo "asset_inventory_supported=false" >> $GITHUB_ENV - fi - - - name: Install Azure Asset Inventory integration - id: azure-asset-inventory-integration - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - if: env.asset_inventory_supported == 'true' - run: | - poetry run python ./install_azure_asset_inventory_integration.py - - - name: Deploy Azure Asset Inventory agent - id: azure-asset-inventory-agent - working-directory: deploy/azure - if: env.asset_inventory_supported == 'true' - env: - AZURE_TAGS: ${{ env.AZURE_DEFAULT_TAGS }} - DEPLOYMENT_NAME: "${{ env.DEPLOYMENT_NAME }}-inventory" - run: ./install_agent_az_cli.sh - - - name: Install D4C integration - id: kspm-d4c - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - run: | - poetry run python ./install_d4c_integration.py - - - name: Install KSPM EKS integration - id: kspm-eks - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - run: | - poetry run python ./install_kspm_eks_integration.py - - - name: Deploy KSPM EKS agent - env: - S3_BUCKET: "${{ 
env.S3_BASE_BUCKET }}/${{ env.DEPLOYMENT_NAME }}_${{ env.TF_STATE_FOLDER }}" - run: | - aws eks --region ${{ env.AWS_REGION }} update-kubeconfig \ - --name $(terraform output -raw deployment_name) --alias eks-config - echo 'KUBE_CONFIG_DATA=$(cat ~/.kube/config | base64)' >> $GITHUB_ENV - aws s3 cp ~/.kube/config "${{ env.S3_BUCKET }}/kubeconfig" - kubectl config use-context eks-config - kubectl apply -f ../../${{ env.INTEGRATIONS_SETUP_DIR }}/kspm_d4c.yaml - - - name: Install KSPM Unmanaged integration - id: kspm-unmanaged - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - run: | - poetry run python ./install_kspm_unmanaged_integration.py - - - name: Deploy KSPM Unmanaged agent - run: | - scriptname="kspm_unmanaged.yaml" - src="../../$INTEGRATIONS_SETUP_DIR/$scriptname" - cmd="kubectl apply -f $scriptname" - ../../.ci/scripts/remote_setup.sh -k "$EC2_KSPM_KEY" -s "$src" -h "$KSPM_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" - - - name: Install CSPM integration - id: cspm - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - run: | - poetry run python ./install_cspm_integration.py - - - name: Deploy CSPM agent - run: | - scriptname="cspm-linux.sh" - src="../../$INTEGRATIONS_SETUP_DIR/$scriptname" - cmd="chmod +x $scriptname && ./$scriptname" - ../../.ci/scripts/remote_setup.sh -k "$EC2_CSPM_KEY" -s "$src" -h "$CSPM_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" - - - name: Install AWS Asset Inventory integration - id: aws-asset-inventory - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - if: env.asset_inventory_supported == 'true' - run: | - poetry run python ./install_aws_asset_inventory_integration.py - - - name: Deploy AWS Asset Inventory agent - if: env.asset_inventory_supported == 'true' - run: | - scriptname="aws-asset-inventory-linux.sh" - src="../../$INTEGRATIONS_SETUP_DIR/$scriptname" - cmd="chmod +x $scriptname && ./$scriptname" - ../../.ci/scripts/remote_setup.sh -k "$EC2_ASSET_INV_KEY" -s "$src" -h "$ASSET_INV_PUBLIC_IP" -d "~/$scriptname" -c "$cmd" 
- - - name: Upload Integrations data - if: always() - env: - S3_BUCKET: "${{ env.S3_BASE_BUCKET }}/${{ env.DEPLOYMENT_NAME }}_${{ env.TF_STATE_FOLDER }}" - ASSET_INVENTORY_SUPPORTED: "${{ env.asset_inventory_supported }}" - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - run: | - aws s3 cp "./kspm_unmanaged.yaml" "$S3_BUCKET/kspm_unmanaged.yaml" - aws s3 cp "./kspm_d4c.yaml" "$S3_BUCKET/kspm_d4c.yaml" - aws s3 cp "./kspm_eks.yaml" "$S3_BUCKET/kspm_eks.yaml" - aws s3 cp "./cspm-linux.sh" "$S3_BUCKET/cspm-linux.sh" - if [[ "${ASSET_INVENTORY_SUPPORTED}" == "true" ]]; then - aws s3 cp "./aws-asset-inventory-linux.sh" "$S3_BUCKET/aws-asset-inventory-linux.sh" - fi - aws s3 cp "./state_data.json" "$S3_BUCKET/state_data.json" - - - name: Install Agentless integrations - id: agentless - if: env.TEST_AGENTLESS == 'true' - working-directory: ${{ env.INTEGRATIONS_SETUP_DIR }} - env: - AZURE_CREDENTIALS: ${{ secrets.AZURE_CREDENTIALS }} - run: | - poetry run python ./install_agentless_integrations.py + - name: Deploy CDR Integrations + id: cdr-integrations + if: ${{ !cancelled() && steps.elk-stack.outcome == 'success' && env.INFRA_TYPE != 'cis' }} + uses: ./.github/actions/cdr + with: + deployment-name: ${{ env.DEPLOYMENT_NAME }} + aws-region: ${{ env.AWS_REGION }} + gcp-project-id: ${{ env.GCP_PROJECT }} + gcp-service-account-json: ${{ secrets.GCP_AGENT_CREDENTIALS }} + aws-cloudtrail-s3-bucket: ${{ secrets.CLOUDTRAIL_S3 }} + azure-eventhub-connection-string: ${{ secrets.AZURE_EVENTHUB_CONNECTION_STRING }} + azure-storage-account-key: ${{ secrets.AZURE_STORAGE_ACCOUNT_KEY }} + env-s3-bucket: "${{ env.S3_BASE_BUCKET }}/${{ env.DEPLOYMENT_NAME }}_${{ env.TF_STATE_FOLDER }}" + es-user: ${{ steps.elk-stack.outputs.es-user }} + es-password: ${{ steps.elk-stack.outputs.es-password }} + kibana-url: ${{ steps.elk-stack.outputs.kibana-url }} + elk-stack-version: ${{ env.STACK_VERSION }} + azure-tags: ${{ env.AZURE_DEFAULT_TAGS }} + tag-project: ${{ github.actor }} + tag-owner: 
${{ github.actor }} + + - name: Deploy CIS Integrations + id: cis-integrations + if: ${{ !cancelled() && steps.elk-stack.outcome == 'success' && env.INFRA_TYPE != 'cdr' }} + uses: ./.github/actions/cis + with: + deployment-name: ${{ env.DEPLOYMENT_NAME }} + cnvm-stack-name: ${{ env.CNVM_STACK_NAME }} + cspm-gcp-zone: ${{ env.GCP_ZONE }} + cspm-azure-creds: ${{ secrets.AZURE_CREDENTIALS }} + cspm-azure-tags: ${{ env.AZURE_DEFAULT_TAGS }} + stack-enrollment-token: ${{ env.ENROLLMENT_TOKEN }} + env-s3-bucket: "${{ env.S3_BASE_BUCKET }}/${{ env.DEPLOYMENT_NAME }}_${{ env.TF_STATE_FOLDER }}" + es-user: ${{ steps.elk-stack.outputs.es-user }} + es-password: ${{ steps.elk-stack.outputs.es-password }} + kibana-url: ${{ steps.elk-stack.outputs.kibana-url }} + test-agentless: ${{ env.TEST_AGENTLESS }} + tag-project: ${{ github.actor }} + tag-owner: ${{ github.actor }} - name: Wait for agents to enroll id: wait-for-agents @@ -549,23 +368,23 @@ jobs: poetry run python ./agents_enrolled.py - name: Run Sanity checks - if: ${{ success() && inputs.run-sanity-tests == true }} + if: ${{ success() && inputs.run-sanity-tests == true && env.INFRA_TYPE != 'cdr' }} working-directory: ./tests run: | poetry run pytest -m "sanity" --alluredir=./allure/results/ --clean-alluredir --maxfail=4 - name: Run UI Sanity checks (Kibana) uses: ./.github/actions/kibana-ftr - if: ${{ success() && inputs.run-ui-sanity-tests == true }} + if: ${{ success() && inputs.run-ui-sanity-tests == true && env.INFRA_TYPE != 'cdr' }} with: - test_kibana_url: ${{ env.TEST_KIBANA_URL }} - test_es_url: ${{ env.TEST_ES_URL }} + test_kibana_url: ${{ steps.elk-stack.outputs.test-kibana-url }} + test_es_url: ${{ steps.elk-stack.outputs.test-es-url }} es_version: ${{ env.STACK_VERSION }} kibana_ref: ${{ inputs.kibana_ref }} - name: Create Slack Payload if: always() - id: prepare-data + id: prepare-slack-data working-directory: ./ env: WORKFLOW: "${{ github.workflow }}" @@ -585,4 +404,4 @@ jobs: vault-url: ${{ 
secrets.VAULT_ADDR }} vault-role-id: ${{ secrets.CSP_VAULT_ROLE_ID }} vault-secret-id: ${{ secrets.CSP_VAULT_SECRET_ID }} - slack-payload: ${{ steps.prepare-data.outputs.payload }} + slack-payload: ${{ steps.prepare-slack-data.outputs.payload }} diff --git a/.github/workflows/test-gcp-dm.yml b/.github/workflows/test-gcp-dm.yml index a6207b0399..0198148d2a 100644 --- a/.github/workflows/test-gcp-dm.yml +++ b/.github/workflows/test-gcp-dm.yml @@ -77,18 +77,8 @@ jobs: if: success() working-directory: ${{ env.TEST_ENVS_DIR }} run: | - terraform -v - terraform init - terraform validate - terraform apply --auto-approve -target="module.ec_deployment" -target="module.ec_project" - terraform output - echo "KIBANA_URL=$(terraform output -raw kibana_url)" >> $GITHUB_ENV - echo "ES_URL=$(terraform output -raw elasticsearch_url)" >> $GITHUB_ENV - echo "ES_USER=$(terraform output -raw elasticsearch_username)" >> $GITHUB_ENV - - export ES_PASSWORD=$(terraform output -raw elasticsearch_password) - echo "::add-mask::$ES_PASSWORD" - echo "ES_PASSWORD=$ES_PASSWORD" >> $GITHUB_ENV + ./manage_infrastructure.sh "elk-stack" "apply" + ./manage_infrastructure.sh "elk-stack" "output" - name: Install CSPM GCP integration id: cspm-gcp-integration @@ -121,7 +111,7 @@ jobs: if: always() working-directory: ${{ env.TEST_ENVS_DIR }} run: | - terraform destroy --auto-approve -target="module.ec_deployment" -target="module.ec_project" + ./manage_infrastructure.sh "elk-stack" "destroy" - name: Set up GCP Cloud SDK if: always() @@ -188,18 +178,8 @@ jobs: if: success() working-directory: ${{ env.TEST_ENVS_DIR }} run: | - terraform -v - terraform init - terraform validate - terraform apply --auto-approve -target="module.ec_deployment" -target="module.ec_project" - terraform output - echo "KIBANA_URL=$(terraform output -raw kibana_url)" >> $GITHUB_ENV - echo "ES_URL=$(terraform output -raw elasticsearch_url)" >> $GITHUB_ENV - echo "ES_USER=$(terraform output -raw elasticsearch_username)" >> $GITHUB_ENV - 
- export ES_PASSWORD=$(terraform output -raw elasticsearch_password) - echo "::add-mask::$ES_PASSWORD" - echo "ES_PASSWORD=$ES_PASSWORD" >> $GITHUB_ENV + ./manage_infrastructure.sh "elk-stack" "apply" + ./manage_infrastructure.sh "elk-stack" "output" - name: Set up GCP Cloud SDK if: always() @@ -240,7 +220,7 @@ jobs: if: always() working-directory: ${{ env.TEST_ENVS_DIR }} run: | - terraform destroy --auto-approve -target="module.ec_deployment" -target="module.ec_project" + ./manage_infrastructure.sh "elk-stack" "destroy" - name: Delete GCP Deployments if: always() diff --git a/.github/workflows/upgrade-environment.yml b/.github/workflows/upgrade-environment.yml index 5f14920cf3..e9370e1d93 100644 --- a/.github/workflows/upgrade-environment.yml +++ b/.github/workflows/upgrade-environment.yml @@ -145,54 +145,57 @@ jobs: env: S3_BUCKET: ${{ needs.deploy.outputs.s3-bucket }} run: | - aws s3 cp "${{ env.S3_BUCKET }}/terraform.tfstate" "./terraform.tfstate" - - - name: Terraform Init - run: terraform init - - - name: Terraform Validate - run: terraform validate + aws s3 cp "$S3_BUCKET/elk-stack-terraform.tfstate" "./elk-stack/terraform.tfstate" + aws s3 cp "$S3_BUCKET/cis-terraform.tfstate" "./cis/terraform.tfstate" - name: Update ELK stack version - id: apply + id: provision-elk-terraform if: success() + env: + TF_VAR_deployment_name: ${{ inputs.deployment_name }} + TF_VAR_region: ${{ env.AWS_REGION }} + TF_VAR_project: ${{ github.actor }} + TF_VAR_owner: ${{ github.actor }} + INFRA_TYPE: elk-stack run: | - terraform apply --auto-approve \ - -var="deployment_name=${{ inputs.deployment_name }}" \ - -var="region=${{ env.AWS_REGION }}" \ - -var="project=${{ github.actor }}" \ - -var="owner=${{ github.actor }}" + ./manage_infrastructure.sh "$INFRA_TYPE" "apply" - name: Set Environment Output id: env-output - run: ../../.ci/scripts/set_cloud_env_params.sh + env: + INFRA_TYPE: cis + run: | + ./manage_infrastructure.sh "$INFRA_TYPE" "output" - name: Set Docker Image version 
if: ${{ ! inputs.docker-image-override }} env: VERSION: 'docker.elastic.co/beats/elastic-agent:${{ inputs.target-elk-stack-version }}' run: | - echo "DOCKER_IMAGE=${{ env.VERSION }}" >> $GITHUB_ENV + echo "DOCKER_IMAGE=${VERSION}" >> $GITHUB_ENV - name: Download Integrations data + working-directory: ${{ env.WORKING_DIR }}/cis env: S3_BUCKET: ${{ needs.deploy.outputs.s3-bucket }} run: | - aws s3 cp "${{ env.S3_BUCKET }}/kspm.pem" "${{ env.EC2_KSPM_KEY }}" - aws s3 cp "${{ env.S3_BUCKET }}/state_data.json" "../../${{ env.INTEGRATIONS_SETUP_DIR }}/state_data.json" + aws s3 cp "${S3_BUCKET}/kspm.pem" "${EC2_KSPM_KEY}" + aws s3 cp "${S3_BUCKET}/state_data.json" "../../../${INTEGRATIONS_SETUP_DIR}/state_data.json" - name: Upgrade KSPM Unmanaged agent + working-directory: ${{ env.WORKING_DIR }}/cis run: | - chmod 600 ${{ env.EC2_KSPM_KEY }} + chmod 600 ${EC2_KSPM_KEY} # Update image - ssh -o StrictHostKeyChecking=no -v -i ${{ env.EC2_KSPM_KEY }} "ubuntu@${{ env.KSPM_PUBLIC_IP }}" "kubectl set image daemonset elastic-agent -n kube-system elastic-agent=${{ env.DOCKER_IMAGE }}" + ssh -o StrictHostKeyChecking=no -v -i ${EC2_KSPM_KEY} "ubuntu@${KSPM_PUBLIC_IP}" "kubectl set image daemonset elastic-agent -n kube-system elastic-agent=${DOCKER_IMAGE}" - name: Upgrade KSPM EKS agent + env: + DEPLOYMENT_NAME: ${{ inputs.deployment_name }} run: | - aws eks --region ${{ env.AWS_REGION }} update-kubeconfig \ - --name $(terraform output -raw deployment_name) --alias eks-config + aws eks --region $AWS_REGION update-kubeconfig --name $DEPLOYMENT_NAME --alias eks-config kubectl config use-context eks-config - kubectl set image daemonset elastic-agent -n kube-system elastic-agent=${{ env.DOCKER_IMAGE }} + kubectl set image daemonset elastic-agent -n kube-system elastic-agent=${DOCKER_IMAGE} kubectl rollout restart daemonset/elastic-agent -n kube-system - name: Upgrade Linux agents diff --git a/deploy/test-environments/README.md b/deploy/test-environments/README.md index 
e26ecbc987..04309aaf09 100644 --- a/deploy/test-environments/README.md +++ b/deploy/test-environments/README.md @@ -6,7 +6,7 @@ To provide an easy and deterministic way to set up the latest cloud environment, **Prerequisite** -This project utilizes AWS and Elastic Cloud accounts. To ensure proper deployment and usage, it is essential to obtain appropriate licenses in compliance with the licensing terms and conditions provided by the respective service providers. +This project utilizes AWS, Elastic Cloud, Azure, and GCP accounts. To ensure proper deployment and usage, you need to obtain appropriate licenses and credentials in compliance with the licensing terms and conditions provided by the respective service providers. Follow the [prerequisites](/README.md#prerequisites) chapter of our main README. @@ -21,25 +21,55 @@ To generate an Elastic Cloud token, you have two options: Choose the method that is most convenient for you to obtain the Elastic Cloud token required for deployment. +For AWS: + Ensure that the following AWS credentials are defined: - `AWS_ACCESS_KEY_ID`: Your AWS access key ID. - `AWS_SECRET_ACCESS_KEY`: Your AWS secret access key. +For GCP: + +Ensure that you have your GCP service account key file. This file is usually stored at a path like ~/.config/gcloud, but the exact location may vary. + +For Azure: + +Ensure that you are logged in to Azure using: + +```bash +az login +``` To successfully deploy the environment, ensure that the following variables are provided as deployment parameters or exported as environment variables: ```bash export TF_VAR_ec_api_key={TOKEN} # <-- should be replaced by Elastic Cloud TOKEN -export TF_VAR_stack_version=8.7.2-SNAPSHOT +export TF_VAR_stack_version=8.16.0-SNAPSHOT export TF_VAR_ess_region=gcp-us-west2 ``` -## Modules +## Directory Structure -This project leverages a set of foundational modules specifically designed for [cloud deployment](../cloud/modules/). 
+### elk-stack -### EC2 +This directory handles the deployment of the Elastic Stack. It includes: + +- Deployment Types: `deployment` and `project`, defined by the var.serverless_mode key. +- Required Variable: `ec_api_key`. It is also recommended to provide `deployment_name` as an input parameter during development. + +**ec_deployment** - This module facilitates the deployment of Elastic Cloud instance. + +| Variable | Default Value | Comment | +|:-------------:|:-------------:|:------------| +| ec_api_key | None | The API key for Elastic Cloud can also be defined using the `TF_VAR_ec_api_key` environment variable | +| ess_region | gcp-us-west2 | The ESS deployment region can also be defined using the `TF_VAR_stack_version` environment variable| +| stack_version | latest | The ELK stack version can also be defined using the `TF_VAR_stack_version` environment variable | +| pin_version | None | Optional: The ELK pin version (docker tag override) can also be defined using the `TF_VAR_pin_version` environment variable | + + +### cis + +This directory is responsible for provisioning EC2 machines and EKS clusters related to CSPM and KSPM. **aws_ec2_for_kspm** - This module facilitates the deployment of an EC2 instance with a Kubernetes cluster using the kind tool. The deployment process relies on a customized image that includes the necessary components for running kind. @@ -47,92 +77,82 @@ This project leverages a set of foundational modules specifically designed for [ Please note that the customized image is currently available in the following regions: **eu-west-1** and **eu-west-3**. Therefore, ensure that you deploy this module in one of these regions to leverage the customized image effectively. 
-**Module variables (CSPM / KSPM)** - -| Variable | Default Value | Comment | -|:-------------:|:-------------:|:------------| -| region | eu-west-1 | AWS EC2 deployment region | +### cdr +This directory includes modules for provisioning infrastructure for CDR, including: +- GCP VM (requires gcp_project_id as an input variable) +- Azure VM +- AWS EC2 for CloudTrail +- Additional EC2 for asset inventory -### Elastic Cloud +### Modules -**ec_deployment** - This module facilitates the deployment of Elastic Cloud instance. +All projects utilize a set of foundational modules specifically designed for [cloud deployment](../cloud/modules/). -| Variable | Default Value | Comment | -|:-------------:|:-------------:|:------------| -| ec_api_key | None | The API key for Elastic Cloud can also be defined using the `TF_VAR_ec_api_key` environment variable | -| ess_region | gcp-us-west2 | The ESS deployment region can also be defined using the `TF_VAR_stack_version` environment variable| -| stack_version | latest | The ELK stack version can also be defined using the `TF_VAR_stack_version` environment variable | -| pin_version | None | Optional: The ELK pin version (docker tag override) can also be defined using the `TF_VAR_pin_version` environment variable | ## Execution -To execute the full project, encompassing the deployment of an EC2 instance, setting up a Kubernetes cluster using kind, and deploying Elastic Cloud, follow the steps outlined below +There is no single Terraform command to execute the full project. Instead, each module can be executed separately using Terraform commands. The scripts provided in the project are responsible for managing the execution of the entire setup. + +### Full Project Execution -- Initiate the project +The full project execution is managed by scripts, not by Terraform directly. 
Use the following scripts to handle the deployment process: +- `manage_infrastructure.sh`: This script manages Terraform provisioning with commands for {elk-stack|cis|cdr|all} {apply|destroy|output|upload-state}. +The following command applies all Terraform configurations for the elk-stack, cis, and cdr directories: ```bash -cd test-environments -terraform init +./manage_infrastructure.sh all apply ``` - -- Deploy test environment +The following command destroys all Terraform configurations for the elk-stack, cis, and cdr directories: ```bash -terraform apply --auto-approve -var="deployment_name=dev-env" +./manage_infrastructure.sh all destroy ``` -For development purposes, it is possible to deploy each module separately, allowing for focused and independent development and testing. Each module within the project represents a specific component or functionality and can be deployed individually to streamline the development process. - -Below are examples demonstrating how to execute individual modules separately: - -- EC2 for CSPM - +The following command retrieves outputs from all deployed environments: ```bash -terraform apply --auto-approve -target "module.aws_ec2_for_cspm" +./manage_infrastructure.sh all output ``` -- EC2 + Kind Kubernetes (KSPM) + +### Running Individual Modules + +- Elastic Stack ```bash -terraform apply --auto-approve -target "module.aws_ec2_for_kspm" +cd elk-stack +terraform init +terraform apply --auto-approve ``` -- EC Deployment +- CIS modules ```bash -terraform apply --auto-approve -target "module.ec_deployment" +cd cis +terraform init +terraform apply --auto-approve ``` -BC version +- Specific module in CIS ```bash -terraform apply --auto-approve -var="stack_version=8.12.0" -var="pin_version=8.12.0-9f05a310" -target "module.ec_deployment" +cd cis +terraform apply --auto-approve -target "module.aws_ec2_for_kspm" ``` - -- EKS Deployment +- CDR modules ```bash -terraform apply --auto-approve -target "module.eks" +cd cdr +terraform init 
+terraform apply --auto-approve ``` ## Environment Cleanup To destroy local environment use -``` bash -terraform destroy -var="region=eu-west-1" -``` - - -To destroy the environment provisioned using the Sanity job, follow these steps: - -1. [Download](https://s3.console.aws.amazon.com/s3/buckets/tf-state-bucket-test-infra?region=eu-west-3&tab=objects) the Terraform state file to the [test-environments](../test-environments/) folder. -2. Rename the state file, for example, `terraform-sanity.tfstate`. -3. Run the following command: - -``` bash -terraform destroy -var="region=eu-west-1" -state terraform-sanity.tfstate +```bash +./manage_infrastructure.sh all destroy ``` diff --git a/deploy/test-environments/cdr/main.tf b/deploy/test-environments/cdr/main.tf new file mode 100644 index 0000000000..d7e9a2a6bc --- /dev/null +++ b/deploy/test-environments/cdr/main.tf @@ -0,0 +1,72 @@ +provider "aws" { + region = var.region +} + +provider "google" { + project = var.gcp_project_id +} + +provider "azurerm" { + features {} +} + +locals { + common_tags = { + division = "${var.division}" + org = "${var.org}" + team = "${var.team}" + project = "${var.project}" + owner = "${var.owner}" + deployment = "${var.deployment_name}" + } +} + +resource "random_string" "suffix" { + length = 3 + special = false +} + +# ===== CDR Infrastructure Resources ===== + +module "gcp_audit_logs" { + count = var.deploy_gcp_vm ? 1 : 0 + providers = { google : google } + source = "../../cloud/modules/gcp/vm" + gcp_service_account_json = var.gcp_service_account_json + deployment_name = var.deployment_name + network = "default" + specific_tags = merge(local.common_tags, { "vm_instance" : "audit-logs" }) + +} + +module "aws_ec2_for_cloudtrail" { + count = var.deploy_aws_ec2 ? 
1 : 0 + source = "../../cloud/modules/ec2" + providers = { aws : aws } + aws_ami = var.ami_map[var.region] + deploy_k8s = false + deploy_agent = false + deployment_name = "${var.deployment_name}-${random_string.suffix.result}" + specific_tags = merge(local.common_tags, { "ec2_type" : "cloudtrail" }) +} + +module "azure_vm_activity_logs" { + count = var.deploy_az_vm ? 1 : 0 + source = "../../cloud/modules/azure/vm" + providers = { azurerm : azurerm } + location = var.location + deployment_name = var.deployment_name + specific_tags = merge(local.common_tags, { "vm_type" : "activity-logs" }) +} + +module "aws_ec2_for_asset_inventory" { + count = var.deploy_aws_asset_inventory ? 1 : 0 + source = "../../cloud/modules/ec2" + providers = { aws : aws } + aws_ami = var.ami_map[var.region] + deploy_k8s = false + deploy_agent = false + deployment_name = "${var.deployment_name}-${random_string.suffix.result}" + specific_tags = merge(local.common_tags, { "ec2_type" : "asset_inventory" }) +} +# ===== End Of CDR Infrastructure Resources ===== diff --git a/deploy/test-environments/cdr/output.tf b/deploy/test-environments/cdr/output.tf new file mode 100644 index 0000000000..b9c4f99791 --- /dev/null +++ b/deploy/test-environments/cdr/output.tf @@ -0,0 +1,64 @@ +output "deployment_name" { + value = var.deployment_name + description = "Terraform deployment name" +} + +output "ec2_cloudtrail_ssh_cmd" { + value = var.deploy_aws_ec2 ? module.aws_ec2_for_cloudtrail[0].cloudbeat_ssh_cmd : null + sensitive = true +} + +output "ec2_cloudtrail_public_ip" { + value = var.deploy_aws_ec2 ? module.aws_ec2_for_cloudtrail[0].aws_instance_cloudbeat_public_ip : null + sensitive = true +} + +output "ec2_cloudtrail_key" { + value = var.deploy_aws_ec2 ? module.aws_ec2_for_cloudtrail[0].ec2_ssh_key : null + sensitive = true +} + +output "gcp_audit_logs_ssh_cmd" { + value = var.deploy_gcp_vm ? 
module.gcp_audit_logs[0].gcp_vm_ssh_cmd : null + sensitive = true +} + +output "gcp_audit_logs_public_ip" { + value = var.deploy_gcp_vm ? module.gcp_audit_logs[0].gcp_vm_puglic_ip : null + sensitive = true +} + +output "gcp_audit_logs_key" { + value = var.deploy_gcp_vm ? module.gcp_audit_logs[0].gcp_vm_ssh_key : null + sensitive = true +} + +output "az_vm_activity_logs_ssh_cmd" { + value = var.deploy_az_vm ? module.azure_vm_activity_logs[0].azure_vm_ssh_cmd : null + sensitive = true +} + +output "az_vm_activity_logs_public_ip" { + value = var.deploy_az_vm ? module.azure_vm_activity_logs[0].azure_vm_public_ip : null + sensitive = true +} + +output "az_vm_activity_logs_key" { + value = var.deploy_az_vm ? module.azure_vm_activity_logs[0].azure_vm_ssh_key : null + sensitive = true +} + +output "ec2_asset_inventory_ssh_cmd" { + value = module.aws_ec2_for_asset_inventory[0].cloudbeat_ssh_cmd + sensitive = true +} + +output "ec2_asset_inventory_public_ip" { + value = module.aws_ec2_for_asset_inventory[0].aws_instance_cloudbeat_public_ip + sensitive = true +} + +output "ec2_asset_inventory_key" { + value = module.aws_ec2_for_asset_inventory[0].ec2_ssh_key + sensitive = true +} diff --git a/deploy/test-environments/terraform.tf b/deploy/test-environments/cdr/terraform.tf similarity index 72% rename from deploy/test-environments/terraform.tf rename to deploy/test-environments/cdr/terraform.tf index 42f07e302e..5842b235c6 100644 --- a/deploy/test-environments/terraform.tf +++ b/deploy/test-environments/cdr/terraform.tf @@ -15,16 +15,6 @@ terraform { version = ">= 3.11, < 4.0" } - ec = { - source = "elastic/ec" - version = ">=0.9.0" - } - - restapi = { - source = "mastercard/restapi" - version = "~> 1.18.0" - } - random = { source = "hashicorp/random" version = "~> 3.5.1" diff --git a/deploy/test-environments/cdr/variables.tf b/deploy/test-environments/cdr/variables.tf new file mode 100644 index 0000000000..dd62f01fd0 --- /dev/null +++ 
b/deploy/test-environments/cdr/variables.tf @@ -0,0 +1,99 @@ +variable "deployment_name" { + default = "test-env-ci-tf" + description = "Optional set a prefix of the deployment. Defaults to test-env-ci-tf" +} + +# AWS provider variable +variable "region" { + description = "AWS region" + type = string + default = "eu-west-1" +} + +# Azure provider variable +variable "location" { + description = "Azure location" + type = string + default = "East US" +} + +# EC2 variable +variable "ami_map" { + description = "Mapping of regions to AMI IDs" + type = map(any) + default = { + "eu-west-1" = "ami-0a5b3305c37e58e04" + "eu-west-3" = "ami-0532b3f7436b93d52" + # Add more regions and respective AMI IDs here + } +} + +# GCP project ID +variable "gcp_project_id" { + description = "GCP project ID" + type = string + default = "default" +} + +variable "gcp_service_account_json" { + description = "GCP Service Account JSON" + type = string + default = "default" + sensitive = true +} + +variable "deploy_az_vm" { + description = "Deploy Azure VM resources" + type = bool + default = true +} + +variable "deploy_gcp_vm" { + description = "Deploy GCP VM resources" + type = bool + default = true +} + +variable "deploy_aws_ec2" { + description = "Deploy AWS EC2 resources" + type = bool + default = true +} + +variable "deploy_aws_asset_inventory" { + description = "Deploy AWS Asset Inventory EC2 resources" + type = bool + default = true +} + +# ========= Cloud Tags ======================== +variable "division" { + default = "engineering" + type = string + description = "Optional division resource tag" +} + +variable "org" { + default = "security" + type = string + description = "Optional org resource tag" +} + +variable "team" { + default = "cloud-security-posture" + type = string + description = "Optional team resource tag" +} + +variable "project" { + default = "test-environments" + type = string + description = "Optional project resource tag" +} + +variable "owner" { + default = "cloudbeat" 
+ type = string + description = "Optional owner tag" +} +# ========= End Of Cloud Tags =================== diff --git a/deploy/test-environments/cis/main.tf b/deploy/test-environments/cis/main.tf new file mode 100644 index 0000000000..a725c528be --- /dev/null +++ b/deploy/test-environments/cis/main.tf @@ -0,0 +1,51 @@ +provider "aws" { + region = var.region +} + +locals { + common_tags = { + division = "${var.division}" + org = "${var.org}" + team = "${var.team}" + project = "${var.project}" + owner = "${var.owner}" + deployment = "${var.deployment_name}" + } +} + +resource "random_string" "suffix" { + length = 3 + special = false +} + +# EC2 + kind deployment +module "aws_ec2_for_kspm" { + count = var.deploy_aws_kspm ? 1 : 0 + source = "../../cloud/modules/ec2" + providers = { aws : aws } + aws_ami = var.ami_map[var.region] + deploy_agent = false # Agent will not be deployed + deployment_name = "${var.deployment_name}-${random_string.suffix.result}" + specific_tags = merge(local.common_tags, { "ec2_type" : "kspm" }) +} + +module "aws_ec2_for_cspm" { + count = var.deploy_aws_cspm ? 
1 : 0 + source = "../../cloud/modules/ec2" + providers = { aws : aws } + aws_ami = var.ami_map[var.region] + deploy_k8s = false + deploy_agent = false + deployment_name = "${var.deployment_name}-${random_string.suffix.result}" + specific_tags = merge(local.common_tags, { "ec2_type" : "cspm" }) +} + +module "eks" { + source = "../../cloud/modules/provision-eks-cluster" + region = var.region + cluster_name = var.deployment_name + node_group_one_desired_size = 2 + # node_group_two_desired_size = 1 + enable_node_group_two = false + tags = merge(local.common_tags, { "ec2_type" : "kspm_eks" }) +} diff --git a/deploy/test-environments/cis/output.tf b/deploy/test-environments/cis/output.tf new file mode 100644 index 0000000000..cee27acdd3 --- /dev/null +++ b/deploy/test-environments/cis/output.tf @@ -0,0 +1,40 @@ +# Global output +# ============================================================ +output "deployment_name" { + value = var.deployment_name + description = "Terraform deployment name" +} + +# EC2 output +# ============================================================ +output "ec2_kspm_ssh_cmd" { + value = module.aws_ec2_for_kspm[0].cloudbeat_ssh_cmd + sensitive = true +} + +output "ec2_kspm_public_ip" { + value = module.aws_ec2_for_kspm[0].aws_instance_cloudbeat_public_ip + sensitive = true +} + +output "ec2_kspm_key" { + value = module.aws_ec2_for_kspm[0].ec2_ssh_key + sensitive = true +} + +output "ec2_cspm_ssh_cmd" { + value = module.aws_ec2_for_cspm[0].cloudbeat_ssh_cmd + sensitive = true +} + +output "ec2_cspm_public_ip" { + value = module.aws_ec2_for_cspm[0].aws_instance_cloudbeat_public_ip + sensitive = true +} + +output "ec2_cspm_key" { + value = module.aws_ec2_for_cspm[0].ec2_ssh_key + sensitive = true +} + +# ============================================================= diff --git a/deploy/test-environments/cis/terraform.tf b/deploy/test-environments/cis/terraform.tf new file mode 100644 index 0000000000..d6f038de93 --- /dev/null +++ 
b/deploy/test-environments/cis/terraform.tf @@ -0,0 +1,16 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "~> 4.15.0" + } + + random = { + source = "hashicorp/random" + version = "~> 3.5.1" + } + + } + + required_version = ">= 1.3, <2.0.0" +} diff --git a/deploy/test-environments/cis/variables.tf b/deploy/test-environments/cis/variables.tf new file mode 100644 index 0000000000..6896d9fc6a --- /dev/null +++ b/deploy/test-environments/cis/variables.tf @@ -0,0 +1,65 @@ +# AWS provider variable +variable "region" { + description = "AWS region" + type = string + default = "eu-west-1" +} + +# EC2 variable +variable "ami_map" { + description = "Mapping of regions to AMI IDs" + type = map(any) + default = { + "eu-west-1" = "ami-0a5b3305c37e58e04" + "eu-west-3" = "ami-0532b3f7436b93d52" + # Add more regions and respective AMI IDs here + } +} + +variable "deployment_name" { + default = "test-env-ci-tf" + description = "Optional set a prefix of the deployment. 
Defaults to test-env-ci-tf" +} + +variable "deploy_aws_kspm" { + description = "Deploy AWS KSPM EC2 resources" + type = bool + default = true +} + +variable "deploy_aws_cspm" { + description = "Deploy AWS CSPM EC2 resources" + type = bool + default = true +} + +variable "division" { + default = "engineering" + type = string + description = "Optional division resource tag" +} + +variable "org" { + default = "security" + type = string + description = "Optional org resource tag" +} + +variable "team" { + default = "cloud-security-posture" + type = string + description = "Optional team resource tag" +} + +variable "project" { + default = "test-environments" + type = string + description = "Optional project resource tag" +} + +variable "owner" { + default = "cloudbeat" + type = string + description = "Optional owner tag" +} +# ============================================ diff --git a/deploy/test-environments/delete_env.sh b/deploy/test-environments/delete_env.sh index 563139a9b7..078dce26de 100755 --- a/deploy/test-environments/delete_env.sh +++ b/deploy/test-environments/delete_env.sh @@ -17,6 +17,28 @@ AWS_REGION="eu-west-1" # Add your desired default AWS region here DELETED_ENVS=() FAILED_ENVS=() +# Function to delete all terraform states from given bucket +function delete_all_states() { + local bucket_folder=$1 + echo "Deleting all Terraform states from bucket: $bucket_folder" + + states=("cdr" "cis" "elk-stack") + # Get all states + for state in "${states[@]}"; do + local state_file="./$state/terraform.tfstate" + aws s3 cp "$BUCKET/$bucket_folder/${state}-terraform.tfstate" "$state_file" || true + done + # Destroy all states and remove environment data from S3 + if ./manage_infrastructure.sh "all" "destroy" && + aws s3 rm "$BUCKET/$bucket_folder" --recursive; then + echo "Successfully deleted $bucket_folder" + DELETED_ENVS+=("$bucket_folder") + else + echo "Failed to delete $bucket_folder" + FAILED_ENVS+=("$bucket_folder") + fi +} + # Function to delete Terraform 
environment function delete_environment() { local ENV=$1 @@ -131,7 +153,11 @@ fi # Delete the Terraform environments for ENV in "${TO_DELETE_ENVS[@]}"; do - delete_environment "$ENV" + if aws s3 ls "$BUCKET/$ENV/terraform.tfstate" >/dev/null 2>&1; then + delete_environment "$ENV" + else + delete_all_states "$ENV" + fi done # Print summary of environment deletions diff --git a/deploy/test-environments/elk-stack/main.tf b/deploy/test-environments/elk-stack/main.tf new file mode 100644 index 0000000000..c30a59f1bd --- /dev/null +++ b/deploy/test-environments/elk-stack/main.tf @@ -0,0 +1,72 @@ +locals { + common_tags = { + division = "${var.division}" + org = "${var.org}" + team = "${var.team}" + project = "${var.project}" + owner = "${var.owner}" + deployment = "${var.deployment_name}" + } + + ec_url = "https://cloud.elastic.co" + ec_headers = { + Content-type = "application/json" + Authorization = "ApiKey ${var.ec_api_key}" + } +} + +provider "ec" { + apikey = var.ec_api_key +} + +provider "restapi" { + alias = "ec" + uri = local.ec_url + write_returns_object = true + headers = local.ec_headers +} + +resource "random_string" "suffix" { + length = 3 + special = false +} + +# Elastic Cloud (EC) deployment +module "ec_deployment" { + count = var.serverless_mode ? 0 : 1 + + source = "../../cloud/modules/ec" + ec_api_key = var.ec_api_key + region = var.ess_region + stack_version = var.stack_version + tags = local.common_tags + + deployment_template = var.deployment_template + deployment_name_prefix = "${var.deployment_name}-${random_string.suffix.result}" + + elasticsearch_autoscale = true + elasticsearch_size = var.elasticsearch_size + elasticsearch_zone_count = var.elasticsearch_zone_count + + docker_image_tag_override = var.pin_version != "" ? 
{ + "elasticsearch" = "${var.pin_version}", + "kibana" = "${var.pin_version}", + "apm" = "${var.pin_version}" + } : { + "elasticsearch" = "", + "kibana" = "", + "apm" = "" + } +} + +module "ec_project" { + providers = { + restapi.elastic_cloud = restapi.ec + } + count = var.serverless_mode ? 1 : 0 + source = "../../cloud/modules/serverless" + ec_apikey = var.ec_api_key + ec_url = local.ec_url + project_name = "${var.deployment_name}-${random_string.suffix.result}" + region_id = "aws-us-east-1" # TODO: replace with var.ess_region when more regions are supported +} diff --git a/deploy/test-environments/elk-stack/output.tf b/deploy/test-environments/elk-stack/output.tf new file mode 100644 index 0000000000..d02773ba2b --- /dev/null +++ b/deploy/test-environments/elk-stack/output.tf @@ -0,0 +1,32 @@ +# Global output +# ============================================================ +output "deployment_name" { + value = var.deployment_name + description = "Terraform deployment name" +} + +# Elastic Cloud output +# ============================================================= +output "elasticsearch_url" { + value = var.serverless_mode ? module.ec_project[0].elasticsearch_url : module.ec_deployment[0].elasticsearch_url + description = "The secure Elasticsearch URL" +} + +output "elasticsearch_username" { + value = var.serverless_mode ? module.ec_project[0].elasticsearch_username : module.ec_deployment[0].elasticsearch_username + description = "The Elasticsearch username" + sensitive = true +} + +output "elasticsearch_password" { + value = var.serverless_mode ? module.ec_project[0].elasticsearch_password : module.ec_deployment[0].elasticsearch_password + description = "The Elasticsearch password" + sensitive = true +} + +output "kibana_url" { + value = var.serverless_mode ? 
module.ec_project[0].kibana_url : module.ec_deployment[0].kibana_url + description = "The secure Kibana URL" +} + +# ============================================================= diff --git a/deploy/test-environments/elk-stack/terraform.tf b/deploy/test-environments/elk-stack/terraform.tf new file mode 100644 index 0000000000..ca60daac66 --- /dev/null +++ b/deploy/test-environments/elk-stack/terraform.tf @@ -0,0 +1,21 @@ +terraform { + required_providers { + ec = { + source = "elastic/ec" + version = ">=0.9.0" + } + + restapi = { + source = "mastercard/restapi" + version = "~> 1.18.0" + } + + random = { + source = "hashicorp/random" + version = "~> 3.5.1" + } + + } + + required_version = ">= 1.3, <2.0.0" +} diff --git a/deploy/test-environments/variables.tf b/deploy/test-environments/elk-stack/variables.tf similarity index 67% rename from deploy/test-environments/variables.tf rename to deploy/test-environments/elk-stack/variables.tf index 3c8c6e7a77..6aec3f7d5a 100644 --- a/deploy/test-environments/variables.tf +++ b/deploy/test-environments/elk-stack/variables.tf @@ -1,44 +1,4 @@ -# AWS provider variable -variable "region" { - description = "AWS region" - type = string - default = "eu-west-1" -} - -# Azure provider variable -variable "location" { - description = "Azure location" - type = string - default = "East US" -} - -# EC2 variable -variable "ami_map" { - description = "Mapping of regions to AMI IDs" - type = map(any) - default = { - "eu-west-1" = "ami-0a5b3305c37e58e04" - "eu-west-3" = "ami-0532b3f7436b93d52" - # Add more regions and respective AMI IDs here - } -} - -# GCP project ID -variable "gcp_project_id" { - description = "GCP project ID" - type = string - default = "default" -} - -variable "gcp_service_account_json" { - description = "GCP Service Account JSON" - type = string - default = "default" - sensitive = true -} - -# Elastic Cloud variables -# =========================================== +# ========Elastic Cloud Variables Section=============== 
variable "ec_api_key" { description = "Provide Elastic Cloud API key or use export TF_VAR_ec_api_key={TOKEN}" type = string @@ -68,12 +28,6 @@ variable "serverless_mode" { type = bool } -variable "cdr_infra" { - default = false - description = "Set to true to create a CDR infra deployment" - type = bool -} - variable "deployment_template" { default = "gcp-general-purpose" description = "Optional deployment template. Defaults to the CPU optimized template for GCP" @@ -106,7 +60,9 @@ variable "docker_image_tag_override" { description = "Optional docker image tag override" type = map(string) } +# ========End Of Elastic Cloud Variables Section======== +# ============= Tags Section============================ variable "division" { default = "engineering" type = string @@ -136,4 +92,4 @@ variable "owner" { type = string description = "Optional owner tag" } -# ============================================ +# ======End Of Tags section===================== diff --git a/deploy/test-environments/main.tf b/deploy/test-environments/main.tf deleted file mode 100644 index 4d6e897b00..0000000000 --- a/deploy/test-environments/main.tf +++ /dev/null @@ -1,155 +0,0 @@ -provider "aws" { - region = var.region -} - -provider "google" { - project = var.gcp_project_id -} - -provider "azurerm" { - features {} -} - -locals { - common_tags = { - division = "${var.division}" - org = "${var.org}" - team = "${var.team}" - project = "${var.project}" - owner = "${var.owner}" - deployment = "${var.deployment_name}" - } - - ec_url = "https://cloud.elastic.co" - ec_headers = { - Content-type = "application/json" - Authorization = "ApiKey ${var.ec_api_key}" - } -} - -# EC2 + kind deployment -module "aws_ec2_for_kspm" { - source = "../cloud/modules/ec2" - providers = { aws : aws } - aws_ami = var.ami_map[var.region] - deploy_agent = false # Agent will not be deployed - deployment_name = "${var.deployment_name}-${random_string.suffix.result}" - specific_tags = merge(local.common_tags, { "ec2_type" : 
"kspm" }) -} - -module "aws_ec2_for_cspm" { - source = "../cloud/modules/ec2" - providers = { aws : aws } - aws_ami = var.ami_map[var.region] - deploy_k8s = false - deploy_agent = false # Agent will not be deployed - deployment_name = "${var.deployment_name}-${random_string.suffix.result}" - specific_tags = merge(local.common_tags, { "ec2_type" : "cspm" }) -} - -module "aws_ec2_for_asset_inventory" { - source = "../cloud/modules/ec2" - providers = { aws : aws } - aws_ami = var.ami_map[var.region] - deploy_k8s = false - deploy_agent = false # Agent will not be deployed - deployment_name = "${var.deployment_name}-${random_string.suffix.result}" - specific_tags = merge(local.common_tags, { "ec2_type" : "asset_inventory" }) -} - -module "gcp_audit_logs" { - count = var.cdr_infra ? 1 : 0 - providers = { google : google } - source = "../cloud/modules/gcp/vm" - gcp_service_account_json = var.gcp_service_account_json - deployment_name = var.deployment_name - network = "default" - specific_tags = merge(local.common_tags, { "vm_instance" : "audit-logs" }) -} - -resource "random_string" "suffix" { - length = 3 - special = false -} - -provider "ec" { - apikey = var.ec_api_key -} - -provider "restapi" { - alias = "ec" - uri = local.ec_url - write_returns_object = true - headers = local.ec_headers -} - -# Elastic Cloud (EC) deployment -module "ec_deployment" { - count = var.serverless_mode ? 0 : 1 - - source = "../cloud/modules/ec" - ec_api_key = var.ec_api_key - region = var.ess_region - stack_version = var.stack_version - tags = local.common_tags - - deployment_template = var.deployment_template - deployment_name_prefix = "${var.deployment_name}-${random_string.suffix.result}" - - elasticsearch_autoscale = true - elasticsearch_size = var.elasticsearch_size - elasticsearch_zone_count = var.elasticsearch_zone_count - - docker_image_tag_override = var.pin_version != "" ? 
{ - "elasticsearch" = "${var.pin_version}", - "kibana" = "${var.pin_version}", - "apm" = "${var.pin_version}" - } : { - "elasticsearch" = "", - "kibana" = "", - "apm" = "" - } -} - -module "ec_project" { - providers = { - restapi.elastic_cloud = restapi.ec - } - count = var.serverless_mode ? 1 : 0 - source = "../cloud/modules/serverless" - ec_apikey = var.ec_api_key - ec_url = local.ec_url - project_name = "${var.deployment_name}-${random_string.suffix.result}" - region_id = "aws-us-east-1" # TODO: replace with var.ess_region when more regions are supported -} - -module "eks" { - source = "../cloud/modules/provision-eks-cluster" - region = var.region - cluster_name = var.deployment_name - node_group_one_desired_size = 2 - # node_group_two_desired_size = 1 - enable_node_group_two = false - tags = merge(local.common_tags, { "ec2_type" : "kspm_eks" }) -} - -# ===== CDR Infrastructure Resources ===== -module "aws_ec2_for_cloudtrail" { - count = var.cdr_infra ? 1 : 0 - source = "../cloud/modules/ec2" - providers = { aws : aws } - aws_ami = var.ami_map[var.region] - deploy_k8s = false - deploy_agent = false # Agent will not be deployed - deployment_name = "${var.deployment_name}-${random_string.suffix.result}" - specific_tags = merge(local.common_tags, { "ec2_type" : "cloudtrail" }) -} - -module "azure_vm_activity_logs" { - count = var.cdr_infra ? 
1 : 0 - source = "../cloud/modules/azure/vm" - providers = { azurerm : azurerm } - location = var.location - deployment_name = var.deployment_name - specific_tags = merge(local.common_tags, { "vm_type" : "activity-logs" }) -} diff --git a/deploy/test-environments/manage_infrastructure.sh b/deploy/test-environments/manage_infrastructure.sh new file mode 100755 index 0000000000..7a40e29aba --- /dev/null +++ b/deploy/test-environments/manage_infrastructure.sh @@ -0,0 +1,72 @@ +#!/bin/bash + +# Function to run Terraform in a given directory for apply, destroy, output, or upload operation +run_terraform() { + local dir=$1 + local operation=$2 + + echo "Running Terraform $operation in $dir..." + cd "$dir" || exit + + case $operation in + "apply") + terraform init + terraform validate + terraform apply -auto-approve + ;; + "destroy") + terraform init + if [ "$dir" == "cis" ] && terraform state list | grep -q "kubernetes_config_map_v1_data.aws_auth"; then + echo "Removing aws_auth resource from state in cis..." + terraform state rm "$(terraform state list | grep "kubernetes_config_map_v1_data.aws_auth")" + fi + terraform destroy -auto-approve && rm terraform.tfstate + ;; + "output") + ../set_cloud_env_params.sh "$dir" + ;; + "upload-state") + ../upload_state.sh "$dir" + ;; + *) + echo "Invalid operation. Use 'apply', 'destroy', 'output', or 'upload-state'." 
+ cd - >/dev/null || exit 1 + ;; + esac + + cd - >/dev/null || exit +} + +# Check for valid input +if [ "$#" -ne 2 ]; then + echo "Usage: $0 {elk-stack|cis|cdr|all} {apply|destroy|output|upload-state}" + exit 1 +fi + +# Main script logic +action=$2 + +case $1 in +elk-stack) + run_terraform "elk-stack" "$action" + ;; +cis) + run_terraform "elk-stack" "$action" + run_terraform "cis" "$action" + ;; +cdr) + run_terraform "elk-stack" "$action" + run_terraform "cdr" "$action" + ;; +all) + run_terraform "elk-stack" "$action" + run_terraform "cdr" "$action" + run_terraform "cis" "$action" + ;; +*) + echo "Usage: $0 {elk-stack|cis|cdr|all} {apply|destroy|output|upload-state}" + exit 1 + ;; +esac + +echo "Terraform $action operation completed." diff --git a/deploy/test-environments/output.tf b/deploy/test-environments/output.tf deleted file mode 100644 index 34fa1561da..0000000000 --- a/deploy/test-environments/output.tf +++ /dev/null @@ -1,125 +0,0 @@ -# Global output -# ============================================================ -output "deployment_name" { - value = var.deployment_name - description = "Terraform deployment name" -} - -# EC2 output -# ============================================================ -output "ec2_kspm_ssh_cmd" { - value = module.aws_ec2_for_kspm.cloudbeat_ssh_cmd - sensitive = true -} - -output "ec2_kspm_public_ip" { - value = module.aws_ec2_for_kspm.aws_instance_cloudbeat_public_ip - sensitive = true -} - -output "ec2_kspm_key" { - value = module.aws_ec2_for_kspm.ec2_ssh_key - sensitive = true -} - -output "ec2_cspm_ssh_cmd" { - value = module.aws_ec2_for_cspm.cloudbeat_ssh_cmd - sensitive = true -} - -output "ec2_cspm_public_ip" { - value = module.aws_ec2_for_cspm.aws_instance_cloudbeat_public_ip - sensitive = true -} - -output "ec2_cspm_key" { - value = module.aws_ec2_for_cspm.ec2_ssh_key - sensitive = true -} - -output "ec2_asset_inventory_ssh_cmd" { - value = module.aws_ec2_for_asset_inventory.cloudbeat_ssh_cmd - sensitive = true -} - 
-output "ec2_asset_inventory_public_ip" { - value = module.aws_ec2_for_asset_inventory.aws_instance_cloudbeat_public_ip - sensitive = true -} - -output "ec2_asset_inventory_key" { - value = module.aws_ec2_for_asset_inventory.ec2_ssh_key - sensitive = true -} - -output "ec2_cloudtrail_ssh_cmd" { - value = var.cdr_infra ? module.aws_ec2_for_cloudtrail[0].cloudbeat_ssh_cmd : null - sensitive = true -} - -output "ec2_cloudtrail_public_ip" { - value = var.cdr_infra ? module.aws_ec2_for_cloudtrail[0].aws_instance_cloudbeat_public_ip : null - sensitive = true -} - -output "ec2_cloudtrail_key" { - value = var.cdr_infra ? module.aws_ec2_for_cloudtrail[0].ec2_ssh_key : null - sensitive = true -} - -output "gcp_audit_logs_ssh_cmd" { - value = var.cdr_infra ? module.gcp_audit_logs[0].gcp_vm_ssh_cmd : null - sensitive = true -} - -output "gcp_audit_logs_public_ip" { - value = var.cdr_infra ? module.gcp_audit_logs[0].gcp_vm_puglic_ip : null - sensitive = true -} - -output "gcp_audit_logs_key" { - value = var.cdr_infra ? module.gcp_audit_logs[0].gcp_vm_ssh_key : null - sensitive = true -} - -output "az_vm_activity_logs_ssh_cmd" { - value = var.cdr_infra ? module.azure_vm_activity_logs[0].azure_vm_ssh_cmd : null - sensitive = true -} - -output "az_vm_activity_logs_public_ip" { - value = var.cdr_infra ? module.azure_vm_activity_logs[0].azure_vm_public_ip : null - sensitive = true -} - -output "az_vm_activity_logs_key" { - value = var.cdr_infra ? module.azure_vm_activity_logs[0].azure_vm_ssh_key : null - sensitive = true -} -# ============================================================= - -# Elastic Cloud output -# ============================================================= -output "elasticsearch_url" { - value = var.serverless_mode ? module.ec_project[0].elasticsearch_url : module.ec_deployment[0].elasticsearch_url - description = "The secure Elasticsearch URL" -} - -output "elasticsearch_username" { - value = var.serverless_mode ? 
module.ec_project[0].elasticsearch_username : module.ec_deployment[0].elasticsearch_username - description = "The Elasticsearch username" - sensitive = true -} - -output "elasticsearch_password" { - value = var.serverless_mode ? module.ec_project[0].elasticsearch_password : module.ec_deployment[0].elasticsearch_password - description = "The Elasticsearch password" - sensitive = true -} - -output "kibana_url" { - value = var.serverless_mode ? module.ec_project[0].kibana_url : module.ec_deployment[0].kibana_url - description = "The secure Kibana URL" -} - -# ============================================================= diff --git a/.ci/scripts/remote_setup.sh b/deploy/test-environments/remote_setup.sh similarity index 100% rename from .ci/scripts/remote_setup.sh rename to deploy/test-environments/remote_setup.sh diff --git a/deploy/test-environments/set_cloud_env_params.sh b/deploy/test-environments/set_cloud_env_params.sh new file mode 100755 index 0000000000..6516e21438 --- /dev/null +++ b/deploy/test-environments/set_cloud_env_params.sh @@ -0,0 +1,114 @@ +#!/bin/bash + +# Function to output kibana and elasticsearch variables +output_kibana_vars() { + KIBANA_URL="$(terraform output -raw kibana_url)" + echo "KIBANA_URL=$KIBANA_URL" >>"$GITHUB_ENV" + ES_URL="$(terraform output -raw elasticsearch_url)" + echo "ES_URL=$ES_URL" >>"$GITHUB_ENV" + ES_USER="$(terraform output -raw elasticsearch_username)" + echo "ES_USER=$ES_USER" >>"$GITHUB_ENV" + + ES_PASSWORD=$(terraform output -raw elasticsearch_password) + echo "::add-mask::$ES_PASSWORD" + echo "ES_PASSWORD=$ES_PASSWORD" >>"$GITHUB_ENV" + + # Remove 'https://' from the URLs + KIBANA_URL_STRIPPED="${KIBANA_URL//https:\/\//}" + ES_URL_STRIPPED="${ES_URL//https:\/\//}" + + # Create test URLs with credentials + TEST_KIBANA_URL="https://${ES_USER}:${ES_PASSWORD}@${KIBANA_URL_STRIPPED}" + echo "::add-mask::${TEST_KIBANA_URL}" + echo "TEST_KIBANA_URL=${TEST_KIBANA_URL}" >>"$GITHUB_ENV" + + 
TEST_ES_URL="https://${ES_USER}:${ES_PASSWORD}@${ES_URL_STRIPPED}" + echo "::add-mask::${TEST_ES_URL}" + echo "TEST_ES_URL=${TEST_ES_URL}" >>"$GITHUB_ENV" +} + +# Function to output cdr machine variables +output_cis_vars() { + EC2_CSPM=$(terraform output -raw ec2_cspm_ssh_cmd) + echo "::add-mask::$EC2_CSPM" + echo "EC2_CSPM=$EC2_CSPM" >>"$GITHUB_ENV" + + EC2_KSPM=$(terraform output -raw ec2_kspm_ssh_cmd) + echo "::add-mask::$EC2_KSPM" + echo "EC2_KSPM=$EC2_KSPM" >>"$GITHUB_ENV" + + EC2_CSPM_KEY=$(terraform output -raw ec2_cspm_key) + echo "::add-mask::$EC2_CSPM_KEY" + echo "EC2_CSPM_KEY=$EC2_CSPM_KEY" >>"$GITHUB_ENV" + + EC2_KSPM_KEY=$(terraform output -raw ec2_kspm_key) + echo "::add-mask::$EC2_KSPM_KEY" + echo "EC2_KSPM_KEY=$EC2_KSPM_KEY" >>"$GITHUB_ENV" + + KSPM_PUBLIC_IP=$(terraform output -raw ec2_kspm_public_ip) + echo "::add-mask::$KSPM_PUBLIC_IP" + echo "KSPM_PUBLIC_IP=$KSPM_PUBLIC_IP" >>"$GITHUB_ENV" + + CSPM_PUBLIC_IP=$(terraform output -raw ec2_cspm_public_ip) + echo "::add-mask::$CSPM_PUBLIC_IP" + echo "CSPM_PUBLIC_IP=$CSPM_PUBLIC_IP" >>"$GITHUB_ENV" + +} + +# Function to output cis variables +output_cdr_vars() { + ec2_cloudtrail_public_ip=$(terraform output -raw ec2_cloudtrail_public_ip) + echo "::add-mask::$ec2_cloudtrail_public_ip" + echo "CLOUDTRAIL_PUBLIC_IP=$ec2_cloudtrail_public_ip" >>"$GITHUB_ENV" + + ec2_cloudtrail_key=$(terraform output -raw ec2_cloudtrail_key) + echo "::add-mask::$ec2_cloudtrail_key" + echo "CLOUDTRAIL_KEY=$ec2_cloudtrail_key" >>"$GITHUB_ENV" + + az_vm_activity_logs_public_ip=$(terraform output -raw az_vm_activity_logs_public_ip) + echo "::add-mask::$az_vm_activity_logs_public_ip" + echo "ACTIVITY_LOGS_PUBLIC_IP=$az_vm_activity_logs_public_ip" >>"$GITHUB_ENV" + + az_vm_activity_logs_key=$(terraform output -raw az_vm_activity_logs_key) + echo "::add-mask::$az_vm_activity_logs_key" + echo "ACTIVITY_LOGS_KEY=$az_vm_activity_logs_key" >>"$GITHUB_ENV" + + gcp_audit_logs_public_ip=$(terraform output -raw gcp_audit_logs_public_ip) + 
echo "::add-mask::$gcp_audit_logs_public_ip" + echo "AUDIT_LOGS_PUBLIC_IP=$gcp_audit_logs_public_ip" >>"$GITHUB_ENV" + + gcp_audit_logs_key=$(terraform output -raw gcp_audit_logs_key) + echo "::add-mask::$gcp_audit_logs_key" + echo "AUDIT_LOGS_KEY=$gcp_audit_logs_key" >>"$GITHUB_ENV" + + ec2_asset_inv_key=$(terraform output -raw ec2_asset_inventory_key) + echo "::add-mask::$ec2_asset_inv_key" + echo "EC2_ASSET_INV_KEY=$ec2_asset_inv_key" >>"$GITHUB_ENV" + + asset_inv_public_ip=$(terraform output -raw ec2_asset_inventory_public_ip) + echo "::add-mask::$asset_inv_public_ip" + echo "ASSET_INV_PUBLIC_IP=$asset_inv_public_ip" >>"$GITHUB_ENV" +} + +# Check for valid input +if [ "$#" -ne 1 ]; then + echo "Usage: $0 {elk-stack|cis|cdr}" + exit 1 +fi + +# Determine which function to call based on argument +case "$1" in +"elk-stack") + output_kibana_vars + ;; +"cdr") + output_cdr_vars + ;; +"cis") + output_cis_vars + ;; +*) + echo "Usage: $0 {elk-stack|cis|cdr}" + exit 1 + ;; +esac diff --git a/deploy/test-environments/upload_state.sh b/deploy/test-environments/upload_state.sh new file mode 100755 index 0000000000..1b8320603c --- /dev/null +++ b/deploy/test-environments/upload_state.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Function to upload state and key files for ELK stack +upload_elk_stack() { + aws s3 cp "./terraform.tfstate" "${S3_BUCKET}/elk-stack-terraform.tfstate" +} + +# Function to upload state and key files for CIS +upload_cis() { + aws s3 cp "./terraform.tfstate" "${S3_BUCKET}/cis-terraform.tfstate" + aws s3 cp "${EC2_CSPM_KEY}" "${S3_BUCKET}/cspm.pem" + aws s3 cp "${EC2_KSPM_KEY}" "${S3_BUCKET}/kspm.pem" + aws s3 cp "./state_data.json" "$S3_BUCKET/state_data.json" +} + +# Function to upload additional keys for CDR +upload_cdr() { + aws s3 cp "./terraform.tfstate" "${S3_BUCKET}/cdr-terraform.tfstate" + aws s3 cp "${CLOUDTRAIL_KEY}" "${S3_BUCKET}/cloudtrail.pem" + aws s3 cp "${ACTIVITY_LOGS_KEY}" "${S3_BUCKET}/az_activity_logs.pem" + aws s3 cp "${AUDIT_LOGS_KEY}" 
"${S3_BUCKET}/gcp_audit_logs.pem" + aws s3 cp "${EC2_ASSET_INV_KEY}" "${S3_BUCKET}/asset_inv.pem" + aws s3 cp "./state_data.json" "$S3_BUCKET/state_data.json" +} + +# Check for valid input +if [ "$#" -ne 1 ]; then + echo "Usage: $0 {elk-stack|cis|cdr}" + exit 1 +fi + +# Determine which function to call based on argument +case $1 in +"elk-stack") + upload_elk_stack + ;; +"cis") + upload_cis + ;; +"cdr") + upload_cdr + ;; +*) + echo "Usage: $0 {elk-stack|cis|cdr}" + exit 1 + ;; +esac + +echo "Upload operation completed." diff --git a/tests/commonlib/agents_map.py b/tests/commonlib/agents_map.py index 74bd805c21..8056b55c67 100644 --- a/tests/commonlib/agents_map.py +++ b/tests/commonlib/agents_map.py @@ -54,6 +54,7 @@ def load_map(self): cfg = Munch() cfg.auth = elasticsearch.basic_auth cfg.kibana_url = elasticsearch.kibana_url + cfg.stack_version = elasticsearch.stack_version active_agents = get_agents(cfg) logger.info(f"found {len(active_agents)} agents") diff --git a/tests/fleet_api/agent_policy_api.py b/tests/fleet_api/agent_policy_api.py index 1a8ca166af..ca79272ef4 100644 --- a/tests/fleet_api/agent_policy_api.py +++ b/tests/fleet_api/agent_policy_api.py @@ -152,8 +152,9 @@ def get_agents(cfg: Munch) -> list: url=url, auth=cfg.auth, ) - response_obj = munchify(response) - return response_obj.list + if cfg.stack_version.startswith("9."): + return munchify(response.get("items", [])) + return munchify(response.get("list", [])) except APICallException as api_ex: logger.error( f"API call failed, status code {api_ex.status_code}. 
Response: {api_ex.response_text}", diff --git a/tests/fleet_api/common_api.py b/tests/fleet_api/common_api.py index 3f9a5bb799..cc94ca5658 100644 --- a/tests/fleet_api/common_api.py +++ b/tests/fleet_api/common_api.py @@ -38,10 +38,11 @@ def get_enrollment_token(cfg: Munch, policy_id: str) -> str: url=url, auth=cfg.auth, ) - response_obj = munchify(response) - + api_keys = munchify(response.get("list", [])) + if cfg.stack_version.startswith("9."): + api_keys = munchify(response.get("items", [])) api_key = "" - for item in response_obj.list: + for item in api_keys: if item.policy_id == policy_id: api_key = item.api_key break @@ -318,7 +319,10 @@ def get_package_version( ) cloud_security_posture_version = None - for package in response["response"]: + packages = response.get("response", []) + if cfg.stack_version.startswith("9."): + packages = response.get("items", []) + for package in packages: if package.get("name", "") == package_name: cloud_security_posture_version = package.get("version", "") break @@ -365,7 +369,10 @@ def get_package( auth=cfg.auth, params={"params": request_params}, ) - return response.get("response", {}) + package_data = response.get("response", {}) + if cfg.stack_version.startswith("9."): + package_data = response.get("item", {}) + return package_data except APICallException as api_ex: logger.error( f"API call failed, status code {api_ex.status_code}. 
Response: {api_ex.response_text}", diff --git a/tests/fleet_api/package_policy_api.py b/tests/fleet_api/package_policy_api.py index d38786c408..423609af27 100644 --- a/tests/fleet_api/package_policy_api.py +++ b/tests/fleet_api/package_policy_api.py @@ -128,7 +128,10 @@ def create_integration(cfg: Munch, pkg_policy: dict, agent_policy_id: str, data: auth=cfg.auth, params={"json": pkg_policy}, ) - package_policy_id = munchify(response).item.id + policy_data = response.get("response", {}).get("item", {}) + if cfg.stack_version.startswith("9."): + policy_data = response.get("item", {}) + package_policy_id = policy_data.get("id", "") logger.info(f"Package policy '{package_policy_id}' created successfully") return package_policy_id except APICallException as api_ex: diff --git a/tests/integrations_setup/install_azure_asset_inventory_integration.py b/tests/integrations_setup/install_azure_asset_inventory_integration.py index 2c60c6ebcf..525f78fec0 100755 --- a/tests/integrations_setup/install_azure_asset_inventory_integration.py +++ b/tests/integrations_setup/install_azure_asset_inventory_integration.py @@ -74,7 +74,6 @@ ): logger.warning(f"{INTEGRATION_NAME} is not supported in version {package_version}") sys.exit(0) - logger.info(f"Package version: {package_version}") update_package_version( cfg=cnfg.elk_config, package_name=PACKAGE_NAME,