From e6f6049303d8cfe66a73cd047edfba2a20e28d2f Mon Sep 17 00:00:00 2001
From: Fabien Boucher
Date: Mon, 10 Jun 2024 13:28:43 +0200
Subject: [PATCH] This change brings, in one patch, the changes of 3.8

Depends-On: https://softwarefactory-project.io/r/c/software-factory/sf-ci/+/31675
Depends-On: https://softwarefactory-project.io/r/c/software-factory/sf-ci/+/31690

Here are the squashed commits from the common 3.8.3 tag:

  git format-patch -N 20d7af3ff4a3cddeeb52b8c9b9d2461f60eecaf3..origin/3.8
  git am *.patch

There were some conflicts that have been fixed manually.

Remove Opensearch Dashboards autologin feature

After moving to Keycloak, this feature is no longer required.

Fixes

- after d/s upgrade: clean-up of old logprocessing components
- opensearch-dashboards and opensearch use the ca-trust CA chain
- add the sf_purgelogs_additional_params variable (mount additional volumes)

Set host network binding for some services and containerized tools

Almost all containers started in Software Factory use host network binding.

Render zuul_api_url as python list

The logscraper tool takes the zuul_api_url parameter as a list, and multiple
values can be provided (see the sketch after this batch of commit messages).

Change url path for Opensearch Dashboards

The new URL does not use the autologin feature.

Add condition to verify that stdout item exists

The item might not exist when the infrastructure is updated each time
Software Factory is released.

sf-keycloak: quote passwords in parameters

Passwords may include special characters that break command lines.

Add option gerrit_use_truststore

Allow increasing innodb_log_file_size and innodb_buffer_pool_size

After increasing these parameters, some queries performed by Zuul run faster.
This change mostly helps Zuul deployments where scripts make a complicated
query with many job_name variables against Zuul web to receive the latest
build results and the SQL "inner join" takes a long time.

Ensure backup dir exists; change backup host

After changing the service name from Kibana to Opensearch Dashboards, if the
arch.yaml file was not updated to the new values, the backup directory for
the opensearch-dashboards service might not be available on the host.

Use new mysql container version

Depends-on: https://softwarefactory-project.io/r/c/containers/+/27429

Add conditional for zuul-web check on grafana postconfig stage

Add debug flag for purgelogs; remove :Z flag for log dir in purgelogs

The log directory might contain a lot of files, so restarting the purgelogs
service could take ages until the SELinux labeling is done. Also add a debug
flag parameter to the purgelogs service to show removal progress logs.

Logserver trailing slash fix

This change fixes the trailing slash problem raised by the OSP CI team. The
issue is that requests made to the logserver without a trailing slash do not
work.

Mount MariaDB cache dir

Without mounting the cache dir, the container delta overlay dir might grow
very large.

Change retention policy in influxdb; increase buffer

This commit fixes various issues related to telegraf and influxdb errors such
as: "Metric buffer overflow; 831 metrics have been dropped". Also change the
retention policy to wipe data after 4 weeks.

Update purgelogs container image

The new purgelogs container image provides log messages about its progress.

config-repo: Pull centos image from quay rather than registry.centos.org

registry.centos.org seems down, investigation pending. This breaks
config-update jobs, which rebuild containers defined in the config repo. In
the meantime, switch to quay.io for pulling.
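To illustrate the "Render zuul_api_url as python list" change above, here is a
minimal sketch of the new list form of the logscraper role default; the second
tenant URL is a hypothetical extra entry, not part of this patch:

  zuul_api_url:
    - "https://{{ fqdn }}/zuul/api/tenant/{{ tenant_name }}"
    # hypothetical second tenant, shown only to illustrate multiple values
    - "https://{{ fqdn }}/zuul/api/tenant/example-tenant"

The logscraper.yaml.j2 template then renders this value through the "| list"
filter, so the tool always receives a list even with a single entry.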
zuul-web: mount /var/lib/zuul/

When a connection requires an SSH key, the key is stored in
/var/lib/zuul/.ssh, which isn't exposed to zuul-web, resulting in errors when
the configuration is loaded.

Use zuul-executor-ubi-sf38 to benefit from the last managesf release

See https://softwarefactory-project.io/cgit/containers/commit/images-sf/3.8?id=87dea1ceae4719e48193e85a8bc7fdfd5553216f

Set log_size_max size for podman logs

After a while the service logs can become really huge. This change limits the
log file size to 1GB (see the example after this batch of commit messages).
The feature was added to the podman containers.conf file in the podman 2.2.0
release [1], but on CentOS 7 the podman version is below 2.2.0. According to
the libpod.conf man page [2], that option should also be available in podman
1.6.4, but it is located in the libpod.conf file. More info in [3].

[1] https://github.com/containers/podman/releases/tag/v2.2.0
[2] https://manpages.debian.org/unstable/podman/libpod.conf.5.en.html
[3] https://github.com/unifi-utilities/unifios-utilities/issues/100

Depends-On: https://softwarefactory-project.io/r/c/software-factory/sf-ci/+/28529

Use the latest managesf-sf38 container image; drop encoding parameter in managesf

The "encoding" parameter raises an error when starting the managesf service.

Ensure nodepool services are restarted when config files are updated

Nodepool services must be restarted when labels are added.

zuul/nodepool: bump to the latest version (10.0.0)

This change sets the ansible_root zuul.conf variable to avoid an ansible
installation on startup. Also bump MariaDB to version 10.5 because the index
renaming feature (needed for the Zuul DB migration) is not available in 10.3.

Depends-On: https://softwarefactory-project.io/r/c/containers/+/31361
Depends-On: https://softwarefactory-project.io/r/c/software-factory/sf-ci/+/31362
Depends-On: https://softwarefactory-project.io/r/c/containers/+/31412

Provide fixes to enable the mariadb upgrade from 10.3 to 10.5

Running sfconfig --upgrade is then required.

Depends-On: https://softwarefactory-project.io/r/c/software-factory/sf-ci/+/31390

arch allinone - add missing zuul-merger component

Update sf-gerrit to latest build

3.7.8-2 was built somewhat recently [1] and addresses a couple of CVEs.

[1] https://quay.io/repository/software-factory/gerrit-sf38?tab=tags

Add --golden-tests feature to validate generated playbooks

This change enables testing the deployment playbooks without installing
sf-config. Run with:

  PYTHONPATH=$(pwd) python3 ./sfconfig/cmd.py \
    --golden-tests ./refarch-golden-tests/ \
    --arch ./refarch/softwarefactory-project.io.yaml \
    --config ./defaults/sfconfig.yaml --share $(pwd)

Remove unused host_public_url facts

This change removes a fact that is no longer used.

Sort the /etc/hosts aliases to avoid random updates

This change ensures /etc/hosts is defined in a fixed order.

Combine zuul-executor and zuul-merger hosts in the generated deployment playbook

This change improves the deployment process by combining the common hosts into
a single target so that the roles can be applied in parallel.

Setup user_namespaces before the restore tasks

When restoring a backup on a fresh instance, make sure that the userns is
configured so that the containers can be created correctly.

Do not use the zuul_wrapper for restore tasks

When restoring a backup on a fresh instance, the zuul_wrapper command does not
exist.

Restore zookeeper lib ownership after a restore

This change ensures the zookeeper setup is correct after a restore.
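To illustrate the "Set log_size_max size for podman logs" change above: the
sf-base setup task in this patch writes the limit into
/etc/containers/libpod.conf (creating the file when missing), so with the
sf-base role default log_size_max of 1048576000 bytes (1000 MB) the resulting
line is simply:

  max_log_size=1048576000

Since log_size_max is a role default in sf-base, a deployment can override it
if a different limit is needed.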
Revert "Combine zuul-executor and zuul-merger hosts in the generated deployment playbook" Change-Id: I1742905336af06de3d35814413932f7558317036 --- ansible/roles/sf-base/defaults/main.yml | 3 + ansible/roles/sf-base/tasks/setup.yml | 8 + .../roles/sf-container/templates/creation.j2 | 2 +- ansible/roles/sf-gateway/defaults/main.yml | 1 - ansible/roles/sf-gateway/meta/sfconfig.py | 12 - .../tasks/basicauth_configuration.yml | 59 ----- .../sf-gateway/tasks/jwt_configuration.yml | 66 ----- ansible/roles/sf-gateway/tasks/postconf.yml | 9 - ansible/roles/sf-gateway/tasks/setup.yml | 7 - ansible/roles/sf-gateway/tasks/upgrade.yml | 3 +- .../sf-gateway/templates/gateway.common.j2 | 3 + .../opensearch_dashboards_autologin.conf.j2 | 36 --- ansible/roles/sf-gerrit/defaults/main.yml | 4 +- .../sf-gerrit/templates/entrypoint.sh.j2 | 2 + .../roles/sf-install-server/defaults/main.yml | 2 +- .../roles/sf-install-server/meta/sfconfig.py | 2 +- .../roles/sf-install-server/tasks/setup.yml | 2 +- ansible/roles/sf-keycloak/defaults/main.yml | 8 +- .../roles/sf-log-processing/defaults/main.yml | 5 +- .../roles/sf-log-processing/tasks/setup.yml | 37 +++ .../roles/sf-log-processing/tasks/upgrade.yml | 38 --- .../templates/logscraper.yaml.j2 | 2 +- ansible/roles/sf-logserver/defaults/main.yml | 10 +- ansible/roles/sf-managesf/defaults/main.yml | 4 +- .../roles/sf-managesf/templates/config.py.j2 | 3 +- ansible/roles/sf-mosquitto/tasks/setup.yml | 2 +- ansible/roles/sf-mysql/defaults/main.yml | 25 +- ansible/roles/sf-mysql/tasks/disable.yml | 2 - ansible/roles/sf-mysql/tasks/setup.yml | 27 +- ansible/roles/sf-mysql/tasks/upgrade.yml | 51 ++-- ansible/roles/sf-mysql/templates/my.cnf.j2 | 199 --------------- ansible/roles/sf-nodepool/defaults/main.yml | 2 +- .../roles/sf-nodepool/tasks/config_update.yml | 5 +- .../config/containers/centos-7/Dockerfile | 2 +- .../config/zuul.d/_pipelines.yaml.j2 | 9 +- ansible/roles/sf-zookeeper/tasks/restore.yml | 5 +- ansible/roles/sf-zuul/defaults/main.yml | 5 +- ansible/roles/sf-zuul/tasks/restore.yml | 2 +- ansible/roles/sf-zuul/tasks/setup.yml | 2 +- ansible/roles/sf-zuul/tasks/upgrade.yml | 1 + defaults/sfconfig.yaml | 7 +- refarch-golden-tests/config_initialize.yml | 70 ++++++ refarch-golden-tests/config_update.yml | 52 ++++ refarch-golden-tests/etc-hosts | 55 +++++ refarch-golden-tests/get_logs.yml | 232 ++++++++++++++++++ refarch-golden-tests/hosts | 111 +++++++++ refarch-golden-tests/nodepool_restart.yml | 32 +++ refarch-golden-tests/sf_backup.yml | 133 ++++++++++ refarch-golden-tests/sf_erase.yml | 114 +++++++++ refarch-golden-tests/tenant_update.yml | 52 ++++ refarch-golden-tests/zuul_restart.yml | 97 ++++++++ refarch-golden-tests/zuul_start.yml | 38 +++ refarch-golden-tests/zuul_stop.yml | 29 +++ refarch/allinone.yaml | 1 + refarch/softwarefactory-project.io.yaml | 6 +- sfconfig/arch.py | 2 +- sfconfig/cmd.py | 23 ++ sfconfig/inventory.py | 12 +- 58 files changed, 1206 insertions(+), 527 deletions(-) delete mode 100644 ansible/roles/sf-gateway/tasks/basicauth_configuration.yml delete mode 100644 ansible/roles/sf-gateway/tasks/jwt_configuration.yml delete mode 100644 ansible/roles/sf-gateway/tasks/postconf.yml delete mode 100644 ansible/roles/sf-gateway/templates/opensearch_dashboards_autologin.conf.j2 delete mode 100755 ansible/roles/sf-mysql/templates/my.cnf.j2 create mode 100644 refarch-golden-tests/config_initialize.yml create mode 100644 refarch-golden-tests/config_update.yml create mode 100644 refarch-golden-tests/etc-hosts create mode 100644 
refarch-golden-tests/get_logs.yml create mode 100644 refarch-golden-tests/hosts create mode 100644 refarch-golden-tests/nodepool_restart.yml create mode 100644 refarch-golden-tests/sf_backup.yml create mode 100644 refarch-golden-tests/sf_erase.yml create mode 100644 refarch-golden-tests/tenant_update.yml create mode 100644 refarch-golden-tests/zuul_restart.yml create mode 100644 refarch-golden-tests/zuul_start.yml create mode 100644 refarch-golden-tests/zuul_stop.yml diff --git a/ansible/roles/sf-base/defaults/main.yml b/ansible/roles/sf-base/defaults/main.yml index 3621e853d..d609ebaf7 100644 --- a/ansible/roles/sf-base/defaults/main.yml +++ b/ansible/roles/sf-base/defaults/main.yml @@ -30,3 +30,6 @@ gpg_keys_url: - https://softwarefactory-project.io/cgit/software-factory/sf-release/plain/RPM-GPG-KEY-SOFTWARE-FACTORY - https://www.centos.org/keys/RPM-GPG-KEY-CentOS-SIG-Cloud - https://www.centos.org/keys/RPM-GPG-KEY-CentOS-Official + +# set 1000MB in bytes +log_size_max: 1048576000 diff --git a/ansible/roles/sf-base/tasks/setup.yml b/ansible/roles/sf-base/tasks/setup.yml index 0997dd943..6ac6ad9e2 100644 --- a/ansible/roles/sf-base/tasks/setup.yml +++ b/ansible/roles/sf-base/tasks/setup.yml @@ -131,3 +131,11 @@ - name: Trust crt shell: update-ca-trust when: new_crt is changed + +- name: Set max log limit in libpod configuration file + become: true + lineinfile: + path: /etc/containers/libpod.conf + regexp: "^max_log_size" + line: "max_log_size={{ log_size_max }}" + create: true diff --git a/ansible/roles/sf-container/templates/creation.j2 b/ansible/roles/sf-container/templates/creation.j2 index 14eafd922..be213feb0 100755 --- a/ansible/roles/sf-container/templates/creation.j2 +++ b/ansible/roles/sf-container/templates/creation.j2 @@ -8,7 +8,7 @@ if [ "$1" ]; then {% else %} {% set module = 'unknown' %} {% endif %} - container_exec_path=$(podman run --rm {{ item.image }} python3 -c "import {{ module }}, os.path; print(os.path.dirname({{ module }}.__file__) + '/')") + container_exec_path=$(podman run --network host --rm {{ item.image }} python3 -c "import {{ module }}, os.path; print(os.path.dirname({{ module }}.__file__) + '/')") patched_volume=" --volume $1:$container_exec_path:z " fi podman create --name {{ item.service }} {{ item.params }} {{ self_signed_certs }} $patched_volume {{ item.image }} {{ item.run | default(None) }} diff --git a/ansible/roles/sf-gateway/defaults/main.yml b/ansible/roles/sf-gateway/defaults/main.yml index 80e4a648a..1e9f15579 100644 --- a/ansible/roles/sf-gateway/defaults/main.yml +++ b/ansible/roles/sf-gateway/defaults/main.yml @@ -2,7 +2,6 @@ role_actions: - install - setup - - postconf - get_logs - disable - backup diff --git a/ansible/roles/sf-gateway/meta/sfconfig.py b/ansible/roles/sf-gateway/meta/sfconfig.py index 5b7d9a2c8..8150576ff 100644 --- a/ansible/roles/sf-gateway/meta/sfconfig.py +++ b/ansible/roles/sf-gateway/meta/sfconfig.py @@ -103,15 +103,3 @@ def configure(self, args, host): args.glue['external_opensearch_dashboards_host'] = \ args.sfconfig.get('opensearch_dashboards', {}).get('host_url') - - args.glue['readonly_user_autologin'] = \ - args.sfconfig.get('opensearch_dashboards', {}).get( - 'readonly_user_autologin', 'Basic') - - if args.sfconfig.get('external_opensearch', {}).get('users', {}): - for user, creds in args.sfconfig.get('external_opensearch' - ).get('users').items(): - if creds.get('role') == 'readonly': - args.glue['external_opensearch_readonly_user'] = user - args.glue['external_opensearch_readonly_password'] = \ - 
creds.get('password') diff --git a/ansible/roles/sf-gateway/tasks/basicauth_configuration.yml b/ansible/roles/sf-gateway/tasks/basicauth_configuration.yml deleted file mode 100644 index 59f75fe4f..000000000 --- a/ansible/roles/sf-gateway/tasks/basicauth_configuration.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- -- name: Check if Opensearch Dashboards user basicauth is in secrets - command: grep -q 'opensearch_kibanauser_basicauth' /var/lib/software-factory/bootstrap-data/secrets.yaml - delegate_to: "{{ install_server_host }}" - register: _opensearch_dashboards_basicauth_old - no_log: "{{ show_hidden_logs }}" - ignore_errors: true - -- name: Setup autologin for Basic auth - block: - - name: Gen Opensearch Dashboards basicauth when external - block: - - name: "Get user:password string for external opensearch user" - set_fact: - _opensearch_dashboards_basicauth_external_plain: "{{ external_opensearch_readonly_user }}:{{ external_opensearch_readonly_password }}" - - name: Set encoded password string for external Opensearch dashboards user - set_fact: - _opensearch_dashboards_basicauth_external: "{{ _opensearch_dashboards_basicauth_external_plain | b64encode }}" - when: external_opensearch_readonly_user is defined and external_opensearch_readonly_password is defined - - - name: Gen Opensearch Dashboards basicauth when internal - block: - - name: "Get user:password string for local Opensearch Dashboards user" - set_fact: - _opensearch_dashboards_basicauth_local_plain: "{{ opensearch_readonly_user }}:{{ opensearch_readonly_password }}" - - name: Set encoded password string for local Opensearch Dashboards user - set_fact: - _opensearch_dashboards_basicauth_local: "{{ _opensearch_dashboards_basicauth_local_plain | b64encode }}" - when: _opensearch_dashboards_basicauth_external is not defined - - - name: Set basicauth info - set_fact: - opensearch_kibanauser_basicauth: "{{ _opensearch_dashboards_basicauth_external | default(_opensearch_dashboards_basicauth_local) }}" - - - name: Copy basicauth in secrets.yaml - lineinfile: - path: /var/lib/software-factory/bootstrap-data/secrets.yaml - regexp: "^opensearch_kibanauser_basicauth" - line: "opensearch_kibanauser_basicauth: {{ opensearch_kibanauser_basicauth }}" - delegate_to: "{{ install_server_host }}" - - - name: Add autologin include file - template: - src: opensearch_dashboards_autologin.conf.j2 - dest: /etc/httpd/conf.d/opensearch_dashboards_autologin.conf - mode: "0444" - owner: apache - group: apache - register: _opensearch_dashboards_autologin_httpd - - # NOTE: Handler is not restarting apache2 service before - # other postjobs are done. 
- - name: Restart httpd service without handler - service: - name: httpd - state: restarted - when: _opensearch_dashboards_autologin_httpd.changed - - when: readonly_user_autologin is defined and readonly_user_autologin | lower == 'basic' or _opensearch_dashboards_basicauth_old.rc == 1 diff --git a/ansible/roles/sf-gateway/tasks/jwt_configuration.yml b/ansible/roles/sf-gateway/tasks/jwt_configuration.yml deleted file mode 100644 index ce838c5be..000000000 --- a/ansible/roles/sf-gateway/tasks/jwt_configuration.yml +++ /dev/null @@ -1,66 +0,0 @@ ---- -- name: Check if Opensearch Dashboards user token is in secrets - command: grep -q 'opensearch_kibanauser_token' /var/lib/software-factory/bootstrap-data/secrets.yaml - delegate_to: '{{ install_server_host }}' - register: _opensearch_dashboards_jwt_old_token - no_log: "{{ show_hidden_logs }}" - ignore_errors: true - -- name: Setup autologin for JWT token - block: - - name: Install required package - yum: - name: python3-jwt - state: present - - - name: Generate secret for jwt - command: | - uuidgen - register: _opensearch_dashboards_jwt_secret - no_log: "{{ show_hidden_logs }}" - - - name: Set jwt secret - set_fact: - _opensearch_dashboards_jwt_secret_b64: "{{ _opensearch_dashboards_jwt_secret.stdout | b64encode }}" - - - name: Copy jwt generator script - template: - src: jwt_generator.j2 - dest: /usr/local/bin/jwt_generator - mode: '0755' - become: true - - - name: Generate jwt token - command: python3 /usr/local/bin/jwt_generator - register: _opensearch_dashboards_jwt_token - no_log: "{{ show_hidden_logs }}" - - - name: Set jwt facts - set_fact: - opensearch_kibanauser_token: "{{ _opensearch_dashboards_jwt_token.stdout }}" - - - name: Copy jwt token in secrets.yaml - lineinfile: - path: /var/lib/software-factory/bootstrap-data/secrets.yaml - regexp: '^opensearch_kibanauser_token' - line: 'opensearch_kibanauser_token: {{ _opensearch_dashboards_jwt_token.stdout }}' - delegate_to: '{{ install_server_host }}' - - - name: Add autologin include file - template: - src: opensearch_dashboards_autologin.conf.j2 - dest: /etc/httpd/conf.d/opensearch_dashboards_autologin.conf - mode: '0444' - owner: apache - group: apache - register: _opensearch_dashboards_autologin_httpd - - # NOTE: Handler is not restarting apache2 service before - # other postjobs are done. 
- - name: Restart httpd service without handler - service: - name: httpd - state: restarted - when: _opensearch_dashboards_autologin_httpd.changed - - when: readonly_user_autologin is defined and readonly_user_autologin | lower == "jwt" or _opensearch_dashboards_jwt_old_token.rc == 1 diff --git a/ansible/roles/sf-gateway/tasks/postconf.yml b/ansible/roles/sf-gateway/tasks/postconf.yml deleted file mode 100644 index 18d3b041e..000000000 --- a/ansible/roles/sf-gateway/tasks/postconf.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -- name: Setup Opensearch Dashboards autologin - block: - - include_tasks: jwt_configuration.yml - - include_tasks: basicauth_configuration.yml - when: > - 'opensearch-dashboards' in roles or - external_opensearch_dashboards_host is defined and - external_opensearch_dashboards_host diff --git a/ansible/roles/sf-gateway/tasks/setup.yml b/ansible/roles/sf-gateway/tasks/setup.yml index feb6792fe..bd6f37f20 100644 --- a/ansible/roles/sf-gateway/tasks/setup.yml +++ b/ansible/roles/sf-gateway/tasks/setup.yml @@ -115,13 +115,6 @@ when: '"kibana" in roles or external_opensearch_dashboards_host is defined' notify: apache reload -- name: Create autologin empty file - file: - path: /etc/httpd/conf.d/opensearch_dashboards_autologin.conf - state: touch - mode: '0444' - when: readonly_user_autologin is defined and readonly_user_autologin - - name: Add server status configuration file template: src: server-status.conf.j2 diff --git a/ansible/roles/sf-gateway/tasks/upgrade.yml b/ansible/roles/sf-gateway/tasks/upgrade.yml index d8d8b6704..b8ef61a9c 100644 --- a/ansible/roles/sf-gateway/tasks/upgrade.yml +++ b/ansible/roles/sf-gateway/tasks/upgrade.yml @@ -19,11 +19,12 @@ state: absent when: sf_previous_version < 3.7 -- name: Remove deprecated repoxplorer files +- name: Remove deprecated config files file: path: "{{ item }}" state: absent loop: - /etc/httpd/conf.d/gateway-repoxplorer.conf - /var/www/static/repoxplorer.keycloak.json + - /etc/httpd/conf.d/opensearch_dashboards_autologin.conf notify: apache reload diff --git a/ansible/roles/sf-gateway/templates/gateway.common.j2 b/ansible/roles/sf-gateway/templates/gateway.common.j2 index a2a4f8a68..7e715845f 100644 --- a/ansible/roles/sf-gateway/templates/gateway.common.j2 +++ b/ansible/roles/sf-gateway/templates/gateway.common.j2 @@ -176,6 +176,9 @@ {% endif %} {% if 'logserver' in roles %} + RewriteCond %{REQUEST_URI} /logs/+[^\.]+$ + RewriteRule ^/logs/(.+[^/])$ %{REQUEST_URI}/ [R=301,L] + ProxyPass /logs/ http://{{ logserver_host }}:{{ logserver_http_port }}/logs/ ProxyPassReverse /logs/ http://{{ logserver_host }}:{{ logserver_http_port }}/logs/ diff --git a/ansible/roles/sf-gateway/templates/opensearch_dashboards_autologin.conf.j2 b/ansible/roles/sf-gateway/templates/opensearch_dashboards_autologin.conf.j2 deleted file mode 100644 index ead220135..000000000 --- a/ansible/roles/sf-gateway/templates/opensearch_dashboards_autologin.conf.j2 +++ /dev/null @@ -1,36 +0,0 @@ -# NOTE: Can not enable authorization via HTTP form and with injecting -# http header to the location: /analytics even when added condition -# to remove headers when page is e.g. /analytics/app/login. -# In that case, if someone will go to /analytics_autologin, -# authorization header will be injected, then Kibana will automatically -# redirect to the /analytics location. 
- - # Common settings for Basic/JWT auth - RequestHeader set "Host" "{{ fqdn }}" - - # ip address of opensearch_dashboards_host - RequestHeader set "X-Forwarded-For" "{{ fqdn }}" - RequestHeader set "X-Forwarded-Host" "{{ fqdn }}" - - {% if readonly_user_autologin | lower == 'basic' and opensearch_kibanauser_basicauth is defined %} - # Basic auth - RequestHeader set "Authorization" "Basic {{ opensearch_kibanauser_basicauth }}" - {% endif %} - - {% if readonly_user_autologin | lower == 'jwt' and opensearch_kibanauser_token is defined %} - RequestHeader set "Authorization" "Bearer {{ opensearch_kibanauser_token }}" - {% endif %} - -{% if external_opensearch_dashboards_host %} - ProxyPass {{ external_opensearch_dashboards_host }} - ProxyPassReverse {{ external_opensearch_dashboards_host }} -{% else %} - ProxyPass {{ opensearch_dashboards_internal_url }} - ProxyPassReverse {{ opensearch_dashboards_internal_url }} -{% endif %} - - RewriteEngine on - RewriteCond %{QUERY_STRING} ^$ - RewriteRule /analytics_autologin/ /analytics/ [L] - - diff --git a/ansible/roles/sf-gerrit/defaults/main.yml b/ansible/roles/sf-gerrit/defaults/main.yml index a6f4ad3da..4fdc9f138 100644 --- a/ansible/roles/sf-gerrit/defaults/main.yml +++ b/ansible/roles/sf-gerrit/defaults/main.yml @@ -18,8 +18,8 @@ gerrit_conf_dir: "/etc/gerrit" gerrit_lib_dir: "/var/lib/gerrit" gerrit_log_dir: "/var/log/gerrit" -gerrit_version: 3.7.4 -gerrit_container_version: "{{ gerrit_version }}-1" +gerrit_version: 3.7.8 +gerrit_container_version: "{{ gerrit_version }}-5" gerrit_components: - service: "gerrit" diff --git a/ansible/roles/sf-gerrit/templates/entrypoint.sh.j2 b/ansible/roles/sf-gerrit/templates/entrypoint.sh.j2 index ca2646019..c4515da7d 100755 --- a/ansible/roles/sf-gerrit/templates/entrypoint.sh.j2 +++ b/ansible/roles/sf-gerrit/templates/entrypoint.sh.j2 @@ -4,8 +4,10 @@ JAVA_OPTIONS="-Djava.security.egd=file:/dev/./urandom" JAVA_OPTIONS="${JAVA_OPTIONS} -Djavax.net.ssl.keyStore=/var/gerrit/etc/keystore" JAVA_OPTIONS="${JAVA_OPTIONS} -Djavax.net.ssl.keyStorePassword={{ gerrit_keystore_password }}" +{% if gerrit_use_truststore|default(True) %} JAVA_OPTIONS="${JAVA_OPTIONS} -Djavax.net.ssl.trustStore=/var/gerrit/etc/truststore" JAVA_OPTIONS="${JAVA_OPTIONS} -Djavax.net.ssl.trustStorePassword=changeit" +{% endif %} configure_keystore () { keytool -importkeystore -srckeystore /var/gerrit/etc/{{ fqdn }}.pkcs12 \ diff --git a/ansible/roles/sf-install-server/defaults/main.yml b/ansible/roles/sf-install-server/defaults/main.yml index 9d14edb0c..88f61b0e3 100644 --- a/ansible/roles/sf-install-server/defaults/main.yml +++ b/ansible/roles/sf-install-server/defaults/main.yml @@ -12,4 +12,4 @@ rdo_release_url: "https://rdoproject.org/repos/openstack-stein/rdo-release-stein role_package: sf-config mysql_config_dir: /etc/mysql -mysql_var_run_dir: /var/run/mysqld +mysql_var_lib: /var/lib/mysql diff --git a/ansible/roles/sf-install-server/meta/sfconfig.py b/ansible/roles/sf-install-server/meta/sfconfig.py index 276abcacc..745991d6d 100644 --- a/ansible/roles/sf-install-server/meta/sfconfig.py +++ b/ansible/roles/sf-install-server/meta/sfconfig.py @@ -102,7 +102,7 @@ def get_links(_type): + status_link + service("nodepool", "/nodepool") + service("keycloak", "/auth/realms/SF/account/") - + service("opensearch-dashboards", "/analytics_autologin") + + service("opensearch-dashboards", "/analytics") + service("grafana", "/grafana") + service("etherpad", "/etherpad") + service("lodgeit", "/paste") diff --git 
a/ansible/roles/sf-install-server/tasks/setup.yml b/ansible/roles/sf-install-server/tasks/setup.yml index 4d232cf9d..f99134761 100644 --- a/ansible/roles/sf-install-server/tasks/setup.yml +++ b/ansible/roles/sf-install-server/tasks/setup.yml @@ -100,7 +100,7 @@ priv: 'zuul.*:ALL' state: present config_file: "{{ mysql_config_dir }}/.my.cnf" - login_unix_socket: "{{ mysql_var_run_dir }}/mysqld.sock" + login_unix_socket: "{{ mysql_var_lib }}/mysql.sock" delegate_to: "{{ mysql_host }}" no_log: true diff --git a/ansible/roles/sf-keycloak/defaults/main.yml b/ansible/roles/sf-keycloak/defaults/main.yml index 708073816..c01fc18af 100644 --- a/ansible/roles/sf-keycloak/defaults/main.yml +++ b/ansible/roles/sf-keycloak/defaults/main.yml @@ -27,7 +27,7 @@ keycloak_container_release: 4 gsku_container_version: 0.0.4 gsku_container_release: 1 -events_listener_config: "{% if 'firehose' in roles %} --spi-events-listener-mqtt-server-uri=\"tcp://{{ firehose_host }}:1883\" --spi-events-listener-mqtt-username=SF_SERVICE_USER --spi-events-listener-mqtt-password={{ sf_service_user_password }} --spi-events-listener-mqtt-topic=keycloak {% else %} {% endif %}" +events_listener_config: "{% if 'firehose' in roles %} --spi-events-listener-mqtt-server-uri=\"tcp://{{ firehose_host }}:1883\" --spi-events-listener-mqtt-username=SF_SERVICE_USER --spi-events-listener-mqtt-password=\"{{ sf_service_user_password }}\" --spi-events-listener-mqtt-topic=keycloak {% else %} {% endif %}" keycloak_components: - service: "keycloak" @@ -42,7 +42,7 @@ keycloak_components: --db-url-database=keycloak --db-url-host={{ mysql_host }} --db-username=keycloak - --db-password={{ keycloak_mysql_password }} + --db-password="{{ keycloak_mysql_password }}" --health-enabled=true --metrics-enabled=true --hostname={{ fqdn }} @@ -54,7 +54,7 @@ keycloak_components: --volume {{ keycloak_certs_dir }}:/etc/x509/https:Z -p {{ keycloak_http_port }}:{{ keycloak_http_port }} --env KEYCLOAK_ADMIN=admin - --env KEYCLOAK_ADMIN_PASSWORD={{ authentication.admin_password }} + --env KEYCLOAK_ADMIN_PASSWORD="{{ authentication.admin_password }}" {% if sf_keycloak_additional_params is defined %} {{ sf_keycloak_additional_params }} {% endif %} @@ -73,7 +73,7 @@ keycloak_components: kcadm_options: | --no-config - --password {{ authentication.admin_password }} + --password "{{ authentication.admin_password }}" --realm master --server http://localhost:{{ keycloak_http_port }}/auth --user admin diff --git a/ansible/roles/sf-log-processing/defaults/main.yml b/ansible/roles/sf-log-processing/defaults/main.yml index 049ef521d..c04e600b0 100644 --- a/ansible/roles/sf-log-processing/defaults/main.yml +++ b/ansible/roles/sf-log-processing/defaults/main.yml @@ -5,6 +5,8 @@ role_actions: - get_logs - disable +role_package: "log-processing" + owner: logscraper group: logscraper container_gid: 1000 @@ -15,7 +17,8 @@ opensearch_host: opensearch.example.com opensearch_logstash_password: "CHANGE_ME" -zuul_api_url: https://{{ fqdn }}/zuul/api/tenant/{{ tenant_name }} +zuul_api_url: + - "https://{{ fqdn }}/zuul/api/tenant/{{ tenant_name }}" insecure: false download: true logscraper_dir: /var/lib/logscraper diff --git a/ansible/roles/sf-log-processing/tasks/setup.yml b/ansible/roles/sf-log-processing/tasks/setup.yml index 16901e77d..678034791 100644 --- a/ansible/roles/sf-log-processing/tasks/setup.yml +++ b/ansible/roles/sf-log-processing/tasks/setup.yml @@ -1,4 +1,41 @@ --- +# NOTE: Remove this block in the future +- name: Remove logstash and log-gearman + block: + - name: Disable 
services + service: + name: "{{ item }}" + state: stopped + enabled: "no" + ignore_errors: true + loop: + - logstash + - log-gearman-client + - log-gearman-worker + + - name: Remove not needed packages + yum: + name: + - elasticsearch-curator + - python-log2gearman-client + - python-log2gearman-worker + state: absent + + - name: Delete logstash container + command: "podman rm logstash" + ignore_errors: true + + - name: Remove unecessary directories + file: + path: "{{ item }}" + state: absent + loop: + - /etc/logstash + - /etc/log-gearman-client + - /etc/log-gearman-worker + + when: sf_previous_version < 3.8 + - name: Check container image version include_role: name: sf-container diff --git a/ansible/roles/sf-log-processing/tasks/upgrade.yml b/ansible/roles/sf-log-processing/tasks/upgrade.yml index 5ca0e372d..ed97d539c 100644 --- a/ansible/roles/sf-log-processing/tasks/upgrade.yml +++ b/ansible/roles/sf-log-processing/tasks/upgrade.yml @@ -1,39 +1 @@ --- -# NOTE: Remove it in the future -- name: Remove logstash - block: - - name: Disable services - service: - name: "{{ item }}" - state: stopped - enabled: "no" - ignore_errors: true - loop: - - logstash - - log-gearman-client - - log-gearman-worker - - - name: Remove not needed packages - yum: - name: - - elasticsearch-curator - - python-log2gearman-client - - python-log2gearman-worker - state: absent - - - name: Delete containers - logstash - include_role: - name: sf-container - tasks_from: delete.yaml - loop: "{{ logstash_components }}" - - - name: Remove unecessary directories - file: - path: "{{ item }}" - state: absent - loop: - - /etc/logstash - - /etc/log-gearman-client - - /etc/log-gearman-worker - - when: sf_previous_version < 3.8 diff --git a/ansible/roles/sf-log-processing/templates/logscraper.yaml.j2 b/ansible/roles/sf-log-processing/templates/logscraper.yaml.j2 index de8953fba..ad89f06fe 100644 --- a/ansible/roles/sf-log-processing/templates/logscraper.yaml.j2 +++ b/ansible/roles/sf-log-processing/templates/logscraper.yaml.j2 @@ -1,5 +1,5 @@ --- -zuul_api_url: {{ zuul_api_url.split(', ') }} +zuul_api_url: {{ zuul_api_url | list }} follow: {{ follow | default(true) }} checkpoint_file: {{ checkpoint_file | default(logscraper_dir + '/checkpoint') }} workers: {{ logscraper_workers | default(1) }} diff --git a/ansible/roles/sf-logserver/defaults/main.yml b/ansible/roles/sf-logserver/defaults/main.yml index ae4441d8a..746f5ed98 100644 --- a/ansible/roles/sf-logserver/defaults/main.yml +++ b/ansible/roles/sf-logserver/defaults/main.yml @@ -22,8 +22,9 @@ logs_directory_prefix: logs/ logserver_version: 4.9.3 logserver_container_version: "{{ logserver_version }}-2" -purgelogs_version: 0.2.0 +purgelogs_version: 0.2.3 purgelogs_container_version: "{{ purgelogs_version }}-1" +purgelogs_debug: false logserver_http_port: 31215 @@ -34,13 +35,16 @@ logserver_components: - service: "purgelogs" image: "quay.io/software-factory/purgelogs:{{ purgelogs_container_version }}" params: >- - --volume /var/www/:/var/www/:Z + --volume /var/www/:/var/www/ --uidmap 0:4000:100 --gidmap 0:4000:100 --uidmap {{ container_uid}}:{{ logserver_uid | default(1000) }}:1 --gidmap {{ container_gid }}:{{ logserver_gid | default(1000) }}:1 + {% if sf_purgelogs_additional_params is defined %} + {{ sf_purgelogs_additional_params }} + {% endif %} run: >- - bash -c "/usr/local/bin/purgelogs --retention-days '{{ logs_expiry }}' --loop 3600 --log-path-dir /var/www/logs" + bash -c "/usr/local/bin/purgelogs --retention-days '{{ logs_expiry }}' --loop 3600 --log-path-dir 
/var/www/logs {% if purgelogs_debug %}--debug{% endif %}" - service: "{{ role_package }}" image: "quay.io/software-factory/{{ role_package }}:{{ logserver_container_version }}" diff --git a/ansible/roles/sf-managesf/defaults/main.yml b/ansible/roles/sf-managesf/defaults/main.yml index a9a7a62a3..fe0cd61d9 100644 --- a/ansible/roles/sf-managesf/defaults/main.yml +++ b/ansible/roles/sf-managesf/defaults/main.yml @@ -28,7 +28,7 @@ nodepool_conf_dir: "/etc/nodepool" nodepool_lib_dir: "/var/lib/nodepool" nodepool_webapp_port: 8006 -managesf_version: 0.30.0 +managesf_version: 0.32.1 managesf_container_version: "{{ managesf_version }}-1" managesf_config_dir: /etc/managesf @@ -37,7 +37,7 @@ managesf_lib_dir: /var/lib/managesf managesf_components: - service: "{{ role_package }}" - image: "quay.io/software-factory/{{ role_package }}:{{ managesf_container_version }}" + image: "quay.io/software-factory/{{ role_package }}-sf38:{{ managesf_container_version }}" params: >- --network host --uidmap {{ container_uid }}:{{ managesf_uid | default(1000) }}:1 diff --git a/ansible/roles/sf-managesf/templates/config.py.j2 b/ansible/roles/sf-managesf/templates/config.py.j2 index dddeccdf6..2870824d7 100644 --- a/ansible/roles/sf-managesf/templates/config.py.j2 +++ b/ansible/roles/sf-managesf/templates/config.py.j2 @@ -84,8 +84,7 @@ zuul = { {% endif %} sqlalchemy = { - 'url': 'mysql://{{ managesf_mysql_user }}:{{ managesf_mysql_password }}@{{ managesf_mysql_host }}:{{ managesf_mysql_port }}/{{ managesf_mysql_db }}?charset=utf8', - 'encoding': 'utf-8', + 'url': 'mysql://{{ managesf_mysql_user }}:{{ managesf_mysql_password }}@{{ managesf_mysql_host }}:{{ managesf_mysql_port }}/{{ managesf_mysql_db }}?charset=utf8' } policy = { diff --git a/ansible/roles/sf-mosquitto/tasks/setup.yml b/ansible/roles/sf-mosquitto/tasks/setup.yml index 8a84cfb74..64ead0cde 100644 --- a/ansible/roles/sf-mosquitto/tasks/setup.yml +++ b/ansible/roles/sf-mosquitto/tasks/setup.yml @@ -57,7 +57,7 @@ - name: update password file shell: "{{ item }}" loop: - - podman run --rm {{ mosquitto_components[0].params }} {{ mosquitto_components[0].image }} mosquitto_passwd -b {{ mosquitto_config_dir }}/passwords SF_SERVICE_USER {{sf_service_user_password}} + - podman run --network host --rm {{ mosquitto_components[0].params }} {{ mosquitto_components[0].image }} mosquitto_passwd -b {{ mosquitto_config_dir }}/passwords SF_SERVICE_USER {{sf_service_user_password}} changed_when: false - name: finish mosquitto configuration diff --git a/ansible/roles/sf-mysql/defaults/main.yml b/ansible/roles/sf-mysql/defaults/main.yml index 931292afb..9cef2179d 100644 --- a/ansible/roles/sf-mysql/defaults/main.yml +++ b/ansible/roles/sf-mysql/defaults/main.yml @@ -7,16 +7,15 @@ role_actions: - restore - disable - role_package: mysql owner: mysql group: mysql -container_uid: 999 -container_gid: 999 +container_uid: 1000 +container_gid: 1000 -container_tag: "10.3.10" -container_version: 1 +container_tag: "10.5.9" +container_version: 2 mysql_host: "mysql.example.com" mysql_root_password: "CHANGE_ME" @@ -25,28 +24,28 @@ mysql_databases: {} mysql_config_dir: /etc/mysql mysql_config_drop_in_dir: /etc/mysql/conf.d -mysql_var_run_dir: /var/run/mysqld mysql_var_lib: /var/lib/mysql +mysql_cache_dir: /var/tmp +mysql_host_cache_dir: "{{ mysql_cache_dir }}/mysql" mysql_port: 3306 mysql_components: - service: "{{ role_package }}" - image: "quay.io/software-factory/mariadb:{{container_tag}}-{{container_version}}" + image: 
"quay.io/software-factory/mariadb-sf38:{{container_tag}}-{{container_version}}" params: >- --network host --user {{ container_uid }}:{{ container_gid }} --uidmap {{ container_uid }}:{{ user_uid | default(1000) }}:1 --gidmap {{ container_uid }}:{{ usergroup_gid | default(1000) }}:1 - --uidmap 0:4000:998 - --gidmap 0:4000:998 + --uidmap 0:4000:999 + --gidmap 0:4000:999 --env MYSQL_ROOT_PASSWORD={{ mysql_root_password }} - --volume {{ mysql_config_dir }}/my.cnf:{{ mysql_config_dir }}/my.cnf:z - --volume {{ mysql_config_dir }}/.my.cnf:/root/.my.cnf:z + --env MARIADB_DISABLE_UPGRADE_BACKUP=1 + --env MARIADB_AUTO_UPGRADE=1 --volume {{ mariadb_sql_dir }}:{{ container_dir }}:z - --volume {{ mysql_config_drop_in_dir }}:{{ mysql_config_drop_in_dir }}:z - --volume {{ mysql_var_run_dir }}:{{ mysql_var_run_dir }}:z --volume {{ mysql_var_lib }}:{{ mysql_var_lib }}:z + --volume {{ mysql_host_cache_dir }}:{{ mysql_cache_dir }}:z {% if sf_mysql_additional_params is defined %} {{ sf_mysql_additional_params }} {% endif %} diff --git a/ansible/roles/sf-mysql/tasks/disable.yml b/ansible/roles/sf-mysql/tasks/disable.yml index 22eb44c5d..eeaa8b846 100644 --- a/ansible/roles/sf-mysql/tasks/disable.yml +++ b/ansible/roles/sf-mysql/tasks/disable.yml @@ -20,9 +20,7 @@ path: "{{ item }}" state: absent loop: - - "{{ mysql_config_drop_in_dir }}" - "{{ mysql_config_dir }}" - - "{{ mysql_var_run_dir }}" - "{{ mysql_var_lib }}" when: erase == True diff --git a/ansible/roles/sf-mysql/tasks/setup.yml b/ansible/roles/sf-mysql/tasks/setup.yml index 8e5a5e3df..788e975ed 100644 --- a/ansible/roles/sf-mysql/tasks/setup.yml +++ b/ansible/roles/sf-mysql/tasks/setup.yml @@ -4,10 +4,7 @@ name: sf-container tasks_from: create_directory.yaml loop: - - path: "{{ mysql_config_drop_in_dir }}" - mode: "0755" - recurse: "yes" - - path: "{{ mysql_var_run_dir }}" + - path: "{{ mysql_config_dir }}" mode: "0755" recurse: "yes" - path: "{{ mysql_var_lib }}" @@ -16,6 +13,9 @@ - path: "{{ mariadb_sql_dir }}" mode: "0755" recurse: "yes" + - path: "{{ mysql_host_cache_dir }}" + mode: "0755" + recurse: "yes" # /run or /var/run (symlink to /run) is a tmpfs and mysqld directory must be added # to it before starting mysqld @@ -23,7 +23,7 @@ copy: content: "d /run/mysqld 0755 mysql mysql" dest: /usr/lib/tmpfiles.d/mysqld.conf - mode: '0644' + mode: "0644" owner: root group: root @@ -52,8 +52,6 @@ group: "root" mode: "0644" loop: - - src: "templates/my.cnf.j2" - dest: "{{ mysql_config_dir }}/my.cnf" - src: "templates/client.cnf.j2" dest: "{{ mysql_config_dir }}/.my.cnf" @@ -63,13 +61,6 @@ dest: "{{ mariadb_sql_dir }}/databases.sql" register: databases_setup -- name: Set wait_timeout to 2 weeks - ini_file: - dest: "{{ mysql_config_drop_in_dir }}/mariadb-server.cnf" - section: server - option: wait_timeout - value: '1209600' - - name: "Get {{ owner }} uid" command: "id -u {{ owner }}" register: _user_id @@ -115,15 +106,15 @@ - name: ensure anonymous users are not in the database mysql_user: - name: '' + name: "" host: "{{ item }}" state: absent config_file: "{{ mysql_config_dir }}/.my.cnf" - login_unix_socket: "{{ mysql_var_run_dir }}/mysqld.sock" - no_log: true + login_unix_socket: "{{ mysql_var_lib }}/mysql.sock" loop: - localhost - "{{ ansible_hostname }}" + ignore_errors: true - name: Ensure all databases are created shell: > @@ -136,7 +127,7 @@ name: "{{ item }}" state: absent config_file: "{{ mysql_config_dir }}/.my.cnf" - login_unix_socket: "{{ mysql_var_run_dir }}/mysqld.sock" + login_unix_socket: "{{ mysql_var_lib }}/mysql.sock" loop: - test # 
TODO either add this line or provide a manual removal procedure post-upgrade to 3.8 diff --git a/ansible/roles/sf-mysql/tasks/upgrade.yml b/ansible/roles/sf-mysql/tasks/upgrade.yml index 07f5e5d4a..bc0fe4476 100644 --- a/ansible/roles/sf-mysql/tasks/upgrade.yml +++ b/ansible/roles/sf-mysql/tasks/upgrade.yml @@ -2,33 +2,13 @@ - name: Gather the rpm packages as facts package_facts: manager: auto - no_log : true + no_log: true - service_facts: run_once: true -- name: If mariadb-server is running +- name: If mariadb-server is running (installed via package) block: - - - name: Creating backup of old config files - copy: - src: "{{ item.src }}" - dest: "{{ item.target }}" - owner: "{{ owner }}" - group: "{{ group }}" - mode: "0644" - loop: - - src: "/root/.my.cnf" - target: "{{ mysql_config_dir }}/.my.cnf.bck" - - src: "/root/.my.cnf" - target: "{{ mysql_config_dir }}/.my.cnf" - - src: "/etc/my.cnf" - target: "{{ mysql_config_dir }}/my.cnf.bck" - - src: "/etc/my.cnf" - target: "{{ mysql_config_dir }}/my.cnf" - - src: "/etc/my.cnf.d/mariadb-server.cnf" - target: "{{ mysql_config_drop_in_dir }}/mariadb-server.cnf.bck" - - name: Stop the service service: name: mariadb @@ -49,8 +29,31 @@ - "/etc/my.cnf" - "/etc/my.cnf.d/mariadb-server.cnf" when: - - "'mariadb-server' in ansible_facts.packages" - - "'mariadb-server.service' in {{ ansible_facts.services }}" + - "'mariadb-server' in ansible_facts.packages" + - "'mariadb-server.service' in {{ ansible_facts.services }}" + +- name: Check if mariadb 10.3.28 is installed + shell: "podman images | grep mariadb | grep 10.3.28" + register: check_mariadb_10_3_28 + ignore_errors: true + +- name: If mariadb-server 10.3.28 is running (installed via container sf-3.8) + block: + # podman stop attempt to kill SIGTERM mysqld_safe instead of mysqld + # and as it fails it send SIGKILL and the mysql is shutdown in impropoer state + # preventing the service to be upgraded. + - name: Fix systemctl stop command + shell: | + sed -i 's|^ExecStop.*|ExecStop=/bin/pkill mysqld|g' /etc/systemd/system/mysql.service + systemctl daemon-reload + + - name: Stop the service + service: + name: mysql + state: stopped + when: + - "'mysql.service' in {{ ansible_facts.services }}" + - check_mariadb_10_3_28 is success - name: Pull image include_role: diff --git a/ansible/roles/sf-mysql/templates/my.cnf.j2 b/ansible/roles/sf-mysql/templates/my.cnf.j2 deleted file mode 100755 index 6b9cbea70..000000000 --- a/ansible/roles/sf-mysql/templates/my.cnf.j2 +++ /dev/null @@ -1,199 +0,0 @@ -# This file is the original file from {{role_package}}:{{container_tag}} -# container, converted to a Jinja file. -# -# MariaDB database server configuration file. -# -# You can copy this file to one of: -# - "/etc/mysql/my.cnf" to set global options, -# - "~/.my.cnf" to set user-specific options. -# -# One can use all long options that the program supports. -# Run program with --help to get a list of available options and with -# --print-defaults to see which it would actually understand and use. -# -# For explanations see -# http://dev.mysql.com/doc/mysql/en/server-system-variables.html - -# This will be passed to all mysql clients -# It has been reported that passwords should be enclosed with ticks/quotes -# escpecially if they contain "#" chars... -# Remember to edit /etc/mysql/debian.cnf when changing the socket location. 
- -[server] -wait_timeout = 1209600 - -[client] -port = {{ mysql_port }} -socket = {{ mysql_var_run_dir }}/mysqld.sock - -# Here is entries for some specific programs -# The following values assume you have at least 32M ram - -# This was formally known as [safe_mysqld]. Both versions are currently parsed. -[mysqld_safe] -socket = {{ mysql_var_run_dir }}/mysqld.sock -nice = 0 - -[mysqld] -# -# * Basic Settings -# -#user = mysql -pid-file = {{ mysql_var_run_dir }}/mysqld.pid -socket = {{ mysql_var_run_dir }}/mysqld.sock -port = {{ mysql_port }} -basedir = /usr -datadir = /var/lib/mysql -tmpdir = /tmp -lc_messages_dir = /usr/share/mysql -lc_messages = en_US -skip-external-locking -# -# Instead of skip-networking the default is now to listen only on -# localhost which is more compatible and is not less secure. -#bind-address = 127.0.0.1 -# -# * Fine Tuning -# -max_connections = 100 -connect_timeout = 5 -wait_timeout = 600 -max_allowed_packet = 16M -thread_cache_size = 128 -sort_buffer_size = 4M -bulk_insert_buffer_size = 16M -tmp_table_size = 32M -max_heap_table_size = 32M -# -# * MyISAM -# -# This replaces the startup script and checks MyISAM tables if needed -# the first time they are touched. On error, make copy and try a repair. -myisam_recover_options = BACKUP -key_buffer_size = 128M -#open-files-limit = 2000 -table_open_cache = 400 -myisam_sort_buffer_size = 512M -concurrent_insert = 2 -read_buffer_size = 2M -read_rnd_buffer_size = 1M -# -# * Query Cache Configuration -# -# Cache only tiny result sets, so we can fit more in the query cache. -query_cache_limit = 128K -query_cache_size = 64M -# for more write intensive setups, set to DEMAND or OFF -#query_cache_type = DEMAND -# -# * Logging and Replication -# -# Both location gets rotated by the cronjob. -# Be aware that this log type is a performance killer. -# As of 5.1 you can enable the log at runtime! -#general_log_file = /var/log/mysql/mysql.log -#general_log = 1 -# -# Error logging goes to syslog due to /etc/mysql/conf.d/mysqld_safe_syslog.cnf. -# -# we do want to know about network errors and such -#log_warnings = 2 -# -# Enable the slow query log to see queries with especially long duration -#slow_query_log[={0|1}] -slow_query_log_file = /var/log/mysql/mariadb-slow.log -long_query_time = 10 -#log_slow_rate_limit = 1000 -#log_slow_verbosity = query_plan - -#log-queries-not-using-indexes -#log_slow_admin_statements -# -# The following can be used as easy to replay backup logs or for replication. -# note: if you are setting up a replication slave, see README.Debian about -# other settings you may need to change. -#server-id = 1 -#report_host = master1 -#auto_increment_increment = 2 -#auto_increment_offset = 1 -#log_bin = /var/log/mysql/mariadb-bin -#log_bin_index = /var/log/mysql/mariadb-bin.index -# not fab for performance, but safer -#sync_binlog = 1 -expire_logs_days = 10 -max_binlog_size = 100M -# slaves -#relay_log = /var/log/mysql/relay-bin -#relay_log_index = /var/log/mysql/relay-bin.index -#relay_log_info_file = /var/log/mysql/relay-bin.info -#log_slave_updates -#read_only -# -# If applications support it, this stricter sql_mode prevents some -# mistakes like inserting invalid dates etc. -#sql_mode = NO_ENGINE_SUBSTITUTION,TRADITIONAL -# -# * InnoDB -# -# InnoDB is enabled by default with a 10MB datafile in /var/lib/mysql/. -# Read the manual for more InnoDB related options. There are many! 
-default_storage_engine = InnoDB -# you can't just change log file size, requires special procedure -#innodb_log_file_size = 50M -innodb_buffer_pool_size = 256M -innodb_log_buffer_size = 8M -innodb_file_per_table = 1 -innodb_open_files = 400 -innodb_io_capacity = 400 -innodb_flush_method = O_DIRECT -# -# * Security Features -# -# Read the manual, too, if you want chroot! -# chroot = /var/lib/mysql/ -# -# For generating SSL certificates I recommend the OpenSSL GUI "tinyca". -# -# ssl-ca=/etc/mysql/cacert.pem -# ssl-cert=/etc/mysql/server-cert.pem -# ssl-key=/etc/mysql/server-key.pem - -# -# * Galera-related settings -# -[galera] -# Mandatory settings -#wsrep_on=ON -#wsrep_provider= -#wsrep_cluster_address= -#binlog_format=row -#default_storage_engine=InnoDB -#innodb_autoinc_lock_mode=2 -# -# Allow server to accept connections on all interfaces. -# -#bind-address=0.0.0.0 -# -# Optional setting -#wsrep_slave_threads=1 -#innodb_flush_log_at_trx_commit=0 - -[mysqldump] -quick -quote-names -max_allowed_packet = 16M - -[mysql] -#no-auto-rehash # faster start of mysql but no tab completion - -[isamchk] -key_buffer = 16M - -# -# * IMPORTANT: Additional settings that can override those from this file! -# The files must end with '.cnf', otherwise they'll be ignored. -# -!include /etc/mysql/mariadb.cnf -!includedir /etc/mysql/conf.d/ - -character_set_server = utf8 diff --git a/ansible/roles/sf-nodepool/defaults/main.yml b/ansible/roles/sf-nodepool/defaults/main.yml index c71972295..4507909ca 100644 --- a/ansible/roles/sf-nodepool/defaults/main.yml +++ b/ansible/roles/sf-nodepool/defaults/main.yml @@ -16,7 +16,7 @@ container_gid: 10001 dib_upstream_elements: True -nodepool_version: 9.0.0 +nodepool_version: 10.0.0 nodepool_container_version: "{{ nodepool_version }}-1" nodepool_services: diff --git a/ansible/roles/sf-nodepool/tasks/config_update.yml b/ansible/roles/sf-nodepool/tasks/config_update.yml index 57743c712..88c29a660 100644 --- a/ansible/roles/sf-nodepool/tasks/config_update.yml +++ b/ansible/roles/sf-nodepool/tasks/config_update.yml @@ -19,14 +19,11 @@ - include_tasks: "fetch_update_configuration.yml" -- name: Restart service when fqdn is updated to refresh statsd client +- name: Restart service to ensure labels are updated in zk service: name: "{{ item }}" state: restarted loop: "{{ nodepool_services }}" - when: - - _provider_count.stdout != "0" - - update_fqdn - name: Write config repo checksum matching current configuration copy: diff --git a/ansible/roles/sf-repos/files/config/containers/centos-7/Dockerfile b/ansible/roles/sf-repos/files/config/containers/centos-7/Dockerfile index ebec03c20..75c882a0c 100644 --- a/ansible/roles/sf-repos/files/config/containers/centos-7/Dockerfile +++ b/ansible/roles/sf-repos/files/config/containers/centos-7/Dockerfile @@ -12,7 +12,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-FROM registry.centos.org/centos:7 +FROM quay.io/centos/centos:centos7 # Zuul minimal package RUN yum install -y sudo rsync git traceroute iproute python3-setuptools python3-pip rpm-build python3-rpm-macros diff --git a/ansible/roles/sf-repos/templates/config/zuul.d/_pipelines.yaml.j2 b/ansible/roles/sf-repos/templates/config/zuul.d/_pipelines.yaml.j2 index 16794035d..9ebe24d91 100644 --- a/ansible/roles/sf-repos/templates/config/zuul.d/_pipelines.yaml.j2 +++ b/ansible/roles/sf-repos/templates/config/zuul.d/_pipelines.yaml.j2 @@ -30,7 +30,14 @@ - event: patchset-created - event: change-restored - event: comment-added - comment: (?i)^(Patch Set [0-9]+:)?( [\w\\+-]*)*(\n\n)?\s*recheck + comment: (?i)^(Patch Set [0-9]+:)?( [\w\\+-]*)*(\n\n)?\s*(recheck|reverify) + - event: comment-added + require: + approval: + - username: zuul + Verified: [-1, -2] + approval: + - Workflow: 1 {% endfor %} {% for github_connection in zuul_github_connections_pipelines %} {{ github_connection.name }}: diff --git a/ansible/roles/sf-zookeeper/tasks/restore.yml b/ansible/roles/sf-zookeeper/tasks/restore.yml index a29304911..131a9cf46 100644 --- a/ansible/roles/sf-zookeeper/tasks/restore.yml +++ b/ansible/roles/sf-zookeeper/tasks/restore.yml @@ -6,8 +6,11 @@ - name: "Restore files ownership" file: - path: "/etc/zookeeper" + path: "{{ item }}" owner: "zookeeper" group: "zookeeper" recurse: "yes" state: "directory" + loop: + - "/etc/zookeeper" + - "/var/lib/zookeeper" diff --git a/ansible/roles/sf-zuul/defaults/main.yml b/ansible/roles/sf-zuul/defaults/main.yml index dea2ff447..d94421396 100644 --- a/ansible/roles/sf-zuul/defaults/main.yml +++ b/ansible/roles/sf-zuul/defaults/main.yml @@ -21,8 +21,8 @@ zuul_lib_dir: "/var/lib/zuul" zuul_log_dir: "/var/log/zuul" zuul_share_dir: "/usr/share/javascript/zuul" -zuul_version: "9.1.0" -zuul_container_version: "{{ zuul_version }}-4" +zuul_version: "10.0.0" +zuul_container_version: "{{ zuul_version }}-1" zuul_client_version: "f96ddd00fc69d8a4d51eb207ef322b99983d1fe8" zuul_client_container_version: "0.1.0-{{ zuul_client_version }}-1" @@ -76,6 +76,7 @@ zuul_components: image: "quay.io/software-factory/zuul-web-sf38:{{ zuul_container_version }}" params: >- {{ zuul_components_default_params }} + --volume /var/lib/zuul/:/var/lib/zuul/:z {% if sf_zuul_web_additional_params is defined %} {{ sf_zuul_web_additional_params }} {% endif %} diff --git a/ansible/roles/sf-zuul/tasks/restore.yml b/ansible/roles/sf-zuul/tasks/restore.yml index 0b173482b..454d3f85c 100644 --- a/ansible/roles/sf-zuul/tasks/restore.yml +++ b/ansible/roles/sf-zuul/tasks/restore.yml @@ -31,6 +31,6 @@ mode: '0700' - name: Import zuul keys - command: "zuul_wrapper import-keys {{ zuul_lib_dir }}/keys/backup-keys" + command: "podman run -it --rm --network host --user root --volume /etc/zuul/:/etc/zuul/:Z --volume /var/lib/zuul/:/var/lib/zuul/:Z quay.io/software-factory/zuul-scheduler-sf38:{{ zuul_container_version }} zuul import-keys {{ zuul_lib_dir }}/keys/backup-keys" when: - "'zuul-scheduler' in zuul_services" diff --git a/ansible/roles/sf-zuul/tasks/setup.yml b/ansible/roles/sf-zuul/tasks/setup.yml index 68f3d579d..7b0f68e28 100644 --- a/ansible/roles/sf-zuul/tasks/setup.yml +++ b/ansible/roles/sf-zuul/tasks/setup.yml @@ -96,7 +96,7 @@ dest: /bin/zuul-client content: | #!/bin/sh -e - exec podman run --rm --name zc_container -v /etc/zuul/:/etc/zuul/:Z -v /etc/ssl/certs:/etc/ssl/certs:Z -v /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem --env 
REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-bundle.crt quay.io/software-factory/zuul-client:{{ zuul_client_container_version }} --zuul-url {{ zuul_pub_url }} $* + exec podman run --network host --rm --name zc_container -v /etc/zuul/:/etc/zuul/:Z -v /etc/ssl/certs:/etc/ssl/certs:Z -v /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem --env REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-bundle.crt quay.io/software-factory/zuul-client:{{ zuul_client_container_version }} --zuul-url {{ zuul_pub_url }} $* mode: "0550" when: "'zuul-scheduler' in zuul_services" diff --git a/ansible/roles/sf-zuul/tasks/upgrade.yml b/ansible/roles/sf-zuul/tasks/upgrade.yml index 90fb1f8cb..9876a66fc 100644 --- a/ansible/roles/sf-zuul/tasks/upgrade.yml +++ b/ansible/roles/sf-zuul/tasks/upgrade.yml @@ -3,6 +3,7 @@ command: "find {{ zuul_lib_dir }}/keys/ -name \"*.pub\" -delete" when: - _previous_version is defined + - "'stdout' in {{ _previous_version }}" - _previous_version.stdout.startswith('rh-python35-zuul-3.2.0') - name: Remove legacy public key and files diff --git a/defaults/sfconfig.yaml b/defaults/sfconfig.yaml index df3d466ea..6f19bcfbc 100644 --- a/defaults/sfconfig.yaml +++ b/defaults/sfconfig.yaml @@ -126,14 +126,9 @@ opensearch: # logstash_sftests_com: # password: logstash # role: logstash -# kibana_sftests_com: -# password: kibana -# role: readonly ## Uncomment below lines if you are using EXTERNAL Opensearch dashboards service. -opensearch_dashboards: - # You can choose None, Basic or JWT - readonly_user_autologin: Basic +opensearch_dashboards: {} # host_url: https://kibana-host-2:5601 logs: diff --git a/refarch-golden-tests/config_initialize.yml b/refarch-golden-tests/config_initialize.yml new file mode 100644 index 000000000..befde449e --- /dev/null +++ b/refarch-golden-tests/config_initialize.yml @@ -0,0 +1,70 @@ +- any_errors_fatal: true + hosts: zuul-scheduler + tasks: + - debug: + msg: Run initial minimal Zuul tenant config + - include_role: + name: sf-zuul + tasks_from: fetch_update_configuration.yml +- any_errors_fatal: true + hosts: install-server + tasks: + - debug: + msg: Start Zuul with minimal tenant config +- any_errors_fatal: true + hosts: zuul-web + tasks: + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/zuul-web-updated ]; then podman + rm zuul-web; /usr/local/bin/container-zuul-web.sh; rm /var/lib/software-factory/versions/zuul-web-updated; + fi + - name: Setting service zuul-web to started + service: + name: zuul-web + state: started +- any_errors_fatal: true + hosts: zuul-executor + tasks: + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/zuul-executor-updated ]; then + podman rm zuul-executor; /usr/local/bin/container-zuul-executor.sh; rm /var/lib/software-factory/versions/zuul-executor-updated; + fi + - name: Setting service zuul-executor to started + service: + name: zuul-executor + state: started +- any_errors_fatal: true + hosts: zuul-scheduler + tasks: + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/zuul-scheduler-updated ]; then + podman rm zuul-scheduler; /usr/local/bin/container-zuul-scheduler.sh; rm /var/lib/software-factory/versions/zuul-scheduler-updated; + fi + - name: Setting service zuul-scheduler to started + service: + name: zuul-scheduler + state: started +- any_errors_fatal: true + hosts: install-server + tasks: + - include_tasks: '{{ sf_tasks_dir }}/ensure_zuul_running.yml' + name: Wait for Zuul running +- 
any_errors_fatal: true + hosts: install-server + tasks: + - debug: + msg: Run syncing config repo content + - include_role: + name: sf-repos + vars: + role_action: fetch_zuul_key + - include_role: + name: sf-repos + vars: + role_action: setup + - debug: + msg: Set a Zuul and Nodepool need restart + - set_fact: + zuul_need_restart: true + - set_fact: + nodepool_need_restart: true diff --git a/refarch-golden-tests/config_update.yml b/refarch-golden-tests/config_update.yml new file mode 100644 index 000000000..cd8d1ec37 --- /dev/null +++ b/refarch-golden-tests/config_update.yml @@ -0,0 +1,52 @@ +- any_errors_fatal: true + hosts: install-server + roles: + - sf-repos + vars: + role_action: reset_config_repo +- any_errors_fatal: true + hosts: managesf:gerrit:pages:gerritbot:zuul:nodepool:grafana:hound:cgit:keycloak:hypervisor-k1s:zuul-scheduler:nodepool-launcher:nodepool-builder:!install-server + roles: + - sf-repos + vars: + role_action: copy_config_repo +- any_errors_fatal: true + hosts: managesf.sftests.com + roles: + - sf-managesf + - sf-gerrit + - sf-gerritbot + - sf-grafana + - sf-keycloak + vars: + role_action: config_update +- any_errors_fatal: true + hosts: elk.sftests.com + roles: + - sf-hound + vars: + role_action: config_update +- any_errors_fatal: true + hosts: nodepool-builder.sftests.com + roles: + - sf-nodepool + vars: + nodepool_services: + - nodepool-builder + role_action: config_update +- any_errors_fatal: true + hosts: zs.sftests.com + roles: + - sf-zuul + - sf-nodepool + vars: + nodepool_services: + - nodepool-launcher + role_action: config_update + zuul_services: + - zuul-scheduler + - zuul-web +- any_errors_fatal: true + hosts: runc01.sftests.com + vars: + role_action: config_update diff --git a/refarch-golden-tests/etc-hosts b/refarch-golden-tests/etc-hosts new file mode 100644 index 000000000..7f0c3499a --- /dev/null +++ b/refarch-golden-tests/etc-hosts @@ -0,0 +1,55 @@ +# This file is managed by ansible, don't edit + +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 + +10.0.0.88 managesf.sftests.com etherpad etherpad.sftests.com firehose firehose.sftests.com gerrit gerrit.sftests.com gerritbot gerritbot.sftests.com grafana grafana.sftests.com install-server install-server.sftests.com keycloak keycloak.sftests.com lodgeit lodgeit.sftests.com managesf managesf.sftests.com murmur murmur.sftests.com mysql mysql.sftests.com opensearch-dashboards opensearch-dashboards.sftests.com sftests.com zookeeper zookeeper.sftests.com +192.168.240.14 elk.sftests.com elk hound hound.sftests.com influxdb influxdb.sftests.com log-processing log-processing.sftests.com logserver logserver.sftests.com opensearch opensearch.sftests.com +192.168.240.15 zs.sftests.com zs zuul-scheduler zuul-scheduler.sftests.com zuul-web zuul-web.sftests.com +192.168.240.8 ze01.sftests.com ze01 +192.168.240.9 ze02.sftests.com ze02 +38.145.33.82 runc01.sftests.com hypervisor-runc hypervisor-runc.sftests.com runc01 + +10.0.0.88 managesf.sftests.com etherpad etherpad.sftests.com firehose firehose.sftests.com gerrit gerrit.sftests.com gerritbot gerritbot.sftests.com grafana grafana.sftests.com install-server install-server.sftests.com keycloak keycloak.sftests.com lodgeit lodgeit.sftests.com managesf managesf.sftests.com murmur murmur.sftests.com mysql mysql.sftests.com opensearch-dashboards opensearch-dashboards.sftests.com sftests.com zookeeper zookeeper.sftests.com +192.168.240.14 elk.sftests.com elk hound 
hound.sftests.com influxdb influxdb.sftests.com log-processing log-processing.sftests.com logserver logserver.sftests.com opensearch opensearch.sftests.com +192.168.240.15 zs.sftests.com zs zuul-scheduler zuul-scheduler.sftests.com zuul-web zuul-web.sftests.com +192.168.240.8 ze01.sftests.com ze01 +192.168.240.9 ze02.sftests.com ze02 +38.145.33.82 runc01.sftests.com hypervisor-runc hypervisor-runc.sftests.com runc01 + +10.0.0.88 managesf.sftests.com etherpad etherpad.sftests.com firehose firehose.sftests.com gerrit gerrit.sftests.com gerritbot gerritbot.sftests.com grafana grafana.sftests.com install-server install-server.sftests.com keycloak keycloak.sftests.com lodgeit lodgeit.sftests.com managesf managesf.sftests.com murmur murmur.sftests.com mysql mysql.sftests.com opensearch-dashboards opensearch-dashboards.sftests.com sftests.com zookeeper zookeeper.sftests.com +192.168.240.14 elk.sftests.com elk hound hound.sftests.com influxdb influxdb.sftests.com log-processing log-processing.sftests.com logserver logserver.sftests.com opensearch opensearch.sftests.com +192.168.240.15 zs.sftests.com zs zuul-scheduler zuul-scheduler.sftests.com zuul-web zuul-web.sftests.com +192.168.240.8 ze01.sftests.com ze01 +192.168.240.9 ze02.sftests.com ze02 +38.145.33.82 runc01.sftests.com hypervisor-runc hypervisor-runc.sftests.com runc01 + +10.0.0.88 managesf.sftests.com etherpad etherpad.sftests.com firehose firehose.sftests.com gerrit gerrit.sftests.com gerritbot gerritbot.sftests.com grafana grafana.sftests.com install-server install-server.sftests.com keycloak keycloak.sftests.com lodgeit lodgeit.sftests.com managesf managesf.sftests.com murmur murmur.sftests.com mysql mysql.sftests.com opensearch-dashboards opensearch-dashboards.sftests.com sftests.com zookeeper zookeeper.sftests.com +192.168.240.14 elk.sftests.com elk hound hound.sftests.com influxdb influxdb.sftests.com log-processing log-processing.sftests.com logserver logserver.sftests.com opensearch opensearch.sftests.com +192.168.240.15 zs.sftests.com zs zuul-scheduler zuul-scheduler.sftests.com zuul-web zuul-web.sftests.com +192.168.240.8 ze01.sftests.com ze01 +192.168.240.9 ze02.sftests.com ze02 +38.145.33.82 runc01.sftests.com hypervisor-runc hypervisor-runc.sftests.com runc01 + +10.0.0.88 managesf.sftests.com etherpad etherpad.sftests.com firehose firehose.sftests.com gerrit gerrit.sftests.com gerritbot gerritbot.sftests.com grafana grafana.sftests.com install-server install-server.sftests.com keycloak keycloak.sftests.com lodgeit lodgeit.sftests.com managesf managesf.sftests.com murmur murmur.sftests.com mysql mysql.sftests.com opensearch-dashboards opensearch-dashboards.sftests.com sftests.com zookeeper zookeeper.sftests.com +192.168.240.14 elk.sftests.com elk hound hound.sftests.com influxdb influxdb.sftests.com log-processing log-processing.sftests.com logserver logserver.sftests.com opensearch opensearch.sftests.com +192.168.240.15 zs.sftests.com zs zuul-scheduler zuul-scheduler.sftests.com zuul-web zuul-web.sftests.com +192.168.240.8 ze01.sftests.com ze01 +192.168.240.9 ze02.sftests.com ze02 +38.145.33.82 runc01.sftests.com hypervisor-runc hypervisor-runc.sftests.com runc01 + +10.0.0.88 managesf.sftests.com etherpad etherpad.sftests.com firehose firehose.sftests.com gerrit gerrit.sftests.com gerritbot gerritbot.sftests.com grafana grafana.sftests.com install-server install-server.sftests.com keycloak keycloak.sftests.com lodgeit lodgeit.sftests.com managesf managesf.sftests.com murmur murmur.sftests.com mysql mysql.sftests.com 
opensearch-dashboards opensearch-dashboards.sftests.com sftests.com zookeeper zookeeper.sftests.com +192.168.240.14 elk.sftests.com elk hound hound.sftests.com influxdb influxdb.sftests.com log-processing log-processing.sftests.com logserver logserver.sftests.com opensearch opensearch.sftests.com +192.168.240.15 zs.sftests.com zs zuul-scheduler zuul-scheduler.sftests.com zuul-web zuul-web.sftests.com +192.168.240.8 ze01.sftests.com ze01 +192.168.240.9 ze02.sftests.com ze02 +38.145.33.82 runc01.sftests.com hypervisor-runc hypervisor-runc.sftests.com runc01 + +10.0.0.88 managesf.sftests.com etherpad etherpad.sftests.com firehose firehose.sftests.com gerrit gerrit.sftests.com gerritbot gerritbot.sftests.com grafana grafana.sftests.com install-server install-server.sftests.com keycloak keycloak.sftests.com lodgeit lodgeit.sftests.com managesf managesf.sftests.com murmur murmur.sftests.com mysql mysql.sftests.com opensearch-dashboards opensearch-dashboards.sftests.com sftests.com zookeeper zookeeper.sftests.com +192.168.240.14 elk.sftests.com elk hound hound.sftests.com influxdb influxdb.sftests.com log-processing log-processing.sftests.com logserver logserver.sftests.com opensearch opensearch.sftests.com +192.168.240.15 zs.sftests.com zs zuul-scheduler zuul-scheduler.sftests.com zuul-web zuul-web.sftests.com +192.168.240.8 ze01.sftests.com ze01 +192.168.240.9 ze02.sftests.com ze02 +38.145.33.82 runc01.sftests.com hypervisor-runc hypervisor-runc.sftests.com runc01 + + diff --git a/refarch-golden-tests/get_logs.yml b/refarch-golden-tests/get_logs.yml new file mode 100644 index 000000000..ae1d8f35c --- /dev/null +++ b/refarch-golden-tests/get_logs.yml @@ -0,0 +1,232 @@ +- any_errors_fatal: true + hosts: install-server + tasks: + - file: + path: /root/sf-logs + state: absent + name: Cleanup sf-logs directory + - file: + mode: 448 + path: /root/sf-logs + state: directory + name: Create sf-logs directory + - file: + path: /root/sf-logs/install-server + state: directory + name: Create install-server log storage directory + - file: + path: /root/sf-logs/mysql + state: directory + name: Create mysql log storage directory + - file: + path: /root/sf-logs/gerrit + state: directory + name: Create gerrit log storage directory + - file: + path: /root/sf-logs/gateway + state: directory + name: Create gateway log storage directory + - file: + path: /root/sf-logs/managesf + state: directory + name: Create managesf log storage directory + - file: + path: /root/sf-logs/etherpad + state: directory + name: Create etherpad log storage directory + - file: + path: /root/sf-logs/lodgeit + state: directory + name: Create lodgeit log storage directory + - file: + path: /root/sf-logs/gerritbot + state: directory + name: Create gerritbot log storage directory + - file: + path: /root/sf-logs/murmur + state: directory + name: Create murmur log storage directory + - file: + path: /root/sf-logs/opensearch-dashboards + state: directory + name: Create opensearch-dashboards log storage directory + - file: + path: /root/sf-logs/keycloak + state: directory + name: Create keycloak log storage directory + - file: + path: /root/sf-logs/firehose + state: directory + name: Create firehose log storage directory + - file: + path: /root/sf-logs/grafana + state: directory + name: Create grafana log storage directory + - file: + path: /root/sf-logs/zookeeper + state: directory + name: Create zookeeper log storage directory + - file: + path: /root/sf-logs/logserver + state: directory + name: Create logserver log storage directory + - file: + 
path: /root/sf-logs/opensearch + state: directory + name: Create opensearch log storage directory + - file: + path: /root/sf-logs/influxdb + state: directory + name: Create influxdb log storage directory + - file: + path: /root/sf-logs/log-processing + state: directory + name: Create log-processing log storage directory + - file: + path: /root/sf-logs/hound + state: directory + name: Create hound log storage directory + - file: + path: /root/sf-logs/nodepool-builder + state: directory + name: Create nodepool-builder log storage directory + - file: + path: /root/sf-logs/zuul-scheduler + state: directory + name: Create zuul-scheduler log storage directory + - file: + path: /root/sf-logs/zuul-web + state: directory + name: Create zuul-web log storage directory + - file: + path: /root/sf-logs/nodepool-launcher + state: directory + name: Create nodepool-launcher log storage directory + - file: + path: /root/sf-logs/zuul-executor + state: directory + name: Create zuul-executor log storage directory + - file: + path: /root/sf-logs/hypervisor-runc + state: directory + name: Create hypervisor-runc log storage directory + - file: + path: /root/sf-logs/nodepool + state: directory + name: Create nodepool log storage directory + - file: + path: /root/sf-logs/zuul + state: directory + name: Create zuul log storage directory +- any_errors_fatal: true + hosts: managesf.sftests.com + roles: + - log_dest: /root/sf-logs/base + role: sf-base + - log_dest: /root/sf-logs/install-server + role: sf-install-server + - log_dest: /root/sf-logs/mysql + role: sf-mysql + - log_dest: /root/sf-logs/gerrit + role: sf-gerrit + - log_dest: /root/sf-logs/gateway + role: sf-gateway + - log_dest: /root/sf-logs/managesf + role: sf-managesf + - log_dest: /root/sf-logs/etherpad + role: sf-etherpad + - log_dest: /root/sf-logs/lodgeit + role: sf-lodgeit + - log_dest: /root/sf-logs/gerritbot + role: sf-gerritbot + - log_dest: /root/sf-logs/murmur + role: sf-murmur + - log_dest: /root/sf-logs/opensearch-dashboards + role: sf-opensearch-dashboards + - log_dest: /root/sf-logs/keycloak + role: sf-keycloak + - log_dest: /root/sf-logs/firehose + role: sf-firehose + - log_dest: /root/sf-logs/grafana + role: sf-grafana + - log_dest: /root/sf-logs/zookeeper + role: sf-zookeeper + vars: + role_action: get_logs +- any_errors_fatal: true + hosts: elk.sftests.com + roles: + - log_dest: /root/sf-logs/base + role: sf-base + - log_dest: /root/sf-logs/logserver + role: sf-logserver + - log_dest: /root/sf-logs/opensearch + role: sf-opensearch + - log_dest: /root/sf-logs/influxdb + role: sf-influxdb + - log_dest: /root/sf-logs/log-processing + role: sf-log-processing + - log_dest: /root/sf-logs/hound + role: sf-hound + - log_dest: /root/sf-logs/telegraf + role: sf-telegraf + vars: + role_action: get_logs +- any_errors_fatal: true + hosts: nodepool-builder.sftests.com + roles: + - log_dest: /root/sf-logs/base + role: sf-base + - log_dest: /root/sf-logs/nodepool + role: sf-nodepool + vars: + nodepool_services: + - nodepool-builder + role_action: get_logs +- any_errors_fatal: true + hosts: zs.sftests.com + roles: + - log_dest: /root/sf-logs/base + role: sf-base + - log_dest: /root/sf-logs/nodepool + role: sf-nodepool + - log_dest: /root/sf-logs/zuul + role: sf-zuul + vars: + nodepool_services: + - nodepool-launcher + role_action: get_logs + zuul_services: + - zuul-scheduler + - zuul-web +- any_errors_fatal: true + hosts: ze01.sftests.com + roles: + - log_dest: /root/sf-logs/base + role: sf-base + - log_dest: /root/sf-logs/zuul + role: sf-zuul + vars: + 
role_action: get_logs + zuul_services: + - zuul-executor +- any_errors_fatal: true + hosts: ze02.sftests.com + roles: + - log_dest: /root/sf-logs/base + role: sf-base + - log_dest: /root/sf-logs/zuul + role: sf-zuul + vars: + role_action: get_logs + zuul_services: + - zuul-executor +- any_errors_fatal: true + hosts: runc01.sftests.com + roles: + - log_dest: /root/sf-logs/base + role: sf-base + - log_dest: /root/sf-logs/hypervisor-runc + role: sf-hypervisor-runc + vars: + role_action: get_logs diff --git a/refarch-golden-tests/hosts b/refarch-golden-tests/hosts new file mode 100644 index 000000000..dc935d633 --- /dev/null +++ b/refarch-golden-tests/hosts @@ -0,0 +1,111 @@ +# This file is managed by ansible, don't edit +# It's the list of host sort by roles group. + +[install-server] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[mysql] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[gerrit] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[gateway] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[managesf] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[etherpad] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[lodgeit] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[gerritbot] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[murmur] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[opensearch-dashboards] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[keycloak] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[firehose] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[grafana] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[zookeeper] +managesf.sftests.com ansible_connection=local ansible_python_interpreter=/usr/bin/python + + +[logserver] +elk.sftests.com ansible_python_interpreter=/usr/bin/python + + +[opensearch] +elk.sftests.com ansible_python_interpreter=/usr/bin/python + + +[influxdb] +elk.sftests.com ansible_python_interpreter=/usr/bin/python + + +[log-processing] +elk.sftests.com ansible_python_interpreter=/usr/bin/python + + +[hound] +elk.sftests.com ansible_python_interpreter=/usr/bin/python + + +[nodepool-builder] +nodepool-builder.sftests.com ansible_python_interpreter=/usr/bin/python + + +[zuul-scheduler] +zs.sftests.com ansible_python_interpreter=/usr/bin/python + + +[zuul-web] +zs.sftests.com ansible_python_interpreter=/usr/bin/python + + +[nodepool-launcher] +zs.sftests.com ansible_python_interpreter=/usr/bin/python + + +[zuul-executor] +ze01.sftests.com ansible_python_interpreter=/usr/bin/python +ze02.sftests.com ansible_python_interpreter=/usr/bin/python + + +[hypervisor-runc] +runc01.sftests.com ansible_python_interpreter=/usr/bin/python + + +[nodepool] +nodepool-builder.sftests.com ansible_python_interpreter=/usr/bin/python + + +[zuul] +zs.sftests.com ansible_python_interpreter=/usr/bin/python + diff --git a/refarch-golden-tests/nodepool_restart.yml b/refarch-golden-tests/nodepool_restart.yml new file mode 100644 index 000000000..f08a07150 --- /dev/null +++ 
b/refarch-golden-tests/nodepool_restart.yml @@ -0,0 +1,32 @@ +- any_errors_fatal: true + hosts: nodepool-launcher + tasks: + - failed_when: false + name: Setting service nodepool-launcher to stopped + service: + name: nodepool-launcher + state: stopped + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/nodepool-launcher-updated ]; + then podman rm nodepool-launcher; /usr/local/bin/container-nodepool-launcher.sh; + rm /var/lib/software-factory/versions/nodepool-launcher-updated; fi + - name: Setting service nodepool-launcher to started + service: + name: nodepool-launcher + state: started +- any_errors_fatal: true + hosts: nodepool-builder + tasks: + - failed_when: false + name: Setting service nodepool-builder to stopped + service: + name: nodepool-builder + state: stopped + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/nodepool-builder-updated ]; + then podman rm nodepool-builder; /usr/local/bin/container-nodepool-builder.sh; + rm /var/lib/software-factory/versions/nodepool-builder-updated; fi + - name: Setting service nodepool-builder to started + service: + name: nodepool-builder + state: started diff --git a/refarch-golden-tests/sf_backup.yml b/refarch-golden-tests/sf_backup.yml new file mode 100644 index 000000000..908b3072c --- /dev/null +++ b/refarch-golden-tests/sf_backup.yml @@ -0,0 +1,133 @@ +- any_errors_fatal: true + hosts: install-server + tasks: + - file: + mode: '0700' + path: /var/lib/software-factory/backup + state: directory + name: Create backup directory + - file: + path: /var/lib/software-factory/backup/{{ item }} + state: directory + loop: + - install-server + - mysql + - gerrit + - gateway + - managesf + - etherpad + - lodgeit + - gerritbot + - murmur + - opensearch-dashboards + - keycloak + - firehose + - grafana + - zookeeper + - logserver + - opensearch + - influxdb + - log-processing + - hound + - nodepool-builder + - zuul-scheduler + - zuul-web + - nodepool-launcher + - zuul-executor + - hypervisor-runc + - nodepool + - zuul +- any_errors_fatal: true + hosts: managesf.sftests.com + roles: + - backup_dest: /var/lib/software-factory/backup/install-server + role: sf-install-server + - backup_dest: /var/lib/software-factory/backup/mysql + role: sf-mysql + - backup_dest: /var/lib/software-factory/backup/gerrit + role: sf-gerrit + - backup_dest: /var/lib/software-factory/backup/gateway + role: sf-gateway + - backup_dest: /var/lib/software-factory/backup/managesf + role: sf-managesf + - backup_dest: /var/lib/software-factory/backup/etherpad + role: sf-etherpad + - backup_dest: /var/lib/software-factory/backup/lodgeit + role: sf-lodgeit + - backup_dest: /var/lib/software-factory/backup/gerritbot + role: sf-gerritbot + - backup_dest: /var/lib/software-factory/backup/murmur + role: sf-murmur + - backup_dest: /var/lib/software-factory/backup/opensearch-dashboards + role: sf-opensearch-dashboards + - backup_dest: /var/lib/software-factory/backup/keycloak + role: sf-keycloak + - backup_dest: /var/lib/software-factory/backup/firehose + role: sf-firehose + - backup_dest: /var/lib/software-factory/backup/grafana + role: sf-grafana + - backup_dest: /var/lib/software-factory/backup/zookeeper + role: sf-zookeeper + vars: + role_action: backup +- any_errors_fatal: true + hosts: elk.sftests.com + roles: + - backup_dest: /var/lib/software-factory/backup/logserver + role: sf-logserver + - backup_dest: /var/lib/software-factory/backup/opensearch + role: sf-opensearch + - backup_dest: 
/var/lib/software-factory/backup/influxdb + role: sf-influxdb + - backup_dest: /var/lib/software-factory/backup/log-processing + role: sf-log-processing + - backup_dest: /var/lib/software-factory/backup/hound + role: sf-hound + - backup_dest: /var/lib/software-factory/backup/telegraf + role: sf-telegraf + vars: + role_action: backup +- any_errors_fatal: true + hosts: nodepool-builder.sftests.com + roles: + - backup_dest: /var/lib/software-factory/backup/nodepool + role: sf-nodepool + vars: + nodepool_services: + - nodepool-builder + role_action: backup +- any_errors_fatal: true + hosts: zs.sftests.com + roles: + - backup_dest: /var/lib/software-factory/backup/nodepool + role: sf-nodepool + - backup_dest: /var/lib/software-factory/backup/zuul + role: sf-zuul + vars: + nodepool_services: + - nodepool-launcher + role_action: backup + zuul_services: + - zuul-scheduler + - zuul-web +- any_errors_fatal: true + hosts: ze01.sftests.com + roles: [] + vars: + role_action: backup + zuul_services: + - zuul-executor +- any_errors_fatal: true + hosts: ze02.sftests.com + roles: [] + vars: + role_action: backup + zuul_services: + - zuul-executor +- any_errors_fatal: true + hosts: runc01.sftests.com + roles: + - backup_dest: /var/lib/software-factory/backup/hypervisor-runc + role: sf-hypervisor-runc + vars: + role_action: backup diff --git a/refarch-golden-tests/sf_erase.yml b/refarch-golden-tests/sf_erase.yml new file mode 100644 index 000000000..0db575f36 --- /dev/null +++ b/refarch-golden-tests/sf_erase.yml @@ -0,0 +1,114 @@ +- any_errors_fatal: true + hosts: install-server + tasks: + - pause: + prompt: 'WARNING: this playbook will *DESTROY* software factory data , type + the fqdn to continue or CTRL-C to abort' + register: erase_prompt + when: sfconfig_batch is not defined +- any_errors_fatal: true + hosts: install-server + tasks: + - fail: + msg: Incorrect hostname + when: + - sfconfig_batch is not defined + - erase_prompt.user_input != fqdn +- any_errors_fatal: true + hosts: managesf.sftests.com + roles: + - sf-gerrit + - sf-gateway + - sf-managesf + - sf-etherpad + - sf-lodgeit + - sf-gerritbot + - sf-murmur + - sf-opensearch-dashboards + - sf-keycloak + - sf-firehose + - sf-grafana + - sf-zookeeper + vars: + erase: true + role_action: disable +- any_errors_fatal: true + hosts: elk.sftests.com + roles: + - sf-logserver + - sf-opensearch + - sf-influxdb + - sf-log-processing + - sf-hound + - sf-telegraf + vars: + erase: true + role_action: disable +- any_errors_fatal: true + hosts: nodepool-builder.sftests.com + roles: + - sf-nodepool + vars: + erase: true + nodepool_services: + - nodepool-builder + role_action: disable +- any_errors_fatal: true + hosts: zs.sftests.com + roles: + - sf-nodepool + - sf-zuul + vars: + erase: true + nodepool_services: + - nodepool-launcher + role_action: disable + zuul_services: + - zuul-scheduler + - zuul-web +- any_errors_fatal: true + hosts: ze01.sftests.com + roles: + - sf-zuul + vars: + erase: true + role_action: disable + zuul_services: + - zuul-executor +- any_errors_fatal: true + hosts: ze02.sftests.com + roles: + - sf-zuul + vars: + erase: true + role_action: disable + zuul_services: + - zuul-executor +- any_errors_fatal: true + hosts: runc01.sftests.com + roles: + - sf-hypervisor-runc + vars: + erase: true + role_action: disable +- any_errors_fatal: true + hosts: mysql + roles: + - sf-mysql + vars: + erase: true + role_action: disable +- any_errors_fatal: true + hosts: install-server + roles: + - sf-install-server + vars: + erase: true + role_action: disable +- 
any_errors_fatal: true + hosts: all + roles: + - sf-base + vars: + erase: true + role_action: disable diff --git a/refarch-golden-tests/tenant_update.yml b/refarch-golden-tests/tenant_update.yml new file mode 100644 index 000000000..90eed887b --- /dev/null +++ b/refarch-golden-tests/tenant_update.yml @@ -0,0 +1,52 @@ +- any_errors_fatal: true + hosts: managesf.sftests.com + vars: + force_update: true + role_action: config_update +- any_errors_fatal: true + hosts: elk.sftests.com + vars: + force_update: true + role_action: config_update +- any_errors_fatal: true + hosts: nodepool-builder.sftests.com + vars: + force_update: true + nodepool_services: + - nodepool-builder + role_action: config_update +- any_errors_fatal: true + hosts: zs.sftests.com + roles: + - sf-zuul + vars: + force_update: true + nodepool_services: + - nodepool-launcher + role_action: config_update + zuul_services: + - zuul-scheduler + - zuul-web +- any_errors_fatal: true + hosts: ze01.sftests.com + roles: + - sf-zuul + vars: + force_update: true + role_action: config_update + zuul_services: + - zuul-executor +- any_errors_fatal: true + hosts: ze02.sftests.com + roles: + - sf-zuul + vars: + force_update: true + role_action: config_update + zuul_services: + - zuul-executor +- any_errors_fatal: true + hosts: runc01.sftests.com + vars: + force_update: true + role_action: config_update diff --git a/refarch-golden-tests/zuul_restart.yml b/refarch-golden-tests/zuul_restart.yml new file mode 100644 index 000000000..5e1030b20 --- /dev/null +++ b/refarch-golden-tests/zuul_restart.yml @@ -0,0 +1,97 @@ +- any_errors_fatal: true + hosts: install-server + tasks: + - command: 'logger --tag event-sfconfig "zuul restart process: begin"' + name: Log system message +- any_errors_fatal: true + hosts: zuul-scheduler + tasks: + - command: systemctl -q is-active zuul-scheduler + failed_when: zuul_scheduler_status.rc not in [0, 3] + name: Check if zuul is running + register: zuul_scheduler_status + - command: podman exec -ti zuul-scheduler python3 /var/lib/zuul/scripts/zuul-changes.py + dump --dump_file /var/lib/zuul/scripts/zuul-change-dump.sh + delay: '5' + name: Dump zuul changes + retries: '50' + when: zuul_scheduler_status.rc == 0 +- any_errors_fatal: true + hosts: zuul-web + tasks: + - failed_when: false + name: Setting service zuul-web to stopped + service: + name: zuul-web + state: stopped +- any_errors_fatal: true + hosts: zuul-executor + tasks: + - failed_when: false + name: Setting service zuul-executor to stopped + service: + name: zuul-executor + state: stopped +- any_errors_fatal: true + hosts: zuul-scheduler + tasks: + - failed_when: false + name: Setting service zuul-scheduler to stopped + service: + name: zuul-scheduler + state: stopped +- any_errors_fatal: true + hosts: zuul-scheduler + tasks: + - name: Cleaning Zookeeper data + shell: echo yes | zuul_wrapper delete-state +- any_errors_fatal: true + hosts: zuul-web + tasks: + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/zuul-web-updated ]; then podman + rm zuul-web; /usr/local/bin/container-zuul-web.sh; rm /var/lib/software-factory/versions/zuul-web-updated; + fi + - name: Setting service zuul-web to started + service: + name: zuul-web + state: started +- any_errors_fatal: true + hosts: zuul-executor + tasks: + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/zuul-executor-updated ]; then + podman rm zuul-executor; /usr/local/bin/container-zuul-executor.sh; rm 
/var/lib/software-factory/versions/zuul-executor-updated; + fi + - name: Setting service zuul-executor to started + service: + name: zuul-executor + state: started +- any_errors_fatal: true + hosts: zuul-scheduler + tasks: + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/zuul-scheduler-updated ]; then + podman rm zuul-scheduler; /usr/local/bin/container-zuul-scheduler.sh; rm /var/lib/software-factory/versions/zuul-scheduler-updated; + fi + - name: Setting service zuul-scheduler to started + service: + name: zuul-scheduler + state: started +- any_errors_fatal: true + hosts: install-server + tasks: + - include_tasks: '{{ sf_tasks_dir }}/ensure_zuul_running.yml' + name: Wait for Zuul running +- any_errors_fatal: true + hosts: zuul-scheduler + tasks: + - name: Reload zuul queues + shell: if [ -f /var/lib/zuul/scripts/zuul-change-dump.sh ]; then /var/lib/zuul/scripts/zuul-change-dump.sh + && rm /var/lib/zuul/scripts/zuul-change-dump.sh; fi +- any_errors_fatal: true + hosts: install-server + tasks: + - command: 'logger --tag event-sfconfig "zuul restart process: zuul restart process: + done"' + name: Log system message diff --git a/refarch-golden-tests/zuul_start.yml b/refarch-golden-tests/zuul_start.yml new file mode 100644 index 000000000..65718a609 --- /dev/null +++ b/refarch-golden-tests/zuul_start.yml @@ -0,0 +1,38 @@ +- any_errors_fatal: true + hosts: zuul-web + tasks: + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/zuul-web-updated ]; then podman + rm zuul-web; /usr/local/bin/container-zuul-web.sh; rm /var/lib/software-factory/versions/zuul-web-updated; + fi + - name: Setting service zuul-web to started + service: + name: zuul-web + state: started +- any_errors_fatal: true + hosts: zuul-executor + tasks: + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/zuul-executor-updated ]; then + podman rm zuul-executor; /usr/local/bin/container-zuul-executor.sh; rm /var/lib/software-factory/versions/zuul-executor-updated; + fi + - name: Setting service zuul-executor to started + service: + name: zuul-executor + state: started +- any_errors_fatal: true + hosts: zuul-scheduler + tasks: + - name: Update container if needed + shell: if [ -f /var/lib/software-factory/versions/zuul-scheduler-updated ]; then + podman rm zuul-scheduler; /usr/local/bin/container-zuul-scheduler.sh; rm /var/lib/software-factory/versions/zuul-scheduler-updated; + fi + - name: Setting service zuul-scheduler to started + service: + name: zuul-scheduler + state: started +- any_errors_fatal: true + hosts: install-server + tasks: + - include_tasks: '{{ sf_tasks_dir }}/ensure_zuul_running.yml' + name: Wait for Zuul running diff --git a/refarch-golden-tests/zuul_stop.yml b/refarch-golden-tests/zuul_stop.yml new file mode 100644 index 000000000..7ed052dc4 --- /dev/null +++ b/refarch-golden-tests/zuul_stop.yml @@ -0,0 +1,29 @@ +- any_errors_fatal: true + hosts: zuul-web + tasks: + - failed_when: false + name: Setting service zuul-web to stopped + service: + name: zuul-web + state: stopped +- any_errors_fatal: true + hosts: zuul-executor + tasks: + - failed_when: false + name: Setting service zuul-executor to stopped + service: + name: zuul-executor + state: stopped +- any_errors_fatal: true + hosts: zuul-scheduler + tasks: + - failed_when: false + name: Setting service zuul-scheduler to stopped + service: + name: zuul-scheduler + state: stopped +- any_errors_fatal: true + hosts: zuul-scheduler + tasks: + - name: 
Cleaning Zookeeper data + shell: echo yes | zuul_wrapper delete-state diff --git a/refarch/allinone.yaml b/refarch/allinone.yaml index 8366030ed..48dd5b9b2 100644 --- a/refarch/allinone.yaml +++ b/refarch/allinone.yaml @@ -16,6 +16,7 @@ inventory: - zuul-scheduler - zuul-executor - zuul-fingergw + - zuul-merger - zuul-web - gerritbot - nodepool-launcher diff --git a/refarch/softwarefactory-project.io.yaml b/refarch/softwarefactory-project.io.yaml index 4c7fbb41a..790cdf52f 100644 --- a/refarch/softwarefactory-project.io.yaml +++ b/refarch/softwarefactory-project.io.yaml @@ -4,11 +4,11 @@ inventory: roles: - install-server - mysql + - gerrit - gateway - managesf - etherpad - lodgeit - - gerrit - gerritbot - murmur - opensearch-dashboards @@ -38,6 +38,10 @@ inventory: ip: 192.168.240.8 roles: - zuul-executor +- name: ze02 + ip: 192.168.240.9 + roles: + - zuul-executor - name: runc01 ip: 38.145.33.82 max-servers: 24 diff --git a/sfconfig/arch.py b/sfconfig/arch.py index 1ce637123..5ee599866 100644 --- a/sfconfig/arch.py +++ b/sfconfig/arch.py @@ -116,7 +116,7 @@ def process(args): list(aliases) args.glue["hosts_file"][host["ip"]] = [host["hostname"]] + \ - list(aliases) + list(sorted(aliases)) # NOTE(dpawlik) Remove it after moving elasticsearch role to opensearch if 'elasticsearch' in aliases: diff --git a/sfconfig/cmd.py b/sfconfig/cmd.py index 1829d6e39..cd10b5170 100755 --- a/sfconfig/cmd.py +++ b/sfconfig/cmd.py @@ -110,6 +110,7 @@ def usage(components): p.add_argument("--erase", action='store_true', help="Erase data") p.add_argument("--upgrade", action='store_true', help="Run upgrade task") p.add_argument("--update", action='store_true', help="Run upgrade task") + p.add_argument("--golden-tests", metavar='PATH', help="Run golden tests") # Deprecated p.add_argument("--skip-install", default=False, action='store_true', @@ -176,6 +177,28 @@ def main(): components = sfconfig.utils.load_components() args = usage(components) + if args.golden_tests: + # Update the golden tests and abort the execution + args.sfconfig = yaml_load(args.config) + args.sfarch = remove_unused_role(yaml_load(args.arch)) + args.ansible_root = args.golden_tests + args.etc_hosts = "%s/etc-hosts" % args.golden_tests + args.glue = dict( + sf_playbooks_dir=args.golden_tests + ) + for host in args.sfarch["inventory"]: + # TODO: do not force $fqdn as host domain name + if "hostname" not in host: + host["hostname"] = "%s.%s" % ( + host["name"], args.sfconfig["fqdn"]) + + sfconfig.arch.process(args) + sfconfig.inventory.generate(args) + exit(0) + + # When not running golden tests, write /etc/hosts to the right place + args.etc_hosts = '/etc/hosts' + # Ensure environment is UTF-8 os.environ["LC_ALL"] = "en_US.UTF-8" diff --git a/sfconfig/inventory.py b/sfconfig/inventory.py index 479a3c806..1c5298f38 100644 --- a/sfconfig/inventory.py +++ b/sfconfig/inventory.py @@ -153,8 +153,14 @@ def recover(args, pb): 'loop': [role for role in args.glue['roles']]} ])) + user_ns_task = {'sysctl': { + 'name': 'user.max_user_namespaces', + 'value': '31089' + }} + # Start mysql pb.append(host_play('mysql', tasks=[ + user_ns_task, {'include_role': { 'name': 'sf-mysql', 'tasks_from': 'install.yml', @@ -168,6 +174,7 @@ def recover(args, pb): # Start zookeeper pb.append(host_play('zookeeper', tasks=[ + user_ns_task, {'include_role': { 'name': 'sf-zookeeper', 'tasks_from': 'install.yml', @@ -181,6 +188,7 @@ def recover(args, pb): # Call restore task for host in args.inventory: play = host_play(host, params={'role_action': 'restore'}) + 
play['pre_tasks'] = [user_ns_task] play['roles'] = [] for role in host["roles"]: # Only recover zuul data from the scheduler @@ -651,7 +659,7 @@ def generate(args): # Adds playbooks to architecture for host in arch["inventory"]: # Host params are generic roles parameters - host["params"] = {'host_public_url': host['public_url']} + host["params"] = {} if "influxdb" in host["roles"]: # Add telegraf for statsd gateway @@ -713,6 +721,6 @@ def ensure_role_services(role_name, meta_names): # including network static_hostname defined in sfconfig.yaml host_arch = copy.copy(arch) host_arch["network"] = args.sfconfig["network"] - render_template("/etc/hosts", + render_template(args.etc_hosts, "%s/etc-hosts.j2" % templates, host_arch)
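
Note for reviewers: the --golden-tests code path added in sfconfig/cmd.py above redirects the generated playbooks, the hosts inventory and the etc-hosts file into the directory given on the command line, so a behaviour change in sfconfig shows up as a plain file difference against the files committed under refarch-golden-tests/. The helper below is a minimal, hypothetical sketch (it is not part of this patch) of such a comparison using only the Python standard library; the refarch-golden-tests and /tmp/golden-tests-output paths are assumptions, and regenerating the files in place followed by git diff achieves the same result.

    #!/usr/bin/env python3
    # Hypothetical helper, not shipped with sf-config: diff a fresh
    # --golden-tests run (written to a scratch directory) against the
    # committed reference files and exit non-zero on any difference.
    import difflib
    import pathlib
    import sys

    golden = pathlib.Path("refarch-golden-tests")       # committed reference output (assumed path)
    scratch = pathlib.Path("/tmp/golden-tests-output")  # fresh --golden-tests output (assumed path)

    rc = 0
    for ref in sorted(p for p in golden.iterdir() if p.is_file()):
        candidate = scratch / ref.name
        if not candidate.is_file():
            print("missing generated file: %s" % candidate)
            rc = 1
            continue
        diff = list(difflib.unified_diff(
            ref.read_text().splitlines(keepends=True),
            candidate.read_text().splitlines(keepends=True),
            fromfile=str(ref), tofile=str(candidate)))
        if diff:
            sys.stdout.writelines(diff)
            rc = 1
    sys.exit(rc)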