From f9224f32afbbe6316acaf1b0ad02e2909ac6009a Mon Sep 17 00:00:00 2001
From: Neha Oudin <17551419+Gu1nness@users.noreply.github.com>
Date: Thu, 17 Oct 2024 10:02:47 +0200
Subject: [PATCH] [DPE-3908] Minor version upgrades (#335)

---
 .github/workflows/release.yaml | 3 +
 actions.yaml | 18 +
 charm_version | 1 +
 charmcraft.yaml | 26 +-
 metadata.yaml | 3 +
 poetry.lock | 334 +++++++++--------
 pyproject.toml | 2 +
 src/charm.py | 336 ++++++++++++++----
 src/config.py | 11 +-
 src/exceptions.py | 12 +
 src/upgrades/__init__.py | 4 +
 src/upgrades/kubernetes_upgrades.py | 256 +++++++++++++
 src/upgrades/mongodb_upgrades.py | 261 ++++++++++++++
 .../integration/backup_tests/test_backups.py | 12 +-
 .../backup_tests/test_sharding_backups.py | 3 +
 tests/integration/ha_tests/helpers.py | 1 +
 tests/integration/helpers.py | 6 +
 .../integration/metrics_tests/test_metrics.py | 6 +-
 .../relation_tests/test_charm_relations.py | 2 +
 tests/integration/sharding_tests/helpers.py | 6 +
 .../integration/sharding_tests/test_mongos.py | 2 +
 .../sharding_tests/test_sharding.py | 4 +
 .../sharding_tests/test_sharding_relations.py | 4 +
 tests/integration/test_charm.py | 1 +
 tests/integration/test_teardown.py | 1 +
 tests/integration/tls_tests/test_tls.py | 4 +-
 tests/integration/upgrades/helpers.py | 36 +-
 .../upgrades/test_local_sharding_upgrades.py | 195 ++++++++++
 .../upgrades/test_local_upgrades.py | 107 ++++++
 tests/integration/upgrades/test_rollback.py | 108 ++++++
 .../upgrades/test_sharding_upgrades.py | 12 +-
 tests/integration/upgrades/test_upgrades.py | 12 +-
 tests/unit/test_charm.py | 112 +++---
 tests/unit/test_mongodb_backups.py | 7 +
 tests/unit/test_mongodb_provider.py | 8 +
 tests/unit/test_upgrade.py | 97 ++++-
 workload_version | 1 +
 37 files changed, 1689 insertions(+), 325 deletions(-)
 create mode 100644 charm_version
 create mode 100644 src/upgrades/__init__.py
 create mode 100644 src/upgrades/kubernetes_upgrades.py
 create mode 100644 src/upgrades/mongodb_upgrades.py
 create mode 100644 tests/integration/upgrades/test_local_sharding_upgrades.py
 create mode 100644 tests/integration/upgrades/test_local_upgrades.py
 create mode 100644 tests/integration/upgrades/test_rollback.py
 create mode 100644 workload_version

diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 770a87bc2..3318d3c61 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -15,6 +15,9 @@ jobs:
         uses: actions/checkout@v4
         with:
           fetch-depth: 0
+      - run: |
+          # Workaround for https://github.com/canonical/charmcraft/issues/1389#issuecomment-1880921728
+          touch requirements.txt
       - name: Check libs
         uses: canonical/charming-actions/check-libraries@2.6.2
         with:
diff --git a/actions.yaml b/actions.yaml
index a82f6a978..68d77d740 100644
--- a/actions.yaml
+++ b/actions.yaml
@@ -42,3 +42,21 @@ set-tls-private-key:
     internal-key:
       type: string
       description: The content of private key for internal communications with clients. Content will be auto-generated if this option is not specified.
+
+pre-refresh-check:
+  description: Check if charm is ready to refresh.
+
+resume-refresh:
+  description: Refresh remaining units (after you manually verified that refreshed units are healthy).
+  params:
+    force:
+      type: boolean
+      default: false
+      description: |
+        Potential of *data loss* and *downtime*
+
+        Force refresh of next unit.
+
+        Use to
+        - force incompatible refresh and/or
+        - continue refresh if 1+ refreshed units have non-active status
diff --git a/charm_version b/charm_version
new file mode 100644
index 000000000..d00491fd7
--- /dev/null
+++ b/charm_version
@@ -0,0 +1 @@
+1
diff --git a/charmcraft.yaml b/charmcraft.yaml
index c4f803ece..39882a819 100644
--- a/charmcraft.yaml
+++ b/charmcraft.yaml
@@ -2,8 +2,19 @@
 # See LICENSE file for licensing details.
 type: "charm"
+bases:
+  - build-on:
+      - name: "ubuntu"
+        channel: "22.04"
+    run-on:
+      - name: "ubuntu"
+        channel: "22.04"

 parts:
   charm:
+    charm-strict-dependencies: true
+    override-build: |
+      rustup default stable
+      craftctl default
     build-snaps:
       - rustup
     build-packages:
@@ -17,18 +28,13 @@ parts:
         echo 'ERROR: Use "tox run -e build-dev" instead of calling "charmcraft pack" directly' >&2
         exit 1
       fi
-    override-build: |
-      rustup default stable
-      craftctl default
   files:
     plugin: dump
     source: .
     prime:
      - charm_internal_version
-bases:
-  - build-on:
-      - name: "ubuntu"
-        channel: "22.04"
-    run-on:
-      - name: "ubuntu"
-        channel: "22.04"
+      - charm_version
+      - workload_version
+    override-build: |
+      rustup default stable
+      craftctl default
diff --git a/metadata.yaml b/metadata.yaml
index 61ac27464..ce62189a3 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -21,6 +21,9 @@ website:
 peers:
   database-peers:
     interface: mongodb-peers
+  upgrade-version-a:
+    interface: upgrade
+
 provides:
   database:
     interface: mongodb_client
diff --git a/poetry.lock b/poetry.lock
index da52cda24..52cfc41d0 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -298,101 +298,116 @@ pycparser = "*"

 [[package]]
 name = "charset-normalizer"
-version = "3.3.2"
+version = "3.4.0"
 description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = 
"charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = 
"sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = 
"charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] @@ -1092,72 +1107,72 @@ six = ">=1.11.0,<2.0" [[package]] name = "markupsafe" -version = "3.0.0" +version = "3.0.1" description = "Safely add untrusted strings to HTML/XML markup." optional = false python-versions = ">=3.9" files = [ - {file = "MarkupSafe-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:380faf314c3c84c1682ca672e6280c6c59e92d0bc13dc71758ffa2de3cd4e252"}, - {file = "MarkupSafe-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ee9790be6f62121c4c58bbced387b0965ab7bffeecb4e17cc42ef290784e363"}, - {file = "MarkupSafe-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ddf5cb8e9c00d9bf8b0c75949fb3ff9ea2096ba531693e2e87336d197fdb908"}, - {file = "MarkupSafe-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b36473a2d3e882d1873ea906ce54408b9588dc2c65989664e6e7f5a2de353d7"}, - {file = "MarkupSafe-3.0.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dba0f83119b9514bc37272ad012f0cc03f0805cc6a2bea7244e19250ac8ff29f"}, - {file = "MarkupSafe-3.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:409535e0521c4630d5b5a1bf284e9d3c76d2fc2f153ebb12cf3827797798cc99"}, - {file = "MarkupSafe-3.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:64a7c7856c3a409011139b17d137c2924df4318dab91ee0530800819617c4381"}, - {file = "MarkupSafe-3.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4deea1d9169578917d1f35cdb581bc7bab56a7e8c5be2633bd1b9549c3c22a01"}, - {file = "MarkupSafe-3.0.0-cp310-cp310-win32.whl", hash = "sha256:3cd0bba31d484fe9b9d77698ddb67c978704603dc10cdc905512af308cfcca6b"}, - {file = "MarkupSafe-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:4ca04c60006867610a06575b46941ae616b19da0adc85b9f8f3d9cbd7a3da385"}, - {file = "MarkupSafe-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e64b390a306f9e849ee809f92af6a52cda41741c914358e0e9f8499d03741526"}, - {file = "MarkupSafe-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c524203207f5b569df06c96dafdc337228921ee8c3cc5f6e891d024c6595352"}, - {file = "MarkupSafe-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c409691696bec2b5e5c9efd9593c99025bf2f317380bf0d993ee0213516d908a"}, - {file = "MarkupSafe-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64f7d04410be600aa5ec0626d73d43e68a51c86500ce12917e10fd013e258df5"}, - {file = "MarkupSafe-3.0.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:105ada43a61af22acb8774514c51900dc820c481cc5ba53f17c09d294d9c07ca"}, - {file = "MarkupSafe-3.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a5fd5500d4e4f7cc88d8c0f2e45126c4307ed31e08f8ec521474f2fd99d35ac3"}, - {file = "MarkupSafe-3.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:25396abd52b16900932e05b7104bcdc640a4d96c914f39c3b984e5a17b01fba0"}, - {file = 
"MarkupSafe-3.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3efde9a8c56c3b6e5f3fa4baea828f8184970c7c78480fedb620d804b1c31e5c"}, - {file = "MarkupSafe-3.0.0-cp311-cp311-win32.whl", hash = "sha256:12ddac720b8965332d36196f6f83477c6351ba6a25d4aff91e30708c729350d7"}, - {file = "MarkupSafe-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:658fdf6022740896c403d45148bf0c36978c6b48c9ef8b1f8d0c7a11b6cdea86"}, - {file = "MarkupSafe-3.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d261ec38b8a99a39b62e0119ed47fe3b62f7691c500bc1e815265adc016438c1"}, - {file = "MarkupSafe-3.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e363440c8534bf2f2ef1b8fdc02037eb5fff8fce2a558519b22d6a3a38b3ec5e"}, - {file = "MarkupSafe-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7835de4c56066e096407a1852e5561f6033786dd987fa90dc384e45b9bd21295"}, - {file = "MarkupSafe-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6cc46a27d904c9be5732029769acf4b0af69345172ed1ef6d4db0c023ff603b"}, - {file = "MarkupSafe-3.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f0411641d31aa6f7f0cc13f0f18b63b8dc08da5f3a7505972a42ab059f479ba3"}, - {file = "MarkupSafe-3.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b2a7afd24d408b907672015555bc10be2382e6c5f62a488e2d452da670bbd389"}, - {file = "MarkupSafe-3.0.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c8ab7efeff1884c5da8e18f743b667215300e09043820d11723718de0b7db934"}, - {file = "MarkupSafe-3.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8219e2207f6c188d15614ea043636c2b36d2d79bf853639c124a179412325a13"}, - {file = "MarkupSafe-3.0.0-cp312-cp312-win32.whl", hash = "sha256:59420b5a9a5d3fee483a32adb56d7369ae0d630798da056001be1e9f674f3aa6"}, - {file = "MarkupSafe-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:7ed789d0f7f11fcf118cf0acb378743dfdd4215d7f7d18837c88171405c9a452"}, - {file = "MarkupSafe-3.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:27d6a73682b99568916c54a4bfced40e7d871ba685b580ea04bbd2e405dfd4c5"}, - {file = "MarkupSafe-3.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:494a64efc535e147fcc713dba58eecfce3a79f1e93ebe81995b387f5cd9bc2e1"}, - {file = "MarkupSafe-3.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5243044a927e8a6bb28517838662a019cd7f73d7f106bbb37ab5e7fa8451a92"}, - {file = "MarkupSafe-3.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63dae84964a9a3d2610808cee038f435d9a111620c37ccf872c2fcaeca6865b3"}, - {file = "MarkupSafe-3.0.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcbee57fedc9b2182c54ffc1c5eed316c3da8bbfeda8009e1b5d7220199d15da"}, - {file = "MarkupSafe-3.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f846fd7c241e5bd4161e2a483663eb66e4d8e12130fcdc052f310f388f1d61c6"}, - {file = "MarkupSafe-3.0.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:678fbceb202382aae42c1f0cd9f56b776bc20a58ae5b553ee1fe6b802983a1d6"}, - {file = "MarkupSafe-3.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:bd9b8e458e2bab52f9ad3ab5dc8b689a3c84b12b2a2f64cd9a0dfe209fb6b42f"}, - {file = "MarkupSafe-3.0.0-cp313-cp313-win32.whl", hash = "sha256:1fd02f47596e00a372f5b4af2b4c45f528bade65c66dfcbc6e1ea1bfda758e98"}, - {file = "MarkupSafe-3.0.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:b94bec9eda10111ec7102ef909eca4f3c2df979643924bfe58375f560713a7d1"}, - {file = "MarkupSafe-3.0.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:509c424069dd037d078925b6815fc56b7271f3aaec471e55e6fa513b0a80d2aa"}, - {file = "MarkupSafe-3.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:81be2c0084d8c69e97e3c5d73ce9e2a6e523556f2a19c4e195c09d499be2f808"}, - {file = "MarkupSafe-3.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b43ac1eb9f91e0c14aac1d2ef0f76bc7b9ceea51de47536f61268191adf52ad7"}, - {file = "MarkupSafe-3.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b231255770723f1e125d63c14269bcd8b8136ecfb620b9a18c0297e046d0736"}, - {file = "MarkupSafe-3.0.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c182d45600556917f811aa019d834a89fe4b6f6255da2fd0bdcf80e970f95918"}, - {file = "MarkupSafe-3.0.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9f91c90f8f3bf436f81c12eeb4d79f9ddd263c71125e6ad71341906832a34386"}, - {file = "MarkupSafe-3.0.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:a7171d2b869e9be238ea318c196baf58fbf272704e9c1cd4be8c380eea963342"}, - {file = "MarkupSafe-3.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:cb244adf2499aa37d5dc43431990c7f0b632d841af66a51d22bd89c437b60264"}, - {file = "MarkupSafe-3.0.0-cp313-cp313t-win32.whl", hash = "sha256:96e3ed550600185d34429477f1176cedea8293fa40e47fe37a05751bcb64c997"}, - {file = "MarkupSafe-3.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:1d151b9cf3307e259b749125a5a08c030ba15a8f1d567ca5bfb0e92f35e761f5"}, - {file = "MarkupSafe-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:23efb2be7221105c8eb0e905433414d2439cb0a8c5d5ca081c1c72acef0f5613"}, - {file = "MarkupSafe-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:81ee9c967956b9ea39b3a5270b7cb1740928d205b0dc72629164ce621b4debf9"}, - {file = "MarkupSafe-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5509a8373fed30b978557890a226c3d30569746c565b9daba69df80c160365a5"}, - {file = "MarkupSafe-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1c13c6c908811f867a8e9e66efb2d6c03d1cdd83e92788fe97f693c457dc44f"}, - {file = "MarkupSafe-3.0.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d7e63d1977d3806ce0a1a3e0099b089f61abdede5238ca6a3f3bf8877b46d095"}, - {file = "MarkupSafe-3.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:d2c099be5274847d606574234e494f23a359e829ba337ea9037c3a72b0851942"}, - {file = "MarkupSafe-3.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e042ccf8fe5bf8b6a4b38b3f7d618eb10ea20402b0c9f4add9293408de447974"}, - {file = "MarkupSafe-3.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:98fb3a2bf525ad66db96745707b93ba0f78928b7a1cb2f1cb4b143bc7e2ba3b3"}, - {file = "MarkupSafe-3.0.0-cp39-cp39-win32.whl", hash = "sha256:a80c6740e1bfbe50cea7cbf74f48823bb57bd59d914ee22ff8a81963b08e62d2"}, - {file = "MarkupSafe-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:5d207ff5cceef77796f8aacd44263266248cf1fbc601441524d7835613f8abec"}, - {file = "markupsafe-3.0.0.tar.gz", hash = "sha256:03ff62dea2fef3eadf2f1853bc6332bcb0458d9608b11dfb1cd5aeda1c178ea6"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:db842712984e91707437461930e6011e60b39136c7331e971952bb30465bc1a1"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:3ffb4a8e7d46ed96ae48805746755fadd0909fea2306f93d5d8233ba23dda12a"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67c519635a4f64e495c50e3107d9b4075aec33634272b5db1cde839e07367589"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48488d999ed50ba8d38c581d67e496f955821dc183883550a6fbc7f1aefdc170"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f31ae06f1328595d762c9a2bf29dafd8621c7d3adc130cbb46278079758779ca"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:80fcbf3add8790caddfab6764bde258b5d09aefbe9169c183f88a7410f0f6dea"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3341c043c37d78cc5ae6e3e305e988532b072329639007fd408a476642a89fd6"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cb53e2a99df28eee3b5f4fea166020d3ef9116fdc5764bc5117486e6d1211b25"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-win32.whl", hash = "sha256:db15ce28e1e127a0013dfb8ac243a8e392db8c61eae113337536edb28bdc1f97"}, + {file = "MarkupSafe-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:4ffaaac913c3f7345579db4f33b0020db693f302ca5137f106060316761beea9"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:26627785a54a947f6d7336ce5963569b5d75614619e75193bdb4e06e21d447ad"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b954093679d5750495725ea6f88409946d69cfb25ea7b4c846eef5044194f583"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:973a371a55ce9ed333a3a0f8e0bcfae9e0d637711534bcb11e130af2ab9334e7"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:244dbe463d5fb6d7ce161301a03a6fe744dac9072328ba9fc82289238582697b"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d98e66a24497637dd31ccab090b34392dddb1f2f811c4b4cd80c230205c074a3"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ad91738f14eb8da0ff82f2acd0098b6257621410dcbd4df20aaa5b4233d75a50"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:7044312a928a66a4c2a22644147bc61a199c1709712069a344a3fb5cfcf16915"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a4792d3b3a6dfafefdf8e937f14906a51bd27025a36f4b188728a73382231d91"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-win32.whl", hash = "sha256:fa7d686ed9883f3d664d39d5a8e74d3c5f63e603c2e3ff0abcba23eac6542635"}, + {file = "MarkupSafe-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ba25a71ebf05b9bb0e2ae99f8bc08a07ee8e98c612175087112656ca0f5c8bf"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8ae369e84466aa70f3154ee23c1451fda10a8ee1b63923ce76667e3077f2b0c4"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40f1e10d51c92859765522cbd79c5c8989f40f0419614bcdc5015e7b6bf97fc5"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a4cb365cb49b750bdb60b846b0c0bc49ed62e59a76635095a179d440540c346"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ee3941769bd2522fe39222206f6dd97ae83c442a94c90f2b7a25d847d40f4729"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62fada2c942702ef8952754abfc1a9f7658a4d5460fabe95ac7ec2cbe0d02abc"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4c2d64fdba74ad16138300815cfdc6ab2f4647e23ced81f59e940d7d4a1469d9"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fb532dd9900381d2e8f48172ddc5a59db4c445a11b9fab40b3b786da40d3b56b"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0f84af7e813784feb4d5e4ff7db633aba6c8ca64a833f61d8e4eade234ef0c38"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-win32.whl", hash = "sha256:cbf445eb5628981a80f54087f9acdbf84f9b7d862756110d172993b9a5ae81aa"}, + {file = "MarkupSafe-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:a10860e00ded1dd0a65b83e717af28845bb7bd16d8ace40fe5531491de76b79f"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e81c52638315ff4ac1b533d427f50bc0afc746deb949210bc85f05d4f15fd772"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:312387403cd40699ab91d50735ea7a507b788091c416dd007eac54434aee51da"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ae99f31f47d849758a687102afdd05bd3d3ff7dbab0a8f1587981b58a76152a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c97ff7fedf56d86bae92fa0a646ce1a0ec7509a7578e1ed238731ba13aabcd1c"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a7420ceda262dbb4b8d839a4ec63d61c261e4e77677ed7c66c99f4e7cb5030dd"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45d42d132cff577c92bfba536aefcfea7e26efb975bd455db4e6602f5c9f45e7"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c8817557d0de9349109acb38b9dd570b03cc5014e8aabf1cbddc6e81005becd"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a54c43d3ec4cf2a39f4387ad044221c66a376e58c0d0e971d47c475ba79c6b5"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-win32.whl", hash = "sha256:c91b394f7601438ff79a4b93d16be92f216adb57d813a78be4446fe0f6bc2d8c"}, + {file = "MarkupSafe-3.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:fe32482b37b4b00c7a52a07211b479653b7fe4f22b2e481b9a9b099d8a430f2f"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:17b2aea42a7280db02ac644db1d634ad47dcc96faf38ab304fe26ba2680d359a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:852dc840f6d7c985603e60b5deaae1d89c56cb038b577f6b5b8c808c97580f1d"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0778de17cff1acaeccc3ff30cd99a3fd5c50fc58ad3d6c0e0c4c58092b859396"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:800100d45176652ded796134277ecb13640c1a537cad3b8b53da45aa96330453"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d06b24c686a34c86c8c1fba923181eae6b10565e4d80bdd7bc1c8e2f11247aa4"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = 
"sha256:33d1c36b90e570ba7785dacd1faaf091203d9942bc036118fab8110a401eb1a8"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:beeebf760a9c1f4c07ef6a53465e8cfa776ea6a2021eda0d0417ec41043fe984"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bbde71a705f8e9e4c3e9e33db69341d040c827c7afa6789b14c6e16776074f5a"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-win32.whl", hash = "sha256:82b5dba6eb1bcc29cc305a18a3c5365d2af06ee71b123216416f7e20d2a84e5b"}, + {file = "MarkupSafe-3.0.1-cp313-cp313t-win_amd64.whl", hash = "sha256:730d86af59e0e43ce277bb83970530dd223bf7f2a838e086b50affa6ec5f9295"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4935dd7883f1d50e2ffecca0aa33dc1946a94c8f3fdafb8df5c330e48f71b132"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e9393357f19954248b00bed7c56f29a25c930593a77630c719653d51e7669c2a"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40621d60d0e58aa573b68ac5e2d6b20d44392878e0bfc159012a5787c4e35bc8"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f94190df587738280d544971500b9cafc9b950d32efcb1fba9ac10d84e6aa4e6"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6a387d61fe41cdf7ea95b38e9af11cfb1a63499af2759444b99185c4ab33f5b"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8ad4ad1429cd4f315f32ef263c1342166695fad76c100c5d979c45d5570ed58b"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:e24bfe89c6ac4c31792793ad9f861b8f6dc4546ac6dc8f1c9083c7c4f2b335cd"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:2a4b34a8d14649315c4bc26bbfa352663eb51d146e35eef231dd739d54a5430a"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-win32.whl", hash = "sha256:242d6860f1fd9191aef5fae22b51c5c19767f93fb9ead4d21924e0bcb17619d8"}, + {file = "MarkupSafe-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:93e8248d650e7e9d49e8251f883eed60ecbc0e8ffd6349e18550925e31bd029b"}, + {file = "markupsafe-3.0.1.tar.gz", hash = "sha256:3e683ee4f5d0fa2dde4db77ed8dd8a876686e3fc417655c2ece9a90576905344"}, ] [[package]] @@ -1241,6 +1256,17 @@ websocket-client = "==1.*" [package.extras] docs = ["canonical-sphinx-extensions", "furo", "linkify-it-py", "myst-parser", "pyspelling", "sphinx (==6.2.1)", "sphinx-autobuild", "sphinx-copybutton", "sphinx-design", "sphinx-notfound-page", "sphinx-tabs", "sphinxcontrib-jquery", "sphinxext-opengraph"] +[[package]] +name = "overrides" +version = "7.7.0" +description = "A decorator to automatically detect mismatch when overriding a method." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "overrides-7.7.0-py3-none-any.whl", hash = "sha256:c7ed9d062f78b8e4c1a7b70bd8796b35ead4d9f510227ef9c5dc7626c60d7e49"}, + {file = "overrides-7.7.0.tar.gz", hash = "sha256:55158fa3d93b98cc75299b1e67078ad9003ca27945c76162c1c0766d6f91820a"}, +] + [[package]] name = "packaging" version = "24.1" @@ -2458,4 +2484,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "^3.10.12" -content-hash = "e96fa214d4bdc5d55c23df0ce82fb5f79f35b95da2ba7334f18eaf8b1f201402" +content-hash = "9943085c1de344b6ee2250b8b0add01ea424d0a0ca443e52568d83cb3437b0ef" diff --git a/pyproject.toml b/pyproject.toml index 185d4a1df..05200a9b6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -22,6 +22,8 @@ pyyaml = "^6.0.1" jinja2 = "^3.1.3" poetry-core = "^1.9.0" data-platform-helpers = "^0.1.3" +overrides = "^7.7.0" +lightkube = "^0.15.3" pyOpenSSL = "^24.2.1" setuptools = "^72.0.0" diff --git a/src/charm.py b/src/charm.py index 9e806fa4a..b0fb28867 100755 --- a/src/charm.py +++ b/src/charm.py @@ -6,6 +6,7 @@ import logging import re import time +from pathlib import Path from typing import Any, Dict, List, Optional, Set import jinja2 @@ -14,6 +15,11 @@ from charms.mongodb.v0.config_server_interface import ClusterProvider from charms.mongodb.v0.mongodb_secrets import SecretCache, generate_secret_label from charms.mongodb.v0.set_status import MongoDBStatusHandler +from charms.mongodb.v0.upgrade_helpers import ( + FailedToElectNewPrimaryError, + UnitState, + unit_number, +) from charms.mongodb.v1.helpers import ( generate_keyfile, generate_password, @@ -50,6 +56,7 @@ RelationEvent, StartEvent, UpdateStatusEvent, + UpgradeCharmEvent, ) from ops.main import main from ops.model import ( @@ -75,10 +82,22 @@ ) from config import Config -from exceptions import AdminUserCreationError, MissingSecretError +from exceptions import ( + AdminUserCreationError, + ContainerNotReadyError, + FailedToUpdateFilesystem, + MissingSecretError, + NotConfigServerError, +) +from upgrades import kubernetes_upgrades +from upgrades.mongodb_upgrades import MongoDBUpgrade logger = logging.getLogger(__name__) +# Disable spamming logs from lightkube +logging.getLogger("httpx").setLevel(logging.WARNING) +logging.getLogger("httpcore").setLevel(logging.WARNING) + UNIT_REMOVAL_TIMEOUT = 1000 APP_SCOPE = Config.Relations.APP_SCOPE @@ -99,6 +118,7 @@ def __init__(self, *args): self.framework.observe(self.on.mongod_pebble_ready, self._on_mongod_pebble_ready) self.framework.observe(self.on.config_changed, self._on_config_changed) self.framework.observe(self.on.start, self._on_start) + self.framework.observe(self.on.upgrade_charm, self._on_upgrade) self.framework.observe(self.on.update_status, self._on_update_status) self.framework.observe( self.on[Config.Relations.PEERS].relation_joined, self._relation_changes_handler @@ -132,6 +152,18 @@ def __init__(self, *args): self.shard = ConfigServerRequirer(self) self.config_server = ShardingProvider(self) self.cluster = ClusterProvider(self) + + self.upgrade = MongoDBUpgrade(self) + + self.version_checker = CrossAppVersionChecker( + self, + version=get_charm_revision(self.unit, local_version=self.get_charm_internal_revision), + relations_to_check=[ + Config.Relations.SHARDING_RELATIONS_NAME, + Config.Relations.CONFIG_SERVER_RELATIONS_NAME, + ], + ) + self.metrics_endpoint = MetricsEndpointProvider( self, refresh_event=[self.on.start, self.on.update_status], jobs=self.monitoring_jobs ) @@ -143,15 +175,6 @@ def __init__(self, *args): 
container_name=Config.CONTAINER_NAME, ) - self.version_checker = CrossAppVersionChecker( - self, - version=get_charm_revision(self.unit, local_version=self.get_charm_internal_revision), - relations_to_check=[ - Config.Relations.SHARDING_RELATIONS_NAME, - Config.Relations.CONFIG_SERVER_RELATIONS_NAME, - ], - ) - # BEGIN: properties @property @@ -317,7 +340,7 @@ def _log_rotate_layer(self) -> Layer: "command": f"sh -c 'logrotate {Config.LogRotate.RENDERED_TEMPLATE}; sleep 1'", "startup": "enabled", "override": "replace", - "backoff-delay": "1m", + "backoff-delay": "1m0s", "backoff-factor": 1, "user": Config.UNIX_USER, "group": Config.UNIX_GROUP, @@ -374,6 +397,21 @@ def db_initialised(self) -> bool: """Check if MongoDB is initialised.""" return json.loads(self.app_peer_data.get("db_initialised", "false")) + @property + def unit_departed(self) -> bool: + """Whether the unit has departed or not.""" + return json.loads(self.unit_peer_data.get("unit_departed", "false")) + + @unit_departed.setter + def unit_departed(self, value: bool) -> None: + """Set the unit_departed flag.""" + if isinstance(value, bool): + self.unit_peer_data["unit_departed"] = json.dumps(value) + else: + raise ValueError( + f"'unit_departed' must be a boolean value. Provided: {value} is of type {type(value)}" + ) + def is_role_changed(self) -> bool: """Checks if application is running in provided role.""" return self.role != self.model.config["role"] @@ -407,9 +445,11 @@ def db_initialised(self, value): ) @property - def upgrade_in_progress(self): - """TODO: implement this as part of upgrades.""" - return False + def upgrade_in_progress(self) -> bool: + """Returns true if upgrade is in progress.""" + if not self.upgrade._upgrade: + return False + return self.upgrade._upgrade.in_progress @property def replica_set_initialised(self) -> bool: @@ -548,24 +588,8 @@ def _compare_secret_ids(secret_id1: str, secret_id2: str) -> bool: return pure_id1 == pure_id2 return False - # BEGIN: charm events - def _on_mongod_pebble_ready(self, event) -> None: - """Configure MongoDB pebble layer specification.""" - # Get a reference the container attribute - container = self.unit.get_container(Config.CONTAINER_NAME) - if not container.can_connect(): - logger.debug("mongod container is not ready yet.") - event.defer() - return - - # We need to check that the storages are attached before starting the services. - # pebble-ready is not guaranteed to run after storage-attached so this check allows - # to ensure that the storages are attached before the pebble-ready hook is run. 
- if any(not storage for storage in self.model.storages.values()): - logger.debug("Storages are not attached yet") - event.defer() - return - + def _filesystem_handler(self, container: Container) -> None: + """Pushes files on the container and handle permissions.""" try: # mongod needs keyFile and TLS certificates on filesystem self.push_tls_certificate_to_workload() @@ -575,19 +599,52 @@ def _on_mongod_pebble_ready(self, event) -> None: except (PathError, ProtocolError, MissingSecretError) as e: logger.error("Cannot initialize workload: %r", e) - event.defer() - return - - # Add initial Pebble config layer using the Pebble API - container.add_layer("mongod", self._mongod_layer, combine=True) - container.add_layer("log_rotate", self._log_rotate_layer, combine=True) + raise FailedToUpdateFilesystem + + def _configure_layers(self, container: Container) -> None: + """Configure the layers of the container.""" + modified = False + current_layers = container.get_plan() + new_layers = { + Config.SERVICE_NAME: self._mongod_layer, + "log_rotate": self._log_rotate_layer, + } if self.is_role(Config.Role.CONFIG_SERVER): - container.add_layer("mongos", self._mongos_layer, combine=True) + new_layers["mongos"] = self._mongos_layer + + # Add Pebble config layers missing or modified + for layer_name, definition in new_layers.items(): + for service_name, service in definition.services.items(): + if current_layers.services.get(service_name) != service: + modified = True + logger.debug(f"Adding layer {service_name}.") + container.add_layer(layer_name, definition, combine=True) + # We'll always have a logrotate configuration at this point. container.exec(["chmod", "644", "/etc/logrotate.d/mongodb"]) - # Restart changed services and start startup-enabled services. - container.replan() + if modified: + container.replan() + + def _configure_container(self, container: Container) -> None: + """Configure MongoDB pebble layer specification.""" + if not container.can_connect(): + logger.debug("mongod container is not ready yet.") + raise ContainerNotReadyError + + # We need to check that the storages are attached before starting the services. + # pebble-ready is not guaranteed to run after storage-attached so this check allows + # to ensure that the storages are attached before the pebble-ready hook is run. + if any(not storage for storage in self.model.storages.values()): + logger.debug("Storages are not attached yet") + raise ContainerNotReadyError + + try: + self._filesystem_handler(container) + except FailedToUpdateFilesystem as err: + raise ContainerNotReadyError from err + + self._configure_layers(container) # when a network cuts and the pod restarts - reconnect to the exporter try: @@ -595,9 +652,50 @@ def _on_mongod_pebble_ready(self, event) -> None: self._connect_pbm_agent() except MissingSecretError as e: logger.error("Cannot connect mongodb exporter: %r", e) + raise ContainerNotReadyError + + # BEGIN: charm events + def _on_upgrade(self, event: UpgradeCharmEvent) -> None: + """Upgrade event handler. + + During an upgrade event, it will set the version in all relations, + replan the container and process the upgrade statuses. If the upgrade + is compatible, it will end up emitting a post upgrade event that + verifies the health of the cluster. + """ + if self.unit.is_leader(): + self.version_checker.set_version_across_all_relations() + + container = self.unit.get_container(Config.CONTAINER_NAME) + + # Just run the configure layers steps on the container and defer if it fails. 
+ try: + self._configure_container(container) + except ContainerNotReadyError: + self.status.set_and_share_status(Config.Status.UNHEALTHY_UPGRADE) + self.upgrade._reconcile_upgrade(event, during_upgrade=True) event.defer() return + self.status.set_and_share_status(Config.Status.WAITING_POST_UPGRADE_STATUS) + self.upgrade._reconcile_upgrade(event, during_upgrade=True) + if self.upgrade._upgrade.is_compatible: + # Post upgrade event verifies the success of the upgrade. + self.upgrade.post_app_upgrade_event.emit() + + def _on_mongod_pebble_ready(self, event) -> None: + """Configure MongoDB pebble layer specification.""" + container = self.unit.get_container(Config.CONTAINER_NAME) + + # Just run the configure layers steps on the container and defer if it fails. + try: + self._configure_container(container) + except ContainerNotReadyError: + event.defer() + return + + self.upgrade._reconcile_upgrade(event) + def is_db_service_ready(self) -> bool: """Checks if the MongoDB service is ready to accept connections.""" with MongoDBConnection(self.mongodb_config, "localhost", direct=True) as direct_mongo: @@ -628,7 +726,30 @@ def _on_config_changed(self, event: ConfigChangedEvent) -> None: f"Migration of sharding components not permitted, revert config role to {self.role}" ) - def _on_start(self, event) -> None: + def __can_charm_start(self) -> bool: + """Runs the checks that are mandatory before trying to create anything mongodb related.""" + container = self.unit.get_container(Config.CONTAINER_NAME) + + try: + self._configure_container(container) + except ContainerNotReadyError: + return + + if not container.can_connect(): + logger.debug("mongod container is not ready yet.") + return False + + if not container.exists(Config.SOCKET_PATH): + logger.debug("The mongod socket is not ready yet.") + return False + + if not self.is_db_service_ready(): + logger.debug("mongodb service is not ready yet.") + return False + + return True + + def _on_start(self, event: StartEvent) -> None: """Initialise MongoDB. Initialisation of replSet should be made once after start. @@ -646,19 +767,7 @@ def _on_start(self, event) -> None: It is needed to install mongodb-clients inside the charm container to make this function work correctly. 
""" - container = self.unit.get_container(Config.CONTAINER_NAME) - if not container.can_connect(): - logger.debug("mongod container is not ready yet.") - event.defer() - return - - if not container.exists(Config.SOCKET_PATH): - logger.debug("The mongod socket is not ready yet.") - event.defer() - return - - if not self.is_db_service_ready(): - logger.debug("mongodb service is not ready yet.") + if not self.__can_charm_start(): event.defer() return @@ -673,6 +782,7 @@ def _on_start(self, event) -> None: # mongod is now active self.status.set_and_share_status(ActiveStatus()) + self.upgrade._reconcile_upgrade(event) if not self.unit.is_leader(): return @@ -688,12 +798,13 @@ def _on_start(self, event) -> None: def _relation_changes_handler(self, event: RelationEvent) -> None: """Handles different relation events and updates MongoDB replica set.""" + self.upgrade._reconcile_upgrade(event) self._connect_mongodb_exporter() self._connect_pbm_agent() if isinstance(event, RelationDepartedEvent): if event.departing_unit.name == self.unit.name: - self.unit_peer_data.setdefault("unit_departed", "True") + self.unit_departed = True if not self.unit.is_leader(): return @@ -760,19 +871,67 @@ def _reconcile_mongo_hosts_and_users(self, event: RelationEvent) -> None: logger.info("Deferring reconfigure: error=%r", e) event.defer() + def __handle_partition_on_stop(self) -> None: + """Raise partition to prevent other units from restarting if an upgrade is in progress. + + If an upgrade is not in progress, the leader unit will reset the partition to 0. + """ + current_unit_number = unit_number(self.unit) + if kubernetes_upgrades.partition.get(app_name=self.app.name) < current_unit_number: + kubernetes_upgrades.partition.set(app_name=self.app.name, value=current_unit_number) + logger.debug(f"Partition set to {current_unit_number} during stop event") + + def __handle_relation_departed_on_stop(self) -> None: + """Leaves replicaset. + + If the unit has not already left the replica set, this function + attempts to block operations until the unit is removed. Note that with + how Juju currently operates, we only have 30 seconds until SIGTERM + command, so we are by no means guaranteed to have removed the replica + before the pod is removed. However the leader will reconfigure the + replica set if this is the case on `update status`. + """ + logger.debug(f"{self.unit.name} blocking on_stop") + is_in_replica_set = True + timeout = UNIT_REMOVAL_TIMEOUT + while is_in_replica_set and timeout > 0: + is_in_replica_set = self.is_unit_in_replica_set() + time.sleep(1) + timeout -= 1 + if timeout < 0: + raise Exception(f"{self.unit.name}.on_stop timeout exceeded") + logger.debug("{self.unit.name} releasing on_stop") + self.unit_departed = False + + def __handle_upgrade_on_stop(self) -> None: + """Sets the unit state to RESTARTING and step down from replicaset. + + Note that with how Juju currently operates, we only have at most 30 + seconds until SIGTERM command, so we are by no means guaranteed to have + stepped down before the pod is removed. + Upon restart, the upgrade will still resume because all hooks run the + `_reconcile_upgrade` handler. + """ + self.upgrade._upgrade.unit_state = UnitState.RESTARTING + + # According to the MongoDB documentation, before upgrading the primary, we must ensure a + # safe primary re-election. 
+ try: + if self.unit.name == self.primary: + logger.debug("Stepping down current primary, before upgrading service...") + self.upgrade.step_down_primary_and_wait_reelection() + except FailedToElectNewPrimaryError: + logger.error("Failed to reelect primary before upgrading unit.") + return + def _on_stop(self, event) -> None: - if "True" == self.unit_peer_data.get("unit_departed", "False"): - logger.debug(f"{self.unit.name} blocking on_stop") - is_in_replica_set = True - timeout = UNIT_REMOVAL_TIMEOUT - while is_in_replica_set and timeout > 0: - is_in_replica_set = self.is_unit_in_replica_set() - time.sleep(1) - timeout -= 1 - if timeout < 0: - raise Exception(f"{self.unit.name}.on_stop timeout exceeded") - logger.debug(f"{self.unit.name} releasing on_stop") - self.unit_peer_data["unit_departed"] = "" + self.__handle_partition_on_stop() + if self.unit_departed: + self.__handle_relation_departed_on_stop() + if not self.upgrade._upgrade: + logger.debug("Peer relation missing during stop event") + return + self.__handle_upgrade_on_stop() def _on_update_status(self, event: UpdateStatusEvent): # user-made mistakes might result in other incorrect statues. Prioritise informing users of @@ -800,6 +959,12 @@ def _on_update_status(self, event: UpdateStatusEvent): self.status.set_and_share_status(WaitingStatus("Waiting for MongoDB to start")) return + self.upgrade._reconcile_upgrade(event) + if self.upgrade_in_progress: + self.status.set_and_share_status(self.status.process_statuses()) + # Useless to reun relation changes handler if upgrade is in progress and will delay. + return + # leader should periodically handle configuring the replica set. Incidents such as network # cuts can lead to new IP addresses and therefore will require a reconfigure. Especially # in the case that the leader a change in IP address it will not receive a relation event. @@ -1049,7 +1214,11 @@ def _set_user_created(self, user: MongoDBUser) -> None: self.app_peer_data[f"{user.get_username()}-user-created"] = "True" def _get_mongodb_config_for_user( - self, user: MongoDBUser, hosts: List[str] + self, + user: MongoDBUser, + hosts: List[str], + replset: str | None = None, + standalone: bool = False, ) -> MongoConfiguration: external_ca, _ = self.tls.get_tls_files(internal=False) internal_ca, _ = self.tls.get_tls_files(internal=True) @@ -1060,7 +1229,7 @@ def _get_mongodb_config_for_user( ) else: return MongoConfiguration( - replset=self.app.name, + replset=replset or self.app.name, database=user.get_database_name(), username=user.get_username(), password=password, # type: ignore @@ -1068,6 +1237,7 @@ def _get_mongodb_config_for_user( roles=set(user.get_roles()), tls_external=external_ca is not None, tls_internal=internal_ca is not None, + standalone=standalone, ) def _get_mongos_config_for_user( @@ -1268,7 +1438,7 @@ def restart_charm_services(self): container = self.unit.get_container(Config.CONTAINER_NAME) container.stop(Config.SERVICE_NAME) - container.add_layer("mongod", self._mongod_layer, combine=True) + container.add_layer(Config.SERVICE_NAME, self._mongod_layer, combine=True) if self.is_role(Config.Role.CONFIG_SERVER): container.add_layer("mongos", self._mongos_layer, combine=True) @@ -1604,6 +1774,17 @@ def is_sharding_component(self) -> bool: """Returns true if charm is running as a sharded component.""" return self.is_role(Config.Role.SHARD) or self.is_role(Config.Role.CONFIG_SERVER) + def is_cluster_on_same_revision(self) -> bool: + """Returns True if the cluster is using the same charm revision. 
+ + Note: This can only be determined by the config-server since shards are not integrated to + each other. + """ + if not self.is_role(Config.Role.CONFIG_SERVER): + raise NotConfigServerError("This check can only be run by the config-server.") + + return self.version_checker.are_related_apps_valid() + # END: helper functions # BEGIN: static methods @@ -1619,13 +1800,11 @@ def _pull_licenses(container: Container) -> None: ] for license_name in licenses: - try: + # Lazy copy, only if the file wasn't already pulled. + filename = Path(f"LICENSE_{license_name}") + if not filename.is_file(): license_file = container.pull(path=Config.get_license_path(license_name)) - f = open(f"LICENSE_{license_name}", "x") - f.write(str(license_file.read())) - f.close() - except FileExistsError: - pass + filename.write_text(str(license_file.read())) @staticmethod def _set_data_dir_permissions(container: Container) -> None: @@ -1634,6 +1813,9 @@ def _set_data_dir_permissions(container: Container) -> None: Until the ability to set fsGroup and fsGroupChangePolicy via Pod securityContext is available, we fix permissions incorrectly with chown. """ + # Ensure the log status dir exists + container.make_dir(Config.LogRotate.LOG_STATUS_DIR, make_parents=True) + for path in [Config.DATA_DIR, Config.LOG_DIR, Config.LogRotate.LOG_STATUS_DIR]: paths = container.list_files(path, itself=True) assert len(paths) == 1, "list_files doesn't return only the directory itself" diff --git a/src/config.py b/src/config.py index 1c05a8cf2..06321ad20 100644 --- a/src/config.py +++ b/src/config.py @@ -5,7 +5,7 @@ from typing import List, Literal -from ops.model import BlockedStatus +from ops.model import BlockedStatus, WaitingStatus class Config: @@ -116,6 +116,11 @@ class TLS: SECRET_CSR_LABEL = "csr-secret" SECRET_CHAIN_LABEL = "chain-secret" + class Upgrade: + """Upgrade-related constants.""" + + FEATURE_VERSION_6 = "6.0" + class Secrets: """Secrets related constants.""" @@ -143,6 +148,10 @@ class Status: # TODO Future PR add more status messages here as constants UNHEALTHY_UPGRADE = BlockedStatus("Unhealthy after upgrade.") + INCOMPATIBLE_UPGRADE = BlockedStatus( + "Refresh incompatible. Rollback to previous revision with `juju refresh`" + ) + WAITING_POST_UPGRADE_STATUS = WaitingStatus("Waiting for post upgrade checks") @staticmethod def get_license_path(license_name: str) -> str: diff --git a/src/exceptions.py b/src/exceptions.py index 946d7d73b..a874ebe7a 100644 --- a/src/exceptions.py +++ b/src/exceptions.py @@ -30,3 +30,15 @@ class MissingSecretError(MongoSecretError): class SecretAlreadyExistsError(MongoSecretError): """A secret that we want to create already exists.""" + + +class NotConfigServerError(Exception): + """Raised when an operation is performed on a component that is not a config server.""" + + +class ContainerNotReadyError(Exception): + """Raised when the container is not ready for a replan of services.""" + + +class FailedToUpdateFilesystem(Exception): + """Raised when pushing files to the workload container fails.""" diff --git a/src/upgrades/__init__.py b/src/upgrades/__init__.py new file mode 100644 index 000000000..d05500bd9 --- /dev/null +++ b/src/upgrades/__init__.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details.
+"""Code for k8s upgrades on MongoDB.""" diff --git a/src/upgrades/kubernetes_upgrades.py b/src/upgrades/kubernetes_upgrades.py new file mode 100644 index 000000000..1ba92f478 --- /dev/null +++ b/src/upgrades/kubernetes_upgrades.py @@ -0,0 +1,256 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +"""Kubernetes Upgrade Code. + +This code is slightly different from the code which was written originally. +It is required to deploy the application with `--trust` for this code to work +as it has to interact with the Kubernetes StatefulSet. +""" + +from functools import cached_property +from logging import getLogger +from typing import TYPE_CHECKING, List + +import lightkube +import lightkube.models.apps_v1 +import lightkube.resources.apps_v1 +import lightkube.resources.core_v1 +from charms.mongodb.v0.upgrade_helpers import AbstractUpgrade, UnitState, unit_number +from lightkube.core.exceptions import ApiError +from ops.charm import ActionEvent +from ops.model import ActiveStatus, StatusBase, Unit +from overrides import override + +if TYPE_CHECKING: + from charm import MongoDBCharm + +logger = getLogger() + + +class DeployedWithoutTrust(Exception): + """Deployed without `juju deploy --trust` or `juju trust`. + + Needed to access Kubernetes StatefulSet. + """ + + def __init__(self, *, app_name: str): + super().__init__( + f"Run `juju trust {app_name} --scope=cluster` and `juju resolve` for each unit (or remove & re-deploy {app_name} with `--trust`)" + ) + + +class _Partition: + """StatefulSet partition getter/setter. + + This very basic class allows the leader unit to interact with the + StatefulSet in order to change the partition. + This allows to have a rolling Update of the units. + """ + + # Note: I realize this isn't very Pythonic (it'd be nicer to use a property). Because of how + # ops is structured, we don't have access to the app name when we initialize this class. We + # need to only initialize this class once so that there is a single cache. Therefore, the app + # name needs to be passed as argument to the methods (instead of as an argument to __init__)— + # so we can't use a property. + + def __init__(self): + # Cache lightkube API call for duration of charm execution + self._cache: dict[str, int] = {} + + def get(self, *, app_name: str) -> int: + return self._cache.setdefault( + app_name, + lightkube.Client() + .get(res=lightkube.resources.apps_v1.StatefulSet, name=app_name) + .spec.updateStrategy.rollingUpdate.partition, + ) + + def set(self, *, app_name: str, value: int) -> None: + lightkube.Client().patch( + res=lightkube.resources.apps_v1.StatefulSet, + name=app_name, + obj={"spec": {"updateStrategy": {"rollingUpdate": {"partition": value}}}}, + ) + self._cache[app_name] = value + + +class KubernetesUpgrade(AbstractUpgrade): + """Code for Kubernetes Upgrade. + + This is the implementation of Kubernetes Upgrade methods. 
+ """ + + def __init__(self, charm: "MongoDBCharm", *args, **kwargs): + try: + partition.get(app_name=charm.app.name) + except ApiError as err: + if err.status.code == 403: + raise DeployedWithoutTrust(app_name=charm.app.name) + raise + super().__init__(charm, *args, **kwargs) + + @override + def _get_unit_healthy_status(self) -> StatusBase: + version = self._unit_workload_container_versions[self._unit.name] + if version == self._app_workload_container_version: + return ActiveStatus( + f'MongoDB {self._current_versions["workload"]} running; Charm revision {self._current_versions["charm"]}' + ) + return ActiveStatus( + f'MongoDB {self._current_versions["workload"]} running (restart pending); Charm revision {self._current_versions["charm"]}' + ) + + @property + def _partition(self) -> int: + """Specifies which units should upgrade. + + Unit numbers >= partition should upgrade + Unit numbers < partition should not upgrade + + https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + + For Kubernetes, unit numbers are guaranteed to be sequential. + """ + return partition.get(app_name=self._app_name) + + @_partition.setter + def _partition(self, value: int) -> None: + """Sets the partition number.""" + partition.set(app_name=self._app_name, value=value) + + @property + def upgrade_resumed(self) -> bool: + """Whether user has resumed upgrade with Juju action.""" + return self._partition < unit_number(self._sorted_units[0]) + + @cached_property # Cache lightkube API call for duration of charm execution + @override + def _unit_workload_container_versions(self) -> dict[str, str]: + """{Unit name: Kubernetes controller revision hash}. + + Even if the workload container version is the same, the workload will restart if the + controller revision hash changes. (Juju bug: https://bugs.launchpad.net/juju/+bug/2036246). + + Therefore, we must use the revision hash instead of the workload container version. (To + satisfy the requirement that if and only if this version changes, the workload will + restart.) + """ + pods = lightkube.Client().list( + res=lightkube.resources.core_v1.Pod, labels={"app.kubernetes.io/name": self._app_name} + ) + + def get_unit_name(pod_name: str) -> str: + *app_name, unit_number = pod_name.split("-") + return f'{"-".join(app_name)}/{unit_number}' + + return { + get_unit_name(pod.metadata.name): pod.metadata.labels["controller-revision-hash"] + for pod in pods + } + + @cached_property + @override + def _app_workload_container_version(self) -> str: + """App's Kubernetes controller revision hash.""" + stateful_set = lightkube.Client().get( + res=lightkube.resources.apps_v1.StatefulSet, name=self._app_name + ) + return stateful_set.status.updateRevision + + def _determine_partition( + self, units: List[Unit], action_event: ActionEvent | None, force: bool + ) -> int: + """Determine the new partition to use. + + We get the current state of each unit, and according to `action_event`, + `force` and the state, we decide the new value of the partition. + A specific case: + * If we don't have action event and the upgrade_order_index is 1, we + return because it means we're waiting for the resume-refresh/force-refresh event to run. 
+ """ + if not self.in_progress: + return 0 + logger.debug(f"{self._peer_relation.data=}") + for upgrade_order_index, unit in enumerate(units): + # Note: upgrade_order_index != unit number + state = self._peer_relation.data[unit].get("state") + if state: + state = UnitState(state) + if ( + not force and state is not UnitState.HEALTHY + ) or self._unit_workload_container_versions[ + unit.name + ] != self._app_workload_container_version: + if not action_event and upgrade_order_index == 1: + # User confirmation needed to resume upgrade (i.e. upgrade second unit) + return unit_number(units[0]) + return unit_number(unit) + return 0 + + def reconcile_partition( + self, *, action_event: ActionEvent | None = None + ) -> None: # noqa: C901 + """If ready, lower partition to upgrade next unit. + + If upgrade is not in progress, set partition to 0. (If a unit receives a stop event, it may + raise the partition even if an upgrade is not in progress.) + + Automatically upgrades next unit if all upgraded units are healthy—except if only one unit + has upgraded (need manual user confirmation [via Juju action] to upgrade next unit) + + Handle Juju action to: + - confirm first upgraded unit is healthy and resume upgrade + - force upgrade of next unit if 1 or more upgraded units are unhealthy + """ + force = bool(action_event and action_event.params["force"] is True) + + units = self._sorted_units + + partition_ = self._determine_partition( + units, + action_event, + force, + ) + logger.debug(f"{self._partition=}, {partition_=}") + # Only lower the partition—do not raise it. + # If this method is called during the action event and then called during another event a + # few seconds later, `determine_partition()` could return a lower number during the action + # and then a higher number a few seconds later. + # This can cause the unit to hang. + # Example: If partition is lowered to 1, unit 1 begins to upgrade, and partition is set to + # 2 right away, the unit/Juju agent will hang + # Details: https://chat.charmhub.io/charmhub/pl/on8rd538ufn4idgod139skkbfr + # This does not address the situation where another unit > 1 restarts and sets the + # partition during the `stop` event, but that is unlikely to occur in the small time window + # that causes the unit to hang. + if partition_ < self._partition: + self._partition = partition_ + logger.debug( + f"Lowered partition to {partition_} {action_event=} {force=} {self.in_progress=}" + ) + if action_event: + assert len(units) >= 2 + if self._partition > unit_number(units[1]): + message = "Highest number unit is unhealthy. Refresh will not resume." + logger.debug(f"Resume upgrade event failed: {message}") + action_event.fail(message) + return + if force: + # If a unit was unhealthy and the upgrade was forced, only the next unit will + # upgrade. As long as 1 or more units are unhealthy, the upgrade will need to be + # forced for each unit. + + # Include "Attempting to" because (on Kubernetes) we only control the partition, + # not which units upgrade. Kubernetes may not upgrade a unit even if the partition + # allows it (e.g. if the charm container of a higher unit is not ready). This is + # also applicable `if not force`, but is unlikely to happen since all units are + # healthy `if not force`. + message = f"Attempting to refresh unit {self._partition}." + else: + message = f"Refresh resumed. Unit {self._partition} is refreshing next." 
+ action_event.set_results({"result": message}) + logger.debug(f"Resume refresh succeeded: {message}") + + +partition = _Partition() diff --git a/src/upgrades/mongodb_upgrades.py b/src/upgrades/mongodb_upgrades.py new file mode 100644 index 000000000..a1fbf22e1 --- /dev/null +++ b/src/upgrades/mongodb_upgrades.py @@ -0,0 +1,261 @@ +#!/usr/bin/env python3 +"""Kubernetes Upgrade Code. + +This code is slightly different from the code which was written originally. +It is required to deploy the application with `--trust` for this code to work +as it has to interact with the Kubernetes StatefulSet. +The main differences are: + * Add the handling of workload version + version sharing on the cluster in the + upgrade handler + relation created handler. + * Add the two post upgrade events that check the cluster health and run it if + we are in state `RESTARTING`. +""" +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +from logging import getLogger +from typing import TYPE_CHECKING + +from charms.mongodb.v0.upgrade_helpers import ( + PEER_RELATION_ENDPOINT_NAME, + PRECHECK_ACTION_NAME, + RESUME_ACTION_NAME, + ROLLBACK_INSTRUCTIONS, + GenericMongoDBUpgrade, + PeerRelationNotReady, + PrecheckFailed, + UnitState, +) +from charms.mongodb.v1.mongos import BalancerNotEnabledError, MongosConnection +from ops import ActiveStatus +from ops.charm import ActionEvent +from ops.framework import EventBase, EventSource +from ops.model import BlockedStatus +from overrides import override +from tenacity import RetryError + +from config import Config +from upgrades.kubernetes_upgrades import KubernetesUpgrade + +if TYPE_CHECKING: + from charm import MongoDBCharm + +logger = getLogger() + + +class _PostUpgradeCheckMongoDB(EventBase): + """Run post upgrade check on MongoDB to verify that the cluster is healhty.""" + + def __init__(self, handle): + super().__init__(handle) + + +class MongoDBUpgrade(GenericMongoDBUpgrade): + """Handlers for upgrade events.""" + + post_app_upgrade_event = EventSource(_PostUpgradeCheckMongoDB) + post_cluster_upgrade_event = EventSource(_PostUpgradeCheckMongoDB) + + def __init__(self, charm: "MongoDBCharm"): + self.charm = charm + super().__init__(charm, PEER_RELATION_ENDPOINT_NAME) + + @override + def _observe_events(self, charm: "MongoDBCharm") -> None: + self.framework.observe( + charm.on[PRECHECK_ACTION_NAME].action, self._on_pre_upgrade_check_action + ) + self.framework.observe( + charm.on[PEER_RELATION_ENDPOINT_NAME].relation_created, + self._on_upgrade_peer_relation_created, + ) + self.framework.observe( + charm.on[PEER_RELATION_ENDPOINT_NAME].relation_changed, self._reconcile_upgrade + ) + self.framework.observe(charm.on[RESUME_ACTION_NAME].action, self._on_resume_upgrade_action) + self.framework.observe(self.post_app_upgrade_event, self.run_post_app_upgrade_task) + self.framework.observe(self.post_cluster_upgrade_event, self.run_post_cluster_upgrade_task) + + def _reconcile_upgrade(self, _, during_upgrade: bool = False) -> None: + """Handle upgrade events.""" + if not self._upgrade: + logger.debug("Peer relation not available") + return + if not self._upgrade.versions_set: + logger.debug("Peer relation not ready") + return + if self.charm.unit.is_leader() and not self._upgrade.in_progress: + # Run before checking `self._upgrade.is_compatible` in case incompatible upgrade was + # forced & completed on all units. 
+ self.charm.version_checker.set_version_across_all_relations() + self._upgrade.set_versions_in_app_databag() + + if self._upgrade.unit_state is UnitState.RESTARTING: # Kubernetes only + if not self._upgrade.is_compatible: + logger.info( + f"Refresh incompatible. If you accept potential *data loss* and *downtime*, you can continue with `{RESUME_ACTION_NAME} force=true`" + ) + self.charm.status.set_and_share_status(Config.Status.INCOMPATIBLE_UPGRADE) + return + if not during_upgrade and self.charm.db_initialised and self.charm.is_db_service_ready(): + self._upgrade.unit_state = UnitState.HEALTHY + self.charm.status.set_and_share_status(ActiveStatus()) + if self.charm.unit.is_leader(): + self._upgrade.reconcile_partition() + + self._set_upgrade_status() + + def _set_upgrade_status(self): + if self.charm.unit.is_leader(): + self.charm.app.status = self._upgrade.app_status or ActiveStatus() + # Set/clear upgrade unit status if no other unit status - upgrade status for units should + # have the lowest priority. + if ( + isinstance(self.charm.unit.status, ActiveStatus) + or ( + isinstance(self.charm.unit.status, BlockedStatus) + and self.charm.unit.status.message.startswith( + "Rollback with `juju refresh`. Pre-refresh check failed:" + ) + ) + or self.charm.unit.status == Config.Status.WAITING_POST_UPGRADE_STATUS + ): + self.charm.status.set_and_share_status( + self._upgrade.get_unit_juju_status() or ActiveStatus() + ) + + def _on_upgrade_peer_relation_created(self, _) -> None: + """First time the relation is created, we save the revisions.""" + if self.charm.unit.is_leader(): + self._upgrade.set_versions_in_app_databag() + + def _on_resume_upgrade_action(self, event: ActionEvent) -> None: + if not self.charm.unit.is_leader(): + message = f"Must run action on leader unit. (e.g. `juju run {self.charm.app.name}/leader {RESUME_ACTION_NAME}`)" + logger.debug(f"Resume refresh failed: {message}") + event.fail(message) + return + if not self._upgrade or not self._upgrade.in_progress: + message = "No upgrade in progress" + logger.debug(f"Resume refresh failed: {message}") + event.fail(message) + return + self._upgrade.reconcile_partition(action_event=event) + + def run_post_app_upgrade_task(self, event: EventBase): + """Runs the post upgrade check to verify that the cluster is healthy. + + By deferring before setting unit state to HEALTHY, the user will either: + 1. have to wait for the unit to resolve itself. + 2. have to run the force-upgrade action (to upgrade the next unit). + """ + logger.debug( + "Running post refresh checks to verify the deployment is not broken after refresh." + ) + self.run_post_upgrade_checks(event, finished_whole_cluster=False) + + if self._upgrade.unit_state != UnitState.HEALTHY: + logger.info( + f"Unit state is not healthy but {self._upgrade.unit_state}, not continuing post-refresh checks." + ) + return + + # Leader of config-server must wait for all shards to be upgraded before finalising the + # upgrade. + if not self.charm.unit.is_leader() or not self.charm.is_role(Config.Role.CONFIG_SERVER): + logger.debug("Post refresh check is completed.") + return + + self.charm.upgrade.post_cluster_upgrade_event.emit() + + def run_post_cluster_upgrade_task(self, event: EventBase) -> None: + """Waits for entire cluster to be upgraded before enabling the balancer.""" + # Leader of config-server must wait for all shards to be upgraded before finalising the + # upgrade.
+ if not self.charm.unit.is_leader() or not self.charm.is_role(Config.Role.CONFIG_SERVER): + return + + if not self.charm.is_cluster_on_same_revision(): + logger.debug("Waiting to finalise refresh, one or more shards need refresh.") + event.defer() + return + + logger.debug( + "Entire cluster has been refreshed, checking health of the cluster and enabling balancer." + ) + self.run_post_upgrade_checks(event, finished_whole_cluster=True) + + try: + with MongosConnection(self.charm.mongos_config) as mongos: + mongos.start_and_wait_for_balancer() + except BalancerNotEnabledError: + logger.debug( + "Need more time to enable the balancer after finishing the refresh. Deferring event." + ) + event.defer() + return + + self.set_mongos_feature_compatibilty_version(Config.Upgrade.FEATURE_VERSION_6) + + def _on_pre_upgrade_check_action(self, event: ActionEvent) -> None: + """Runs the pre-refresh checks to ensure that the deployment is ready for refresh.""" + if not self.charm.unit.is_leader(): + message = f"Must run action on leader unit. (e.g. `juju run {self.charm.app.name}/leader {PRECHECK_ACTION_NAME}`)" + logger.debug(f"Pre-refresh check failed: {message}") + event.fail(message) + return + if not self._upgrade or self._upgrade.in_progress: + message = "Upgrade already in progress" + logger.debug(f"Pre-refresh check failed: {message}") + event.fail(message) + return + try: + self._upgrade.pre_upgrade_check() + except PrecheckFailed as exception: + message = ( + f"Charm is *not* ready for refresh. Pre-refresh check failed: {exception.message}" + ) + logger.debug(f"Pre-refresh check failed: {message}") + event.fail(message) + return + message = "Charm is ready for upgrade" + event.set_results({"result": message}) + logger.debug(f"Pre-refresh check succeeded: {message}") + + @property + @override + def _upgrade(self) -> KubernetesUpgrade | None: + try: + return KubernetesUpgrade(self.charm) + except PeerRelationNotReady: + return None + + def run_post_upgrade_checks(self, event, finished_whole_cluster: bool) -> None: + """Runs post-upgrade checks for after a shard/config-server/replset/cluster upgrade.""" + upgrade_type = "unit" if not finished_whole_cluster else "sharded cluster" + try: + self.wait_for_cluster_healthy() + except RetryError: + logger.error( + "Cluster is not healthy after refreshing %s. Will retry next juju event.", + upgrade_type, + ) + logger.info(ROLLBACK_INSTRUCTIONS) + self.charm.status.set_and_share_status(Config.Status.UNHEALTHY_UPGRADE) + event.defer() + return + + if not self.is_cluster_able_to_read_write(): + logger.error( + "Cluster is not healthy after refreshing %s, writes not propagated throughout cluster. 
Deferring post refresh check.", + upgrade_type, + ) + logger.info(ROLLBACK_INSTRUCTIONS) + self.charm.status.set_and_share_status(Config.Status.UNHEALTHY_UPGRADE) + event.defer() + return + + if self.charm.unit.status == Config.Status.UNHEALTHY_UPGRADE: + self.charm.status.set_and_share_status(ActiveStatus()) + + self._upgrade.unit_state = UnitState.HEALTHY diff --git a/tests/integration/backup_tests/test_backups.py b/tests/integration/backup_tests/test_backups.py index fb0c56b44..f9c763dd2 100644 --- a/tests/integration/backup_tests/test_backups.py +++ b/tests/integration/backup_tests/test_backups.py @@ -103,7 +103,11 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: "mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"] } await ops_test.model.deploy( - my_charm, num_units=NUM_UNITS, resources=resources, series="jammy" + my_charm, + num_units=NUM_UNITS, + resources=resources, + series="jammy", + trust=True, ) await ops_test.model.wait_for_idle( apps=[DATABASE_APP_NAME], @@ -404,7 +408,11 @@ async def test_restore_new_cluster( db_charm = await ops_test.build_charm(".") resources = {"mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"]} await ops_test.model.deploy( - db_charm, num_units=3, resources=resources, application_name=new_cluster_app_name + db_charm, + num_units=3, + resources=resources, + application_name=new_cluster_app_name, + trust=True, ) await asyncio.gather( diff --git a/tests/integration/backup_tests/test_sharding_backups.py b/tests/integration/backup_tests/test_sharding_backups.py index 923413984..9dd60e6e0 100644 --- a/tests/integration/backup_tests/test_sharding_backups.py +++ b/tests/integration/backup_tests/test_sharding_backups.py @@ -311,6 +311,7 @@ async def deploy_cluster_backup_test( num_units=2, config={"role": "config-server"}, application_name=config_server_name, + trust=True, ) await ops_test.model.deploy( my_charm, @@ -318,6 +319,7 @@ async def deploy_cluster_backup_test( num_units=2, config={"role": "shard"}, application_name=shard_one_name, + trust=True, ) await ops_test.model.deploy( my_charm, @@ -325,6 +327,7 @@ async def deploy_cluster_backup_test( num_units=1, config={"role": "shard"}, application_name=shard_two_name, + trust=True, ) # deploy the s3 integrator charm diff --git a/tests/integration/ha_tests/helpers.py b/tests/integration/ha_tests/helpers.py index af2dfe187..83be99948 100644 --- a/tests/integration/ha_tests/helpers.py +++ b/tests/integration/ha_tests/helpers.py @@ -184,6 +184,7 @@ async def deploy_and_scale_mongodb( resources=resources, num_units=num_units, series="jammy", + trust=True, ) # TODO: remove raise_on_error when we move to juju 3.5 (DPE-4996) diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 9b4770112..723da4975 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -714,3 +714,9 @@ async def destroy_cluster(ops_test: OpsTest, applications: list[str]) -> None: # This case we don't raise an error in the context manager which fails to restore the # `update-status-hook-interval` value to it's former state. 
assert finished, "old cluster not destroyed successfully" + + +def get_juju_status(model_name: str, app_name: str) -> str: + return subprocess.check_output(f"juju status --model {model_name} {app_name}".split()).decode( + "utf-8" + ) diff --git a/tests/integration/metrics_tests/test_metrics.py b/tests/integration/metrics_tests/test_metrics.py index e183f1095..fc2d6a751 100644 --- a/tests/integration/metrics_tests/test_metrics.py +++ b/tests/integration/metrics_tests/test_metrics.py @@ -65,7 +65,11 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: my_charm = await ops_test.build_charm(".") resources = {"mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"]} await ops_test.model.deploy( - my_charm, num_units=NUM_UNITS, resources=resources, series="jammy" + my_charm, + num_units=NUM_UNITS, + resources=resources, + series="jammy", + trust=True, ) # TODO: remove raise_on_error when we move to juju 3.5 (DPE-4996) await ops_test.model.wait_for_idle( diff --git a/tests/integration/relation_tests/test_charm_relations.py b/tests/integration/relation_tests/test_charm_relations.py index 60825deac..652caf735 100644 --- a/tests/integration/relation_tests/test_charm_relations.py +++ b/tests/integration/relation_tests/test_charm_relations.py @@ -69,6 +69,7 @@ async def test_deploy_charms(ops_test: OpsTest): application_name=DATABASE_APP_NAME, resources=db_resources, num_units=REQUIRED_UNITS, + trust=True, ) ) @@ -83,6 +84,7 @@ async def test_deploy_charms(ops_test: OpsTest): application_name=ANOTHER_DATABASE_APP_NAME, resources=db_resources, num_units=REQUIRED_UNITS, + trust=True, ), ) diff --git a/tests/integration/sharding_tests/helpers.py b/tests/integration/sharding_tests/helpers.py index 9c7a98e79..5948d7268 100644 --- a/tests/integration/sharding_tests/helpers.py +++ b/tests/integration/sharding_tests/helpers.py @@ -65,6 +65,8 @@ async def deploy_cluster_components( config={"role": "config-server"}, application_name=CONFIG_SERVER_APP_NAME, channel=channel, + series="jammy", + trust=True, ) await ops_test.model.deploy( my_charm, @@ -73,6 +75,8 @@ async def deploy_cluster_components( config={"role": "shard"}, application_name=SHARD_ONE_APP_NAME, channel=channel, + series="jammy", + trust=True, ) await ops_test.model.deploy( my_charm, @@ -81,6 +85,8 @@ async def deploy_cluster_components( config={"role": "shard"}, application_name=SHARD_TWO_APP_NAME, channel=channel, + series="jammy", + trust=True, ) await ops_test.model.wait_for_idle( diff --git a/tests/integration/sharding_tests/test_mongos.py b/tests/integration/sharding_tests/test_mongos.py index 0492dd447..79582b802 100644 --- a/tests/integration/sharding_tests/test_mongos.py +++ b/tests/integration/sharding_tests/test_mongos.py @@ -32,6 +32,7 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: num_units=1, config={"role": "config-server"}, application_name=CONFIG_SERVER_APP_NAME, + trust=True, ) await ops_test.model.deploy( mongodb_charm, @@ -39,6 +40,7 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: num_units=1, config={"role": "shard"}, application_name=SHARD_ONE_APP_NAME, + trust=True, ) await ops_test.model.deploy( diff --git a/tests/integration/sharding_tests/test_sharding.py b/tests/integration/sharding_tests/test_sharding.py index 1e52114f0..9fa7c4584 100644 --- a/tests/integration/sharding_tests/test_sharding.py +++ b/tests/integration/sharding_tests/test_sharding.py @@ -53,6 +53,7 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: num_units=2, config={"role": 
"config-server"}, application_name=CONFIG_SERVER_APP_NAME, + trust=True, ) await ops_test.model.deploy( my_charm, @@ -60,6 +61,7 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: num_units=2, config={"role": "shard"}, application_name=SHARD_ONE_APP_NAME, + trust=True, ) await ops_test.model.deploy( my_charm, @@ -67,6 +69,7 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: num_units=2, config={"role": "shard"}, application_name=SHARD_TWO_APP_NAME, + trust=True, ) await ops_test.model.deploy( my_charm, @@ -74,6 +77,7 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: num_units=2, config={"role": "shard"}, application_name=SHARD_THREE_APP_NAME, + trust=True, ) # TODO: remove raise_on_error when we move to juju 3.5 (DPE-4996) diff --git a/tests/integration/sharding_tests/test_sharding_relations.py b/tests/integration/sharding_tests/test_sharding_relations.py index 1abdb0b56..3bd65e93d 100644 --- a/tests/integration/sharding_tests/test_sharding_relations.py +++ b/tests/integration/sharding_tests/test_sharding_relations.py @@ -40,6 +40,7 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: database_charm, application_name=REPLICATION_APP_NAME, resources=resources, + trust=True, ) await ops_test.model.deploy( @@ -47,18 +48,21 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: config={"role": "config-server"}, resources=resources, application_name=CONFIG_SERVER_ONE_APP_NAME, + trust=True, ) await ops_test.model.deploy( database_charm, config={"role": "config-server"}, resources=resources, application_name=CONFIG_SERVER_TWO_APP_NAME, + trust=True, ) await ops_test.model.deploy( database_charm, resources=resources, config={"role": "shard"}, application_name=SHARD_ONE_APP_NAME, + trust=True, ) # Will be enabled after DPE-5040 is done diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py index 4321c958e..a13562b9a 100644 --- a/tests/integration/test_charm.py +++ b/tests/integration/test_charm.py @@ -66,6 +66,7 @@ async def test_build_and_deploy(ops_test: OpsTest): application_name=app_name, num_units=len(UNIT_IDS), series="jammy", + trust=True, ) # issuing dummy update_status just to trigger an event diff --git a/tests/integration/test_teardown.py b/tests/integration/test_teardown.py index 589f7ad7f..a1047e713 100644 --- a/tests/integration/test_teardown.py +++ b/tests/integration/test_teardown.py @@ -37,6 +37,7 @@ async def test_build_and_deploy(ops_test: OpsTest): application_name=app_name, num_units=1, series=SERIES, + trust=True, ) # issuing dummy update_status just to trigger an event diff --git a/tests/integration/tls_tests/test_tls.py b/tests/integration/tls_tests/test_tls.py index 4fad4fcb4..6bfbe159f 100644 --- a/tests/integration/tls_tests/test_tls.py +++ b/tests/integration/tls_tests/test_tls.py @@ -40,7 +40,9 @@ async def test_build_and_deploy(ops_test: OpsTest) -> None: resources = { "mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"] } - await ops_test.model.deploy(my_charm, num_units=3, resources=resources, series="jammy") + await ops_test.model.deploy( + my_charm, num_units=3, resources=resources, series="jammy", trust=True + ) # TODO: remove raise_on_error when we move to juju 3.5 (DPE-4996) await ops_test.model.wait_for_idle( apps=[app_name], status="active", timeout=2000, raise_on_error=False diff --git a/tests/integration/upgrades/helpers.py b/tests/integration/upgrades/helpers.py index 9a646f09e..47f4308be 100644 --- a/tests/integration/upgrades/helpers.py +++ 
b/tests/integration/upgrades/helpers.py @@ -17,14 +17,14 @@ async def assert_successful_run_upgrade_sequence( ) -> None: """Runs the upgrade sequence on a given app.""" leader_unit = await backup_helpers.get_leader_unit(ops_test, app_name) - # action = await leader_unit.run_action("pre-upgrade-check") - # await action.wait() - # assert action.status == "completed", "pre-upgrade-check failed, expected to succeed." + action = await leader_unit.run_action("pre-refresh-check") + await action.wait() + assert action.status == "completed", "pre-refresh-check failed, expected to succeed." + + logger.info(f"Upgrading {app_name}") await ops_test.model.applications[app_name].refresh(path=new_charm) - await ops_test.model.wait_for_idle( - apps=[app_name], status="active", timeout=1000, idle_period=30 - ) + await ops_test.model.wait_for_idle(apps=[app_name], timeout=1000, idle_period=30) # resume upgrade only needs to be ran when: # 1. there are more than one units in the application @@ -32,14 +32,26 @@ async def assert_successful_run_upgrade_sequence( if len(ops_test.model.applications[app_name].units) < 2: return - if "resume-upgrade" not in ops_test.model.applications[app_name].status_message: + if "resume-refresh" not in ops_test.model.applications[app_name].status_message: return - logger.info(f"Calling resume-upgrade for {app_name}") - action = await leader_unit.run_action("resume-upgrade") + logger.info(f"Calling resume-refresh for {app_name}") + action = await leader_unit.run_action("resume-refresh") await action.wait() - assert action.status == "completed", "resume-upgrade failed, expected to succeed." + assert action.status == "completed", "resume-refresh failed, expected to succeed." - await ops_test.model.wait_for_idle( - apps=[app_name], status="active", timeout=1000, idle_period=30 + await ops_test.model.wait_for_idle(apps=[app_name], timeout=1000, idle_period=30) + + +async def get_workload_version(ops_test: OpsTest, unit_name: str) -> str: + """Get the workload version of the deployed router charm.""" + return_code, output, _ = await ops_test.juju( + "ssh", + unit_name, + "sudo", + "cat", + f"/var/lib/juju/agents/unit-{unit_name.replace('/', '-')}/charm/workload_version", ) + + assert return_code == 0 + return output.strip() diff --git a/tests/integration/upgrades/test_local_sharding_upgrades.py b/tests/integration/upgrades/test_local_sharding_upgrades.py new file mode 100644 index 000000000..65d2bdeda --- /dev/null +++ b/tests/integration/upgrades/test_local_sharding_upgrades.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python3 +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+import shutil +import zipfile +from collections.abc import AsyncGenerator +from logging import getLogger +from pathlib import Path + +import pytest +import pytest_asyncio +from pytest_operator.plugin import OpsTest + +from ..backup_tests.helpers import get_leader_unit +from ..ha_tests.helpers import deploy_and_scale_application, get_direct_mongo_client +from ..helpers import MONGOS_PORT, mongodb_uri +from ..sharding_tests import writes_helpers +from ..sharding_tests.helpers import deploy_cluster_components, integrate_cluster +from .helpers import assert_successful_run_upgrade_sequence, get_workload_version + +SHARD_ONE_DB_NAME = "shard_one_db" +SHARD_TWO_DB_NAME = "shard_two_db" +SHARD_ONE_COLL_NAME = "test_collection" +SHARD_TWO_COLL_NAME = "test_collection" +SHARD_ONE_APP_NAME = "shard-one" +SHARD_TWO_APP_NAME = "shard-two" +CONFIG_SERVER_APP_NAME = "config-server" +CLUSTER_COMPONENTS = [SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME, CONFIG_SERVER_APP_NAME] +SHARD_APPS = [SHARD_ONE_APP_NAME, SHARD_TWO_APP_NAME] +WRITE_APP = "application" + +TIMEOUT = 15 * 60 + +logger = getLogger() + + +@pytest.fixture() +async def add_writes_to_shards(ops_test: OpsTest): + """Adds writes to each shard before test starts and clears writes at the end of the test.""" + application_unit = ops_test.model.applications[WRITE_APP].units[0] + + start_writes_action = await application_unit.run_action( + "start-continuous-writes", + **{"db-name": SHARD_ONE_DB_NAME, "coll-name": SHARD_ONE_COLL_NAME}, + ) + await start_writes_action.wait() + + start_writes_action = await application_unit.run_action( + "start-continuous-writes", + **{"db-name": SHARD_TWO_DB_NAME, "coll-name": SHARD_TWO_COLL_NAME}, + ) + await start_writes_action.wait() + + # move continuous writes so they are present on each shard + mongos_client = await get_direct_mongo_client( + ops_test, app_name=CONFIG_SERVER_APP_NAME, mongos=True + ) + mongos_client.admin.command("movePrimary", SHARD_ONE_DB_NAME, to=SHARD_ONE_APP_NAME) + mongos_client.admin.command("movePrimary", SHARD_TWO_DB_NAME, to=SHARD_TWO_APP_NAME) + + yield + clear_writes_action = await application_unit.run_action( + "clear-continuous-writes", + **{"db-name": SHARD_ONE_DB_NAME, "coll-name": SHARD_ONE_COLL_NAME}, + ) + await clear_writes_action.wait() + + clear_writes_action = await application_unit.run_action( + "clear-continuous-writes", + **{"db-name": SHARD_TWO_DB_NAME, "coll-name": SHARD_TWO_APP_NAME}, + ) + await clear_writes_action.wait() + + +@pytest_asyncio.fixture(scope="module") +async def local_charm(ops_test: OpsTest) -> AsyncGenerator[Path]: + """Builds the regular charm.""" + charm = await ops_test.build_charm(".") + yield charm + + +@pytest_asyncio.fixture +def righty_upgrade_charm(local_charm, tmp_path: Path): + right_charm = tmp_path / "right.charm" + shutil.copy(local_charm, right_charm) + workload_version = Path("workload_version").read_text().strip() + charm_internal_version = Path("charm_internal_version").read_text().strip() + + [major, minor, patch] = workload_version.split(".") + + with zipfile.ZipFile(right_charm, mode="a") as charm_zip: + charm_zip.writestr("workload_version", f"{major}.{int(minor)+1}.{patch}+testupgrade") + charm_zip.writestr("charm_internal_version", f"{charm_internal_version}-upgraded") + + yield right_charm + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test: OpsTest) -> None: + """Build deploy, and integrate, a sharded cluster.""" + await deploy_and_scale_application(ops_test) + num_units_cluster_config 
= { + CONFIG_SERVER_APP_NAME: 3, + SHARD_ONE_APP_NAME: 3, + SHARD_TWO_APP_NAME: 3, + } + await deploy_cluster_components( + ops_test, + num_units_cluster_config=num_units_cluster_config, + ) + + await ops_test.model.wait_for_idle( + apps=CLUSTER_COMPONENTS, + idle_period=20, + raise_on_blocked=False, + raise_on_error=False, + ) + + await integrate_cluster(ops_test) + await ops_test.model.wait_for_idle( + apps=CLUSTER_COMPONENTS, + idle_period=20, + timeout=TIMEOUT, + ) + # configure write app to use mongos uri + mongos_uri = await mongodb_uri(ops_test, app_name=CONFIG_SERVER_APP_NAME, port=MONGOS_PORT) + await ops_test.model.applications[WRITE_APP].set_config({"mongos-uri": mongos_uri}) + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_pre_upgrade_check_success(ops_test: OpsTest) -> None: + """Verify that the pre-refresh check succeeds in the happy path.""" + for sharding_component in CLUSTER_COMPONENTS: + leader_unit = await get_leader_unit(ops_test, sharding_component) + action = await leader_unit.run_action("pre-refresh-check") + await action.wait() + assert action.status == "completed", "pre-refresh-check failed, expected to succeed." + + +@pytest.mark.group(1) +@pytest.mark.abort_on_fail +async def test_upgrade_cluster(ops_test: OpsTest, righty_upgrade_charm, add_writes_to_shards): + initial_version = Path("workload_version").read_text().strip() + [major, minor, patch] = initial_version.split(".") + new_version = f"{major}.{int(minor)+1}.{patch}+testupgrade" + + for sharding_component in CLUSTER_COMPONENTS: + await assert_successful_run_upgrade_sequence( + ops_test, sharding_component, new_charm=righty_upgrade_charm + ) + + await ops_test.model.wait_for_idle( + apps=CLUSTER_COMPONENTS, status="active", idle_period=30, timeout=TIMEOUT + ) + + application_unit = ops_test.model.applications[WRITE_APP].units[0] + stop_writes_action = await application_unit.run_action( + "stop-continuous-writes", + **{"db-name": SHARD_ONE_DB_NAME, "coll-name": SHARD_ONE_COLL_NAME}, + ) + await stop_writes_action.wait() + shard_one_expected_writes = int(stop_writes_action.results["writes"]) + stop_writes_action = await application_unit.run_action( + "stop-continuous-writes", + **{"db-name": SHARD_TWO_DB_NAME, "coll-name": SHARD_TWO_COLL_NAME}, + ) + await stop_writes_action.wait() + shard_two_total_expected_writes = int(stop_writes_action.results["writes"]) + + actual_shard_one_writes = await writes_helpers.count_shard_writes( + ops_test, + config_server_name=CONFIG_SERVER_APP_NAME, + db_name=SHARD_ONE_DB_NAME, + ) + actual_shard_two_writes = await writes_helpers.count_shard_writes( + ops_test, + config_server_name=CONFIG_SERVER_APP_NAME, + db_name=SHARD_TWO_DB_NAME, + ) + + assert ( + actual_shard_one_writes == shard_one_expected_writes + ), "missed writes during upgrade procedure." + assert ( + actual_shard_two_writes == shard_two_total_expected_writes + ), "missed writes during upgrade procedure." 
+    logger.info(f"{actual_shard_one_writes = }, {actual_shard_two_writes = }")
+
+    for sharding_component in CLUSTER_COMPONENTS:
+        for unit in ops_test.model.applications[sharding_component].units:
+            workload_version = await get_workload_version(ops_test, unit.name)
+            assert workload_version == new_version
+            assert initial_version != workload_version
diff --git a/tests/integration/upgrades/test_local_upgrades.py b/tests/integration/upgrades/test_local_upgrades.py
new file mode 100644
index 000000000..2634e1db5
--- /dev/null
+++ b/tests/integration/upgrades/test_local_upgrades.py
@@ -0,0 +1,107 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+import logging
+import shutil
+import zipfile
+from collections.abc import AsyncGenerator
+from pathlib import Path
+
+import pytest
+import pytest_asyncio
+import tenacity
+from pytest_operator.plugin import OpsTest
+
+from ..helpers import APP_NAME, METADATA, get_juju_status, get_leader_id
+from .helpers import get_workload_version
+
+logger = logging.getLogger(__name__)
+
+UPGRADE_TIMEOUT = 15 * 60
+SMALL_TIMEOUT = 5 * 60
+
+
+@pytest_asyncio.fixture(scope="module")
+async def local_charm(ops_test: OpsTest) -> AsyncGenerator[Path]:
+    """Builds the regular charm."""
+    charm = await ops_test.build_charm(".")
+    yield charm
+
+
+@pytest_asyncio.fixture
+def righty_upgrade_charm(local_charm, tmp_path: Path):
+    """Copy the local charm and bump its minor workload version for refresh testing."""
+    right_charm = tmp_path / "right.charm"
+    shutil.copy(local_charm, right_charm)
+    workload_version = Path("workload_version").read_text().strip()
+
+    [major, minor, patch] = workload_version.split(".")
+
+    with zipfile.ZipFile(right_charm, mode="a") as charm_zip:
+        charm_zip.writestr("workload_version", f"{major}.{int(minor)+1}.{patch}+testupgrade")
+
+    yield right_charm
+
+
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_build_and_deploy(ops_test: OpsTest, local_charm: Path):
+    """Deploy a three-unit replica set from the locally built charm."""
+    resources = {"mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"]}
+    await ops_test.model.deploy(
+        local_charm,
+        resources=resources,
+        application_name=APP_NAME,
+        num_units=3,
+        series="jammy",
+        trust=True,
+    )
+    await ops_test.model.wait_for_idle(
+        apps=[APP_NAME], status="active", raise_on_blocked=True, timeout=1000, raise_on_error=False
+    )
+
+
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_upgrade(ops_test: OpsTest, righty_upgrade_charm: Path) -> None:
+    """Refresh to the bumped charm and verify every unit reports the new workload version."""
+    mongodb_application = ops_test.model.applications[APP_NAME]
+    await mongodb_application.refresh(path=righty_upgrade_charm)
+
+    initial_version = Path("workload_version").read_text().strip()
+    [major, minor, patch] = initial_version.split(".")
+    new_version = f"{major}.{int(minor)+1}.{patch}+testupgrade"
+
+    logger.info("Wait for refresh to stall")
+    await ops_test.model.block_until(
+        lambda: mongodb_application.status == "blocked", timeout=UPGRADE_TIMEOUT
+    )
+    assert (
+        "resume-refresh" in mongodb_application.status_message
+    ), "MongoDB application status does not indicate that the user should resume the refresh."
+
+    for attempt in tenacity.Retrying(
+        reraise=True,
+        stop=tenacity.stop_after_delay(SMALL_TIMEOUT),
+        wait=tenacity.wait_fixed(10),
+    ):
+        with attempt:
+            assert "+testupgrade" in get_juju_status(
+                ops_test.model.name, APP_NAME
+            ), "None of the units are upgraded"
+
+    logger.info("Running resume-refresh on the leader unit")
+    leader_id = await get_leader_id(ops_test, APP_NAME)
+    leader_unit = ops_test.model.units.get(f"{APP_NAME}/{leader_id}")
+    action = await leader_unit.run_action("resume-refresh")
+    await action.wait()
+
+    await ops_test.model.wait_for_idle(
+        [APP_NAME],
+        status="active",
+        idle_period=30,
+        timeout=UPGRADE_TIMEOUT,
+    )
+
+    for unit in mongodb_application.units:
+        workload_version = await get_workload_version(ops_test, unit.name)
+        assert workload_version == new_version
+        assert initial_version != workload_version
diff --git a/tests/integration/upgrades/test_rollback.py b/tests/integration/upgrades/test_rollback.py
new file mode 100644
index 000000000..5d12d0c94
--- /dev/null
+++ b/tests/integration/upgrades/test_rollback.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python3
+# Copyright 2024 Canonical Ltd.
+# See LICENSE file for licensing details.
+import logging
+import shutil
+import time
+import zipfile
+from collections.abc import AsyncGenerator
+from pathlib import Path
+
+import pytest
+import pytest_asyncio
+import tenacity
+from pytest_operator.plugin import OpsTest
+
+from ..helpers import APP_NAME, METADATA, get_juju_status, get_leader_id
+from .helpers import get_workload_version
+
+logger = logging.getLogger(__name__)
+
+UPGRADE_TIMEOUT = 15 * 60
+
+
+@pytest_asyncio.fixture
+async def local_charm(ops_test: OpsTest) -> AsyncGenerator[Path]:
+    """Builds the regular charm."""
+    charm = await ops_test.build_charm(".")
+    yield charm
+
+
+@pytest_asyncio.fixture
+def faulty_upgrade_charm(local_charm, tmp_path: Path):
+    """Copy the local charm and lower its major workload version to make the refresh incompatible."""
+    fault_charm = tmp_path / "fault_charm.charm"
+    shutil.copy(local_charm, fault_charm)
+    workload_version = Path("workload_version").read_text().strip()
+
+    [major, minor, patch] = workload_version.split(".")
+
+    with zipfile.ZipFile(fault_charm, mode="a") as charm_zip:
+        charm_zip.writestr("workload_version", f"{int(major) - 1}.{minor}.{patch}+testrollback")
+
+    yield fault_charm
+
+
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_build_and_deploy(ops_test: OpsTest, local_charm: Path):
+    """Deploy a three-unit replica set from the locally built charm."""
+    resources = {"mongodb-image": METADATA["resources"]["mongodb-image"]["upstream-source"]}
+    await ops_test.model.deploy(
+        local_charm,
+        resources=resources,
+        application_name=APP_NAME,
+        num_units=3,
+        series="jammy",
+        trust=True,
+    )
+    await ops_test.model.wait_for_idle(
+        apps=[APP_NAME], status="active", raise_on_blocked=True, timeout=1000, raise_on_error=False
+    )
+
+
+@pytest.mark.group(1)
+@pytest.mark.abort_on_fail
+async def test_rollback(ops_test: OpsTest, local_charm, faulty_upgrade_charm) -> None:
+    """Refresh to an incompatible charm, then roll back and verify the original version."""
+    mongodb_application = ops_test.model.applications[APP_NAME]
+
+    initial_version = Path("workload_version").read_text().strip()
+
+    await mongodb_application.refresh(path=faulty_upgrade_charm)
+    logger.info("Wait for refresh to fail")
+
+    for attempt in tenacity.Retrying(
+        reraise=True,
+        stop=tenacity.stop_after_delay(UPGRADE_TIMEOUT),
+        wait=tenacity.wait_fixed(10),
+    ):
+        with attempt:
+            assert "Refresh incompatible" in get_juju_status(
+                ops_test.model.name, APP_NAME
+            ), "Status does not indicate an incompatible refresh"
+
+    logger.info("Re-refresh the charm")
+    await 
mongodb_application.refresh(path=local_charm) + # sleep to ensure that active status from before re-refresh does not affect below check + time.sleep(15) + await ops_test.model.block_until( + lambda: all(unit.workload_status == "active" for unit in mongodb_application.units) + and all(unit.agent_status == "idle" for unit in mongodb_application.units) + ) + + logger.info("Running resume-refresh on the leader unit") + leader_id = await get_leader_id(ops_test, APP_NAME) + leader_unit = ops_test.model.units.get(f"{APP_NAME}/{leader_id}") + action = await leader_unit.run_action("resume-refresh") + await action.wait() + + logger.info("Wait for the charm to be rolled back") + await ops_test.model.wait_for_idle( + apps=[APP_NAME], + status="active", + timeout=1000, + idle_period=30, + ) + + for unit in mongodb_application.units: + workload_version = await get_workload_version(ops_test, unit.name) + assert workload_version == initial_version diff --git a/tests/integration/upgrades/test_sharding_upgrades.py b/tests/integration/upgrades/test_sharding_upgrades.py index e6cd986e8..4c43b3d1f 100644 --- a/tests/integration/upgrades/test_sharding_upgrades.py +++ b/tests/integration/upgrades/test_sharding_upgrades.py @@ -143,19 +143,19 @@ async def test_upgrade(ops_test: OpsTest, add_writes_to_shards) -> None: @pytest.mark.group(1) @pytest.mark.abort_on_fail async def test_pre_upgrade_check_success(ops_test: OpsTest) -> None: - """Verify that the pre-upgrade check succeeds in the happy path.""" + """Verify that the pre-refresh check succeeds in the happy path.""" for sharding_component in CLUSTER_COMPONENTS: leader_unit = await backup_helpers.get_leader_unit(ops_test, sharding_component) - action = await leader_unit.run_action("pre-upgrade-check") + action = await leader_unit.run_action("pre-refresh-check") await action.wait() - assert action.status == "completed", "pre-upgrade-check failed, expected to succeed." + assert action.status == "completed", "pre-refresh-check failed, expected to succeed." @pytest.mark.skip() @pytest.mark.group(1) @pytest.mark.abort_on_fail async def test_pre_upgrade_check_failure(ops_test: OpsTest, chaos_mesh) -> None: - """Verify that the pre-upgrade check fails if there is a problem with one of the shards.""" + """Verify that the pre-refresh check fails if there is a problem with one of the shards.""" leader_unit = await backup_helpers.get_leader_unit(ops_test, SHARD_TWO_APP_NAME) non_leader_unit = None @@ -171,9 +171,9 @@ async def test_pre_upgrade_check_failure(ops_test: OpsTest, chaos_mesh) -> None: for sharding_component in CLUSTER_COMPONENTS: leader_unit = await backup_helpers.get_leader_unit(ops_test, sharding_component) - action = await leader_unit.run_action("pre-upgrade-check") + action = await leader_unit.run_action("pre-refresh-check") await action.wait() - assert action.status == "completed", "pre-upgrade-check failed, expected to succeed." + assert action.status == "completed", "pre-refresh-check failed, expected to succeed." 
# restore network after test remove_instance_isolation(ops_test) diff --git a/tests/integration/upgrades/test_upgrades.py b/tests/integration/upgrades/test_upgrades.py index 8a6a7d591..752e2f48a 100644 --- a/tests/integration/upgrades/test_upgrades.py +++ b/tests/integration/upgrades/test_upgrades.py @@ -76,10 +76,10 @@ async def test_preflight_check(ops_test: OpsTest) -> None: db_app_name = await get_app_name(ops_test) leader_unit = await backup_helpers.get_leader_unit(ops_test, db_app_name) - logger.info("Calling pre-upgrade-check") - action = await leader_unit.run_action("pre-upgrade-check") + logger.info("Calling pre-refresh-check") + action = await leader_unit.run_action("pre-refresh-check") await action.wait() - assert action.status == "completed", "pre-upgrade-check failed, expected to succeed." + assert action.status == "completed", "pre-refresh-check failed, expected to succeed." @pytest.mark.skip("skip until upgrades work has been released to charmhub") @@ -100,10 +100,10 @@ async def test_preflight_check_failure(ops_test: OpsTest, chaos_mesh) -> None: ops_test, non_leader_unit, leader_unit, "(not reachable/healthy)" ) - logger.info("Calling pre-upgrade-check") - action = await leader_unit.run_action("pre-upgrade-check") + logger.info("Calling pre-refresh-check") + action = await leader_unit.run_action("pre-refresh-check") await action.wait() - assert action.status == "completed", "pre-upgrade-check failed, expected to succeed." + assert action.status == "completed", "pre-refresh-check failed, expected to succeed." # restore network after test remove_instance_isolation(ops_test) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py index fbdaeeff0..e9dc6afff 100644 --- a/tests/unit/test_charm.py +++ b/tests/unit/test_charm.py @@ -35,6 +35,13 @@ logger = logging.getLogger(__name__) +@pytest.fixture(autouse=True) +def patch_upgrades(monkeypatch): + monkeypatch.setattr("charms.mongodb.v0.upgrade_helpers.AbstractUpgrade.in_progress", False) + monkeypatch.setattr("charm.kubernetes_upgrades._Partition.get", lambda *args, **kwargs: 0) + monkeypatch.setattr("charm.kubernetes_upgrades._Partition.set", lambda *args, **kwargs: None) + + class TestCharm(unittest.TestCase): @patch("charm.get_charm_revision") @patch_network_get(private_address="1.1.1.1") @@ -47,6 +54,7 @@ def setUp(self, *unused): self.harness.add_oci_resource("mongodb-image", mongo_resource) self.harness.begin() self.harness.add_relation("database-peers", "mongodb-peers") + self.harness.add_relation("upgrade-version-a", "upgrade-version-a") self.harness.set_leader(True) self.charm = self.harness.charm self.addCleanup(self.harness.cleanup) @@ -75,7 +83,7 @@ def test_mongod_pebble_ready(self, connect_exporter, fix_data_dir, defer, pull_l "command": "sh -c 'logrotate /etc/logrotate.d/mongodb; sleep 1'", "user": "mongodb", "group": "mongodb", - "backoff-delay": "1m", + "backoff-delay": "1m0s", "backoff-factor": 1, }, "mongod": { @@ -122,7 +130,9 @@ def test_mongod_pebble_ready(self, connect_exporter, fix_data_dir, defer, pull_l @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBCharm._push_keyfile_to_workload") - def test_pebble_ready_cannot_retrieve_container(self, push_keyfile_to_workload, defer): + def test_pebble_ready_cannot_retrieve_container( + self, push_keyfile_to_workload, defer, *unused + ): """Test verifies behavior when retrieving container results in ModelError in pebble ready. 
Verifies that when a failure to get a container occurs, that that failure is raised and @@ -144,7 +154,7 @@ def test_pebble_ready_cannot_retrieve_container(self, push_keyfile_to_workload, @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBCharm._push_keyfile_to_workload") - def test_pebble_ready_container_cannot_connect(self, push_keyfile_to_workload, defer): + def test_pebble_ready_container_cannot_connect(self, push_keyfile_to_workload, defer, *unused): """Test verifies behavior when cannot connect to container in pebble ready function. Verifies that when a failure to connect to container results in a deferral and that no @@ -166,7 +176,9 @@ def test_pebble_ready_container_cannot_connect(self, push_keyfile_to_workload, d @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBCharm._push_keyfile_to_workload") - def test_pebble_ready_push_keyfile_to_workload_failure(self, push_keyfile_to_workload, defer): + def test_pebble_ready_push_keyfile_to_workload_failure( + self, push_keyfile_to_workload, defer, *unused + ): """Test verifies behavior when setting keyfile fails. Verifies that when a failure to set keyfile occurs that there is no attempt to add layers @@ -207,7 +219,9 @@ def test_pebble_ready_no_storage_yet(self, defer): @patch("charm.MongoDBProvider") @patch("charm.MongoDBCharm._init_operator_user") @patch("charm.MongoDBConnection") - def test_start_cannot_retrieve_container(self, connection, init_user, provider, defer): + def test_start_cannot_retrieve_container( + self, connection, init_user, provider, defer, *unused + ): """Verifies that failures to get container result in a ModelError being raised. Further this function verifies that on error no attempts to set up the replica set or @@ -234,7 +248,7 @@ def test_start_cannot_retrieve_container(self, connection, init_user, provider, @patch("charm.MongoDBProvider") @patch("charm.MongoDBCharm._init_operator_user") @patch("charm.MongoDBConnection") - def test_start_container_cannot_connect(self, connection, init_user, provider, defer): + def test_start_container_cannot_connect(self, connection, init_user, provider, defer, *unused): """Tests inability to connect results in deferral. Verifies that if connection is not possible, that there are no attempts to set up the @@ -261,7 +275,7 @@ def test_start_container_cannot_connect(self, connection, init_user, provider, d @patch("charm.MongoDBProvider") @patch("charm.MongoDBCharm._init_operator_user") @patch("charm.MongoDBConnection") - def test_start_container_does_not_exist(self, connection, init_user, provider, defer): + def test_start_container_does_not_exist(self, connection, init_user, provider, defer, *unused): """Tests lack of existence of files on container results in deferral. Verifies that if files do not exists, that there are no attempts to set up the replica set @@ -285,11 +299,12 @@ def test_start_container_does_not_exist(self, connection, init_user, provider, d self.assertEqual("db_initialised" in self.harness.charm.app_peer_data, False) defer.assert_called() + @patch("charm.MongoDBCharm._configure_container", return_value=None) @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBProvider") @patch("charm.MongoDBCharm._init_operator_user") @patch("charm.MongoDBConnection") - def test_start_container_exists_fails(self, connection, init_user, provider, defer): + def test_start_container_exists_fails(self, connection, init_user, provider, defer, *unused): """Tests failure in checking file existence on container raises an APIError. 
Verifies that when checking container files raises an API Error, we raise that same error @@ -314,11 +329,12 @@ def test_start_container_exists_fails(self, connection, init_user, provider, def self.assertEqual("db_initialised" in self.harness.charm.app_peer_data, False) defer.assert_not_called() + @patch("charm.MongoDBCharm._configure_container", return_value=None) @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBProvider") @patch("charm.MongoDBCharm._init_operator_user") @patch("charm.MongoDBConnection") - def test_start_already_initialised(self, connection, init_user, provider, defer): + def test_start_already_initialised(self, connection, init_user, provider, defer, *unused): """Tests that if the replica set has already been set up that we return. Verifies that if the replica set is already set up that no attempts to set it up again are @@ -347,7 +363,7 @@ def test_start_already_initialised(self, connection, init_user, provider, defer) @patch("charm.MongoDBProvider") @patch("charm.MongoDBCharm._init_operator_user") @patch("charm.MongoDBConnection") - def test_start_mongod_not_ready(self, connection, init_user, provider, defer): + def test_start_mongod_not_ready(self, connection, init_user, provider, defer, *unused): """Tests that if mongod is not ready that we defer and return. Verifies that if mongod is not ready that no attempts to set up the replica set and set up @@ -374,13 +390,11 @@ def test_start_mongod_not_ready(self, connection, init_user, provider, defer): self.assertEqual("db_initialised" in self.harness.charm.app_peer_data, False) defer.assert_called() - @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBProvider") @patch("charm.MongoDBCharm._initialise_users") + @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBConnection") - def test_start_mongod_error_initalising_replica_set( - self, connection, init_users, provider, defer - ): + def test_start_mongod_error_initialising_replica_set(self, connection, defer, *unused): """Tests that failure to initialise replica set is properly handled. Verifies that when there is a failure to initialise replica set the defer is called and @@ -408,7 +422,7 @@ def test_start_mongod_error_initalising_replica_set( @patch("charm.MongoDBCharm._init_operator_user") @patch("charm.MongoDBConnection") @patch("tenacity.nap.time.sleep", MagicMock()) - def test_error_initalising_users(self, connection, init_user, provider, defer): + def test_error_initialising_users(self, connection, init_user, provider, defer, *unused): """Tests that failure to initialise users set is properly handled. 
Verifies that when there is a failure to initialise users that overseeing users is not @@ -433,9 +447,9 @@ def test_error_initalising_users(self, connection, init_user, provider, defer): # verify app data self.assertEqual("db_initialised" in self.harness.charm.app_peer_data, False) + @patch("charm.MongoDBCharm._init_operator_user") @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBProvider") - @patch("charm.MongoDBCharm._init_operator_user") @patch("charm.MongoDBConnection") @patch("tenacity.nap.time.sleep", MagicMock()) @patch("charm.USER_CREATING_MAX_ATTEMPTS", 1) @@ -444,7 +458,7 @@ def test_error_initalising_users(self, connection, init_user, provider, defer): @patch("charm.wait_fixed") @patch("charm.stop_after_attempt") def test_start_mongod_error_overseeing_users( - self, retry_stop, retry_wait, connection, init_user, provider, defer + self, retry_stop, retry_wait, connection, provider, defer, *unused ): """Tests failures related to pymongo are properly handled when overseeing users. @@ -475,7 +489,7 @@ def test_start_mongod_error_overseeing_users( @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBConnection") - def test_reconfigure_not_already_initialised(self, connection, defer): + def test_reconfigure_not_already_initialised(self, connection, defer, *unused): """Tests reconfigure does not execute when database has not been initialised. Verifies in case of relation_joined and relation departed, that when the the database has @@ -513,10 +527,10 @@ def test_reconfigure_not_already_initialised(self, connection, defer): defer.assert_not_called() + @patch("charms.mongodb.v0.mongo.MongoClient") @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBConnection") - @patch("charms.mongodb.v0.mongo.MongoClient") - def test_reconfigure_get_members_failure(self, client, connection, defer): + def test_reconfigure_get_members_failure(self, connection, defer, *unused): """Tests reconfigure does not execute when unable to get the replica set members. Verifies in case of relation_joined and relation departed, that when the the database @@ -552,7 +566,7 @@ def test_reconfigure_get_members_failure(self, client, connection, defer): @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBConnection") - def test_reconfigure_remove_member_failure(self, connection, defer): + def test_reconfigure_remove_member_failure(self, connection, defer, *unused): """Tests reconfigure does not proceed when unable to remove a member. Verifies in relation departed events, that when the database cannot remove a member that @@ -612,7 +626,7 @@ def test_reconfigure_peer_not_ready(self, connection, defer, *unused): @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBConnection") - def test_reconfigure_add_member_failure(self, connection, defer): + def test_reconfigure_add_member_failure(self, connection, defer, *unused): """Tests reconfigure does not proceed when unable to add a member. 
Verifies in relation joined events, that when the database cannot add a member that the @@ -640,10 +654,13 @@ def test_reconfigure_add_member_failure(self, connection, defer): connection.return_value.__enter__.return_value.add_replset_member.assert_called() defer.assert_called() + @patch("charm.MongoDBCharm._configure_container", return_value=None) @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBProvider.oversee_users") @patch("charm.MongoDBConnection") - def test_start_init_operator_user_after_second_call(self, connection, oversee_users, defer): + def test_start_init_operator_user_after_second_call( + self, connection, oversee_users, defer, *unused + ): """Tests that the creation of the admin user is only performed once. Verifies that if the user is already set up, that no attempts to set it up again are @@ -684,16 +701,16 @@ def test_start_init_operator_user_after_second_call(self, connection, oversee_us defer.assert_not_called() - def test_get_password(self): + def test_get_password(self, *unused): self._setup_secrets() assert isinstance(self.harness.charm.get_secret("app", "monitor-password"), str) - self.harness.charm.get_secret("app", "non-existing-secret") is None + assert self.harness.charm.get_secret("app", "non-existing-secret") is None self.harness.charm.set_secret("unit", "somekey", "bla") assert isinstance(self.harness.charm.get_secret("unit", "somekey"), str) - self.harness.charm.get_secret("unit", "non-existing-secret") is None + assert self.harness.charm.get_secret("unit", "non-existing-secret") is None - def test_set_reset_existing_password_app(self): + def test_set_reset_existing_password_app(self, *unused): """NOTE: currently ops.testing seems to allow for non-leader to set secrets too!""" self._setup_secrets() self.harness.set_leader(True) @@ -705,7 +722,7 @@ def test_set_reset_existing_password_app(self): self.harness.charm.set_secret("app", "monitor-password", "blablabla") assert self.harness.charm.get_secret("app", "monitor-password") == "blablabla" - def test_set_reset_existing_password_app_nonleader(self): + def test_set_reset_existing_password_app_nonleader(self, *unused): self._setup_secrets() self.harness.set_leader(False) @@ -719,7 +736,7 @@ def test_set_secret_returning_secret_id(self, scope): assert re.match(f"mongodb-k8s.{scope}", secret_id) @parameterized.expand([("app"), ("unit")]) - def test_set_reset_new_secret(self, scope): + def test_set_reset_new_secret(self, scope, *unused): if scope == "app": self.harness.set_leader(True) @@ -735,7 +752,7 @@ def test_set_reset_new_secret(self, scope): self.harness.charm.set_secret(scope, "new-secret2", "blablabla") assert self.harness.charm.get_secret(scope, "new-secret2") == "blablabla" - def test_set_reset_new_secret_non_leader(self): + def test_set_reset_new_secret_non_leader(self, *unused): self.harness.set_leader(True) # Getting current password @@ -760,7 +777,7 @@ def test_invalid_secret(self, scope): assert self.harness.charm.get_secret(scope, "somekey") is None @pytest.mark.usefixtures("use_caplog") - def test_delete_password(self): + def test_delete_password(self, *unused): self._setup_secrets() self.harness.set_leader(True) @@ -797,7 +814,7 @@ def test_delete_password(self): in self._caplog.text ) - def test_delete_password_non_leader(self): + def test_delete_password_non_leader(self, *unused): self._setup_secrets() self.harness.set_leader(False) assert self.harness.charm.get_secret("app", "monitor-password") @@ -837,9 +854,7 @@ def test_on_other_secret_changed(self, scope, connect_exporter): 
@patch("charm.MongoDBConnection") @patch("charm.MongoDBCharm._pull_licenses") @patch("charm.MongoDBCharm._connect_mongodb_exporter") - def test_connect_to_mongo_exporter_on_set_password( - self, connect_exporter, pull_licenses, connection - ): + def test_connect_to_mongo_exporter_on_set_password(self, connect_exporter, *unused): """Test _connect_mongodb_exporter is called when the password is set for 'montior' user.""" container = self.harness.model.unit.get_container("mongod") self.harness.set_can_connect(container, True) @@ -851,12 +866,12 @@ def test_connect_to_mongo_exporter_on_set_password( self.harness.charm._on_set_password(action_event) connect_exporter.assert_called() + @patch("charm.MongoDBConnection") @patch("charm.MongoDBBackups.get_pbm_status") @patch("charm.MongoDBCharm.has_backup_service") - @patch("charm.MongoDBConnection") @patch("charm.MongoDBCharm._connect_mongodb_exporter") def test_event_set_password_secrets( - self, connect_exporter, connection, has_backup_service, get_pbm_status + self, connect_exporter, has_backup_service, get_pbm_status, *unused ): """Test _connect_mongodb_exporter is called when the password is set for 'montior' user. @@ -883,12 +898,12 @@ def test_event_set_password_secrets( assert "password" in args_pw assert args_pw["password"] == pw + @patch("charm.MongoDBConnection") @patch("charm.MongoDBBackups.get_pbm_status") @patch("charm.MongoDBCharm.has_backup_service") - @patch("charm.MongoDBConnection") @patch("charm.MongoDBCharm._connect_mongodb_exporter") def test_event_auto_reset_password_secrets_when_no_pw_value_shipped( - self, connect_exporter, connection, has_backup_service, get_pbm_status + self, connect_exporter, has_backup_service, get_pbm_status, *unused ): """Test _connect_mongodb_exporter is called when the password is set for 'montior' user. @@ -926,7 +941,7 @@ def test_event_auto_reset_password_secrets_when_no_pw_value_shipped( @patch("charm.MongoDBConnection") @patch("charm.MongoDBCharm._connect_mongodb_exporter") - def test_event_any_unit_can_get_password_secrets(self, connect_exporter, connection): + def test_event_any_unit_can_get_password_secrets(self, *unused): """Test _connect_mongodb_exporter is called when the password is set for 'montior' user. 
Furthermore: in Juju 3.x we want to use secrets @@ -1003,6 +1018,7 @@ def test__connect_mongodb_exporter_success( @patch("charm.USER_CREATING_MAX_ATTEMPTS", 1) @patch("charm.USER_CREATION_COOLDOWN", 1) @patch("charm.REPLICA_SET_INIT_CHECK_TIMEOUT", 1) + @patch("charm.MongoDBCharm._configure_container", return_value=None) @patch("charm.MongoDBCharm._init_operator_user") @patch("charm.MongoDBCharm._init_monitor_user") @patch("charm.MongoDBCharm._connect_mongodb_exporter") @@ -1011,17 +1027,7 @@ def test__connect_mongodb_exporter_success( @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBCharm._set_data_dir_permissions") @patch("charm.MongoDBConnection") - def test__backup_user_created( - self, - connection, - fix_data_dir, - defer, - pull_licenses, - _socket_exists, - _connect_mongodb_exporter, - _init_operator_user, - _init_monitor_user, - ): + def test_backup_user_created(self, *unused): """Tests what backup user was created.""" self.harness.charm._initialise_users.retry.wait = wait_none() container = self.harness.model.unit.get_container("mongod") @@ -1033,7 +1039,7 @@ def test__backup_user_created( self.assertIsNotNone(password) # verify the password is set @patch("charm.MongoDBConnection") - def test_set_password_provided(self, connection): + def test_set_password_provided(self, *unused): """Tests that a given password is set as the new mongodb password for backup user.""" container = self.harness.model.unit.get_container("mongod") self.harness.set_leader(True) @@ -1050,7 +1056,7 @@ def test_set_password_provided(self, connection): @patch_network_get(private_address="1.1.1.1") @patch("charm.MongoDBCharm.has_backup_service") @patch("charm.MongoDBBackups.get_pbm_status") - def test_set_backup_password_pbm_busy(self, pbm_status, has_backup_service): + def test_set_backup_password_pbm_busy(self, pbm_status, has_backup_service, *unused): """Tests changes to passwords fail when pbm is restoring/backing up.""" self.harness.set_leader(True) original_password = "pass123" diff --git a/tests/unit/test_mongodb_backups.py b/tests/unit/test_mongodb_backups.py index 7f0a5eb17..6c0e08a49 100644 --- a/tests/unit/test_mongodb_backups.py +++ b/tests/unit/test_mongodb_backups.py @@ -5,6 +5,7 @@ from unittest import mock from unittest.mock import patch +import pytest from charms.mongodb.v1.helpers import current_pbm_op from charms.mongodb.v1.mongodb_backups import ( PBMBusyError, @@ -30,6 +31,12 @@ RELATION_NAME = "s3-credentials" +@pytest.fixture(autouse=True) +def patch_upgrades(monkeypatch): + monkeypatch.setattr("charm.kubernetes_upgrades._Partition.get", lambda *args, **kwargs: 0) + monkeypatch.setattr("charm.kubernetes_upgrades._Partition.set", lambda *args, **kwargs: None) + + class TestMongoBackups(unittest.TestCase): @patch("charm.get_charm_revision") @patch_network_get(private_address="1.1.1.1") diff --git a/tests/unit/test_mongodb_provider.py b/tests/unit/test_mongodb_provider.py index 75fbf06ef..9585d8c70 100644 --- a/tests/unit/test_mongodb_provider.py +++ b/tests/unit/test_mongodb_provider.py @@ -6,6 +6,7 @@ from unittest import mock from unittest.mock import patch +import pytest from ops.charm import RelationEvent from ops.testing import Harness from pymongo.errors import ConfigurationError, ConnectionFailure, OperationFailure @@ -24,6 +25,13 @@ DEPARTED_IDS = [None, 0] +@pytest.fixture(autouse=True) +def patch_upgrades(monkeypatch): + monkeypatch.setattr("charms.mongodb.v0.upgrade_helpers.AbstractUpgrade.in_progress", False) + 
monkeypatch.setattr("charm.kubernetes_upgrades._Partition.get", lambda *args, **kwargs: 0) + monkeypatch.setattr("charm.kubernetes_upgrades._Partition.set", lambda *args, **kwargs: None) + + class TestMongoProvider(unittest.TestCase): @patch("charm.get_charm_revision") @patch_network_get(private_address="1.1.1.1") diff --git a/tests/unit/test_upgrade.py b/tests/unit/test_upgrade.py index ef29cfd0a..b972ccbc2 100644 --- a/tests/unit/test_upgrade.py +++ b/tests/unit/test_upgrade.py @@ -3,19 +3,34 @@ import unittest from unittest.mock import Mock, PropertyMock, patch +import httpx +import pytest +from charms.mongodb.v0.upgrade_helpers import UnitState +from lightkube import ApiError +from ops import StartEvent from ops.model import ActiveStatus, Relation from ops.testing import ActionFailed, Harness from parameterized import parameterized +from tenacity import Future, RetryError from charm import MongoDBCharm from config import Config +from upgrades.kubernetes_upgrades import DeployedWithoutTrust, KubernetesUpgrade from .helpers import patch_network_get +@pytest.fixture(autouse=True) +def patch_upgrades(monkeypatch): + monkeypatch.setattr("charms.mongodb.v0.upgrade_helpers.AbstractUpgrade.in_progress", False) + monkeypatch.setattr("charm.kubernetes_upgrades._Partition.get", lambda *args, **kwargs: 0) + monkeypatch.setattr("charm.kubernetes_upgrades._Partition.set", lambda *args, **kwargs: None) + + class TestUpgrades(unittest.TestCase): @patch("charm.get_charm_revision") @patch_network_get(private_address="1.1.1.1") + @patch("charm.get_charm_revision") def setUp(self, *unused): self.harness = Harness(MongoDBCharm) self.addCleanup(self.harness.cleanup) @@ -26,6 +41,7 @@ def setUp(self, *unused): self.harness.begin() self.harness.set_leader(True) self.peer_rel_id = self.harness.add_relation("database-peers", "mongodb-peers") + self.harness.add_relation("upgrade-version-a", "upgrade-version-a") @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBCharm.upgrade_in_progress", new_callable=PropertyMock) @@ -45,6 +61,7 @@ def is_role_changed_mock(*args): @patch("charm.MongoDBCharm._connect_mongodb_exporter") @patch("ops.framework.EventBase.defer") @patch("charm.MongoDBCharm.upgrade_in_progress", new_callable=PropertyMock) + @patch("charm.MongoDBCharm.is_db_service_ready") def test_on_relation_handler(self, handler, mock_upgrade, defer, *unused): relation: Relation = self.harness.charm.model.get_relation("database-peers") mock_upgrade.return_value = True @@ -53,8 +70,8 @@ def test_on_relation_handler(self, handler, mock_upgrade, defer, *unused): @patch("charm.MongoDBCharm.upgrade_in_progress", new_callable=PropertyMock) def test_pass_pre_set_password_check_fails(self, mock_upgrade): - def mock_shard_role(*args): - return args != ("shard",) + def mock_shard_role(role_name: str): + return role_name != "shard" mock_pbm_status = Mock(return_value=ActiveStatus()) self.harness.charm.is_role = mock_shard_role @@ -68,3 +85,79 @@ def mock_shard_role(*args): action_failed.exception.message == "Cannot set passwords while an upgrade is in progress." 
) + + @parameterized.expand([[403, DeployedWithoutTrust], [500, ApiError]]) + @patch("charm.kubernetes_upgrades._Partition.get") + def test_lightkube_errors(self, status_code, expected_error, patch_get): + # We need a valid API error due to error handling in lightkube + api_error = ApiError( + request=httpx.Request(url="http://controller/call", method="GET"), + response=httpx.Response(409, json={"message": "bad call", "code": status_code}), + ) + patch_get.side_effect = api_error + + with self.assertRaises(expected_error): + KubernetesUpgrade(self.harness.charm) + + @parameterized.expand( + [ + [{"mongodb-k8s/0": "6.0.6"}, "6.0.6", False], + [{"mongodb-k8s/0": "6.0.7"}, "6.0.6", True], + [{"mongodb-k8s/0": "6.0.6"}, "6.0.7", True], + ] + ) + @patch( + "charm.kubernetes_upgrades.KubernetesUpgrade._app_workload_container_version", + new_callable=PropertyMock, + ) + @patch( + "charm.kubernetes_upgrades.KubernetesUpgrade._unit_workload_container_versions", + new_callable=PropertyMock, + ) + def test__get_unit_healthy_status( + self, unit_versions, app_version, outdated_in_status, _unit_version, _app_version + ) -> None: + _unit_version.return_value = unit_versions + _app_version.return_value = app_version + + status = self.harness.charm.upgrade._upgrade._get_unit_healthy_status() + assert isinstance(status, ActiveStatus) + assert ("(restart pending)" in status.message) == outdated_in_status + + @parameterized.expand( + [ + [None, True, ActiveStatus(), "restarting", False], + [None, True, Config.Status.UNHEALTHY_UPGRADE, "restarting", False], + [None, False, ActiveStatus(), "restarting", True], + [None, False, Config.Status.UNHEALTHY_UPGRADE, "restarting", True], + [RetryError(Future(1)), False, Config.Status.UNHEALTHY_UPGRADE, "restarting", True], + ] + ) + @patch("ops.EventBase.defer") + @patch("charm.MongoDBUpgrade.wait_for_cluster_healthy") + @patch("charm.MongoDBUpgrade.is_cluster_able_to_read_write") + def test_run_post_upgrade_checks( + self, + cluster_healthy_return, + is_cluster_able_to_read_write_return, + initial_status, + initial_unit_state, + is_deferred, + mock_is_cluster, + mock_wait, + defer, + ): + """Tests the run post upgrade checks branching.""" + mock_wait.side_effect = cluster_healthy_return + mock_is_cluster.return_value = is_cluster_able_to_read_write_return + self.harness.charm.unit.status = initial_status + self.harness.charm.upgrade._upgrade.unit_state = UnitState(initial_unit_state) + + self.harness.charm.upgrade.run_post_upgrade_checks(StartEvent, False) + if is_deferred: + defer.assert_called() + assert self.harness.charm.unit.status == Config.Status.UNHEALTHY_UPGRADE + assert self.harness.charm.upgrade._upgrade.unit_state == UnitState(initial_unit_state) + else: + assert self.harness.charm.unit.status == ActiveStatus() + assert self.harness.charm.upgrade._upgrade.unit_state == UnitState.HEALTHY diff --git a/workload_version b/workload_version new file mode 100644 index 000000000..b7ff1516c --- /dev/null +++ b/workload_version @@ -0,0 +1 @@ +6.0.6
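
A note on the `_Partition` stubs used by the unit-test fixtures above: they patch `charm.kubernetes_upgrades._Partition.get` and `.set` so the tests never touch the Kubernetes API. The module they stub, `src/upgrades/kubernetes_upgrades.py`, is added by this patch but its body is not part of this hunk. As rough orientation only, a partition helper of this kind usually just reads and patches the StatefulSet's rollingUpdate partition through lightkube, along the lines of the sketch below; the names, signatures, and wiring here are assumptions for illustration, not the module's actual contents. The tests only tell us that an ApiError with code 403 from these calls is surfaced by KubernetesUpgrade as DeployedWithoutTrust (see test_lightkube_errors).

    from lightkube import Client
    from lightkube.resources.apps_v1 import StatefulSet


    class _Partition:
        """Illustrative helper for the StatefulSet rollingUpdate partition (assumed API)."""

        def __init__(self) -> None:
            self._client = Client()

        def get(self, *, app_name: str, namespace: str) -> int:
            # Kubernetes only refreshes pods whose ordinal is >= the partition value.
            stateful_set = self._client.get(StatefulSet, name=app_name, namespace=namespace)
            return stateful_set.spec.updateStrategy.rollingUpdate.partition

        def set(self, *, app_name: str, namespace: str, value: int) -> None:
            # Lowering the partition step by step is what lets resume-refresh roll the
            # remaining units one at a time. A 403 here typically means the charm was
            # deployed without --trust, which the charm reports as DeployedWithoutTrust.
            self._client.patch(
                StatefulSet,
                name=app_name,
                namespace=namespace,
                obj={"spec": {"updateStrategy": {"rollingUpdate": {"partition": value}}}},
            )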