From a45dc9596faeb28d1c4575bec5a4ed0c9d03888c Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Thu, 18 Jan 2024 19:18:45 +0100
Subject: [PATCH 001/128] Bug 1875340 - remove incomplete test checking for
test manifests of a job
1. The test was incomplete: the existing code was broken and needed several
fixes, and the second part was missing.
2. The test paths are not a property of Job, for performance reasons.
---
tests/ui/job-view/Push_test.jsx | 103 +-------------------------------
1 file changed, 1 insertion(+), 102 deletions(-)
diff --git a/tests/ui/job-view/Push_test.jsx b/tests/ui/job-view/Push_test.jsx
index 5c5a8789cd5..5c78081dbe2 100644
--- a/tests/ui/job-view/Push_test.jsx
+++ b/tests/ui/job-view/Push_test.jsx
@@ -1,20 +1,7 @@
-import React from 'react';
-import fetchMock from 'fetch-mock';
-import { Provider } from 'react-redux';
-import { render, cleanup, waitFor } from '@testing-library/react';
-import { gzip } from 'pako';
-
-import { getProjectUrl, replaceLocation } from '../../../ui/helpers/location';
-import FilterModel from '../../../ui/models/filter';
-import pushListFixture from '../mock/push_list';
-import jobListFixture from '../mock/job_list/job_2';
-import configureStore from '../../../ui/job-view/redux/configureStore';
-import Push, {
+import {
transformTestPath,
transformedPaths,
} from '../../../ui/job-view/pushes/Push';
-import { getApiUrl } from '../../../ui/helpers/url';
-import { findInstance } from '../../../ui/helpers/job';
const manifestsByTask = {
'test-linux1804-64/debug-mochitest-devtools-chrome-e10s-1': [
@@ -103,91 +90,3 @@ describe('Transformations', () => {
});
});
});
-
-describe('Push', () => {
- const repoName = 'autoland';
- const currentRepo = {
- name: repoName,
- getRevisionHref: () => 'foo',
- getPushLogHref: () => 'foo',
- };
- const push = pushListFixture.results[1];
- const revision = 'd5b037941b0ebabcc9b843f24d926e9d65961087';
- const testPush = (store, filterModel) => (
-
-
-
- );
-
- beforeAll(async () => {
- fetchMock.get(getProjectUrl('/push/?full=true&count=10', repoName), {
- ...pushListFixture,
- results: pushListFixture.results[1],
- });
- fetchMock.mock(
- getApiUrl('/jobs/?push_id=511137', repoName),
- jobListFixture,
- );
- const tcUrl =
- 'https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.autoland.revision.d5b037941b0ebabcc9b843f24d926e9d65961087.taskgraph.decision/artifacts/public';
- // XXX: Fix this to re-enable test
- // I need to figure out the right options to get a gzip blob
- fetchMock.get(`${tcUrl}/manifests-by-task.json.gz`, {
- body: new Blob(await gzip(JSON.stringify(manifestsByTask)), {
- type: 'application/gzip',
- }),
- sendAsJson: false,
- });
- });
-
- afterAll(() => {
- fetchMock.reset();
- });
-
- afterEach(() => {
- cleanup();
- replaceLocation({});
- });
-
- // eslint-disable-next-line jest/no-disabled-tests
- test.skip('jobs should have test_path field to filter', async () => {
- const { store } = configureStore();
- const { getByText } = render(testPush(store, new FilterModel()));
-
- const validateJob = async (name, testPaths) => {
- const jobEl = await waitFor(() => getByText(name));
- // Fetch the React instance of an object from a DOM element.
- const { props } = findInstance(jobEl);
- const { job } = props;
- expect(job.test_paths).toStrictEqual(testPaths);
- };
-
- await validateJob('Jit8', []);
- // XXX: It should be returning test paths instead of manifest paths
- await validateJob('dt1', [
- 'devtools/client/framework/browser-toolbox/test/browser.ini',
- 'devtools/client/framework/test/browser.ini',
- 'devtools/client/framework/test/metrics/browser_metrics_inspector.ini',
- 'devtools/client/inspector/changes/test/browser.ini',
- 'devtools/client/inspector/extensions/test/browser.ini',
- 'devtools/client/inspector/markup/test/browser.ini',
- 'devtools/client/jsonview/test/browser.ini',
- 'devtools/client/shared/test/browser.ini',
- 'devtools/client/styleeditor/test/browser.ini',
- 'devtools/client/webconsole/test/node/fixtures/stubs/stubs.ini',
- ]);
- });
-});
From 9623348b9e197fe8d66629caf26d226040eb5373 Mon Sep 17 00:00:00 2001
From: EvaBardou
Date: Thu, 25 Jan 2024 11:52:31 +0100
Subject: [PATCH 002/128] Bug 1814315 - Bump Dockerflow + Add unit tests
(#7899)
* Bug 1814315 - Bump Dockerflow + Add unit tests
* Nit
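
A minimal sketch (an illustration, not taken from this patch) of the kind of
check the new tests/test_dockerflow.py can add, assuming the standard
Dockerflow endpoints are wired up through dockerflow.django and pytest-django
provides the client fixture:

    def test_lbheartbeat(client):
        # __lbheartbeat__ must answer 200 without touching the database
        # or any other backing service, so the load balancer can poll it.
        response = client.get('/__lbheartbeat__')
        assert response.status_code == 200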
---
requirements/common.in | 2 +-
requirements/common.txt | 1789 ++++++++++++++++++++------------------
tests/test_dockerflow.py | 47 +
3 files changed, 975 insertions(+), 863 deletions(-)
create mode 100644 tests/test_dockerflow.py
diff --git a/requirements/common.in b/requirements/common.in
index a0689f0f5a0..cff15e432b4 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -37,7 +37,7 @@ django-cache-memoize==0.1.10 # Imported as cache_memoize
mozci[cache]==2.3.2
# Dockerflow/CloudOps APIs
-dockerflow==2022.8.0
+dockerflow==2024.1.0
# Measuring noise of perf data
moz-measure-noise==2.60.1
diff --git a/requirements/common.txt b/requirements/common.txt
index 9e4c416e71f..d3e18440e2d 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -4,229 +4,219 @@
#
# pip-compile --generate-hashes --output-file=requirements/common.txt requirements/common.in
#
-aiohttp==3.8.4 \
- --hash=sha256:03543dcf98a6619254b409be2d22b51f21ec66272be4ebda7b04e6412e4b2e14 \
- --hash=sha256:03baa76b730e4e15a45f81dfe29a8d910314143414e528737f8589ec60cf7391 \
- --hash=sha256:0a63f03189a6fa7c900226e3ef5ba4d3bd047e18f445e69adbd65af433add5a2 \
- --hash=sha256:10c8cefcff98fd9168cdd86c4da8b84baaa90bf2da2269c6161984e6737bf23e \
- --hash=sha256:147ae376f14b55f4f3c2b118b95be50a369b89b38a971e80a17c3fd623f280c9 \
- --hash=sha256:176a64b24c0935869d5bbc4c96e82f89f643bcdf08ec947701b9dbb3c956b7dd \
- --hash=sha256:17b79c2963db82086229012cff93ea55196ed31f6493bb1ccd2c62f1724324e4 \
- --hash=sha256:1a45865451439eb320784918617ba54b7a377e3501fb70402ab84d38c2cd891b \
- --hash=sha256:1b3ea7edd2d24538959c1c1abf97c744d879d4e541d38305f9bd7d9b10c9ec41 \
- --hash=sha256:22f6eab15b6db242499a16de87939a342f5a950ad0abaf1532038e2ce7d31567 \
- --hash=sha256:3032dcb1c35bc330134a5b8a5d4f68c1a87252dfc6e1262c65a7e30e62298275 \
- --hash=sha256:33587f26dcee66efb2fff3c177547bd0449ab7edf1b73a7f5dea1e38609a0c54 \
- --hash=sha256:34ce9f93a4a68d1272d26030655dd1b58ff727b3ed2a33d80ec433561b03d67a \
- --hash=sha256:3a80464982d41b1fbfe3154e440ba4904b71c1a53e9cd584098cd41efdb188ef \
- --hash=sha256:3b90467ebc3d9fa5b0f9b6489dfb2c304a1db7b9946fa92aa76a831b9d587e99 \
- --hash=sha256:3d89efa095ca7d442a6d0cbc755f9e08190ba40069b235c9886a8763b03785da \
- --hash=sha256:3d8ef1a630519a26d6760bc695842579cb09e373c5f227a21b67dc3eb16cfea4 \
- --hash=sha256:3f43255086fe25e36fd5ed8f2ee47477408a73ef00e804cb2b5cba4bf2ac7f5e \
- --hash=sha256:40653609b3bf50611356e6b6554e3a331f6879fa7116f3959b20e3528783e699 \
- --hash=sha256:41a86a69bb63bb2fc3dc9ad5ea9f10f1c9c8e282b471931be0268ddd09430b04 \
- --hash=sha256:493f5bc2f8307286b7799c6d899d388bbaa7dfa6c4caf4f97ef7521b9cb13719 \
- --hash=sha256:4a6cadebe132e90cefa77e45f2d2f1a4b2ce5c6b1bfc1656c1ddafcfe4ba8131 \
- --hash=sha256:4c745b109057e7e5f1848c689ee4fb3a016c8d4d92da52b312f8a509f83aa05e \
- --hash=sha256:4d347a172f866cd1d93126d9b239fcbe682acb39b48ee0873c73c933dd23bd0f \
- --hash=sha256:4dac314662f4e2aa5009977b652d9b8db7121b46c38f2073bfeed9f4049732cd \
- --hash=sha256:4ddaae3f3d32fc2cb4c53fab020b69a05c8ab1f02e0e59665c6f7a0d3a5be54f \
- --hash=sha256:5393fb786a9e23e4799fec788e7e735de18052f83682ce2dfcabaf1c00c2c08e \
- --hash=sha256:59f029a5f6e2d679296db7bee982bb3d20c088e52a2977e3175faf31d6fb75d1 \
- --hash=sha256:5a7bdf9e57126dc345b683c3632e8ba317c31d2a41acd5800c10640387d193ed \
- --hash=sha256:5b3f2e06a512e94722886c0827bee9807c86a9f698fac6b3aee841fab49bbfb4 \
- --hash=sha256:5ce45967538fb747370308d3145aa68a074bdecb4f3a300869590f725ced69c1 \
- --hash=sha256:5e14f25765a578a0a634d5f0cd1e2c3f53964553a00347998dfdf96b8137f777 \
- --hash=sha256:618c901dd3aad4ace71dfa0f5e82e88b46ef57e3239fc7027773cb6d4ed53531 \
- --hash=sha256:652b1bff4f15f6287550b4670546a2947f2a4575b6c6dff7760eafb22eacbf0b \
- --hash=sha256:6c08e8ed6fa3d477e501ec9db169bfac8140e830aa372d77e4a43084d8dd91ab \
- --hash=sha256:6ddb2a2026c3f6a68c3998a6c47ab6795e4127315d2e35a09997da21865757f8 \
- --hash=sha256:6e601588f2b502c93c30cd5a45bfc665faaf37bbe835b7cfd461753068232074 \
- --hash=sha256:6e74dd54f7239fcffe07913ff8b964e28b712f09846e20de78676ce2a3dc0bfc \
- --hash=sha256:7235604476a76ef249bd64cb8274ed24ccf6995c4a8b51a237005ee7a57e8643 \
- --hash=sha256:7ab43061a0c81198d88f39aaf90dae9a7744620978f7ef3e3708339b8ed2ef01 \
- --hash=sha256:7c7837fe8037e96b6dd5cfcf47263c1620a9d332a87ec06a6ca4564e56bd0f36 \
- --hash=sha256:80575ba9377c5171407a06d0196b2310b679dc752d02a1fcaa2bc20b235dbf24 \
- --hash=sha256:80a37fe8f7c1e6ce8f2d9c411676e4bc633a8462844e38f46156d07a7d401654 \
- --hash=sha256:8189c56eb0ddbb95bfadb8f60ea1b22fcfa659396ea36f6adcc521213cd7b44d \
- --hash=sha256:854f422ac44af92bfe172d8e73229c270dc09b96535e8a548f99c84f82dde241 \
- --hash=sha256:880e15bb6dad90549b43f796b391cfffd7af373f4646784795e20d92606b7a51 \
- --hash=sha256:8b631e26df63e52f7cce0cce6507b7a7f1bc9b0c501fcde69742130b32e8782f \
- --hash=sha256:8c29c77cc57e40f84acef9bfb904373a4e89a4e8b74e71aa8075c021ec9078c2 \
- --hash=sha256:91f6d540163f90bbaef9387e65f18f73ffd7c79f5225ac3d3f61df7b0d01ad15 \
- --hash=sha256:92c0cea74a2a81c4c76b62ea1cac163ecb20fb3ba3a75c909b9fa71b4ad493cf \
- --hash=sha256:9bcb89336efa095ea21b30f9e686763f2be4478f1b0a616969551982c4ee4c3b \
- --hash=sha256:a1f4689c9a1462f3df0a1f7e797791cd6b124ddbee2b570d34e7f38ade0e2c71 \
- --hash=sha256:a3fec6a4cb5551721cdd70473eb009d90935b4063acc5f40905d40ecfea23e05 \
- --hash=sha256:a5d794d1ae64e7753e405ba58e08fcfa73e3fad93ef9b7e31112ef3c9a0efb52 \
- --hash=sha256:a86d42d7cba1cec432d47ab13b6637bee393a10f664c425ea7b305d1301ca1a3 \
- --hash=sha256:adfbc22e87365a6e564c804c58fc44ff7727deea782d175c33602737b7feadb6 \
- --hash=sha256:aeb29c84bb53a84b1a81c6c09d24cf33bb8432cc5c39979021cc0f98c1292a1a \
- --hash=sha256:aede4df4eeb926c8fa70de46c340a1bc2c6079e1c40ccf7b0eae1313ffd33519 \
- --hash=sha256:b744c33b6f14ca26b7544e8d8aadff6b765a80ad6164fb1a430bbadd593dfb1a \
- --hash=sha256:b7a00a9ed8d6e725b55ef98b1b35c88013245f35f68b1b12c5cd4100dddac333 \
- --hash=sha256:bb96fa6b56bb536c42d6a4a87dfca570ff8e52de2d63cabebfd6fb67049c34b6 \
- --hash=sha256:bbcf1a76cf6f6dacf2c7f4d2ebd411438c275faa1dc0c68e46eb84eebd05dd7d \
- --hash=sha256:bca5f24726e2919de94f047739d0a4fc01372801a3672708260546aa2601bf57 \
- --hash=sha256:bf2e1a9162c1e441bf805a1fd166e249d574ca04e03b34f97e2928769e91ab5c \
- --hash=sha256:c4eb3b82ca349cf6fadcdc7abcc8b3a50ab74a62e9113ab7a8ebc268aad35bb9 \
- --hash=sha256:c6cc15d58053c76eacac5fa9152d7d84b8d67b3fde92709195cb984cfb3475ea \
- --hash=sha256:c6cd05ea06daca6ad6a4ca3ba7fe7dc5b5de063ff4daec6170ec0f9979f6c332 \
- --hash=sha256:c844fd628851c0bc309f3c801b3a3d58ce430b2ce5b359cd918a5a76d0b20cb5 \
- --hash=sha256:c9cb1565a7ad52e096a6988e2ee0397f72fe056dadf75d17fa6b5aebaea05622 \
- --hash=sha256:cab9401de3ea52b4b4c6971db5fb5c999bd4260898af972bf23de1c6b5dd9d71 \
- --hash=sha256:cd468460eefef601ece4428d3cf4562459157c0f6523db89365202c31b6daebb \
- --hash=sha256:d1e6a862b76f34395a985b3cd39a0d949ca80a70b6ebdea37d3ab39ceea6698a \
- --hash=sha256:d1f9282c5f2b5e241034a009779e7b2a1aa045f667ff521e7948ea9b56e0c5ff \
- --hash=sha256:d265f09a75a79a788237d7f9054f929ced2e69eb0bb79de3798c468d8a90f945 \
- --hash=sha256:db3fc6120bce9f446d13b1b834ea5b15341ca9ff3f335e4a951a6ead31105480 \
- --hash=sha256:dbf3a08a06b3f433013c143ebd72c15cac33d2914b8ea4bea7ac2c23578815d6 \
- --hash=sha256:de04b491d0e5007ee1b63a309956eaed959a49f5bb4e84b26c8f5d49de140fa9 \
- --hash=sha256:e4b09863aae0dc965c3ef36500d891a3ff495a2ea9ae9171e4519963c12ceefd \
- --hash=sha256:e595432ac259af2d4630008bf638873d69346372d38255774c0e286951e8b79f \
- --hash=sha256:e75b89ac3bd27d2d043b234aa7b734c38ba1b0e43f07787130a0ecac1e12228a \
- --hash=sha256:ea9eb976ffdd79d0e893869cfe179a8f60f152d42cb64622fca418cd9b18dc2a \
- --hash=sha256:eafb3e874816ebe2a92f5e155f17260034c8c341dad1df25672fb710627c6949 \
- --hash=sha256:ee3c36df21b5714d49fc4580247947aa64bcbe2939d1b77b4c8dcb8f6c9faecc \
- --hash=sha256:f352b62b45dff37b55ddd7b9c0c8672c4dd2eb9c0f9c11d395075a84e2c40f75 \
- --hash=sha256:fabb87dd8850ef0f7fe2b366d44b77d7e6fa2ea87861ab3844da99291e81e60f \
- --hash=sha256:fe11310ae1e4cd560035598c3f29d86cef39a83d244c7466f95c27ae04850f10 \
- --hash=sha256:fe7ba4a51f33ab275515f66b0a236bcde4fb5561498fe8f898d4e549b2e4509f
+aiohttp==3.9.1 \
+ --hash=sha256:02ab6006ec3c3463b528374c4cdce86434e7b89ad355e7bf29e2f16b46c7dd6f \
+ --hash=sha256:04fa38875e53eb7e354ece1607b1d2fdee2d175ea4e4d745f6ec9f751fe20c7c \
+ --hash=sha256:0b0a6a36ed7e164c6df1e18ee47afbd1990ce47cb428739d6c99aaabfaf1b3af \
+ --hash=sha256:0d406b01a9f5a7e232d1b0d161b40c05275ffbcbd772dc18c1d5a570961a1ca4 \
+ --hash=sha256:0e49b08eafa4f5707ecfb321ab9592717a319e37938e301d462f79b4e860c32a \
+ --hash=sha256:0e7ba7ff228c0d9a2cd66194e90f2bca6e0abca810b786901a569c0de082f489 \
+ --hash=sha256:11cb254e397a82efb1805d12561e80124928e04e9c4483587ce7390b3866d213 \
+ --hash=sha256:11ff168d752cb41e8492817e10fb4f85828f6a0142b9726a30c27c35a1835f01 \
+ --hash=sha256:176df045597e674fa950bf5ae536be85699e04cea68fa3a616cf75e413737eb5 \
+ --hash=sha256:219a16763dc0294842188ac8a12262b5671817042b35d45e44fd0a697d8c8361 \
+ --hash=sha256:22698f01ff5653fe66d16ffb7658f582a0ac084d7da1323e39fd9eab326a1f26 \
+ --hash=sha256:237533179d9747080bcaad4d02083ce295c0d2eab3e9e8ce103411a4312991a0 \
+ --hash=sha256:289ba9ae8e88d0ba16062ecf02dd730b34186ea3b1e7489046fc338bdc3361c4 \
+ --hash=sha256:2c59e0076ea31c08553e868cec02d22191c086f00b44610f8ab7363a11a5d9d8 \
+ --hash=sha256:2c9376e2b09895c8ca8b95362283365eb5c03bdc8428ade80a864160605715f1 \
+ --hash=sha256:3135713c5562731ee18f58d3ad1bf41e1d8883eb68b363f2ffde5b2ea4b84cc7 \
+ --hash=sha256:3b9c7426923bb7bd66d409da46c41e3fb40f5caf679da624439b9eba92043fa6 \
+ --hash=sha256:3c0266cd6f005e99f3f51e583012de2778e65af6b73860038b968a0a8888487a \
+ --hash=sha256:41473de252e1797c2d2293804e389a6d6986ef37cbb4a25208de537ae32141dd \
+ --hash=sha256:4831df72b053b1eed31eb00a2e1aff6896fb4485301d4ccb208cac264b648db4 \
+ --hash=sha256:49f0c1b3c2842556e5de35f122fc0f0b721334ceb6e78c3719693364d4af8499 \
+ --hash=sha256:4b4c452d0190c5a820d3f5c0f3cd8a28ace48c54053e24da9d6041bf81113183 \
+ --hash=sha256:4ee8caa925aebc1e64e98432d78ea8de67b2272252b0a931d2ac3bd876ad5544 \
+ --hash=sha256:500f1c59906cd142d452074f3811614be04819a38ae2b3239a48b82649c08821 \
+ --hash=sha256:5216b6082c624b55cfe79af5d538e499cd5f5b976820eac31951fb4325974501 \
+ --hash=sha256:54311eb54f3a0c45efb9ed0d0a8f43d1bc6060d773f6973efd90037a51cd0a3f \
+ --hash=sha256:54631fb69a6e44b2ba522f7c22a6fb2667a02fd97d636048478db2fd8c4e98fe \
+ --hash=sha256:565760d6812b8d78d416c3c7cfdf5362fbe0d0d25b82fed75d0d29e18d7fc30f \
+ --hash=sha256:598db66eaf2e04aa0c8900a63b0101fdc5e6b8a7ddd805c56d86efb54eb66672 \
+ --hash=sha256:5c4fa235d534b3547184831c624c0b7c1e262cd1de847d95085ec94c16fddcd5 \
+ --hash=sha256:69985d50a2b6f709412d944ffb2e97d0be154ea90600b7a921f95a87d6f108a2 \
+ --hash=sha256:69da0f3ed3496808e8cbc5123a866c41c12c15baaaead96d256477edf168eb57 \
+ --hash=sha256:6c93b7c2e52061f0925c3382d5cb8980e40f91c989563d3d32ca280069fd6a87 \
+ --hash=sha256:70907533db712f7aa791effb38efa96f044ce3d4e850e2d7691abd759f4f0ae0 \
+ --hash=sha256:81b77f868814346662c96ab36b875d7814ebf82340d3284a31681085c051320f \
+ --hash=sha256:82eefaf1a996060602f3cc1112d93ba8b201dbf5d8fd9611227de2003dddb3b7 \
+ --hash=sha256:85c3e3c9cb1d480e0b9a64c658cd66b3cfb8e721636ab8b0e746e2d79a7a9eed \
+ --hash=sha256:8a22a34bc594d9d24621091d1b91511001a7eea91d6652ea495ce06e27381f70 \
+ --hash=sha256:8cef8710fb849d97c533f259103f09bac167a008d7131d7b2b0e3a33269185c0 \
+ --hash=sha256:8d44e7bf06b0c0a70a20f9100af9fcfd7f6d9d3913e37754c12d424179b4e48f \
+ --hash=sha256:8d7f98fde213f74561be1d6d3fa353656197f75d4edfbb3d94c9eb9b0fc47f5d \
+ --hash=sha256:8d8e4450e7fe24d86e86b23cc209e0023177b6d59502e33807b732d2deb6975f \
+ --hash=sha256:8fc49a87ac269d4529da45871e2ffb6874e87779c3d0e2ccd813c0899221239d \
+ --hash=sha256:90ec72d231169b4b8d6085be13023ece8fa9b1bb495e4398d847e25218e0f431 \
+ --hash=sha256:91c742ca59045dce7ba76cab6e223e41d2c70d79e82c284a96411f8645e2afff \
+ --hash=sha256:9b05d33ff8e6b269e30a7957bd3244ffbce2a7a35a81b81c382629b80af1a8bf \
+ --hash=sha256:9b05d5cbe9dafcdc733262c3a99ccf63d2f7ce02543620d2bd8db4d4f7a22f83 \
+ --hash=sha256:9c5857612c9813796960c00767645cb5da815af16dafb32d70c72a8390bbf690 \
+ --hash=sha256:a34086c5cc285be878622e0a6ab897a986a6e8bf5b67ecb377015f06ed316587 \
+ --hash=sha256:ab221850108a4a063c5b8a70f00dd7a1975e5a1713f87f4ab26a46e5feac5a0e \
+ --hash=sha256:b796b44111f0cab6bbf66214186e44734b5baab949cb5fb56154142a92989aeb \
+ --hash=sha256:b8c3a67eb87394386847d188996920f33b01b32155f0a94f36ca0e0c635bf3e3 \
+ --hash=sha256:bcb6532b9814ea7c5a6a3299747c49de30e84472fa72821b07f5a9818bce0f66 \
+ --hash=sha256:bcc0ea8d5b74a41b621ad4a13d96c36079c81628ccc0b30cfb1603e3dfa3a014 \
+ --hash=sha256:bea94403a21eb94c93386d559bce297381609153e418a3ffc7d6bf772f59cc35 \
+ --hash=sha256:bff7e2811814fa2271be95ab6e84c9436d027a0e59665de60edf44e529a42c1f \
+ --hash=sha256:c72444d17777865734aa1a4d167794c34b63e5883abb90356a0364a28904e6c0 \
+ --hash=sha256:c7b5d5d64e2a14e35a9240b33b89389e0035e6de8dbb7ffa50d10d8b65c57449 \
+ --hash=sha256:c7e939f1ae428a86e4abbb9a7c4732bf4706048818dfd979e5e2839ce0159f23 \
+ --hash=sha256:c88a15f272a0ad3d7773cf3a37cc7b7d077cbfc8e331675cf1346e849d97a4e5 \
+ --hash=sha256:c9110c06eaaac7e1f5562caf481f18ccf8f6fdf4c3323feab28a93d34cc646bd \
+ --hash=sha256:ca7ca5abfbfe8d39e653870fbe8d7710be7a857f8a8386fc9de1aae2e02ce7e4 \
+ --hash=sha256:cae4c0c2ca800c793cae07ef3d40794625471040a87e1ba392039639ad61ab5b \
+ --hash=sha256:cdefe289681507187e375a5064c7599f52c40343a8701761c802c1853a504558 \
+ --hash=sha256:cf2a0ac0615842b849f40c4d7f304986a242f1e68286dbf3bd7a835e4f83acfd \
+ --hash=sha256:cfeadf42840c1e870dc2042a232a8748e75a36b52d78968cda6736de55582766 \
+ --hash=sha256:d737e69d193dac7296365a6dcb73bbbf53bb760ab25a3727716bbd42022e8d7a \
+ --hash=sha256:d7481f581251bb5558ba9f635db70908819caa221fc79ee52a7f58392778c636 \
+ --hash=sha256:df9cf74b9bc03d586fc53ba470828d7b77ce51b0582d1d0b5b2fb673c0baa32d \
+ --hash=sha256:e1f80197f8b0b846a8d5cf7b7ec6084493950d0882cc5537fb7b96a69e3c8590 \
+ --hash=sha256:ecca113f19d5e74048c001934045a2b9368d77b0b17691d905af18bd1c21275e \
+ --hash=sha256:ee2527134f95e106cc1653e9ac78846f3a2ec1004cf20ef4e02038035a74544d \
+ --hash=sha256:f27fdaadce22f2ef950fc10dcdf8048407c3b42b73779e48a4e76b3c35bca26c \
+ --hash=sha256:f694dc8a6a3112059258a725a4ebe9acac5fe62f11c77ac4dcf896edfa78ca28 \
+ --hash=sha256:f800164276eec54e0af5c99feb9494c295118fc10a11b997bbb1348ba1a52065 \
+ --hash=sha256:ffcd828e37dc219a72c9012ec44ad2e7e3066bec6ff3aaa19e7d435dbf4032ca
# via taskcluster
aiosignal==1.3.1 \
--hash=sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc \
--hash=sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17
# via aiohttp
-amqp==5.1.1 \
- --hash=sha256:2c1b13fecc0893e946c65cbd5f36427861cffa4ea2201d8f6fca22e2a373b5e2 \
- --hash=sha256:6f0956d2c23d8fa6e7691934d8c3930eadb44972cbbd1a7ae3a520f735d43359
+amqp==5.2.0 \
+ --hash=sha256:827cb12fb0baa892aad844fd95258143bce4027fdac4fccddbc43330fd281637 \
+ --hash=sha256:a1ecff425ad063ad42a486c902807d1482311481c8ad95a72694b2975e75f7fd
# via kombu
appdirs==1.4.4 \
--hash=sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41 \
--hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128
# via mozci
-arrow==1.2.3 \
- --hash=sha256:3934b30ca1b9f292376d9db15b19446088d12ec58629bc3f0da28fd55fb633a1 \
- --hash=sha256:5a49ab92e3b7b71d96cd6bfcc4df14efefc9dfa96ea19045815914a6ab6b1fe2
+arrow==1.3.0 \
+ --hash=sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80 \
+ --hash=sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85
# via mozci
asgiref==3.7.2 \
--hash=sha256:89b2ef2247e3b562a16eef663bc0e2e703ec6468e2fa8a5cd61cd449786d4f6e \
--hash=sha256:9e0ce3aa93a819ba5b45120216b23878cf6e8525eb3848653452b4192b92afed
# via django
-async-timeout==4.0.2 \
- --hash=sha256:2163e1640ddb52b7a8c80d0a67a08587e5d245cc9c553a74a847056bc2976b15 \
- --hash=sha256:8ca1e4fcf50d07413d66d1a5e416e42cfdf5851c981d679a09851a6853383b3c
+async-timeout==4.0.3 \
+ --hash=sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f \
+ --hash=sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028
# via
# aiohttp
# redis
# taskcluster
-attrs==23.1.0 \
- --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \
- --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015
+attrs==23.2.0 \
+ --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \
+ --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1
# via
# aiohttp
# jsonschema
-billiard==4.1.0 \
- --hash=sha256:0f50d6be051c6b2b75bfbc8bfd85af195c5739c281d3f5b86a5640c65563614a \
- --hash=sha256:1ad2eeae8e28053d729ba3373d34d9d6e210f6e4d8bf0a9c64f92bd053f1edf5
+billiard==4.2.0 \
+ --hash=sha256:07aa978b308f334ff8282bd4a746e681b3513db5c9a514cbdd810cbbdc19714d \
+ --hash=sha256:9a3c3184cb275aa17a732f93f65b20c525d3d9f253722d26a82194803ade5a2c
# via celery
blessed==1.20.0 \
--hash=sha256:0c542922586a265e699188e52d5f5ac5ec0dd517e5a1041d90d2bbf23f906058 \
--hash=sha256:2cdd67f8746e048f00df47a2880f4d6acbcdb399031b604e34ba8f71d5787680
# via mozlog
-boto3==1.28.5 \
- --hash=sha256:2c76db4a1208b8d09814261fc5e530fc36b3b952ef807312495e6869fa6eaad5 \
- --hash=sha256:a5c815ab81219a606f20362c9d9c190f5c224bf33c5dc4c20501036cc4a9034f
+boto3==1.34.26 \
+ --hash=sha256:0491a65e55de999d07f42bb28ff6a38bad493934154b6304fcdfb4699a612d6c \
+ --hash=sha256:881b07d0d55e5d85b62e6c965efcb2820bdfbd8f23a73a7bc9dac3a4997a1343
# via mozci
-botocore==1.31.5 \
- --hash=sha256:8aec97512587a5475036a982785e406c52efd260457b809846985f849c3d7cf3 \
- --hash=sha256:b35114dae9c451895a11fef13d76881e2bb5428e5de8a702cc8589a28fb34c7a
+botocore==1.34.26 \
+ --hash=sha256:4f3df0f6ed722e944d6f0eed964bc00b6489e50c6e8d5fdbbb68eb0c6c16c7c9 \
+ --hash=sha256:63543102467b3b5ba73903f11a14c3157ee442a360f3cb2f5316a8d6bc3e10e7
# via
# boto3
# s3transfer
-brotli==1.0.9 \
- --hash=sha256:02177603aaca36e1fd21b091cb742bb3b305a569e2402f1ca38af471777fb019 \
- --hash=sha256:11d3283d89af7033236fa4e73ec2cbe743d4f6a81d41bd234f24bf63dde979df \
- --hash=sha256:12effe280b8ebfd389022aa65114e30407540ccb89b177d3fbc9a4f177c4bd5d \
- --hash=sha256:160c78292e98d21e73a4cc7f76a234390e516afcd982fa17e1422f7c6a9ce9c8 \
- --hash=sha256:16d528a45c2e1909c2798f27f7bf0a3feec1dc9e50948e738b961618e38b6a7b \
- --hash=sha256:19598ecddd8a212aedb1ffa15763dd52a388518c4550e615aed88dc3753c0f0c \
- --hash=sha256:1c48472a6ba3b113452355b9af0a60da5c2ae60477f8feda8346f8fd48e3e87c \
- --hash=sha256:268fe94547ba25b58ebc724680609c8ee3e5a843202e9a381f6f9c5e8bdb5c70 \
- --hash=sha256:269a5743a393c65db46a7bb982644c67ecba4b8d91b392403ad8a861ba6f495f \
- --hash=sha256:26d168aac4aaec9a4394221240e8a5436b5634adc3cd1cdf637f6645cecbf181 \
- --hash=sha256:29d1d350178e5225397e28ea1b7aca3648fcbab546d20e7475805437bfb0a130 \
- --hash=sha256:2aad0e0baa04517741c9bb5b07586c642302e5fb3e75319cb62087bd0995ab19 \
- --hash=sha256:3148362937217b7072cf80a2dcc007f09bb5ecb96dae4617316638194113d5be \
- --hash=sha256:330e3f10cd01da535c70d09c4283ba2df5fb78e915bea0a28becad6e2ac010be \
- --hash=sha256:336b40348269f9b91268378de5ff44dc6fbaa2268194f85177b53463d313842a \
- --hash=sha256:3496fc835370da351d37cada4cf744039616a6db7d13c430035e901443a34daa \
- --hash=sha256:35a3edbe18e876e596553c4007a087f8bcfd538f19bc116917b3c7522fca0429 \
- --hash=sha256:3b78a24b5fd13c03ee2b7b86290ed20efdc95da75a3557cc06811764d5ad1126 \
- --hash=sha256:3b8b09a16a1950b9ef495a0f8b9d0a87599a9d1f179e2d4ac014b2ec831f87e7 \
- --hash=sha256:3c1306004d49b84bd0c4f90457c6f57ad109f5cc6067a9664e12b7b79a9948ad \
- --hash=sha256:3ffaadcaeafe9d30a7e4e1e97ad727e4f5610b9fa2f7551998471e3736738679 \
- --hash=sha256:40d15c79f42e0a2c72892bf407979febd9cf91f36f495ffb333d1d04cebb34e4 \
- --hash=sha256:44bb8ff420c1d19d91d79d8c3574b8954288bdff0273bf788954064d260d7ab0 \
- --hash=sha256:4688c1e42968ba52e57d8670ad2306fe92e0169c6f3af0089be75bbac0c64a3b \
- --hash=sha256:495ba7e49c2db22b046a53b469bbecea802efce200dffb69b93dd47397edc9b6 \
- --hash=sha256:4d1b810aa0ed773f81dceda2cc7b403d01057458730e309856356d4ef4188438 \
- --hash=sha256:503fa6af7da9f4b5780bb7e4cbe0c639b010f12be85d02c99452825dd0feef3f \
- --hash=sha256:56d027eace784738457437df7331965473f2c0da2c70e1a1f6fdbae5402e0389 \
- --hash=sha256:5913a1177fc36e30fcf6dc868ce23b0453952c78c04c266d3149b3d39e1410d6 \
- --hash=sha256:5b6ef7d9f9c38292df3690fe3e302b5b530999fa90014853dcd0d6902fb59f26 \
- --hash=sha256:5bf37a08493232fbb0f8229f1824b366c2fc1d02d64e7e918af40acd15f3e337 \
- --hash=sha256:5cb1e18167792d7d21e21365d7650b72d5081ed476123ff7b8cac7f45189c0c7 \
- --hash=sha256:61a7ee1f13ab913897dac7da44a73c6d44d48a4adff42a5701e3239791c96e14 \
- --hash=sha256:622a231b08899c864eb87e85f81c75e7b9ce05b001e59bbfbf43d4a71f5f32b2 \
- --hash=sha256:68715970f16b6e92c574c30747c95cf8cf62804569647386ff032195dc89a430 \
- --hash=sha256:6b2ae9f5f67f89aade1fab0f7fd8f2832501311c363a21579d02defa844d9296 \
- --hash=sha256:6c772d6c0a79ac0f414a9f8947cc407e119b8598de7621f39cacadae3cf57d12 \
- --hash=sha256:6d847b14f7ea89f6ad3c9e3901d1bc4835f6b390a9c71df999b0162d9bb1e20f \
- --hash=sha256:73fd30d4ce0ea48010564ccee1a26bfe39323fde05cb34b5863455629db61dc7 \
- --hash=sha256:76ffebb907bec09ff511bb3acc077695e2c32bc2142819491579a695f77ffd4d \
- --hash=sha256:7bbff90b63328013e1e8cb50650ae0b9bac54ffb4be6104378490193cd60f85a \
- --hash=sha256:7cb81373984cc0e4682f31bc3d6be9026006d96eecd07ea49aafb06897746452 \
- --hash=sha256:7ee83d3e3a024a9618e5be64648d6d11c37047ac48adff25f12fa4226cf23d1c \
- --hash=sha256:854c33dad5ba0fbd6ab69185fec8dab89e13cda6b7d191ba111987df74f38761 \
- --hash=sha256:85f7912459c67eaab2fb854ed2bc1cc25772b300545fe7ed2dc03954da638649 \
- --hash=sha256:87fdccbb6bb589095f413b1e05734ba492c962b4a45a13ff3408fa44ffe6479b \
- --hash=sha256:88c63a1b55f352b02c6ffd24b15ead9fc0e8bf781dbe070213039324922a2eea \
- --hash=sha256:8a674ac10e0a87b683f4fa2b6fa41090edfd686a6524bd8dedbd6138b309175c \
- --hash=sha256:8ed6a5b3d23ecc00ea02e1ed8e0ff9a08f4fc87a1f58a2530e71c0f48adf882f \
- --hash=sha256:93130612b837103e15ac3f9cbacb4613f9e348b58b3aad53721d92e57f96d46a \
- --hash=sha256:9744a863b489c79a73aba014df554b0e7a0fc44ef3f8a0ef2a52919c7d155031 \
- --hash=sha256:9749a124280a0ada4187a6cfd1ffd35c350fb3af79c706589d98e088c5044267 \
- --hash=sha256:97f715cf371b16ac88b8c19da00029804e20e25f30d80203417255d239f228b5 \
- --hash=sha256:9bf919756d25e4114ace16a8ce91eb340eb57a08e2c6950c3cebcbe3dff2a5e7 \
- --hash=sha256:9d12cf2851759b8de8ca5fde36a59c08210a97ffca0eb94c532ce7b17c6a3d1d \
- --hash=sha256:9ed4c92a0665002ff8ea852353aeb60d9141eb04109e88928026d3c8a9e5433c \
- --hash=sha256:a72661af47119a80d82fa583b554095308d6a4c356b2a554fdc2799bc19f2a43 \
- --hash=sha256:afde17ae04d90fbe53afb628f7f2d4ca022797aa093e809de5c3cf276f61bbfa \
- --hash=sha256:b1375b5d17d6145c798661b67e4ae9d5496920d9265e2f00f1c2c0b5ae91fbde \
- --hash=sha256:b336c5e9cf03c7be40c47b5fd694c43c9f1358a80ba384a21969e0b4e66a9b17 \
- --hash=sha256:b3523f51818e8f16599613edddb1ff924eeb4b53ab7e7197f85cbc321cdca32f \
- --hash=sha256:b43775532a5904bc938f9c15b77c613cb6ad6fb30990f3b0afaea82797a402d8 \
- --hash=sha256:b663f1e02de5d0573610756398e44c130add0eb9a3fc912a09665332942a2efb \
- --hash=sha256:b83bb06a0192cccf1eb8d0a28672a1b79c74c3a8a5f2619625aeb6f28b3a82bb \
- --hash=sha256:ba72d37e2a924717990f4d7482e8ac88e2ef43fb95491eb6e0d124d77d2a150d \
- --hash=sha256:c2415d9d082152460f2bd4e382a1e85aed233abc92db5a3880da2257dc7daf7b \
- --hash=sha256:c83aa123d56f2e060644427a882a36b3c12db93727ad7a7b9efd7d7f3e9cc2c4 \
- --hash=sha256:c8e521a0ce7cf690ca84b8cc2272ddaf9d8a50294fd086da67e517439614c755 \
- --hash=sha256:cab1b5964b39607a66adbba01f1c12df2e55ac36c81ec6ed44f2fca44178bf1a \
- --hash=sha256:cb02ed34557afde2d2da68194d12f5719ee96cfb2eacc886352cb73e3808fc5d \
- --hash=sha256:cc0283a406774f465fb45ec7efb66857c09ffefbe49ec20b7882eff6d3c86d3a \
- --hash=sha256:cfc391f4429ee0a9370aa93d812a52e1fee0f37a81861f4fdd1f4fb28e8547c3 \
- --hash=sha256:db844eb158a87ccab83e868a762ea8024ae27337fc7ddcbfcddd157f841fdfe7 \
- --hash=sha256:defed7ea5f218a9f2336301e6fd379f55c655bea65ba2476346340a0ce6f74a1 \
- --hash=sha256:e16eb9541f3dd1a3e92b89005e37b1257b157b7256df0e36bd7b33b50be73bcb \
- --hash=sha256:e1abbeef02962596548382e393f56e4c94acd286bd0c5afba756cffc33670e8a \
- --hash=sha256:e23281b9a08ec338469268f98f194658abfb13658ee98e2b7f85ee9dd06caa91 \
- --hash=sha256:e2d9e1cbc1b25e22000328702b014227737756f4b5bf5c485ac1d8091ada078b \
- --hash=sha256:e48f4234f2469ed012a98f4b7874e7f7e173c167bed4934912a29e03167cf6b1 \
- --hash=sha256:e4c4e92c14a57c9bd4cb4be678c25369bf7a092d55fd0866f759e425b9660806 \
- --hash=sha256:ec1947eabbaf8e0531e8e899fc1d9876c179fc518989461f5d24e2223395a9e3 \
- --hash=sha256:f909bbbc433048b499cb9db9e713b5d8d949e8c109a2a548502fb9aa8630f0b1
+brotli==1.1.0 \
+ --hash=sha256:03d20af184290887bdea3f0f78c4f737d126c74dc2f3ccadf07e54ceca3bf208 \
+ --hash=sha256:0541e747cce78e24ea12d69176f6a7ddb690e62c425e01d31cc065e69ce55b48 \
+ --hash=sha256:069a121ac97412d1fe506da790b3e69f52254b9df4eb665cd42460c837193354 \
+ --hash=sha256:0b63b949ff929fbc2d6d3ce0e924c9b93c9785d877a21a1b678877ffbbc4423a \
+ --hash=sha256:0c6244521dda65ea562d5a69b9a26120769b7a9fb3db2fe9545935ed6735b128 \
+ --hash=sha256:11d00ed0a83fa22d29bc6b64ef636c4552ebafcef57154b4ddd132f5638fbd1c \
+ --hash=sha256:141bd4d93984070e097521ed07e2575b46f817d08f9fa42b16b9b5f27b5ac088 \
+ --hash=sha256:19c116e796420b0cee3da1ccec3b764ed2952ccfcc298b55a10e5610ad7885f9 \
+ --hash=sha256:1ab4fbee0b2d9098c74f3057b2bc055a8bd92ccf02f65944a241b4349229185a \
+ --hash=sha256:1ae56aca0402a0f9a3431cddda62ad71666ca9d4dc3a10a142b9dce2e3c0cda3 \
+ --hash=sha256:224e57f6eac61cc449f498cc5f0e1725ba2071a3d4f48d5d9dffba42db196438 \
+ --hash=sha256:22fc2a8549ffe699bfba2256ab2ed0421a7b8fadff114a3d201794e45a9ff578 \
+ --hash=sha256:23032ae55523cc7bccb4f6a0bf368cd25ad9bcdcc1990b64a647e7bbcce9cb5b \
+ --hash=sha256:2333e30a5e00fe0fe55903c8832e08ee9c3b1382aacf4db26664a16528d51b4b \
+ --hash=sha256:2954c1c23f81c2eaf0b0717d9380bd348578a94161a65b3a2afc62c86467dd68 \
+ --hash=sha256:2de9d02f5bda03d27ede52e8cfe7b865b066fa49258cbab568720aa5be80a47d \
+ --hash=sha256:30924eb4c57903d5a7526b08ef4a584acc22ab1ffa085faceb521521d2de32dd \
+ --hash=sha256:316cc9b17edf613ac76b1f1f305d2a748f1b976b033b049a6ecdfd5612c70409 \
+ --hash=sha256:38025d9f30cf4634f8309c6874ef871b841eb3c347e90b0851f63d1ded5212da \
+ --hash=sha256:39da8adedf6942d76dc3e46653e52df937a3c4d6d18fdc94a7c29d263b1f5b50 \
+ --hash=sha256:3d7954194c36e304e1523f55d7042c59dc53ec20dd4e9ea9d151f1b62b4415c0 \
+ --hash=sha256:4093c631e96fdd49e0377a9c167bfd75b6d0bad2ace734c6eb20b348bc3ea180 \
+ --hash=sha256:43ce1b9935bfa1ede40028054d7f48b5469cd02733a365eec8a329ffd342915d \
+ --hash=sha256:4d4a848d1837973bf0f4b5e54e3bec977d99be36a7895c61abb659301b02c112 \
+ --hash=sha256:4ed11165dd45ce798d99a136808a794a748d5dc38511303239d4e2363c0695dc \
+ --hash=sha256:510b5b1bfbe20e1a7b3baf5fed9e9451873559a976c1a78eebaa3b86c57b4265 \
+ --hash=sha256:524f35912131cc2cabb00edfd8d573b07f2d9f21fa824bd3fb19725a9cf06327 \
+ --hash=sha256:587ca6d3cef6e4e868102672d3bd9dc9698c309ba56d41c2b9c85bbb903cdb95 \
+ --hash=sha256:5b3cc074004d968722f51e550b41a27be656ec48f8afaeeb45ebf65b561481dd \
+ --hash=sha256:5eeb539606f18a0b232d4ba45adccde4125592f3f636a6182b4a8a436548b914 \
+ --hash=sha256:5f4d5ea15c9382135076d2fb28dde923352fe02951e66935a9efaac8f10e81b0 \
+ --hash=sha256:5fb2ce4b8045c78ebbc7b8f3c15062e435d47e7393cc57c25115cfd49883747a \
+ --hash=sha256:6172447e1b368dcbc458925e5ddaf9113477b0ed542df258d84fa28fc45ceea7 \
+ --hash=sha256:6c3020404e0b5eefd7c9485ccf8393cfb75ec38ce75586e046573c9dc29967a0 \
+ --hash=sha256:70051525001750221daa10907c77830bc889cb6d865cc0b813d9db7fefc21451 \
+ --hash=sha256:7905193081db9bfa73b1219140b3d315831cbff0d8941f22da695832f0dd188f \
+ --hash=sha256:7c4855522edb2e6ae7fdb58e07c3ba9111e7621a8956f481c68d5d979c93032e \
+ --hash=sha256:7e4c4629ddad63006efa0ef968c8e4751c5868ff0b1c5c40f76524e894c50248 \
+ --hash=sha256:7f4bf76817c14aa98cc6697ac02f3972cb8c3da93e9ef16b9c66573a68014f91 \
+ --hash=sha256:81de08ac11bcb85841e440c13611c00b67d3bf82698314928d0b676362546724 \
+ --hash=sha256:861bf317735688269936f755fa136a99d1ed526883859f86e41a5d43c61d8966 \
+ --hash=sha256:890b5a14ce214389b2cc36ce82f3093f96f4cc730c1cffdbefff77a7c71f2a97 \
+ --hash=sha256:89f4988c7203739d48c6f806f1e87a1d96e0806d44f0fba61dba81392c9e474d \
+ --hash=sha256:8dadd1314583ec0bf2d1379f7008ad627cd6336625d6679cf2f8e67081b83acf \
+ --hash=sha256:901032ff242d479a0efa956d853d16875d42157f98951c0230f69e69f9c09bac \
+ --hash=sha256:906bc3a79de8c4ae5b86d3d75a8b77e44404b0f4261714306e3ad248d8ab0951 \
+ --hash=sha256:919e32f147ae93a09fe064d77d5ebf4e35502a8df75c29fb05788528e330fe74 \
+ --hash=sha256:929811df5462e182b13920da56c6e0284af407d1de637d8e536c5cd00a7daf60 \
+ --hash=sha256:949f3b7c29912693cee0afcf09acd6ebc04c57af949d9bf77d6101ebb61e388c \
+ --hash=sha256:a090ca607cbb6a34b0391776f0cb48062081f5f60ddcce5d11838e67a01928d1 \
+ --hash=sha256:a1fd8a29719ccce974d523580987b7f8229aeace506952fa9ce1d53a033873c8 \
+ --hash=sha256:a37b8f0391212d29b3a91a799c8e4a2855e0576911cdfb2515487e30e322253d \
+ --hash=sha256:a3daabb76a78f829cafc365531c972016e4aa8d5b4bf60660ad8ecee19df7ccc \
+ --hash=sha256:a469274ad18dc0e4d316eefa616d1d0c2ff9da369af19fa6f3daa4f09671fd61 \
+ --hash=sha256:a599669fd7c47233438a56936988a2478685e74854088ef5293802123b5b2460 \
+ --hash=sha256:a743e5a28af5f70f9c080380a5f908d4d21d40e8f0e0c8901604d15cfa9ba751 \
+ --hash=sha256:a77def80806c421b4b0af06f45d65a136e7ac0bdca3c09d9e2ea4e515367c7e9 \
+ --hash=sha256:aac0411d20e345dc0920bdec5548e438e999ff68d77564d5e9463a7ca9d3e7b1 \
+ --hash=sha256:ae15b066e5ad21366600ebec29a7ccbc86812ed267e4b28e860b8ca16a2bc474 \
+ --hash=sha256:be36e3d172dc816333f33520154d708a2657ea63762ec16b62ece02ab5e4daf2 \
+ --hash=sha256:c8146669223164fc87a7e3de9f81e9423c67a79d6b3447994dfb9c95da16e2d6 \
+ --hash=sha256:c8fd5270e906eef71d4a8d19b7c6a43760c6abcfcc10c9101d14eb2357418de9 \
+ --hash=sha256:caf9ee9a5775f3111642d33b86237b05808dafcd6268faa492250e9b78046eb2 \
+ --hash=sha256:cdad5b9014d83ca68c25d2e9444e28e967ef16e80f6b436918c700c117a85467 \
+ --hash=sha256:cdbc1fc1bc0bff1cef838eafe581b55bfbffaed4ed0318b724d0b71d4d377619 \
+ --hash=sha256:ceb64bbc6eac5a140ca649003756940f8d6a7c444a68af170b3187623b43bebf \
+ --hash=sha256:d0c5516f0aed654134a2fc936325cc2e642f8a0e096d075209672eb321cff408 \
+ --hash=sha256:d143fd47fad1db3d7c27a1b1d66162e855b5d50a89666af46e1679c496e8e579 \
+ --hash=sha256:d192f0f30804e55db0d0e0a35d83a9fead0e9a359a9ed0285dbacea60cc10a84 \
+ --hash=sha256:db85ecf4e609a48f4b29055f1e144231b90edc90af7481aa731ba2d059226b1b \
+ --hash=sha256:de6551e370ef19f8de1807d0a9aa2cdfdce2e85ce88b122fe9f6b2b076837e59 \
+ --hash=sha256:e1140c64812cb9b06c922e77f1c26a75ec5e3f0fb2bf92cc8c58720dec276752 \
+ --hash=sha256:e6a904cb26bfefc2f0a6f240bdf5233be78cd2488900a2f846f3c3ac8489ab80 \
+ --hash=sha256:e84799f09591700a4154154cab9787452925578841a94321d5ee8fb9a9a328f0 \
+ --hash=sha256:e93dfc1a1165e385cc8239fab7c036fb2cd8093728cbd85097b284d7b99249a2 \
+ --hash=sha256:efa8b278894b14d6da122a72fefcebc28445f2d3f880ac59d46c90f4c13be9a3 \
+ --hash=sha256:f0d8a7a6b5983c2496e364b969f0e526647a06b075d034f3297dc66f3b360c64 \
+ --hash=sha256:f296c40e23065d0d6650c4aefe7470d2a25fffda489bcc3eb66083f3ac9f6643 \
+ --hash=sha256:f66b5337fa213f1da0d9000bc8dc0cb5b896b726eefd9c6046f699b169c41b9e \
+ --hash=sha256:f733d788519c7e3e71f0855c96618720f5d3d60c3cb829d8bbb722dddce37985 \
+ --hash=sha256:fce1473f3ccc4187f75b4690cfc922628aed4d3dd013d047f95a9b3919a86596 \
+ --hash=sha256:fd5f17ff8f14003595ab414e45fce13d073e0762394f957182e69035c9f3d7c2 \
+ --hash=sha256:fdc3ff3bfccdc6b9cc7c342c03aa2400683f0cb891d46e94b64a197910dc4064
# via whitenoise
cached-property==1.5.2 \
--hash=sha256:9fa5755838eecbb2d234c3aa390bd80fbd3ac6b6869109bfc1b499f7bd89a130 \
@@ -246,92 +236,105 @@ certifi==2023.5.7 \
# via
# -r requirements/common.in
# requests
-charset-normalizer==3.2.0 \
- --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \
- --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \
- --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \
- --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \
- --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \
- --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \
- --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \
- --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \
- --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \
- --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \
- --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \
- --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \
- --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \
- --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \
- --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \
- --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \
- --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \
- --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \
- --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \
- --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \
- --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \
- --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \
- --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \
- --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \
- --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \
- --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \
- --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \
- --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \
- --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \
- --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \
- --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \
- --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \
- --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \
- --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \
- --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \
- --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \
- --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \
- --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \
- --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \
- --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \
- --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \
- --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \
- --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \
- --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \
- --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \
- --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \
- --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \
- --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \
- --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \
- --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \
- --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \
- --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \
- --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \
- --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \
- --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \
- --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \
- --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \
- --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \
- --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \
- --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \
- --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \
- --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \
- --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \
- --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \
- --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \
- --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \
- --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \
- --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \
- --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \
- --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \
- --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \
- --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \
- --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \
- --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \
- --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa
- # via
- # aiohttp
- # requests
-cleo==2.0.1 \
- --hash=sha256:6eb133670a3ed1f3b052d53789017b6e50fca66d1287e6e6696285f4cb8ea448 \
- --hash=sha256:eb4b2e1f3063c11085cebe489a6e9124163c226575a3c3be69b2e51af4a15ec5
+charset-normalizer==3.3.2 \
+ --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \
+ --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \
+ --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \
+ --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \
+ --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \
+ --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \
+ --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \
+ --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \
+ --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \
+ --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \
+ --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \
+ --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \
+ --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \
+ --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \
+ --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \
+ --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \
+ --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \
+ --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \
+ --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \
+ --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \
+ --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \
+ --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \
+ --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \
+ --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \
+ --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \
+ --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \
+ --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \
+ --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \
+ --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \
+ --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \
+ --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \
+ --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \
+ --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \
+ --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \
+ --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \
+ --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \
+ --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \
+ --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \
+ --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \
+ --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \
+ --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \
+ --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \
+ --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \
+ --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \
+ --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \
+ --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \
+ --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \
+ --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \
+ --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \
+ --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \
+ --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \
+ --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \
+ --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \
+ --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \
+ --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \
+ --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \
+ --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \
+ --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \
+ --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \
+ --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \
+ --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \
+ --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \
+ --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \
+ --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \
+ --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \
+ --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \
+ --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \
+ --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \
+ --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \
+ --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \
+ --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \
+ --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \
+ --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \
+ --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \
+ --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \
+ --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \
+ --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \
+ --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \
+ --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \
+ --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \
+ --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \
+ --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \
+ --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \
+ --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \
+ --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \
+ --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \
+ --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \
+ --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \
+ --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \
+ --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561
+ # via requests
+cleo==2.1.0 \
+ --hash=sha256:0b2c880b5d13660a7ea651001fb4acb527696c01f15c9ee650f377aa543fd523 \
+ --hash=sha256:4a31bd4dd45695a64ee3c4758f583f134267c2bc518d8ae9a29cf237d009b07e
# via mozci
-click==8.1.6 \
- --hash=sha256:48ee849951919527a045bfe3bf7baa8a959c423134e1a5b98c05c20ba75a1cbd \
- --hash=sha256:fa244bb30b3b5ee2cae3da8f55c9e5e0c0e86093306301fb418eb9dc40fbded5
+click==8.1.7 \
+ --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \
+ --hash=sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de
# via
# celery
# click-didyoumean
@@ -386,9 +389,9 @@ djangorestframework==3.14.0 \
--hash=sha256:579a333e6256b09489cbe0a067e66abe55c6595d8926be6b99423786334350c8 \
--hash=sha256:eb63f58c9f218e1a7d064d17a70751f528ed4e1d35547fdade9aaf4cd103fd08
# via -r requirements/common.in
-dockerflow==2022.8.0 \
- --hash=sha256:cebd5e12ff08be43b02ea4fcaf044fb2cd4cec63c93dbfbe6e3c5b610849924c \
- --hash=sha256:fcb95ea8226551e1fd03c3c82f2b11de50434ddfa63cebc164399dabf5c78908
+dockerflow==2024.1.0 \
+ --hash=sha256:38d6a60a01e87d33dcf802f1ef2f09ae2f375c829d3805923d88409387562d66 \
+ --hash=sha256:df1597fb3d58d759993e5b5e7f254162804882c04c09d5b7df97aa47b0a9d15b
# via -r requirements/common.in
ecdsa==0.18.0 \
--hash=sha256:190348041559e21b22a1d65cee485282ca11a6f81d503fddb84d5017e9ed1e49 \
@@ -402,68 +405,84 @@ flake8==4.0.1 \
--hash=sha256:479b1304f72536a55948cb40a32dce8bb0ffe3501e26eaf292c7e60eb5e0428d \
--hash=sha256:806e034dda44114815e23c16ef92f95c91e4c71100ff52813adf7132a6ad870d
# via mozci
-frozenlist==1.4.0 \
- --hash=sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6 \
- --hash=sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01 \
- --hash=sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251 \
- --hash=sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9 \
- --hash=sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b \
- --hash=sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87 \
- --hash=sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf \
- --hash=sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f \
- --hash=sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0 \
- --hash=sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2 \
- --hash=sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b \
- --hash=sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc \
- --hash=sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c \
- --hash=sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467 \
- --hash=sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9 \
- --hash=sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1 \
- --hash=sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a \
- --hash=sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79 \
- --hash=sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167 \
- --hash=sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300 \
- --hash=sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf \
- --hash=sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea \
- --hash=sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2 \
- --hash=sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab \
- --hash=sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3 \
- --hash=sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb \
- --hash=sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087 \
- --hash=sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc \
- --hash=sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8 \
- --hash=sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62 \
- --hash=sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f \
- --hash=sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326 \
- --hash=sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c \
- --hash=sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431 \
- --hash=sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963 \
- --hash=sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7 \
- --hash=sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef \
- --hash=sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3 \
- --hash=sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956 \
- --hash=sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781 \
- --hash=sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472 \
- --hash=sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc \
- --hash=sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839 \
- --hash=sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672 \
- --hash=sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3 \
- --hash=sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503 \
- --hash=sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d \
- --hash=sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8 \
- --hash=sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b \
- --hash=sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc \
- --hash=sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f \
- --hash=sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559 \
- --hash=sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b \
- --hash=sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95 \
- --hash=sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb \
- --hash=sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963 \
- --hash=sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919 \
- --hash=sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f \
- --hash=sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3 \
- --hash=sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1 \
- --hash=sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e
+frozenlist==1.4.1 \
+ --hash=sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7 \
+ --hash=sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98 \
+ --hash=sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad \
+ --hash=sha256:0c250a29735d4f15321007fb02865f0e6b6a41a6b88f1f523ca1596ab5f50bd5 \
+ --hash=sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae \
+ --hash=sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e \
+ --hash=sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a \
+ --hash=sha256:1d0ce09d36d53bbbe566fe296965b23b961764c0bcf3ce2fa45f463745c04701 \
+ --hash=sha256:20b51fa3f588ff2fe658663db52a41a4f7aa6c04f6201449c6c7c476bd255c0d \
+ --hash=sha256:23b2d7679b73fe0e5a4560b672a39f98dfc6f60df63823b0a9970525325b95f6 \
+ --hash=sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6 \
+ --hash=sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106 \
+ --hash=sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75 \
+ --hash=sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868 \
+ --hash=sha256:32453c1de775c889eb4e22f1197fe3bdfe457d16476ea407472b9442e6295f7a \
+ --hash=sha256:3a670dc61eb0d0eb7080890c13de3066790f9049b47b0de04007090807c776b0 \
+ --hash=sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1 \
+ --hash=sha256:410478a0c562d1a5bcc2f7ea448359fcb050ed48b3c6f6f4f18c313a9bdb1826 \
+ --hash=sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec \
+ --hash=sha256:48f6a4533887e189dae092f1cf981f2e3885175f7a0f33c91fb5b7b682b6bab6 \
+ --hash=sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950 \
+ --hash=sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19 \
+ --hash=sha256:55fdc093b5a3cb41d420884cdaf37a1e74c3c37a31f46e66286d9145d2063bd0 \
+ --hash=sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8 \
+ --hash=sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a \
+ --hash=sha256:5a7d70357e7cee13f470c7883a063aae5fe209a493c57d86eb7f5a6f910fae09 \
+ --hash=sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86 \
+ --hash=sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c \
+ --hash=sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5 \
+ --hash=sha256:693945278a31f2086d9bf3df0fe8254bbeaef1fe71e1351c3bd730aa7d31c41b \
+ --hash=sha256:6db4667b187a6742b33afbbaf05a7bc551ffcf1ced0000a571aedbb4aa42fc7b \
+ --hash=sha256:6eb73fa5426ea69ee0e012fb59cdc76a15b1283d6e32e4f8dc4482ec67d1194d \
+ --hash=sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0 \
+ --hash=sha256:7268252af60904bf52c26173cbadc3a071cece75f873705419c8681f24d3edea \
+ --hash=sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776 \
+ --hash=sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a \
+ --hash=sha256:82e8211d69a4f4bc360ea22cd6555f8e61a1bd211d1d5d39d3d228b48c83a897 \
+ --hash=sha256:89aa2c2eeb20957be2d950b85974b30a01a762f3308cd02bb15e1ad632e22dc7 \
+ --hash=sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09 \
+ --hash=sha256:96ec70beabbd3b10e8bfe52616a13561e58fe84c0101dd031dc78f250d5128b9 \
+ --hash=sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe \
+ --hash=sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd \
+ --hash=sha256:9d3e0c25a2350080e9319724dede4f31f43a6c9779be48021a7f4ebde8b2d742 \
+ --hash=sha256:a06339f38e9ed3a64e4c4e43aec7f59084033647f908e4259d279a52d3757d09 \
+ --hash=sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0 \
+ --hash=sha256:a7496bfe1da7fb1a4e1cc23bb67c58fab69311cc7d32b5a99c2007b4b2a0e932 \
+ --hash=sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1 \
+ --hash=sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a \
+ --hash=sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49 \
+ --hash=sha256:b7e3ed87d4138356775346e6845cccbe66cd9e207f3cd11d2f0b9fd13681359d \
+ --hash=sha256:b7f2f9f912dca3934c1baec2e4585a674ef16fe00218d833856408c48d5beee7 \
+ --hash=sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480 \
+ --hash=sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89 \
+ --hash=sha256:bfa4a17e17ce9abf47a74ae02f32d014c5e9404b6d9ac7f729e01562bbee601e \
+ --hash=sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b \
+ --hash=sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82 \
+ --hash=sha256:c6321c9efe29975232da3bd0af0ad216800a47e93d763ce64f291917a381b8eb \
+ --hash=sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068 \
+ --hash=sha256:c99169d4ff810155ca50b4da3b075cbde79752443117d89429595c2e8e37fed8 \
+ --hash=sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b \
+ --hash=sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb \
+ --hash=sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2 \
+ --hash=sha256:dca69045298ce5c11fd539682cff879cc1e664c245d1c64da929813e54241d11 \
+ --hash=sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b \
+ --hash=sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc \
+ --hash=sha256:e6a20a581f9ce92d389a8c7d7c3dd47c81fd5d6e655c8dddf341e14aa48659d0 \
+ --hash=sha256:e7004be74cbb7d9f34553a5ce5fb08be14fb33bc86f332fb71cbe5216362a497 \
+ --hash=sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17 \
+ --hash=sha256:edb678da49d9f72c9f6c609fbe41a5dfb9a9282f9e6a2253d5a91e0fc382d7c0 \
+ --hash=sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2 \
+ --hash=sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439 \
+ --hash=sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5 \
+ --hash=sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac \
+ --hash=sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825 \
+ --hash=sha256:fbeb989b5cc29e8daf7f976b421c220f1b8c731cbf22b9130d8815418ea45887 \
+ --hash=sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced \
+ --hash=sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74
# via
# aiohttp
# aiosignal
@@ -475,9 +494,9 @@ gunicorn==20.1.0 \
--hash=sha256:9dcc4547dbb1cb284accfb15ab5667a0e5d1881cc443e0677b4882a4067a807e \
--hash=sha256:e0a968b5ba15f8a328fdfd7ab1fcb5af4470c28aaf7e55df02a99bc13138e6e8
# via -r requirements/common.in
-idna==3.4 \
- --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
- --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
+idna==3.6 \
+ --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \
+ --hash=sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f
# via
# requests
# yarl
@@ -501,153 +520,162 @@ jsonschema==4.17.3 \
--hash=sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d \
--hash=sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6
# via -r requirements/common.in
-kombu==5.3.1 \
- --hash=sha256:48ee589e8833126fd01ceaa08f8a2041334e9f5894e5763c8486a550454551e9 \
- --hash=sha256:fbd7572d92c0bf71c112a6b45163153dea5a7b6a701ec16b568c27d0fd2370f2
+kombu==5.3.5 \
+ --hash=sha256:0eac1bbb464afe6fb0924b21bf79460416d25d8abc52546d4f16cad94f789488 \
+ --hash=sha256:30e470f1a6b49c70dc6f6d13c3e4cc4e178aa6c469ceb6bcd55645385fc84b93
# via celery
-loguru==0.7.0 \
- --hash=sha256:1612053ced6ae84d7959dd7d5e431a0532642237ec21f7fd83ac73fe539e03e1 \
- --hash=sha256:b93aa30099fa6860d4727f1b81f8718e965bb96253fa190fab2077aaad6d15d3
+loguru==0.7.2 \
+ --hash=sha256:003d71e3d3ed35f0f8984898359d65b79e5b21943f78af86aa5491210429b8eb \
+ --hash=sha256:e671a53522515f34fd406340ee968cb9ecafbc4b36c679da03c18fd8d0bd51ac
# via mozci
-lru-dict==1.2.0 \
- --hash=sha256:00f6e8a3fc91481b40395316a14c94daa0f0a5de62e7e01a7d589f8d29224052 \
- --hash=sha256:020b93870f8c7195774cbd94f033b96c14f51c57537969965c3af300331724fe \
- --hash=sha256:05fb8744f91f58479cbe07ed80ada6696ec7df21ea1740891d4107a8dd99a970 \
- --hash=sha256:086ce993414f0b28530ded7e004c77dc57c5748fa6da488602aa6e7f79e6210e \
- --hash=sha256:0c316dfa3897fabaa1fe08aae89352a3b109e5f88b25529bc01e98ac029bf878 \
- --hash=sha256:0facf49b053bf4926d92d8d5a46fe07eecd2af0441add0182c7432d53d6da667 \
- --hash=sha256:1171ad3bff32aa8086778be4a3bdff595cc2692e78685bcce9cb06b96b22dcc2 \
- --hash=sha256:1184d91cfebd5d1e659d47f17a60185bbf621635ca56dcdc46c6a1745d25df5c \
- --hash=sha256:13c56782f19d68ddf4d8db0170041192859616514c706b126d0df2ec72a11bd7 \
- --hash=sha256:18ee88ada65bd2ffd483023be0fa1c0a6a051ef666d1cd89e921dcce134149f2 \
- --hash=sha256:203b3e78d03d88f491fa134f85a42919020686b6e6f2d09759b2f5517260c651 \
- --hash=sha256:20f5f411f7751ad9a2c02e80287cedf69ae032edd321fe696e310d32dd30a1f8 \
- --hash=sha256:21b3090928c7b6cec509e755cc3ab742154b33660a9b433923bd12c37c448e3e \
- --hash=sha256:22147367b296be31cc858bf167c448af02435cac44806b228c9be8117f1bfce4 \
- --hash=sha256:231d7608f029dda42f9610e5723614a35b1fff035a8060cf7d2be19f1711ace8 \
- --hash=sha256:25f9e0bc2fe8f41c2711ccefd2871f8a5f50a39e6293b68c3dec576112937aad \
- --hash=sha256:287c2115a59c1c9ed0d5d8ae7671e594b1206c36ea9df2fca6b17b86c468ff99 \
- --hash=sha256:291d13f85224551913a78fe695cde04cbca9dcb1d84c540167c443eb913603c9 \
- --hash=sha256:312b6b2a30188586fe71358f0f33e4bac882d33f5e5019b26f084363f42f986f \
- --hash=sha256:34a3091abeb95e707f381a8b5b7dc8e4ee016316c659c49b726857b0d6d1bd7a \
- --hash=sha256:35a142a7d1a4fd5d5799cc4f8ab2fff50a598d8cee1d1c611f50722b3e27874f \
- --hash=sha256:3838e33710935da2ade1dd404a8b936d571e29268a70ff4ca5ba758abb3850df \
- --hash=sha256:5345bf50e127bd2767e9fd42393635bbc0146eac01f6baf6ef12c332d1a6a329 \
- --hash=sha256:5919dd04446bc1ee8d6ecda2187deeebfff5903538ae71083e069bc678599446 \
- --hash=sha256:59f3df78e94e07959f17764e7fa7ca6b54e9296953d2626a112eab08e1beb2db \
- --hash=sha256:5b172fce0a0ffc0fa6d282c14256d5a68b5db1e64719c2915e69084c4b6bf555 \
- --hash=sha256:5c6acbd097b15bead4de8e83e8a1030bb4d8257723669097eac643a301a952f0 \
- --hash=sha256:5d90a70c53b0566084447c3ef9374cc5a9be886e867b36f89495f211baabd322 \
- --hash=sha256:604d07c7604b20b3130405d137cae61579578b0e8377daae4125098feebcb970 \
- --hash=sha256:6b7a031e47421d4b7aa626b8c91c180a9f037f89e5d0a71c4bb7afcf4036c774 \
- --hash=sha256:6da5b8099766c4da3bf1ed6e7d7f5eff1681aff6b5987d1258a13bd2ed54f0c9 \
- --hash=sha256:712e71b64da181e1c0a2eaa76cd860265980cd15cb0e0498602b8aa35d5db9f8 \
- --hash=sha256:71da89e134747e20ed5b8ad5b4ee93fc5b31022c2b71e8176e73c5a44699061b \
- --hash=sha256:756230c22257597b7557eaef7f90484c489e9ba78e5bb6ab5a5bcfb6b03cb075 \
- --hash=sha256:7d3336e901acec897bcd318c42c2b93d5f1d038e67688f497045fc6bad2c0be7 \
- --hash=sha256:7e51fa6a203fa91d415f3b2900e5748ec8e06ad75777c98cc3aeb3983ca416d7 \
- --hash=sha256:877801a20f05c467126b55338a4e9fa30e2a141eb7b0b740794571b7d619ee11 \
- --hash=sha256:87bbad3f5c3de8897b8c1263a9af73bbb6469fb90e7b57225dad89b8ef62cd8d \
- --hash=sha256:8bda3a9afd241ee0181661decaae25e5336ce513ac268ab57da737eacaa7871f \
- --hash=sha256:8dafc481d2defb381f19b22cc51837e8a42631e98e34b9e0892245cc96593deb \
- --hash=sha256:91d577a11b84387013815b1ad0bb6e604558d646003b44c92b3ddf886ad0f879 \
- --hash=sha256:981ef3edc82da38d39eb60eae225b88a538d47b90cce2e5808846fd2cf64384b \
- --hash=sha256:987b73a06bcf5a95d7dc296241c6b1f9bc6cda42586948c9dabf386dc2bef1cd \
- --hash=sha256:9e4c85aa8844bdca3c8abac3b7f78da1531c74e9f8b3e4890c6e6d86a5a3f6c0 \
- --hash=sha256:a3ea7571b6bf2090a85ff037e6593bbafe1a8598d5c3b4560eb56187bcccb4dc \
- --hash=sha256:a87bdc291718bbdf9ea4be12ae7af26cbf0706fa62c2ac332748e3116c5510a7 \
- --hash=sha256:aaecd7085212d0aa4cd855f38b9d61803d6509731138bf798a9594745953245b \
- --hash=sha256:ae301c282a499dc1968dd633cfef8771dd84228ae9d40002a3ea990e4ff0c469 \
- --hash=sha256:afdadd73304c9befaed02eb42f5f09fdc16288de0a08b32b8080f0f0f6350aa6 \
- --hash=sha256:b20b7c9beb481e92e07368ebfaa363ed7ef61e65ffe6e0edbdbaceb33e134124 \
- --hash=sha256:b30122e098c80e36d0117810d46459a46313421ce3298709170b687dc1240b02 \
- --hash=sha256:b55753ee23028ba8644fd22e50de7b8f85fa60b562a0fafaad788701d6131ff8 \
- --hash=sha256:b5ccfd2291c93746a286c87c3f895165b697399969d24c54804ec3ec559d4e43 \
- --hash=sha256:b6613daa851745dd22b860651de930275be9d3e9373283a2164992abacb75b62 \
- --hash=sha256:b710f0f4d7ec4f9fa89dfde7002f80bcd77de8024017e70706b0911ea086e2ef \
- --hash=sha256:b9ec7a4a0d6b8297102aa56758434fb1fca276a82ed7362e37817407185c3abb \
- --hash=sha256:bb12f19cdf9c4f2d9aa259562e19b188ff34afab28dd9509ff32a3f1c2c29326 \
- --hash=sha256:bd2cd1b998ea4c8c1dad829fc4fa88aeed4dee555b5e03c132fc618e6123f168 \
- --hash=sha256:c4da599af36618881748b5db457d937955bb2b4800db891647d46767d636c408 \
- --hash=sha256:c53b12b89bd7a6c79f0536ff0d0a84fdf4ab5f6252d94b24b9b753bd9ada2ddf \
- --hash=sha256:c9617583173a29048e11397f165501edc5ae223504a404b2532a212a71ecc9ed \
- --hash=sha256:cd46c94966f631a81ffe33eee928db58e9fbee15baba5923d284aeadc0e0fa76 \
- --hash=sha256:cd6806313606559e6c7adfa0dbeb30fc5ab625f00958c3d93f84831e7a32b71e \
- --hash=sha256:d0dd4cd58220351233002f910e35cc01d30337696b55c6578f71318b137770f9 \
- --hash=sha256:d0f7ec902a0097ac39f1922c89be9eaccf00eb87751e28915320b4f72912d057 \
- --hash=sha256:d5bb41bc74b321789803d45b124fc2145c1b3353b4ad43296d9d1d242574969b \
- --hash=sha256:d7ab0c10c4fa99dc9e26b04e6b62ac32d2bcaea3aad9b81ec8ce9a7aa32b7b1b \
- --hash=sha256:de24b47159e07833aeab517d9cb1c3c5c2d6445cc378b1c2f1d8d15fb4841d63 \
- --hash=sha256:de906e5486b5c053d15b7731583c25e3c9147c288ac8152a6d1f9bccdec72641 \
- --hash=sha256:df25a426446197488a6702954dcc1de511deee20c9db730499a2aa83fddf0df1 \
- --hash=sha256:e25b2e90a032dc248213af7f3f3e975e1934b204f3b16aeeaeaff27a3b65e128 \
- --hash=sha256:e707d93bae8f0a14e6df1ae8b0f076532b35f00e691995f33132d806a88e5c18 \
- --hash=sha256:ea2ac3f7a7a2f32f194c84d82a034e66780057fd908b421becd2f173504d040e \
- --hash=sha256:ead83ac59a29d6439ddff46e205ce32f8b7f71a6bd8062347f77e232825e3d0a \
- --hash=sha256:edad398d5d402c43d2adada390dd83c74e46e020945ff4df801166047013617e \
- --hash=sha256:f010cfad3ab10676e44dc72a813c968cd586f37b466d27cde73d1f7f1ba158c2 \
- --hash=sha256:f404dcc8172da1f28da9b1f0087009578e608a4899b96d244925c4f463201f2a \
- --hash=sha256:f54908bf91280a9b8fa6a8c8f3c2f65850ce6acae2852bbe292391628ebca42f \
- --hash=sha256:f5d5a5f976b39af73324f2b793862859902ccb9542621856d51a5993064f25e4 \
- --hash=sha256:f9484016e6765bd295708cccc9def49f708ce07ac003808f69efa386633affb9 \
- --hash=sha256:fbf36c5a220a85187cacc1fcb7dd87070e04b5fc28df7a43f6842f7c8224a388 \
- --hash=sha256:fc42882b554a86e564e0b662da47b8a4b32fa966920bd165e27bb8079a323bc1
+lru-dict==1.3.0 \
+ --hash=sha256:0213ab4e3d9a8d386c18e485ad7b14b615cb6f05df6ef44fb2a0746c6ea9278b \
+ --hash=sha256:04cda617f4e4c27009005d0a8185ef02829b14b776d2791f5c994cc9d668bc24 \
+ --hash=sha256:0ad6361e4dd63b47b2fc8eab344198f37387e1da3dcfacfee19bafac3ec9f1eb \
+ --hash=sha256:0e1845024c31e6ff246c9eb5e6f6f1a8bb564c06f8a7d6d031220044c081090b \
+ --hash=sha256:0e88dba16695f17f41701269fa046197a3fd7b34a8dba744c8749303ddaa18df \
+ --hash=sha256:0fce5f95489ca1fc158cc9fe0f4866db9cec82c2be0470926a9080570392beaf \
+ --hash=sha256:1470f5828c7410e16c24b5150eb649647986e78924816e6fb0264049dea14a2b \
+ --hash=sha256:170b66d29945391460351588a7bd8210a95407ae82efe0b855e945398a1d24ea \
+ --hash=sha256:1958cb70b9542773d6241974646e5410e41ef32e5c9e437d44040d59bd80daf2 \
+ --hash=sha256:1ecb7ae557239c64077e9b26a142eb88e63cddb104111a5122de7bebbbd00098 \
+ --hash=sha256:20c595764695d20bdc3ab9b582e0cc99814da183544afb83783a36d6741a0dac \
+ --hash=sha256:2682bfca24656fb7a643621520d57b7fe684ed5fa7be008704c1235d38e16a32 \
+ --hash=sha256:2789296819525a1f3204072dfcf3df6db8bcf69a8fc740ffd3de43a684ea7002 \
+ --hash=sha256:28aa1ea42a7e48174bf513dc2416fea7511a547961e678dc6f5670ca987c18cb \
+ --hash=sha256:2a47740652b25900ac5ce52667b2eade28d8b5fdca0ccd3323459df710e8210a \
+ --hash=sha256:350e2233cfee9f326a0d7a08e309372d87186565e43a691b120006285a0ac549 \
+ --hash=sha256:3b4f121afe10f5a82b8e317626eb1e1c325b3f104af56c9756064cd833b1950b \
+ --hash=sha256:3c497fb60279f1e1d7dfbe150b1b069eaa43f7e172dab03f206282f4994676c5 \
+ --hash=sha256:3ca5474b1649555d014be1104e5558a92497509021a5ba5ea6e9b492303eb66b \
+ --hash=sha256:3cb1de0ce4137b060abaafed8474cc0ebd12cedd88aaa7f7b3ebb1ddfba86ae0 \
+ --hash=sha256:4073333894db9840f066226d50e6f914a2240711c87d60885d8c940b69a6673f \
+ --hash=sha256:40a8daddc29c7edb09dfe44292cf111f1e93a8344349778721d430d336b50505 \
+ --hash=sha256:4eafb188a84483b3231259bf19030859f070321b00326dcb8e8c6cbf7db4b12f \
+ --hash=sha256:5247d1f011f92666010942434020ddc5a60951fefd5d12a594f0e5d9f43e3b3b \
+ --hash=sha256:54fd1966d6bd1fcde781596cb86068214edeebff1db13a2cea11079e3fd07b6b \
+ --hash=sha256:5ad659cbc349d0c9ba8e536b5f40f96a70c360f43323c29f4257f340d891531c \
+ --hash=sha256:6123aefe97762ad74215d05320a7f389f196f0594c8813534284d4eafeca1a96 \
+ --hash=sha256:64545fca797fe2c68c5168efb5f976c6e1459e058cab02445207a079180a3557 \
+ --hash=sha256:6a03170e4152836987a88dcebde61aaeb73ab7099a00bb86509d45b3fe424230 \
+ --hash=sha256:6af36166d22dba851e06a13e35bbf33845d3dd88872e6aebbc8e3e7db70f4682 \
+ --hash=sha256:6bba2863060caeaedd8386b0c8ee9a7ce4d57a7cb80ceeddf440b4eff2d013ba \
+ --hash=sha256:6cb0be5e79c3f34d69b90d8559f0221e374b974b809a22377122c4b1a610ff67 \
+ --hash=sha256:6ffaf595e625b388babc8e7d79b40f26c7485f61f16efe76764e32dce9ea17fc \
+ --hash=sha256:73593791047e36b37fdc0b67b76aeed439fcea80959c7d46201240f9ec3b2563 \
+ --hash=sha256:774ca88501a9effe8797c3db5a6685cf20978c9cb0fe836b6813cfe1ca60d8c9 \
+ --hash=sha256:784ca9d3b0730b3ec199c0a58f66264c63dd5d438119c739c349a6a9be8e5f6e \
+ --hash=sha256:7969cb034b3ccc707aff877c73c225c32d7e2a7981baa8f92f5dd4d468fe8c33 \
+ --hash=sha256:7ffbce5c2e80f57937679553c8f27e61ec327c962bf7ea0b15f1d74277fd5363 \
+ --hash=sha256:82eb230d48eaebd6977a92ddaa6d788f14cf4f4bcf5bbffa4ddfd60d051aa9d4 \
+ --hash=sha256:8551ccab1349d4bebedab333dfc8693c74ff728f4b565fe15a6bf7d296bd7ea9 \
+ --hash=sha256:8d9509d817a47597988615c1a322580c10100acad10c98dfcf3abb41e0e5877f \
+ --hash=sha256:8ee38d420c77eed548df47b7d74b5169a98e71c9e975596e31ab808e76d11f09 \
+ --hash=sha256:9537e1cee6fa582cb68f2fb9ce82d51faf2ccc0a638b275d033fdcb1478eb80b \
+ --hash=sha256:96fc87ddf569181827458ec5ad8fa446c4690cffacda66667de780f9fcefd44d \
+ --hash=sha256:9710737584650a4251b9a566cbb1a86f83437adb209c9ba43a4e756d12faf0d7 \
+ --hash=sha256:9bd13af06dab7c6ee92284fd02ed9a5613a07d5c1b41948dc8886e7207f86dfd \
+ --hash=sha256:9f725f2a0bdf1c18735372d5807af4ea3b77888208590394d4660e3d07971f21 \
+ --hash=sha256:a193a14c66cfc0c259d05dddc5e566a4b09e8f1765e941503d065008feebea9d \
+ --hash=sha256:a1efc59bfba6aac33684d87b9e02813b0e2445b2f1c444dae2a0b396ad0ed60c \
+ --hash=sha256:a3c9f746a9917e784fffcedeac4c8c47a3dbd90cbe13b69e9140182ad97ce4b7 \
+ --hash=sha256:a690c23fc353681ed8042d9fe8f48f0fb79a57b9a45daea2f0be1eef8a1a4aa4 \
+ --hash=sha256:a9fb71ba262c6058a0017ce83d343370d0a0dbe2ae62c2eef38241ec13219330 \
+ --hash=sha256:abd0c284b26b5c4ee806ca4f33ab5e16b4bf4d5ec9e093e75a6f6287acdde78e \
+ --hash=sha256:acd04b7e7b0c0c192d738df9c317093335e7282c64c9d1bb6b7ebb54674b4e24 \
+ --hash=sha256:b2bf2e24cf5f19c3ff69bf639306e83dced273e6fa775b04e190d7f5cd16f794 \
+ --hash=sha256:b50fbd69cd3287196796ab4d50e4cc741eb5b5a01f89d8e930df08da3010c385 \
+ --hash=sha256:b84c321ae34f2f40aae80e18b6fa08b31c90095792ab64bb99d2e385143effaa \
+ --hash=sha256:ba490b8972531d153ac0d4e421f60d793d71a2f4adbe2f7740b3c55dce0a12f1 \
+ --hash=sha256:bc1cd3ed2cee78a47f11f3b70be053903bda197a873fd146e25c60c8e5a32cd6 \
+ --hash=sha256:c0131351b8a7226c69f1eba5814cbc9d1d8daaf0fdec1ae3f30508e3de5262d4 \
+ --hash=sha256:c265f16c936a8ff3bb4b8a4bda0be94c15ec28b63e99fdb1439c1ffe4cd437db \
+ --hash=sha256:c279068f68af3b46a5d649855e1fb87f5705fe1f744a529d82b2885c0e1fc69d \
+ --hash=sha256:c637ab54b8cd9802fe19b260261e38820d748adf7606e34045d3c799b6dde813 \
+ --hash=sha256:c95f8751e2abd6f778da0399c8e0239321d560dbc58cb063827123137d213242 \
+ --hash=sha256:ca3703ff03b03a1848c563bc2663d0ad813c1cd42c4d9cf75b623716d4415d9a \
+ --hash=sha256:ca9ab676609cce85dd65d91c275e47da676d13d77faa72de286fbea30fbaa596 \
+ --hash=sha256:cd869cadba9a63e1e7fe2dced4a5747d735135b86016b0a63e8c9e324ab629ac \
+ --hash=sha256:cf9da32ef2582434842ab6ba6e67290debfae72771255a8e8ab16f3e006de0aa \
+ --hash=sha256:cfaf75ac574447afcf8ad998789071af11d2bcf6f947643231f692948839bd98 \
+ --hash=sha256:d9b30a8f50c3fa72a494eca6be5810a1b5c89e4f0fda89374f0d1c5ad8d37d51 \
+ --hash=sha256:dcec98e2c7da7631f0811730303abc4bdfe70d013f7a11e174a2ccd5612a7c59 \
+ --hash=sha256:df2e119c6ae412d2fd641a55f8a1e2e51f45a3de3449c18b1b86c319ab79e0c4 \
+ --hash=sha256:e13b2f58f647178470adaa14603bb64cc02eeed32601772ccea30e198252883c \
+ --hash=sha256:e5c20f236f27551e3f0adbf1a987673fb1e9c38d6d284502cd38f5a3845ef681 \
+ --hash=sha256:e90059f7701bef3c4da073d6e0434a9c7dc551d5adce30e6b99ef86b186f4b4a \
+ --hash=sha256:ebb03a9bd50c2ed86d4f72a54e0aae156d35a14075485b2127c4b01a3f4a63fa \
+ --hash=sha256:eed24272b4121b7c22f234daed99899817d81d671b3ed030c876ac88bc9dc890 \
+ --hash=sha256:efd3f4e0385d18f20f7ea6b08af2574c1bfaa5cb590102ef1bee781bdfba84bc \
+ --hash=sha256:f27c078b5d75989952acbf9b77e14c3dadc468a4aafe85174d548afbc5efc38b \
+ --hash=sha256:f5b88a7c39e307739a3701194993455968fcffe437d1facab93546b1b8a334c1 \
+ --hash=sha256:f8f7824db5a64581180ab9d09842e6dd9fcdc46aac9cb592a0807cd37ea55680
# via mozci
-markdown2==2.4.9 \
- --hash=sha256:58e1789543f47cdd4197760b04771671411f07699f958ad40a4b56c55ba3e668 \
- --hash=sha256:7a1742dade7ec29b90f5c1d5a820eb977eee597e314c428e6b0aa7929417cd1b
+markdown2==2.4.12 \
+ --hash=sha256:1bc8692696954d597778e0e25713c14ca56d87992070dedd95c17eddaf709204 \
+ --hash=sha256:98f47591006f0ace0644cbece03fed6f3845513286f6c6e9f8bcf6a575174e2c
# via mozci
-markupsafe==2.1.3 \
- --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \
- --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \
- --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \
- --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \
- --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \
- --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \
- --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \
- --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \
- --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \
- --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \
- --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \
- --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \
- --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \
- --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \
- --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \
- --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \
- --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \
- --hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \
- --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \
- --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \
- --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \
- --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \
- --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \
- --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \
- --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \
- --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \
- --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \
- --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \
- --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \
- --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \
- --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \
- --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \
- --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \
- --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \
- --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \
- --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \
- --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \
- --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \
- --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \
- --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \
- --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \
- --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \
- --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \
- --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \
- --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \
- --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \
- --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \
- --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \
- --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \
- --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2
+markupsafe==2.1.4 \
+ --hash=sha256:0042d6a9880b38e1dd9ff83146cc3c9c18a059b9360ceae207805567aacccc69 \
+ --hash=sha256:0c26f67b3fe27302d3a412b85ef696792c4a2386293c53ba683a89562f9399b0 \
+ --hash=sha256:0fbad3d346df8f9d72622ac71b69565e621ada2ce6572f37c2eae8dacd60385d \
+ --hash=sha256:15866d7f2dc60cfdde12ebb4e75e41be862348b4728300c36cdf405e258415ec \
+ --hash=sha256:1c98c33ffe20e9a489145d97070a435ea0679fddaabcafe19982fe9c971987d5 \
+ --hash=sha256:21e7af8091007bf4bebf4521184f4880a6acab8df0df52ef9e513d8e5db23411 \
+ --hash=sha256:23984d1bdae01bee794267424af55eef4dfc038dc5d1272860669b2aa025c9e3 \
+ --hash=sha256:31f57d64c336b8ccb1966d156932f3daa4fee74176b0fdc48ef580be774aae74 \
+ --hash=sha256:3583a3a3ab7958e354dc1d25be74aee6228938312ee875a22330c4dc2e41beb0 \
+ --hash=sha256:36d7626a8cca4d34216875aee5a1d3d654bb3dac201c1c003d182283e3205949 \
+ --hash=sha256:396549cea79e8ca4ba65525470d534e8a41070e6b3500ce2414921099cb73e8d \
+ --hash=sha256:3a66c36a3864df95e4f62f9167c734b3b1192cb0851b43d7cc08040c074c6279 \
+ --hash=sha256:3aae9af4cac263007fd6309c64c6ab4506dd2b79382d9d19a1994f9240b8db4f \
+ --hash=sha256:3ab3a886a237f6e9c9f4f7d272067e712cdb4efa774bef494dccad08f39d8ae6 \
+ --hash=sha256:47bb5f0142b8b64ed1399b6b60f700a580335c8e1c57f2f15587bd072012decc \
+ --hash=sha256:49a3b78a5af63ec10d8604180380c13dcd870aba7928c1fe04e881d5c792dc4e \
+ --hash=sha256:4df98d4a9cd6a88d6a585852f56f2155c9cdb6aec78361a19f938810aa020954 \
+ --hash=sha256:5045e892cfdaecc5b4c01822f353cf2c8feb88a6ec1c0adef2a2e705eef0f656 \
+ --hash=sha256:5244324676254697fe5c181fc762284e2c5fceeb1c4e3e7f6aca2b6f107e60dc \
+ --hash=sha256:54635102ba3cf5da26eb6f96c4b8c53af8a9c0d97b64bdcb592596a6255d8518 \
+ --hash=sha256:54a7e1380dfece8847c71bf7e33da5d084e9b889c75eca19100ef98027bd9f56 \
+ --hash=sha256:55d03fea4c4e9fd0ad75dc2e7e2b6757b80c152c032ea1d1de487461d8140efc \
+ --hash=sha256:698e84142f3f884114ea8cf83e7a67ca8f4ace8454e78fe960646c6c91c63bfa \
+ --hash=sha256:6aa5e2e7fc9bc042ae82d8b79d795b9a62bd8f15ba1e7594e3db243f158b5565 \
+ --hash=sha256:7653fa39578957bc42e5ebc15cf4361d9e0ee4b702d7d5ec96cdac860953c5b4 \
+ --hash=sha256:765f036a3d00395a326df2835d8f86b637dbaf9832f90f5d196c3b8a7a5080cb \
+ --hash=sha256:78bc995e004681246e85e28e068111a4c3f35f34e6c62da1471e844ee1446250 \
+ --hash=sha256:7a07f40ef8f0fbc5ef1000d0c78771f4d5ca03b4953fc162749772916b298fc4 \
+ --hash=sha256:8b570a1537367b52396e53325769608f2a687ec9a4363647af1cded8928af959 \
+ --hash=sha256:987d13fe1d23e12a66ca2073b8d2e2a75cec2ecb8eab43ff5624ba0ad42764bc \
+ --hash=sha256:9896fca4a8eb246defc8b2a7ac77ef7553b638e04fbf170bff78a40fa8a91474 \
+ --hash=sha256:9e9e3c4020aa2dc62d5dd6743a69e399ce3de58320522948af6140ac959ab863 \
+ --hash=sha256:a0b838c37ba596fcbfca71651a104a611543077156cb0a26fe0c475e1f152ee8 \
+ --hash=sha256:a4d176cfdfde84f732c4a53109b293d05883e952bbba68b857ae446fa3119b4f \
+ --hash=sha256:a76055d5cb1c23485d7ddae533229039b850db711c554a12ea64a0fd8a0129e2 \
+ --hash=sha256:a76cd37d229fc385738bd1ce4cba2a121cf26b53864c1772694ad0ad348e509e \
+ --hash=sha256:a7cc49ef48a3c7a0005a949f3c04f8baa5409d3f663a1b36f0eba9bfe2a0396e \
+ --hash=sha256:abf5ebbec056817057bfafc0445916bb688a255a5146f900445d081db08cbabb \
+ --hash=sha256:b0fe73bac2fed83839dbdbe6da84ae2a31c11cfc1c777a40dbd8ac8a6ed1560f \
+ --hash=sha256:b6f14a9cd50c3cb100eb94b3273131c80d102e19bb20253ac7bd7336118a673a \
+ --hash=sha256:b83041cda633871572f0d3c41dddd5582ad7d22f65a72eacd8d3d6d00291df26 \
+ --hash=sha256:b835aba863195269ea358cecc21b400276747cc977492319fd7682b8cd2c253d \
+ --hash=sha256:bf1196dcc239e608605b716e7b166eb5faf4bc192f8a44b81e85251e62584bd2 \
+ --hash=sha256:c669391319973e49a7c6230c218a1e3044710bc1ce4c8e6eb71f7e6d43a2c131 \
+ --hash=sha256:c7556bafeaa0a50e2fe7dc86e0382dea349ebcad8f010d5a7dc6ba568eaaa789 \
+ --hash=sha256:c8f253a84dbd2c63c19590fa86a032ef3d8cc18923b8049d91bcdeeb2581fbf6 \
+ --hash=sha256:d18b66fe626ac412d96c2ab536306c736c66cf2a31c243a45025156cc190dc8a \
+ --hash=sha256:d5291d98cd3ad9a562883468c690a2a238c4a6388ab3bd155b0c75dd55ece858 \
+ --hash=sha256:d5c31fe855c77cad679b302aabc42d724ed87c043b1432d457f4976add1c2c3e \
+ --hash=sha256:d6e427c7378c7f1b2bef6a344c925b8b63623d3321c09a237b7cc0e77dd98ceb \
+ --hash=sha256:dac1ebf6983148b45b5fa48593950f90ed6d1d26300604f321c74a9ca1609f8e \
+ --hash=sha256:de8153a7aae3835484ac168a9a9bdaa0c5eee4e0bc595503c95d53b942879c84 \
+ --hash=sha256:e1a0d1924a5013d4f294087e00024ad25668234569289650929ab871231668e7 \
+ --hash=sha256:e7902211afd0af05fbadcc9a312e4cf10f27b779cf1323e78d52377ae4b72bea \
+ --hash=sha256:e888ff76ceb39601c59e219f281466c6d7e66bd375b4ec1ce83bcdc68306796b \
+ --hash=sha256:f06e5a9e99b7df44640767842f414ed5d7bedaaa78cd817ce04bbd6fd86e2dd6 \
+ --hash=sha256:f6be2d708a9d0e9b0054856f07ac7070fbe1754be40ca8525d5adccdbda8f475 \
+ --hash=sha256:f9917691f410a2e0897d1ef99619fd3f7dd503647c8ff2475bf90c3cf222ad74 \
+ --hash=sha256:fc1a75aa8f11b87910ffd98de62b29d6520b6d6e8a3de69a70ca34dea85d2a8a \
+ --hash=sha256:fe8512ed897d5daf089e5bd010c3dc03bb1bdae00b35588c49b98268d4a01e00
# via jinja2
mccabe==0.6.1 \
--hash=sha256:ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42 \
@@ -780,32 +808,43 @@ newrelic==8.8.0 \
--hash=sha256:da8f2dc31e182768fe314d8ceb6f42acd09956708846f8ae71f07f044a3aa05e \
--hash=sha256:ef9c178329f8c04f0574908c1f04ff1f18b9eba55b869744583fee3eac48e571
# via -r requirements/common.in
-numpy==1.25.1 \
- --hash=sha256:012097b5b0d00a11070e8f2e261128c44157a8689f7dedcf35576e525893f4fe \
- --hash=sha256:0d3fe3dd0506a28493d82dc3cf254be8cd0d26f4008a417385cbf1ae95b54004 \
- --hash=sha256:0def91f8af6ec4bb94c370e38c575855bf1d0be8a8fbfba42ef9c073faf2cf19 \
- --hash=sha256:1a180429394f81c7933634ae49b37b472d343cccb5bb0c4a575ac8bbc433722f \
- --hash=sha256:1d5d3c68e443c90b38fdf8ef40e60e2538a27548b39b12b73132456847f4b631 \
- --hash=sha256:20e1266411120a4f16fad8efa8e0454d21d00b8c7cee5b5ccad7565d95eb42dd \
- --hash=sha256:247d3ffdd7775bdf191f848be8d49100495114c82c2bd134e8d5d075fb386a1c \
- --hash=sha256:35a9527c977b924042170a0887de727cd84ff179e478481404c5dc66b4170009 \
- --hash=sha256:38eb6548bb91c421261b4805dc44def9ca1a6eef6444ce35ad1669c0f1a3fc5d \
- --hash=sha256:3d7abcdd85aea3e6cdddb59af2350c7ab1ed764397f8eec97a038ad244d2d105 \
- --hash=sha256:41a56b70e8139884eccb2f733c2f7378af06c82304959e174f8e7370af112e09 \
- --hash=sha256:4a90725800caeaa160732d6b31f3f843ebd45d6b5f3eec9e8cc287e30f2805bf \
- --hash=sha256:6b82655dd8efeea69dbf85d00fca40013d7f503212bc5259056244961268b66e \
- --hash=sha256:6c6c9261d21e617c6dc5eacba35cb68ec36bb72adcff0dee63f8fbc899362588 \
- --hash=sha256:77d339465dff3eb33c701430bcb9c325b60354698340229e1dff97745e6b3efa \
- --hash=sha256:791f409064d0a69dd20579345d852c59822c6aa087f23b07b1b4e28ff5880fcb \
- --hash=sha256:9a3a9f3a61480cc086117b426a8bd86869c213fc4072e606f01c4e4b66eb92bf \
- --hash=sha256:c1516db588987450b85595586605742879e50dcce923e8973f79529651545b57 \
- --hash=sha256:c40571fe966393b212689aa17e32ed905924120737194b5d5c1b20b9ed0fb171 \
- --hash=sha256:d412c1697c3853c6fc3cb9751b4915859c7afe6a277c2bf00acf287d56c4e625 \
- --hash=sha256:d5154b1a25ec796b1aee12ac1b22f414f94752c5f94832f14d8d6c9ac40bcca6 \
- --hash=sha256:d736b75c3f2cb96843a5c7f8d8ccc414768d34b0a75f466c05f3a739b406f10b \
- --hash=sha256:e8f6049c4878cb16960fbbfb22105e49d13d752d4d8371b55110941fb3b17800 \
- --hash=sha256:f76aebc3358ade9eacf9bc2bb8ae589863a4f911611694103af05346637df1b7 \
- --hash=sha256:fd67b306320dcadea700a8f79b9e671e607f8696e98ec255915c0c6d6b818503
+numpy==1.26.3 \
+ --hash=sha256:02f98011ba4ab17f46f80f7f8f1c291ee7d855fcef0a5a98db80767a468c85cd \
+ --hash=sha256:0b7e807d6888da0db6e7e75838444d62495e2b588b99e90dd80c3459594e857b \
+ --hash=sha256:12c70ac274b32bc00c7f61b515126c9205323703abb99cd41836e8125ea0043e \
+ --hash=sha256:1666f634cb3c80ccbd77ec97bc17337718f56d6658acf5d3b906ca03e90ce87f \
+ --hash=sha256:18c3319a7d39b2c6a9e3bb75aab2304ab79a811ac0168a671a62e6346c29b03f \
+ --hash=sha256:211ddd1e94817ed2d175b60b6374120244a4dd2287f4ece45d49228b4d529178 \
+ --hash=sha256:21a9484e75ad018974a2fdaa216524d64ed4212e418e0a551a2d83403b0531d3 \
+ --hash=sha256:39763aee6dfdd4878032361b30b2b12593fb445ddb66bbac802e2113eb8a6ac4 \
+ --hash=sha256:3c67423b3703f8fbd90f5adaa37f85b5794d3366948efe9a5190a5f3a83fc34e \
+ --hash=sha256:46f47ee566d98849323f01b349d58f2557f02167ee301e5e28809a8c0e27a2d0 \
+ --hash=sha256:51c7f1b344f302067b02e0f5b5d2daa9ed4a721cf49f070280ac202738ea7f00 \
+ --hash=sha256:5f24750ef94d56ce6e33e4019a8a4d68cfdb1ef661a52cdaee628a56d2437419 \
+ --hash=sha256:697df43e2b6310ecc9d95f05d5ef20eacc09c7c4ecc9da3f235d39e71b7da1e4 \
+ --hash=sha256:6d45b3ec2faed4baca41c76617fcdcfa4f684ff7a151ce6fc78ad3b6e85af0a6 \
+ --hash=sha256:77810ef29e0fb1d289d225cabb9ee6cf4d11978a00bb99f7f8ec2132a84e0166 \
+ --hash=sha256:7ca4f24341df071877849eb2034948459ce3a07915c2734f1abb4018d9c49d7b \
+ --hash=sha256:7f784e13e598e9594750b2ef6729bcd5a47f6cfe4a12cca13def35e06d8163e3 \
+ --hash=sha256:806dd64230dbbfaca8a27faa64e2f414bf1c6622ab78cc4264f7f5f028fee3bf \
+ --hash=sha256:867e3644e208c8922a3be26fc6bbf112a035f50f0a86497f98f228c50c607bb2 \
+ --hash=sha256:8c66d6fec467e8c0f975818c1796d25c53521124b7cfb760114be0abad53a0a2 \
+ --hash=sha256:8ed07a90f5450d99dad60d3799f9c03c6566709bd53b497eb9ccad9a55867f36 \
+ --hash=sha256:9bc6d1a7f8cedd519c4b7b1156d98e051b726bf160715b769106661d567b3f03 \
+ --hash=sha256:9e1591f6ae98bcfac2a4bbf9221c0b92ab49762228f38287f6eeb5f3f55905ce \
+ --hash=sha256:9e87562b91f68dd8b1c39149d0323b42e0082db7ddb8e934ab4c292094d575d6 \
+ --hash=sha256:a7081fd19a6d573e1a05e600c82a1c421011db7935ed0d5c483e9dd96b99cf13 \
+ --hash=sha256:a8474703bffc65ca15853d5fd4d06b18138ae90c17c8d12169968e998e448bb5 \
+ --hash=sha256:af36e0aa45e25c9f57bf684b1175e59ea05d9a7d3e8e87b7ae1a1da246f2767e \
+ --hash=sha256:b1240f767f69d7c4c8a29adde2310b871153df9b26b5cb2b54a561ac85146485 \
+ --hash=sha256:b4d362e17bcb0011738c2d83e0a65ea8ce627057b2fdda37678f4374a382a137 \
+ --hash=sha256:b831295e5472954104ecb46cd98c08b98b49c69fdb7040483aff799a755a7374 \
+ --hash=sha256:b8c275f0ae90069496068c714387b4a0eba5d531aace269559ff2b43655edd58 \
+ --hash=sha256:bdd2b45bf079d9ad90377048e2747a0c82351989a2165821f0c96831b4a2a54b \
+ --hash=sha256:cc0743f0302b94f397a4a65a660d4cd24267439eb16493fb3caad2e4389bccbb \
+ --hash=sha256:da4b0c6c699a0ad73c810736303f7fbae483bcb012e38d7eb06a5e3b432c981b \
+ --hash=sha256:f25e2811a9c932e43943a2615e65fc487a0b6b49218899e62e426e7f0a57eeda \
+ --hash=sha256:f73497e8c38295aaa4741bdfa4fda1a5aedda5473074369eca10626835445511
# via
# moz-measure-noise
# scipy
@@ -813,9 +852,9 @@ orderedmultidict==1.0.1 \
--hash=sha256:04070bbb5e87291cc9bfa51df413677faf2141c73c61d2a5f7b26bea3cd882ad \
--hash=sha256:43c839a17ee3cdd62234c47deca1a8508a3f2ca1d0678a3bf791c87cf84adbf3
# via furl
-prompt-toolkit==3.0.39 \
- --hash=sha256:04505ade687dc26dc4284b1ad19a83be2f2afe83e7a828ace0c72f3a1df72aac \
- --hash=sha256:9dffbe1d8acf91e3de75f3b544e4842382fc06c6babe903ac9acb74dc6e08d88
+prompt-toolkit==3.0.43 \
+ --hash=sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d \
+ --hash=sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6
# via click-repl
psycopg2-binary==2.9.6 \
--hash=sha256:02c0f3757a4300cf379eb49f543fb7ac527fb00144d39246ee40e1df684ab514 \
@@ -881,9 +920,9 @@ psycopg2-binary==2.9.6 \
--hash=sha256:f81e65376e52f03422e1fb475c9514185669943798ed019ac50410fb4c4df232 \
--hash=sha256:ffe9dc0a884a8848075e576c1de0290d85a533a9f6e9c4e564f19adf8f6e54a7
# via -r requirements/common.in
-pyasn1==0.5.0 \
- --hash=sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57 \
- --hash=sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde
+pyasn1==0.5.1 \
+ --hash=sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58 \
+ --hash=sha256:6d391a96e59b23130a5cfa74d6fd7f388dbbe26cc8f1edf39fdddf08d9d6676c
# via
# python-jose
# rsa
@@ -891,72 +930,77 @@ pycodestyle==2.8.0 \
--hash=sha256:720f8b39dde8b293825e7ff02c475f3077124006db4f440dcbc9a20b76548a20 \
--hash=sha256:eddd5847ef438ea1c7870ca7eb78a9d47ce0cdb4851a5523949f2601d0cbbe7f
# via flake8
-pycryptodome==3.18.0 \
- --hash=sha256:01489bbdf709d993f3058e2996f8f40fee3f0ea4d995002e5968965fa2fe89fb \
- --hash=sha256:10da29526a2a927c7d64b8f34592f461d92ae55fc97981aab5bbcde8cb465bb6 \
- --hash=sha256:12600268763e6fec3cefe4c2dcdf79bde08d0b6dc1813887e789e495cb9f3403 \
- --hash=sha256:157c9b5ba5e21b375f052ca78152dd309a09ed04703fd3721dce3ff8ecced148 \
- --hash=sha256:16bfd98dbe472c263ed2821284118d899c76968db1a6665ade0c46805e6b29a4 \
- --hash=sha256:363dd6f21f848301c2dcdeb3c8ae5f0dee2286a5e952a0f04954b82076f23825 \
- --hash=sha256:3811e31e1ac3069988f7a1c9ee7331b942e605dfc0f27330a9ea5997e965efb2 \
- --hash=sha256:422c89fd8df8a3bee09fb8d52aaa1e996120eafa565437392b781abec2a56e14 \
- --hash=sha256:4604816adebd4faf8810782f137f8426bf45fee97d8427fa8e1e49ea78a52e2c \
- --hash=sha256:4944defabe2ace4803f99543445c27dd1edbe86d7d4edb87b256476a91e9ffa4 \
- --hash=sha256:51eae079ddb9c5f10376b4131be9589a6554f6fd84f7f655180937f611cd99a2 \
- --hash=sha256:53aee6be8b9b6da25ccd9028caf17dcdce3604f2c7862f5167777b707fbfb6cb \
- --hash=sha256:62a1e8847fabb5213ccde38915563140a5b338f0d0a0d363f996b51e4a6165cf \
- --hash=sha256:6f4b967bb11baea9128ec88c3d02f55a3e338361f5e4934f5240afcb667fdaec \
- --hash=sha256:78d863476e6bad2a592645072cc489bb90320972115d8995bcfbee2f8b209918 \
- --hash=sha256:795bd1e4258a2c689c0b1f13ce9684fa0dd4c0e08680dcf597cf9516ed6bc0f3 \
- --hash=sha256:7a3d22c8ee63de22336679e021c7f2386f7fc465477d59675caa0e5706387944 \
- --hash=sha256:83c75952dcf4a4cebaa850fa257d7a860644c70a7cd54262c237c9f2be26f76e \
- --hash=sha256:928078c530da78ff08e10eb6cada6e0dff386bf3d9fa9871b4bbc9fbc1efe024 \
- --hash=sha256:957b221d062d5752716923d14e0926f47670e95fead9d240fa4d4862214b9b2f \
- --hash=sha256:9ad6f09f670c466aac94a40798e0e8d1ef2aa04589c29faa5b9b97566611d1d1 \
- --hash=sha256:9c8eda4f260072f7dbe42f473906c659dcbadd5ae6159dfb49af4da1293ae380 \
- --hash=sha256:b1d9701d10303eec8d0bd33fa54d44e67b8be74ab449052a8372f12a66f93fb9 \
- --hash=sha256:b6a610f8bfe67eab980d6236fdc73bfcdae23c9ed5548192bb2d530e8a92780e \
- --hash=sha256:c9adee653fc882d98956e33ca2c1fb582e23a8af7ac82fee75bd6113c55a0413 \
- --hash=sha256:cb1be4d5af7f355e7d41d36d8eec156ef1382a88638e8032215c215b82a4b8ec \
- --hash=sha256:d1497a8cd4728db0e0da3c304856cb37c0c4e3d0b36fcbabcc1600f18504fc54 \
- --hash=sha256:d20082bdac9218649f6abe0b885927be25a917e29ae0502eaf2b53f1233ce0c2 \
- --hash=sha256:e8ad74044e5f5d2456c11ed4cfd3e34b8d4898c0cb201c4038fe41458a82ea27 \
- --hash=sha256:f022a4fd2a5263a5c483a2bb165f9cb27f2be06f2f477113783efe3fe2ad887b \
- --hash=sha256:f21efb8438971aa16924790e1c3dba3a33164eb4000106a55baaed522c261acf \
- --hash=sha256:fc0a73f4db1e31d4a6d71b672a48f3af458f548059aa05e83022d5f61aac9c08
+pycryptodome==3.20.0 \
+ --hash=sha256:06d6de87c19f967f03b4cf9b34e538ef46e99a337e9a61a77dbe44b2cbcf0690 \
+ --hash=sha256:09609209ed7de61c2b560cc5c8c4fbf892f8b15b1faf7e4cbffac97db1fffda7 \
+ --hash=sha256:210ba1b647837bfc42dd5a813cdecb5b86193ae11a3f5d972b9a0ae2c7e9e4b4 \
+ --hash=sha256:2a1250b7ea809f752b68e3e6f3fd946b5939a52eaeea18c73bdab53e9ba3c2dd \
+ --hash=sha256:2ab6ab0cb755154ad14e507d1df72de9897e99fd2d4922851a276ccc14f4f1a5 \
+ --hash=sha256:3427d9e5310af6680678f4cce149f54e0bb4af60101c7f2c16fdf878b39ccccc \
+ --hash=sha256:3cd3ef3aee1079ae44afaeee13393cf68b1058f70576b11439483e34f93cf818 \
+ --hash=sha256:405002eafad114a2f9a930f5db65feef7b53c4784495dd8758069b89baf68eab \
+ --hash=sha256:417a276aaa9cb3be91f9014e9d18d10e840a7a9b9a9be64a42f553c5b50b4d1d \
+ --hash=sha256:4401564ebf37dfde45d096974c7a159b52eeabd9969135f0426907db367a652a \
+ --hash=sha256:49a4c4dc60b78ec41d2afa392491d788c2e06edf48580fbfb0dd0f828af49d25 \
+ --hash=sha256:5601c934c498cd267640b57569e73793cb9a83506f7c73a8ec57a516f5b0b091 \
+ --hash=sha256:6e0e4a987d38cfc2e71b4a1b591bae4891eeabe5fa0f56154f576e26287bfdea \
+ --hash=sha256:76658f0d942051d12a9bd08ca1b6b34fd762a8ee4240984f7c06ddfb55eaf15a \
+ --hash=sha256:76cb39afede7055127e35a444c1c041d2e8d2f1f9c121ecef573757ba4cd2c3c \
+ --hash=sha256:8d6b98d0d83d21fb757a182d52940d028564efe8147baa9ce0f38d057104ae72 \
+ --hash=sha256:9b3ae153c89a480a0ec402e23db8d8d84a3833b65fa4b15b81b83be9d637aab9 \
+ --hash=sha256:a60fedd2b37b4cb11ccb5d0399efe26db9e0dd149016c1cc6c8161974ceac2d6 \
+ --hash=sha256:ac1c7c0624a862f2e53438a15c9259d1655325fc2ec4392e66dc46cdae24d044 \
+ --hash=sha256:acae12b9ede49f38eb0ef76fdec2df2e94aad85ae46ec85be3648a57f0a7db04 \
+ --hash=sha256:acc2614e2e5346a4a4eab6e199203034924313626f9620b7b4b38e9ad74b7e0c \
+ --hash=sha256:acf6e43fa75aca2d33e93409f2dafe386fe051818ee79ee8a3e21de9caa2ac9e \
+ --hash=sha256:baee115a9ba6c5d2709a1e88ffe62b73ecc044852a925dcb67713a288c4ec70f \
+ --hash=sha256:c18b381553638414b38705f07d1ef0a7cf301bc78a5f9bc17a957eb19446834b \
+ --hash=sha256:d29daa681517f4bc318cd8a23af87e1f2a7bad2fe361e8aa29c77d652a065de4 \
+ --hash=sha256:d5954acfe9e00bc83ed9f5cb082ed22c592fbbef86dc48b907238be64ead5c33 \
+ --hash=sha256:ec0bb1188c1d13426039af8ffcb4dbe3aad1d7680c35a62d8eaf2a529b5d3d4f \
+ --hash=sha256:ec1f93feb3bb93380ab0ebf8b859e8e5678c0f010d2d78367cf6bc30bfeb148e \
+ --hash=sha256:f0e6d631bae3f231d3634f91ae4da7a960f7ff87f2865b2d2b831af1dfb04e9a \
+ --hash=sha256:f35d6cee81fa145333137009d9c8ba90951d7d77b67c79cbe5f03c7eb74d8fe2 \
+ --hash=sha256:f47888542a0633baff535a04726948e876bf1ed880fddb7c10a736fa99146ab3 \
+ --hash=sha256:fb3b87461fa35afa19c971b0a2b7456a7b1db7b4eba9a8424666104925b78128
# via python-jose
pyflakes==2.4.0 \
--hash=sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c \
--hash=sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e
# via flake8
-pyrsistent==0.19.3 \
- --hash=sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8 \
- --hash=sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440 \
- --hash=sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a \
- --hash=sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c \
- --hash=sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3 \
- --hash=sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393 \
- --hash=sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9 \
- --hash=sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da \
- --hash=sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf \
- --hash=sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64 \
- --hash=sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a \
- --hash=sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3 \
- --hash=sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98 \
- --hash=sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2 \
- --hash=sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8 \
- --hash=sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf \
- --hash=sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc \
- --hash=sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7 \
- --hash=sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28 \
- --hash=sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2 \
- --hash=sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b \
- --hash=sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a \
- --hash=sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64 \
- --hash=sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19 \
- --hash=sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1 \
- --hash=sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9 \
- --hash=sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c
+pyrsistent==0.20.0 \
+ --hash=sha256:0724c506cd8b63c69c7f883cc233aac948c1ea946ea95996ad8b1380c25e1d3f \
+ --hash=sha256:09848306523a3aba463c4b49493a760e7a6ca52e4826aa100ee99d8d39b7ad1e \
+ --hash=sha256:0f3b1bcaa1f0629c978b355a7c37acd58907390149b7311b5db1b37648eb6958 \
+ --hash=sha256:21cc459636983764e692b9eba7144cdd54fdec23ccdb1e8ba392a63666c60c34 \
+ --hash=sha256:2e14c95c16211d166f59c6611533d0dacce2e25de0f76e4c140fde250997b3ca \
+ --hash=sha256:2e2c116cc804d9b09ce9814d17df5edf1df0c624aba3b43bc1ad90411487036d \
+ --hash=sha256:4021a7f963d88ccd15b523787d18ed5e5269ce57aa4037146a2377ff607ae87d \
+ --hash=sha256:4c48f78f62ab596c679086084d0dd13254ae4f3d6c72a83ffdf5ebdef8f265a4 \
+ --hash=sha256:4f5c2d012671b7391803263419e31b5c7c21e7c95c8760d7fc35602353dee714 \
+ --hash=sha256:58b8f6366e152092194ae68fefe18b9f0b4f89227dfd86a07770c3d86097aebf \
+ --hash=sha256:59a89bccd615551391f3237e00006a26bcf98a4d18623a19909a2c48b8e986ee \
+ --hash=sha256:5cdd7ef1ea7a491ae70d826b6cc64868de09a1d5ff9ef8d574250d0940e275b8 \
+ --hash=sha256:6288b3fa6622ad8a91e6eb759cfc48ff3089e7c17fb1d4c59a919769314af224 \
+ --hash=sha256:6d270ec9dd33cdb13f4d62c95c1a5a50e6b7cdd86302b494217137f760495b9d \
+ --hash=sha256:79ed12ba79935adaac1664fd7e0e585a22caa539dfc9b7c7c6d5ebf91fb89054 \
+ --hash=sha256:7d29c23bdf6e5438c755b941cef867ec2a4a172ceb9f50553b6ed70d50dfd656 \
+ --hash=sha256:8441cf9616d642c475684d6cf2520dd24812e996ba9af15e606df5f6fd9d04a7 \
+ --hash=sha256:881bbea27bbd32d37eb24dd320a5e745a2a5b092a17f6debc1349252fac85423 \
+ --hash=sha256:8c3aba3e01235221e5b229a6c05f585f344734bd1ad42a8ac51493d74722bbce \
+ --hash=sha256:a14798c3005ec892bbada26485c2eea3b54109cb2533713e355c806891f63c5e \
+ --hash=sha256:b14decb628fac50db5e02ee5a35a9c0772d20277824cfe845c8a8b717c15daa3 \
+ --hash=sha256:b318ca24db0f0518630e8b6f3831e9cba78f099ed5c1d65ffe3e023003043ba0 \
+ --hash=sha256:c1beb78af5423b879edaf23c5591ff292cf7c33979734c99aa66d5914ead880f \
+ --hash=sha256:c55acc4733aad6560a7f5f818466631f07efc001fd023f34a6c203f8b6df0f0b \
+ --hash=sha256:ca52d1ceae015859d16aded12584c59eb3825f7b50c6cfd621d4231a6cc624ce \
+ --hash=sha256:cae40a9e3ce178415040a0383f00e8d68b569e97f31928a3a8ad37e3fde6df6a \
+ --hash=sha256:e78d0c7c1e99a4a45c99143900ea0546025e41bb59ebc10182e947cf1ece9174 \
+ --hash=sha256:ef3992833fbd686ee783590639f4b8343a57f1f75de8633749d984dc0eb16c86 \
+ --hash=sha256:f058a615031eea4ef94ead6456f5ec2026c19fb5bd6bfe86e9665c4158cf802f \
+ --hash=sha256:f5ac696f02b3fc01a710427585c855f65cd9c640e14f52abe52020722bb4906b \
+ --hash=sha256:f920385a11207dc372a028b3f1e1038bb244b3ec38d448e6d8e43c6b3ba20e98 \
+ --hash=sha256:fed2c3216a605dc9a6ea50c7e84c82906e3684c4e80d2908208f662a6cbf9022
# via jsonschema
python-dateutil==2.8.2 \
--hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \
@@ -974,9 +1018,9 @@ python-jose[pycryptodome]==3.3.0 \
python3-memcached==1.51 \
--hash=sha256:7cbe5951d68eef69d948b7a7ed7decfbd101e15e7f5be007dcd1219ccc584859
# via mozci
-pytz==2023.3 \
- --hash=sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588 \
- --hash=sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb
+pytz==2023.3.post1 \
+ --hash=sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b \
+ --hash=sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7
# via djangorestframework
pyyaml==6.0 \
--hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \
@@ -1022,99 +1066,97 @@ pyyaml==6.0 \
# via
# -r requirements/common.in
# mozci
-rapidfuzz==2.15.1 \
- --hash=sha256:040faca2e26d9dab5541b45ce72b3f6c0e36786234703fc2ac8c6f53bb576743 \
- --hash=sha256:074ee9e17912e025c72a5780ee4c7c413ea35cd26449719cc399b852d4e42533 \
- --hash=sha256:099e4c6befaa8957a816bdb67ce664871f10aaec9bebf2f61368cf7e0869a7a1 \
- --hash=sha256:0f73a04135a03a6e40393ecd5d46a7a1049d353fc5c24b82849830d09817991f \
- --hash=sha256:19b7460e91168229768be882ea365ba0ac7da43e57f9416e2cfadc396a7df3c2 \
- --hash=sha256:2084d36b95139413cef25e9487257a1cc892b93bd1481acd2a9656f7a1d9930c \
- --hash=sha256:22b9d22022b9d09fd4ece15102270ab9b6a5cfea8b6f6d1965c1df7e3783f5ff \
- --hash=sha256:2492330bc38b76ed967eab7bdaea63a89b6ceb254489e2c65c3824efcbf72993 \
- --hash=sha256:2577463d10811386e704a3ab58b903eb4e2a31b24dfd9886d789b0084d614b01 \
- --hash=sha256:2d93ba3ae59275e7a3a116dac4ffdb05e9598bf3ee0861fecc5b60fb042d539e \
- --hash=sha256:2dd03477feefeccda07b7659dd614f6738cfc4f9b6779dd61b262a73b0a9a178 \
- --hash=sha256:2e597b9dfd6dd180982684840975c458c50d447e46928efe3e0120e4ec6f6686 \
- --hash=sha256:3c53d57ba7a88f7bf304d4ea5a14a0ca112db0e0178fff745d9005acf2879f7d \
- --hash=sha256:3c89cfa88dc16fd8c9bcc0c7f0b0073f7ef1e27cceb246c9f5a3f7004fa97c4d \
- --hash=sha256:3fac40972cf7b6c14dded88ae2331eb50dfbc278aa9195473ef6fc6bfe49f686 \
- --hash=sha256:41dfea282844d0628279b4db2929da0dacb8ac317ddc5dcccc30093cf16357c1 \
- --hash=sha256:46599b2ad4045dd3f794a24a6db1e753d23304699d4984462cf1ead02a51ddf3 \
- --hash=sha256:46754fe404a9a6f5cbf7abe02d74af390038d94c9b8c923b3f362467606bfa28 \
- --hash=sha256:47e81767a962e41477a85ad7ac937e34d19a7d2a80be65614f008a5ead671c56 \
- --hash=sha256:49c4bcdb9238f11f8c4eba1b898937f09b92280d6f900023a8216008f299b41a \
- --hash=sha256:4d9f7d10065f657f960b48699e7dddfce14ab91af4bab37a215f0722daf0d716 \
- --hash=sha256:4f69e6199fec0f58f9a89afbbaea78d637c7ce77f656a03a1d6ea6abdc1d44f8 \
- --hash=sha256:509c5b631cd64df69f0f011893983eb15b8be087a55bad72f3d616b6ae6a0f96 \
- --hash=sha256:53de456ef020a77bf9d7c6c54860a48e2e902584d55d3001766140ac45c54bc7 \
- --hash=sha256:558224b6fc6124d13fa32d57876f626a7d6188ba2a97cbaea33a6ee38a867e31 \
- --hash=sha256:591f19d16758a3c55c9d7a0b786b40d95599a5b244d6eaef79c7a74fcf5104d8 \
- --hash=sha256:5a738fcd24e34bce4b19126b92fdae15482d6d3a90bd687fd3d24ce9d28ce82d \
- --hash=sha256:5efe035aa76ff37d1b5fa661de3c4b4944de9ff227a6c0b2e390a95c101814c0 \
- --hash=sha256:60368e1add6e550faae65614844c43f8a96e37bf99404643b648bf2dba92c0fb \
- --hash=sha256:6534afc787e32c4104f65cdeb55f6abe4d803a2d0553221d00ef9ce12788dcde \
- --hash=sha256:6986413cb37035eb796e32f049cbc8c13d8630a4ac1e0484e3e268bb3662bd1b \
- --hash=sha256:6d89c421702474c6361245b6b199e6e9783febacdbfb6b002669e6cb3ef17a09 \
- --hash=sha256:6e2a3b23e1e9aa13474b3c710bba770d0dcc34d517d3dd6f97435a32873e3f28 \
- --hash=sha256:7025fb105a11f503943f17718cdb8241ea3bb4d812c710c609e69bead40e2ff0 \
- --hash=sha256:785744f1270828cc632c5a3660409dee9bcaac6931a081bae57542c93e4d46c4 \
- --hash=sha256:79fc574aaf2d7c27ec1022e29c9c18f83cdaf790c71c05779528901e0caad89b \
- --hash=sha256:7c3ff75e647908ddbe9aa917fbe39a112d5631171f3fcea5809e2363e525a59d \
- --hash=sha256:7d150d90a7c6caae7962f29f857a4e61d42038cfd82c9df38508daf30c648ae7 \
- --hash=sha256:7e24a1b802cea04160b3fccd75d2d0905065783ebc9de157d83c14fb9e1c6ce2 \
- --hash=sha256:82b86d5b8c1b9bcbc65236d75f81023c78d06a721c3e0229889ff4ed5c858169 \
- --hash=sha256:87c30e9184998ff6eb0fa9221f94282ce7c908fd0da96a1ef66ecadfaaa4cdb7 \
- --hash=sha256:8ba013500a2b68c64b2aecc5fb56a2dad6c2872cf545a0308fd044827b6e5f6a \
- --hash=sha256:8c99d53138a2dfe8ada67cb2855719f934af2733d726fbf73247844ce4dd6dd5 \
- --hash=sha256:91abb8bf7610efe326394adc1d45e1baca8f360e74187f3fa0ef3df80cdd3ba6 \
- --hash=sha256:93c33c03e7092642c38f8a15ca2d8fc38da366f2526ec3b46adf19d5c7aa48ba \
- --hash=sha256:94e1c97f0ad45b05003806f8a13efc1fc78983e52fa2ddb00629003acf4676ef \
- --hash=sha256:a0e441d4c2025110ec3eba5d54f11f78183269a10152b3a757a739ffd1bb12bf \
- --hash=sha256:a3a769ca7580686a66046b77df33851b3c2d796dc1eb60c269b68f690f3e1b65 \
- --hash=sha256:a48ee83916401ac73938526d7bd804e01d2a8fe61809df7f1577b0b3b31049a3 \
- --hash=sha256:a4a54efe17cc9f53589c748b53f28776dfdfb9bc83619685740cb7c37985ac2f \
- --hash=sha256:a6ee758eec4cf2215dc8d8eafafcea0d1f48ad4b0135767db1b0f7c5c40a17dd \
- --hash=sha256:a72f26e010d4774b676f36e43c0fc8a2c26659efef4b3be3fd7714d3491e9957 \
- --hash=sha256:a7381c11cb590bbd4e6f2d8779a0b34fdd2234dfa13d0211f6aee8ca166d9d05 \
- --hash=sha256:aa1e5aad325168e29bf8e17006479b97024aa9d2fdbe12062bd2f8f09080acf8 \
- --hash=sha256:abde47e1595902a490ed14d4338d21c3509156abb2042a99e6da51f928e0c117 \
- --hash=sha256:b1b393f4a1eaa6867ffac6aef58cfb04bab2b3d7d8e40b9fe2cf40dd1d384601 \
- --hash=sha256:b5cd54c98a387cca111b3b784fc97a4f141244bbc28a92d4bde53f164464112e \
- --hash=sha256:b7461b0a7651d68bc23f0896bffceea40f62887e5ab8397bf7caa883592ef5cb \
- --hash=sha256:b89d1126be65c85763d56e3b47d75f1a9b7c5529857b4d572079b9a636eaa8a7 \
- --hash=sha256:bb8318116ecac4dfb84841d8b9b461f9bb0c3be5b616418387d104f72d2a16d1 \
- --hash=sha256:be7ccc45c4d1a7dfb595f260e8022a90c6cb380c2a346ee5aae93f85c96d362b \
- --hash=sha256:c2bb68832b140c551dbed691290bef4ee6719d4e8ce1b7226a3736f61a9d1a83 \
- --hash=sha256:c35da09ab9797b020d0d4f07a66871dfc70ea6566363811090353ea971748b5a \
- --hash=sha256:c525a3da17b6d79d61613096c8683da86e3573e807dfaecf422eea09e82b5ba6 \
- --hash=sha256:c71580052f9dbac443c02f60484e5a2e5f72ad4351b84b2009fbe345b1f38422 \
- --hash=sha256:ca8f1747007a3ce919739a60fa95c5325f7667cccf6f1c1ef18ae799af119f5e \
- --hash=sha256:cac095cbdf44bc286339a77214bbca6d4d228c9ebae3da5ff6a80aaeb7c35634 \
- --hash=sha256:cfdcdedfd12a0077193f2cf3626ff6722c5a184adf0d2d51f1ec984bf21c23c3 \
- --hash=sha256:d0ae6ec79a1931929bb9dd57bc173eb5ba4c7197461bf69e3a34b6dd314feed2 \
- --hash=sha256:d14752c9dd2036c5f36ebe8db5f027275fa7d6b3ec6484158f83efb674bab84e \
- --hash=sha256:d4deae6a918ecc260d0c4612257be8ba321d8e913ccb43155403842758c46fbe \
- --hash=sha256:d50622efefdb03a640a51a6123748cd151d305c1f0431af762e833d6ffef71f0 \
- --hash=sha256:d59fb3a410d253f50099d7063855c2b95df1ef20ad93ea3a6b84115590899f25 \
- --hash=sha256:d62137c2ca37aea90a11003ad7dc109c8f1739bfbe5a9a217f3cdb07d7ac00f6 \
- --hash=sha256:d7927722ff43690e52b3145b5bd3089151d841d350c6f8378c3cfac91f67573a \
- --hash=sha256:da7fac7c3da39f93e6b2ebe386ed0ffe1cefec91509b91857f6e1204509e931f \
- --hash=sha256:dc3cafa68cfa54638632bdcadf9aab89a3d182b4a3f04d2cad7585ed58ea8731 \
- --hash=sha256:dffdf03499e0a5b3442951bb82b556333b069e0661e80568752786c79c5b32de \
- --hash=sha256:e1e0e569108a5760d8f01d0f2148dd08cc9a39ead79fbefefca9e7c7723c7e88 \
- --hash=sha256:e40a2f60024f9d3c15401e668f732800114a023f3f8d8c40f1521a62081ff054 \
- --hash=sha256:e9296c530e544f68858c3416ad1d982a1854f71e9d2d3dcedb5b216e6d54f067 \
- --hash=sha256:ebb40a279e134bb3fef099a8b58ed5beefb201033d29bdac005bddcdb004ef71 \
- --hash=sha256:ed17359061840eb249f8d833cb213942e8299ffc4f67251a6ed61833a9f2ea20 \
- --hash=sha256:ed2cf7c69102c7a0a06926d747ed855bc836f52e8d59a5d1e3adfd980d1bd165 \
- --hash=sha256:f01fa757f0fb332a1f045168d29b0d005de6c39ee5ce5d6c51f2563bb53c601b \
- --hash=sha256:f0e456cbdc0abf39352800309dab82fd3251179fa0ff6573fa117f51f4e84be8 \
- --hash=sha256:f3dd4bcef2d600e0aa121e19e6e62f6f06f22a89f82ef62755e205ce14727874 \
- --hash=sha256:f67d5f56aa48c0da9de4ab81bffb310683cf7815f05ea38e5aa64f3ba4368339 \
- --hash=sha256:f85bece1ec59bda8b982bd719507d468d4df746dfb1988df11d916b5e9fe19e8 \
- --hash=sha256:f976e76ac72f650790b3a5402431612175b2ac0363179446285cb3c901136ca9 \
- --hash=sha256:fc0bc259ebe3b93e7ce9df50b3d00e7345335d35acbd735163b7c4b1957074d3 \
- --hash=sha256:fc4528b7736e5c30bc954022c2cf410889abc19504a023abadbc59cdf9f37cae
+rapidfuzz==3.6.1 \
+ --hash=sha256:01835d02acd5d95c1071e1da1bb27fe213c84a013b899aba96380ca9962364bc \
+ --hash=sha256:01eb03cd880a294d1bf1a583fdd00b87169b9cc9c9f52587411506658c864d73 \
+ --hash=sha256:03f73b381bdeccb331a12c3c60f1e41943931461cdb52987f2ecf46bfc22f50d \
+ --hash=sha256:0402f1629e91a4b2e4aee68043a30191e5e1b7cd2aa8dacf50b1a1bcf6b7d3ab \
+ --hash=sha256:060bd7277dc794279fa95522af355034a29c90b42adcb7aa1da358fc839cdb11 \
+ --hash=sha256:064c1d66c40b3a0f488db1f319a6e75616b2e5fe5430a59f93a9a5e40a656d15 \
+ --hash=sha256:06e98ff000e2619e7cfe552d086815671ed09b6899408c2c1b5103658261f6f3 \
+ --hash=sha256:08b6fb47dd889c69fbc0b915d782aaed43e025df6979b6b7f92084ba55edd526 \
+ --hash=sha256:0a9fc714b8c290261669f22808913aad49553b686115ad0ee999d1cb3df0cd66 \
+ --hash=sha256:0bbfae35ce4de4c574b386c43c78a0be176eeddfdae148cb2136f4605bebab89 \
+ --hash=sha256:12ff8eaf4a9399eb2bebd838f16e2d1ded0955230283b07376d68947bbc2d33d \
+ --hash=sha256:1936d134b6c513fbe934aeb668b0fee1ffd4729a3c9d8d373f3e404fbb0ce8a0 \
+ --hash=sha256:1c47d592e447738744905c18dda47ed155620204714e6df20eb1941bb1ba315e \
+ --hash=sha256:1dfc557c0454ad22382373ec1b7df530b4bbd974335efe97a04caec936f2956a \
+ --hash=sha256:1e12319c6b304cd4c32d5db00b7a1e36bdc66179c44c5707f6faa5a889a317c0 \
+ --hash=sha256:23de71e7f05518b0bbeef55d67b5dbce3bcd3e2c81e7e533051a2e9401354eb0 \
+ --hash=sha256:266dd630f12696ea7119f31d8b8e4959ef45ee2cbedae54417d71ae6f47b9848 \
+ --hash=sha256:2963f4a3f763870a16ee076796be31a4a0958fbae133dbc43fc55c3968564cf5 \
+ --hash=sha256:2a791168e119cfddf4b5a40470620c872812042f0621e6a293983a2d52372db0 \
+ --hash=sha256:2b155e67fff215c09f130555002e42f7517d0ea72cbd58050abb83cb7c880cec \
+ --hash=sha256:2b19795b26b979c845dba407fe79d66975d520947b74a8ab6cee1d22686f7967 \
+ --hash=sha256:2e03038bfa66d2d7cffa05d81c2f18fd6acbb25e7e3c068d52bb7469e07ff382 \
+ --hash=sha256:3028ee8ecc48250607fa8a0adce37b56275ec3b1acaccd84aee1f68487c8557b \
+ --hash=sha256:35660bee3ce1204872574fa041c7ad7ec5175b3053a4cb6e181463fc07013de7 \
+ --hash=sha256:3c772d04fb0ebeece3109d91f6122b1503023086a9591a0b63d6ee7326bd73d9 \
+ --hash=sha256:3c84294f4470fcabd7830795d754d808133329e0a81d62fcc2e65886164be83b \
+ --hash=sha256:40cced1a8852652813f30fb5d4b8f9b237112a0bbaeebb0f4cc3611502556764 \
+ --hash=sha256:4243a9c35667a349788461aae6471efde8d8800175b7db5148a6ab929628047f \
+ --hash=sha256:42f211e366e026de110a4246801d43a907cd1a10948082f47e8a4e6da76fef52 \
+ --hash=sha256:4381023fa1ff32fd5076f5d8321249a9aa62128eb3f21d7ee6a55373e672b261 \
+ --hash=sha256:484759b5dbc5559e76fefaa9170147d1254468f555fd9649aea3bad46162a88b \
+ --hash=sha256:49b9ed2472394d306d5dc967a7de48b0aab599016aa4477127b20c2ed982dbf9 \
+ --hash=sha256:53251e256017e2b87f7000aee0353ba42392c442ae0bafd0f6b948593d3f68c6 \
+ --hash=sha256:588c4b20fa2fae79d60a4e438cf7133d6773915df3cc0a7f1351da19eb90f720 \
+ --hash=sha256:5a2f3e9df346145c2be94e4d9eeffb82fab0cbfee85bd4a06810e834fe7c03fa \
+ --hash=sha256:5d82b9651e3d34b23e4e8e201ecd3477c2baa17b638979deeabbb585bcb8ba74 \
+ --hash=sha256:5dd95b6b7bfb1584f806db89e1e0c8dbb9d25a30a4683880c195cc7f197eaf0c \
+ --hash=sha256:692c9a50bea7a8537442834f9bc6b7d29d8729a5b6379df17c31b6ab4df948c2 \
+ --hash=sha256:6b0ccc2ec1781c7e5370d96aef0573dd1f97335343e4982bdb3a44c133e27786 \
+ --hash=sha256:6dede83a6b903e3ebcd7e8137e7ff46907ce9316e9d7e7f917d7e7cdc570ee05 \
+ --hash=sha256:7142ee354e9c06e29a2636b9bbcb592bb00600a88f02aa5e70e4f230347b373e \
+ --hash=sha256:7183157edf0c982c0b8592686535c8b3e107f13904b36d85219c77be5cefd0d8 \
+ --hash=sha256:7420e801b00dee4a344ae2ee10e837d603461eb180e41d063699fb7efe08faf0 \
+ --hash=sha256:757dfd7392ec6346bd004f8826afb3bf01d18a723c97cbe9958c733ab1a51791 \
+ --hash=sha256:76c23ceaea27e790ddd35ef88b84cf9d721806ca366199a76fd47cfc0457a81b \
+ --hash=sha256:7fec74c234d3097612ea80f2a80c60720eec34947066d33d34dc07a3092e8105 \
+ --hash=sha256:82300e5f8945d601c2daaaac139d5524d7c1fdf719aa799a9439927739917460 \
+ --hash=sha256:841eafba6913c4dfd53045835545ba01a41e9644e60920c65b89c8f7e60c00a9 \
+ --hash=sha256:8d7a072f10ee57c8413c8ab9593086d42aaff6ee65df4aa6663eecdb7c398dca \
+ --hash=sha256:8e4da90e4c2b444d0a171d7444ea10152e07e95972bb40b834a13bdd6de1110c \
+ --hash=sha256:96cd19934f76a1264e8ecfed9d9f5291fde04ecb667faef5f33bdbfd95fe2d1f \
+ --hash=sha256:a03863714fa6936f90caa7b4b50ea59ea32bb498cc91f74dc25485b3f8fccfe9 \
+ --hash=sha256:a1788ebb5f5b655a15777e654ea433d198f593230277e74d51a2a1e29a986283 \
+ --hash=sha256:a3ee4f8f076aa92184e80308fc1a079ac356b99c39408fa422bbd00145be9854 \
+ --hash=sha256:a490cd645ef9d8524090551016f05f052e416c8adb2d8b85d35c9baa9d0428ab \
+ --hash=sha256:a553cc1a80d97459d587529cc43a4c7c5ecf835f572b671107692fe9eddf3e24 \
+ --hash=sha256:a59472b43879012b90989603aa5a6937a869a72723b1bf2ff1a0d1edee2cc8e6 \
+ --hash=sha256:ac434fc71edda30d45db4a92ba5e7a42c7405e1a54cb4ec01d03cc668c6dcd40 \
+ --hash=sha256:ad9d74ef7c619b5b0577e909582a1928d93e07d271af18ba43e428dc3512c2a1 \
+ --hash=sha256:ae598a172e3a95df3383634589660d6b170cc1336fe7578115c584a99e0ba64d \
+ --hash=sha256:b2ef4c0fd3256e357b70591ffb9e8ed1d439fb1f481ba03016e751a55261d7c1 \
+ --hash=sha256:b3e5af946f419c30f5cb98b69d40997fe8580efe78fc83c2f0f25b60d0e56efb \
+ --hash=sha256:b53137d81e770c82189e07a8f32722d9e4260f13a0aec9914029206ead38cac3 \
+ --hash=sha256:b7e3375e4f2bfec77f907680328e4cd16cc64e137c84b1886d547ab340ba6928 \
+ --hash=sha256:bcc957c0a8bde8007f1a8a413a632a1a409890f31f73fe764ef4eac55f59ca87 \
+ --hash=sha256:be156f51f3a4f369e758505ed4ae64ea88900dcb2f89d5aabb5752676d3f3d7e \
+ --hash=sha256:be368573255f8fbb0125a78330a1a40c65e9ba3c5ad129a426ff4289099bfb41 \
+ --hash=sha256:c1a23eee225dfb21c07f25c9fcf23eb055d0056b48e740fe241cbb4b22284379 \
+ --hash=sha256:c65f92881753aa1098c77818e2b04a95048f30edbe9c3094dc3707d67df4598b \
+ --hash=sha256:ca3dfcf74f2b6962f411c33dd95b0adf3901266e770da6281bc96bb5a8b20de9 \
+ --hash=sha256:cd4ba4c18b149da11e7f1b3584813159f189dc20833709de5f3df8b1342a9759 \
+ --hash=sha256:d056e342989248d2bdd67f1955bb7c3b0ecfa239d8f67a8dfe6477b30872c607 \
+ --hash=sha256:d2f0274595cc5b2b929c80d4e71b35041104b577e118cf789b3fe0a77b37a4c5 \
+ --hash=sha256:d73dcfe789d37c6c8b108bf1e203e027714a239e50ad55572ced3c004424ed3b \
+ --hash=sha256:d79aec8aeee02ab55d0ddb33cea3ecd7b69813a48e423c966a26d7aab025cdfe \
+ --hash=sha256:da3e8c9f7e64bb17faefda085ff6862ecb3ad8b79b0f618a6cf4452028aa2222 \
+ --hash=sha256:dad55a514868dae4543ca48c4e1fc0fac704ead038dafedf8f1fc0cc263746c1 \
+ --hash=sha256:dec307b57ec2d5054d77d03ee4f654afcd2c18aee00c48014cb70bfed79597d6 \
+ --hash=sha256:e06c4242a1354cf9d48ee01f6f4e6e19c511d50bb1e8d7d20bcadbb83a2aea90 \
+ --hash=sha256:e19d519386e9db4a5335a4b29f25b8183a1c3f78cecb4c9c3112e7f86470e37f \
+ --hash=sha256:e49b9575d16c56c696bc7b06a06bf0c3d4ef01e89137b3ddd4e2ce709af9fe06 \
+ --hash=sha256:ebcfb5bfd0a733514352cfc94224faad8791e576a80ffe2fd40b2177bf0e7198 \
+ --hash=sha256:ed0f712e0bb5fea327e92aec8a937afd07ba8de4c529735d82e4c4124c10d5a0 \
+ --hash=sha256:edf97c321fd641fea2793abce0e48fa4f91f3c202092672f8b5b4e781960b891 \
+ --hash=sha256:eef8b346ab331bec12bbc83ac75641249e6167fab3d84d8f5ca37fd8e6c7a08c \
+ --hash=sha256:f056ba42fd2f32e06b2c2ba2443594873cfccc0c90c8b6327904fc2ddf6d5799 \
+ --hash=sha256:f382f7ffe384ce34345e1c0b2065451267d3453cadde78946fbd99a59f0cc23c \
+ --hash=sha256:f59d19078cc332dbdf3b7b210852ba1f5db8c0a2cd8cc4c0ed84cc00c76e6802 \
+ --hash=sha256:fbc07e2e4ac696497c5f66ec35c21ddab3fc7a406640bffed64c26ab2f7ce6d6 \
+ --hash=sha256:fde9b14302a31af7bdafbf5cfbb100201ba21519be2b9dedcf4f1048e4fbe65d
# via cleo
redis==4.6.0 \
--hash=sha256:585dc516b9eb042a619ef0a39c3d7d55fe81bdb4df09a52c9cdde0d07bf1aa7d \
@@ -1132,9 +1174,9 @@ rsa==4.9 \
--hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \
--hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21
# via python-jose
-s3transfer==0.6.1 \
- --hash=sha256:3c0da2d074bf35d6870ef157158641178a4204a6e689e82546083e31e0311346 \
- --hash=sha256:640bb492711f4c0c0905e1f62b6aaeb771881935ad27884852411f8e9cacbca9
+s3transfer==0.10.0 \
+ --hash=sha256:3cdb40f5cfa6966e812209d0994f2a4709b561c88e90cf00c2696d2df4e56b2e \
+ --hash=sha256:d0c8bbf672d5eebbe4e57945e23b972d963f07d82f661cabf678a5c88831595b
# via boto3
scipy==1.10.0 \
--hash=sha256:0490dc499fe23e4be35b8b6dd1e60a4a34f0c4adb30ac671e6332446b3cbbb5a \
@@ -1288,86 +1330,90 @@ taskcluster-urls==13.0.1 \
# via
# mozci
# taskcluster
-tomlkit==0.11.8 \
- --hash=sha256:8c726c4c202bdb148667835f68d68780b9a003a9ec34167b6c673b38eff2a171 \
- --hash=sha256:9330fc7faa1db67b541b28e62018c17d20be733177d290a13b24c62d1614e0c3
+tomlkit==0.12.3 \
+ --hash=sha256:75baf5012d06501f07bee5bf8e801b9f343e7aac5a92581f20f80ce632e6b5a4 \
+ --hash=sha256:b0a645a9156dc7cb5d3a1f0d4bab66db287fcb8e0430bdd4664a095ea16414ba
# via mozci
-typing-extensions==4.7.1 \
- --hash=sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36 \
- --hash=sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2
+types-python-dateutil==2.8.19.20240106 \
+ --hash=sha256:1f8db221c3b98e6ca02ea83a58371b22c374f42ae5bbdf186db9c9a76581459f \
+ --hash=sha256:efbbdc54590d0f16152fa103c9879c7d4a00e82078f6e2cf01769042165acaa2
+ # via arrow
+typing-extensions==4.9.0 \
+ --hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \
+ --hash=sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd
# via
# asgiref
# kombu
-tzdata==2023.3 \
- --hash=sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a \
- --hash=sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda
+tzdata==2023.4 \
+ --hash=sha256:aa3ace4329eeacda5b7beb7ea08ece826c28d761cda36e747cfbf97996d39bf3 \
+ --hash=sha256:dd54c94f294765522c77399649b4fefd95522479a664a0cec87f41bebc6148c9
# via celery
uritemplate==4.1.1 \
--hash=sha256:4346edfc5c3b79f694bccd6d6099a322bbeb628dbf2cd86eea55a456ce5124f0 \
--hash=sha256:830c08b8d99bdd312ea4ead05994a38e8936266f84b9a7878232db50b044e02e
# via -r requirements/common.in
-urllib3==1.26.16 \
- --hash=sha256:8d36afa7616d8ab714608411b4a3b13e58f463aee519024578e062e141dce20f \
- --hash=sha256:8f135f6502756bde6b2a9b28989df5fbe87c9970cecaa69041edcce7f0589b14
+urllib3==1.26.18 \
+ --hash=sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07 \
+ --hash=sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0
# via
# botocore
# requests
-validx==0.8 \
- --hash=sha256:09b1193a116a7d6cc7cabcab876917b03f8f0fdb7eff6fc5ab01dddf0c1cea3c \
- --hash=sha256:0e6514c73a29a5bd95b116acc658251406d80e1dbe2935983e223d1f253c1812 \
- --hash=sha256:13daf9259856f90f85b2c55d0da85f9e61c93b0aa4a43a7b31af6b68a2ee791b \
- --hash=sha256:1b4321884f9d7d6d9ebbe2f24712ba238530f964d94fdcd97745aff177a0a318 \
- --hash=sha256:21e097dd264273b9c988b959133122ee3562de977e60ea5efac72176383f7c33 \
- --hash=sha256:273a7ce4f340ced005fc3e84c9dbf5074e6b6c64e9ea708880a38bb39fa6dd99 \
- --hash=sha256:3eeec5299a9e70c94a1f0b005ad9ed06baeb4b81838e001100f75caddf6a4445 \
- --hash=sha256:4b3b5c6615e222286c09ab7487d108d6367f746e067eb17f7cd9ac537bec0934 \
- --hash=sha256:54f9d2cb5c9b340dccf11456700b51fe2a4d52b16852960cffe7d115b848671e \
- --hash=sha256:553ee8ba6eedb35960f0a7e8754b58b319d0083bc9e8c279b86e9d4b1b5a7f9c \
- --hash=sha256:555436b76823c8e69f3f4aae985bc00b67abb7e12eb4e15f5053359ff0fa3cdb \
- --hash=sha256:5788023c3f5c497569bedc8d6b90be235797df98ce5794296c92cf6e51317be1 \
- --hash=sha256:5b36287c2b31bf364656e06df72ed9a7eab62832f3f049a4d9f0ad682ee545b9 \
- --hash=sha256:6781783a8833caebe0b0a26a7edb5de6ee9bb1c1d59a5f2cb433de018a7d11c5 \
- --hash=sha256:6867af20384a7d817be07c422f3ed858f7860e2d85f93647dce188c0eb0a0232 \
- --hash=sha256:68a60b0884fa9582c1ba6e721b9a038f6c5a2022557213e8f582d97cbee1c0b4 \
- --hash=sha256:6b503380715c40718d20aee0452f782821b8664a32a92a404dcff29e35e1857d \
- --hash=sha256:6f46fefff0c9a1fd1897f459fdf9f00e78ade149cea6872332ae1bf1f7d38b62 \
- --hash=sha256:800fbc3b6edecca2a8f9ebddc1355b0b9c66a0f59345eb5b1cb28b416462b2a0 \
- --hash=sha256:801df0277c96de20220e96dc434cbbbc3383f399b383c18ddbeaa2ef3799cada \
- --hash=sha256:81427338761db86f197af3ee48a568e3d5ad2c56ee61db55f81d9ab54f290ddc \
- --hash=sha256:82287771ddb20a1a02f60aabb5150d59b0e5a10b8006579e004fd3d54243114c \
- --hash=sha256:83b7f6d9cfef7dbb6e3c34fe73f9662837e56161fc33300de3767472a5ac63ad \
- --hash=sha256:853051035646d4a045a4a54a6c6d0d6e7281a23eeb922f2f5030caeb6f0aab03 \
- --hash=sha256:85754e1cfebbdf43188b520081bbda5906d4182e7fa3444b6a6f60945f398453 \
- --hash=sha256:888853e2c833630dbfe06b6b860b894cb06a6cc8af41de738d1c72d205227765 \
- --hash=sha256:9b6bfa2b46b80d88b271a5d83faae046a4b37a3702da114fe602742e0ca59af3 \
- --hash=sha256:a140bbca789ccc931fb7a8b92743008578da1a55d832629fd846a32de4f8ce76 \
- --hash=sha256:a194ee597be5d7841c12e8bb98b8c8c349ef973e4d9b0d27b3003b686b6e0562 \
- --hash=sha256:a379a728b82513e0e924ca7d56b5804af04d793b4e8ad4c99383ac28268eb788 \
- --hash=sha256:a4f12b356677590d947aba4c899d2f41a6cfc408d1d4289b1184f10ba9a52fc9 \
- --hash=sha256:ab065833277b69cbf5701bd3ba08a0b1c9cb3a9b776c8cf49a0f9e06f2684fc0 \
- --hash=sha256:b0a4f9530af83cc884f9571961ad09c06ca9d8a00056e124374fd2a92f0456c5 \
- --hash=sha256:b5be3cc2ef969c73519653d11e638a9ed909aed1ceefe68900c1a798927bc859 \
- --hash=sha256:bb20b7b34ccab1b61768e2dad30d09d51ea92dbd053c64f3286724157fc1c5fa \
- --hash=sha256:c2edc3d835597e2f4e0446bb4ca8727ffb76553746e11d5da6d82108dacdb6fe \
- --hash=sha256:c7f2f1d62e10cf0fde723940209ea7593db789626e9411d2fe95afb64324f7e8 \
- --hash=sha256:e01480bbc0faa5523a2def7c241a1cf4dec7ca033807528d1cb50a1c79a14c05 \
- --hash=sha256:e4d934d4cbf9fb9f257f0c60e667316894979e4aaa6de8d52261ddf69721e25c \
- --hash=sha256:eab536576d640b102667492d4cc52ea7f3843432f2fa4126634bc9b412f4e869 \
- --hash=sha256:eae4b92651b2e266e9d37416de0dbf59dbf37cf382c9c5b076be36ce5686fb70 \
- --hash=sha256:fab59482474268cfaeea266c8c412300c8e396b6c041105b9871e2c34beaf740 \
- --hash=sha256:fd44a8109f00c8a95c14baa461b6aeb5379facf1e7037bad7e31c589f4e2fc2d \
- --hash=sha256:ff91a7e3faca01ed45efb8f2df5b2a161ec25f2cd4deef5dcd089c5b3956b277
+validx==0.8.1 \
+ --hash=sha256:06a968020ab88851feb09afaee31611c5238108fe82bdc99dc61e80262f08167 \
+ --hash=sha256:0896cb45a7927b04dcbf2e286bbac64b86d13911971f936a7dce44f80ca54c83 \
+ --hash=sha256:09862fba063b1140f79ee91f5cee047a34a793e1b314e725d67ce984ddb85f85 \
+ --hash=sha256:1638bfa2842f58c7765d462b9ee6c85981cd44a1387823e593525079ee28b71d \
+ --hash=sha256:25b3b35289b7ade07470948119c6b9b863725a2587c7049c3c1b33dd9bc529bb \
+ --hash=sha256:265b36c01a2b9e9def0f06423efaccfd09abdefc253d4286b179f77f81dc28e8 \
+ --hash=sha256:26c52239db0caf3620f15a23a209a219624e5c6f325a85f916aa9c152d17add4 \
+ --hash=sha256:26ecb8fc5c0f9abd4154ae114bd9301394eb67b63ef05bbe570a64edb2c12f31 \
+ --hash=sha256:2d03e64e6b8a971af200eb110cdf8e8467edca12faf689db64196834ae558229 \
+ --hash=sha256:2e892c6f7a4250b4cffe051042c5b7845be4db5d35c644c16df133b318249968 \
+ --hash=sha256:3e91dbf550531bcf5565629157c170632edca83963ec428a1509da821534425d \
+ --hash=sha256:45a23c8ecf95a8be36ef346aec0f526744d793ea3862e8af077ad782694407b6 \
+ --hash=sha256:48d18fffc5961a72c8dede13b2e270b9baefe7bd0e9711614b63cbf3fb347157 \
+ --hash=sha256:5004a0ca66c925c7c317f7eb40c24a7dbb3c28d30cd8d7d9160900bbd8db7f28 \
+ --hash=sha256:5476ff1b2d08f29df7f91cf96d2f4c906e95d22c611d592719b5da3131437b3f \
+ --hash=sha256:56bf42faeec5d415064644d673facee9c4350770609399a6ca511bb329ed340a \
+ --hash=sha256:59c6f6fb180635057a5975183c8f0c5c36dcaec0c7eb728ae9ba3f09e7863fc5 \
+ --hash=sha256:6839212a4b3fcddeb4e7dce45fe389bfae3dbd08994d632b9e3001f7ad707037 \
+ --hash=sha256:6eb103ec13c204a0c744ba371cfb2218f6fe38e50a13414b17653e5dee9730d2 \
+ --hash=sha256:744bedc42a8100b63e196cd65ee1e1e91b45691aa9097beb90d21d797b9065a5 \
+ --hash=sha256:7cd7ac7c3de8a5bf44ebeb5a31854b48417fc450887d9f6e60dace5bd2fb4440 \
+ --hash=sha256:7d3ead95c6da10fe887eb9776c72732e0d99510a53bd1e09d78ac2176060d118 \
+ --hash=sha256:7da9e982e9cf3465eef0b952243e2c7dd83f51452c1c1d09ab3926fdd1b7d2f7 \
+ --hash=sha256:7e5f52664c04594d1e87c6c680e8541d6c2e58a57a2fc10061fd2320c5834d2f \
+ --hash=sha256:8606b5764fbb67fb8e60404e0bb195a1b0f1a297771e3c71012970c200f9ceb0 \
+ --hash=sha256:88b9e5438fd8412fb5feff947f5e3816f80494f58281a3ac26e9ed887e65df99 \
+ --hash=sha256:8adce6c7ee4973720e0fbe8ca3694120b24396a07e47f451ce9b05d919290de1 \
+ --hash=sha256:8f3cf20c21c16117245c16c0799ca25ddd356ffabe6239933c99b85db5188c95 \
+ --hash=sha256:965e7710409e81ad1bef5fb122f7dfbe8d6921b89e5780cd52f28b99b3e14910 \
+ --hash=sha256:9eb79755e69be633b529bf53259431fe23ef88554fc2b17ebe1ef5c7661ee987 \
+ --hash=sha256:a6a28c380dce8ec3a997e03ceddfeff5920f7699b6eb2f0c4d5599993f5cfc5d \
+ --hash=sha256:bc07a9db24e84487f82a41547fa8456daac97ed67d460c5a3e6cf9cecda06990 \
+ --hash=sha256:c83dbfca90bddd846fb19463ac738fc116e2c587e58de607df1dff74465ffd60 \
+ --hash=sha256:ce62232f2c663f05535cab1f5fd55ce5c1d4bf7de39342f094457d7995096d1a \
+ --hash=sha256:d27add0f74fa0c32d9a7731475fd9025923a0fb0f1702360da1fe12c569b7837 \
+ --hash=sha256:d5b2e497ab411149e44cf288f14cf6bcce855a8a48c2662fa33f7f4bc3372303 \
+ --hash=sha256:d6ed875d1829302ed926f5b87631a65f6721239bb145a46ca7e3f3f30e422d5b \
+ --hash=sha256:d7197c8ac367fff703ef942503d5bc76011fc8dd0740ca5ed7ea830a391f8c31 \
+ --hash=sha256:d896e9753015c28eccdfc1a03527014712066e90a15ae9f92fd2eb3a16512db2 \
+ --hash=sha256:d9bfa89130dd1a8f2ca64869e4b9148867df172cafe238ace5f47a9f59d1f47c \
+ --hash=sha256:dd0af01ffb831d3ea1998c67599b3faf75d55ab347370c70f7f00836fad3a02d \
+ --hash=sha256:e2e7e1e4186d20b71116ec0bb5b97905824f2ffe431ab2de0360ff36dcaabe16 \
+ --hash=sha256:f6cdde133cd50fb5af99ac26453835adcb55652138f07a04c68b79588ade03a6 \
+ --hash=sha256:fb6cda83cfc7a84d5d86ebe1ee3264b6aa1d6bc4c2f5744b173e8e21a8f4d67d
# via mozci
-vine==5.0.0 \
- --hash=sha256:4c9dceab6f76ed92105027c49c823800dd33cacce13bdedc5b914e3514b7fb30 \
- --hash=sha256:7d3b1624a953da82ef63462013bbd271d3eb75751489f9807598e8f340bd637e
+vine==5.1.0 \
+ --hash=sha256:40fdf3c48b2cfe1c38a49e9ae2da6fda88e4794c810050a728bd7413811fb1dc \
+ --hash=sha256:8b62e981d35c41049211cf62a0a1242d8c1ee9bd15bb196ce38aefd6799e61e0
# via
# amqp
# celery
# kombu
-wcwidth==0.2.6 \
- --hash=sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e \
- --hash=sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0
+wcwidth==0.2.13 \
+ --hash=sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859 \
+ --hash=sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5
# via
# blessed
# prompt-toolkit
@@ -1375,126 +1421,145 @@ whitenoise[brotli]==6.5.0 \
--hash=sha256:15fe60546ac975b58e357ccaeb165a4ca2d0ab697e48450b8f0307ca368195a8 \
--hash=sha256:16468e9ad2189f09f4a8c635a9031cc9bb2cdbc8e5e53365407acf99f7ade9ec
# via -r requirements/common.in
-yarl==1.9.2 \
- --hash=sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571 \
- --hash=sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3 \
- --hash=sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3 \
- --hash=sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c \
- --hash=sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7 \
- --hash=sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04 \
- --hash=sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191 \
- --hash=sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea \
- --hash=sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4 \
- --hash=sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4 \
- --hash=sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095 \
- --hash=sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e \
- --hash=sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74 \
- --hash=sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef \
- --hash=sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33 \
- --hash=sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde \
- --hash=sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45 \
- --hash=sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf \
- --hash=sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b \
- --hash=sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac \
- --hash=sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0 \
- --hash=sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528 \
- --hash=sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716 \
- --hash=sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb \
- --hash=sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18 \
- --hash=sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72 \
- --hash=sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6 \
- --hash=sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582 \
- --hash=sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5 \
- --hash=sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368 \
- --hash=sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc \
- --hash=sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9 \
- --hash=sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be \
- --hash=sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a \
- --hash=sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80 \
- --hash=sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8 \
- --hash=sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6 \
- --hash=sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417 \
- --hash=sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574 \
- --hash=sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59 \
- --hash=sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608 \
- --hash=sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82 \
- --hash=sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1 \
- --hash=sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3 \
- --hash=sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d \
- --hash=sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8 \
- --hash=sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc \
- --hash=sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac \
- --hash=sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8 \
- --hash=sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955 \
- --hash=sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0 \
- --hash=sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367 \
- --hash=sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb \
- --hash=sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a \
- --hash=sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623 \
- --hash=sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2 \
- --hash=sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6 \
- --hash=sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7 \
- --hash=sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4 \
- --hash=sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051 \
- --hash=sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938 \
- --hash=sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8 \
- --hash=sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9 \
- --hash=sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3 \
- --hash=sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5 \
- --hash=sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9 \
- --hash=sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333 \
- --hash=sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185 \
- --hash=sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3 \
- --hash=sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560 \
- --hash=sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b \
- --hash=sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7 \
- --hash=sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78 \
- --hash=sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7
+yarl==1.9.4 \
+ --hash=sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51 \
+ --hash=sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce \
+ --hash=sha256:07574b007ee20e5c375a8fe4a0789fad26db905f9813be0f9fef5a68080de559 \
+ --hash=sha256:09efe4615ada057ba2d30df871d2f668af661e971dfeedf0c159927d48bbeff0 \
+ --hash=sha256:0d2454f0aef65ea81037759be5ca9947539667eecebca092733b2eb43c965a81 \
+ --hash=sha256:0e9d124c191d5b881060a9e5060627694c3bdd1fe24c5eecc8d5d7d0eb6faabc \
+ --hash=sha256:18580f672e44ce1238b82f7fb87d727c4a131f3a9d33a5e0e82b793362bf18b4 \
+ --hash=sha256:1f23e4fe1e8794f74b6027d7cf19dc25f8b63af1483d91d595d4a07eca1fb26c \
+ --hash=sha256:206a55215e6d05dbc6c98ce598a59e6fbd0c493e2de4ea6cc2f4934d5a18d130 \
+ --hash=sha256:23d32a2594cb5d565d358a92e151315d1b2268bc10f4610d098f96b147370136 \
+ --hash=sha256:26a1dc6285e03f3cc9e839a2da83bcbf31dcb0d004c72d0730e755b33466c30e \
+ --hash=sha256:29e0f83f37610f173eb7e7b5562dd71467993495e568e708d99e9d1944f561ec \
+ --hash=sha256:2b134fd795e2322b7684155b7855cc99409d10b2e408056db2b93b51a52accc7 \
+ --hash=sha256:2d47552b6e52c3319fede1b60b3de120fe83bde9b7bddad11a69fb0af7db32f1 \
+ --hash=sha256:357495293086c5b6d34ca9616a43d329317feab7917518bc97a08f9e55648455 \
+ --hash=sha256:35a2b9396879ce32754bd457d31a51ff0a9d426fd9e0e3c33394bf4b9036b099 \
+ --hash=sha256:3777ce5536d17989c91696db1d459574e9a9bd37660ea7ee4d3344579bb6f129 \
+ --hash=sha256:3986b6f41ad22988e53d5778f91855dc0399b043fc8946d4f2e68af22ee9ff10 \
+ --hash=sha256:44d8ffbb9c06e5a7f529f38f53eda23e50d1ed33c6c869e01481d3fafa6b8142 \
+ --hash=sha256:49a180c2e0743d5d6e0b4d1a9e5f633c62eca3f8a86ba5dd3c471060e352ca98 \
+ --hash=sha256:4aa9741085f635934f3a2583e16fcf62ba835719a8b2b28fb2917bb0537c1dfa \
+ --hash=sha256:4b21516d181cd77ebd06ce160ef8cc2a5e9ad35fb1c5930882baff5ac865eee7 \
+ --hash=sha256:4b3c1ffe10069f655ea2d731808e76e0f452fc6c749bea04781daf18e6039525 \
+ --hash=sha256:4c7d56b293cc071e82532f70adcbd8b61909eec973ae9d2d1f9b233f3d943f2c \
+ --hash=sha256:4e9035df8d0880b2f1c7f5031f33f69e071dfe72ee9310cfc76f7b605958ceb9 \
+ --hash=sha256:54525ae423d7b7a8ee81ba189f131054defdb122cde31ff17477951464c1691c \
+ --hash=sha256:549d19c84c55d11687ddbd47eeb348a89df9cb30e1993f1b128f4685cd0ebbf8 \
+ --hash=sha256:54beabb809ffcacbd9d28ac57b0db46e42a6e341a030293fb3185c409e626b8b \
+ --hash=sha256:566db86717cf8080b99b58b083b773a908ae40f06681e87e589a976faf8246bf \
+ --hash=sha256:5a2e2433eb9344a163aced6a5f6c9222c0786e5a9e9cac2c89f0b28433f56e23 \
+ --hash=sha256:5aef935237d60a51a62b86249839b51345f47564208c6ee615ed2a40878dccdd \
+ --hash=sha256:604f31d97fa493083ea21bd9b92c419012531c4e17ea6da0f65cacdcf5d0bd27 \
+ --hash=sha256:63b20738b5aac74e239622d2fe30df4fca4942a86e31bf47a81a0e94c14df94f \
+ --hash=sha256:686a0c2f85f83463272ddffd4deb5e591c98aac1897d65e92319f729c320eece \
+ --hash=sha256:6a962e04b8f91f8c4e5917e518d17958e3bdee71fd1d8b88cdce74dd0ebbf434 \
+ --hash=sha256:6ad6d10ed9b67a382b45f29ea028f92d25bc0bc1daf6c5b801b90b5aa70fb9ec \
+ --hash=sha256:6f5cb257bc2ec58f437da2b37a8cd48f666db96d47b8a3115c29f316313654ff \
+ --hash=sha256:6fe79f998a4052d79e1c30eeb7d6c1c1056ad33300f682465e1b4e9b5a188b78 \
+ --hash=sha256:7855426dfbddac81896b6e533ebefc0af2f132d4a47340cee6d22cac7190022d \
+ --hash=sha256:7d5aaac37d19b2904bb9dfe12cdb08c8443e7ba7d2852894ad448d4b8f442863 \
+ --hash=sha256:801e9264d19643548651b9db361ce3287176671fb0117f96b5ac0ee1c3530d53 \
+ --hash=sha256:81eb57278deb6098a5b62e88ad8281b2ba09f2f1147c4767522353eaa6260b31 \
+ --hash=sha256:824d6c50492add5da9374875ce72db7a0733b29c2394890aef23d533106e2b15 \
+ --hash=sha256:8397a3817d7dcdd14bb266283cd1d6fc7264a48c186b986f32e86d86d35fbac5 \
+ --hash=sha256:848cd2a1df56ddbffeb375535fb62c9d1645dde33ca4d51341378b3f5954429b \
+ --hash=sha256:84fc30f71689d7fc9168b92788abc977dc8cefa806909565fc2951d02f6b7d57 \
+ --hash=sha256:8619d6915b3b0b34420cf9b2bb6d81ef59d984cb0fde7544e9ece32b4b3043c3 \
+ --hash=sha256:8a854227cf581330ffa2c4824d96e52ee621dd571078a252c25e3a3b3d94a1b1 \
+ --hash=sha256:8be9e837ea9113676e5754b43b940b50cce76d9ed7d2461df1af39a8ee674d9f \
+ --hash=sha256:928cecb0ef9d5a7946eb6ff58417ad2fe9375762382f1bf5c55e61645f2c43ad \
+ --hash=sha256:957b4774373cf6f709359e5c8c4a0af9f6d7875db657adb0feaf8d6cb3c3964c \
+ --hash=sha256:992f18e0ea248ee03b5a6e8b3b4738850ae7dbb172cc41c966462801cbf62cf7 \
+ --hash=sha256:9fc5fc1eeb029757349ad26bbc5880557389a03fa6ada41703db5e068881e5f2 \
+ --hash=sha256:a00862fb23195b6b8322f7d781b0dc1d82cb3bcac346d1e38689370cc1cc398b \
+ --hash=sha256:a3a6ed1d525bfb91b3fc9b690c5a21bb52de28c018530ad85093cc488bee2dd2 \
+ --hash=sha256:a6327976c7c2f4ee6816eff196e25385ccc02cb81427952414a64811037bbc8b \
+ --hash=sha256:a7409f968456111140c1c95301cadf071bd30a81cbd7ab829169fb9e3d72eae9 \
+ --hash=sha256:a825ec844298c791fd28ed14ed1bffc56a98d15b8c58a20e0e08c1f5f2bea1be \
+ --hash=sha256:a8c1df72eb746f4136fe9a2e72b0c9dc1da1cbd23b5372f94b5820ff8ae30e0e \
+ --hash=sha256:a9bd00dc3bc395a662900f33f74feb3e757429e545d831eef5bb280252631984 \
+ --hash=sha256:aa102d6d280a5455ad6a0f9e6d769989638718e938a6a0a2ff3f4a7ff8c62cc4 \
+ --hash=sha256:aaaea1e536f98754a6e5c56091baa1b6ce2f2700cc4a00b0d49eca8dea471074 \
+ --hash=sha256:ad4d7a90a92e528aadf4965d685c17dacff3df282db1121136c382dc0b6014d2 \
+ --hash=sha256:b8477c1ee4bd47c57d49621a062121c3023609f7a13b8a46953eb6c9716ca392 \
+ --hash=sha256:ba6f52cbc7809cd8d74604cce9c14868306ae4aa0282016b641c661f981a6e91 \
+ --hash=sha256:bac8d525a8dbc2a1507ec731d2867025d11ceadcb4dd421423a5d42c56818541 \
+ --hash=sha256:bef596fdaa8f26e3d66af846bbe77057237cb6e8efff8cd7cc8dff9a62278bbf \
+ --hash=sha256:c0ec0ed476f77db9fb29bca17f0a8fcc7bc97ad4c6c1d8959c507decb22e8572 \
+ --hash=sha256:c38c9ddb6103ceae4e4498f9c08fac9b590c5c71b0370f98714768e22ac6fa66 \
+ --hash=sha256:c7224cab95645c7ab53791022ae77a4509472613e839dab722a72abe5a684575 \
+ --hash=sha256:c74018551e31269d56fab81a728f683667e7c28c04e807ba08f8c9e3bba32f14 \
+ --hash=sha256:ca06675212f94e7a610e85ca36948bb8fc023e458dd6c63ef71abfd482481aa5 \
+ --hash=sha256:d1d2532b340b692880261c15aee4dc94dd22ca5d61b9db9a8a361953d36410b1 \
+ --hash=sha256:d25039a474c4c72a5ad4b52495056f843a7ff07b632c1b92ea9043a3d9950f6e \
+ --hash=sha256:d5ff2c858f5f6a42c2a8e751100f237c5e869cbde669a724f2062d4c4ef93551 \
+ --hash=sha256:d7d7f7de27b8944f1fee2c26a88b4dabc2409d2fea7a9ed3df79b67277644e17 \
+ --hash=sha256:d7eeb6d22331e2fd42fce928a81c697c9ee2d51400bd1a28803965883e13cead \
+ --hash=sha256:d8a1c6c0be645c745a081c192e747c5de06e944a0d21245f4cf7c05e457c36e0 \
+ --hash=sha256:d8b889777de69897406c9fb0b76cdf2fd0f31267861ae7501d93003d55f54fbe \
+ --hash=sha256:d9e09c9d74f4566e905a0b8fa668c58109f7624db96a2171f21747abc7524234 \
+ --hash=sha256:db8e58b9d79200c76956cefd14d5c90af54416ff5353c5bfd7cbe58818e26ef0 \
+ --hash=sha256:ddb2a5c08a4eaaba605340fdee8fc08e406c56617566d9643ad8bf6852778fc7 \
+ --hash=sha256:e0381b4ce23ff92f8170080c97678040fc5b08da85e9e292292aba67fdac6c34 \
+ --hash=sha256:e23a6d84d9d1738dbc6e38167776107e63307dfc8ad108e580548d1f2c587f42 \
+ --hash=sha256:e516dc8baf7b380e6c1c26792610230f37147bb754d6426462ab115a02944385 \
+ --hash=sha256:ea65804b5dc88dacd4a40279af0cdadcfe74b3e5b4c897aa0d81cf86927fee78 \
+ --hash=sha256:ec61d826d80fc293ed46c9dd26995921e3a82146feacd952ef0757236fc137be \
+ --hash=sha256:ee04010f26d5102399bd17f8df8bc38dc7ccd7701dc77f4a68c5b8d733406958 \
+ --hash=sha256:f3bc6af6e2b8f92eced34ef6a96ffb248e863af20ef4fde9448cc8c9b858b749 \
+ --hash=sha256:f7d6b36dd2e029b6bcb8a13cf19664c7b8e19ab3a58e0fefbb5b8461447ed5ec
# via aiohttp
-zstandard==0.21.0 \
- --hash=sha256:0aad6090ac164a9d237d096c8af241b8dcd015524ac6dbec1330092dba151657 \
- --hash=sha256:0bdbe350691dec3078b187b8304e6a9c4d9db3eb2d50ab5b1d748533e746d099 \
- --hash=sha256:0e1e94a9d9e35dc04bf90055e914077c80b1e0c15454cc5419e82529d3e70728 \
- --hash=sha256:1243b01fb7926a5a0417120c57d4c28b25a0200284af0525fddba812d575f605 \
- --hash=sha256:144a4fe4be2e747bf9c646deab212666e39048faa4372abb6a250dab0f347a29 \
- --hash=sha256:14e10ed461e4807471075d4b7a2af51f5234c8f1e2a0c1d37d5ca49aaaad49e8 \
- --hash=sha256:1545fb9cb93e043351d0cb2ee73fa0ab32e61298968667bb924aac166278c3fc \
- --hash=sha256:1e6e131a4df2eb6f64961cea6f979cdff22d6e0d5516feb0d09492c8fd36f3bc \
- --hash=sha256:25fbfef672ad798afab12e8fd204d122fca3bc8e2dcb0a2ba73bf0a0ac0f5f07 \
- --hash=sha256:2769730c13638e08b7a983b32cb67775650024632cd0476bf1ba0e6360f5ac7d \
- --hash=sha256:48b6233b5c4cacb7afb0ee6b4f91820afbb6c0e3ae0fa10abbc20000acdf4f11 \
- --hash=sha256:4af612c96599b17e4930fe58bffd6514e6c25509d120f4eae6031b7595912f85 \
- --hash=sha256:52b2b5e3e7670bd25835e0e0730a236f2b0df87672d99d3bf4bf87248aa659fb \
- --hash=sha256:57ac078ad7333c9db7a74804684099c4c77f98971c151cee18d17a12649bc25c \
- --hash=sha256:62957069a7c2626ae80023998757e27bd28d933b165c487ab6f83ad3337f773d \
- --hash=sha256:649a67643257e3b2cff1c0a73130609679a5673bf389564bc6d4b164d822a7ce \
- --hash=sha256:67829fdb82e7393ca68e543894cd0581a79243cc4ec74a836c305c70a5943f07 \
- --hash=sha256:7d3bc4de588b987f3934ca79140e226785d7b5e47e31756761e48644a45a6766 \
- --hash=sha256:7f2afab2c727b6a3d466faee6974a7dad0d9991241c498e7317e5ccf53dbc766 \
- --hash=sha256:8070c1cdb4587a8aa038638acda3bd97c43c59e1e31705f2766d5576b329e97c \
- --hash=sha256:8257752b97134477fb4e413529edaa04fc0457361d304c1319573de00ba796b1 \
- --hash=sha256:9980489f066a391c5572bc7dc471e903fb134e0b0001ea9b1d3eff85af0a6f1b \
- --hash=sha256:9cff89a036c639a6a9299bf19e16bfb9ac7def9a7634c52c257166db09d950e7 \
- --hash=sha256:a8d200617d5c876221304b0e3fe43307adde291b4a897e7b0617a61611dfff6a \
- --hash=sha256:a9fec02ce2b38e8b2e86079ff0b912445495e8ab0b137f9c0505f88ad0d61296 \
- --hash=sha256:b1367da0dde8ae5040ef0413fb57b5baeac39d8931c70536d5f013b11d3fc3a5 \
- --hash=sha256:b69cccd06a4a0a1d9fb3ec9a97600055cf03030ed7048d4bcb88c574f7895773 \
- --hash=sha256:b72060402524ab91e075881f6b6b3f37ab715663313030d0ce983da44960a86f \
- --hash=sha256:c053b7c4cbf71cc26808ed67ae955836232f7638444d709bfc302d3e499364fa \
- --hash=sha256:cff891e37b167bc477f35562cda1248acc115dbafbea4f3af54ec70821090965 \
- --hash=sha256:d12fa383e315b62630bd407477d750ec96a0f438447d0e6e496ab67b8b451d39 \
- --hash=sha256:d2d61675b2a73edcef5e327e38eb62bdfc89009960f0e3991eae5cc3d54718de \
- --hash=sha256:db62cbe7a965e68ad2217a056107cc43d41764c66c895be05cf9c8b19578ce9c \
- --hash=sha256:ddb086ea3b915e50f6604be93f4f64f168d3fc3cef3585bb9a375d5834392d4f \
- --hash=sha256:df28aa5c241f59a7ab524f8ad8bb75d9a23f7ed9d501b0fed6d40ec3064784e8 \
- --hash=sha256:e1e0c62a67ff425927898cf43da2cf6b852289ebcc2054514ea9bf121bec10a5 \
- --hash=sha256:e6048a287f8d2d6e8bc67f6b42a766c61923641dd4022b7fd3f7439e17ba5a4d \
- --hash=sha256:e7d560ce14fd209db6adacce8908244503a009c6c39eee0c10f138996cd66d3e \
- --hash=sha256:ea68b1ba4f9678ac3d3e370d96442a6332d431e5050223626bdce748692226ea \
- --hash=sha256:f08e3a10d01a247877e4cb61a82a319ea746c356a3786558bed2481e6c405546 \
- --hash=sha256:f1b9703fe2e6b6811886c44052647df7c37478af1b4a1a9078585806f42e5b15 \
- --hash=sha256:fe6c821eb6870f81d73bf10e5deed80edcac1e63fbc40610e61f340723fd5f7c \
- --hash=sha256:ff0852da2abe86326b20abae912d0367878dd0854b8931897d44cfeb18985472
+zstandard==0.22.0 \
+ --hash=sha256:11f0d1aab9516a497137b41e3d3ed4bbf7b2ee2abc79e5c8b010ad286d7464bd \
+ --hash=sha256:1958100b8a1cc3f27fa21071a55cb2ed32e9e5df4c3c6e661c193437f171cba2 \
+ --hash=sha256:1a90ba9a4c9c884bb876a14be2b1d216609385efb180393df40e5172e7ecf356 \
+ --hash=sha256:1d43501f5f31e22baf822720d82b5547f8a08f5386a883b32584a185675c8fbf \
+ --hash=sha256:23d2b3c2b8e7e5a6cb7922f7c27d73a9a615f0a5ab5d0e03dd533c477de23004 \
+ --hash=sha256:2612e9bb4977381184bb2463150336d0f7e014d6bb5d4a370f9a372d21916f69 \
+ --hash=sha256:275df437ab03f8c033b8a2c181e51716c32d831082d93ce48002a5227ec93019 \
+ --hash=sha256:2ac9957bc6d2403c4772c890916bf181b2653640da98f32e04b96e4d6fb3252a \
+ --hash=sha256:2b11ea433db22e720758cba584c9d661077121fcf60ab43351950ded20283440 \
+ --hash=sha256:2fdd53b806786bd6112d97c1f1e7841e5e4daa06810ab4b284026a1a0e484c0b \
+ --hash=sha256:33591d59f4956c9812f8063eff2e2c0065bc02050837f152574069f5f9f17775 \
+ --hash=sha256:36a47636c3de227cd765e25a21dc5dace00539b82ddd99ee36abae38178eff9e \
+ --hash=sha256:39b2853efc9403927f9065cc48c9980649462acbdf81cd4f0cb773af2fd734bc \
+ --hash=sha256:3db41c5e49ef73641d5111554e1d1d3af106410a6c1fb52cf68912ba7a343a0d \
+ --hash=sha256:445b47bc32de69d990ad0f34da0e20f535914623d1e506e74d6bc5c9dc40bb09 \
+ --hash=sha256:466e6ad8caefb589ed281c076deb6f0cd330e8bc13c5035854ffb9c2014b118c \
+ --hash=sha256:48f260e4c7294ef275744210a4010f116048e0c95857befb7462e033f09442fe \
+ --hash=sha256:4ac59d5d6910b220141c1737b79d4a5aa9e57466e7469a012ed42ce2d3995e88 \
+ --hash=sha256:53866a9d8ab363271c9e80c7c2e9441814961d47f88c9bc3b248142c32141d94 \
+ --hash=sha256:589402548251056878d2e7c8859286eb91bd841af117dbe4ab000e6450987e08 \
+ --hash=sha256:68953dc84b244b053c0d5f137a21ae8287ecf51b20872eccf8eaac0302d3e3b0 \
+ --hash=sha256:6c25b8eb733d4e741246151d895dd0308137532737f337411160ff69ca24f93a \
+ --hash=sha256:7034d381789f45576ec3f1fa0e15d741828146439228dc3f7c59856c5bcd3292 \
+ --hash=sha256:73a1d6bd01961e9fd447162e137ed949c01bdb830dfca487c4a14e9742dccc93 \
+ --hash=sha256:8226a33c542bcb54cd6bd0a366067b610b41713b64c9abec1bc4533d69f51e70 \
+ --hash=sha256:888196c9c8893a1e8ff5e89b8f894e7f4f0e64a5af4d8f3c410f0319128bb2f8 \
+ --hash=sha256:88c5b4b47a8a138338a07fc94e2ba3b1535f69247670abfe422de4e0b344aae2 \
+ --hash=sha256:8a1b2effa96a5f019e72874969394edd393e2fbd6414a8208fea363a22803b45 \
+ --hash=sha256:93e1856c8313bc688d5df069e106a4bc962eef3d13372020cc6e3ebf5e045202 \
+ --hash=sha256:9501f36fac6b875c124243a379267d879262480bf85b1dbda61f5ad4d01b75a3 \
+ --hash=sha256:959665072bd60f45c5b6b5d711f15bdefc9849dd5da9fb6c873e35f5d34d8cfb \
+ --hash=sha256:a1d67d0d53d2a138f9e29d8acdabe11310c185e36f0a848efa104d4e40b808e4 \
+ --hash=sha256:a493d470183ee620a3df1e6e55b3e4de8143c0ba1b16f3ded83208ea8ddfd91d \
+ --hash=sha256:a7ccf5825fd71d4542c8ab28d4d482aace885f5ebe4b40faaa290eed8e095a4c \
+ --hash=sha256:a88b7df61a292603e7cd662d92565d915796b094ffb3d206579aaebac6b85d5f \
+ --hash=sha256:a97079b955b00b732c6f280d5023e0eefe359045e8b83b08cf0333af9ec78f26 \
+ --hash=sha256:d22fdef58976457c65e2796e6730a3ea4a254f3ba83777ecfc8592ff8d77d303 \
+ --hash=sha256:d75f693bb4e92c335e0645e8845e553cd09dc91616412d1d4650da835b5449df \
+ --hash=sha256:d8593f8464fb64d58e8cb0b905b272d40184eac9a18d83cf8c10749c3eafcd7e \
+ --hash=sha256:d8fff0f0c1d8bc5d866762ae95bd99d53282337af1be9dc0d88506b340e74b73 \
+ --hash=sha256:de20a212ef3d00d609d0b22eb7cc798d5a69035e81839f549b538eff4105d01c \
+ --hash=sha256:e9e9d4e2e336c529d4c435baad846a181e39a982f823f7e4495ec0b0ec8538d2 \
+ --hash=sha256:f058a77ef0ece4e210bb0450e68408d4223f728b109764676e1a13537d056bb0 \
+ --hash=sha256:f1a4b358947a65b94e2501ce3e078bbc929b039ede4679ddb0460829b12f7375 \
+ --hash=sha256:f9b2cde1cd1b2a10246dbc143ba49d942d14fb3d2b4bccf4618d475c65464912 \
+ --hash=sha256:fe3390c538f12437b859d815040763abc728955a52ca6ff9c5d4ac707c4ad98e
# via mozci
# WARNING: The following packages were not pinned, but pip requires them to be
diff --git a/tests/test_dockerflow.py b/tests/test_dockerflow.py
new file mode 100644
index 00000000000..f8362c54e91
--- /dev/null
+++ b/tests/test_dockerflow.py
@@ -0,0 +1,47 @@
+import json
+import pytest
+
+from django.conf import settings
+
+
+@pytest.mark.django_db
+def test_get_version(client):
+ response = client.get("/__version__")
+ assert response.status_code == 200
+
+ with open(f"{settings.BASE_DIR}/version.json", "r") as version_file:
+ assert response.json() == json.loads(version_file.read())
+
+
+@pytest.mark.django_db
+def test_get_heartbeat_debug(client):
+ settings.DEBUG = True
+
+ response = client.get("/__heartbeat__")
+ assert response.status_code == 200
+
+ # In DEBUG mode, we can retrieve the details of each check
+ heartbeat = response.json()
+ assert heartbeat["status"] == "ok"
+ assert "checks" in heartbeat
+ assert "details" in heartbeat
+
+
+@pytest.mark.django_db
+def test_get_heartbeat(client):
+ settings.DEBUG = False
+
+ response = client.get("/__heartbeat__")
+ assert response.status_code == 200
+
+ # When DEBUG is False, we can't retrieve check details and the status is always
+ # equal to "warning" because of the deployment checks that are added:
+ # https://github.com/mozilla-services/python-dockerflow/blob/e316f0c5f0aa6d176a6d08d1f568f83658b51339/src/dockerflow/django/views.py#L45
+ assert response.json() == {"status": "warning"}
+
+
+@pytest.mark.django_db
+def test_get_lbheartbeat(client):
+ response = client.get("/__lbheartbeat__")
+ assert response.status_code == 200
+ assert not response.content
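For orientation only (this note is not part of the patch): the three endpoints exercised above come from python-dockerflow's Django integration, which answers /__version__, /__heartbeat__ and /__lbheartbeat__ through a middleware and serves the version.json that the first test reads from settings.BASE_DIR. A minimal, hedged settings sketch of that wiring, with names taken from the python-dockerflow documentation rather than from Treeherder's actual configuration:

    # Hypothetical settings.py fragment -- an assumption based on the
    # python-dockerflow docs, not a description of treeherder.config.settings.
    INSTALLED_APPS = [
        # ... existing apps ...
        "dockerflow.django",
    ]

    MIDDLEWARE = [
        # DockerflowMiddleware intercepts /__version__, /__heartbeat__ and
        # /__lbheartbeat__ before the request reaches the URL resolver.
        "dockerflow.django.middleware.DockerflowMiddleware",
        # ... existing middleware ...
    ]

As the heartbeat tests above show, /__heartbeat__ only exposes per-check details when DEBUG is True; with DEBUG off it collapses to a bare status, which the deployment checks push to "warning".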
From 1ef9c293a61576bf5e5c241623467654b32f68a9 Mon Sep 17 00:00:00 2001
From: Tooru Fujisawa
Date: Sun, 12 Nov 2023 17:23:25 +0900
Subject: [PATCH 003/128] Bug 1864319 - Always show the scrollbar to avoid
layout changes that depend on the content.
---
ui/css/treeherder.css | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/ui/css/treeherder.css b/ui/css/treeherder.css
index 8eb26e616f8..e96d18fbd3e 100644
--- a/ui/css/treeherder.css
+++ b/ui/css/treeherder.css
@@ -17,7 +17,9 @@ body {
}
.th-global-content {
- overflow-y: auto;
+ /* Always show the scrollbar to avoid layout changes
+ * that depend on the content */
+ overflow-y: scroll;
overflow-x: hidden;
height: 100%;
}
From 83186dcd4de04a6b609f2d25cb65c74bbe71309e Mon Sep 17 00:00:00 2001
From: Yoann Schneider <114239491+yschneider-sinneria@users.noreply.github.com>
Date: Wed, 31 Jan 2024 16:59:20 +0100
Subject: [PATCH 004/128] Bug 1823654 - Mostly use double quotes (#7900)
* Enable quote style verification from Black
* Actually exclude migrations folders
---
.pre-commit-config.yaml | 1 +
manage.py | 2 +-
misc/compare_pushes.py | 2 +-
pyproject.toml | 8 -
tests/autoclassify/utils.py | 6 +-
tests/client/test_perfherder_client.py | 32 +-
tests/client/test_treeherder_client.py | 6 +-
tests/conftest.py | 276 ++++----
tests/e2e/conftest.py | 8 +-
tests/e2e/test_job_ingestion.py | 60 +-
tests/e2e/test_jobs_loaded.py | 24 +-
tests/etl/conftest.py | 6 +-
tests/etl/test_bugzilla.py | 8 +-
tests/etl/test_classification_loader.py | 290 ++++----
tests/etl/test_job_ingestion.py | 92 +--
tests/etl/test_job_loader.py | 36 +-
tests/etl/test_job_schema.py | 4 +-
tests/etl/test_load_artifacts.py | 32 +-
tests/etl/test_perf_data_adapters.py | 152 ++---
tests/etl/test_perf_data_load.py | 232 +++----
tests/etl/test_perf_schema.py | 34 +-
tests/etl/test_push_loader.py | 8 +-
tests/etl/test_pushlog.py | 20 +-
tests/etl/test_runnable_jobs.py | 38 +-
tests/etl/test_text.py | 18 +-
.../intermittents_commenter/test_commenter.py | 32 +-
.../test_artifact_builder_collection.py | 8 +-
tests/log_parser/test_error_parser.py | 4 +-
.../test_performance_artifact_builder.py | 10 +-
tests/log_parser/test_performance_parser.py | 2 +-
tests/log_parser/test_store_failure_lines.py | 42 +-
tests/log_parser/test_tasks.py | 6 +-
tests/log_parser/test_utils.py | 60 +-
.../cycle_data/test_perfherder_cycling.py | 132 ++--
.../cycle_data/test_treeherder_cycling.py | 32 +-
tests/model/test_bugscache.py | 84 +--
tests/model/test_error_summary.py | 196 +++---
tests/model/test_files_bugzilla_map.py | 138 ++--
tests/model/test_option_collection.py | 2 +-
tests/model/test_performance_signature.py | 2 +-
tests/model/test_performance_tag.py | 4 +-
tests/model/test_suite_public_name.py | 86 +--
tests/model/test_time_to_triage.py | 32 +-
tests/perf/auto_perf_sheriffing/conftest.py | 66 +-
.../test_backfill_reports/conftest.py | 58 +-
.../test_alerts_picker.py | 70 +-
.../test_identify_retriggerables.py | 14 +-
.../test_backfill_tool.py | 6 +-
.../test_report_backfill_outcome.py | 22 +-
.../auto_perf_sheriffing/test_secretary.py | 50 +-
.../auto_perf_sheriffing/test_sherlock.py | 24 +-
.../perf/auto_sheriffing_criteria/conftest.py | 4 +-
.../test_common_behaviour.py | 106 +--
.../test_criteria_tracker.py | 116 ++--
.../test_engineer_traction.py | 48 +-
.../test_fix_ratio.py | 20 +-
.../test_nonblockable_session.py | 4 +-
tests/perf/test_email.py | 2 +-
tests/perfalert/conftest.py | 2 +-
tests/perfalert/test_alert_modification.py | 18 +-
tests/perfalert/test_alerts.py | 8 +-
tests/perfalert/test_analyze.py | 20 +-
tests/push_health/test_builds.py | 10 +-
tests/push_health/test_classification.py | 44 +-
tests/push_health/test_compare.py | 24 +-
tests/push_health/test_linting.py | 14 +-
tests/push_health/test_tests.py | 26 +-
tests/push_health/test_usage.py | 36 +-
tests/push_health/test_utils.py | 102 +--
tests/sample_data_generator.py | 48 +-
tests/services/pulse/test_consumers.py | 20 +-
tests/services/test_taskcluster.py | 14 +-
tests/settings.py | 10 +-
tests/test_middleware.py | 52 +-
tests/test_setup.py | 14 +-
tests/test_utils.py | 70 +-
tests/test_worker/test_stats.py | 24 +-
tests/test_worker/test_task.py | 2 +-
.../test_taskcluster_download_artifact.py | 26 +-
tests/utils/test_taskcluster_lib_scopes.py | 86 +--
tests/webapp/api/test_auth.py | 188 +++---
tests/webapp/api/test_bug_creation.py | 122 ++--
tests/webapp/api/test_bug_job_map_api.py | 18 +-
tests/webapp/api/test_bugzilla.py | 144 ++--
tests/webapp/api/test_csp_report.py | 22 +-
tests/webapp/api/test_groupsummary_api.py | 8 +-
.../api/test_intermittent_failures_api.py | 54 +-
tests/webapp/api/test_job_log_url_api.py | 14 +-
tests/webapp/api/test_jobs_api.py | 116 ++--
tests/webapp/api/test_note_api.py | 24 +-
.../webapp/api/test_option_collection_hash.py | 6 +-
tests/webapp/api/test_perfcompare_api.py | 540 +++++++--------
.../webapp/api/test_performance_alerts_api.py | 160 ++---
.../api/test_performance_alertsummary_api.py | 362 +++++-----
.../api/test_performance_bug_template_api.py | 20 +-
tests/webapp/api/test_performance_data_api.py | 264 ++++----
tests/webapp/api/test_performance_tags.py | 10 +-
tests/webapp/api/test_push_api.py | 194 +++---
tests/webapp/api/test_version.py | 18 +-
treeherder/__init__.py | 2 +-
treeherder/auth/backends.py | 50 +-
treeherder/celery.py | 8 +-
treeherder/changelog/models.py | 2 +-
treeherder/client/setup.py | 42 +-
treeherder/client/thclient/client.py | 28 +-
treeherder/client/thclient/perfherder.py | 4 +-
treeherder/config/settings.py | 318 ++++-----
treeherder/config/urls.py | 6 +-
treeherder/config/utils.py | 2 +-
treeherder/config/wsgi.py | 2 +-
treeherder/etl/artifact.py | 34 +-
treeherder/etl/bugzilla.py | 122 ++--
treeherder/etl/classification_loader.py | 12 +-
treeherder/etl/files_bugzilla_map.py | 38 +-
treeherder/etl/job_loader.py | 14 +-
treeherder/etl/jobs.py | 128 ++--
treeherder/etl/management/commands/ingest.py | 30 +-
.../management/commands/publish_to_pulse.py | 6 +-
.../commands/pulse_listener_pushes.py | 2 +-
.../commands/pulse_listener_tasks.py | 2 +-
.../pulse_listener_tasks_classification.py | 2 +-
treeherder/etl/perf.py | 148 ++--
treeherder/etl/push.py | 14 +-
treeherder/etl/push_loader.py | 4 +-
treeherder/etl/pushlog.py | 30 +-
treeherder/etl/runnable_jobs.py | 38 +-
treeherder/etl/taskcluster_pulse/handler.py | 32 +-
.../etl/taskcluster_pulse/parse_route.py | 10 +-
treeherder/etl/tasks/pulse_tasks.py | 12 +-
treeherder/etl/tasks/pushlog_tasks.py | 10 +-
treeherder/etl/text.py | 2 +-
.../intermittents_commenter/commenter.py | 144 ++--
.../intermittents_commenter/constants.py | 136 ++--
.../commands/run_intermittents_commenter.py | 28 +-
.../log_parser/artifactbuildercollection.py | 12 +-
treeherder/log_parser/artifactbuilders.py | 2 +-
treeherder/log_parser/failureline.py | 24 +-
.../management/commands/test_parse_log.py | 20 +-
treeherder/log_parser/parsers.py | 4 +-
treeherder/log_parser/tasks.py | 10 +-
treeherder/log_parser/utils.py | 2 +-
treeherder/middleware.py | 12 +-
treeherder/model/data_cycling/cyclers.py | 44 +-
treeherder/model/data_cycling/max_runtime.py | 2 +-
.../model/data_cycling/removal_strategies.py | 82 +--
.../model/data_cycling/signature_remover.py | 4 +-
treeherder/model/data_cycling/utils.py | 4 +-
treeherder/model/error_summary.py | 104 +--
.../commands/backfill_text_log_error_jobs.py | 20 +-
.../commands/cache_failure_history.py | 26 +-
.../model/management/commands/cycle_data.py | 42 +-
.../commands/import_reference_data.py | 82 +--
.../management/commands/load_initial_data.py | 16 +-
treeherder/model/models.py | 310 ++++-----
treeherder/perf/alerts.py | 32 +-
.../auto_perf_sheriffing/backfill_reports.py | 40 +-
.../auto_perf_sheriffing/backfill_tool.py | 4 +-
.../perf/auto_perf_sheriffing/factories.py | 10 +-
.../auto_perf_sheriffing/outcome_checker.py | 4 +-
.../perf/auto_perf_sheriffing/secretary.py | 10 +-
.../perf/auto_perf_sheriffing/sherlock.py | 34 +-
treeherder/perf/email.py | 8 +-
treeherder/perf/exceptions.py | 4 +-
.../management/commands/backfill_perf_jobs.py | 8 +-
.../commands/compute_criteria_formulas.py | 96 +--
.../commands/create_test_perf_data.py | 6 +-
.../management/commands/generate_alerts.py | 22 +-
.../management/commands/import_perf_data.py | 174 ++---
.../perf/management/commands/perf_sheriff.py | 34 +-
.../management/commands/reassign_perf_data.py | 44 +-
.../commands/remove_multi_commit_data.py | 14 +-
.../management/commands/remove_vcs_data.py | 2 +-
.../commands/report_backfill_outcome.py | 4 +-
.../management/commands/test_analyze_perf.py | 70 +-
treeherder/perf/models.py | 154 ++---
.../sheriffing_criteria/bugzilla_formulas.py | 74 +-
.../sheriffing_criteria/criteria_tracking.py | 78 +--
treeherder/perf/tasks.py | 2 +-
treeherder/perfalert/perfalert/__init__.py | 10 +-
treeherder/perfalert/setup.py | 30 +-
treeherder/push_health/builds.py | 6 +-
treeherder/push_health/classification.py | 42 +-
treeherder/push_health/compare.py | 28 +-
treeherder/push_health/filter.py | 6 +-
treeherder/push_health/linting.py | 6 +-
treeherder/push_health/performance.py | 6 +-
treeherder/push_health/tests.py | 142 ++--
treeherder/push_health/usage.py | 44 +-
treeherder/push_health/utils.py | 134 ++--
treeherder/services/elasticsearch/__init__.py | 18 +-
treeherder/services/elasticsearch/mapping.py | 64 +-
treeherder/services/elasticsearch/utils.py | 34 +-
treeherder/services/pulse/consumers.py | 84 +--
treeherder/services/taskcluster.py | 50 +-
treeherder/utils/__init__.py | 2 +-
treeherder/utils/http.py | 16 +-
treeherder/utils/queryset.py | 4 +-
treeherder/utils/taskcluster.py | 4 +-
treeherder/utils/taskcluster_lib_scopes.py | 2 +-
treeherder/webapp/api/bug.py | 8 +-
treeherder/webapp/api/bug_creation.py | 48 +-
treeherder/webapp/api/bugzilla.py | 40 +-
treeherder/webapp/api/changelog.py | 4 +-
treeherder/webapp/api/classification.py | 2 +-
treeherder/webapp/api/csp_report.py | 8 +-
treeherder/webapp/api/exceptions.py | 2 +-
treeherder/webapp/api/groups.py | 68 +-
treeherder/webapp/api/infra_compare.py | 12 +-
treeherder/webapp/api/infra_serializers.py | 10 +-
treeherder/webapp/api/intermittents_view.py | 106 +--
treeherder/webapp/api/investigated_test.py | 16 +-
treeherder/webapp/api/job_log_url.py | 12 +-
treeherder/webapp/api/jobs.py | 320 ++++-----
treeherder/webapp/api/note.py | 24 +-
treeherder/webapp/api/pagination.py | 4 +-
treeherder/webapp/api/perfcompare_utils.py | 50 +-
treeherder/webapp/api/performance_data.py | 638 +++++++++---------
.../webapp/api/performance_serializers.py | 366 +++++-----
treeherder/webapp/api/push.py | 218 +++---
treeherder/webapp/api/refdata.py | 10 +-
treeherder/webapp/api/serializers.py | 252 +++----
treeherder/webapp/api/urls.py | 118 ++--
treeherder/webapp/api/utils.py | 24 +-
treeherder/workers/stats.py | 28 +-
224 files changed, 6078 insertions(+), 6085 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4292a25c70f..f45e6642c6a 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -22,3 +22,4 @@ repos:
hooks:
- id: black
language_version: python3.9
+ exclude: ^treeherder/.*/migrations
diff --git a/manage.py b/manage.py
index c1b7da31a26..302bd5e4757 100755
--- a/manage.py
+++ b/manage.py
@@ -6,7 +6,7 @@
# Display deprecation warnings, which are hidden by default:
# https://docs.python.org/3.7/library/warnings.html#default-warning-filters
-warnings.simplefilter('default', DeprecationWarning)
+warnings.simplefilter("default", DeprecationWarning)
if __name__ == "__main__":
os.environ["DJANGO_SETTINGS_MODULE"] = "treeherder.config.settings"
diff --git a/misc/compare_pushes.py b/misc/compare_pushes.py
index 80294459778..47853ee9543 100755
--- a/misc/compare_pushes.py
+++ b/misc/compare_pushes.py
@@ -25,7 +25,7 @@ def main(args):
production_client = TreeherderClient(server_url=HOSTS["production"])
# Support comma separated projects
- projects = args.projects.split(',')
+ projects = args.projects.split(",")
for _project in projects:
logger.info("Comparing {} against production.".format(_project))
# Remove properties that are irrelevant for the comparison
diff --git a/pyproject.toml b/pyproject.toml
index 92bfe210504..cfc336d8848 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -24,15 +24,7 @@ mdx_truly_sane_lists = { version = "1.3", optional = true }
[tool.black]
line-length = 100
target-version = ['py39']
-skip-string-normalization = true
include = '\.pyi?$'
-exclude = '''
-/(
- treeherder/model/migrations
- | treeherder/perf/migrations
- | treeherder/changelog/migrations
-)/
-'''
[tool.ruff]
# Same as Black.
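For context (illustrative, not part of the patch): removing skip-string-normalization re-enables Black's default string normalization, which is what rewrites single-quoted literals to double quotes throughout this commit, while the migrations exclusion moves from the deleted [tool.black] exclude block into the pre-commit hook's exclude pattern above. A minimal before/after sketch, mirroring the misc/compare_pushes.py hunk earlier in this patch:

    # Before: with skip-string-normalization = true, Black kept the original quotes.
    projects = args.projects.split(',')

    # After: with the default normalization restored, Black rewrites the literal.
    projects = args.projects.split(",")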
diff --git a/tests/autoclassify/utils.py b/tests/autoclassify/utils.py
index 228dd50a218..90c4d669ffd 100644
--- a/tests/autoclassify/utils.py
+++ b/tests/autoclassify/utils.py
@@ -41,11 +41,11 @@ def create_failure_lines(job, failure_line_list, start_line=0):
failure_line = FailureLine(**data)
job_log = JobLog.objects.create(
job=job,
- name='{}{}'.format(base_data.get('test'), job.id),
- url='bar{}'.format(i),
+ name="{}{}".format(base_data.get("test"), job.id),
+ url="bar{}".format(i),
status=1,
)
- print('create jobLog for job id: {}'.format(job.id))
+ print("create jobLog for job id: {}".format(job.id))
failure_line.job_log = job_log
failure_line.save()
failure_lines.append(failure_line)
diff --git a/tests/client/test_perfherder_client.py b/tests/client/test_perfherder_client.py
index 43a9a8f9593..586108ef8fa 100644
--- a/tests/client/test_perfherder_client.py
+++ b/tests/client/test_perfherder_client.py
@@ -9,37 +9,37 @@ class PerfherderClientTest(unittest.TestCase):
@responses.activate
def test_get_performance_signatures(self):
pc = PerfherderClient()
- url = pc._get_endpoint_url(pc.PERFORMANCE_SIGNATURES_ENDPOINT, project='mozilla-central')
+ url = pc._get_endpoint_url(pc.PERFORMANCE_SIGNATURES_ENDPOINT, project="mozilla-central")
content = {
- 'signature1': {'cheezburgers': 1},
- 'signature2': {'hamburgers': 2},
- 'signature3': {'cheezburgers': 2},
+ "signature1": {"cheezburgers": 1},
+ "signature2": {"hamburgers": 2},
+ "signature3": {"cheezburgers": 2},
}
responses.add(responses.GET, url, json=content, status=200)
- sigs = pc.get_performance_signatures('mozilla-central')
+ sigs = pc.get_performance_signatures("mozilla-central")
self.assertEqual(len(sigs), 3)
- self.assertEqual(sigs.get_signature_hashes(), ['signature1', 'signature2', 'signature3'])
- self.assertEqual(sigs.get_property_names(), set(['cheezburgers', 'hamburgers']))
- self.assertEqual(sigs.get_property_values('cheezburgers'), set([1, 2]))
+ self.assertEqual(sigs.get_signature_hashes(), ["signature1", "signature2", "signature3"])
+ self.assertEqual(sigs.get_property_names(), set(["cheezburgers", "hamburgers"]))
+ self.assertEqual(sigs.get_property_values("cheezburgers"), set([1, 2]))
@responses.activate
def test_get_performance_data(self):
pc = PerfherderClient()
- url = '{}?{}'.format(
- pc._get_endpoint_url(pc.PERFORMANCE_DATA_ENDPOINT, project='mozilla-central'),
- 'signatures=signature1&signatures=signature2',
+ url = "{}?{}".format(
+ pc._get_endpoint_url(pc.PERFORMANCE_DATA_ENDPOINT, project="mozilla-central"),
+ "signatures=signature1&signatures=signature2",
)
content = {
- 'signature1': [{'value': 1}, {'value': 2}],
- 'signature2': [{'value': 2}, {'value': 1}],
+ "signature1": [{"value": 1}, {"value": 2}],
+ "signature2": [{"value": 2}, {"value": 1}],
}
responses.add(responses.GET, url, json=content, status=200)
series_list = pc.get_performance_data(
- 'mozilla-central', signatures=['signature1', 'signature2']
+ "mozilla-central", signatures=["signature1", "signature2"]
)
self.assertEqual(len(series_list), 2)
- self.assertEqual(series_list['signature1']['value'], [1, 2])
- self.assertEqual(series_list['signature2']['value'], [2, 1])
+ self.assertEqual(series_list["signature1"]["value"], [1, 2])
+ self.assertEqual(series_list["signature2"]["value"], [2, 1])
diff --git a/tests/client/test_treeherder_client.py b/tests/client/test_treeherder_client.py
index af2f1703138..6d7e7c2d8cc 100644
--- a/tests/client/test_treeherder_client.py
+++ b/tests/client/test_treeherder_client.py
@@ -12,7 +12,7 @@ class TreeherderClientTest(unittest.TestCase):
@responses.activate
def test_get_job(self):
tdc = TreeherderClient()
- url = tdc._get_endpoint_url(tdc.JOBS_ENDPOINT, project='autoland')
+ url = tdc._get_endpoint_url(tdc.JOBS_ENDPOINT, project="autoland")
content = {
"meta": {"count": 3, "repository": "autoland", "offset": 0},
"results": self.JOB_RESULTS,
@@ -26,7 +26,7 @@ def test_get_job(self):
@responses.activate
def test_get_pushes(self):
tdc = TreeherderClient()
- url = tdc._get_endpoint_url(tdc.PUSH_ENDPOINT, project='autoland')
+ url = tdc._get_endpoint_url(tdc.PUSH_ENDPOINT, project="autoland")
content = {
"meta": {"count": 3, "repository": "autoland", "offset": 0},
"results": self.PUSHES,
@@ -38,5 +38,5 @@ def test_get_pushes(self):
self.assertEqual(pushes, self.PUSHES)
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
diff --git a/tests/conftest.py b/tests/conftest.py
index 52927521fb1..7e7b4527df7 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -27,7 +27,7 @@
from treeherder.webapp.api import perfcompare_utils
IS_WINDOWS = "windows" in platform.system().lower()
-SAMPLE_DATA_PATH = join(dirname(__file__), 'sample_data')
+SAMPLE_DATA_PATH = join(dirname(__file__), "sample_data")
def pytest_addoption(parser):
@@ -45,7 +45,7 @@ def pytest_runtest_setup(item):
- Clear the django cache between runs
"""
- if 'slow' in item.keywords and not item.config.getoption("--runslow"):
+ if "slow" in item.keywords and not item.config.getoption("--runslow"):
pytest.skip("need --runslow option to run")
from django.core.cache import cache
@@ -56,9 +56,9 @@ def pytest_runtest_setup(item):
@pytest.fixture
def setup_repository_data(django_db_setup, django_db_blocker):
with django_db_blocker.unblock():
- call_command('loaddata', join(SAMPLE_DATA_PATH, 'repository_group.json'))
+ call_command("loaddata", join(SAMPLE_DATA_PATH, "repository_group.json"))
with django_db_blocker.unblock():
- call_command('loaddata', join(SAMPLE_DATA_PATH, 'repository.json'))
+ call_command("loaddata", join(SAMPLE_DATA_PATH, "repository.json"))
@pytest.fixture(scope="session", autouse=True)
@@ -70,14 +70,14 @@ def block_unmocked_requests():
"""
def mocked_send(*args, **kwargs):
- raise RuntimeError('Tests must mock all HTTP requests!')
+ raise RuntimeError("Tests must mock all HTTP requests!")
# The standard monkeypatch fixture cannot be used with session scope:
# https://github.com/pytest-dev/pytest/issues/363
monkeypatch = MonkeyPatch()
# Monkeypatching here since any higher level would break responses:
# https://github.com/getsentry/responses/blob/0.5.1/responses.py#L295
- monkeypatch.setattr('requests.adapters.HTTPAdapter.send', mocked_send)
+ monkeypatch.setattr("requests.adapters.HTTPAdapter.send", mocked_send)
yield monkeypatch
monkeypatch.undo()
@@ -90,7 +90,7 @@ def sample_data():
return SampleData()
-@pytest.fixture(scope='session')
+@pytest.fixture(scope="session")
def test_base_dir():
return os.path.dirname(__file__)
@@ -100,14 +100,14 @@ def sample_push(sample_data):
return copy.deepcopy(sample_data.push_data)
-@pytest.fixture(name='create_push')
+@pytest.fixture(name="create_push")
def fixture_create_push():
"""Return a function to create a push"""
def create(
repository,
- revision='4c45a777949168d16c03a4cba167678b7ab65f76',
- author='foo@bar.com',
+ revision="4c45a777949168d16c03a4cba167678b7ab65f76",
+ author="foo@bar.com",
time=None,
explicit_id=None,
):
@@ -122,11 +122,11 @@ def create(
return create
-@pytest.fixture(name='create_commit')
+@pytest.fixture(name="create_commit")
def fixture_create_commit():
"""Return a function to create a commit"""
- def create(push, comments='Bug 12345 - This is a message'):
+ def create(push, comments="Bug 12345 - This is a message"):
return th_models.Commit.objects.create(
push=push, revision=push.revision, author=push.author, comments=comments
)
@@ -134,7 +134,7 @@ def create(push, comments='Bug 12345 - This is a message'):
return create
-@pytest.fixture(name='create_signature')
+@pytest.fixture(name="create_signature")
def fixture_create_signature():
"""Returns a function to create a signature"""
@@ -147,7 +147,7 @@ def create(
test,
test_perf_signature,
repository,
- application='',
+ application="",
):
return perf_models.PerformanceSignature.objects.create(
repository=repository,
@@ -167,7 +167,7 @@ def create(
return create
-@pytest.fixture(name='create_perf_datum')
+@pytest.fixture(name="create_perf_datum")
def fixture_create_perf_datum():
"""Returns a function to create a performance datum"""
@@ -258,9 +258,9 @@ def test_issue_tracker(transactional_db):
def test_repository_2(test_repository):
return th_models.Repository.objects.create(
repository_group=test_repository.repository_group,
- name=test_repository.name + '_2',
+ name=test_repository.name + "_2",
dvcs_type=test_repository.dvcs_type,
- url=test_repository.url + '_2',
+ url=test_repository.url + "_2",
codebase=test_repository.codebase,
)
@@ -272,25 +272,25 @@ def test_push(create_push, test_repository):
@pytest.fixture
def test_perfcomp_push(create_push, test_repository):
- return create_push(test_repository, '1377267c6dc1')
+ return create_push(test_repository, "1377267c6dc1")
@pytest.fixture
def test_perfcomp_push_2(create_push, test_repository):
- return create_push(test_repository, '08038e535f58')
+ return create_push(test_repository, "08038e535f58")
@pytest.fixture
def test_linux_platform():
return th_models.MachinePlatform.objects.create(
- os_name='-', platform='linux1804-64-shippable-qr', architecture='-'
+ os_name="-", platform="linux1804-64-shippable-qr", architecture="-"
)
@pytest.fixture
def test_macosx_platform():
return th_models.MachinePlatform.objects.create(
- os_name='', platform='macosx1015-64-shippable-qr', architecture=''
+ os_name="", platform="macosx1015-64-shippable-qr", architecture=""
)
@@ -304,7 +304,7 @@ def test_commit(create_commit, test_push):
return create_commit(test_push)
-@pytest.fixture(name='create_jobs')
+@pytest.fixture(name="create_jobs")
def fixture_create_jobs(test_repository, failure_classifications):
"""Return a function to create jobs"""
@@ -318,8 +318,8 @@ def create(jobs):
@pytest.fixture
def test_job(eleven_job_blobs, create_jobs):
job = eleven_job_blobs[0]
- job['job'].update(
- {'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXLs', 'taskcluster_retry_id': '0'}
+ job["job"].update(
+ {"taskcluster_task_id": "V3SVuxO8TFy37En_6HcXLs", "taskcluster_retry_id": "0"}
)
return create_jobs([job])[0]
@@ -327,20 +327,20 @@ def test_job(eleven_job_blobs, create_jobs):
@pytest.fixture
def test_two_jobs_tc_metadata(eleven_job_blobs_new_date, create_jobs):
job_1, job_2 = eleven_job_blobs_new_date[0:2]
- job_1['job'].update(
+ job_1["job"].update(
{
- 'status': 'completed',
- 'result': 'testfailed',
- 'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXLs',
- 'taskcluster_retry_id': '0',
+ "status": "completed",
+ "result": "testfailed",
+ "taskcluster_task_id": "V3SVuxO8TFy37En_6HcXLs",
+ "taskcluster_retry_id": "0",
}
)
- job_2['job'].update(
+ job_2["job"].update(
{
- 'status': 'completed',
- 'result': 'testfailed',
- 'taskcluster_task_id': 'FJtjczXfTAGClIl6wNBo9g',
- 'taskcluster_retry_id': '0',
+ "status": "completed",
+ "result": "testfailed",
+ "taskcluster_task_id": "FJtjczXfTAGClIl6wNBo9g",
+ "taskcluster_retry_id": "0",
}
)
return create_jobs([job_1, job_2])
@@ -365,7 +365,7 @@ def mock_log_parser(monkeypatch):
def task_mock(*args, **kwargs):
pass
- monkeypatch.setattr(tasks, 'parse_logs', task_mock)
+ monkeypatch.setattr(tasks, "parse_logs", task_mock)
@pytest.fixture
@@ -376,20 +376,20 @@ def mockreturn(*arg, **kwargs):
nonlocal mock
return mock
- monkeypatch.setattr(taskcluster, 'notify_client_factory', mockreturn)
+ monkeypatch.setattr(taskcluster, "notify_client_factory", mockreturn)
return mock
@pytest.fixture
def mock_tc_prod_backfill_credentials(monkeypatch):
- monkeypatch.setattr(settings, 'PERF_SHERIFF_BOT_CLIENT_ID', "client_id")
- monkeypatch.setattr(settings, 'PERF_SHERIFF_BOT_ACCESS_TOKEN', "access_token")
+ monkeypatch.setattr(settings, "PERF_SHERIFF_BOT_CLIENT_ID", "client_id")
+ monkeypatch.setattr(settings, "PERF_SHERIFF_BOT_ACCESS_TOKEN", "access_token")
@pytest.fixture
def mock_tc_prod_notify_credentials(monkeypatch):
- monkeypatch.setattr(settings, 'NOTIFY_CLIENT_ID', "client_id")
- monkeypatch.setattr(settings, 'NOTIFY_ACCESS_TOKEN', "access_token")
+ monkeypatch.setattr(settings, "NOTIFY_CLIENT_ID", "client_id")
+ monkeypatch.setattr(settings, "NOTIFY_ACCESS_TOKEN", "access_token")
@pytest.fixture
@@ -423,12 +423,12 @@ def eleven_job_blobs(sample_data, sample_push, test_repository, mock_log_parser)
push_index = 0
# Modify job structure to sync with the push sample data
- if 'sources' in blob:
- del blob['sources']
+ if "sources" in blob:
+ del blob["sources"]
- blob['revision'] = sample_push[push_index]['revision']
- blob['taskcluster_task_id'] = 'V3SVuxO8TFy37En_6HcXL{}'.format(task_id_index)
- blob['taskcluster_retry_id'] = '0'
+ blob["revision"] = sample_push[push_index]["revision"]
+ blob["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcXL{}".format(task_id_index)
+ blob["taskcluster_retry_id"] = "0"
blobs.append(blob)
push_index += 1
@@ -441,7 +441,7 @@ def eleven_job_blobs_new_date(sample_data, sample_push, test_repository, mock_lo
# make unique revisions
counter = 0
for push in sample_push:
- push['push_timestamp'] = int(time.time()) + counter
+ push["push_timestamp"] = int(time.time()) + counter
counter += 1
store_push_data(test_repository, sample_push)
@@ -459,16 +459,16 @@ def eleven_job_blobs_new_date(sample_data, sample_push, test_repository, mock_lo
push_index = 0
# Modify job structure to sync with the push sample data
- if 'sources' in blob:
- del blob['sources']
-
- blob['revision'] = sample_push[push_index]['revision']
- blob['taskcluster_task_id'] = 'V3SVuxO8TFy37En_6HcX{:0>2}'.format(task_id_index)
- blob['taskcluster_retry_id'] = '0'
- blob['job']['revision'] = sample_push[push_index]['revision']
- blob['job']['submit_timestamp'] = sample_push[push_index]['push_timestamp']
- blob['job']['start_timestamp'] = sample_push[push_index]['push_timestamp'] + 10
- blob['job']['end_timestamp'] = sample_push[push_index]['push_timestamp'] + 1000
+ if "sources" in blob:
+ del blob["sources"]
+
+ blob["revision"] = sample_push[push_index]["revision"]
+ blob["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcX{:0>2}".format(task_id_index)
+ blob["taskcluster_retry_id"] = "0"
+ blob["job"]["revision"] = sample_push[push_index]["revision"]
+ blob["job"]["submit_timestamp"] = sample_push[push_index]["push_timestamp"]
+ blob["job"]["start_timestamp"] = sample_push[push_index]["push_timestamp"] + 10
+ blob["job"]["end_timestamp"] = sample_push[push_index]["push_timestamp"] + 1000
blobs.append(blob)
push_index += 1
@@ -552,7 +552,7 @@ def failure_lines(test_job):
def failure_line_logs(test_job):
return create_failure_lines(
test_job,
- [(test_line, {'action': 'log', 'test': None}), (test_line, {'subtest': 'subtest2'})],
+ [(test_line, {"action": "log", "test": None}), (test_line, {"subtest": "subtest2"})],
)
@@ -611,7 +611,7 @@ def classified_failures(
@pytest.fixture
def test_user(db):
# a user *without* sheriff/staff permissions
- user = th_models.User.objects.create(username="testuser1", email='user@foo.com', is_staff=False)
+ user = th_models.User.objects.create(username="testuser1", email="user@foo.com", is_staff=False)
return user
@@ -622,7 +622,7 @@ def test_ldap_user(db):
and who does not have `is_staff` permissions.
"""
user = th_models.User.objects.create(
- username="mozilla-ldap/user@foo.com", email='user@foo.com', is_staff=False
+ username="mozilla-ldap/user@foo.com", email="user@foo.com", is_staff=False
)
return user
@@ -631,20 +631,20 @@ def test_ldap_user(db):
def test_sheriff(db):
# a user *with* sheriff/staff permissions
user = th_models.User.objects.create(
- username="testsheriff1", email='sheriff@foo.com', is_staff=True
+ username="testsheriff1", email="sheriff@foo.com", is_staff=True
)
return user
@pytest.fixture
def test_perf_framework(transactional_db):
- return perf_models.PerformanceFramework.objects.create(name='test_talos', enabled=True)
+ return perf_models.PerformanceFramework.objects.create(name="test_talos", enabled=True)
@pytest.fixture
def test_perf_signature(test_repository, test_perf_framework) -> perf_models.PerformanceSignature:
windows_7_platform = th_models.MachinePlatform.objects.create(
- os_name='win', platform='win7', architecture='x86'
+ os_name="win", platform="win7", architecture="x86"
)
return create_perf_signature(test_perf_framework, test_repository, windows_7_platform)
@@ -652,24 +652,24 @@ def test_perf_signature(test_repository, test_perf_framework) -> perf_models.Per
def create_perf_signature(
perf_framework, repository, machine_platform: th_models.MachinePlatform
) -> perf_models.PerformanceSignature:
- option = th_models.Option.objects.create(name='opt')
+ option = th_models.Option.objects.create(name="opt")
option_collection = th_models.OptionCollection.objects.create(
- option_collection_hash='my_option_hash', option=option
+ option_collection_hash="my_option_hash", option=option
)
return perf_models.PerformanceSignature.objects.create(
repository=repository,
- signature_hash=(40 * 't'),
+ signature_hash=(40 * "t"),
framework=perf_framework,
platform=machine_platform,
option_collection=option_collection,
- suite='mysuite',
- test='mytest',
- application='firefox',
+ suite="mysuite",
+ test="mytest",
+ application="firefox",
has_subtests=False,
- tags='warm pageload',
- extra_options='e10s opt',
- measurement_unit='ms',
+ tags="warm pageload",
+ extra_options="e10s opt",
+ measurement_unit="ms",
last_updated=datetime.datetime.now(),
)
@@ -687,16 +687,16 @@ def test_taskcluster_metadata_2(test_job_3) -> th_models.TaskclusterMetadata:
def create_taskcluster_metadata(test_job_2) -> th_models.TaskclusterMetadata:
return th_models.TaskclusterMetadata.objects.create(
job=test_job_2,
- task_id='V3SVuxO8TFy37En_6HcXLp',
- retry_id='0',
+ task_id="V3SVuxO8TFy37En_6HcXLp",
+ retry_id="0",
)
def create_taskcluster_metadata_2(test_job_3) -> th_models.TaskclusterMetadata:
return th_models.TaskclusterMetadata.objects.create(
job=test_job_3,
- task_id='V3SVuxO8TFy37En_6HcXLq',
- retry_id='0',
+ task_id="V3SVuxO8TFy37En_6HcXLq",
+ retry_id="0",
)
@@ -704,12 +704,12 @@ def create_taskcluster_metadata_2(test_job_3) -> th_models.TaskclusterMetadata:
def test_perf_signature_2(test_perf_signature):
return perf_models.PerformanceSignature.objects.create(
repository=test_perf_signature.repository,
- signature_hash=(20 * 't2'),
+ signature_hash=(20 * "t2"),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
- suite='mysuite2',
- test='mytest2',
+ suite="mysuite2",
+ test="mytest2",
has_subtests=test_perf_signature.has_subtests,
extra_options=test_perf_signature.extra_options,
last_updated=datetime.datetime.now(),
@@ -721,12 +721,12 @@ def test_stalled_data_signature(test_perf_signature):
stalled_data_timestamp = datetime.datetime.now() - datetime.timedelta(days=120)
return perf_models.PerformanceSignature.objects.create(
repository=test_perf_signature.repository,
- signature_hash=(20 * 't3'),
+ signature_hash=(20 * "t3"),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
- suite='mysuite3',
- test='mytest3',
+ suite="mysuite3",
+ test="mytest3",
has_subtests=test_perf_signature.has_subtests,
extra_options=test_perf_signature.extra_options,
last_updated=stalled_data_timestamp,
@@ -738,7 +738,7 @@ def test_perf_data(test_perf_signature, eleven_jobs_stored):
# for making things easier, ids for jobs
# and push should be the same;
# also, we only need a subset of jobs
- perf_jobs = th_models.Job.objects.filter(pk__in=range(7, 11)).order_by('id').all()
+ perf_jobs = th_models.Job.objects.filter(pk__in=range(7, 11)).order_by("id").all()
for index, job in enumerate(perf_jobs, start=1):
job.push_id = index
@@ -755,7 +755,7 @@ def test_perf_data(test_perf_signature, eleven_jobs_stored):
perf_datum.push.time = job.push.time
perf_datum.push.save()
- return perf_models.PerformanceDatum.objects.order_by('id').all()
+ return perf_models.PerformanceDatum.objects.order_by("id").all()
@pytest.fixture
@@ -767,14 +767,14 @@ def _fetch_json(url, params=None):
bug_list_path = os.path.join(tests_folder, "sample_data", "bug_list.json")
with open(bug_list_path) as f:
last_change_time = (datetime.datetime.utcnow() - datetime.timedelta(days=30)).strftime(
- '%Y-%m-%dT%H:%M:%SZ'
+ "%Y-%m-%dT%H:%M:%SZ"
)
data = json.load(f)
for bug in data["bugs"]:
bug["last_change_time"] = last_change_time
return data
- monkeypatch.setattr(treeherder.etl.bugzilla, 'fetch_json', _fetch_json)
+ monkeypatch.setattr(treeherder.etl.bugzilla, "fetch_json", _fetch_json)
@pytest.fixture
@@ -787,7 +787,7 @@ def mock_deviance(monkeypatch):
def _deviance(*args, **kwargs):
return "OK", 0
- monkeypatch.setattr(moz_measure_noise, 'deviance', _deviance)
+ monkeypatch.setattr(moz_measure_noise, "deviance", _deviance)
@pytest.fixture
@@ -797,7 +797,7 @@ def bugs(mock_bugzilla_api_request):
process = BzApiBugProcess()
process.run()
- return th_models.Bugscache.objects.all().order_by('id')
+ return th_models.Bugscache.objects.all().order_by("id")
@pytest.fixture
@@ -807,11 +807,11 @@ def mock_bugzilla_reopen_request(monkeypatch, request):
def _reopen_request(url, method, headers, json):
import json as json_module
- reopened_bugs = request.config.cache.get('reopened_bugs', {})
+ reopened_bugs = request.config.cache.get("reopened_bugs", {})
reopened_bugs[url] = json_module.dumps(json)
- request.config.cache.set('reopened_bugs', reopened_bugs)
+ request.config.cache.set("reopened_bugs", reopened_bugs)
- monkeypatch.setattr(treeherder.etl.bugzilla, 'reopen_request', _reopen_request)
+ monkeypatch.setattr(treeherder.etl.bugzilla, "reopen_request", _reopen_request)
@pytest.fixture
@@ -839,7 +839,7 @@ def mock_file_bugzilla_map_request(monkeypatch):
def _fetch_data(self, project):
url = (
- 'https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.%s.latest.source.source-bugzilla-info/artifacts/public/components.json'
+ "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.%s.latest.source.source-bugzilla-info/artifacts/public/components.json"
% project
)
files_bugzilla_data = None
@@ -859,7 +859,7 @@ def _fetch_data(self, project):
}
monkeypatch.setattr(
- treeherder.etl.files_bugzilla_map.FilesBugzillaMapProcess, 'fetch_data', _fetch_data
+ treeherder.etl.files_bugzilla_map.FilesBugzillaMapProcess, "fetch_data", _fetch_data
)
@@ -879,11 +879,11 @@ def _fetch_intermittent_bugs(additional_params, limit, duplicate_chain_length):
for bug in bugzilla_data["bugs"]:
bug["last_change_time"] = (
datetime.datetime.now() - datetime.timedelta(20)
- ).isoformat(timespec='seconds') + 'Z'
+ ).isoformat(timespec="seconds") + "Z"
return bugzilla_data["bugs"]
monkeypatch.setattr(
- treeherder.etl.bugzilla, 'fetch_intermittent_bugs', _fetch_intermittent_bugs
+ treeherder.etl.bugzilla, "fetch_intermittent_bugs", _fetch_intermittent_bugs
)
@@ -909,7 +909,7 @@ def mock_get_artifact_list(monkeypatch):
def _mock_get(url, params=None):
return MockResponse()
- monkeypatch.setattr(treeherder.webapp.api.utils, 'fetch_json', _mock_get)
+ monkeypatch.setattr(treeherder.webapp.api.utils, "fetch_json", _mock_get)
@pytest.fixture
@@ -919,7 +919,7 @@ def mock_cache(monkeypatch):
def mockreturn_cache(*args, **kwargs):
return {"task_id": "some_id", "retry_id": 0}
- monkeypatch.setattr(django.core.cache.cache, 'get', mockreturn_cache)
+ monkeypatch.setattr(django.core.cache.cache, "get", mockreturn_cache)
@pytest.fixture
@@ -935,17 +935,17 @@ def text_log_error_lines(test_job, failure_lines):
@pytest.fixture
def test_perf_tag():
- return perf_models.PerformanceTag.objects.create(name='first_tag')
+ return perf_models.PerformanceTag.objects.create(name="first_tag")
@pytest.fixture
def test_perf_tag_2():
- return perf_models.PerformanceTag.objects.create(name='second_tag')
+ return perf_models.PerformanceTag.objects.create(name="second_tag")
@pytest.fixture
def test_perf_alert_summary(test_repository, push_stored, test_perf_framework, test_issue_tracker):
- test_perf_tag = perf_models.PerformanceTag.objects.create(name='harness')
+ test_perf_tag = perf_models.PerformanceTag.objects.create(name="harness")
performance_alert_summary = perf_models.PerformanceAlertSummary.objects.create(
repository=test_repository,
@@ -1074,9 +1074,9 @@ class RefdataHolder:
r = RefdataHolder()
- r.option = th_models.Option.objects.create(name='my_option')
+ r.option = th_models.Option.objects.create(name="my_option")
r.option_collection = th_models.OptionCollection.objects.create(
- option_collection_hash='my_option_hash', option=r.option
+ option_collection_hash="my_option_hash", option=r.option
)
r.option_collection_hash = r.option_collection.option_collection_hash
r.machine_platform = th_models.MachinePlatform.objects.create(
@@ -1085,13 +1085,13 @@ class RefdataHolder:
r.build_platform = th_models.BuildPlatform.objects.create(
os_name="my_os", platform="my_platform", architecture="x86"
)
- r.machine = th_models.Machine.objects.create(name='mymachine')
- r.job_group = th_models.JobGroup.objects.create(symbol='S', name='myjobgroup')
- r.job_type = th_models.JobType.objects.create(symbol='j', name='myjob')
- r.product = th_models.Product.objects.create(name='myproduct')
+ r.machine = th_models.Machine.objects.create(name="mymachine")
+ r.job_group = th_models.JobGroup.objects.create(symbol="S", name="myjobgroup")
+ r.job_type = th_models.JobType.objects.create(symbol="j", name="myjob")
+ r.product = th_models.Product.objects.create(name="myproduct")
r.signature = th_models.ReferenceDataSignatures.objects.create(
- name='myreferencedatasignaeture',
- signature='1234',
+ name="myreferencedatasignaeture",
+ signature="1234",
build_os_name=r.build_platform.os_name,
build_platform=r.build_platform.platform,
build_architecture=r.build_platform.architecture,
@@ -1103,7 +1103,7 @@ class RefdataHolder:
job_type_name=r.job_type.name,
job_type_symbol=r.job_type.symbol,
option_collection_hash=r.option_collection_hash,
- build_system_type='buildbot',
+ build_system_type="buildbot",
repository=test_repository.name,
first_submission_timestamp=0,
)
@@ -1113,37 +1113,37 @@ class RefdataHolder:
@pytest.fixture
def bug_data(eleven_jobs_stored, test_repository, test_push, bugs):
- jobs = th_models.Job.objects.all().order_by('id')
+ jobs = th_models.Job.objects.all().order_by("id")
bug_id = bugs[0].id
job_id = jobs[0].id
th_models.BugJobMap.create(job_id=job_id, bug_id=bug_id)
- query_string = '?startday=2012-05-09&endday=2018-05-10&tree={}'.format(test_repository.name)
+ query_string = "?startday=2012-05-09&endday=2018-05-10&tree={}".format(test_repository.name)
return {
- 'tree': test_repository.name,
- 'option': th_models.Option.objects.first(),
- 'bug_id': bug_id,
- 'job': jobs[0],
- 'jobs': jobs,
- 'query_string': query_string,
+ "tree": test_repository.name,
+ "option": th_models.Option.objects.first(),
+ "bug_id": bug_id,
+ "job": jobs[0],
+ "jobs": jobs,
+ "query_string": query_string,
}
@pytest.fixture
def test_run_data(bug_data):
pushes = th_models.Push.objects.all()
- time = pushes[0].time.strftime('%Y-%m-%d')
+ time = pushes[0].time.strftime("%Y-%m-%d")
test_runs = 0
for push in list(pushes):
- if push.time.strftime('%Y-%m-%d') == time:
+ if push.time.strftime("%Y-%m-%d") == time:
test_runs += 1
- return {'test_runs': test_runs, 'push_time': time}
+ return {"test_runs": test_runs, "push_time": time}
@pytest.fixture
def group_data(transactional_db, eleven_job_blobs, create_jobs):
- query_string = '?manifest=/test&date=2022-10-01'
+ query_string = "?manifest=/test&date=2022-10-01"
jt = []
jt.append(
@@ -1159,11 +1159,11 @@ def group_data(transactional_db, eleven_job_blobs, create_jobs):
g1 = th_models.Group.objects.create(name="/test")
for i in range(3):
job = eleven_job_blobs[i]
- job['job'].update(
+ job["job"].update(
{
- 'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXL%s' % i,
- 'taskcluster_retry_id': '0',
- 'name': jt[i].name,
+ "taskcluster_task_id": "V3SVuxO8TFy37En_6HcXL%s" % i,
+ "taskcluster_retry_id": "0",
+ "name": jt[i].name,
}
)
j = create_jobs([job])[0]
@@ -1174,17 +1174,17 @@ def group_data(transactional_db, eleven_job_blobs, create_jobs):
th_models.GroupStatus.objects.create(status=1, duration=1, job_log=job_log, group=g1)
return {
- 'date': j.submit_time,
- 'manifest': '/test',
- 'query_string': query_string,
- 'expected': {
- 'job_type_names': [
- 'test-windows10-64-2004-qr/opt-mochitest-plain',
- 'test-windows10-64-2004-qr/opt-mochitest-plain-swr',
+ "date": j.submit_time,
+ "manifest": "/test",
+ "query_string": query_string,
+ "expected": {
+ "job_type_names": [
+ "test-windows10-64-2004-qr/opt-mochitest-plain",
+ "test-windows10-64-2004-qr/opt-mochitest-plain-swr",
],
- 'manifests': [
+ "manifests": [
{
- '/test': [[0, "passed", 1, 2], [1, "passed", 1, 1]],
+ "/test": [[0, "passed", 1, 2], [1, "passed", 1, 1]],
}
],
},
@@ -1210,10 +1210,10 @@ def generate_enough_perf_datum(test_repository, test_perf_signature):
@pytest.fixture
def sample_option_collections(transactional_db):
- option1 = th_models.Option.objects.create(name='opt1')
- option2 = th_models.Option.objects.create(name='opt2')
- th_models.OptionCollection.objects.create(option_collection_hash='option_hash1', option=option1)
- th_models.OptionCollection.objects.create(option_collection_hash='option_hash2', option=option2)
+ option1 = th_models.Option.objects.create(name="opt1")
+ option2 = th_models.Option.objects.create(name="opt2")
+ th_models.OptionCollection.objects.create(option_collection_hash="option_hash1", option=option1)
+ th_models.OptionCollection.objects.create(option_collection_hash="option_hash2", option=option2)
@pytest.fixture
@@ -1270,7 +1270,7 @@ def __init__(self, *prior_dirs):
def __call__(self, fixture_filename):
fixture_path = join(*self._prior_dirs, fixture_filename)
- with open(fixture_path, 'r') as f:
+ with open(fixture_path, "r") as f:
return json.load(f)
diff --git a/tests/e2e/conftest.py b/tests/e2e/conftest.py
index 8371f8ed52d..1c9bbe984b4 100644
--- a/tests/e2e/conftest.py
+++ b/tests/e2e/conftest.py
@@ -35,7 +35,7 @@ def pending_jobs_stored(test_repository, failure_classifications, pending_job, p
stores a list of buildapi pending jobs into the jobs store
"""
pending_job.update(push_stored[0])
- pending_job.update({'project': test_repository.name})
+ pending_job.update({"project": test_repository.name})
store_job_data(test_repository, [pending_job])
@@ -45,7 +45,7 @@ def running_jobs_stored(test_repository, failure_classifications, running_job, p
stores a list of buildapi running jobs
"""
running_job.update(push_stored[0])
- running_job.update({'project': test_repository.name})
+ running_job.update({"project": test_repository.name})
store_job_data(test_repository, [running_job])
@@ -54,6 +54,6 @@ def completed_jobs_stored(test_repository, failure_classifications, completed_jo
"""
stores a list of buildapi completed jobs
"""
- completed_job['revision'] = push_stored[0]['revision']
- completed_job.update({'project': test_repository.name})
+ completed_job["revision"] = push_stored[0]["revision"]
+ completed_job.update({"project": test_repository.name})
store_job_data(test_repository, [completed_job])
diff --git a/tests/e2e/test_job_ingestion.py b/tests/e2e/test_job_ingestion.py
index d9841d03b7a..7dff6573c11 100644
--- a/tests/e2e/test_job_ingestion.py
+++ b/tests/e2e/test_job_ingestion.py
@@ -24,23 +24,23 @@ def test_store_job_with_unparsed_log(
# create a wrapper around get_error_summary that records whether
# it's been called
- mock_get_error_summary = MagicMock(name='get_error_summary', wraps=get_error_summary)
+ mock_get_error_summary = MagicMock(name="get_error_summary", wraps=get_error_summary)
import treeherder.model.error_summary
- monkeypatch.setattr(treeherder.model.error_summary, 'get_error_summary', mock_get_error_summary)
+ monkeypatch.setattr(treeherder.model.error_summary, "get_error_summary", mock_get_error_summary)
log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
errorsummary = add_log_response("mochitest-browser-chrome_errorsummary.log")
- job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
+ job_guid = "d22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33"
job_data = {
- 'project': test_repository.name,
- 'revision': push_stored[0]['revision'],
- 'job': {
- 'job_guid': job_guid,
- 'state': 'completed',
- 'log_references': [
- {'url': log_url, 'name': 'live_backing_log', 'parse_status': 'pending'},
- {'url': errorsummary, 'name': 'mochi_errorsummary.log', 'parse_status': 'pending'},
+ "project": test_repository.name,
+ "revision": push_stored[0]["revision"],
+ "job": {
+ "job_guid": job_guid,
+ "state": "completed",
+ "log_references": [
+ {"url": log_url, "name": "live_backing_log", "parse_status": "pending"},
+ {"url": errorsummary, "name": "mochi_errorsummary.log", "parse_status": "pending"},
],
},
}
@@ -58,13 +58,13 @@ def test_store_job_with_unparsed_log(
def test_store_job_pending_to_completed_with_unparsed_log(
test_repository, push_stored, failure_classifications, activate_responses
):
- job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
+ job_guid = "d22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33"
# the first time, submit it as running (with no logs)
job_data = {
- 'project': test_repository.name,
- 'revision': push_stored[0]['revision'],
- 'job': {'job_guid': job_guid, 'state': 'running'},
+ "project": test_repository.name,
+ "revision": push_stored[0]["revision"],
+ "job": {"job_guid": job_guid, "state": "running"},
}
store_job_data(test_repository, [job_data])
# should have no text log errors or bug suggestions
@@ -74,13 +74,13 @@ def test_store_job_pending_to_completed_with_unparsed_log(
# the second time, post a log that will get parsed
log_url = add_log_response("mozilla-central-macosx64-debug-bm65-build1-build15.txt.gz")
job_data = {
- 'project': test_repository.name,
- 'revision': push_stored[0]['revision'],
- 'job': {
- 'job_guid': job_guid,
- 'state': 'completed',
- 'log_references': [
- {'url': log_url, 'name': 'live_backing_log', 'parse_status': 'pending'}
+ "project": test_repository.name,
+ "revision": push_stored[0]["revision"],
+ "job": {
+ "job_guid": job_guid,
+ "state": "completed",
+ "log_references": [
+ {"url": log_url, "name": "live_backing_log", "parse_status": "pending"}
],
},
}
@@ -93,11 +93,11 @@ def test_store_job_pending_to_completed_with_unparsed_log(
def test_store_job_with_tier(test_repository, failure_classifications, push_stored):
"""test submitting a job with tier specified"""
- job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
+ job_guid = "d22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33"
job_data = {
- 'project': test_repository.name,
- 'revision': push_stored[0]['revision'],
- 'job': {'job_guid': job_guid, 'state': 'completed', 'tier': 3},
+ "project": test_repository.name,
+ "revision": push_stored[0]["revision"],
+ "job": {"job_guid": job_guid, "state": "completed", "tier": 3},
}
store_job_data(test_repository, [job_data])
@@ -108,11 +108,11 @@ def test_store_job_with_tier(test_repository, failure_classifications, push_stor
def test_store_job_with_default_tier(test_repository, failure_classifications, push_stored):
"""test submitting a job with no tier specified gets default"""
- job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
+ job_guid = "d22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33"
job_data = {
- 'project': test_repository.name,
- 'revision': push_stored[0]['revision'],
- 'job': {'job_guid': job_guid, 'state': 'completed'},
+ "project": test_repository.name,
+ "revision": push_stored[0]["revision"],
+ "job": {"job_guid": job_guid, "state": "completed"},
}
store_job_data(test_repository, [job_data])
diff --git a/tests/e2e/test_jobs_loaded.py b/tests/e2e/test_jobs_loaded.py
index aaeb8d75871..3e26b109bfc 100644
--- a/tests/e2e/test_jobs_loaded.py
+++ b/tests/e2e/test_jobs_loaded.py
@@ -6,9 +6,9 @@ def test_pending_job_available(test_repository, pending_jobs_stored, client):
assert resp.status_code == 200
jobs = resp.json()
- assert len(jobs['results']) == 1
+ assert len(jobs["results"]) == 1
- assert jobs['results'][0]['state'] == 'pending'
+ assert jobs["results"][0]["state"] == "pending"
def test_running_job_available(test_repository, running_jobs_stored, client):
@@ -16,9 +16,9 @@ def test_running_job_available(test_repository, running_jobs_stored, client):
assert resp.status_code == 200
jobs = resp.json()
- assert len(jobs['results']) == 1
+ assert len(jobs["results"]) == 1
- assert jobs['results'][0]['state'] == 'running'
+ assert jobs["results"][0]["state"] == "running"
def test_completed_job_available(test_repository, completed_jobs_stored, client):
@@ -26,8 +26,8 @@ def test_completed_job_available(test_repository, completed_jobs_stored, client)
assert resp.status_code == 200
jobs = resp.json()
- assert len(jobs['results']) == 1
- assert jobs['results'][0]['state'] == 'completed'
+ assert len(jobs["results"]) == 1
+ assert jobs["results"][0]["state"] == "completed"
def test_pending_stored_to_running_loaded(
@@ -42,8 +42,8 @@ def test_pending_stored_to_running_loaded(
assert resp.status_code == 200
jobs = resp.json()
- assert len(jobs['results']) == 1
- assert jobs['results'][0]['state'] == 'running'
+ assert len(jobs["results"]) == 1
+ assert jobs["results"][0]["state"] == "running"
def test_finished_job_to_running(
@@ -56,8 +56,8 @@ def test_finished_job_to_running(
assert resp.status_code == 200
jobs = resp.json()
- assert len(jobs['results']) == 1
- assert jobs['results'][0]['state'] == 'completed'
+ assert len(jobs["results"]) == 1
+ assert jobs["results"][0]["state"] == "completed"
def test_running_job_to_pending(test_repository, running_jobs_stored, pending_jobs_stored, client):
@@ -69,5 +69,5 @@ def test_running_job_to_pending(test_repository, running_jobs_stored, pending_jo
assert resp.status_code == 200
jobs = resp.json()
- assert len(jobs['results']) == 1
- assert jobs['results'][0]['state'] == 'running'
+ assert len(jobs["results"]) == 1
+ assert jobs["results"][0]["state"] == "running"
diff --git a/tests/etl/conftest.py b/tests/etl/conftest.py
index a82d705ca46..764965d7fd7 100644
--- a/tests/etl/conftest.py
+++ b/tests/etl/conftest.py
@@ -10,8 +10,8 @@
def perf_push(test_repository):
return Push.objects.create(
repository=test_repository,
- revision='1234abcd',
- author='foo@bar.com',
+ revision="1234abcd",
+ author="foo@bar.com",
time=datetime.datetime.now(),
)
@@ -19,5 +19,5 @@ def perf_push(test_repository):
@pytest.fixture
def perf_job(perf_push, failure_classifications, generic_reference_data):
return create_generic_job(
- 'myfunguid', perf_push.repository, perf_push.id, generic_reference_data
+ "myfunguid", perf_push.repository, perf_push.id, generic_reference_data
)
diff --git a/tests/etl/test_bugzilla.py b/tests/etl/test_bugzilla.py
index c879548d137..3ef3f5ec7e2 100644
--- a/tests/etl/test_bugzilla.py
+++ b/tests/etl/test_bugzilla.py
@@ -34,7 +34,7 @@ def test_bz_reopen_bugs(request, mock_bugzilla_reopen_request, client, test_job,
incomplete_bugs[0],
incomplete_bugs[2],
]:
- submit_obj = {u"job_id": test_job.id, u"bug_id": bug.id, u"type": u"manual"}
+ submit_obj = {"job_id": test_job.id, "bug_id": bug.id, "type": "manual"}
client.post(
reverse("bug-job-map-list", kwargs={"project": test_job.repository.name}),
@@ -44,12 +44,12 @@ def test_bz_reopen_bugs(request, mock_bugzilla_reopen_request, client, test_job,
process = BzApiBugProcess()
process.run()
- reopened_bugs = request.config.cache.get('reopened_bugs', None)
+ reopened_bugs = request.config.cache.get("reopened_bugs", None)
import json
EXPECTED_REOPEN_ATTEMPTS = {
- 'https://thisisnotbugzilla.org/rest/bug/202': json.dumps(
+ "https://thisisnotbugzilla.org/rest/bug/202": json.dumps(
{
"status": "REOPENED",
"comment": {
@@ -58,7 +58,7 @@ def test_bz_reopen_bugs(request, mock_bugzilla_reopen_request, client, test_job,
"comment_tags": "treeherder",
}
),
- 'https://thisisnotbugzilla.org/rest/bug/404': json.dumps(
+ "https://thisisnotbugzilla.org/rest/bug/404": json.dumps(
{
"status": "REOPENED",
"comment": {
diff --git a/tests/etl/test_classification_loader.py b/tests/etl/test_classification_loader.py
index 3538172f9d4..19376077887 100644
--- a/tests/etl/test_classification_loader.py
+++ b/tests/etl/test_classification_loader.py
@@ -21,80 +21,80 @@
)
DEFAULT_GTD_CONFIG = {
- 'json': {
- 'routes': ['index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA']
+ "json": {
+ "routes": ["index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA"]
},
- 'content_type': 'application/json',
- 'status': 200,
+ "content_type": "application/json",
+ "status": 200,
}
DEFAULT_DA_CONFIG = {
- 'json': {
- 'push': {
- 'id': 'autoland/c73bcc465e0c2bce7debb0a86277e2dcb27444e4',
- 'classification': 'GOOD',
+ "json": {
+ "push": {
+ "id": "autoland/c73bcc465e0c2bce7debb0a86277e2dcb27444e4",
+ "classification": "GOOD",
},
- 'failures': {
- 'real': {},
- 'intermittent': {
- 'testing/web-platform/tests/webdriver/tests/element_click': [],
- 'devtools/client/framework/test/browser.ini': [
+ "failures": {
+ "real": {},
+ "intermittent": {
+ "testing/web-platform/tests/webdriver/tests/element_click": [],
+ "devtools/client/framework/test/browser.ini": [
{
- 'task_id': 'V3SVuxO8TFy37En_6HcXLs',
- 'label': 'test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-1',
+ "task_id": "V3SVuxO8TFy37En_6HcXLs",
+ "label": "test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-1",
# autoclassify is True, there is a cached bug test1.js => autoclassification with one associated bug
- 'autoclassify': True,
- 'tests': ['devtools/client/framework/test/test1.js'],
+ "autoclassify": True,
+ "tests": ["devtools/client/framework/test/test1.js"],
},
{
- 'task_id': 'FJtjczXfTAGClIl6wNBo9g',
- 'label': 'test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-2',
+ "task_id": "FJtjczXfTAGClIl6wNBo9g",
+ "label": "test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-2",
# autoclassify is True, there are two cached bugs test1.js and test2.js => autoclassification with two associated bugs
- 'autoclassify': True,
- 'tests': [
- 'devtools/client/framework/test/test1.js',
- 'devtools/client/framework/test/test2.js',
+ "autoclassify": True,
+ "tests": [
+ "devtools/client/framework/test/test1.js",
+ "devtools/client/framework/test/test2.js",
],
},
],
- 'devtools/client/framework/test2/browser.ini': [
+ "devtools/client/framework/test2/browser.ini": [
{
- 'task_id': 'RutlNkofzrbTnbauRSTJWc',
- 'label': 'test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-3',
+ "task_id": "RutlNkofzrbTnbauRSTJWc",
+ "label": "test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-3",
# autoclassify is False, there is a cached bug for test1.js => no autoclassification
- 'autoclassify': False,
- 'tests': ['devtools/client/framework/test/test1.js'],
+ "autoclassify": False,
+ "tests": ["devtools/client/framework/test/test1.js"],
},
{
- 'task_id': 'HTZJyyQLalgtOkbwDBxChF',
- 'label': 'test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-4',
+ "task_id": "HTZJyyQLalgtOkbwDBxChF",
+ "label": "test-linux1804-64-qr/opt-mochitest-devtools-chrome-dt-no-eft-nofis-e10s-4",
# Even if autoclassify is True, there is no cached bug for test3.js => no autoclassification
- 'autoclassify': True,
- 'tests': ['devtools/client/framework/test/test3.js'],
+ "autoclassify": True,
+ "tests": ["devtools/client/framework/test/test3.js"],
},
],
},
- 'unknown': {},
+ "unknown": {},
},
},
- 'content_type': 'application/json',
- 'status': 200,
+ "content_type": "application/json",
+ "status": 200,
}
@pytest.fixture
def autoland_repository():
- group = RepositoryGroup.objects.create(name='development')
+ group = RepositoryGroup.objects.create(name="development")
return Repository.objects.create(
- dvcs_type='hg',
- name='autoland',
- url='https://hg.mozilla.org/integration/autoland',
- active_status='active',
- codebase='gecko',
+ dvcs_type="hg",
+ name="autoland",
+ url="https://hg.mozilla.org/integration/autoland",
+ active_status="active",
+ codebase="gecko",
repository_group=group,
performance_alerts_enabled=True,
expire_performance_data=False,
- tc_root_url='https://firefox-ci-tc.services.mozilla.com',
+ tc_root_url="https://firefox-ci-tc.services.mozilla.com",
)
@@ -102,8 +102,8 @@ def autoland_repository():
def autoland_push(autoland_repository):
return Push.objects.create(
repository=autoland_repository,
- revision='A35mWTRuQmyj88yMnIF0fA',
- author='foo@bar.com',
+ revision="A35mWTRuQmyj88yMnIF0fA",
+ author="foo@bar.com",
time=datetime.datetime.now(),
)
@@ -114,39 +114,39 @@ def populate_bugscache():
[
Bugscache(
id=1234567,
- status='NEW',
- summary='intermittent devtools/client/framework/test/test1.js | single tracking bug',
- modified='2014-01-01 00:00:00',
+ status="NEW",
+ summary="intermittent devtools/client/framework/test/test1.js | single tracking bug",
+ modified="2014-01-01 00:00:00",
),
Bugscache(
id=2345678,
- status='NEW',
- summary='intermittent devtools/client/framework/test/test2.js | single tracking bug',
- modified='2014-01-01 00:00:00',
+ status="NEW",
+ summary="intermittent devtools/client/framework/test/test2.js | single tracking bug",
+ modified="2014-01-01 00:00:00",
),
]
)
@pytest.mark.parametrize(
- 'mode, route',
+ "mode, route",
[
- ('production', 'completely bad route'),
- ('production', 'index.project.mozci.classification..revision.A35mWTRuQmyj88yMnIF0fA'),
- ('production', 'index.project.mozci.classification.autoland.revision.'),
+ ("production", "completely bad route"),
+ ("production", "index.project.mozci.classification..revision.A35mWTRuQmyj88yMnIF0fA"),
+ ("production", "index.project.mozci.classification.autoland.revision."),
(
- 'production',
- 'index.project.mozci.classification.autoland.revision.-35mW@RuQ__j88yénIF0f-',
+ "production",
+ "index.project.mozci.classification.autoland.revision.-35mW@RuQ__j88yénIF0f-",
),
(
- 'production',
- 'index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+ "production",
+ "index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
),
- ('testing', 'index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA'),
+ ("testing", "index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA"),
],
)
def test_get_push_wrong_route(mode, route, monkeypatch):
- monkeypatch.setenv('PULSE_MOZCI_ENVIRONMENT', mode)
+ monkeypatch.setenv("PULSE_MOZCI_ENVIRONMENT", mode)
with pytest.raises(AttributeError):
ClassificationLoader().get_push(route)
@@ -154,66 +154,66 @@ def test_get_push_wrong_route(mode, route, monkeypatch):
@pytest.mark.django_db
@pytest.mark.parametrize(
- 'mode, route',
+ "mode, route",
[
(
- 'production',
- 'index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+ "production",
+ "index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
),
(
- 'testing',
- 'index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+ "testing",
+ "index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
),
],
)
def test_get_push_unsupported_project(mode, route, monkeypatch):
- monkeypatch.setenv('PULSE_MOZCI_ENVIRONMENT', mode)
+ monkeypatch.setenv("PULSE_MOZCI_ENVIRONMENT", mode)
with pytest.raises(Repository.DoesNotExist) as e:
ClassificationLoader().get_push(route)
- assert str(e.value) == 'Repository matching query does not exist.'
+ assert str(e.value) == "Repository matching query does not exist."
@pytest.mark.django_db
@pytest.mark.parametrize(
- 'mode, route',
+ "mode, route",
[
(
- 'production',
- 'index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+ "production",
+ "index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
),
(
- 'testing',
- 'index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+ "testing",
+ "index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
),
],
)
def test_get_push_unsupported_revision(mode, route, autoland_repository, monkeypatch):
- monkeypatch.setenv('PULSE_MOZCI_ENVIRONMENT', mode)
+ monkeypatch.setenv("PULSE_MOZCI_ENVIRONMENT", mode)
with pytest.raises(Push.DoesNotExist) as e:
ClassificationLoader().get_push(route)
- assert str(e.value) == 'Push matching query does not exist.'
+ assert str(e.value) == "Push matching query does not exist."
@pytest.mark.django_db
@pytest.mark.parametrize(
- 'mode, route',
+ "mode, route",
[
(
- 'production',
- 'index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+ "production",
+ "index.project.mozci.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
),
(
- 'testing',
- 'index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA',
+ "testing",
+ "index.project.mozci.testing.classification.autoland.revision.A35mWTRuQmyj88yMnIF0fA",
),
],
)
def test_get_push(mode, route, autoland_push, monkeypatch):
- monkeypatch.setenv('PULSE_MOZCI_ENVIRONMENT', mode)
+ monkeypatch.setenv("PULSE_MOZCI_ENVIRONMENT", mode)
assert ClassificationLoader().get_push(route) == autoland_push
@@ -226,51 +226,51 @@ def update_dict(dict, update):
@responses.activate
@pytest.mark.django_db
@pytest.mark.parametrize(
- 'error_type, error_message, get_task_definition_config, get_push_error, download_artifact_config',
+ "error_type, error_message, get_task_definition_config, get_push_error, download_artifact_config",
[
- [HTTPError, '', {'status': 500}, None, DEFAULT_DA_CONFIG],
+ [HTTPError, "", {"status": 500}, None, DEFAULT_DA_CONFIG],
[
AssertionError,
- 'A route containing the push project and revision is needed to save the mozci classification',
- update_dict({**DEFAULT_GTD_CONFIG}, {'json': {}}),
+ "A route containing the push project and revision is needed to save the mozci classification",
+ update_dict({**DEFAULT_GTD_CONFIG}, {"json": {}}),
None,
DEFAULT_DA_CONFIG,
],
[
AssertionError,
- 'A route containing the push project and revision is needed to save the mozci classification',
- update_dict({**DEFAULT_GTD_CONFIG}, {'json': {'routes': []}}),
+ "A route containing the push project and revision is needed to save the mozci classification",
+ update_dict({**DEFAULT_GTD_CONFIG}, {"json": {"routes": []}}),
None,
DEFAULT_DA_CONFIG,
],
[
AttributeError,
None,
- update_dict({**DEFAULT_GTD_CONFIG}, {'json': {'routes': ['bad route']}}),
+ update_dict({**DEFAULT_GTD_CONFIG}, {"json": {"routes": ["bad route"]}}),
None,
DEFAULT_DA_CONFIG,
],
[None, None, DEFAULT_GTD_CONFIG, Repository.DoesNotExist, DEFAULT_DA_CONFIG],
[
Push.DoesNotExist,
- 'Push matching query does not exist.',
+ "Push matching query does not exist.",
DEFAULT_GTD_CONFIG,
Push.DoesNotExist,
DEFAULT_DA_CONFIG,
],
- [HTTPError, '', DEFAULT_GTD_CONFIG, None, {'status': 500}],
+ [HTTPError, "", DEFAULT_GTD_CONFIG, None, {"status": 500}],
[
AssertionError,
- 'Classification result should be a value in BAD, GOOD, UNKNOWN',
+ "Classification result should be a value in BAD, GOOD, UNKNOWN",
DEFAULT_GTD_CONFIG,
None,
update_dict(
{**DEFAULT_DA_CONFIG},
{
- 'json': {
- 'push': {
- 'id': 'autoland/c73bcc465e0c2bce7debb0a86277e2dcb27444e4',
- 'classification': 'WRONG',
+ "json": {
+ "push": {
+ "id": "autoland/c73bcc465e0c2bce7debb0a86277e2dcb27444e4",
+ "classification": "WRONG",
}
}
},
@@ -287,17 +287,17 @@ def test_process_handle_errors(
get_push_error,
download_artifact_config,
):
- root_url = 'https://community-tc.services.mozilla.com'
- task_id = 'A35mWTRuQmyj88yMnIF0fA'
+ root_url = "https://community-tc.services.mozilla.com"
+ task_id = "A35mWTRuQmyj88yMnIF0fA"
responses.add(
responses.GET,
- f'{root_url}/api/queue/v1/task/{task_id}',
+ f"{root_url}/api/queue/v1/task/{task_id}",
**get_task_definition_config,
)
responses.add(
responses.GET,
- f'{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json',
+ f"{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json",
**download_artifact_config,
)
@@ -306,17 +306,17 @@ def test_process_handle_errors(
def mock_get_push(x, y):
raise get_push_error(error_message)
- monkeypatch.setattr(ClassificationLoader, 'get_push', mock_get_push)
+ monkeypatch.setattr(ClassificationLoader, "get_push", mock_get_push)
assert MozciClassification.objects.count() == 0
if error_type:
with pytest.raises(error_type) as e:
- ClassificationLoader().process({'status': {'taskId': task_id}}, root_url)
+ ClassificationLoader().process({"status": {"taskId": task_id}}, root_url)
if error_message:
assert str(e.value) == error_message
else:
- ClassificationLoader().process({'status': {'taskId': task_id}}, root_url)
+ ClassificationLoader().process({"status": {"taskId": task_id}}, root_url)
assert MozciClassification.objects.count() == 0
@@ -324,28 +324,28 @@ def mock_get_push(x, y):
@responses.activate
@pytest.mark.django_db
def test_process_missing_failureclassification(autoland_push, test_two_jobs_tc_metadata):
- root_url = 'https://community-tc.services.mozilla.com'
- task_id = 'A35mWTRuQmyj88yMnIF0fA'
+ root_url = "https://community-tc.services.mozilla.com"
+ task_id = "A35mWTRuQmyj88yMnIF0fA"
- responses.add(responses.GET, f'{root_url}/api/queue/v1/task/{task_id}', **DEFAULT_GTD_CONFIG)
+ responses.add(responses.GET, f"{root_url}/api/queue/v1/task/{task_id}", **DEFAULT_GTD_CONFIG)
responses.add(
responses.GET,
- f'{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json',
+ f"{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json",
**DEFAULT_DA_CONFIG,
)
assert MozciClassification.objects.count() == 0
first_job, second_job = test_two_jobs_tc_metadata
- assert first_job.failure_classification.name == 'not classified'
- assert second_job.failure_classification.name == 'not classified'
+ assert first_job.failure_classification.name == "not classified"
+ assert second_job.failure_classification.name == "not classified"
assert JobNote.objects.count() == 0
assert BugJobMap.objects.count() == 0
- FailureClassification.objects.filter(name='autoclassified intermittent').delete()
+ FailureClassification.objects.filter(name="autoclassified intermittent").delete()
with pytest.raises(FailureClassification.DoesNotExist) as e:
- ClassificationLoader().process({'status': {'taskId': task_id}}, root_url)
+ ClassificationLoader().process({"status": {"taskId": task_id}}, root_url)
- assert str(e.value) == 'FailureClassification matching query does not exist.'
+ assert str(e.value) == "FailureClassification matching query does not exist."
assert MozciClassification.objects.count() == 1
classification = MozciClassification.objects.first()
@@ -356,8 +356,8 @@ def test_process_missing_failureclassification(autoland_push, test_two_jobs_tc_m
# Did not autoclassify since the requested FailureClassification was not found
first_job.refresh_from_db()
second_job.refresh_from_db()
- assert first_job.failure_classification.name == 'not classified'
- assert second_job.failure_classification.name == 'not classified'
+ assert first_job.failure_classification.name == "not classified"
+ assert second_job.failure_classification.name == "not classified"
assert JobNote.objects.count() == 0
assert BugJobMap.objects.count() == 0
@@ -365,19 +365,19 @@ def test_process_missing_failureclassification(autoland_push, test_two_jobs_tc_m
@responses.activate
@pytest.mark.django_db
def test_process(autoland_push, test_two_jobs_tc_metadata, populate_bugscache):
- root_url = 'https://community-tc.services.mozilla.com'
- task_id = 'A35mWTRuQmyj88yMnIF0fA'
+ root_url = "https://community-tc.services.mozilla.com"
+ task_id = "A35mWTRuQmyj88yMnIF0fA"
- responses.add(responses.GET, f'{root_url}/api/queue/v1/task/{task_id}', **DEFAULT_GTD_CONFIG)
+ responses.add(responses.GET, f"{root_url}/api/queue/v1/task/{task_id}", **DEFAULT_GTD_CONFIG)
responses.add(
responses.GET,
- f'{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json',
+ f"{root_url}/api/queue/v1/task/{task_id}/artifacts/public/classification.json",
**DEFAULT_DA_CONFIG,
)
assert MozciClassification.objects.count() == 0
- ClassificationLoader().process({'status': {'taskId': task_id}}, root_url)
+ ClassificationLoader().process({"status": {"taskId": task_id}}, root_url)
assert MozciClassification.objects.count() == 1
classification = MozciClassification.objects.first()
@@ -386,7 +386,7 @@ def test_process(autoland_push, test_two_jobs_tc_metadata, populate_bugscache):
assert classification.task_id == task_id
autoclassified_intermittent = FailureClassification.objects.get(
- name='autoclassified intermittent'
+ name="autoclassified intermittent"
)
first_bug, second_bug = populate_bugscache
@@ -407,7 +407,7 @@ def test_process(autoland_push, test_two_jobs_tc_metadata, populate_bugscache):
).exists()
maps = BugJobMap.objects.filter(job=second_job)
assert maps.count() == 2
- assert list(maps.values_list('bug_id', flat=True)) == [first_bug.id, second_bug.id]
+ assert list(maps.values_list("bug_id", flat=True)) == [first_bug.id, second_bug.id]
@pytest.mark.django_db
@@ -416,41 +416,41 @@ def test_autoclassify_failures_missing_job(failure_classifications, populate_bug
assert BugJobMap.objects.count() == 0
intermittents = {
- 'group1': [
+ "group1": [
{
- 'task_id': 'unknown_task_id',
- 'label': 'unknown_task',
+ "task_id": "unknown_task_id",
+ "label": "unknown_task",
# Should be autoclassified if a matching Job exists
- 'autoclassify': True,
- 'tests': ['devtools/client/framework/test/test1.js'],
+ "autoclassify": True,
+ "tests": ["devtools/client/framework/test/test1.js"],
}
]
}
with pytest.raises(Job.DoesNotExist) as e:
ClassificationLoader().autoclassify_failures(
- intermittents, FailureClassification.objects.get(name='autoclassified intermittent')
+ intermittents, FailureClassification.objects.get(name="autoclassified intermittent")
)
- assert str(e.value) == 'Job matching query does not exist.'
+ assert str(e.value) == "Job matching query does not exist."
assert JobNote.objects.count() == 0
assert BugJobMap.objects.count() == 0
@pytest.mark.django_db
-@pytest.mark.parametrize('existing_classification', [False, True])
+@pytest.mark.parametrize("existing_classification", [False, True])
def test_autoclassify_failures(
existing_classification, test_two_jobs_tc_metadata, test_sheriff, populate_bugscache
):
first_job, second_job = test_two_jobs_tc_metadata
- assert first_job.failure_classification.name == 'not classified'
- assert second_job.failure_classification.name == 'not classified'
+ assert first_job.failure_classification.name == "not classified"
+ assert second_job.failure_classification.name == "not classified"
assert JobNote.objects.count() == 0
assert BugJobMap.objects.count() == 0
- intermittent = FailureClassification.objects.get(name='intermittent')
+ intermittent = FailureClassification.objects.get(name="intermittent")
autoclassified_intermittent = FailureClassification.objects.get(
- name='autoclassified intermittent'
+ name="autoclassified intermittent"
)
if existing_classification:
@@ -463,7 +463,7 @@ def test_autoclassify_failures(
assert JobNote.objects.count() == 1
ClassificationLoader().autoclassify_failures(
- DEFAULT_DA_CONFIG['json']['failures']['intermittent'], autoclassified_intermittent
+ DEFAULT_DA_CONFIG["json"]["failures"]["intermittent"], autoclassified_intermittent
)
first_bug, second_bug = populate_bugscache
@@ -484,11 +484,11 @@ def test_autoclassify_failures(
if existing_classification
else autoclassified_intermittent
)
- assert job_note.who == test_sheriff.email if existing_classification else 'autoclassifier'
+ assert job_note.who == test_sheriff.email if existing_classification else "autoclassifier"
assert (
job_note.text == "Classified by a Sheriff"
if existing_classification
- else 'Autoclassified by mozci bot as an intermittent failure'
+ else "Autoclassified by mozci bot as an intermittent failure"
)
if not existing_classification:
@@ -496,7 +496,7 @@ def test_autoclassify_failures(
bug_job_map = BugJobMap.objects.filter(job=first_job).first()
assert bug_job_map.job == first_job
assert bug_job_map.bug_id == first_bug.id
- assert bug_job_map.who == 'autoclassifier'
+ assert bug_job_map.who == "autoclassifier"
# Second job
second_job.refresh_from_db()
@@ -506,14 +506,14 @@ def test_autoclassify_failures(
job_note = JobNote.objects.filter(job=second_job).first()
assert job_note.job == second_job
assert job_note.failure_classification == autoclassified_intermittent
- assert job_note.who == 'autoclassifier'
- assert job_note.text == 'Autoclassified by mozci bot as an intermittent failure'
+ assert job_note.who == "autoclassifier"
+ assert job_note.text == "Autoclassified by mozci bot as an intermittent failure"
maps = BugJobMap.objects.filter(job=second_job)
assert maps.count() == 2
- assert list(maps.values_list('job', flat=True)) == [second_job.id, second_job.id]
- assert list(maps.values_list('bug_id', flat=True)) == [first_bug.id, second_bug.id]
- assert [m.who for m in maps] == ['autoclassifier', 'autoclassifier']
+ assert list(maps.values_list("job", flat=True)) == [second_job.id, second_job.id]
+ assert list(maps.values_list("bug_id", flat=True)) == [first_bug.id, second_bug.id]
+ assert [m.who for m in maps] == ["autoclassifier", "autoclassifier"]
assert JobNote.objects.count() == 2
assert BugJobMap.objects.count() == 2 if existing_classification else 3
@@ -526,20 +526,20 @@ def test_new_classification(autoland_push, sample_data, test_two_jobs_tc_metadat
first_job, second_job = test_two_jobs_tc_metadata
artifact1 = sample_data.text_log_summary
artifact1["job_id"] = first_job.id
- artifact1['job_guid'] = first_job.guid
- artifact1['blob'] = json.dumps(artifact1['blob'])
+ artifact1["job_guid"] = first_job.guid
+ artifact1["blob"] = json.dumps(artifact1["blob"])
artifact2 = copy.deepcopy(artifact1)
artifact2["job_id"] = second_job.id
- artifact1['job_guid'] = second_job.guid
+ artifact1["job_guid"] = second_job.guid
store_job_artifacts([artifact1, artifact2])
# first is NEW
second_job = Job.objects.get(id=1)
first_job = Job.objects.get(id=2)
- assert first_job.failure_classification.name == 'intermittent needs filing'
+ assert first_job.failure_classification.name == "intermittent needs filing"
# second instance is normal
- assert second_job.failure_classification.name == 'not classified'
+ assert second_job.failure_classification.name == "not classified"
# annotate each job and ensure marked as intermittent
diff --git a/tests/etl/test_job_ingestion.py b/tests/etl/test_job_ingestion.py
index 3fa23625386..f0d476c16c0 100644
--- a/tests/etl/test_job_ingestion.py
+++ b/tests/etl/test_job_ingestion.py
@@ -18,8 +18,8 @@ def test_ingest_single_sample_job(
assert Job.objects.count() == 1
job = Job.objects.get(id=1)
# Ensure we don't inadvertently change the way we generate job-related hashes.
- assert job.option_collection_hash == '32faaecac742100f7753f0c1d0aa0add01b4046b'
- assert job.signature.signature == '5bb6ec49547193d8d9274232cd9de61fb4ef2e59'
+ assert job.option_collection_hash == "32faaecac742100f7753f0c1d0aa0add01b4046b"
+ assert job.signature.signature == "5bb6ec49547193d8d9274232cd9de61fb4ef2e59"
def test_ingest_all_sample_jobs(
@@ -39,13 +39,13 @@ def test_ingest_twice_log_parsing_status_changed(
verify that nothing changes"""
job_data = sample_data.job_data[:1]
- job_data[0]['job']['state'] = 'running'
+ job_data[0]["job"]["state"] = "running"
test_utils.do_job_ingestion(test_repository, job_data, sample_push)
assert JobLog.objects.count() == 1
for job_log in JobLog.objects.all():
job_log.update_status(JobLog.FAILED)
- job_data[0]['job']['state'] = 'completed'
+ job_data[0]["job"]["state"] = "completed"
test_utils.do_job_ingestion(test_repository, job_data, sample_push)
assert JobLog.objects.count() == 1
for job_log in JobLog.objects.all():
@@ -65,23 +65,23 @@ def test_ingest_running_to_retry_sample_job(
store_push_data(test_repository, sample_push)
job_data = copy.deepcopy(sample_data.job_data[:1])
- job = job_data[0]['job']
- job_data[0]['revision'] = sample_push[0]['revision']
- job['state'] = 'running'
- job['result'] = 'unknown'
+ job = job_data[0]["job"]
+ job_data[0]["revision"] = sample_push[0]["revision"]
+ job["state"] = "running"
+ job["result"] = "unknown"
def _simulate_retry_job(job):
- job['state'] = 'completed'
- job['result'] = 'retry'
+ job["state"] = "completed"
+ job["result"] = "retry"
# convert the job_guid to what it would be on a retry
- job['job_guid'] = job['job_guid'] + "_" + str(job['end_timestamp'])[-5:]
+ job["job_guid"] = job["job_guid"] + "_" + str(job["end_timestamp"])[-5:]
return job
if same_ingestion_cycle:
# now we simulate the complete version of the job coming in (on the
# same push)
new_job_datum = copy.deepcopy(job_data[0])
- new_job_datum['job'] = _simulate_retry_job(new_job_datum['job'])
+ new_job_datum["job"] = _simulate_retry_job(new_job_datum["job"])
job_data.append(new_job_datum)
store_job_data(test_repository, job_data)
else:
@@ -95,9 +95,9 @@ def _simulate_retry_job(job):
assert Job.objects.count() == 1
job = Job.objects.get(id=1)
- assert job.result == 'retry'
+ assert job.result == "retry"
# guid should be the retry one
- assert job.guid == job_data[-1]['job']['job_guid']
+ assert job.guid == job_data[-1]["job"]["job_guid"]
@pytest.mark.parametrize(
@@ -115,29 +115,29 @@ def test_ingest_running_to_retry_to_success_sample_job(
store_push_data(test_repository, sample_push)
job_datum = copy.deepcopy(sample_data.job_data[0])
- job_datum['revision'] = sample_push[0]['revision']
+ job_datum["revision"] = sample_push[0]["revision"]
- job = job_datum['job']
- job_guid_root = job['job_guid']
+ job = job_datum["job"]
+ job_guid_root = job["job_guid"]
job_data = []
for state, result, job_guid in [
- ('running', 'unknown', job_guid_root),
- ('completed', 'retry', job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
- ('completed', 'success', job_guid_root),
+ ("running", "unknown", job_guid_root),
+ ("completed", "retry", job_guid_root + "_" + str(job["end_timestamp"])[-5:]),
+ ("completed", "success", job_guid_root),
]:
new_job_datum = copy.deepcopy(job_datum)
- new_job_datum['job']['state'] = state
- new_job_datum['job']['result'] = result
- new_job_datum['job']['job_guid'] = job_guid
+ new_job_datum["job"]["state"] = state
+ new_job_datum["job"]["result"] = result
+ new_job_datum["job"]["job_guid"] = job_guid
job_data.append(new_job_datum)
for i, j in ingestion_cycles:
store_job_data(test_repository, job_data[i:j])
assert Job.objects.count() == 2
- assert Job.objects.get(id=1).result == 'retry'
- assert Job.objects.get(id=2).result == 'success'
+ assert Job.objects.get(id=1).result == "retry"
+ assert Job.objects.get(id=2).result == "success"
assert JobLog.objects.count() == 2
@@ -159,22 +159,22 @@ def test_ingest_running_to_retry_to_success_sample_job_multiple_retries(
store_push_data(test_repository, sample_push)
job_datum = copy.deepcopy(sample_data.job_data[0])
- job_datum['revision'] = sample_push[0]['revision']
+ job_datum["revision"] = sample_push[0]["revision"]
- job = job_datum['job']
- job_guid_root = job['job_guid']
+ job = job_datum["job"]
+ job_guid_root = job["job_guid"]
job_data = []
for state, result, job_guid in [
- ('running', 'unknown', job_guid_root),
- ('completed', 'retry', job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
- ('completed', 'retry', job_guid_root + "_12345"),
- ('completed', 'success', job_guid_root),
+ ("running", "unknown", job_guid_root),
+ ("completed", "retry", job_guid_root + "_" + str(job["end_timestamp"])[-5:]),
+ ("completed", "retry", job_guid_root + "_12345"),
+ ("completed", "success", job_guid_root),
]:
new_job_datum = copy.deepcopy(job_datum)
- new_job_datum['job']['state'] = state
- new_job_datum['job']['result'] = result
- new_job_datum['job']['job_guid'] = job_guid
+ new_job_datum["job"]["state"] = state
+ new_job_datum["job"]["result"] = result
+ new_job_datum["job"]["job_guid"] = job_guid
job_data.append(new_job_datum)
for i, j in ingestion_cycles:
@@ -182,9 +182,9 @@ def test_ingest_running_to_retry_to_success_sample_job_multiple_retries(
store_job_data(test_repository, ins)
assert Job.objects.count() == 3
- assert Job.objects.get(id=1).result == 'retry'
- assert Job.objects.get(id=2).result == 'retry'
- assert Job.objects.get(id=3).result == 'success'
+ assert Job.objects.get(id=1).result == "retry"
+ assert Job.objects.get(id=2).result == "retry"
+ assert Job.objects.get(id=3).result == "success"
assert JobLog.objects.count() == 3
@@ -193,23 +193,23 @@ def test_ingest_retry_sample_job_no_running(
):
"""Process a single job structure in the job_data.txt file"""
job_data = copy.deepcopy(sample_data.job_data[:1])
- job = job_data[0]['job']
- job_data[0]['revision'] = sample_push[0]['revision']
+ job = job_data[0]["job"]
+ job_data[0]["revision"] = sample_push[0]["revision"]
store_push_data(test_repository, sample_push)
# complete version of the job coming in
- job['state'] = 'completed'
- job['result'] = 'retry'
+ job["state"] = "completed"
+ job["result"] = "retry"
# convert the job_guid to what it would be on a retry
- retry_guid = job['job_guid'] + "_" + str(job['end_timestamp'])[-5:]
- job['job_guid'] = retry_guid
+ retry_guid = job["job_guid"] + "_" + str(job["end_timestamp"])[-5:]
+ job["job_guid"] = retry_guid
store_job_data(test_repository, job_data)
assert Job.objects.count() == 1
job = Job.objects.get(id=1)
- assert job.result == 'retry'
+ assert job.result == "retry"
assert job.guid == retry_guid
@@ -220,7 +220,7 @@ def test_bad_date_value_ingestion(
Test ingesting a job blob with bad date value
"""
- blob = job_data(start_timestamp="foo", revision=sample_push[0]['revision'])
+ blob = job_data(start_timestamp="foo", revision=sample_push[0]["revision"])
store_push_data(test_repository, sample_push[:1])
store_job_data(test_repository, [blob])
diff --git a/tests/etl/test_job_loader.py b/tests/etl/test_job_loader.py
index 44650c7bc1e..468c719a247 100644
--- a/tests/etl/test_job_loader.py
+++ b/tests/etl/test_job_loader.py
@@ -45,7 +45,7 @@ def mock_artifact(taskId, runId, artifactName):
responses.GET,
baseUrl.format(taskId=taskId, runId=runId, artifactName=artifactName),
body="",
- content_type='text/plain',
+ content_type="text/plain",
status=200,
)
@@ -100,7 +100,7 @@ def test_new_job_transformation(new_pulse_jobs, new_transformed_jobs, failure_cl
(decoded_task_id, _) = job_guid.split("/")
# As of slugid v2, slugid.encode() returns a string not bytestring under Python 3.
taskId = slugid.encode(uuid.UUID(decoded_task_id))
- transformed_job = jl.process_job(message, 'https://firefox-ci-tc.services.mozilla.com')
+ transformed_job = jl.process_job(message, "https://firefox-ci-tc.services.mozilla.com")
# Not all messages from Taskcluster will be processed
if transformed_job:
assert new_transformed_jobs[taskId] == transformed_job
@@ -117,18 +117,18 @@ def test_ingest_pulse_jobs(
revision = push_stored[0]["revision"]
for job in pulse_jobs:
job["origin"]["revision"] = revision
- jl.process_job(job, 'https://firefox-ci-tc.services.mozilla.com')
+ jl.process_job(job, "https://firefox-ci-tc.services.mozilla.com")
jobs = Job.objects.all()
assert len(jobs) == 5
assert [job.taskcluster_metadata for job in jobs]
- assert set(TaskclusterMetadata.objects.values_list('task_id', flat=True)) == set(
+ assert set(TaskclusterMetadata.objects.values_list("task_id", flat=True)) == set(
[
- 'IYyscnNMTLuxzna7PNqUJQ',
- 'XJCbbRQ6Sp-UL1lL-tw5ng',
- 'ZsSzJQu3Q7q2MfehIBAzKQ',
- 'bIzVZt9jQQKgvQYD3a2HQw',
+ "IYyscnNMTLuxzna7PNqUJQ",
+ "XJCbbRQ6Sp-UL1lL-tw5ng",
+ "ZsSzJQu3Q7q2MfehIBAzKQ",
+ "bIzVZt9jQQKgvQYD3a2HQw",
]
)
@@ -165,7 +165,7 @@ def test_ingest_pulse_job_with_long_job_type_name(
"jobName"
] = "this is a very long string that exceeds the 100 character size that was the previous limit by just a little bit"
job["origin"]["revision"] = revision
- jl.process_job(job, 'https://firefox-ci-tc.services.mozilla.com')
+ jl.process_job(job, "https://firefox-ci-tc.services.mozilla.com")
jobs = Job.objects.all()
assert len(jobs) == 1
@@ -184,14 +184,14 @@ def test_ingest_pending_pulse_job(
revision = push_stored[0]["revision"]
pulse_job["origin"]["revision"] = revision
pulse_job["state"] = "pending"
- jl.process_job(pulse_job, 'https://firefox-ci-tc.services.mozilla.com')
+ jl.process_job(pulse_job, "https://firefox-ci-tc.services.mozilla.com")
jobs = Job.objects.all()
assert len(jobs) == 1
job = jobs[0]
assert job.taskcluster_metadata
- assert job.taskcluster_metadata.task_id == 'IYyscnNMTLuxzna7PNqUJQ'
+ assert job.taskcluster_metadata.task_id == "IYyscnNMTLuxzna7PNqUJQ"
# should not have processed any log or details for pending jobs
assert JobLog.objects.count() == 2
@@ -211,7 +211,7 @@ def test_ingest_pulse_jobs_bad_project(
job["origin"]["project"] = "ferd"
for pulse_job in pulse_jobs:
- jl.process_job(pulse_job, 'https://firefox-ci-tc.services.mozilla.com')
+ jl.process_job(pulse_job, "https://firefox-ci-tc.services.mozilla.com")
# length of pulse jobs is 5, so one will be skipped due to bad project
assert Job.objects.count() == 4
@@ -230,13 +230,13 @@ def test_ingest_pulse_jobs_with_missing_push(pulse_jobs):
responses.GET,
"https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/IYyscnNMTLuxzna7PNqUJQ",
json={},
- content_type='application/json',
+ content_type="application/json",
status=200,
)
with pytest.raises(ObjectDoesNotExist):
for pulse_job in pulse_jobs:
- jl.process_job(pulse_job, 'https://firefox-ci-tc.services.mozilla.com')
+ jl.process_job(pulse_job, "https://firefox-ci-tc.services.mozilla.com")
    # if one job isn't ready, raise an exception for the whole batch. They'll
    # retry as a task after the timeout.
@@ -300,7 +300,7 @@ def test_transition_pending_retry_fail_stays_retry(
def test_skip_unscheduled(first_job, failure_classifications, mock_log_parser):
jl = JobLoader()
first_job["state"] = "unscheduled"
- jl.process_job(first_job, 'https://firefox-ci-tc.services.mozilla.com')
+ jl.process_job(first_job, "https://firefox-ci-tc.services.mozilla.com")
assert not Job.objects.count()
@@ -310,10 +310,10 @@ def change_state_result(test_job, job_loader, new_state, new_result, exp_state,
job = copy.deepcopy(test_job)
job["state"] = new_state
job["result"] = new_result
- if new_state == 'pending':
+ if new_state == "pending":
# pending jobs wouldn't have logs and our store_job_data doesn't
# support it.
- del job['logs']
+ del job["logs"]
errorsummary_indices = [
i
for i, item in enumerate(job["jobInfo"].get("links", []))
@@ -322,7 +322,7 @@ def change_state_result(test_job, job_loader, new_state, new_result, exp_state,
for index in errorsummary_indices:
del job["jobInfo"]["links"][index]
- job_loader.process_job(job, 'https://firefox-ci-tc.services.mozilla.com')
+ job_loader.process_job(job, "https://firefox-ci-tc.services.mozilla.com")
assert Job.objects.count() == 1
job = Job.objects.get(id=1)
diff --git a/tests/etl/test_job_schema.py b/tests/etl/test_job_schema.py
index 60179af9956..b1df394ffa2 100644
--- a/tests/etl/test_job_schema.py
+++ b/tests/etl/test_job_schema.py
@@ -7,7 +7,7 @@
# production Treeherder
-@pytest.mark.parametrize("group_symbol", ['?', 'A', 'Aries', 'Buri/Hamac', 'L10n', 'M-e10s'])
+@pytest.mark.parametrize("group_symbol", ["?", "A", "Aries", "Buri/Hamac", "L10n", "M-e10s"])
def test_group_symbols(sample_data, group_symbol):
"""
Validate jobs against the schema with different group_symbol values
@@ -19,7 +19,7 @@ def test_group_symbols(sample_data, group_symbol):
jsonschema.validate(job, get_json_schema("pulse-job.yml"))
-@pytest.mark.parametrize("job_symbol", ['1.1g', '1g', '20', 'A', 'GBI10', 'en-US-1'])
+@pytest.mark.parametrize("job_symbol", ["1.1g", "1g", "20", "A", "GBI10", "en-US-1"])
def test_job_symbols(sample_data, job_symbol):
"""
Validate jobs against the schema with different job_symbol values
diff --git a/tests/etl/test_load_artifacts.py b/tests/etl/test_load_artifacts.py
index 08c1eb0f8f8..4c70fed95e6 100644
--- a/tests/etl/test_load_artifacts.py
+++ b/tests/etl/test_load_artifacts.py
@@ -6,17 +6,17 @@
def test_load_textlog_summary_twice(test_repository, test_job):
text_log_summary_artifact = {
- 'type': 'json',
- 'name': 'text_log_summary',
- 'blob': json.dumps(
+ "type": "json",
+ "name": "text_log_summary",
+ "blob": json.dumps(
{
- 'errors': [
- {"line": 'WARNING - foobar', "linenumber": 1587},
- {"line": 'WARNING - foobar', "linenumber": 1590},
+ "errors": [
+ {"line": "WARNING - foobar", "linenumber": 1587},
+ {"line": "WARNING - foobar", "linenumber": 1590},
],
}
),
- 'job_guid': test_job.guid,
+ "job_guid": test_job.guid,
}
store_job_artifacts([text_log_summary_artifact])
@@ -29,29 +29,29 @@ def test_load_textlog_summary_twice(test_repository, test_job):
def test_load_non_ascii_textlog_errors(test_job):
text_log_summary_artifact = {
- 'type': 'json',
- 'name': 'text_log_summary',
- 'blob': json.dumps(
+ "type": "json",
+ "name": "text_log_summary",
+ "blob": json.dumps(
{
- 'errors': [
+ "errors": [
{
# non-ascii character
- "line": '07:51:28 WARNING - \U000000c3',
+ "line": "07:51:28 WARNING - \U000000c3",
"linenumber": 1587,
},
{
# astral character (i.e. higher than ucs2)
- "line": '07:51:29 WARNING - \U0001d400',
+ "line": "07:51:29 WARNING - \U0001d400",
"linenumber": 1588,
},
],
}
),
- 'job_guid': test_job.guid,
+ "job_guid": test_job.guid,
}
store_job_artifacts([text_log_summary_artifact])
assert TextLogError.objects.count() == 2
- assert TextLogError.objects.get(line_number=1587).line == '07:51:28 WARNING - \U000000c3'
- assert TextLogError.objects.get(line_number=1588).line == '07:51:29 WARNING - '
+ assert TextLogError.objects.get(line_number=1587).line == "07:51:28 WARNING - \U000000c3"
+ assert TextLogError.objects.get(line_number=1588).line == "07:51:29 WARNING - "
diff --git a/tests/etl/test_perf_data_adapters.py b/tests/etl/test_perf_data_adapters.py
index bdaa2996623..03672692fe9 100644
--- a/tests/etl/test_perf_data_adapters.py
+++ b/tests/etl/test_perf_data_adapters.py
@@ -19,16 +19,16 @@
def sample_perf_datum(framework_name: str, subtest_value: int = 20.0) -> dict:
return {
- 'job_guid': 'fake_job_guid',
- 'name': 'test',
- 'type': 'test',
- 'blob': {
- 'framework': {'name': framework_name},
- 'suites': [
+ "job_guid": "fake_job_guid",
+ "name": "test",
+ "type": "test",
+ "blob": {
+ "framework": {"name": framework_name},
+ "suites": [
{
- 'name': "some-perf-suite",
- 'unit': "ms",
- 'subtests': [{'name': "some-perf-test", 'value': subtest_value, 'unit': 'ms'}],
+ "name": "some-perf-suite",
+ "unit": "ms",
+ "subtests": [{"name": "some-perf-test", "value": subtest_value, "unit": "ms"}],
}
],
},
@@ -60,33 +60,33 @@ def _generate_and_validate_alerts(
"some-perf-framework",
"some-perf-suite",
"some-perf-test",
- 'my_option_hash',
- 'my_platform',
+ "my_option_hash",
+ "my_platform",
True,
None,
- 'ms',
- alert_threshold=extra_subtest_metadata.get('alertThreshold'),
- alert_change_type=extra_subtest_metadata.get('alertChangeType'),
- min_back_window=extra_subtest_metadata.get('minBackWindow'),
- max_back_window=extra_subtest_metadata.get('maxBackWindow'),
- fore_window=extra_subtest_metadata.get('foreWindow'),
+ "ms",
+ alert_threshold=extra_subtest_metadata.get("alertThreshold"),
+ alert_change_type=extra_subtest_metadata.get("alertChangeType"),
+ min_back_window=extra_subtest_metadata.get("minBackWindow"),
+ max_back_window=extra_subtest_metadata.get("maxBackWindow"),
+ fore_window=extra_subtest_metadata.get("foreWindow"),
)
if suite_provides_value:
_verify_signature(
test_repository.name,
"some-perf-framework",
"some-perf-suite",
- '',
- 'my_option_hash',
- 'my_platform',
+ "",
+ "my_option_hash",
+ "my_platform",
True,
None,
- 'ms',
- alert_threshold=extra_suite_metadata.get('alertThreshold'),
- alert_change_type=extra_suite_metadata.get('alertChangeType'),
- min_back_window=extra_suite_metadata.get('minBackWindow'),
- max_back_window=extra_suite_metadata.get('maxBackWindow'),
- fore_window=extra_suite_metadata.get('foreWindow'),
+ "ms",
+ alert_threshold=extra_suite_metadata.get("alertThreshold"),
+ alert_change_type=extra_suite_metadata.get("alertChangeType"),
+ min_back_window=extra_suite_metadata.get("minBackWindow"),
+ max_back_window=extra_suite_metadata.get("maxBackWindow"),
+ fore_window=extra_suite_metadata.get("foreWindow"),
)
@@ -125,15 +125,15 @@ def _generate_perf_data_range(
datum = sample_perf_datum(framework_name, value)
if suite_provides_value:
- datum['blob']['suites'][0]['value'] = value
+ datum["blob"]["suites"][0]["value"] = value
if extra_suite_metadata:
- datum['blob']['suites'][0].update(extra_suite_metadata)
+ datum["blob"]["suites"][0].update(extra_suite_metadata)
if extra_subtest_metadata:
- datum['blob']['suites'][0]['subtests'][0].update(extra_subtest_metadata)
+ datum["blob"]["suites"][0]["subtests"][0].update(extra_subtest_metadata)
# the perf data adapter expects deserialized performance data
submit_datum = copy.copy(datum)
- submit_datum['blob'] = json.dumps({'performance_data': submit_datum['blob']})
+ submit_datum["blob"] = json.dumps({"performance_data": submit_datum["blob"]})
store_performance_artifact(job, submit_datum)
@@ -155,9 +155,9 @@ def _verify_signature(
fore_window=None,
):
if not extra_opts:
- extra_options = ''
+ extra_options = ""
else:
- extra_options = ' '.join(sorted(extra_opts))
+ extra_options = " ".join(sorted(extra_opts))
repository = Repository.objects.get(name=repo_name)
signature = PerformanceSignature.objects.get(suite=suite_name, test=test_name)
@@ -199,7 +199,7 @@ def test_same_signature_multiple_performance_frameworks(test_repository, perf_jo
# the perf data adapter expects deserialized performance data
submit_datum = copy.copy(datum)
- submit_datum['blob'] = json.dumps({'performance_data': submit_datum['blob']})
+ submit_datum["blob"] = json.dumps({"performance_data": submit_datum["blob"]})
store_performance_artifact(perf_job, submit_datum)
@@ -218,36 +218,36 @@ def test_same_signature_multiple_performance_frameworks(test_repository, perf_jo
@pytest.mark.parametrize(
(
- 'alerts_enabled_repository',
- 'suite_provides_value',
- 'extra_suite_metadata',
- 'extra_subtest_metadata',
- 'job_tier',
- 'expected_subtest_alert',
- 'expected_suite_alert',
+ "alerts_enabled_repository",
+ "suite_provides_value",
+ "extra_suite_metadata",
+ "extra_subtest_metadata",
+ "job_tier",
+ "expected_subtest_alert",
+ "expected_suite_alert",
),
[
# should still alert even if we optionally
# use a large maximum back window
- (True, False, None, {'minBackWindow': 12, 'maxBackWindow': 100}, 2, True, False),
+ (True, False, None, {"minBackWindow": 12, "maxBackWindow": 100}, 2, True, False),
# summary+subtest, no metadata, default settings
(True, True, {}, {}, 1, False, True),
# summary+subtest, no metadata, no alerting on
# summary, alerting on subtest
- (True, True, {'shouldAlert': False}, {'shouldAlert': True}, 2, True, False),
+ (True, True, {"shouldAlert": False}, {"shouldAlert": True}, 2, True, False),
# summary+subtest, no metadata on summary, alerting
# override on subtest
- (True, True, {}, {'shouldAlert': True}, 2, True, True),
+ (True, True, {}, {"shouldAlert": True}, 2, True, True),
# summary+subtest, alerting override on subtest +
# summary
- (True, True, {'shouldAlert': True}, {'shouldAlert': True}, 1, True, True),
+ (True, True, {"shouldAlert": True}, {"shouldAlert": True}, 1, True, True),
# summary + subtest, only subtest is absolute so
# summary should alert
(
True,
True,
- {'shouldAlert': True},
- {'shouldAlert': True, 'alertChangeType': 'absolute'},
+ {"shouldAlert": True},
+ {"shouldAlert": True, "alertChangeType": "absolute"},
2,
False,
True,
@@ -292,9 +292,9 @@ def test_alerts_should_be_generated(
if expected_suite_alert:
# validate suite alert
- alert = PerformanceAlert.objects.get(series_signature__test='')
+ alert = PerformanceAlert.objects.get(series_signature__test="")
assert alert.series_signature.suite == "some-perf-suite"
- assert alert.series_signature.test == ''
+ assert alert.series_signature.test == ""
assert alert.is_regression
assert alert.amount_abs == 1
assert alert.amount_pct == 100
@@ -311,76 +311,76 @@ def test_alerts_should_be_generated(
@pytest.mark.parametrize(
(
- 'alerts_enabled_repository',
- 'suite_provides_value',
- 'extra_suite_metadata',
- 'extra_subtest_metadata',
- 'job_tier',
+ "alerts_enabled_repository",
+ "suite_provides_value",
+ "extra_suite_metadata",
+ "extra_subtest_metadata",
+ "job_tier",
),
[
# just subtest, no metadata, default settings & non sheriff-able job tier won't alert
(True, False, None, {}, 3),
# just subtest, high alert threshold (so no alert)
- (True, False, None, {'alertThreshold': 500.0}, 2),
+ (True, False, None, {"alertThreshold": 500.0}, 2),
# non sheriff-able job tier won't alert either
- (True, False, None, {'alertThreshold': 500.0}, 3),
+ (True, False, None, {"alertThreshold": 500.0}, 3),
# just subtest, but larger min window size
# (so no alerting)
- (True, False, {}, {'minBackWindow': 100, 'maxBackWindow': 100}, 1),
+ (True, False, {}, {"minBackWindow": 100, "maxBackWindow": 100}, 1),
# non sheriff-able job tier won't alert either
- (True, False, {}, {'minBackWindow': 100, 'maxBackWindow': 100}, 3),
+ (True, False, {}, {"minBackWindow": 100, "maxBackWindow": 100}, 3),
# should still alert even if we optionally
# use a large maximum back window, but because of
# non sheriff-able job tier it won't
- (True, False, None, {'minBackWindow': 12, 'maxBackWindow': 100}, 3),
+ (True, False, None, {"minBackWindow": 12, "maxBackWindow": 100}, 3),
# summary+subtest, no metadata, default settings should alert,
# but because of non sheriff-able job tier it won't
(True, True, {}, {}, 3),
# summary+subtest, high alert threshold
# (so no alert)
- (True, True, {'alertThreshold': 500.0}, {}, 2),
+ (True, True, {"alertThreshold": 500.0}, {}, 2),
# non sheriff-able job tier won't alert either
- (True, True, {'alertThreshold': 500.0}, {}, 3),
+ (True, True, {"alertThreshold": 500.0}, {}, 3),
# non sheriff-able job tier won't alert
- (True, True, {'alertThreshold': 500.0}, {}, 2),
+ (True, True, {"alertThreshold": 500.0}, {}, 2),
# non sheriff-able job tier won't alert either
- (True, True, {'alertThreshold': 500.0}, {}, 3),
+ (True, True, {"alertThreshold": 500.0}, {}, 3),
# summary+subtest, no metadata, no alerting on summary
- (True, True, {'shouldAlert': False}, {}, 1),
+ (True, True, {"shouldAlert": False}, {}, 1),
# non sheriff-able job tier won't alert either
- (True, True, {'shouldAlert': False}, {}, 3),
+ (True, True, {"shouldAlert": False}, {}, 3),
# summary+subtest, no metadata, no alerting on
# summary, alerting on subtest should alert, but
# because of non sheriff-able job tier it won't
- (True, True, {'shouldAlert': False}, {'shouldAlert': True}, 3),
+ (True, True, {"shouldAlert": False}, {"shouldAlert": True}, 3),
# summary+subtest, no metadata on summary, alerting
# override on subtest should alert, but because of
# non sheriff-able job tier it won't
- (True, True, {}, {'shouldAlert': True}, 3),
+ (True, True, {}, {"shouldAlert": True}, 3),
# summary+subtest, alerting override on subtest +
# summary & non sheriff-able job tier won't alert
- (True, True, {'shouldAlert': True}, {'shouldAlert': True}, 3),
+ (True, True, {"shouldAlert": True}, {"shouldAlert": True}, 3),
# summary+subtest, alerting override on subtest +
# summary -- but alerts disabled
- (False, True, {'shouldAlert': True}, {'shouldAlert': True}, 2),
+ (False, True, {"shouldAlert": True}, {"shouldAlert": True}, 2),
# non sheriff-able job tier won't alert either
- (False, True, {'shouldAlert': True}, {'shouldAlert': True}, 3),
+ (False, True, {"shouldAlert": True}, {"shouldAlert": True}, 3),
# summary+subtest, alerting override on subtest +
# summary, but using absolute change so shouldn't
# alert
(
True,
True,
- {'shouldAlert': True, 'alertChangeType': 'absolute'},
- {'shouldAlert': True, 'alertChangeType': 'absolute'},
+ {"shouldAlert": True, "alertChangeType": "absolute"},
+ {"shouldAlert": True, "alertChangeType": "absolute"},
1,
),
# non sheriff-able job tier won't alert either
(
True,
True,
- {'shouldAlert': True, 'alertChangeType': 'absolute'},
- {'shouldAlert': True, 'alertChangeType': 'absolute'},
+ {"shouldAlert": True, "alertChangeType": "absolute"},
+ {"shouldAlert": True, "alertChangeType": "absolute"},
3,
),
# summary + subtest, only subtest is absolute so
@@ -389,8 +389,8 @@ def test_alerts_should_be_generated(
(
True,
True,
- {'shouldAlert': True},
- {'shouldAlert': True, 'alertChangeType': 'absolute'},
+ {"shouldAlert": True},
+ {"shouldAlert": True, "alertChangeType": "absolute"},
3,
),
],
@@ -449,4 +449,4 @@ def test_last_updated(
_generate_perf_data_range(test_repository, generic_reference_data, reverse_push_range=True)
assert PerformanceSignature.objects.count() == 1
signature = PerformanceSignature.objects.first()
- assert signature.last_updated == max(Push.objects.values_list('time', flat=True))
+ assert signature.last_updated == max(Push.objects.values_list("time", flat=True))
diff --git a/tests/etl/test_perf_data_load.py b/tests/etl/test_perf_data_load.py
index a1fae4ff926..518af66550b 100644
--- a/tests/etl/test_perf_data_load.py
+++ b/tests/etl/test_perf_data_load.py
@@ -21,65 +21,65 @@
PerformanceSignature,
)
-FRAMEWORK_NAME = 'browsertime'
-MEASUREMENT_UNIT = 'ms'
-UPDATED_MEASUREMENT_UNIT = 'seconds'
+FRAMEWORK_NAME = "browsertime"
+MEASUREMENT_UNIT = "ms"
+UPDATED_MEASUREMENT_UNIT = "seconds"
DATA_PER_ARTIFACT = 8 # related to sample_perf_artifact fixture
@pytest.fixture
def sample_perf_artifact() -> dict:
return {
- 'job_guid': 'fake_job_guid',
- 'name': 'test',
- 'type': 'test',
- 'blob': {
- 'framework': {'name': FRAMEWORK_NAME},
- 'suites': [
+ "job_guid": "fake_job_guid",
+ "name": "test",
+ "type": "test",
+ "blob": {
+ "framework": {"name": FRAMEWORK_NAME},
+ "suites": [
{
- 'name': 'youtube-watch',
- 'extraOptions': ['shell', 'e10s'],
- 'lowerIsBetter': True,
- 'value': 10.0,
- 'unit': MEASUREMENT_UNIT,
- 'subtests': [
+ "name": "youtube-watch",
+ "extraOptions": ["shell", "e10s"],
+ "lowerIsBetter": True,
+ "value": 10.0,
+ "unit": MEASUREMENT_UNIT,
+ "subtests": [
{
- 'name': 'fcp',
- 'value': 20.0,
- 'unit': MEASUREMENT_UNIT,
- 'lowerIsBetter': True,
+ "name": "fcp",
+ "value": 20.0,
+ "unit": MEASUREMENT_UNIT,
+ "lowerIsBetter": True,
},
{
- 'name': 'loadtime',
- 'value': 30.0,
- 'unit': MEASUREMENT_UNIT,
- 'lowerIsBetter': False,
+ "name": "loadtime",
+ "value": 30.0,
+ "unit": MEASUREMENT_UNIT,
+ "lowerIsBetter": False,
},
{
- 'name': 'fnbpaint',
- 'value': 40.0,
- 'unit': MEASUREMENT_UNIT,
+ "name": "fnbpaint",
+ "value": 40.0,
+ "unit": MEASUREMENT_UNIT,
},
],
},
{
- 'name': 'youtube-watch 2',
- 'lowerIsBetter': False,
- 'value': 10.0,
- 'unit': MEASUREMENT_UNIT,
- 'subtests': [
+ "name": "youtube-watch 2",
+ "lowerIsBetter": False,
+ "value": 10.0,
+ "unit": MEASUREMENT_UNIT,
+ "subtests": [
{
- 'name': 'fcp',
- 'value': 20.0,
- 'unit': MEASUREMENT_UNIT,
+ "name": "fcp",
+ "value": 20.0,
+ "unit": MEASUREMENT_UNIT,
}
],
},
{
- 'name': 'youtube-watch 3',
- 'value': 10.0,
- 'unit': MEASUREMENT_UNIT,
- 'subtests': [{'name': 'fcp', 'value': 20.0, 'unit': MEASUREMENT_UNIT}],
+ "name": "youtube-watch 3",
+ "value": 10.0,
+ "unit": MEASUREMENT_UNIT,
+ "subtests": [{"name": "fcp", "value": 20.0, "unit": MEASUREMENT_UNIT}],
},
],
},
@@ -95,14 +95,14 @@ def sibling_perf_artifacts(sample_perf_artifact: dict) -> List[dict]:
mocked_push_timestamp = (
datetime.datetime.utcnow() + datetime.timedelta(hours=idx)
).timestamp()
- artifact['blob']['pushTimestamp'] = int(mocked_push_timestamp)
+ artifact["blob"]["pushTimestamp"] = int(mocked_push_timestamp)
# having distinct values for suites & subtests
# will make it easier to write tests
- for suite in artifact['blob']['suites']:
- suite['value'] = suite['value'] + idx
- for subtest in suite['subtests']:
- subtest['value'] = subtest['value'] + idx
+ for suite in artifact["blob"]["suites"]:
+ suite["value"] = suite["value"] + idx
+ for subtest in suite["subtests"]:
+ subtest["value"] = subtest["value"] + idx
return artifacts
@@ -110,35 +110,35 @@ def sibling_perf_artifacts(sample_perf_artifact: dict) -> List[dict]:
@pytest.fixture
def sample_perf_artifact_with_new_unit():
return {
- 'job_guid': 'new_fake_job_guid',
- 'name': 'test',
- 'type': 'test',
- 'blob': {
- 'framework': {'name': FRAMEWORK_NAME},
- 'suites': [
+ "job_guid": "new_fake_job_guid",
+ "name": "test",
+ "type": "test",
+ "blob": {
+ "framework": {"name": FRAMEWORK_NAME},
+ "suites": [
{
- 'name': 'youtube-watch',
- 'extraOptions': ['shell', 'e10s'],
- 'lowerIsBetter': True,
- 'value': 10.0,
- 'unit': UPDATED_MEASUREMENT_UNIT,
- 'subtests': [
+ "name": "youtube-watch",
+ "extraOptions": ["shell", "e10s"],
+ "lowerIsBetter": True,
+ "value": 10.0,
+ "unit": UPDATED_MEASUREMENT_UNIT,
+ "subtests": [
{
- 'name': 'fcp',
- 'value': 20.0,
- 'unit': UPDATED_MEASUREMENT_UNIT,
- 'lowerIsBetter': True,
+ "name": "fcp",
+ "value": 20.0,
+ "unit": UPDATED_MEASUREMENT_UNIT,
+ "lowerIsBetter": True,
},
{
- 'name': 'loadtime',
- 'value': 30.0,
- 'unit': MEASUREMENT_UNIT,
- 'lowerIsBetter': False,
+ "name": "loadtime",
+ "value": 30.0,
+ "unit": MEASUREMENT_UNIT,
+ "lowerIsBetter": False,
},
{
- 'name': 'fnbpaint',
- 'value': 40.0,
- 'unit': MEASUREMENT_UNIT,
+ "name": "fnbpaint",
+ "value": 40.0,
+ "unit": MEASUREMENT_UNIT,
},
],
}
@@ -152,8 +152,8 @@ def later_perf_push(test_repository):
later_timestamp = datetime.datetime.fromtimestamp(int(time.time()) + 5)
return Push.objects.create(
repository=test_repository,
- revision='1234abcd12',
- author='foo@bar.com',
+ revision="1234abcd12",
+ author="foo@bar.com",
time=later_timestamp,
)
@@ -170,18 +170,18 @@ def _prepare_test_data(datum):
PerformanceFramework.objects.get_or_create(name=FRAMEWORK_NAME, enabled=True)
# the perf data adapter expects unserialized performance data
submit_datum = copy.copy(datum)
- submit_datum['blob'] = json.dumps({'performance_data': submit_datum['blob']})
- perf_datum = datum['blob']
+ submit_datum["blob"] = json.dumps({"performance_data": submit_datum["blob"]})
+ perf_datum = datum["blob"]
return perf_datum, submit_datum
def _assert_hash_remains_unchanged():
- summary_signature = PerformanceSignature.objects.get(suite='youtube-watch', test='')
+ summary_signature = PerformanceSignature.objects.get(suite="youtube-watch", test="")
# Ensure we don't inadvertently change the way we generate signature hashes.
- assert summary_signature.signature_hash == '78aaeaf7d3a0170f8a1fb0c4dc34ca276da47e1c'
+ assert summary_signature.signature_hash == "78aaeaf7d3a0170f8a1fb0c4dc34ca276da47e1c"
subtest_signatures = PerformanceSignature.objects.filter(
parent_signature=summary_signature
- ).values_list('signature_hash', flat=True)
+ ).values_list("signature_hash", flat=True)
assert len(subtest_signatures) == 3
@@ -205,35 +205,35 @@ def test_default_ingest_workflow(
assert 1 == PerformanceFramework.objects.all().count()
framework = PerformanceFramework.objects.first()
assert FRAMEWORK_NAME == framework.name
- for suite in perf_datum['suites']:
+ for suite in perf_datum["suites"]:
# verify summary, then subtests
_verify_signature(
test_repository.name,
- perf_datum['framework']['name'],
- suite['name'],
- '',
- 'my_option_hash',
- 'my_platform',
- suite.get('lowerIsBetter', True),
- suite.get('extraOptions'),
- suite.get('unit'),
+ perf_datum["framework"]["name"],
+ suite["name"],
+ "",
+ "my_option_hash",
+ "my_platform",
+ suite.get("lowerIsBetter", True),
+ suite.get("extraOptions"),
+ suite.get("unit"),
perf_push.time,
)
- _verify_datum(suite['name'], '', suite['value'], perf_push.time)
- for subtest in suite['subtests']:
+ _verify_datum(suite["name"], "", suite["value"], perf_push.time)
+ for subtest in suite["subtests"]:
_verify_signature(
test_repository.name,
- perf_datum['framework']['name'],
- suite['name'],
- subtest['name'],
- 'my_option_hash',
- 'my_platform',
- subtest.get('lowerIsBetter', True),
- suite.get('extraOptions'),
- suite.get('unit'),
+ perf_datum["framework"]["name"],
+ suite["name"],
+ subtest["name"],
+ "my_option_hash",
+ "my_platform",
+ subtest.get("lowerIsBetter", True),
+ suite.get("extraOptions"),
+ suite.get("unit"),
perf_push.time,
)
- _verify_datum(suite['name'], subtest['name'], subtest['value'], perf_push.time)
+ _verify_datum(suite["name"], subtest["name"], subtest["value"], perf_push.time)
def test_hash_remains_unchanged_for_default_ingestion_workflow(
@@ -253,11 +253,11 @@ def test_timestamp_can_be_updated_for_default_ingestion_workflow(
# send another datum, a little later, verify that signature is changed accordingly
later_job = create_generic_job(
- 'lateguid', test_repository, later_perf_push.id, generic_reference_data
+ "lateguid", test_repository, later_perf_push.id, generic_reference_data
)
store_performance_artifact(later_job, submit_datum)
- signature = PerformanceSignature.objects.get(suite='youtube-watch', test='fcp')
+ signature = PerformanceSignature.objects.get(suite="youtube-watch", test="fcp")
assert signature.last_updated == later_perf_push.time
@@ -274,19 +274,19 @@ def test_measurement_unit_can_be_updated(
_, updated_submit_datum = _prepare_test_data(sample_perf_artifact_with_new_unit)
later_job = create_generic_job(
- 'lateguid', test_repository, later_perf_push.id, generic_reference_data
+ "lateguid", test_repository, later_perf_push.id, generic_reference_data
)
store_performance_artifact(later_job, updated_submit_datum)
- summary_signature = PerformanceSignature.objects.get(suite='youtube-watch', test='')
- updated_subtest_signature = PerformanceSignature.objects.get(suite='youtube-watch', test='fcp')
+ summary_signature = PerformanceSignature.objects.get(suite="youtube-watch", test="")
+ updated_subtest_signature = PerformanceSignature.objects.get(suite="youtube-watch", test="fcp")
assert summary_signature.measurement_unit == UPDATED_MEASUREMENT_UNIT
assert updated_subtest_signature.measurement_unit == UPDATED_MEASUREMENT_UNIT
# no side effects when parent/sibling signatures
# change measurement units
not_changed_subtest_signature = PerformanceSignature.objects.get(
- suite='youtube-watch', test='loadtime'
+ suite="youtube-watch", test="loadtime"
)
assert not_changed_subtest_signature.measurement_unit == MEASUREMENT_UNIT
@@ -295,9 +295,9 @@ def test_changing_extra_options_decouples_perf_signatures(
test_repository, later_perf_push, perf_job, generic_reference_data, sample_perf_artifact
):
updated_perf_artifact = copy.deepcopy(sample_perf_artifact)
- updated_perf_artifact['blob']['suites'][0]['extraOptions'] = ['different-extra-options']
+ updated_perf_artifact["blob"]["suites"][0]["extraOptions"] = ["different-extra-options"]
later_job = create_generic_job(
- 'lateguid', test_repository, later_perf_push.id, generic_reference_data
+ "lateguid", test_repository, later_perf_push.id, generic_reference_data
)
_, submit_datum = _prepare_test_data(sample_perf_artifact)
_, updated_submit_datum = _prepare_test_data(updated_perf_artifact)
@@ -312,7 +312,7 @@ def test_changing_extra_options_decouples_perf_signatures(
# Multi perf data (for the same job) ingestion workflow
-@pytest.mark.parametrize('PERFHERDER_ENABLE_MULTIDATA_INGESTION', [True, False])
+@pytest.mark.parametrize("PERFHERDER_ENABLE_MULTIDATA_INGESTION", [True, False])
def test_multi_data_can_be_ingested_for_same_job_and_push(
PERFHERDER_ENABLE_MULTIDATA_INGESTION,
test_repository,
@@ -331,7 +331,7 @@ def test_multi_data_can_be_ingested_for_same_job_and_push(
@pytest.mark.parametrize(
- 'PERFHERDER_ENABLE_MULTIDATA_INGESTION, based_on_multidata_toggle',
+ "PERFHERDER_ENABLE_MULTIDATA_INGESTION, based_on_multidata_toggle",
[(True, operator.truth), (False, operator.not_)],
)
def test_multi_data_ingest_workflow(
@@ -376,8 +376,8 @@ def performance_datum_exists(**with_these_properties) -> bool:
# and their essential properties were correctly stored (or not)
for artifact in sibling_perf_artifacts:
- artifact_blob = artifact['blob']
- push_timestamp = datetime.datetime.fromtimestamp(artifact_blob['pushTimestamp'])
+ artifact_blob = artifact["blob"]
+ push_timestamp = datetime.datetime.fromtimestamp(artifact_blob["pushTimestamp"])
common_properties = dict( # to both suites & subtests
repository=perf_job.repository,
job=perf_job,
@@ -385,21 +385,21 @@ def performance_datum_exists(**with_these_properties) -> bool:
push_timestamp=push_timestamp,
)
# check suites
- for suite in artifact_blob['suites']:
+ for suite in artifact_blob["suites"]:
assert performance_datum_exists(
**common_properties,
- value=suite['value'],
+ value=suite["value"],
)
# and subtests
- for subtest in suite['subtests']:
+ for subtest in suite["subtests"]:
assert performance_datum_exists(
**common_properties,
- value=subtest['value'],
+ value=subtest["value"],
)
-@pytest.mark.parametrize('PERFHERDER_ENABLE_MULTIDATA_INGESTION', [True, False])
+@pytest.mark.parametrize("PERFHERDER_ENABLE_MULTIDATA_INGESTION", [True, False])
def test_hash_remains_unchanged_for_multi_data_ingestion_workflow(
PERFHERDER_ENABLE_MULTIDATA_INGESTION,
test_repository,
@@ -417,7 +417,7 @@ def test_hash_remains_unchanged_for_multi_data_ingestion_workflow(
@pytest.mark.parametrize(
- 'PERFHERDER_ENABLE_MULTIDATA_INGESTION, operator_', [(True, operator.eq), (False, operator.ne)]
+ "PERFHERDER_ENABLE_MULTIDATA_INGESTION, operator_", [(True, operator.eq), (False, operator.ne)]
)
def test_timestamp_can_be_updated_for_multi_data_ingestion_workflow(
PERFHERDER_ENABLE_MULTIDATA_INGESTION,
@@ -435,9 +435,9 @@ def test_timestamp_can_be_updated_for_multi_data_ingestion_workflow(
_, submit_datum = _prepare_test_data(artifact)
store_performance_artifact(perf_job, submit_datum)
- signature = PerformanceSignature.objects.get(suite='youtube-watch', test='fcp')
+ signature = PerformanceSignature.objects.get(suite="youtube-watch", test="fcp")
last_artifact = sibling_perf_artifacts[-1]
- last_push_timestamp = datetime.datetime.fromtimestamp(last_artifact['blob']['pushTimestamp'])
+ last_push_timestamp = datetime.datetime.fromtimestamp(last_artifact["blob"]["pushTimestamp"])
assert operator_(signature.last_updated, last_push_timestamp)
@@ -452,8 +452,8 @@ def test_multi_commit_data_is_removed_by_dedicated_management_script(
settings,
):
settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION = True
- sibling_perf_artifacts[0]['blob'].pop(
- 'pushTimestamp'
+ sibling_perf_artifacts[0]["blob"].pop(
+ "pushTimestamp"
) # assume 1st PERFORMANCE_DATA is ingested in the old way
# ingest all perf_data
@@ -469,7 +469,7 @@ def test_multi_commit_data_is_removed_by_dedicated_management_script(
== (len(sibling_perf_artifacts) - 1) * DATA_PER_ARTIFACT
)
- call_command('remove_multi_commit_data')
+ call_command("remove_multi_commit_data")
assert MultiCommitDatum.objects.all().count() == 0
assert (
PerformanceDatum.objects.all().count() == DATA_PER_ARTIFACT
diff --git a/tests/etl/test_perf_schema.py b/tests/etl/test_perf_schema.py
index 8827fe9a460..6ef85847aa1 100644
--- a/tests/etl/test_perf_schema.py
+++ b/tests/etl/test_perf_schema.py
@@ -5,41 +5,41 @@
@pytest.mark.parametrize(
- ('suite_value', 'test_value', 'expected_fail'),
+ ("suite_value", "test_value", "expected_fail"),
[
({}, {}, True),
- ({'value': 1234}, {}, True),
- ({}, {'value': 1234}, False),
- ({'value': 1234}, {'value': 1234}, False),
- ({'value': float('inf')}, {}, True),
- ({}, {'value': float('inf')}, True),
+ ({"value": 1234}, {}, True),
+ ({}, {"value": 1234}, False),
+ ({"value": 1234}, {"value": 1234}, False),
+ ({"value": float("inf")}, {}, True),
+ ({}, {"value": float("inf")}, True),
(
{
- 'value': 1234,
- 'extraOptions': [
+ "value": 1234,
+ "extraOptions": [
# has >45 characters
[
- 'android-api-53211-with-google-play-services-and-some-random-other-extra-information'
+ "android-api-53211-with-google-play-services-and-some-random-other-extra-information"
]
],
},
- {'value': 1234},
+ {"value": 1234},
True,
),
(
- {'value': 1234, 'extraOptions': ['1', '2', '3', '4', '5', '6', '7', '8', '9']},
- {'value': 1234},
+ {"value": 1234, "extraOptions": ["1", "2", "3", "4", "5", "6", "7", "8", "9"]},
+ {"value": 1234},
True,
),
(
- {'value': 1234, 'extraOptions': ['1', '2', '3', '4', '5', '6', '7', '8']},
- {'value': 1234},
+ {"value": 1234, "extraOptions": ["1", "2", "3", "4", "5", "6", "7", "8"]},
+ {"value": 1234},
False,
),
],
)
def test_perf_schema(suite_value, test_value, expected_fail):
- with open('schemas/performance-artifact.json') as f:
+ with open("schemas/performance-artifact.json") as f:
perf_schema = json.load(f)
datum = {
@@ -51,8 +51,8 @@ def test_perf_schema(suite_value, test_value, expected_fail):
}
],
}
- datum['suites'][0].update(suite_value)
- datum['suites'][0]['subtests'][0].update(test_value)
+ datum["suites"][0].update(suite_value)
+ datum["suites"][0]["subtests"][0].update(test_value)
print(datum)
if expected_fail:
with pytest.raises(jsonschema.ValidationError):
diff --git a/tests/etl/test_push_loader.py b/tests/etl/test_push_loader.py
index dd959977c72..a6ce96df0ab 100644
--- a/tests/etl/test_push_loader.py
+++ b/tests/etl/test_push_loader.py
@@ -56,7 +56,7 @@ def mock_github_pr_commits(activate_responses):
"https://api.github.com/repos/mozilla/test_treeherder/pulls/1692/commits",
body=mocked_content,
status=200,
- content_type='application/json',
+ content_type="application/json",
)
@@ -74,7 +74,7 @@ def mock_github_push_compare(activate_responses):
"5fdb785b28b356f50fc1d9cb180d401bb03fc1f1",
json=mocked_content[0],
status=200,
- content_type='application/json',
+ content_type="application/json",
)
responses.add(
responses.GET,
@@ -83,7 +83,7 @@ def mock_github_push_compare(activate_responses):
"ad9bfc2a62b70b9f3dbb1c3a5969f30bacce3d74",
json=mocked_content[1],
status=200,
- content_type='application/json',
+ content_type="application/json",
)
@@ -98,7 +98,7 @@ def mock_hg_push_commits(activate_responses):
"https://hg.mozilla.org/try/json-pushes",
body=mocked_content,
status=200,
- content_type='application/json',
+ content_type="application/json",
)
diff --git a/tests/etl/test_pushlog.py b/tests/etl/test_pushlog.py
index ce95ab82beb..8da2f658d8f 100644
--- a/tests/etl/test_pushlog.py
+++ b/tests/etl/test_pushlog.py
@@ -11,7 +11,7 @@
def test_ingest_hg_pushlog(test_repository, test_base_dir, activate_responses):
"""ingesting a number of pushes should populate push and revisions"""
- pushlog_path = os.path.join(test_base_dir, 'sample_data', 'hg_pushlog.json')
+ pushlog_path = os.path.join(test_base_dir, "sample_data", "hg_pushlog.json")
with open(pushlog_path) as f:
pushlog_content = f.read()
pushlog_fake_url = "http://www.thisismypushlog.com"
@@ -20,7 +20,7 @@ def test_ingest_hg_pushlog(test_repository, test_base_dir, activate_responses):
pushlog_fake_url,
body=pushlog_content,
status=200,
- content_type='application/json',
+ content_type="application/json",
)
process = HgPushlogProcess()
@@ -37,10 +37,10 @@ def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir, activa
all the pushes in the request,
e.g. trying to store [A,B] with A already stored, B will be stored"""
- pushlog_path = os.path.join(test_base_dir, 'sample_data', 'hg_pushlog.json')
+ pushlog_path = os.path.join(test_base_dir, "sample_data", "hg_pushlog.json")
with open(pushlog_path) as f:
pushlog_json = json.load(f)
- pushes = list(pushlog_json['pushes'].values())
+ pushes = list(pushlog_json["pushes"].values())
first_push, second_push = pushes[0:2]
pushlog_fake_url = "http://www.thisismypushlog.com/?full=1&version=2"
@@ -52,7 +52,7 @@ def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir, activa
pushlog_fake_url,
body=first_push_json,
status=200,
- content_type='application/json',
+ content_type="application/json",
)
process = HgPushlogProcess()
@@ -70,7 +70,7 @@ def test_ingest_hg_pushlog_already_stored(test_repository, test_base_dir, activa
pushlog_fake_url + "&startID=1",
body=first_and_second_push_json,
status=200,
- content_type='application/json',
+ content_type="application/json",
)
process = HgPushlogProcess()
@@ -85,7 +85,7 @@ def test_ingest_hg_pushlog_cache_last_push(test_repository, test_base_dir, activ
ingesting a number of pushes should cache the top revision of the last push
"""
- pushlog_path = os.path.join(test_base_dir, 'sample_data', 'hg_pushlog.json')
+ pushlog_path = os.path.join(test_base_dir, "sample_data", "hg_pushlog.json")
with open(pushlog_path) as f:
pushlog_content = f.read()
pushlog_fake_url = "http://www.thisismypushlog.com"
@@ -94,14 +94,14 @@ def test_ingest_hg_pushlog_cache_last_push(test_repository, test_base_dir, activ
pushlog_fake_url,
body=pushlog_content,
status=200,
- content_type='application/json',
+ content_type="application/json",
)
process = HgPushlogProcess()
process.run(pushlog_fake_url, test_repository.name)
pushlog_dict = json.loads(pushlog_content)
- pushes = pushlog_dict['pushes']
+ pushes = pushlog_dict["pushes"]
max_push_id = max(int(k) for k in pushes.keys())
cache_key = "{}:last_push_id".format(test_repository.name)
@@ -123,7 +123,7 @@ def test_empty_json_pushes(test_repository, test_base_dir, activate_responses):
pushlog_fake_url,
body=empty_push_json,
status=200,
- content_type='application/json',
+ content_type="application/json",
)
process = HgPushlogProcess()
diff --git a/tests/etl/test_runnable_jobs.py b/tests/etl/test_runnable_jobs.py
index 3e699169819..f474b8fb899 100644
--- a/tests/etl/test_runnable_jobs.py
+++ b/tests/etl/test_runnable_jobs.py
@@ -6,30 +6,30 @@
_taskcluster_runnable_jobs,
)
-TASK_ID = 'AFq3FRt4TyiTwIN7fUqOQg'
-CONTENT1 = {'taskId': TASK_ID}
+TASK_ID = "AFq3FRt4TyiTwIN7fUqOQg"
+CONTENT1 = {"taskId": TASK_ID}
RUNNABLE_JOBS_URL = RUNNABLE_JOBS_URL.format(task_id=TASK_ID, run_number=0)
-JOB_NAME = 'job name'
+JOB_NAME = "job name"
API_RETURN = {
- 'build_platform': 'plaform name',
- 'build_system_type': 'taskcluster',
- 'job_group_name': 'Group Name',
- 'job_group_symbol': 'GRP',
- 'job_type_name': JOB_NAME,
- 'job_type_symbol': 'sym',
- 'platform': 'plaform name',
- 'platform_option': 'opt',
- 'ref_data_name': JOB_NAME,
- 'state': 'runnable',
- 'result': 'runnable',
+ "build_platform": "plaform name",
+ "build_system_type": "taskcluster",
+ "job_group_name": "Group Name",
+ "job_group_symbol": "GRP",
+ "job_type_name": JOB_NAME,
+ "job_type_symbol": "sym",
+ "platform": "plaform name",
+ "platform_option": "opt",
+ "ref_data_name": JOB_NAME,
+ "state": "runnable",
+ "result": "runnable",
}
RUNNABLE_JOBS_CONTENTS = {
JOB_NAME: {
- 'collection': {'opt': True},
- 'groupName': API_RETURN['job_group_name'],
- 'groupSymbol': API_RETURN['job_group_symbol'],
- 'platform': API_RETURN['platform'],
- 'symbol': API_RETURN['job_type_symbol'],
+ "collection": {"opt": True},
+ "groupName": API_RETURN["job_group_name"],
+ "groupSymbol": API_RETURN["job_group_symbol"],
+ "platform": API_RETURN["platform"],
+ "symbol": API_RETURN["job_type_symbol"],
}
}
diff --git a/tests/etl/test_text.py b/tests/etl/test_text.py
index 8b65df4cde8..9046ae12c6d 100644
--- a/tests/etl/test_text.py
+++ b/tests/etl/test_text.py
@@ -4,9 +4,9 @@
def test_filter_re_matching():
points = [
- u"\U00010045",
- u"\U00010053",
- u"\U00010054",
+ "\U00010045",
+ "\U00010053",
+ "\U00010054",
]
for point in points:
assert bool(filter_re.match(point)) is True
@@ -14,21 +14,21 @@ def test_filter_re_matching():
def test_filter_not_matching():
points = [
- u"\U00000045",
- u"\U00000053",
- u"\U00000054",
+ "\U00000045",
+ "\U00000053",
+ "\U00000054",
]
for point in points:
assert bool(filter_re.match(point)) is False
def test_astra_filter_emoji():
- output = astral_filter(u'🍆')
- expected = ''
+ output = astral_filter("🍆")
+ expected = ""
assert output == expected
def test_astra_filter_hex_value():
"""check the expected outcome is also not changed"""
- hex_values = '\U00000048\U00000049'
+ hex_values = "\U00000048\U00000049"
assert hex_values == astral_filter(hex_values)
diff --git a/tests/intermittents_commenter/test_commenter.py b/tests/intermittents_commenter/test_commenter.py
index 4f367327369..c32a0e2d60c 100644
--- a/tests/intermittents_commenter/test_commenter.py
+++ b/tests/intermittents_commenter/test_commenter.py
@@ -6,39 +6,39 @@
@responses.activate
def test_intermittents_commenter(bug_data):
- startday = '2012-05-09'
- endday = '2018-05-10'
+ startday = "2012-05-09"
+ endday = "2018-05-10"
alt_startday = startday
alt_endday = endday
process = Commenter(weekly_mode=True, dry_run=True)
- params = {'include_fields': 'product%2C+component%2C+priority%2C+whiteboard%2C+id'}
- url = '{}/rest/bug?id={}&include_fields={}'.format(
- settings.BZ_API_URL, bug_data['bug_id'], params['include_fields']
+ params = {"include_fields": "product%2C+component%2C+priority%2C+whiteboard%2C+id"}
+ url = "{}/rest/bug?id={}&include_fields={}".format(
+ settings.BZ_API_URL, bug_data["bug_id"], params["include_fields"]
)
content = {
"bugs": [
{
- u"component": u"General",
- u"priority": u"P3",
- u"product": u"Testing",
- u"whiteboard": u"[stockwell infra] [see summary at comment 92]",
- u"id": bug_data['bug_id'],
+ "component": "General",
+ "priority": "P3",
+ "product": "Testing",
+ "whiteboard": "[stockwell infra] [see summary at comment 92]",
+ "id": bug_data["bug_id"],
}
],
"faults": [],
}
- responses.add(responses.Response(method='GET', url=url, json=content, status=200))
+ responses.add(responses.Response(method="GET", url=url, json=content, status=200))
- resp = process.fetch_bug_details(bug_data['bug_id'])
- assert resp == content['bugs']
+ resp = process.fetch_bug_details(bug_data["bug_id"])
+ assert resp == content["bugs"]
comment_params = process.generate_bug_changes(startday, endday, alt_startday, alt_endday)
- with open('tests/intermittents_commenter/expected_comment.text', 'r') as comment:
+ with open("tests/intermittents_commenter/expected_comment.text", "r") as comment:
expected_comment = comment.read()
print(len(expected_comment))
- print(len(comment_params[0]['changes']['comment']['body']))
- assert comment_params[0]['changes']['comment']['body'] == expected_comment
+ print(len(comment_params[0]["changes"]["comment"]["body"]))
+ assert comment_params[0]["changes"]["comment"]["body"] == expected_comment
diff --git a/tests/log_parser/test_artifact_builder_collection.py b/tests/log_parser/test_artifact_builder_collection.py
index 92d5e490839..b57fdea2d06 100644
--- a/tests/log_parser/test_artifact_builder_collection.py
+++ b/tests/log_parser/test_artifact_builder_collection.py
@@ -57,14 +57,14 @@ def test_all_builders_complete():
@responses.activate
def test_log_download_size_limit():
"""Test that logs whose Content-Length exceed the size limit are not parsed."""
- url = 'http://foo.tld/fake_large_log.tar.gz'
+ url = "http://foo.tld/fake_large_log.tar.gz"
responses.add(
responses.GET,
url,
- body='',
+ body="",
adding_headers={
- 'Content-Encoding': 'gzip',
- 'Content-Length': str(MAX_DOWNLOAD_SIZE_IN_BYTES + 1),
+ "Content-Encoding": "gzip",
+ "Content-Length": str(MAX_DOWNLOAD_SIZE_IN_BYTES + 1),
},
)
lpc = ArtifactBuilderCollection(url)
diff --git a/tests/log_parser/test_error_parser.py b/tests/log_parser/test_error_parser.py
index 3c77e75b96b..7d307c7a28b 100644
--- a/tests/log_parser/test_error_parser.py
+++ b/tests/log_parser/test_error_parser.py
@@ -120,7 +120,7 @@ def test_error_lines_matched(line):
def test_error_lines_taskcluster(line):
parser = ErrorParser()
# Make the log parser think this is a TaskCluster log.
- parser.parse_line('[taskcluster foo] this is a taskcluster log', 1)
+ parser.parse_line("[taskcluster foo] this is a taskcluster log", 1)
assert parser.is_taskcluster
parser.parse_line(line, 2)
assert len(parser.artifact) == 1
@@ -155,4 +155,4 @@ def test_taskcluster_strip_prefix():
# TC prefix is stripped.
parser.parse_line("[vcs 2016-09-07T19:03:02.188327Z] 23:57:52 ERROR - Return code: 1", 3)
assert len(parser.artifact) == 1
- assert parser.artifact[0]['linenumber'] == 3
+ assert parser.artifact[0]["linenumber"] == 3
diff --git a/tests/log_parser/test_performance_artifact_builder.py b/tests/log_parser/test_performance_artifact_builder.py
index 5981f4620c7..9d31f0167a1 100644
--- a/tests/log_parser/test_performance_artifact_builder.py
+++ b/tests/log_parser/test_performance_artifact_builder.py
@@ -15,9 +15,9 @@ def test_performance_log_parsing():
# first two have only one artifact, second has two artifacts
for logfile, num_perf_artifacts in [
- ('mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz', 1),
- ('try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz', 1),
- ('mozilla-inbound-linux64-bm72-build1-build225.txt.gz', 2),
+ ("mozilla-inbound-android-api-11-debug-bm91-build1-build1317.txt.gz", 1),
+ ("try_ubuntu64_hw_test-chromez-bm103-tests1-linux-build1429.txt.gz", 1),
+ ("mozilla-inbound-linux64-bm72-build1-build225.txt.gz", 2),
]:
url = add_log_response(logfile)
@@ -25,6 +25,6 @@ def test_performance_log_parsing():
lpc = ArtifactBuilderCollection(url, builders=[builder])
lpc.parse()
act = lpc.artifacts[builder.name]
- assert len(act['performance_data']) == num_perf_artifacts
- for perfherder_artifact in act['performance_data']:
+ assert len(act["performance_data"]) == num_perf_artifacts
+ for perfherder_artifact in act["performance_data"]:
validate(perfherder_artifact, PERFHERDER_SCHEMA)
diff --git a/tests/log_parser/test_performance_parser.py b/tests/log_parser/test_performance_parser.py
index afb570ee1b4..34944a36789 100644
--- a/tests/log_parser/test_performance_parser.py
+++ b/tests/log_parser/test_performance_parser.py
@@ -27,6 +27,6 @@ def test_performance_log_parsing_malformed_perfherder_data():
}
],
}
- parser.parse_line('PERFHERDER_DATA: {}'.format(json.dumps(valid_perfherder_data)), 3)
+ parser.parse_line("PERFHERDER_DATA: {}".format(json.dumps(valid_perfherder_data)), 3)
assert parser.get_artifact() == [valid_perfherder_data]
diff --git a/tests/log_parser/test_store_failure_lines.py b/tests/log_parser/test_store_failure_lines.py
index b25bdd0052e..6fdb8a2a75f 100644
--- a/tests/log_parser/test_store_failure_lines.py
+++ b/tests/log_parser/test_store_failure_lines.py
@@ -17,7 +17,7 @@
def test_store_error_summary(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("plain-chunked_errorsummary.log")
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)
@@ -37,7 +37,7 @@ def test_store_error_summary(activate_responses, test_repository, test_job):
def test_store_error_summary_default_group(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("plain-chunked_errorsummary.log")
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
with open(log_path) as log_handler:
resp_body = json.load(log_handler)
@@ -54,9 +54,9 @@ def test_store_error_summary_default_group(activate_responses, test_repository,
def test_store_error_summary_truncated(activate_responses, test_repository, test_job, monkeypatch):
log_path = SampleData().get_log_path("plain-chunked_errorsummary_10_lines.log")
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
- monkeypatch.setattr(settings, 'FAILURE_LINES_CUTOFF', 5)
+ monkeypatch.setattr(settings, "FAILURE_LINES_CUTOFF", 5)
with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)
@@ -67,7 +67,7 @@ def test_store_error_summary_truncated(activate_responses, test_repository, test
assert FailureLine.objects.count() == 5 + 1
- failure = FailureLine.objects.get(action='truncated')
+ failure = FailureLine.objects.get(action="truncated")
assert failure.job_guid == test_job.guid
@@ -76,9 +76,9 @@ def test_store_error_summary_truncated(activate_responses, test_repository, test
def test_store_error_summary_astral(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("plain-chunked_errorsummary_astral.log")
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
- with open(log_path, encoding='utf8') as log_handler:
+ with open(log_path, encoding="utf8") as log_handler:
responses.add(
responses.GET,
log_url,
@@ -100,7 +100,7 @@ def test_store_error_summary_astral(activate_responses, test_repository, test_jo
assert failure.repository == test_repository
# Specific unicode chars cannot be inserted as MySQL pseudo-UTF8 and are replaced by a plain text representation
- if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
+ if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
assert (
failure.test
== "toolkit/content/tests/widgets/test_videocontrols_video_direction.html "
@@ -122,7 +122,7 @@ def test_store_error_summary_astral(activate_responses, test_repository, test_jo
def test_store_error_summary_404(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("plain-chunked_errorsummary.log")
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=404)
@@ -137,7 +137,7 @@ def test_store_error_summary_404(activate_responses, test_repository, test_job):
def test_store_error_summary_500(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("plain-chunked_errorsummary.log")
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=500)
@@ -152,7 +152,7 @@ def test_store_error_summary_500(activate_responses, test_repository, test_job):
def test_store_error_summary_duplicate(activate_responses, test_repository, test_job):
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
log_obj = JobLog.objects.create(job=test_job, name="errorsummary_json", url=log_url)
write_failure_lines(
@@ -171,7 +171,7 @@ def test_store_error_summary_duplicate(activate_responses, test_repository, test
def test_store_error_summary_group_status(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("mochitest-browser-chrome_errorsummary.log")
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)
@@ -195,7 +195,7 @@ def test_store_error_summary_group_status(activate_responses, test_repository, t
def test_group_status_duration(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("mochitest-browser-chrome_errorsummary.log")
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)
@@ -215,7 +215,7 @@ def test_group_status_duration(activate_responses, test_repository, test_job):
def test_get_group_results(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("mochitest-browser-chrome_errorsummary.log")
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)
@@ -224,14 +224,14 @@ def test_get_group_results(activate_responses, test_repository, test_job):
store_failure_lines(log_obj)
groups = get_group_results(test_job.push)
- task_groups = groups['V3SVuxO8TFy37En_6HcXLs']
+ task_groups = groups["V3SVuxO8TFy37En_6HcXLs"]
- assert task_groups['dom/base/test/browser.ini']
+ assert task_groups["dom/base/test/browser.ini"]
def test_get_group_results_with_colon(activate_responses, test_repository, test_job):
log_path = SampleData().get_log_path("xpcshell-errorsummary-with-colon.log")
- log_url = 'http://my-log.mozilla.org'
+ log_url = "http://my-log.mozilla.org"
with open(log_path) as log_handler:
responses.add(responses.GET, log_url, body=log_handler.read(), status=200)
@@ -240,12 +240,12 @@ def test_get_group_results_with_colon(activate_responses, test_repository, test_
store_failure_lines(log_obj)
groups = get_group_results(test_job.push)
- task_groups = groups['V3SVuxO8TFy37En_6HcXLs']
+ task_groups = groups["V3SVuxO8TFy37En_6HcXLs"]
assert task_groups[
- 'toolkit/components/extensions/test/xpcshell/xpcshell-e10s.ini:toolkit/components/extensions/test/xpcshell/xpcshell-content.ini'
+ "toolkit/components/extensions/test/xpcshell/xpcshell-e10s.ini:toolkit/components/extensions/test/xpcshell/xpcshell-content.ini"
]
- assert task_groups['toolkit/components/places/tests/unit/xpcshell.ini']
+ assert task_groups["toolkit/components/places/tests/unit/xpcshell.ini"]
assert task_groups[
- 'toolkit/components/extensions/test/xpcshell/xpcshell-e10s.ini:toolkit/components/extensions/test/xpcshell/xpcshell-common-e10s.ini'
+ "toolkit/components/extensions/test/xpcshell/xpcshell-e10s.ini:toolkit/components/extensions/test/xpcshell/xpcshell-common-e10s.ini"
]
diff --git a/tests/log_parser/test_tasks.py b/tests/log_parser/test_tasks.py
index bc4061d57f8..ffb6a77b306 100644
--- a/tests/log_parser/test_tasks.py
+++ b/tests/log_parser/test_tasks.py
@@ -19,7 +19,7 @@ def jobs_with_local_log(activate_responses):
job = sample_data.job_data[0]
# substitute the log url with a local url
- job['job']['log_references'][0]['url'] = url
+ job["job"]["log_references"][0]["url"] = url
return [job]
@@ -35,8 +35,8 @@ def test_create_error_summary(
jobs = jobs_with_local_log
for job in jobs:
- job['job']['result'] = "testfailed"
- job['revision'] = sample_push[0]['revision']
+ job["job"]["result"] = "testfailed"
+ job["revision"] = sample_push[0]["revision"]
store_job_data(test_repository, jobs)
diff --git a/tests/log_parser/test_utils.py b/tests/log_parser/test_utils.py
index f81b12d843d..dffd2a7a8c3 100644
--- a/tests/log_parser/test_utils.py
+++ b/tests/log_parser/test_utils.py
@@ -14,58 +14,58 @@
)
LENGTH_OK = {
- 'framework': {},
- 'suites': [
+ "framework": {},
+ "suites": [
{
- 'extraOptions': [
- '.' * 45,
- '.' * 100,
+ "extraOptions": [
+ "." * 45,
+ "." * 100,
],
- 'name': 'testing',
- 'subtests': [],
+ "name": "testing",
+ "subtests": [],
}
]
* 3,
}
LONGER_THAN_ALL_MAX = {
- 'framework': {},
- 'suites': [
+ "framework": {},
+ "suites": [
{
- 'extraOptions': [
- '.' * 46,
- '.' * 101,
+ "extraOptions": [
+ "." * 46,
+ "." * 101,
],
- 'name': 'testing',
- 'subtests': [],
+ "name": "testing",
+ "subtests": [],
}
],
}
LONGER_THAN_BIGGER_MAX = {
- 'framework': {},
- 'suites': [
+ "framework": {},
+ "suites": [
{
- 'extraOptions': [
- '.' * 45,
- '.' * 101,
+ "extraOptions": [
+ "." * 45,
+ "." * 101,
],
- 'name': 'testing',
- 'subtests': [],
+ "name": "testing",
+ "subtests": [],
}
],
}
LONGER_THAN_SMALLER_MAX = {
- 'framework': {},
- 'suites': [
+ "framework": {},
+ "suites": [
{
- 'extraOptions': [
- '.' * 46,
- '.' * 100,
+ "extraOptions": [
+ "." * 46,
+ "." * 100,
],
- 'name': 'testing',
- 'subtests': [],
+ "name": "testing",
+ "subtests": [],
}
]
* 3,
@@ -77,7 +77,7 @@ def test_smaller_than_bigger():
def test_extra_option_max_length():
- with open(os.path.join('schemas', 'performance-artifact.json')) as f:
+ with open(os.path.join("schemas", "performance-artifact.json")) as f:
PERFHERDER_SCHEMA = json.load(f)
assert 100 == _lookup_extra_options_max(PERFHERDER_SCHEMA)
@@ -90,7 +90,7 @@ def test_validate_perf_schema_no_exception():
@pytest.mark.parametrize(
- 'data', (LONGER_THAN_ALL_MAX, LONGER_THAN_BIGGER_MAX, LONGER_THAN_SMALLER_MAX)
+ "data", (LONGER_THAN_ALL_MAX, LONGER_THAN_BIGGER_MAX, LONGER_THAN_SMALLER_MAX)
)
def test_validate_perf_schema(data):
for datum in data:
diff --git a/tests/model/cycle_data/test_perfherder_cycling.py b/tests/model/cycle_data/test_perfherder_cycling.py
index dfd35eb50bd..23f5fbfb25c 100644
--- a/tests/model/cycle_data/test_perfherder_cycling.py
+++ b/tests/model/cycle_data/test_perfherder_cycling.py
@@ -36,11 +36,11 @@ def empty_backfill_report(test_perf_alert_summary) -> BackfillReport:
@pytest.mark.parametrize(
- 'repository_name',
+ "repository_name",
[
- 'autoland',
- 'mozilla-beta',
- 'mozilla-central',
+ "autoland",
+ "mozilla-beta",
+ "mozilla-central",
],
)
def test_cycle_performance_data(
@@ -57,13 +57,13 @@ def test_cycle_performance_data(
expired_timestamp = datetime.now() - timedelta(days=400)
test_perf_signature_2 = PerformanceSignature.objects.create(
- signature_hash='b' * 40,
+ signature_hash="b" * 40,
repository=test_perf_signature.repository,
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
suite=test_perf_signature.suite,
- test='test 2',
+ test="test 2",
last_updated=expired_timestamp,
has_subtests=False,
)
@@ -100,12 +100,12 @@ def test_cycle_performance_data(
command = filter(
lambda arg: arg is not None,
- ['cycle_data', 'from:perfherder'],
+ ["cycle_data", "from:perfherder"],
)
call_command(*list(command)) # test repository isn't a main one
- assert list(PerformanceDatum.objects.values_list('id', flat=True)) == [1]
- assert list(PerformanceSignature.objects.values_list('id', flat=True)) == [
+ assert list(PerformanceDatum.objects.values_list("id", flat=True)) == [1]
+ assert list(PerformanceSignature.objects.values_list("id", flat=True)) == [
test_perf_signature.id
]
@@ -115,36 +115,36 @@ def test_performance_signatures_are_deleted(test_perf_signature, taskcluster_not
expired_timestamp = cycler.max_timestamp
perf_signature_to_delete = PerformanceSignature.objects.create(
- signature_hash='b' * 40,
+ signature_hash="b" * 40,
repository=test_perf_signature.repository,
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
suite=test_perf_signature.suite,
- test='test_perf_signature_to_delete',
+ test="test_perf_signature_to_delete",
last_updated=expired_timestamp,
has_subtests=False,
)
perf_signature_to_keep = PerformanceSignature.objects.create(
- signature_hash='h' * 40,
+ signature_hash="h" * 40,
repository=test_perf_signature.repository,
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
suite=test_perf_signature.suite,
- test='test_perf_signature_to_keep',
+ test="test_perf_signature_to_keep",
last_updated=datetime.now(),
has_subtests=False,
)
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert perf_signature_to_keep.id in list(
- PerformanceSignature.objects.values_list('id', flat=True)
+ PerformanceSignature.objects.values_list("id", flat=True)
)
assert perf_signature_to_delete.id not in list(
- PerformanceSignature.objects.values_list('id', flat=True)
+ PerformanceSignature.objects.values_list("id", flat=True)
)
@@ -160,7 +160,7 @@ def test_try_data_removal(
test_perf_signature.repository = try_repository
test_perf_signature.save()
- try_pushes = list(Push.objects.filter(repository=try_repository).order_by('id').all())
+ try_pushes = list(Push.objects.filter(repository=try_repository).order_by("id").all())
for idx, push in enumerate(try_pushes[:-2]):
push_timestamp = datetime.now()
@@ -191,7 +191,7 @@ def test_try_data_removal(
total_initial_data = PerformanceDatum.objects.count()
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert PerformanceDatum.objects.count() == total_initial_data - total_removals
assert not PerformanceDatum.objects.filter(
push_timestamp__lt=datetime.now() - timedelta(weeks=6),
@@ -203,8 +203,8 @@ def test_try_data_removal(
@pytest.mark.parametrize(
- 'repository_name',
- ['autoland', 'mozilla-beta', 'fenix', 'reference-browser'],
+ "repository_name",
+ ["autoland", "mozilla-beta", "fenix", "reference-browser"],
)
def test_irrelevant_repos_data_removal(
test_repository,
@@ -262,7 +262,7 @@ def test_irrelevant_repos_data_removal(
total_initial_data = PerformanceDatum.objects.count()
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert PerformanceDatum.objects.count() == total_initial_data - 1
assert PerformanceDatum.objects.filter(repository=relevant_repository).exists()
assert not PerformanceDatum.objects.filter(
@@ -285,14 +285,14 @@ def test_signature_remover(
assert len(PerformanceSignature.objects.all()) == 2
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert taskcluster_notify_mock.email.call_count == 1
assert len(PerformanceSignature.objects.all()) == 1
assert PerformanceSignature.objects.first() == test_perf_signature
-@pytest.mark.parametrize('total_signatures', [3, 4, 8, 10])
+@pytest.mark.parametrize("total_signatures", [3, 4, 8, 10])
def test_total_emails_sent(
test_perf_signature, try_repository, total_signatures, mock_tc_prod_notify_credentials
):
@@ -311,13 +311,13 @@ def test_total_emails_sent(
for n in range(0, total_signatures):
PerformanceSignature.objects.create(
repository=test_perf_signature.repository,
- signature_hash=(20 * ('t%s' % n)),
+ signature_hash=(20 * ("t%s" % n)),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
- suite='mysuite%s' % n,
- test='mytest%s' % n,
- application='firefox',
+ suite="mysuite%s" % n,
+ test="mytest%s" % n,
+ application="firefox",
has_subtests=test_perf_signature.has_subtests,
extra_options=test_perf_signature.extra_options,
last_updated=datetime.now(),
@@ -326,13 +326,13 @@ def test_total_emails_sent(
for n in range(0, 10):
PerformanceSignature.objects.create(
repository=try_repository,
- signature_hash=(20 * ('e%s' % n)),
+ signature_hash=(20 * ("e%s" % n)),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
- suite='mysuite%s' % n,
- test='mytest%s' % n,
- application='firefox',
+ suite="mysuite%s" % n,
+ test="mytest%s" % n,
+ application="firefox",
has_subtests=test_perf_signature.has_subtests,
extra_options=test_perf_signature.extra_options,
last_updated=datetime.now(),
@@ -348,7 +348,7 @@ def test_total_emails_sent(
signatures_remover.remove_in_chunks(signatures)
assert notify_client_mock.email.call_count == expected_call_count
- assert not PerformanceSignature.objects.filter(repository__name='try').exists()
+ assert not PerformanceSignature.objects.filter(repository__name="try").exists()
def test_remove_try_signatures_without_data(
@@ -367,13 +367,13 @@ def test_remove_try_signatures_without_data(
)
signature_with_perf_data = PerformanceSignature.objects.create(
repository=try_repository,
- signature_hash=(20 * 'e1'),
+ signature_hash=(20 * "e1"),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
- suite='mysuite',
- test='mytest',
- application='firefox',
+ suite="mysuite",
+ test="mytest",
+ application="firefox",
has_subtests=test_perf_signature.has_subtests,
extra_options=test_perf_signature.extra_options,
last_updated=datetime.now(),
@@ -414,7 +414,7 @@ def test_performance_cycler_quit_indicator(taskcluster_notify_mock):
max_runtime.started_at = two_seconds_ago
max_runtime.quit_on_timeout()
except MaxRuntimeExceeded:
- pytest.fail('Performance cycling shouldn\'t have timed out')
+ pytest.fail("Performance cycling shouldn't have timed out")
@pytest.fixture
@@ -432,7 +432,7 @@ def empty_alert_summary(
@pytest.mark.parametrize(
- 'expired_time',
+ "expired_time",
[
datetime.now() - timedelta(days=365),
datetime.now() - timedelta(days=181),
@@ -449,12 +449,12 @@ def test_summary_without_any_kind_of_alerts_is_deleted(
assert empty_alert_summary.alerts.count() == 0
assert empty_alert_summary.related_alerts.count() == 0
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert not PerformanceAlertSummary.objects.exists()
@pytest.mark.parametrize(
- 'recently',
+ "recently",
[
datetime.now(),
datetime.now() - timedelta(minutes=30),
@@ -472,12 +472,12 @@ def test_summary_without_any_kind_of_alerts_isnt_deleted(
assert empty_alert_summary.alerts.count() == 0
assert empty_alert_summary.related_alerts.count() == 0
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert PerformanceAlertSummary.objects.count() == 1
@pytest.mark.parametrize(
- 'creation_time',
+ "creation_time",
[
# expired
datetime.now() - timedelta(days=365),
@@ -515,7 +515,7 @@ def test_summary_with_alerts_isnt_deleted(
assert empty_alert_summary.alerts.count() == 1
assert empty_alert_summary.related_alerts.count() == 0
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert PerformanceAlertSummary.objects.filter(id=empty_alert_summary.id).exists()
# with both
@@ -526,7 +526,7 @@ def test_summary_with_alerts_isnt_deleted(
assert empty_alert_summary.alerts.count() == 1
assert empty_alert_summary.related_alerts.count() == 1
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert PerformanceAlertSummary.objects.filter(id=empty_alert_summary.id).exists()
# with related_alerts only
@@ -536,7 +536,7 @@ def test_summary_with_alerts_isnt_deleted(
assert empty_alert_summary.alerts.count() == 0
assert empty_alert_summary.related_alerts.count() == 1
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert PerformanceAlertSummary.objects.filter(id=empty_alert_summary.id).exists()
@@ -563,7 +563,7 @@ def test_stalled_data_removal(
last_updated__lt=max_timestamp
)
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert test_perf_signature not in PerformanceSignature.objects.all()
assert test_perf_data not in PerformanceDatum.objects.all()
@@ -573,8 +573,8 @@ def test_stalled_data_removal(
@pytest.mark.parametrize(
- 'nr_months, repository',
- [(8, 'autoland'), (6, 'autoland'), (5, 'mozilla-central')],
+ "nr_months, repository",
+ [(8, "autoland"), (6, "autoland"), (5, "mozilla-central")],
)
def test_equal_distribution_for_historical_data(
test_repository,
@@ -610,7 +610,7 @@ def test_equal_distribution_for_historical_data(
)
perf_data.append(data)
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert PerformanceSignature.objects.filter(id=perf_signature.id).exists()
all_perf_datum = PerformanceDatum.objects.all()
@@ -619,8 +619,8 @@ def test_equal_distribution_for_historical_data(
@pytest.mark.parametrize(
- 'nr_months, repository',
- [(8, 'autoland'), (6, 'autoland'), (5, 'mozilla-central')],
+ "nr_months, repository",
+ [(8, "autoland"), (6, "autoland"), (5, "mozilla-central")],
)
def test_big_density_in_historical_data(
test_repository,
@@ -667,7 +667,7 @@ def test_big_density_in_historical_data(
)
perf_data.append(data)
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert PerformanceSignature.objects.filter(id=perf_signature.id).exists()
all_perf_datum = PerformanceDatum.objects.all()
@@ -676,8 +676,8 @@ def test_big_density_in_historical_data(
@pytest.mark.parametrize(
- 'nr_months, repository',
- [(5, 'autoland'), (8, 'mozilla-central'), (11, 'mozilla-central')],
+ "nr_months, repository",
+ [(5, "autoland"), (8, "mozilla-central"), (11, "mozilla-central")],
)
def test_one_month_worth_of_data_points(
test_repository,
@@ -721,7 +721,7 @@ def test_one_month_worth_of_data_points(
)
perf_data.append(data)
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
stalled_signature.refresh_from_db()
assert PerformanceSignature.objects.filter(id=stalled_signature.id).exists()
@@ -731,8 +731,8 @@ def test_one_month_worth_of_data_points(
@pytest.mark.parametrize(
- 'nr_months, repository',
- [(8, 'autoland'), (6, 'autoland'), (5, 'mozilla-central')],
+ "nr_months, repository",
+ [(8, "autoland"), (6, "autoland"), (5, "mozilla-central")],
)
def test_non_historical_stalled_data_is_removed(
test_repository,
@@ -768,7 +768,7 @@ def test_non_historical_stalled_data_is_removed(
)
perf_data.append(data)
- call_command('cycle_data', 'from:perfherder')
+ call_command("cycle_data", "from:perfherder")
assert not PerformanceSignature.objects.filter(id=perf_signature.id).exists()
all_perf_datum = PerformanceDatum.objects.all()
@@ -783,8 +783,8 @@ def test_try_data_removal_errors_out_on_missing_try_data(try_repository):
_ = try_removal_strategy.target_signatures
-@patch('treeherder.config.settings.SITE_HOSTNAME', 'treeherder-production.com')
-@pytest.mark.parametrize('days', [5, 30, 100, 364])
+@patch("treeherder.config.settings.SITE_HOSTNAME", "treeherder-production.com")
+@pytest.mark.parametrize("days", [5, 30, 100, 364])
def test_explicit_days_validation_on_all_envs(days):
with pytest.raises(ValueError):
_ = PerfherderCycler(10_000, 0, days=days)
@@ -810,12 +810,12 @@ def test_deleting_performance_data_cascades_to_perf_multicomit_data(test_perf_da
try:
cursor = connection.cursor()
- if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
+ if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
cursor.execute(
- '''
+ """
DELETE FROM `performance_datum`
WHERE id = %s
- ''',
+ """,
[perf_datum.id],
)
else:
@@ -837,10 +837,10 @@ def test_deleting_performance_data_cascades_to_perf_datum_replicate(test_perf_da
try:
cursor = connection.cursor()
cursor.execute(
- '''
+ """
DELETE FROM performance_datum
WHERE id = %s
- ''',
+ """,
[perf_datum.id],
)
except IntegrityError:
@@ -870,7 +870,7 @@ def test_empty_backfill_reports_get_removed(empty_backfill_report):
assert BackfillReport.objects.count() == 0
-@pytest.mark.parametrize('days_since_created', [0, 30, 100])
+@pytest.mark.parametrize("days_since_created", [0, 30, 100])
def test_empty_backfill_reports_arent_removed_if_not_enough_time_passed(
empty_backfill_report, days_since_created
):
diff --git a/tests/model/cycle_data/test_treeherder_cycling.py b/tests/model/cycle_data/test_treeherder_cycling.py
index 73027a1f3c3..8fc38ff01d4 100644
--- a/tests/model/cycle_data/test_treeherder_cycling.py
+++ b/tests/model/cycle_data/test_treeherder_cycling.py
@@ -11,13 +11,13 @@
@pytest.mark.parametrize(
- 'days, expected_jobs, expected_failure_lines, expected_job_logs, cmd_args, cmd_kwargs',
+ "days, expected_jobs, expected_failure_lines, expected_job_logs, cmd_args, cmd_kwargs",
[
- (7, 0, 0, 0, ('cycle_data', 'from:treeherder'), {'sleep_time': 0, 'days': 1}),
+ (7, 0, 0, 0, ("cycle_data", "from:treeherder"), {"sleep_time": 0, "days": 1}),
# also check default '--days' param from treeherder
- (119, 20, 2, 22, ('cycle_data',), {'sleep_time': 0}),
- (120, 0, 0, 0, ('cycle_data',), {'sleep_time': 0}),
- (150, 0, 0, 0, ('cycle_data',), {'sleep_time': 0}),
+ (119, 20, 2, 22, ("cycle_data",), {"sleep_time": 0}),
+ (120, 0, 0, 0, ("cycle_data",), {"sleep_time": 0}),
+ (150, 0, 0, 0, ("cycle_data",), {"sleep_time": 0}),
],
)
def test_cycle_all_data(
@@ -75,7 +75,7 @@ def test_cycle_all_but_one_job(
job_not_deleted.save()
extra_objects = {
- 'failure_lines': (
+ "failure_lines": (
FailureLine,
create_failure_lines(
job_not_deleted, [(test_line, {}), (test_line, {"subtest": "subtest2"})]
@@ -91,7 +91,7 @@ def test_cycle_all_but_one_job(
num_job_logs_to_be_deleted = JobLog.objects.all().exclude(job__id=job_not_deleted.id).count()
num_job_logs_before = JobLog.objects.count()
- call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, debug=True, chunk_size=1)
+ call_command("cycle_data", "from:treeherder", sleep_time=0, days=1, debug=True, chunk_size=1)
assert Job.objects.count() == 1
assert JobLog.objects.count() == (num_job_logs_before - num_job_logs_to_be_deleted)
@@ -119,7 +119,7 @@ def test_cycle_all_data_in_chunks(
create_failure_lines(Job.objects.get(id=1), [(test_line, {})] * 7)
- call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, chunk_size=3)
+ call_command("cycle_data", "from:treeherder", sleep_time=0, days=1, chunk_size=3)
# There should be no jobs after cycling
assert Job.objects.count() == 0
@@ -133,17 +133,17 @@ def test_cycle_job_model_reference_data(
test_utils.do_job_ingestion(test_repository, job_data, sample_push, False)
# get a list of ids of original reference data
- original_job_type_ids = JobType.objects.values_list('id', flat=True)
- original_job_group_ids = JobGroup.objects.values_list('id', flat=True)
- original_machine_ids = Machine.objects.values_list('id', flat=True)
+ original_job_type_ids = JobType.objects.values_list("id", flat=True)
+ original_job_group_ids = JobGroup.objects.values_list("id", flat=True)
+ original_machine_ids = Machine.objects.values_list("id", flat=True)
# create a bunch of job model data that should be cycled, since they don't
# reference any current jobs
- jg = JobGroup.objects.create(symbol='moo', name='moo')
- jt = JobType.objects.create(symbol='mu', name='mu')
- m = Machine.objects.create(name='machine_with_no_job')
+ jg = JobGroup.objects.create(symbol="moo", name="moo")
+ jt = JobType.objects.create(symbol="mu", name="mu")
+ m = Machine.objects.create(name="machine_with_no_job")
(jg_id, jt_id, m_id) = (jg.id, jt.id, m.id)
- call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, chunk_size=3)
+ call_command("cycle_data", "from:treeherder", sleep_time=0, days=1, chunk_size=3)
# assert that reference data that should have been cycled, was cycled
assert JobGroup.objects.filter(id=jg_id).count() == 0
@@ -186,7 +186,7 @@ def test_cycle_job_with_performance_data(
value=1.0,
)
- call_command('cycle_data', 'from:treeherder', sleep_time=0, days=1, chunk_size=3)
+ call_command("cycle_data", "from:treeherder", sleep_time=0, days=1, chunk_size=3)
# assert that the job got cycled
assert Job.objects.count() == 0
diff --git a/tests/model/test_bugscache.py b/tests/model/test_bugscache.py
index 534522ff3d4..a184b73365f 100644
--- a/tests/model/test_bugscache.py
+++ b/tests/model/test_bugscache.py
@@ -11,26 +11,26 @@
@pytest.fixture
def sample_bugs(test_base_dir):
- filename = os.path.join(test_base_dir, 'sample_data', 'bug_list.json')
+ filename = os.path.join(test_base_dir, "sample_data", "bug_list.json")
with open(filename) as f:
return json.load(f)
def _update_bugscache(bug_list):
- max_summary_length = Bugscache._meta.get_field('summary').max_length
- max_whiteboard_length = Bugscache._meta.get_field('whiteboard').max_length
+ max_summary_length = Bugscache._meta.get_field("summary").max_length
+ max_whiteboard_length = Bugscache._meta.get_field("whiteboard").max_length
for bug in bug_list:
Bugscache.objects.create(
- id=bug['id'],
- status=bug['status'],
- resolution=bug['resolution'],
- summary=bug['summary'][:max_summary_length],
- dupe_of=bug['dupe_of'],
- crash_signature=bug['cf_crash_signature'],
- keywords=",".join(bug['keywords']),
- modified=bug['last_change_time'],
- whiteboard=bug['whiteboard'][:max_whiteboard_length],
+ id=bug["id"],
+ status=bug["status"],
+ resolution=bug["resolution"],
+ summary=bug["summary"][:max_summary_length],
+ dupe_of=bug["dupe_of"],
+ crash_signature=bug["cf_crash_signature"],
+ keywords=",".join(bug["keywords"]),
+ modified=bug["last_change_time"],
+ whiteboard=bug["whiteboard"][:max_whiteboard_length],
processed_update=True,
)
@@ -47,7 +47,7 @@ def _update_bugscache(bug_list):
[1054456],
),
(
- "[taskcluster:error] Command \" [./test-macosx.sh --no-read-buildbot-config --installer-url=https://q",
+ '[taskcluster:error] Command " [./test-macosx.sh --no-read-buildbot-config --installer-url=https://q',
[100],
),
("should not be match_d", []),
@@ -64,33 +64,33 @@ def _update_bugscache(bug_list):
@pytest.mark.parametrize(("search_term", "exp_bugs"), BUG_SEARCHES)
def test_get_open_recent_bugs(transactional_db, sample_bugs, search_term, exp_bugs):
"""Test that we retrieve the expected open recent bugs for a search term."""
- bug_list = sample_bugs['bugs']
+ bug_list = sample_bugs["bugs"]
# Update the resolution so that all bugs will be placed in
# the open_recent bucket, and none in all_others.
for bug in bug_list:
- bug['resolution'] = ''
- bug['last_change_time'] = fifty_days_ago
+ bug["resolution"] = ""
+ bug["last_change_time"] = fifty_days_ago
_update_bugscache(bug_list)
suggestions = Bugscache.search(search_term)
- open_recent_bugs = [b['id'] for b in suggestions['open_recent']]
+ open_recent_bugs = [b["id"] for b in suggestions["open_recent"]]
assert open_recent_bugs == exp_bugs
- assert suggestions['all_others'] == []
+ assert suggestions["all_others"] == []
@pytest.mark.parametrize(("search_term", "exp_bugs"), BUG_SEARCHES)
def test_get_all_other_bugs(transactional_db, sample_bugs, search_term, exp_bugs):
"""Test that we retrieve the expected old bugs for a search term."""
- bug_list = sample_bugs['bugs']
+ bug_list = sample_bugs["bugs"]
# Update the resolution so that all bugs will be placed in
# the all_others bucket, and none in open_recent.
for bug in bug_list:
- bug['resolution'] = 'FIXED'
- bug['last_change_time'] = fifty_days_ago
+ bug["resolution"] = "FIXED"
+ bug["last_change_time"] = fifty_days_ago
_update_bugscache(bug_list)
suggestions = Bugscache.search(search_term)
- assert suggestions['open_recent'] == []
- all_others_bugs = [b['id'] for b in suggestions['all_others']]
+ assert suggestions["open_recent"] == []
+ all_others_bugs = [b["id"] for b in suggestions["all_others"]]
assert all_others_bugs == exp_bugs
@@ -99,46 +99,46 @@ def test_get_recent_resolved_bugs(transactional_db, sample_bugs):
search_term = "Recently modified resolved bugs should be returned in all_others"
exp_bugs = [100001]
- bug_list = sample_bugs['bugs']
+ bug_list = sample_bugs["bugs"]
# Update the resolution so that all bugs will be placed in
# the open_recent bucket, and none in all_others.
for bug in bug_list:
- bug['resolution'] = 'FIXED'
- bug['last_change_time'] = fifty_days_ago
+ bug["resolution"] = "FIXED"
+ bug["last_change_time"] = fifty_days_ago
_update_bugscache(bug_list)
suggestions = Bugscache.search(search_term)
- assert suggestions['open_recent'] == []
- all_others_bugs = [b['id'] for b in suggestions['all_others']]
+ assert suggestions["open_recent"] == []
+ all_others_bugs = [b["id"] for b in suggestions["all_others"]]
assert all_others_bugs == exp_bugs
def test_bug_properties(transactional_db, sample_bugs):
"""Test that we retrieve recent, but fixed bugs for a search term."""
search_term = "test_popup_preventdefault_chrome.xul"
- bug_list = sample_bugs['bugs']
+ bug_list = sample_bugs["bugs"]
# Update the resolution so that all bugs will be placed in
# the open_recent bucket, and none in all_others.
for bug in bug_list:
- bug['resolution'] = ''
- bug['last_change_time'] = fifty_days_ago
+ bug["resolution"] = ""
+ bug["last_change_time"] = fifty_days_ago
_update_bugscache(bug_list)
expected_keys = set(
[
- 'crash_signature',
- 'resolution',
- 'summary',
- 'dupe_of',
- 'keywords',
- 'id',
- 'status',
- 'whiteboard',
+ "crash_signature",
+ "resolution",
+ "summary",
+ "dupe_of",
+ "keywords",
+ "id",
+ "status",
+ "whiteboard",
]
)
suggestions = Bugscache.search(search_term)
- assert set(suggestions['open_recent'][0].keys()) == expected_keys
+ assert set(suggestions["open_recent"][0].keys()) == expected_keys
SEARCH_TERMS = (
@@ -152,7 +152,7 @@ def test_bug_properties(transactional_db, sample_bugs):
" command timed out: 3600 seconds without output running ",
),
(
- "\"input password unmask.html#abc_def 0 7 7 7\"",
+ '"input password unmask.html#abc_def 0 7 7 7"',
" input password unmask.html#abc_def 0 7 7 7 ",
),
)
@@ -199,7 +199,7 @@ def test_import(mock_bugscache_bugzilla_request):
for open_bug, duplicates in EXPECTED_BUG_DUPE_OF_DATA.items():
assert Bugscache.objects.get(id=open_bug).dupe_of is None
- assert set(Bugscache.objects.filter(dupe_of=open_bug).values_list('id', flat=True)) == set(
+ assert set(Bugscache.objects.filter(dupe_of=open_bug).values_list("id", flat=True)) == set(
duplicates
)
diff --git a/tests/model/test_error_summary.py b/tests/model/test_error_summary.py
index e66633a3301..ca2e66ffc00 100644
--- a/tests/model/test_error_summary.py
+++ b/tests/model/test_error_summary.py
@@ -9,14 +9,14 @@
LINE_CLEANING_TEST_CASES = (
(
(
- '00:54:20 INFO - GECKO(1943) | Assertion failure: rc != 0 '
- '(destroyed timer off its target thread!), at '
- '/builds/worker/checkouts/gecko/xpcom/threads/TimerThread.cpp:434'
+ "00:54:20 INFO - GECKO(1943) | Assertion failure: rc != 0 "
+ "(destroyed timer off its target thread!), at "
+ "/builds/worker/checkouts/gecko/xpcom/threads/TimerThread.cpp:434"
),
(
- 'Assertion failure: rc != 0 (destroyed timer off its target thread!),'
- ' at '
- '/builds/worker/checkouts/gecko/xpcom/threads/TimerThread.cpp:434'
+ "Assertion failure: rc != 0 (destroyed timer off its target thread!),"
+ " at "
+ "/builds/worker/checkouts/gecko/xpcom/threads/TimerThread.cpp:434"
),
),
)
@@ -35,35 +35,35 @@ def test_get_cleaned_line(line_raw, exp_line_cleaned):
PIPE_DELIMITED_LINE_TEST_CASES = (
(
(
- '596 INFO TEST-UNEXPECTED-FAIL '
- '| chrome://mochitests/content/browser/browser/components/loop/test/mochitest/browser_fxa_login.js '
- '| Check settings tab URL - Got http://mochi.test:8888/browser/browser/components/loop/test/mochitest/loop_fxa.sjs'
+ "596 INFO TEST-UNEXPECTED-FAIL "
+ "| chrome://mochitests/content/browser/browser/components/loop/test/mochitest/browser_fxa_login.js "
+ "| Check settings tab URL - Got http://mochi.test:8888/browser/browser/components/loop/test/mochitest/loop_fxa.sjs"
),
{
- 'path_end': 'chrome://mochitests/content/browser/browser/components/loop/test/mochitest/browser_fxa_login.js',
- 'search_term': ['browser_fxa_login.js'],
+ "path_end": "chrome://mochitests/content/browser/browser/components/loop/test/mochitest/browser_fxa_login.js",
+ "search_term": ["browser_fxa_login.js"],
},
),
(
(
- 'REFTEST TEST-UNEXPECTED-FAIL '
- '| file:///C:/slave/test/build/tests/reftest/tests/layout/reftests/layers/component-alpha-exit-1.html '
- '| image comparison (==), max difference: 255, number of differing pixels: 251'
+ "REFTEST TEST-UNEXPECTED-FAIL "
+ "| file:///C:/slave/test/build/tests/reftest/tests/layout/reftests/layers/component-alpha-exit-1.html "
+ "| image comparison (==), max difference: 255, number of differing pixels: 251"
),
{
- 'path_end': 'file:///C:/slave/test/build/tests/reftest/tests/layout/reftests/layers/component-alpha-exit-1.html',
- 'search_term': ['component-alpha-exit-1.html'],
+ "path_end": "file:///C:/slave/test/build/tests/reftest/tests/layout/reftests/layers/component-alpha-exit-1.html",
+ "search_term": ["component-alpha-exit-1.html"],
},
),
(
(
- '2423 INFO TEST-UNEXPECTED-FAIL '
- '| /tests/dom/media/tests/mochitest/test_dataChannel_basicAudio.html '
- '| undefined assertion name - Result logged after SimpleTest.finish()'
+ "2423 INFO TEST-UNEXPECTED-FAIL "
+ "| /tests/dom/media/tests/mochitest/test_dataChannel_basicAudio.html "
+ "| undefined assertion name - Result logged after SimpleTest.finish()"
),
{
- 'path_end': '/tests/dom/media/tests/mochitest/test_dataChannel_basicAudio.html',
- 'search_term': ['test_dataChannel_basicAudio.html'],
+ "path_end": "/tests/dom/media/tests/mochitest/test_dataChannel_basicAudio.html",
+ "search_term": ["test_dataChannel_basicAudio.html"],
},
),
(
@@ -73,8 +73,8 @@ def test_get_cleaned_line(line_raw, exp_line_cleaned):
r"| File 'c:\users\cltbld~1.t-w' was accessed and we were not expecting it: {'Count': 6, 'Duration': 0.112512, 'RunCount': 6}"
),
{
- 'path_end': 'mainthreadio',
- 'search_term': ['mainthreadio'],
+ "path_end": "mainthreadio",
+ "search_term": ["mainthreadio"],
},
),
(
@@ -85,8 +85,8 @@ def test_get_cleaned_line(line_raw, exp_line_cleaned):
"http://10.0.2.2:8854/tests/dom/canvas/test/reftest/wrapper.html?green.png"
),
{
- 'path_end': 'http://10.0.2.2:8854/tests/dom/canvas/test/reftest/webgl-resize-test.html',
- 'search_term': ['application crashed [@ jemalloc_crash]'],
+ "path_end": "http://10.0.2.2:8854/tests/dom/canvas/test/reftest/webgl-resize-test.html",
+ "search_term": ["application crashed [@ jemalloc_crash]"],
},
),
(
@@ -97,8 +97,8 @@ def test_get_cleaned_line(line_raw, exp_line_cleaned):
"http://10.0.2.2:8854/tests/dom/canvas/test/reftest/wrapper.html?green.png"
),
{
- 'path_end': 'http://10.0.2.2:8854/tests/dom/canvas/test/reftest/webgl-resize-test.html',
- 'search_term': ['application crashed [@ jemalloc_crash]'],
+ "path_end": "http://10.0.2.2:8854/tests/dom/canvas/test/reftest/webgl-resize-test.html",
+ "search_term": ["application crashed [@ jemalloc_crash]"],
},
),
(
@@ -108,8 +108,8 @@ def test_get_cleaned_line(line_raw, exp_line_cleaned):
"| touch-action attribute test on the cell: assert_true: scroll received while shouldn't expected true got false"
),
{
- 'path_end': '/tests/dom/events/test/pointerevents/pointerevent_touch-action-table-test_touch-manual.html',
- 'search_term': ['pointerevent_touch-action-table-test_touch-manual.html'],
+ "path_end": "/tests/dom/events/test/pointerevents/pointerevent_touch-action-table-test_touch-manual.html",
+ "search_term": ["pointerevent_touch-action-table-test_touch-manual.html"],
},
),
)
@@ -125,15 +125,15 @@ def test_get_delimited_search_term(line, exp_search_info):
PIPE_DELIMITED_LINE_TEST_CASES_WITH_PARAMS = (
(
(
- 'INFO TEST-UNEXPECTED-TIMEOUT '
- '| /html/cross-origin-opener-policy/coep-navigate-popup.https.html?4-last '
- '| TestRunner hit external timeout (this may indicate a hang)'
+ "INFO TEST-UNEXPECTED-TIMEOUT "
+ "| /html/cross-origin-opener-policy/coep-navigate-popup.https.html?4-last "
+ "| TestRunner hit external timeout (this may indicate a hang)"
),
{
- 'path_end': '/html/cross-origin-opener-policy/coep-navigate-popup.https.html?4-last',
- 'search_term': [
- 'coep-navigate-popup.https.html?4-last',
- 'coep-navigate-popup.https.html',
+ "path_end": "/html/cross-origin-opener-policy/coep-navigate-popup.https.html?4-last",
+ "search_term": [
+ "coep-navigate-popup.https.html?4-last",
+ "coep-navigate-popup.https.html",
],
},
),
@@ -150,42 +150,42 @@ def test_get_delimited_search_term_with_params(line, exp_search_info):
LEAK_LINE_TEST_CASES = (
(
(
- 'TEST-UNEXPECTED-FAIL '
- '| leakcheck | 13195 bytes leaked '
- '(BackstagePass, CallbackObject, DOMEventTargetHelper, '
- 'EventListenerManager, EventTokenBucket, ...)'
+ "TEST-UNEXPECTED-FAIL "
+ "| leakcheck | 13195 bytes leaked "
+ "(BackstagePass, CallbackObject, DOMEventTargetHelper, "
+ "EventListenerManager, EventTokenBucket, ...)"
),
{
- 'path_end': None,
- 'search_term': [
- 'BackstagePass, CallbackObject, DOMEventTargetHelper, EventListenerManager, EventTokenBucket, ...'
+ "path_end": None,
+ "search_term": [
+ "BackstagePass, CallbackObject, DOMEventTargetHelper, EventListenerManager, EventTokenBucket, ..."
],
},
),
(
(
- 'TEST-UNEXPECTED-FAIL '
- '| leakcheck | tab process: 44330 bytes leaked '
- '(AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, '
- 'BufferRecycleBin, CipherSuiteChangeObserver, ...)'
+ "TEST-UNEXPECTED-FAIL "
+ "| leakcheck | tab process: 44330 bytes leaked "
+ "(AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, "
+ "BufferRecycleBin, CipherSuiteChangeObserver, ...)"
),
{
- 'path_end': None,
- 'search_term': [
- 'AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, BufferRecycleBin, CipherSui'
+ "path_end": None,
+ "search_term": [
+ "AsyncLatencyLogger, AsyncTransactionTrackersHolder, AudioOutputObserver, BufferRecycleBin, CipherSui"
],
},
),
(
(
- 'TEST-UNEXPECTED-FAIL '
- '| LeakSanitizer | leak at '
- 'MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init'
+ "TEST-UNEXPECTED-FAIL "
+ "| LeakSanitizer | leak at "
+ "MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init"
),
{
- 'path_end': None,
- 'search_term': [
- 'MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init'
+ "path_end": None,
+ "search_term": [
+ "MakeUnique, nsThread::nsChainedEventQueue::nsChainedEventQueue, nsThread, nsThreadManager::Init"
],
},
),
@@ -201,21 +201,21 @@ def test_get_leak_search_term(line, exp_search_info):
FULL_LINE_FALLBACK_TEST_CASES = (
(
- 'Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device',
+ "Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device",
{
- 'path_end': None,
- 'search_term': [
- 'Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device'
+ "path_end": None,
+ "search_term": [
+ "Automation Error: No crash directory (/mnt/sdcard/tests/profile/minidumps/) found on remote device"
],
},
),
(
- 'PROCESS-CRASH | Automation Error: Missing end of test marker (process crashed?)',
+ "PROCESS-CRASH | Automation Error: Missing end of test marker (process crashed?)",
{
- 'path_end': None,
- 'search_term': [
- 'Automation Error: Missing end of test marker (process crashed?)',
- 'Automation Error: Missing end of test marker (process crashed',
+ "path_end": None,
+ "search_term": [
+ "Automation Error: Missing end of test marker (process crashed?)",
+ "Automation Error: Missing end of test marker (process crashed",
],
},
),
@@ -232,32 +232,32 @@ def test_get_full_line_search_term(line, exp_search_info):
LONG_LINE_TEST_CASES = (
(
(
- 'command timed out: 2400 seconds without output running '
- '[\'/tools/buildbot/bin/python\', '
- '\'scripts/scripts/android_emulator_unittest.py\', \'--cfg\', '
- '\'android/androidx86.py\', \'--test-suite\', \'robocop-1\', '
- '\'--test-suite\', \'robocop-2\', \'--test-suite\', \'robocop-3\', '
- '\'--test-suite\', \'xpcshell\', \'--blob-upload-branch\', '
- '\'b2g-inbound\', \'--download-symbols\', \'ondemand\'], '
- 'attempting to kill'
+ "command timed out: 2400 seconds without output running "
+ "['/tools/buildbot/bin/python', "
+ "'scripts/scripts/android_emulator_unittest.py', '--cfg', "
+ "'android/androidx86.py', '--test-suite', 'robocop-1', "
+ "'--test-suite', 'robocop-2', '--test-suite', 'robocop-3', "
+ "'--test-suite', 'xpcshell', '--blob-upload-branch', "
+ "'b2g-inbound', '--download-symbols', 'ondemand'], "
+ "attempting to kill"
),
{
- 'path_end': None,
- 'search_term': [
- 'command timed out: 2400 seconds without output running '
- '[\'/tools/buildbot/bin/python\', \'scripts/scrip'
+ "path_end": None,
+ "search_term": [
+ "command timed out: 2400 seconds without output running "
+ "['/tools/buildbot/bin/python', 'scripts/scrip"
],
},
),
(
(
- 'TEST-UNEXPECTED-FAIL '
- '| frames/marionette/test_switch_frame.py TestSwitchFrame.test_should_be_able_to_carry_on_working_if_the_frame_is_deleted_from_under_us '
- '| AssertionError: 0 != 1'
+ "TEST-UNEXPECTED-FAIL "
+ "| frames/marionette/test_switch_frame.py TestSwitchFrame.test_should_be_able_to_carry_on_working_if_the_frame_is_deleted_from_under_us "
+ "| AssertionError: 0 != 1"
),
{
- 'path_end': 'frames/marionette/test_switch_frame.py',
- 'search_term': ['test_switch_frame.py'],
+ "path_end": "frames/marionette/test_switch_frame.py",
+ "search_term": ["test_switch_frame.py"],
},
),
)
@@ -275,11 +275,11 @@ def test_get_long_search_term(line, exp_search_info):
CRASH_LINE_TEST_CASES = (
(
(
- 'PROCESS-CRASH | application crashed [@ nsInputStreamPump::OnStateStop()] | '
- 'file:///C:/slave/test/build/tests/jsreftest/tests/'
- 'jsreftest.html?test=test262/ch11/11.4/11.4.1/11.4.1-4.a-6.js'
+ "PROCESS-CRASH | application crashed [@ nsInputStreamPump::OnStateStop()] | "
+ "file:///C:/slave/test/build/tests/jsreftest/tests/"
+ "jsreftest.html?test=test262/ch11/11.4/11.4.1/11.4.1-4.a-6.js"
),
- 'nsInputStreamPump::OnStateStop()',
+ "nsInputStreamPump::OnStateStop()",
),
)
@@ -293,30 +293,30 @@ def test_get_crash_signature(line, exp_search_info):
BLACKLIST_TEST_CASES = (
(
- 'TEST-UNEXPECTED-FAIL | remoteautomation.py | application timed out after 330 seconds with no output',
+ "TEST-UNEXPECTED-FAIL | remoteautomation.py | application timed out after 330 seconds with no output",
{
- 'path_end': 'remoteautomation.py',
- 'search_term': [
- 'remoteautomation.py | application timed out after 330 seconds with no output'
+ "path_end": "remoteautomation.py",
+ "search_term": [
+ "remoteautomation.py | application timed out after 330 seconds with no output"
],
},
),
(
- 'Return code: 1',
+ "Return code: 1",
{
- 'path_end': None,
- 'search_term': [None],
+ "path_end": None,
+ "search_term": [None],
},
),
(
(
- 'REFTEST PROCESS-CRASH '
- '| application crashed [@ mozalloc_abort] '
- '| file:///home/worker/workspace/build/tests/reftest/tests/layout/reftests/font-inflation/video-1.html'
+ "REFTEST PROCESS-CRASH "
+ "| application crashed [@ mozalloc_abort] "
+ "| file:///home/worker/workspace/build/tests/reftest/tests/layout/reftests/font-inflation/video-1.html"
),
{
- 'path_end': 'file:///home/worker/workspace/build/tests/reftest/tests/layout/reftests/font-inflation/video-1.html',
- 'search_term': ['application crashed [@ mozalloc_abort]'],
+ "path_end": "file:///home/worker/workspace/build/tests/reftest/tests/layout/reftests/font-inflation/video-1.html",
+ "search_term": ["application crashed [@ mozalloc_abort]"],
},
),
)
diff --git a/tests/model/test_files_bugzilla_map.py b/tests/model/test_files_bugzilla_map.py
index 4d8973f51dd..1c54ca511af 100644
--- a/tests/model/test_files_bugzilla_map.py
+++ b/tests/model/test_files_bugzilla_map.py
@@ -4,10 +4,10 @@
from treeherder.etl.files_bugzilla_map import FilesBugzillaMapProcess
EXPECTED_PROJECTS = [
- 'mozilla-central',
- 'mozilla-beta',
- 'mozilla-release',
- 'mozilla-esr78',
+ "mozilla-central",
+ "mozilla-beta",
+ "mozilla-release",
+ "mozilla-esr78",
]
@@ -18,11 +18,11 @@ def test_get_project_to_import(setup_repository_data):
imported and if the order is correct.
"""
actual_projects = list(
- Repository.objects.filter(codebase='gecko')
- .filter(active_status='active')
+ Repository.objects.filter(codebase="gecko")
+ .filter(active_status="active")
.filter(life_cycle_order__isnull=False)
- .values_list('name', flat=True)
- .order_by('life_cycle_order')
+ .values_list("name", flat=True)
+ .order_by("life_cycle_order")
)
assert actual_projects == EXPECTED_PROJECTS
@@ -41,61 +41,61 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
assert FilesBugzillaMap.objects.count() == 7
EXPECTED_FILES_BUGZILLA_DATA_IMPORT_1 = [
- ('AUTHORS', 'AUTHORS', 'mozilla.org', 'Licensing'),
- ('browser/components/BrowserGlue.jsm', 'BrowserGlue.jsm', 'Firefox', 'General'),
+ ("AUTHORS", "AUTHORS", "mozilla.org", "Licensing"),
+ ("browser/components/BrowserGlue.jsm", "BrowserGlue.jsm", "Firefox", "General"),
(
- 'mozilla-esr78-folder/file.new.here',
- 'file.new.here',
- 'Mock Component',
- 'File only present in mozilla-esr78',
+ "mozilla-esr78-folder/file.new.here",
+ "file.new.here",
+ "Mock Component",
+ "File only present in mozilla-esr78",
),
(
- 'otherfolder/AUTHORS',
- 'AUTHORS',
- 'mozilla.org',
- 'Different path, same product, different component',
+ "otherfolder/AUTHORS",
+ "AUTHORS",
+ "mozilla.org",
+ "Different path, same product, different component",
),
(
- 'testing/web-platform/meta/IndexedDB/historical.html.ini',
- 'historical.html.ini',
- 'Testing',
- 'web-platform-tests',
+ "testing/web-platform/meta/IndexedDB/historical.html.ini",
+ "historical.html.ini",
+ "Testing",
+ "web-platform-tests",
),
(
- 'testing/web-platform/tests/IndexedDB/historical.html',
- 'historical.html',
- 'Core',
- 'Storage: IndexedDB',
+ "testing/web-platform/tests/IndexedDB/historical.html",
+ "historical.html",
+ "Core",
+ "Storage: IndexedDB",
),
(
- 'toolkit/mozilla-beta/fantasy_file.js',
- 'fantasy_file.js',
- 'Mock',
- 'File first seen on mozilla-beta',
+ "toolkit/mozilla-beta/fantasy_file.js",
+ "fantasy_file.js",
+ "Mock",
+ "File first seen on mozilla-beta",
),
]
assert EXPECTED_FILES_BUGZILLA_DATA_IMPORT_1 == list(
FilesBugzillaMap.objects.all()
.values_list(
- 'path', 'file_name', 'bugzilla_component__product', 'bugzilla_component__component'
+ "path", "file_name", "bugzilla_component__product", "bugzilla_component__component"
)
- .order_by('path')
+ .order_by("path")
)
EXPECTED_BUGZILLA_COMPONENTS_IMPORT_1 = [
- ('Core', 'Storage: IndexedDB'),
- ('Firefox', 'General'),
- ('Mock', 'File first seen on mozilla-beta'),
- ('Mock Component', 'File only present in mozilla-esr78'),
- ('Testing', 'web-platform-tests'),
- ('mozilla.org', 'Different path, same product, different component'),
- ('mozilla.org', 'Licensing'),
+ ("Core", "Storage: IndexedDB"),
+ ("Firefox", "General"),
+ ("Mock", "File first seen on mozilla-beta"),
+ ("Mock Component", "File only present in mozilla-esr78"),
+ ("Testing", "web-platform-tests"),
+ ("mozilla.org", "Different path, same product, different component"),
+ ("mozilla.org", "Licensing"),
]
assert EXPECTED_BUGZILLA_COMPONENTS_IMPORT_1 == sorted(
list(
BugzillaComponent.objects.all()
- .values_list('product', 'component')
- .order_by('product', 'component')
+ .values_list("product", "component")
+ .order_by("product", "component")
)
)
@@ -104,55 +104,55 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
assert FilesBugzillaMap.objects.count() == 6
EXPECTED_FILES_BUGZILLA_DATA_IMPORT_2 = [
- ('AUTHORS', 'AUTHORS', 'mozilla.org', 'Import 2: same product, different component'),
- ('browser/components/BrowserGlue.jsm', 'BrowserGlue.jsm', 'Firefox', 'General'),
+ ("AUTHORS", "AUTHORS", "mozilla.org", "Import 2: same product, different component"),
+ ("browser/components/BrowserGlue.jsm", "BrowserGlue.jsm", "Firefox", "General"),
(
- 'testing/web-platform/meta/IndexedDB/historical.html.ini',
- 'historical.html.ini',
- 'Testing',
- 'web-platform-tests',
+ "testing/web-platform/meta/IndexedDB/historical.html.ini",
+ "historical.html.ini",
+ "Testing",
+ "web-platform-tests",
),
(
- 'testing/web-platform/tests/IndexedDB/historical.html',
- 'historical.html',
- 'Core',
- 'Storage: IndexedDB',
+ "testing/web-platform/tests/IndexedDB/historical.html",
+ "historical.html",
+ "Core",
+ "Storage: IndexedDB",
),
(
- 'testing/web-platform/tests/IndexedDB2/historical.html',
- 'historical.html',
- 'Core',
- 'Storage: IndexedDB2',
+ "testing/web-platform/tests/IndexedDB2/historical.html",
+ "historical.html",
+ "Core",
+ "Storage: IndexedDB2",
),
(
- 'toolkit/mozilla-beta/fantasy_file.js',
- 'fantasy_file.js',
- 'Mock (import 2)',
- 'File first seen on mozilla-beta',
+ "toolkit/mozilla-beta/fantasy_file.js",
+ "fantasy_file.js",
+ "Mock (import 2)",
+ "File first seen on mozilla-beta",
),
]
assert EXPECTED_FILES_BUGZILLA_DATA_IMPORT_2 == sorted(
list(
FilesBugzillaMap.objects.all()
.values_list(
- 'path', 'file_name', 'bugzilla_component__product', 'bugzilla_component__component'
+ "path", "file_name", "bugzilla_component__product", "bugzilla_component__component"
)
- .order_by('path')
+ .order_by("path")
)
)
EXPECTED_BUGZILLA_COMPONENTS_IMPORT_2 = [
- ('Core', 'Storage: IndexedDB'),
- ('Core', 'Storage: IndexedDB2'),
- ('Firefox', 'General'),
- ('Mock (import 2)', 'File first seen on mozilla-beta'),
- ('Testing', 'web-platform-tests'),
- ('mozilla.org', 'Import 2: same product, different component'),
+ ("Core", "Storage: IndexedDB"),
+ ("Core", "Storage: IndexedDB2"),
+ ("Firefox", "General"),
+ ("Mock (import 2)", "File first seen on mozilla-beta"),
+ ("Testing", "web-platform-tests"),
+ ("mozilla.org", "Import 2: same product, different component"),
]
assert EXPECTED_BUGZILLA_COMPONENTS_IMPORT_2 == sorted(
list(
BugzillaComponent.objects.all()
- .values_list('product', 'component')
- .order_by('product', 'component')
+ .values_list("product", "component")
+ .order_by("product", "component")
)
)
diff --git a/tests/model/test_option_collection.py b/tests/model/test_option_collection.py
index ad9dfe4638e..3543b3a47f3 100644
--- a/tests/model/test_option_collection.py
+++ b/tests/model/test_option_collection.py
@@ -3,4 +3,4 @@
def test_option_collection_map(sample_option_collections):
option_map = OptionCollection.objects.get_option_collection_map()
- assert option_map == {'option_hash1': 'opt1', 'option_hash2': 'opt2'}
+ assert option_map == {"option_hash1": "opt1", "option_hash2": "opt2"}
diff --git a/tests/model/test_performance_signature.py b/tests/model/test_performance_signature.py
index 2b6fc710515..66ab7815141 100644
--- a/tests/model/test_performance_signature.py
+++ b/tests/model/test_performance_signature.py
@@ -6,7 +6,7 @@ def test_performance_signatures_with_different_applications(test_perf_signature)
# create a performance signature that only differs from another existing one by the application name
test_perf_signature.id = None
- test_perf_signature.application = 'chrome'
+ test_perf_signature.application = "chrome"
test_perf_signature.save()
assert PerformanceSignature.objects.count() == 2
diff --git a/tests/model/test_performance_tag.py b/tests/model/test_performance_tag.py
index 38e336783e9..511eb0683e3 100644
--- a/tests/model/test_performance_tag.py
+++ b/tests/model/test_performance_tag.py
@@ -6,7 +6,7 @@
def test_performance_tags_cannot_have_duplicate_names(transactional_db):
- PerformanceTag.objects.create(name='harness')
+ PerformanceTag.objects.create(name="harness")
with pytest.raises(IntegrityError):
- PerformanceTag.objects.create(name='harness')
+ PerformanceTag.objects.create(name="harness")
diff --git a/tests/model/test_suite_public_name.py b/tests/model/test_suite_public_name.py
index 8c86725bbce..5e7755905f8 100644
--- a/tests/model/test_suite_public_name.py
+++ b/tests/model/test_suite_public_name.py
@@ -1,10 +1,10 @@
import pytest
from django.db.utils import IntegrityError
-SAME_SUITE_PUBLIC_NAME = 'same suite name'
-SAME_TEST_PUBLIC_NAME = 'same test name'
-SAME_SUITE = 'same suite'
-SAME_TEST = 'same test'
+SAME_SUITE_PUBLIC_NAME = "same suite name"
+SAME_TEST_PUBLIC_NAME = "same test name"
+SAME_SUITE = "same suite"
+SAME_TEST = "same test"
@pytest.mark.parametrize(
@@ -19,16 +19,16 @@
SAME_TEST_PUBLIC_NAME,
SAME_SUITE,
SAME_SUITE,
- 'test',
- 'test_2',
+ "test",
+ "test_2",
),
(
SAME_SUITE_PUBLIC_NAME,
SAME_SUITE_PUBLIC_NAME,
SAME_TEST_PUBLIC_NAME,
SAME_TEST_PUBLIC_NAME,
- 'suite',
- 'suite_2',
+ "suite",
+ "suite_2",
SAME_TEST,
SAME_TEST,
),
@@ -37,10 +37,10 @@
SAME_SUITE_PUBLIC_NAME,
SAME_TEST_PUBLIC_NAME,
SAME_TEST_PUBLIC_NAME,
- 'suite',
- 'suite_2',
- 'test',
- 'test_2',
+ "suite",
+ "suite_2",
+ "test",
+ "test_2",
),
],
)
@@ -77,42 +77,42 @@ def test_trigger_public_suite_name_constraint(
"test_public_name, test_public_name_2,"
"suite, suite_2, test, test_2",
[
- (None, None, None, None, 'suite', 'suite_2', 'test', 'test_2'),
+ (None, None, None, None, "suite", "suite_2", "test", "test_2"),
(
- 'suite_public_name',
- 'suite_public_name_2',
+ "suite_public_name",
+ "suite_public_name_2",
None,
None,
- 'suite',
- 'suite_2',
- 'test',
- 'test_2',
+ "suite",
+ "suite_2",
+ "test",
+ "test_2",
),
- (None, None, 'test', 'test_2', 'suite', 'suite_2', 'test', 'test_2'),
- ('suite_public_name', None, 'test', None, 'suite', 'suite_2', 'test', 'test_2'),
+ (None, None, "test", "test_2", "suite", "suite_2", "test", "test_2"),
+ ("suite_public_name", None, "test", None, "suite", "suite_2", "test", "test_2"),
(
- 'suite_public_name',
- 'suite_public_name_2',
+ "suite_public_name",
+ "suite_public_name_2",
SAME_TEST_PUBLIC_NAME,
SAME_TEST_PUBLIC_NAME,
- 'suite',
- 'suite_2',
- 'test',
- 'test_2',
+ "suite",
+ "suite_2",
+ "test",
+ "test_2",
),
(
SAME_SUITE_PUBLIC_NAME,
SAME_SUITE_PUBLIC_NAME,
- 'test_public_name',
- 'test_public_name_2',
- 'suite',
- 'suite_2',
- 'test',
- 'test_2',
+ "test_public_name",
+ "test_public_name_2",
+ "suite",
+ "suite_2",
+ "test",
+ "test_2",
),
(
- 'suite_public_name',
- 'suite_public_name_2',
+ "suite_public_name",
+ "suite_public_name_2",
SAME_TEST_PUBLIC_NAME,
SAME_TEST_PUBLIC_NAME,
SAME_SUITE,
@@ -121,14 +121,14 @@ def test_trigger_public_suite_name_constraint(
SAME_TEST,
),
(
- 'suite_public_name',
- 'suite_public_name_2',
- 'test_public_name',
- 'test_public_name_2',
- 'suite',
- 'suite_2',
- 'test',
- 'test_2',
+ "suite_public_name",
+ "suite_public_name_2",
+ "test_public_name",
+ "test_public_name_2",
+ "suite",
+ "suite_2",
+ "test",
+ "test_2",
),
],
)
diff --git a/tests/model/test_time_to_triage.py b/tests/model/test_time_to_triage.py
index a4a63a5a25f..e511777eabe 100644
--- a/tests/model/test_time_to_triage.py
+++ b/tests/model/test_time_to_triage.py
@@ -4,7 +4,7 @@
def test_triage_due_alert_summary_created_monday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-05-30')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-05-30")
test_perf_alert_summary.triage_due_date = None
assert not test_perf_alert_summary.triage_due_date
@@ -16,7 +16,7 @@ def test_triage_due_alert_summary_created_monday(test_perf_alert_summary):
def test_triage_due_alert_summary_created_tuesday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-05-31')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-05-31")
test_perf_alert_summary.triage_due_date = None
assert not test_perf_alert_summary.triage_due_date
@@ -28,7 +28,7 @@ def test_triage_due_alert_summary_created_tuesday(test_perf_alert_summary):
def test_triage_due_alert_summary_created_wednesday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-01')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-01")
test_perf_alert_summary.triage_due_date = None
assert not test_perf_alert_summary.triage_due_date
@@ -40,7 +40,7 @@ def test_triage_due_alert_summary_created_wednesday(test_perf_alert_summary):
def test_triage_due_alert_summary_created_thursday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-02')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-02")
test_perf_alert_summary.triage_due_date = None
assert not test_perf_alert_summary.triage_due_date
@@ -53,7 +53,7 @@ def test_triage_due_alert_summary_created_thursday(test_perf_alert_summary):
def test_triage_due_alert_summary_created_friday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-03')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-03")
test_perf_alert_summary.triage_due_date = None
assert not test_perf_alert_summary.triage_due_date
@@ -66,7 +66,7 @@ def test_triage_due_alert_summary_created_friday(test_perf_alert_summary):
def test_triage_due_alert_summary_created_saturday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-04')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-04")
test_perf_alert_summary.triage_due_date = None
assert not test_perf_alert_summary.triage_due_date
@@ -78,7 +78,7 @@ def test_triage_due_alert_summary_created_saturday(test_perf_alert_summary):
def test_triage_due_alert_summary_created_sunday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-05')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-05")
test_perf_alert_summary.triage_due_date = None
assert not test_perf_alert_summary.triage_due_date
@@ -90,7 +90,7 @@ def test_triage_due_alert_summary_created_sunday(test_perf_alert_summary):
def test_alert_summary_with_modified_created_date(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-05-30')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-05-30")
test_perf_alert_summary.triage_due_date = None
assert not test_perf_alert_summary.triage_due_date
@@ -100,7 +100,7 @@ def test_alert_summary_with_modified_created_date(test_perf_alert_summary):
# created monday isoweekday = 1 + OKR = 3 => 4
assert test_perf_alert_summary.triage_due_date.isoweekday() == THU
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-03')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-03")
test_perf_alert_summary.update_status()
@@ -110,7 +110,7 @@ def test_alert_summary_with_modified_created_date(test_perf_alert_summary):
def test_bug_due_alert_summary_created_monday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-05-30')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-05-30")
test_perf_alert_summary.bug_due_date = None
assert not test_perf_alert_summary.bug_due_date
@@ -122,7 +122,7 @@ def test_bug_due_alert_summary_created_monday(test_perf_alert_summary):
def test_bug_due_alert_summary_created_tuesday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-05-31')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-05-31")
test_perf_alert_summary.bug_due_date = None
assert not test_perf_alert_summary.bug_due_date
@@ -135,7 +135,7 @@ def test_bug_due_alert_summary_created_tuesday(test_perf_alert_summary):
def test_bug_due_alert_summary_created_wednesday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-01')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-01")
test_perf_alert_summary.bug_due_date = None
assert not test_perf_alert_summary.bug_due_date
@@ -148,7 +148,7 @@ def test_bug_due_alert_summary_created_wednesday(test_perf_alert_summary):
def test_bug_due_alert_summary_created_thursday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-02')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-02")
test_perf_alert_summary.bug_due_date = None
assert not test_perf_alert_summary.bug_due_date
@@ -161,7 +161,7 @@ def test_bug_due_alert_summary_created_thursday(test_perf_alert_summary):
def test_bug_due_alert_summary_created_friday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-03')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-03")
test_perf_alert_summary.bug_due_date = None
assert not test_perf_alert_summary.bug_due_date
@@ -174,7 +174,7 @@ def test_bug_due_alert_summary_created_friday(test_perf_alert_summary):
def test_bug_due_alert_summary_created_saturday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-04')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-04")
test_perf_alert_summary.bug_due_date = None
assert not test_perf_alert_summary.bug_due_date
@@ -186,7 +186,7 @@ def test_bug_due_alert_summary_created_saturday(test_perf_alert_summary):
def test_bug_due_alert_summary_created_sunday(test_perf_alert_summary):
- test_perf_alert_summary.created = datetime.datetime.fromisoformat('2022-06-05')
+ test_perf_alert_summary.created = datetime.datetime.fromisoformat("2022-06-05")
test_perf_alert_summary.bug_due_date = None
assert not test_perf_alert_summary.bug_due_date
diff --git a/tests/perf/auto_perf_sheriffing/conftest.py b/tests/perf/auto_perf_sheriffing/conftest.py
index 237acfcb2cb..ef8e4ceb828 100644
--- a/tests/perf/auto_perf_sheriffing/conftest.py
+++ b/tests/perf/auto_perf_sheriffing/conftest.py
@@ -22,42 +22,42 @@
from treeherder.services.taskcluster import notify_client_factory
from treeherder.utils import default_serializer
-load_json_fixture = SampleDataJSONLoader('sherlock')
+load_json_fixture = SampleDataJSONLoader("sherlock")
@pytest.fixture(scope="module")
def record_context_sample():
# contains 5 data points that can be backfilled
- return load_json_fixture('recordContext.json')
+ return load_json_fixture("recordContext.json")
-@pytest.fixture(params=['totally_broken_json', 'missing_job_fields', 'null_job_fields'])
+@pytest.fixture(params=["totally_broken_json", "missing_job_fields", "null_job_fields"])
def broken_context_str(record_context_sample: dict, request) -> list:
context_str = json.dumps(record_context_sample)
specific = request.param
- if specific == 'totally_broken_json':
- return copy(context_str).replace(r'"', '<')
+ if specific == "totally_broken_json":
+ return copy(context_str).replace(r'"', "<")
else:
record_copy = deepcopy(record_context_sample)
- if specific == 'missing_job_fields':
+ if specific == "missing_job_fields":
for data_point in record_copy:
- del data_point['job_id']
+ del data_point["job_id"]
- elif specific == 'null_job_fields':
+ elif specific == "null_job_fields":
for data_point in record_copy:
- data_point['job_id'] = None
+ data_point["job_id"] = None
return json.dumps(record_copy)
-@pytest.fixture(params=['preliminary', 'from_non_linux'])
+@pytest.fixture(params=["preliminary", "from_non_linux"])
def record_unsuited_for_backfill(test_perf_alert, request):
report = BackfillReport.objects.create(summary=test_perf_alert.summary)
- if request.param == 'preliminary':
+ if request.param == "preliminary":
return BackfillRecord.objects.create(alert=test_perf_alert, report=report)
- elif request.param == 'from_non_linux':
+ elif request.param == "from_non_linux":
# test_perf_alert originates from wind platform, by default
return BackfillRecord.objects.create(
alert=test_perf_alert, report=report, status=BackfillRecord.READY_FOR_PROCESSING
@@ -69,9 +69,9 @@ def record_with_job_symbol(test_perf_alert):
report = BackfillReport.objects.create(summary=test_perf_alert.summary)
job_group = JobGroup.objects.create(
- symbol='Btime', name='Browsertime performance tests on Firefox'
+ symbol="Btime", name="Browsertime performance tests on Firefox"
)
- job_type = JobType.objects.create(symbol='Bogo', name='Bogo tests')
+ job_type = JobType.objects.create(symbol="Bogo", name="Bogo tests")
return BackfillRecord.objects.create(
alert=test_perf_alert,
report=report,
@@ -81,15 +81,15 @@ def record_with_job_symbol(test_perf_alert):
)
-@pytest.fixture(params=['no_job_tier', 'no_job_group', 'no_job_type'])
+@pytest.fixture(params=["no_job_tier", "no_job_group", "no_job_type"])
def record_with_missing_job_symbol_components(record_with_job_symbol, request):
- if request.param == 'no_job_tier':
+ if request.param == "no_job_tier":
record_with_job_symbol.job_tier = None
record_with_job_symbol.save()
- elif request.param == 'no_job_group':
+ elif request.param == "no_job_group":
record_with_job_symbol.job_group = None
record_with_job_symbol.save()
- elif request.param == 'no_job_type':
+ elif request.param == "no_job_type":
record_with_job_symbol.job_type = None
record_with_job_symbol.save()
@@ -97,22 +97,22 @@ def record_with_missing_job_symbol_components(record_with_job_symbol, request):
def prepare_record_with_search_str(record_with_job_symbol, search_str_with):
- if search_str_with == 'no_job_group':
+ if search_str_with == "no_job_group":
record_with_job_symbol.job_group = None
record_with_job_symbol.save()
- elif search_str_with == 'no_job_type':
+ elif search_str_with == "no_job_type":
record_with_job_symbol.job_type = None
record_with_job_symbol.save()
return record_with_job_symbol
-@pytest.fixture(params=['windows', 'linux', 'osx'])
+@pytest.fixture(params=["windows", "linux", "osx"])
def platform_specific_signature(
test_repository, test_perf_framework, request
) -> PerformanceSignature:
new_platform = MachinePlatform.objects.create(
- os_name=request.param, platform=request.param, architecture='x86'
+ os_name=request.param, platform=request.param, architecture="x86"
)
return create_perf_signature(test_perf_framework, test_repository, new_platform)
@@ -153,7 +153,7 @@ def record_from_mature_report(test_perf_alert_2):
@pytest.fixture
def report_maintainer_mock():
- return type('', (), {'provide_updated_reports': lambda *params: []})
+ return type("", (), {"provide_updated_reports": lambda *params: []})
@pytest.fixture
@@ -161,9 +161,9 @@ def backfill_tool_mock():
def backfill_job(job_id):
if job_id is None:
raise Job.DoesNotExist
- return 'RANDOM_TASK_ID'
+ return "RANDOM_TASK_ID"
- return type('', (), {'backfill_job': backfill_job})
+ return type("", (), {"backfill_job": backfill_job})
@pytest.fixture
@@ -174,17 +174,17 @@ def secretary():
@pytest.fixture
def sherlock_settings(secretary, db):
secretary.validate_settings()
- return PerformanceSettings.objects.get(name='perf_sheriff_bot')
+ return PerformanceSettings.objects.get(name="perf_sheriff_bot")
@pytest.fixture
def empty_sheriff_settings(secretary):
all_of_them = 1_000_000_000
secretary.validate_settings()
- secretary.consume_backfills(on_platform='linux', amount=all_of_them)
- secretary.consume_backfills(on_platform='windows', amount=all_of_them)
- secretary.consume_backfills(on_platform='osx', amount=all_of_them)
- return PerformanceSettings.objects.get(name='perf_sheriff_bot')
+ secretary.consume_backfills(on_platform="linux", amount=all_of_them)
+ secretary.consume_backfills(on_platform="windows", amount=all_of_them)
+ secretary.consume_backfills(on_platform="osx", amount=all_of_them)
+ return PerformanceSettings.objects.get(name="perf_sheriff_bot")
# For testing Secretary
@@ -224,7 +224,7 @@ def _create_record(alert):
@pytest.fixture
def notify_client_mock() -> taskcluster.Notify:
return MagicMock(
- spec=notify_client_factory('https://fakerooturl.org', 'FAKE_CLIENT_ID', 'FAKE_ACCESS_TOKEN')
+ spec=notify_client_factory("https://fakerooturl.org", "FAKE_CLIENT_ID", "FAKE_ACCESS_TOKEN")
)
@@ -239,13 +239,13 @@ def tc_notify_mock(monkeypatch):
mock = MagicMock()
response = Response()
- mock.email.return_value = {'response': response}
+ mock.email.return_value = {"response": response}
def mockreturn(*arg, **kwargs):
nonlocal mock
return mock
- monkeypatch.setattr(tc_services, 'notify_client_factory', mockreturn)
+ monkeypatch.setattr(tc_services, "notify_client_factory", mockreturn)
return mock
diff --git a/tests/perf/auto_perf_sheriffing/test_backfill_reports/conftest.py b/tests/perf/auto_perf_sheriffing/test_backfill_reports/conftest.py
index 570195c74ef..5e93cb453ae 100644
--- a/tests/perf/auto_perf_sheriffing/test_backfill_reports/conftest.py
+++ b/tests/perf/auto_perf_sheriffing/test_backfill_reports/conftest.py
@@ -16,13 +16,13 @@
RANDOM_STRINGS = set()
-@pytest.fixture(scope='module')
+@pytest.fixture(scope="module")
def alerts_picker():
# real-world instance
return AlertsPicker(
max_alerts=5,
max_improvements=2,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
@@ -34,19 +34,19 @@ def mock_backfill_context_fetcher(backfill_record_context):
@pytest.fixture
def option_collection():
- option = Option.objects.create(name='opt')
- return OptionCollection.objects.create(option_collection_hash='my_option_hash', option=option)
+ option = Option.objects.create(name="opt")
+ return OptionCollection.objects.create(option_collection_hash="my_option_hash", option=option)
@pytest.fixture
def relevant_platform():
- return MachinePlatform.objects.create(os_name='win', platform='windows10', architecture='x86')
+ return MachinePlatform.objects.create(os_name="win", platform="windows10", architecture="x86")
@pytest.fixture
def irrelevant_platform():
return MachinePlatform.objects.create(
- os_name='OS_OF_NO_INTEREST', platform='PLATFORM_OF_NO_INTEREST', architecture='x86'
+ os_name="OS_OF_NO_INTEREST", platform="PLATFORM_OF_NO_INTEREST", architecture="x86"
)
@@ -56,7 +56,7 @@ def unique_random_string():
def _unique_random_string(length=14):
while True:
- random_string = ''.join(random.choice(LETTERS) for _ in range(length))
+ random_string = "".join(random.choice(LETTERS) for _ in range(length))
if random_string not in RANDOM_STRINGS:
RANDOM_STRINGS.add(random_string)
return random_string
@@ -111,16 +111,16 @@ def _create_alerts(summary, relevant=True, amount=3):
def test_many_various_alerts():
alerts = [Mock(spec=PerformanceAlert) for _ in range(10)]
platforms = (
- 'windows10-64-shippable',
- 'windows10-64-shippable',
- 'windows7-32-shippable',
- 'windows7-32-shippable',
- 'linux64-shippable-qr',
- 'linux64-shippable-qr',
- 'osx-10-10-shippable',
- 'osx-10-10-shippable',
- 'android-hw-pix-7-1-android-aarch64',
- 'android-hw-pix-7-1-android-aarch64',
+ "windows10-64-shippable",
+ "windows10-64-shippable",
+ "windows7-32-shippable",
+ "windows7-32-shippable",
+ "linux64-shippable-qr",
+ "linux64-shippable-qr",
+ "osx-10-10-shippable",
+ "osx-10-10-shippable",
+ "android-hw-pix-7-1-android-aarch64",
+ "android-hw-pix-7-1-android-aarch64",
)
reversed_magnitudes = list(reversed(range(len(alerts))))
@@ -137,7 +137,7 @@ def test_many_various_alerts():
@pytest.fixture
def test_few_various_alerts():
alerts = [Mock(spec=PerformanceAlert) for _ in range(2)]
- platforms = ('windows7-32-shippable', 'linux64-shippable-qr')
+ platforms = ("windows7-32-shippable", "linux64-shippable-qr")
reversed_magnitudes = list(reversed(range(len(alerts))))
toggle = True
for idx, alert in enumerate(alerts):
@@ -151,7 +151,7 @@ def test_few_various_alerts():
@pytest.fixture
def test_macosx_alert():
alert = Mock(spec=PerformanceAlert)
- platform = 'macosx1015-64-shippable-qr'
+ platform = "macosx1015-64-shippable-qr"
alert.series_signature.platform.platform = platform
alert.is_regression = True
return alert
@@ -161,11 +161,11 @@ def test_macosx_alert():
def test_few_regressions():
alerts = [Mock(spec=PerformanceAlert) for _ in range(5)]
platforms = (
- 'windows10-64-shippable',
- 'windows7-32-shippable',
- 'linux64-shippable-qr',
- 'osx-10-10-shippable',
- 'android-hw-pix-7-1-android-aarch64',
+ "windows10-64-shippable",
+ "windows7-32-shippable",
+ "linux64-shippable-qr",
+ "osx-10-10-shippable",
+ "android-hw-pix-7-1-android-aarch64",
)
reversed_magnitudes = list(reversed(range(len(alerts))))
for idx, alert in enumerate(alerts):
@@ -187,10 +187,10 @@ def test_few_improvements(test_few_regressions):
def test_bad_platform_names():
alerts = [Mock(spec=PerformanceAlert) for _ in range(4)]
platforms = (
- 'rfvrtgb', # noqa
- '4.0',
- '54dcwec58', # noqa
- '8y6 t g',
+ "rfvrtgb", # noqa
+ "4.0",
+ "54dcwec58", # noqa
+ "8y6 t g",
)
for idx, alert in enumerate(alerts):
alert.series_signature.platform.platform = platforms[idx]
@@ -204,7 +204,7 @@ def test_bad_platform_names():
def prepare_graph_data_scenario(push_ids_to_keep, highlighted_push_id, perf_alert, perf_signature):
original_job_count = Job.objects.count()
- selectable_jobs = Job.objects.filter(push_id__in=push_ids_to_keep).order_by('push_id', 'id')
+ selectable_jobs = Job.objects.filter(push_id__in=push_ids_to_keep).order_by("push_id", "id")
Job.objects.exclude(push_id__in=push_ids_to_keep).delete()
assert Job.objects.count() < original_job_count
diff --git a/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_alerts_picker.py b/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_alerts_picker.py
index 46652a3680f..bb60c7c6868 100644
--- a/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_alerts_picker.py
+++ b/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_alerts_picker.py
@@ -11,14 +11,14 @@ def test_init():
AlertsPicker(
max_alerts=0,
max_improvements=2,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
with pytest.raises(ValueError):
AlertsPicker(
max_alerts=3,
max_improvements=0,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
with pytest.raises(ValueError):
@@ -37,15 +37,15 @@ def count_alert_types(alerts):
picker = AlertsPicker(
max_alerts=5,
max_improvements=2,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
expected_platforms_order = (
- 'windows10-64-shippable',
- 'windows7-32-shippable',
- 'linux64-shippable-qr',
- 'osx-10-10-shippable',
- 'windows10-64-shippable',
+ "windows10-64-shippable",
+ "windows7-32-shippable",
+ "linux64-shippable-qr",
+ "osx-10-10-shippable",
+ "windows10-64-shippable",
)
expected_magnitudes_order = (4, 3, 2, 1, 4)
@@ -73,7 +73,7 @@ def count_alert_types(alerts):
picker = AlertsPicker(
max_alerts=5,
max_improvements=2,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
selected_alerts = picker._ensure_alerts_variety(test_few_regressions)
@@ -101,7 +101,7 @@ def count_alert_types(alerts):
picker = AlertsPicker(
max_alerts=1,
max_improvements=2,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
selected_alerts = picker._ensure_alerts_variety(test_few_various_alerts)
@@ -112,17 +112,17 @@ def count_alert_types(alerts):
@pytest.mark.parametrize(
- ('max_alerts, expected_alerts_platforms'), # noqa
+ ("max_alerts, expected_alerts_platforms"), # noqa
[
- (5, ('windows10', 'windows7', 'linux', 'osx', 'android')),
- (8, ('windows10', 'windows7', 'linux', 'osx', 'android', 'windows10', 'windows7', 'linux')),
+ (5, ("windows10", "windows7", "linux", "osx", "android")),
+ (8, ("windows10", "windows7", "linux", "osx", "android", "windows10", "windows7", "linux")),
],
)
def test_ensure_platform_variety(test_many_various_alerts, max_alerts, expected_alerts_platforms):
picker = AlertsPicker(
max_alerts=max_alerts,
max_improvements=2,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
picked_alerts = picker._ensure_platform_variety(test_many_various_alerts)
@@ -134,17 +134,17 @@ def test_os_relevance():
picker = AlertsPicker(
max_alerts=5,
max_improvements=2,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
- assert 5 == picker._os_relevance('windows10')
- assert 4 == picker._os_relevance('windows7')
- assert 3 == picker._os_relevance('linux')
- assert 2 == picker._os_relevance('osx')
- assert 2 == picker._os_relevance('macosx') # ensure macosx has the same relevance as osx
- assert 1 == picker._os_relevance('android')
+ assert 5 == picker._os_relevance("windows10")
+ assert 4 == picker._os_relevance("windows7")
+ assert 3 == picker._os_relevance("linux")
+ assert 2 == picker._os_relevance("osx")
+ assert 2 == picker._os_relevance("macosx") # ensure macosx has the same relevance as osx
+ assert 1 == picker._os_relevance("android")
with pytest.raises(ValueError):
- picker._os_relevance('some weird OS')
+ picker._os_relevance("some weird OS")
def test_has_relevant_platform(
@@ -153,7 +153,7 @@ def test_has_relevant_platform(
picker = AlertsPicker(
max_alerts=5,
max_improvements=2,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
for alert in test_many_various_alerts:
@@ -167,7 +167,7 @@ def test_extract_by_relevant_platforms(test_many_various_alerts, test_bad_platfo
picker = AlertsPicker(
max_alerts=5,
max_improvements=2,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
all_alerts = test_many_various_alerts + test_bad_platform_names
@@ -183,20 +183,20 @@ def count_alert_types(alerts):
picker = AlertsPicker(
max_alerts=5,
max_improvements=2,
- platforms_of_interest=('windows10', 'windows7', 'linux', 'osx', 'android'),
+ platforms_of_interest=("windows10", "windows7", "linux", "osx", "android"),
)
expected_platforms_order = (
- 'android-hw-pix-7-1-android-aarch64',
- 'windows10-64-shippable',
- 'windows7-32-shippable',
- 'linux64-shippable-qr',
- 'osx-10-10-shippable',
- 'osx-10-10-shippable',
- 'android-hw-pix-7-1-android-aarch64',
- 'windows10-64-shippable',
- 'windows7-32-shippable',
- 'linux64-shippable-qr',
+ "android-hw-pix-7-1-android-aarch64",
+ "windows10-64-shippable",
+ "windows7-32-shippable",
+ "linux64-shippable-qr",
+ "osx-10-10-shippable",
+ "osx-10-10-shippable",
+ "android-hw-pix-7-1-android-aarch64",
+ "windows10-64-shippable",
+ "windows7-32-shippable",
+ "linux64-shippable-qr",
)
expected_magnitudes_order = (1, 9, 7, 5, 3, 2, 0, 8, 6, 4)
diff --git a/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_identify_retriggerables.py b/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_identify_retriggerables.py
index 4b5c2c47195..dbdaea8e494 100644
--- a/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_identify_retriggerables.py
+++ b/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_identify_retriggerables.py
@@ -24,16 +24,16 @@ def test_identify_retriggerables_as_unit():
# its small private methods
annotated_data_points = [
- {'job_id': 1, 'push_id': 1},
- {'job_id': 2, 'push_id': 2},
- {'job_id': 3, 'push_id': 2},
- {'job_id': 4, 'push_id': 3},
- {'job_id': 5, 'push_id': 3},
- {'job_id': 6, 'push_id': 3},
+ {"job_id": 1, "push_id": 1},
+ {"job_id": 2, "push_id": 2},
+ {"job_id": 3, "push_id": 2},
+ {"job_id": 4, "push_id": 3},
+ {"job_id": 5, "push_id": 3},
+ {"job_id": 6, "push_id": 3},
]
operation = IdentifyAlertRetriggerables(max_data_points=5, time_interval=one_day)
flattened_data_points = operation._one_data_point_per_push(annotated_data_points) # noqa
- push_counter = Counter([data_point['push_id'] for data_point in flattened_data_points])
+ push_counter = Counter([data_point["push_id"] for data_point in flattened_data_points])
assert max(count for count in push_counter.values()) == 1
diff --git a/tests/perf/auto_perf_sheriffing/test_backfill_tool.py b/tests/perf/auto_perf_sheriffing/test_backfill_tool.py
index 983f6664617..89d60c8c4ee 100644
--- a/tests/perf/auto_perf_sheriffing/test_backfill_tool.py
+++ b/tests/perf/auto_perf_sheriffing/test_backfill_tool.py
@@ -6,9 +6,9 @@
class TestBackfillTool:
- FAKE_ROOT_URL = 'https://fakerooturl.org'
- FAKE_OPTIONS = (FAKE_ROOT_URL, 'FAKE_CLIENT_ID', 'FAKE_ACCESS_TOKEN')
- MISSING_JOB_ID = '12830123912'
+ FAKE_ROOT_URL = "https://fakerooturl.org"
+ FAKE_OPTIONS = (FAKE_ROOT_URL, "FAKE_CLIENT_ID", "FAKE_ACCESS_TOKEN")
+ MISSING_JOB_ID = "12830123912"
def test_backfilling_missing_job_errors_out(self, db):
backfill_tool = BackfillTool(TaskclusterModelNullObject(*self.FAKE_OPTIONS))
diff --git a/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py b/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py
index e0206cdcc75..7416e1f7ac1 100644
--- a/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py
+++ b/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py
@@ -27,12 +27,12 @@ def test_email_is_sent_after_successful_backfills(
)
sherlock.sheriff(
since=EPOCH,
- frameworks=['test_talos'],
+ frameworks=["test_talos"],
repositories=[test_settings.TREEHERDER_TEST_REPOSITORY_NAME],
)
record_ready_for_processing.refresh_from_db()
assert BackfillNotificationRecord.objects.count() == 1
- call_command('report_backfill_outcome')
+ call_command("report_backfill_outcome")
assert BackfillNotificationRecord.objects.count() == 0
@@ -56,12 +56,12 @@ def test_email_is_still_sent_if_context_is_too_corrupt_to_be_actionable(
)
sherlock.sheriff(
since=EPOCH,
- frameworks=['test_talos'],
+ frameworks=["test_talos"],
repositories=[test_settings.TREEHERDER_TEST_REPOSITORY_NAME],
)
assert BackfillNotificationRecord.objects.count() == 1
- call_command('report_backfill_outcome')
+ call_command("report_backfill_outcome")
assert BackfillNotificationRecord.objects.count() == 0
@@ -77,21 +77,21 @@ def test_no_email_is_sent_if_runtime_exceeded(
sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary, no_time_left)
try:
- sherlock.sheriff(since=EPOCH, frameworks=['raptor', 'talos'], repositories=['autoland'])
+ sherlock.sheriff(since=EPOCH, frameworks=["raptor", "talos"], repositories=["autoland"])
except MaxRuntimeExceeded:
pass
assert BackfillNotificationRecord.objects.count() == 0
- call_command('report_backfill_outcome')
+ call_command("report_backfill_outcome")
assert BackfillNotificationRecord.objects.count() == 0
@pytest.mark.parametrize(
- 'framework, repository',
+ "framework, repository",
[
- ('non_existent_framework', test_settings.TREEHERDER_TEST_REPOSITORY_NAME),
- ('test_talos', 'non_existent_repository'),
- ('non_existent_framework', 'non_existent_repository'),
+ ("non_existent_framework", test_settings.TREEHERDER_TEST_REPOSITORY_NAME),
+ ("test_talos", "non_existent_repository"),
+ ("non_existent_framework", "non_existent_repository"),
],
)
def test_no_email_is_sent_for_untargeted_alerts(
@@ -117,5 +117,5 @@ def test_no_email_is_sent_for_untargeted_alerts(
record_ready_for_processing.refresh_from_db()
assert BackfillNotificationRecord.objects.count() == 0
- call_command('report_backfill_outcome')
+ call_command("report_backfill_outcome")
assert BackfillNotificationRecord.objects.count() == 0
diff --git a/tests/perf/auto_perf_sheriffing/test_secretary.py b/tests/perf/auto_perf_sheriffing/test_secretary.py
index 32b8ace191e..06b8f0cd075 100644
--- a/tests/perf/auto_perf_sheriffing/test_secretary.py
+++ b/tests/perf/auto_perf_sheriffing/test_secretary.py
@@ -42,15 +42,15 @@ def record_backfilled(test_perf_alert, record_context_sample):
@pytest.fixture
def range_dates(record_context_sample):
- from_date = datetime.fromisoformat(record_context_sample[0]['push_timestamp'])
- to_date = datetime.fromisoformat(record_context_sample[-1]['push_timestamp'])
+ from_date = datetime.fromisoformat(record_context_sample[0]["push_timestamp"])
+ to_date = datetime.fromisoformat(record_context_sample[-1]["push_timestamp"])
return {
- 'before_date': from_date - timedelta(days=5),
- 'from_date': from_date,
- 'in_range_date': from_date + timedelta(hours=13),
- 'to_date': to_date,
- 'after_date': to_date + timedelta(days=3),
+ "before_date": from_date - timedelta(days=5),
+ "from_date": from_date,
+ "in_range_date": from_date + timedelta(hours=13),
+ "to_date": to_date,
+ "after_date": to_date + timedelta(days=3),
}
@@ -58,28 +58,28 @@ def range_dates(record_context_sample):
def outcome_checking_pushes(
create_push, range_dates, record_context_sample, test_repository, test_repository_2
):
- from_push_id = record_context_sample[0]['push_id']
- to_push_id = record_context_sample[-1]['push_id']
+ from_push_id = record_context_sample[0]["push_id"]
+ to_push_id = record_context_sample[-1]["push_id"]
pushes = [
- create_push(test_repository, revision=uuid.uuid4(), time=range_dates['before_date']),
+ create_push(test_repository, revision=uuid.uuid4(), time=range_dates["before_date"]),
create_push(
test_repository,
revision=uuid.uuid4(),
- time=range_dates['from_date'],
+ time=range_dates["from_date"],
explicit_id=from_push_id,
),
- create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
- create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
- create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
- create_push(test_repository, revision=uuid.uuid4(), time=range_dates['in_range_date']),
+ create_push(test_repository, revision=uuid.uuid4(), time=range_dates["in_range_date"]),
+ create_push(test_repository, revision=uuid.uuid4(), time=range_dates["in_range_date"]),
+ create_push(test_repository, revision=uuid.uuid4(), time=range_dates["in_range_date"]),
+ create_push(test_repository, revision=uuid.uuid4(), time=range_dates["in_range_date"]),
create_push(
test_repository,
revision=uuid.uuid4(),
- time=range_dates['to_date'],
+ time=range_dates["to_date"],
explicit_id=to_push_id,
),
- create_push(test_repository, revision=uuid.uuid4(), time=range_dates['after_date']),
+ create_push(test_repository, revision=uuid.uuid4(), time=range_dates["after_date"]),
]
return pushes
@@ -92,7 +92,7 @@ def successful_jobs(outcome_checking_pushes, eleven_jobs_stored):
pairs = zip(outcome_checking_pushes, jobs)
for push, job in pairs:
job.push = push
- job.result = 'success'
+ job.result = "success"
job.job_type_id = JOB_TYPE_ID
job.save()
_successful_jobs.append(job)
@@ -103,7 +103,7 @@ def successful_jobs(outcome_checking_pushes, eleven_jobs_stored):
def jobs_with_one_failed(successful_jobs):
index_in_range = get_middle_index(successful_jobs)
job_to_fail = successful_jobs[index_in_range]
- job_to_fail.result = 'testfailed'
+ job_to_fail.result = "testfailed"
job_to_fail.save()
@@ -111,7 +111,7 @@ def jobs_with_one_failed(successful_jobs):
def jobs_with_one_pending(successful_jobs):
index_in_range = get_middle_index(successful_jobs)
job_pending = successful_jobs[index_in_range]
- job_pending.result = 'unknown'
+ job_pending.result = "unknown"
job_pending.save()
@@ -120,17 +120,17 @@ def jobs_with_one_pending_and_one_failed(successful_jobs):
index_in_range = get_middle_index(successful_jobs)
next_index_in_range = get_middle_index(successful_jobs) + 1
job_pending = successful_jobs[index_in_range]
- job_pending.result = 'unknown'
+ job_pending.result = "unknown"
job_pending.save()
job_to_fail = successful_jobs[next_index_in_range]
- job_to_fail.result = 'testfailed'
+ job_to_fail.result = "testfailed"
job_to_fail.save()
@pytest.fixture
def get_outcome_checker_mock():
def get_outcome_checker_mock(outcome: OutcomeStatus):
- return type('', (), {'check': lambda *params: outcome})
+ return type("", (), {"check": lambda *params: outcome})
return get_outcome_checker_mock
@@ -184,8 +184,8 @@ def test_outcome_checker_identifies_pushes_in_range(
):
total_pushes = Push.objects.count()
- from_time = range_dates['from_date']
- to_time = range_dates['to_date']
+ from_time = range_dates["from_date"]
+ to_time = range_dates["to_date"]
total_outside_pushes = Push.objects.filter(
Q(time__lt=from_time) | Q(time__gt=to_time), repository=test_repository
diff --git a/tests/perf/auto_perf_sheriffing/test_sherlock.py b/tests/perf/auto_perf_sheriffing/test_sherlock.py
index f6684f5707b..d23de352e85 100644
--- a/tests/perf/auto_perf_sheriffing/test_sherlock.py
+++ b/tests/perf/auto_perf_sheriffing/test_sherlock.py
@@ -35,16 +35,16 @@ def test_record_job_symbol_is_none_if_component_misses(record_with_missing_job_s
def test_record_correct_job_symbol(record_with_job_symbol):
- expected_job_symbol = 'Btime[tier 2](Bogo)'
+ expected_job_symbol = "Btime[tier 2](Bogo)"
assert record_with_job_symbol.job_symbol == expected_job_symbol
@pytest.mark.parametrize(
- 'search_str_with, expected_search_str',
+ "search_str_with, expected_search_str",
[
- ('all_fields', 'win7,Browsertime performance tests on Firefox,Bogo tests,Bogo'),
- ('no_job_group', 'win7,Bogo tests,Bogo'),
- ('no_job_type', 'win7,Browsertime performance tests on Firefox'),
+ ("all_fields", "win7,Browsertime performance tests on Firefox,Bogo tests,Bogo"),
+ ("no_job_group", "win7,Bogo tests,Bogo"),
+ ("no_job_type", "win7,Browsertime performance tests on Firefox"),
],
)
def test_record_search_str(record_with_job_symbol, search_str_with, expected_search_str):
@@ -78,7 +78,7 @@ def test_records_change_to_ready_for_processing(
backfill_tool_mock,
secretary,
)
- sherlock.sheriff(since=EPOCH, frameworks=['raptor', 'talos'], repositories=['autoland'])
+ sherlock.sheriff(since=EPOCH, frameworks=["raptor", "talos"], repositories=["autoland"])
assert preliminary_records.count() == 1
assert ready_records.count() == 1
@@ -123,7 +123,7 @@ def test_records_and_db_limits_remain_unchanged_if_no_records_suitable_for_backf
record_unsuited_for_backfill,
):
sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary)
- sherlock._backfill(['test_talos'], [test_settings.TREEHERDER_TEST_REPOSITORY_NAME])
+ sherlock._backfill(["test_talos"], [test_settings.TREEHERDER_TEST_REPOSITORY_NAME])
assert not has_changed(record_unsuited_for_backfill)
assert not has_changed(sherlock_settings)
@@ -137,7 +137,7 @@ def test_records_remain_unchanged_if_no_backfills_left(
empty_sheriff_settings,
):
sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary)
- sherlock._backfill(['test_talos'], [test_settings.TREEHERDER_TEST_REPOSITORY_NAME])
+ sherlock._backfill(["test_talos"], [test_settings.TREEHERDER_TEST_REPOSITORY_NAME])
assert not has_changed(record_ready_for_processing)
@@ -152,7 +152,7 @@ def test_records_and_db_limits_remain_unchanged_if_runtime_exceeded(
no_time_left = timedelta(seconds=0)
sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary, no_time_left)
try:
- sherlock.sheriff(since=EPOCH, frameworks=['raptor', 'talos'], repositories=['autoland'])
+ sherlock.sheriff(since=EPOCH, frameworks=["raptor", "talos"], repositories=["autoland"])
except MaxRuntimeExceeded:
pass
@@ -170,11 +170,11 @@ def test_db_limits_update_if_backfills_left(
targeted_platform = record_ready_for_processing.platform.platform
initial_backfills = secretary.backfills_left(on_platform=targeted_platform)
- assert initial_backfills == json.loads(sherlock_settings.settings)['limits'][targeted_platform]
+ assert initial_backfills == json.loads(sherlock_settings.settings)["limits"][targeted_platform]
sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary)
sherlock.sheriff(
since=EPOCH,
- frameworks=['test_talos'],
+ frameworks=["test_talos"],
repositories=[test_settings.TREEHERDER_TEST_REPOSITORY_NAME],
)
@@ -198,7 +198,7 @@ def test_backfilling_gracefully_handles_invalid_json_contexts_without_blowing_up
try:
sherlock.sheriff(
since=EPOCH,
- frameworks=['test_talos'],
+ frameworks=["test_talos"],
repositories=[test_settings.TREEHERDER_TEST_REPOSITORY_NAME],
)
except (JSONDecodeError, KeyError, Job.DoesNotExist, Push.DoesNotExist):
diff --git a/tests/perf/auto_sheriffing_criteria/conftest.py b/tests/perf/auto_sheriffing_criteria/conftest.py
index ad5e91fb04b..b387dd1ba22 100644
--- a/tests/perf/auto_sheriffing_criteria/conftest.py
+++ b/tests/perf/auto_sheriffing_criteria/conftest.py
@@ -5,8 +5,8 @@
from treeherder.perf.sheriffing_criteria import NonBlockableSession
-CASSETTE_LIBRARY_DIR = 'tests/sample_data/betamax_cassettes/perf_sheriffing_criteria'
-CASSETTES_RECORDING_DATE = 'June 2nd, 2020' # when VCR has been conducted
+CASSETTE_LIBRARY_DIR = "tests/sample_data/betamax_cassettes/perf_sheriffing_criteria"
+CASSETTES_RECORDING_DATE = "June 2nd, 2020" # when VCR has been conducted
@pytest.fixture
diff --git a/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py b/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
index e763f1ea216..994466fd9e4 100644
--- a/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
+++ b/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
@@ -30,12 +30,12 @@ def concrete_formula_classes() -> List[Type[BugzillaFormula]]:
return [EngineerTractionFormula, FixRatioFormula]
-@pytest.mark.parametrize('formula', formula_instances())
+@pytest.mark.parametrize("formula", formula_instances())
def test_formula_exposes_quantifying_period(formula, nonblock_session):
assert formula.quantifying_period == settings.QUANTIFYING_PERIOD
-@pytest.mark.parametrize('formula', bugzilla_formula_instances())
+@pytest.mark.parametrize("formula", bugzilla_formula_instances())
def test_formula_exposes_oldest_timestamp(formula, nonblock_session):
no_older_than = datetime.now() - timedelta(weeks=24, seconds=5)
@@ -48,9 +48,9 @@ def test_total_alerts_formula_exposes_oldest_timestamp():
assert TotalAlertsFormula().oldest_timestamp >= no_older_than
-@pytest.mark.parametrize('formula', bugzilla_formula_instances())
+@pytest.mark.parametrize("formula", bugzilla_formula_instances())
@pytest.mark.parametrize(
- 'cooled_down_bug',
+ "cooled_down_bug",
[
{"creation_time": "2020-05-18T15:20:55Z"}, # older than 2 weeks
{"creation_time": "2020-05-04T15:20:55Z"}, # older than 1 month
@@ -61,13 +61,13 @@ def test_formula_correctly_detects_cooled_down_bugs(cooled_down_bug, formula, no
assert formula.has_cooled_down(cooled_down_bug)
-@pytest.mark.parametrize('formula', bugzilla_formula_instances())
+@pytest.mark.parametrize("formula", bugzilla_formula_instances())
@pytest.mark.parametrize(
- 'not_cooled_down_bug',
+ "not_cooled_down_bug",
[
- {'creation_time': '2020-05-31T00:00:00Z'}, # 2 days old
- {'creation_time': '2020-05-26T00:00:00Z'}, # 1 week old
- {'creation_time': '2020-05-19T23:00:00Z'}, # ~2 weeks old, except for 1 hour
+ {"creation_time": "2020-05-31T00:00:00Z"}, # 2 days old
+ {"creation_time": "2020-05-26T00:00:00Z"}, # 1 week old
+ {"creation_time": "2020-05-19T23:00:00Z"}, # ~2 weeks old, except for 1 hour
],
)
def test_formula_detects_bugs_that_didnt_cool_down_yet(
@@ -76,14 +76,14 @@ def test_formula_detects_bugs_that_didnt_cool_down_yet(
assert not formula.has_cooled_down(not_cooled_down_bug)
-@pytest.mark.parametrize('formula', bugzilla_formula_instances())
-@pytest.mark.parametrize('bad_structured_bug', [{}, {'creation_time': 'jiberish-date'}])
+@pytest.mark.parametrize("formula", bugzilla_formula_instances())
+@pytest.mark.parametrize("bad_structured_bug", [{}, {"creation_time": "jiberish-date"}])
def test_formula_throws_adequate_error_for_bug(bad_structured_bug, formula, nonblock_session):
with pytest.raises(ValueError):
formula.has_cooled_down(bad_structured_bug)
-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
def test_formula_initializes_with_non_blockable_sessions(FormulaClass, nonblock_session):
try:
_ = FormulaClass(nonblock_session)
@@ -96,13 +96,13 @@ def test_formula_initializes_with_non_blockable_sessions(FormulaClass, nonblock_
pytest.fail()
-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
def test_formula_cannot_be_initialized_with_a_regular_session(FormulaClass, unrecommended_session):
with pytest.raises(TypeError):
_ = FormulaClass(unrecommended_session)
-@pytest.mark.parametrize('formula', bugzilla_formula_instances())
+@pytest.mark.parametrize("formula", bugzilla_formula_instances())
def test_accessing_breakdown_without_prior_calculus_errors_out(formula, nonblock_session):
with pytest.raises(RuntimeError):
_ = formula.breakdown()
@@ -111,61 +111,61 @@ def test_accessing_breakdown_without_prior_calculus_errors_out(formula, nonblock
# Leveraging HTTP VCR
-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
def test_formula_demands_at_least_framework_and_suite(FormulaClass, betamax_recorder):
formula = FormulaClass(betamax_recorder.session)
with pytest.raises(TypeError):
- formula('some_framework')
+ formula("some_framework")
with pytest.raises(TypeError):
formula()
- with betamax_recorder.use_cassette('awsy-JS', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette("awsy-JS", serialize_with="prettyjson"):
try:
- formula('awsy', 'JS')
+ formula("awsy", "JS")
except TypeError:
pytest.fail()
-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
def test_breakdown_updates_between_calculations(FormulaClass, betamax_recorder):
formula = FormulaClass(betamax_recorder.session)
- test_moniker_A = ('build_metrics', 'build times')
- test_moniker_B = ('talos', 'tp5n', 'nonmain_startup_fileio')
+ test_moniker_A = ("build_metrics", "build times")
+ test_moniker_B = ("talos", "tp5n", "nonmain_startup_fileio")
- cassette_preffix_A = '-'.join(filter(None, test_moniker_A))
- cassette_preffix_B = '-'.join(filter(None, test_moniker_B))
+ cassette_preffix_A = "-".join(filter(None, test_moniker_A))
+ cassette_preffix_B = "-".join(filter(None, test_moniker_B))
- with betamax_recorder.use_cassette(f'{cassette_preffix_A}', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette(f"{cassette_preffix_A}", serialize_with="prettyjson"):
formula(*test_moniker_A) # let it perform calculus & cache breakdown
breakdown_A = formula.breakdown()
- with betamax_recorder.use_cassette(f'{cassette_preffix_B}', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette(f"{cassette_preffix_B}", serialize_with="prettyjson"):
formula(*test_moniker_B) # let it perform calculus & cache breakdown
breakdown_B = formula.breakdown()
assert breakdown_A != breakdown_B
-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
def test_breakdown_resets_to_null_when_calculus_errors_out(FormulaClass, betamax_recorder):
formula = FormulaClass(betamax_recorder.session)
- test_moniker_A = ('build_metrics', 'build times')
- test_moniker_B = ('nonexistent_framework', 'nonexistent_suite')
+ test_moniker_A = ("build_metrics", "build times")
+ test_moniker_B = ("nonexistent_framework", "nonexistent_suite")
- cassette_preffix_A = '-'.join(filter(None, test_moniker_A))
- cassette_preffix_B = '-'.join(filter(None, test_moniker_B))
+ cassette_preffix_A = "-".join(filter(None, test_moniker_A))
+ cassette_preffix_B = "-".join(filter(None, test_moniker_B))
# run happy path calculus
- with betamax_recorder.use_cassette(f'{cassette_preffix_A}', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette(f"{cassette_preffix_A}", serialize_with="prettyjson"):
formula(*test_moniker_A) # let it perform calculus & cache breakdown
_ = formula.breakdown()
# now run alternated path calculus
- with betamax_recorder.use_cassette(f'{cassette_preffix_B}', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette(f"{cassette_preffix_B}", serialize_with="prettyjson"):
with pytest.raises(NoFiledBugs):
formula(*test_moniker_B) # intentionally blows up while doing calculus
@@ -174,50 +174,50 @@ def test_breakdown_resets_to_null_when_calculus_errors_out(FormulaClass, betamax
_ = formula.breakdown()
-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
@pytest.mark.parametrize(
- 'framework, suite, test',
+ "framework, suite, test",
[
- ('build_metrics', 'build times', None),
- ('build_metrics', 'installer size', None),
- ('awsy', 'JS', None),
- ('talos', 'tp5n', 'nonmain_startup_fileio'),
+ ("build_metrics", "build times", None),
+ ("build_metrics", "installer size", None),
+ ("awsy", "JS", None),
+ ("talos", "tp5n", "nonmain_startup_fileio"),
],
)
def test_formula_fetches_bugs_from_quantifying_period(
framework, suite, test, FormulaClass, betamax_recorder
):
formula = FormulaClass(betamax_recorder.session)
- cassette = '-'.join(filter(None, [framework, suite, test]))
+ cassette = "-".join(filter(None, [framework, suite, test]))
- with betamax_recorder.use_cassette(f'{cassette}', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette(f"{cassette}", serialize_with="prettyjson"):
formula(framework, suite, test) # let it perform calculus & cache breakdown
all_filed_bugs, except_new_bugs = formula.breakdown()
assert len(all_filed_bugs) > 0
for bug in all_filed_bugs:
- creation_time = datetime.strptime(bug['creation_time'], BZ_DATETIME_FORMAT)
+ creation_time = datetime.strptime(bug["creation_time"], BZ_DATETIME_FORMAT)
assert creation_time >= formula.oldest_timestamp
-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
@pytest.mark.parametrize(
- 'framework, suite, test',
+ "framework, suite, test",
[
- ('build_metrics', 'build times', None),
- ('build_metrics', 'installer size', None),
- ('awsy', 'JS', None),
- ('talos', 'tp5n', 'nonmain_startup_fileio'),
+ ("build_metrics", "build times", None),
+ ("build_metrics", "installer size", None),
+ ("awsy", "JS", None),
+ ("talos", "tp5n", "nonmain_startup_fileio"),
],
)
def test_formula_filters_out_bugs_that_didnt_cool_down_yet(
framework, suite, test, FormulaClass, betamax_recorder
):
formula = FormulaClass(betamax_recorder.session)
- cassette = '-'.join(filter(None, [framework, suite, test]))
+ cassette = "-".join(filter(None, [framework, suite, test]))
- with betamax_recorder.use_cassette(f'{cassette}', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette(f"{cassette}", serialize_with="prettyjson"):
formula(framework, suite, test) # let it perform calculus & cache breakdown
# left with cooled down bugs only
@@ -226,14 +226,14 @@ def test_formula_filters_out_bugs_that_didnt_cool_down_yet(
assert formula.has_cooled_down(bug)
-@pytest.mark.parametrize('FormulaClass', concrete_formula_classes())
+@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
def test_formula_errors_up_when_no_bugs_were_filed(FormulaClass, betamax_recorder):
formula = FormulaClass(betamax_recorder.session)
- nonexistent_framework = 'nonexistent_framework'
- nonexistent_suite = 'nonexistent_suite'
+ nonexistent_framework = "nonexistent_framework"
+ nonexistent_suite = "nonexistent_suite"
with betamax_recorder.use_cassette(
- f'{nonexistent_framework}-{nonexistent_suite}', serialize_with='prettyjson'
+ f"{nonexistent_framework}-{nonexistent_suite}", serialize_with="prettyjson"
):
with pytest.raises(NoFiledBugs):
formula(nonexistent_framework, nonexistent_suite)
diff --git a/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py b/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
index ee3d98914bd..33d972fab38 100644
--- a/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
+++ b/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
@@ -26,28 +26,28 @@
pytestmark = [pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, tick=True)]
-RECORD_TEST_PATH = (PROJECT_ROOT / 'tests/sample_data/criteria-records.csv').resolve()
+RECORD_TEST_PATH = (PROJECT_ROOT / "tests/sample_data/criteria-records.csv").resolve()
EXPECTED_LAST_UPDATE = dateutil_parse(CASSETTES_RECORDING_DATE)
EXPECTED_VALUE = 0.5
TESTS_WITH_NO_DATA = [
- ('awsy', 'Base Content Explicit', ''),
- ('browsertime', 'allrecipes-cold', ''),
- ('raptor', 'os-baseline-power', ''),
- ('talos', 'a11yr', ''),
+ ("awsy", "Base Content Explicit", ""),
+ ("browsertime", "allrecipes-cold", ""),
+ ("raptor", "os-baseline-power", ""),
+ ("talos", "a11yr", ""),
]
TESTS_WITH_EXPIRED_DATA = [
- ('awsy', 'Base Content Heap Unclassified', ''),
- ('browsertime', 'amazon', ''),
- ('build_metrics', 'compiler warnings', ''),
- ('raptor', 'raptor-ares6-firefox', ''),
- ('talos', 'about_newtab_with_snippets', ''),
+ ("awsy", "Base Content Heap Unclassified", ""),
+ ("browsertime", "amazon", ""),
+ ("build_metrics", "compiler warnings", ""),
+ ("raptor", "raptor-ares6-firefox", ""),
+ ("talos", "about_newtab_with_snippets", ""),
]
TESTS_WITH_UPDATED_DATA = [
- ('awsy', 'Base Content JS', ''),
- ('browsertime', 'amazon-cold', ''),
- ('build_metrics', 'installer size', ''),
- ('raptor', 'raptor-assorted-dom-firefox', ''),
- ('talos', 'about_preferences_basic', ''),
+ ("awsy", "Base Content JS", ""),
+ ("browsertime", "amazon-cold", ""),
+ ("build_metrics", "installer size", ""),
+ ("raptor", "raptor-assorted-dom-firefox", ""),
+ ("talos", "about_preferences_basic", ""),
]
recording_date = dateutil_parse(CASSETTES_RECORDING_DATE).isoformat()
RECORDS_WITH_NO_DATA = [
@@ -55,11 +55,11 @@
Framework=test[0],
Suite=test[1],
Test=test[2],
- EngineerTraction='',
- FixRatio='',
- TotalAlerts='',
- LastUpdatedOn='',
- AllowSync='',
+ EngineerTraction="",
+ FixRatio="",
+ TotalAlerts="",
+ LastUpdatedOn="",
+ AllowSync="",
)
for test in TESTS_WITH_NO_DATA
]
@@ -71,8 +71,8 @@
EngineerTraction=0.5,
FixRatio=0.3,
TotalAlerts=21,
- LastUpdatedOn='2020-05-02T00:00:00.000000',
- AllowSync='',
+ LastUpdatedOn="2020-05-02T00:00:00.000000",
+ AllowSync="",
)
for test in TESTS_WITH_EXPIRED_DATA
]
@@ -84,8 +84,8 @@
EngineerTraction=0.5,
FixRatio=0.3,
TotalAlerts=21,
- LastUpdatedOn='2020-06-02T00:00:00.000000',
- AllowSync='',
+ LastUpdatedOn="2020-06-02T00:00:00.000000",
+ AllowSync="",
)
for test in TESTS_WITH_UPDATED_DATA
]
@@ -114,7 +114,7 @@
class eventually_ready:
def __init__(self, start_time: float, ready_after: float):
- print(f'start_time: {start_time}')
+ print(f"start_time: {start_time}")
self.start_time = start_time
self.ready_after = ready_after
@@ -151,7 +151,7 @@ def should_take_more_than(seconds: float):
@pytest.fixture
def updatable_criteria_csv(tmp_path):
updatable_csv = tmp_path / "updatable-criteria.csv"
- with open(RECORD_TEST_PATH, 'r') as file_:
+ with open(RECORD_TEST_PATH, "r") as file_:
updatable_csv.write_text(file_.read())
return updatable_csv
@@ -160,17 +160,17 @@ def updatable_criteria_csv(tmp_path):
@pytest.fixture
def mock_formula_map():
return {
- 'EngineerTraction': MagicMock(spec=EngineerTractionFormula, return_value=EXPECTED_VALUE),
- 'FixRatio': MagicMock(spec=FixRatioFormula, return_value=EXPECTED_VALUE),
- 'TotalAlerts': MagicMock(spec=FixRatioFormula, return_value=0),
+ "EngineerTraction": MagicMock(spec=EngineerTractionFormula, return_value=EXPECTED_VALUE),
+ "FixRatio": MagicMock(spec=FixRatioFormula, return_value=EXPECTED_VALUE),
+ "TotalAlerts": MagicMock(spec=FixRatioFormula, return_value=0),
}
@pytest.mark.parametrize(
- 'invalid_formulas',
+ "invalid_formulas",
[
- {'EngineerTraction': InvalidFormula(), 'FixRatio': InvalidFormula()},
- {'EngineerTraction': None, 'FixRatio': None},
+ {"EngineerTraction": InvalidFormula(), "FixRatio": InvalidFormula()},
+ {"EngineerTraction": None, "FixRatio": None},
],
)
def test_tracker_throws_error_for_invalid_formulas(invalid_formulas):
@@ -179,7 +179,7 @@ def test_tracker_throws_error_for_invalid_formulas(invalid_formulas):
def test_tracker_throws_error_if_no_record_file_found(tmp_path):
- nonexistent_file = str(tmp_path / 'perf-sheriffing-criteria.csv')
+ nonexistent_file = str(tmp_path / "perf-sheriffing-criteria.csv")
tracker = CriteriaTracker(record_path=nonexistent_file)
with pytest.raises(FileNotFoundError):
@@ -194,28 +194,28 @@ def test_tracker_has_a_list_of_records():
assert len(record_list) == 5
-@pytest.mark.parametrize('criteria_record', RECORDS_WITH_NO_DATA)
+@pytest.mark.parametrize("criteria_record", RECORDS_WITH_NO_DATA)
def test_record_computer_can_tell_missing_data(criteria_record):
computer = RecordComputer({}, timedelta(days=3), timedelta(seconds=0))
assert computer.should_update(criteria_record)
-@pytest.mark.parametrize('criteria_record', RECORDS_WITH_EXPIRED_DATA)
+@pytest.mark.parametrize("criteria_record", RECORDS_WITH_EXPIRED_DATA)
def test_record_computer_can_tell_expired_data(criteria_record):
computer = RecordComputer({}, timedelta(days=3), timedelta(seconds=0))
assert computer.should_update(criteria_record)
-@pytest.mark.parametrize('criteria_record', RECORDS_WITH_UPDATED_DATA)
+@pytest.mark.parametrize("criteria_record", RECORDS_WITH_UPDATED_DATA)
def test_record_computer_can_tell_updated_data(criteria_record):
computer = RecordComputer({}, timedelta(days=3), timedelta(seconds=0))
assert not computer.should_update(criteria_record)
-@pytest.mark.parametrize('criteria_record', RECORDS_UNALLOWED_TO_SYNC)
+@pytest.mark.parametrize("criteria_record", RECORDS_UNALLOWED_TO_SYNC)
def test_record_computer_can_tell_unallowed_data(criteria_record):
computer = RecordComputer({}, timedelta(days=3), timedelta(seconds=0))
@@ -223,31 +223,31 @@ def test_record_computer_can_tell_unallowed_data(criteria_record):
@pytest.mark.freeze_time(CASSETTES_RECORDING_DATE) # disable tick
-@pytest.mark.parametrize('exception', [NoFiledBugs(), Exception()])
+@pytest.mark.parametrize("exception", [NoFiledBugs(), Exception()])
def test_record_computer_still_updates_if_one_of_the_formulas_fails(exception, db):
formula_map = {
- 'EngineerTraction': MagicMock(spec=EngineerTractionFormula, return_value=EXPECTED_VALUE),
- 'FixRatio': MagicMock(spec=FixRatioFormula, side_effect=exception),
- 'TotalAlerts': TotalAlertsFormula(),
+ "EngineerTraction": MagicMock(spec=EngineerTractionFormula, return_value=EXPECTED_VALUE),
+ "FixRatio": MagicMock(spec=FixRatioFormula, side_effect=exception),
+ "TotalAlerts": TotalAlertsFormula(),
}
record = CriteriaRecord(
- Framework='talos',
- Suite='tp5n',
- Test='',
- EngineerTraction='',
- FixRatio='',
- TotalAlerts='',
- LastUpdatedOn='',
- AllowSync='',
+ Framework="talos",
+ Suite="tp5n",
+ Test="",
+ EngineerTraction="",
+ FixRatio="",
+ TotalAlerts="",
+ LastUpdatedOn="",
+ AllowSync="",
)
computer = RecordComputer(formula_map, timedelta(days=3), timedelta(seconds=0))
record = computer.apply_formulas(record)
- assert record.Framework == 'talos'
- assert record.Suite == 'tp5n'
+ assert record.Framework == "talos"
+ assert record.Suite == "tp5n"
assert record.EngineerTraction == EXPECTED_VALUE
- assert record.FixRatio == 'N/A'
+ assert record.FixRatio == "N/A"
assert record.TotalAlerts == 0 # as the test database is empty
assert record.LastUpdatedOn == EXPECTED_LAST_UPDATE
assert record.AllowSync is True
@@ -277,10 +277,10 @@ def test_tracker_updates_records_with_missing_data(mock_formula_map, updatable_c
# CSV has no criteria data initially
for criteria_rec in tracker:
- assert criteria_rec.EngineerTraction == ''
- assert criteria_rec.FixRatio == ''
- assert criteria_rec.TotalAlerts == ''
- assert criteria_rec.LastUpdatedOn == ''
+ assert criteria_rec.EngineerTraction == ""
+ assert criteria_rec.FixRatio == ""
+ assert criteria_rec.TotalAlerts == ""
+ assert criteria_rec.LastUpdatedOn == ""
assert criteria_rec.AllowSync is True
tracker.update_records()
@@ -301,7 +301,7 @@ def test_tracker_updates_records_with_missing_data(mock_formula_map, updatable_c
@pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, auto_tick_seconds=30)
-@pytest.mark.parametrize('async_results', [NEVER_READY_RESULTS, PARTIALLY_READY_RESULTS])
+@pytest.mark.parametrize("async_results", [NEVER_READY_RESULTS, PARTIALLY_READY_RESULTS])
def test_results_checker_timeouts_on_no_changes(async_results):
checker = ResultsChecker(check_interval=timedelta(0.0), timeout_after=timedelta(minutes=5))
@@ -310,7 +310,7 @@ def test_results_checker_timeouts_on_no_changes(async_results):
@pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, auto_tick_seconds=30)
-@pytest.mark.parametrize('async_results', [READY_RESULTS, EVENTUALLY_READY_RESULTS])
+@pytest.mark.parametrize("async_results", [READY_RESULTS, EVENTUALLY_READY_RESULTS])
def test_results_checker_doesnt_timeout_unexpectedly(async_results):
checker = ResultsChecker(check_interval=timedelta(0.0), timeout_after=timedelta(minutes=5))
diff --git a/tests/perf/auto_sheriffing_criteria/test_engineer_traction.py b/tests/perf/auto_sheriffing_criteria/test_engineer_traction.py
index 9bedeeadc2b..530d693e4d9 100644
--- a/tests/perf/auto_sheriffing_criteria/test_engineer_traction.py
+++ b/tests/perf/auto_sheriffing_criteria/test_engineer_traction.py
@@ -24,30 +24,30 @@
@pytest.fixture
def quantified_bugs(betamax_recorder) -> list:
params = {
- 'longdesc': 'raptor speedometer',
- 'longdesc_type': 'allwords',
- 'longdesc_initial': 1,
- 'keywords': 'perf,perf-alert',
- 'keywords_type': 'anywords',
- 'creation_time': '2019-12-17',
- 'query_format': 'advanced',
+ "longdesc": "raptor speedometer",
+ "longdesc_type": "allwords",
+ "longdesc_initial": 1,
+ "keywords": "perf,perf-alert",
+ "keywords_type": "anywords",
+ "creation_time": "2019-12-17",
+ "query_format": "advanced",
}
- with betamax_recorder.use_cassette('quantified-bugs', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette("quantified-bugs", serialize_with="prettyjson"):
bug_resp = betamax_recorder.session.get(
- 'https://bugzilla.mozilla.org/rest/bug',
- headers={'Accept': 'application/json'},
+ "https://bugzilla.mozilla.org/rest/bug",
+ headers={"Accept": "application/json"},
params=params,
timeout=60,
)
- return bug_resp.json()['bugs']
+ return bug_resp.json()["bugs"]
@pytest.fixture
def cooled_down_bugs(nonblock_session, quantified_bugs) -> List[dict]:
bugs = []
for bug in quantified_bugs:
- created_at = datetime.strptime(bug['creation_time'], BZ_DATETIME_FORMAT)
+ created_at = datetime.strptime(bug["creation_time"], BZ_DATETIME_FORMAT)
if created_at <= datetime.now() - timedelta(weeks=2):
bugs.append(bug)
return bugs
@@ -59,39 +59,39 @@ def cooled_down_bugs(nonblock_session, quantified_bugs) -> List[dict]:
def test_formula_counts_tracted_bugs(cooled_down_bugs, betamax_recorder):
engineer_traction = EngineerTractionFormula(betamax_recorder.session)
- with betamax_recorder.use_cassette('cooled-down-bug-history', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette("cooled-down-bug-history", serialize_with="prettyjson"):
tracted_bugs = engineer_traction._filter_numerator_bugs(cooled_down_bugs)
assert len(tracted_bugs) == 2
@pytest.mark.parametrize(
- 'framework, suite, test',
+ "framework, suite, test",
[
# Sheriffed tests
- ('build_metrics', 'build times', None), # 92%
- ('build_metrics', 'installer size', None), # 78%
- ('awsy', 'JS', None), # 55%
- ('talos', 'tp5n', 'main_startup_fileio'), # 50%
+ ("build_metrics", "build times", None), # 92%
+ ("build_metrics", "installer size", None), # 78%
+ ("awsy", "JS", None), # 55%
+ ("talos", "tp5n", "main_startup_fileio"), # 50%
],
)
def test_final_formula_confirms_sheriffed_tests(framework, suite, test, betamax_recorder):
engineer_traction = EngineerTractionFormula(betamax_recorder.session)
- with betamax_recorder.use_cassette(f'{framework}-{suite}', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette(f"{framework}-{suite}", serialize_with="prettyjson"):
assert engineer_traction(framework, suite) >= 0.35
@pytest.mark.parametrize(
- 'framework, suite, test',
+ "framework, suite, test",
[
# Non-sheriffed tests
- ('raptor', 'raptor-speedometer-firefox', None), # 33%
- ('raptor', 'raptor-webaudio-firefox', None), # 0%
- ('raptor', 'raptor-tp6-google-mail-firefox-cold', 'replayed'), # 0%
+ ("raptor", "raptor-speedometer-firefox", None), # 33%
+ ("raptor", "raptor-webaudio-firefox", None), # 0%
+ ("raptor", "raptor-tp6-google-mail-firefox-cold", "replayed"), # 0%
],
)
def test_final_formula_confirms_non_sheriffed_tests(framework, suite, test, betamax_recorder):
engineer_traction = EngineerTractionFormula(betamax_recorder.session)
- with betamax_recorder.use_cassette(f'{framework}-{suite}', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette(f"{framework}-{suite}", serialize_with="prettyjson"):
assert engineer_traction(framework, suite, test) < 0.35
diff --git a/tests/perf/auto_sheriffing_criteria/test_fix_ratio.py b/tests/perf/auto_sheriffing_criteria/test_fix_ratio.py
index b52aad44022..332c5e3dd89 100644
--- a/tests/perf/auto_sheriffing_criteria/test_fix_ratio.py
+++ b/tests/perf/auto_sheriffing_criteria/test_fix_ratio.py
@@ -15,32 +15,32 @@
@pytest.mark.parametrize(
- 'framework, suite',
+ "framework, suite",
[
# Sheriffed tests
- ('build_metrics', 'build times'), # 37.5%
- ('build_metrics', 'installer size'), # 41.6%
- ('raptor', 'raptor-speedometer-firefox'), # 100%
- ('raptor', 'raptor-webaudio-firefox'), # 100%
+ ("build_metrics", "build times"), # 37.5%
+ ("build_metrics", "installer size"), # 41.6%
+ ("raptor", "raptor-speedometer-firefox"), # 100%
+ ("raptor", "raptor-webaudio-firefox"), # 100%
],
)
def test_formula_confirms_sheriffed_tests(framework, suite, betamax_recorder):
fix_ratio = FixRatioFormula(betamax_recorder.session)
- with betamax_recorder.use_cassette(f'{framework}-{suite}', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette(f"{framework}-{suite}", serialize_with="prettyjson"):
assert fix_ratio(framework, suite) >= 0.3
@pytest.mark.parametrize(
- 'framework, suite, test',
+ "framework, suite, test",
[
# Non-sheriffed tests
- ('awsy', 'JS', None), # 20%
- ('talos', 'tp5n', 'nonmain_startup_fileio'), # 0%
+ ("awsy", "JS", None), # 20%
+ ("talos", "tp5n", "nonmain_startup_fileio"), # 0%
],
)
def test_formula_confirms_non_sheriffed_tests(framework, suite, test, betamax_recorder):
fix_ratio = FixRatioFormula(betamax_recorder.session)
- with betamax_recorder.use_cassette(f'{framework}-{suite}', serialize_with='prettyjson'):
+ with betamax_recorder.use_cassette(f"{framework}-{suite}", serialize_with="prettyjson"):
assert fix_ratio(framework, suite, test) < 0.3
diff --git a/tests/perf/auto_sheriffing_criteria/test_nonblockable_session.py b/tests/perf/auto_sheriffing_criteria/test_nonblockable_session.py
index 635c27d4ba1..d17799abc31 100644
--- a/tests/perf/auto_sheriffing_criteria/test_nonblockable_session.py
+++ b/tests/perf/auto_sheriffing_criteria/test_nonblockable_session.py
@@ -5,7 +5,7 @@ def test_nonblockable_sessions_has_the_recommended_headers(nonblock_session):
session_headers = nonblock_session.headers
try:
- assert session_headers['Referer']
- assert session_headers['User-Agent']
+ assert session_headers["Referer"]
+ assert session_headers["User-Agent"]
except KeyError:
pytest.fail()
diff --git a/tests/perf/test_email.py b/tests/perf/test_email.py
index 5b377bb6141..ee1b1a732f2 100644
--- a/tests/perf/test_email.py
+++ b/tests/perf/test_email.py
@@ -43,5 +43,5 @@ def __prepare_expected_content(test_perf_signature):
application=test_perf_signature.application,
last_updated=test_perf_signature.last_updated.date(),
)
- expected_content += '\n'
+ expected_content += "\n"
return expected_content
diff --git a/tests/perfalert/conftest.py b/tests/perfalert/conftest.py
index e13a7755f4f..6025bae7d7e 100644
--- a/tests/perfalert/conftest.py
+++ b/tests/perfalert/conftest.py
@@ -1,3 +1,3 @@
from tests.conftest import SampleDataJSONLoader
-load_json_fixture = SampleDataJSONLoader('sherlock')
+load_json_fixture = SampleDataJSONLoader("sherlock")
diff --git a/tests/perfalert/test_alert_modification.py b/tests/perfalert/test_alert_modification.py
index 887ad81e2cd..7496deb3de6 100644
--- a/tests/perfalert/test_alert_modification.py
+++ b/tests/perfalert/test_alert_modification.py
@@ -39,12 +39,12 @@ def test_summary_status(
signature1 = test_perf_signature
signature2 = PerformanceSignature.objects.create(
repository=test_repository,
- signature_hash=(40 * 'u'),
+ signature_hash=(40 * "u"),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
- suite='mysuite_2',
- test='mytest_2',
+ suite="mysuite_2",
+ test="mytest_2",
has_subtests=False,
last_updated=datetime.datetime.now(),
)
@@ -81,12 +81,12 @@ def test_reassigning_regression(
signature1 = test_perf_signature
signature2 = PerformanceSignature.objects.create(
repository=test_repository,
- signature_hash=(40 * 'u'),
+ signature_hash=(40 * "u"),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
- suite='mysuite_2',
- test='mytest_2',
+ suite="mysuite_2",
+ test="mytest_2",
has_subtests=False,
last_updated=datetime.datetime.now(),
)
@@ -132,12 +132,12 @@ def test_improvement_summary_status_after_reassigning_regression(
signature1 = test_perf_signature
signature2 = PerformanceSignature.objects.create(
repository=test_repository,
- signature_hash=(40 * 'u'),
+ signature_hash=(40 * "u"),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
- suite='mysuite_2',
- test='mytest_2',
+ suite="mysuite_2",
+ test="mytest_2",
has_subtests=False,
last_updated=datetime.datetime.now(),
)
diff --git a/tests/perfalert/test_alerts.py b/tests/perfalert/test_alerts.py
index 873fecb8f40..c02f80c9abd 100644
--- a/tests/perfalert/test_alerts.py
+++ b/tests/perfalert/test_alerts.py
@@ -59,10 +59,10 @@ def _generate_performance_data(
):
push, _ = Push.objects.get_or_create(
repository=test_repository,
- revision='1234abcd%s' % t,
+ revision="1234abcd%s" % t,
defaults={
- 'author': 'foo@bar.com',
- 'time': datetime.datetime.fromtimestamp(base_timestamp + t),
+ "author": "foo@bar.com",
+ "time": datetime.datetime.fromtimestamp(base_timestamp + t),
},
)
PerformanceDatum.objects.create(
@@ -302,7 +302,7 @@ def test_custom_alert_threshold(
assert PerformanceAlertSummary.objects.count() == 1
-@pytest.mark.parametrize(('new_value', 'expected_num_alerts'), [(1.0, 1), (0.25, 0)])
+@pytest.mark.parametrize(("new_value", "expected_num_alerts"), [(1.0, 1), (0.25, 0)])
def test_alert_change_type_absolute(
test_repository,
test_issue_tracker,
diff --git a/tests/perfalert/test_analyze.py b/tests/perfalert/test_analyze.py
index 29849bea8fb..81a428d58f7 100644
--- a/tests/perfalert/test_analyze.py
+++ b/tests/perfalert/test_analyze.py
@@ -47,7 +47,7 @@ def test_weights():
[
([0.0, 0.0], [1.0, 2.0], 3.0),
([0.0, 0.0], [0.0, 0.0], 0.0),
- ([0.0, 0.0], [1.0, 1.0], float('inf')),
+ ([0.0, 0.0], [1.0, 1.0], float("inf")),
],
)
def test_calc_t(old_data, new_data, expected):
@@ -111,13 +111,13 @@ def test_detect_changes_few_revisions_many_values():
@pytest.mark.parametrize(
("filename", "expected_timestamps"),
[
- ('runs1.json', [1365019665]),
- ('runs2.json', [1357704596, 1358971894, 1365014104]),
- ('runs3.json', [1335293827, 1338839958]),
- ('runs4.json', [1364922838]),
- ('runs5.json', []),
- ('a11y.json', [1366197637, 1367799757]),
- ('tp5rss.json', [1372846906, 1373413365, 1373424974]),
+ ("runs1.json", [1365019665]),
+ ("runs2.json", [1357704596, 1358971894, 1365014104]),
+ ("runs3.json", [1335293827, 1338839958]),
+ ("runs4.json", [1364922838]),
+ ("runs5.json", []),
+ ("a11y.json", [1366197637, 1367799757]),
+ ("tp5rss.json", [1372846906, 1373413365, 1373424974]),
],
)
def test_detect_changes_historical_data(filename, expected_timestamps):
@@ -128,8 +128,8 @@ def test_detect_changes_historical_data(filename, expected_timestamps):
MAX_BACK_WINDOW = 24
THRESHOLD = 7
- payload = SampleData.get_perf_data(os.path.join('graphs', filename))
- runs = payload['test_runs']
+ payload = SampleData.get_perf_data(os.path.join("graphs", filename))
+ runs = payload["test_runs"]
data = [RevisionDatum(r[2], r[2], [r[3]]) for r in runs]
results = detect_changes(
diff --git a/tests/push_health/test_builds.py b/tests/push_health/test_builds.py
index c4c233e7f59..56fb82f8264 100644
--- a/tests/push_health/test_builds.py
+++ b/tests/push_health/test_builds.py
@@ -8,14 +8,14 @@ def test_get_build_failures(
jobs = sample_data.job_data[20:25]
for blob in jobs:
- blob['revision'] = test_push.revision
- blob['job']['result'] = 'busted'
- blob['job']['taskcluster_task_id'] = 'V3SVuxO8TFy37En_6HcXLs'
- blob['job']['taskcluster_retry_id'] = '0'
+ blob["revision"] = test_push.revision
+ blob["job"]["result"] = "busted"
+ blob["job"]["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcXLs"
+ blob["job"]["taskcluster_retry_id"] = "0"
store_job_data(test_repository, jobs)
result, build_failures, in_progress = get_build_failures(test_push)
assert in_progress == 0
- assert result == 'fail'
+ assert result == "fail"
assert len(build_failures) == 2
diff --git a/tests/push_health/test_classification.py b/tests/push_health/test_classification.py
index 8de8e32b42d..aa6cd11d75e 100644
--- a/tests/push_health/test_classification.py
+++ b/tests/push_health/test_classification.py
@@ -7,45 +7,45 @@ def test_intermittent_win7_reftest():
"""test that a failed test is classified as infra"""
failures = [
{
- 'testName': 'foo',
- 'jobName': 'Foodebug-reftest',
- 'platform': 'windows7-32',
- 'suggestedClassification': 'New Failure',
- 'config': 'foo',
- 'isClassifiedIntermittent': True,
+ "testName": "foo",
+ "jobName": "Foodebug-reftest",
+ "platform": "windows7-32",
+ "suggestedClassification": "New Failure",
+ "config": "foo",
+ "isClassifiedIntermittent": True,
}
]
set_classifications(failures, {}, {})
- assert failures[0]['suggestedClassification'] == 'intermittent'
+ assert failures[0]["suggestedClassification"] == "intermittent"
@pytest.mark.parametrize(
- ('history', 'confidence', 'classification', 'fcid'),
+ ("history", "confidence", "classification", "fcid"),
[
- ({'foo': {'bing': {'baz': 2}}}, 100, 'intermittent', 1),
- ({'foo': {'bing': {'bee': 2}}}, 75, 'intermittent', 1),
- ({'foo': {'bee': {'bee': 2}}}, 50, 'intermittent', 1),
- ({'fee': {'bee': {'bee': 2}}}, 0, 'New Failure', 1),
+ ({"foo": {"bing": {"baz": 2}}}, 100, "intermittent", 1),
+ ({"foo": {"bing": {"bee": 2}}}, 75, "intermittent", 1),
+ ({"foo": {"bee": {"bee": 2}}}, 50, "intermittent", 1),
+ ({"fee": {"bee": {"bee": 2}}}, 0, "New Failure", 1),
# no match, but job has been classified as intermittent by hand.
- ({'fee': {'bee': {'bee': 2}}}, 100, 'intermittent', 4),
+ ({"fee": {"bee": {"bee": 2}}}, 100, "intermittent", 4),
],
)
def test_intermittent_confidence(history, confidence, classification, fcid):
"""test that a failed test is classified as intermittent, confidence 100"""
failures = [
{
- 'testName': 'foo',
- 'jobName': 'bar',
- 'platform': 'bing',
- 'suggestedClassification': 'New Failure',
- 'config': 'baz',
- 'confidence': 0,
- 'isClassifiedIntermittent': fcid == 4,
+ "testName": "foo",
+ "jobName": "bar",
+ "platform": "bing",
+ "suggestedClassification": "New Failure",
+ "config": "baz",
+ "confidence": 0,
+ "isClassifiedIntermittent": fcid == 4,
}
]
set_classifications(failures, history, {})
- assert failures[0]['suggestedClassification'] == classification
- assert failures[0]['confidence'] == confidence
+ assert failures[0]["suggestedClassification"] == classification
+ assert failures[0]["confidence"] == confidence
diff --git a/tests/push_health/test_compare.py b/tests/push_health/test_compare.py
index f17f9ef026c..e114237ad84 100644
--- a/tests/push_health/test_compare.py
+++ b/tests/push_health/test_compare.py
@@ -5,8 +5,8 @@
from treeherder.model.models import Push
from treeherder.push_health.compare import get_commit_history
-test_revision = '4c45a777949168d16c03a4cba167678b7ab65f76'
-parent_revision = 'abcdef77949168d16c03a4cba167678b7ab65f76'
+test_revision = "4c45a777949168d16c03a4cba167678b7ab65f76"
+parent_revision = "abcdef77949168d16c03a4cba167678b7ab65f76"
@pytest.fixture
@@ -14,7 +14,7 @@ def mock_rev(test_push):
# This is the revision/push under test
responses.add(
responses.GET,
- f'https://hg.mozilla.org/{test_push.repository.name}/rev/{test_revision}?style=json',
+ f"https://hg.mozilla.org/{test_push.repository.name}/rev/{test_revision}?style=json",
json={
"node": test_revision,
"date": [1589318819.0, -7200],
@@ -26,7 +26,7 @@ def mock_rev(test_push):
"pushdate": [1589318855, 0],
"pushuser": "hiro@protagonist.com",
},
- content_type='application/json',
+ content_type="application/json",
status=200,
)
@@ -35,7 +35,7 @@ def mock_rev(test_push):
def mock_json_pushes(test_push):
responses.add(
responses.GET,
- f'https://hg.mozilla.org/{test_push.repository.name}/json-pushes?version=2&full=1&startID=536015&endID=536016',
+ f"https://hg.mozilla.org/{test_push.repository.name}/json-pushes?version=2&full=1&startID=536015&endID=536016",
json={
"pushes": {
"536016": {
@@ -49,12 +49,12 @@ def mock_json_pushes(test_push):
}
},
},
- content_type='application/json',
+ content_type="application/json",
status=200,
)
responses.add(
responses.GET,
- f'https://hg.mozilla.org/{test_push.repository.name}/json-automationrelevance/4c45a777949168d16c03a4cba167678b7ab65f76?backouts=1',
+ f"https://hg.mozilla.org/{test_push.repository.name}/json-automationrelevance/4c45a777949168d16c03a4cba167678b7ab65f76?backouts=1",
json={
"changesets": [
{
@@ -90,7 +90,7 @@ def mock_json_pushes(test_push):
},
],
},
- content_type='application/json',
+ content_type="application/json",
status=200,
)
@@ -100,12 +100,12 @@ def test_get_commit_history(test_push, test_repository, mock_rev, mock_json_push
Push.objects.create(
revision=parent_revision,
repository=test_repository,
- author='foo@bar.baz',
+ author="foo@bar.baz",
time=datetime.datetime.now(),
)
history = get_commit_history(test_repository, test_revision, test_push)
- print('\n<><><>history')
+ print("\n<><><>history")
print(history)
- assert history['parentSha'] == parent_revision
- assert history['parentRepository']['name'] == test_repository.name
+ assert history["parentSha"] == parent_revision
+ assert history["parentRepository"]["name"] == test_repository.name
diff --git a/tests/push_health/test_linting.py b/tests/push_health/test_linting.py
index 5c02e74cafc..c7e3a9fce44 100644
--- a/tests/push_health/test_linting.py
+++ b/tests/push_health/test_linting.py
@@ -8,19 +8,19 @@ def test_get_linting_failures(
jobs = sample_data.job_data[20:22]
for blob in jobs:
- blob['revision'] = test_push.revision
- blob['job'].update(
+ blob["revision"] = test_push.revision
+ blob["job"].update(
{
- 'result': 'testfailed',
- 'taskcluster_task_id': 'V3SVuxO8TFy37En_6HcXLs',
- 'taskcluster_retry_id': '0',
+ "result": "testfailed",
+ "taskcluster_task_id": "V3SVuxO8TFy37En_6HcXLs",
+ "taskcluster_retry_id": "0",
}
)
- blob['job']['machine_platform']['platform'] = 'lint'
+ blob["job"]["machine_platform"]["platform"] = "lint"
store_job_data(test_repository, jobs)
result, build_failures, in_progress = get_lint_failures(test_push)
assert in_progress == 0
- assert result == 'fail'
+ assert result == "fail"
assert len(build_failures) == 2
diff --git a/tests/push_health/test_tests.py b/tests/push_health/test_tests.py
index 74f598c6035..64a2769f2b1 100644
--- a/tests/push_health/test_tests.py
+++ b/tests/push_health/test_tests.py
@@ -4,31 +4,31 @@
from treeherder.push_health.tests import get_test_failures, get_test_failure_jobs, has_job, has_line
-@pytest.mark.parametrize(('find_it',), [(True,), (False,)])
+@pytest.mark.parametrize(("find_it",), [(True,), (False,)])
def test_has_job(find_it):
- job = Job(id=123, repository=Repository(), guid='12345')
+ job = Job(id=123, repository=Repository(), guid="12345")
job_list = [
- {'id': 111},
- {'id': 222},
+ {"id": 111},
+ {"id": 222},
]
if find_it:
- job_list.append({'id': 123})
+ job_list.append({"id": 123})
assert has_job(job, job_list)
else:
assert not has_job(job, job_list)
-@pytest.mark.parametrize(('find_it',), [(True,), (False,)])
+@pytest.mark.parametrize(("find_it",), [(True,), (False,)])
def test_has_line(find_it):
line = FailureLine(line=123)
line_list = [
- {'line_number': 111},
- {'line_number': 222},
+ {"line_number": 111},
+ {"line_number": 222},
]
if find_it:
- line_list.append({'line_number': 123})
+ line_list.append({"line_number": 123})
assert has_line(line, line_list)
else:
assert not has_line(line, line_list)
@@ -37,13 +37,13 @@ def test_has_line(find_it):
def test_get_test_failures(
failure_classifications, test_repository, test_job, text_log_error_lines
):
- test_job.result = 'testfailed'
+ test_job.result = "testfailed"
test_job.save()
result_status, jobs = get_test_failure_jobs(test_job.push)
result, build_failures = get_test_failures(test_job.push, jobs, result_status)
- need_investigation = build_failures['needInvestigation']
+ need_investigation = build_failures["needInvestigation"]
- assert result == 'fail'
+ assert result == "fail"
assert len(need_investigation) == 1
- assert len(jobs[need_investigation[0]['jobName']]) == 1
+ assert len(jobs[need_investigation[0]["jobName"]]) == 1
diff --git a/tests/push_health/test_usage.py b/tests/push_health/test_usage.py
index 46737c0aec5..04fd6bbaf04 100644
--- a/tests/push_health/test_usage.py
+++ b/tests/push_health/test_usage.py
@@ -12,49 +12,49 @@
@pytest.fixture
def push_usage(test_base_dir):
- usage_path = os.path.join(test_base_dir, 'sample_data', 'push_usage_data.json')
+ usage_path = os.path.join(test_base_dir, "sample_data", "push_usage_data.json")
with open(usage_path) as f:
return json.load(f)
def test_peak(push_usage):
- peak = get_peak(push_usage['facets'][0])
- assert peak['needInvestigation'] == 149.0
- assert peak['time'] == 1584035553
+ peak = get_peak(push_usage["facets"][0])
+ assert peak["needInvestigation"] == 149.0
+ assert peak["time"] == 1584035553
def test_latest(push_usage):
- latest = get_latest(push_usage['facets'][0])
- assert latest['needInvestigation'] == 30.0
- assert latest['time'] == 1584042753
+ latest = get_latest(push_usage["facets"][0])
+ assert latest["needInvestigation"] == 30.0
+ assert latest["time"] == 1584042753
@responses.activate
def test_get_usage(push_usage, test_repository):
nrql = "SELECT%20max(needInvestigation)%20FROM%20push_health_need_investigation%20FACET%20revision%20SINCE%201%20DAY%20AGO%20TIMESERIES%20where%20repo%3D'{}'%20AND%20appName%3D'{}'".format(
- 'try', 'treeherder-prod'
+ "try", "treeherder-prod"
)
- new_relic_url = '{}?nrql={}'.format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)
+ new_relic_url = "{}?nrql={}".format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)
responses.add(
responses.GET,
new_relic_url,
body=json.dumps(push_usage),
status=200,
- content_type='application/json',
+ content_type="application/json",
)
# create the Pushes that match the usage response
for rev in [
- '4c45a777949168d16c03a4cba167678b7ab65f76',
- '1cd5f1062ce081636af8083eb5b87e45d0f03d01',
- 'c73645027199ac3e092002452b436dde461bbe28',
- 'b6e5cd6373370c40d315b0e266c6c3e9aa48ae12',
+ "4c45a777949168d16c03a4cba167678b7ab65f76",
+ "1cd5f1062ce081636af8083eb5b87e45d0f03d01",
+ "c73645027199ac3e092002452b436dde461bbe28",
+ "b6e5cd6373370c40d315b0e266c6c3e9aa48ae12",
]:
Push.objects.create(
revision=rev,
repository=test_repository,
- author='phydeaux@dog.org',
+ author="phydeaux@dog.org",
time=datetime.datetime.now(),
)
@@ -62,6 +62,6 @@ def test_get_usage(push_usage, test_repository):
facet = usage[0]
assert len(usage) == 4
- assert facet['push']['revision'] == '4c45a777949168d16c03a4cba167678b7ab65f76'
- assert facet['peak']['needInvestigation'] == 149.0
- assert facet['latest']['needInvestigation'] == 30.0
+ assert facet["push"]["revision"] == "4c45a777949168d16c03a4cba167678b7ab65f76"
+ assert facet["peak"]["needInvestigation"] == 149.0
+ assert facet["latest"]["needInvestigation"] == 30.0
diff --git a/tests/push_health/test_utils.py b/tests/push_health/test_utils.py
index d4cb21e6276..dde40334728 100644
--- a/tests/push_health/test_utils.py
+++ b/tests/push_health/test_utils.py
@@ -9,49 +9,49 @@
@pytest.mark.parametrize(
- ('action', 'test', 'signature', 'message', 'expected'),
+ ("action", "test", "signature", "message", "expected"),
[
- ('test_result', 'dis/dat/da/odder/ting', 'sig', 'mess', 'dis/dat/da/odder/ting'),
- ('crash', None, 'sig', 'mess', 'sig'),
- ('log', None, None, 'mess', 'mess'),
- ('meh', None, None, None, 'Non-Test Error'),
- ('test_result', 'pid:dis/dat/da/odder/ting', 'sig', 'mess', None),
+ ("test_result", "dis/dat/da/odder/ting", "sig", "mess", "dis/dat/da/odder/ting"),
+ ("crash", None, "sig", "mess", "sig"),
+ ("log", None, None, "mess", "mess"),
+ ("meh", None, None, None, "Non-Test Error"),
+ ("test_result", "pid:dis/dat/da/odder/ting", "sig", "mess", None),
(
- 'test_result',
- 'tests/layout/this == tests/layout/that',
- 'sig',
- 'mess',
- 'layout/this == layout/that',
+ "test_result",
+ "tests/layout/this == tests/layout/that",
+ "sig",
+ "mess",
+ "layout/this == layout/that",
),
(
- 'test_result',
- 'tests/layout/this != tests/layout/that',
- 'sig',
- 'mess',
- 'layout/this != layout/that',
+ "test_result",
+ "tests/layout/this != tests/layout/that",
+ "sig",
+ "mess",
+ "layout/this != layout/that",
),
(
- 'test_result',
- 'build/tests/reftest/tests/this != build/tests/reftest/tests/that',
- 'sig',
- 'mess',
- 'this != that',
+ "test_result",
+ "build/tests/reftest/tests/this != build/tests/reftest/tests/that",
+ "sig",
+ "mess",
+ "this != that",
),
(
- 'test_result',
- 'http://10.0.5.5/tests/this != http://10.0.5.5/tests/that',
- 'sig',
- 'mess',
- 'this != that',
+ "test_result",
+ "http://10.0.5.5/tests/this != http://10.0.5.5/tests/that",
+ "sig",
+ "mess",
+ "this != that",
),
- ('test_result', 'build/tests/reftest/tests/this', 'sig', 'mess', 'this'),
- ('test_result', 'test=jsreftest.html', 'sig', 'mess', 'jsreftest.html'),
- ('test_result', 'http://10.0.5.5/tests/this/thing', 'sig', 'mess', 'this/thing'),
- ('test_result', 'http://localhost:5000/tests/this/thing', 'sig', 'mess', 'thing'),
- ('test_result', 'thing is done (finished)', 'sig', 'mess', 'thing is done'),
- ('test_result', 'Last test finished', 'sig', 'mess', None),
- ('test_result', '(SimpleTest/TestRunner.js)', 'sig', 'mess', None),
- ('test_result', '/this\\thing\\there', 'sig', 'mess', 'this/thing/there'),
+ ("test_result", "build/tests/reftest/tests/this", "sig", "mess", "this"),
+ ("test_result", "test=jsreftest.html", "sig", "mess", "jsreftest.html"),
+ ("test_result", "http://10.0.5.5/tests/this/thing", "sig", "mess", "this/thing"),
+ ("test_result", "http://localhost:5000/tests/this/thing", "sig", "mess", "thing"),
+ ("test_result", "thing is done (finished)", "sig", "mess", "thing is done"),
+ ("test_result", "Last test finished", "sig", "mess", None),
+ ("test_result", "(SimpleTest/TestRunner.js)", "sig", "mess", None),
+ ("test_result", "/this\\thing\\there", "sig", "mess", "this/thing/there"),
],
)
def test_clean_test(action, test, signature, message, expected):
@@ -59,13 +59,13 @@ def test_clean_test(action, test, signature, message, expected):
@pytest.mark.parametrize(
- ('config', 'expected'),
+ ("config", "expected"),
[
- ('opt', 'opt'),
- ('debug', 'debug'),
- ('asan', 'asan'),
- ('pgo', 'opt'),
- ('shippable', 'opt'),
+ ("opt", "opt"),
+ ("debug", "debug"),
+ ("asan", "asan"),
+ ("pgo", "opt"),
+ ("shippable", "opt"),
],
)
def test_clean_config(config, expected):
@@ -73,11 +73,11 @@ def test_clean_config(config, expected):
@pytest.mark.parametrize(
- ('platform', 'expected'),
+ ("platform", "expected"),
[
- ('macosx64 opt and such', 'osx-10-10 opt and such'),
- ('linux doohickey', 'linux doohickey'),
- ('windows gizmo', 'windows gizmo'),
+ ("macosx64 opt and such", "osx-10-10 opt and such"),
+ ("linux doohickey", "linux doohickey"),
+ ("windows gizmo", "windows gizmo"),
],
)
def test_clean_platform(platform, expected):
@@ -85,14 +85,14 @@ def test_clean_platform(platform, expected):
@pytest.mark.parametrize(
- ('line', 'expected'),
+ ("line", "expected"),
[
- ('Return code:', False),
- ('unexpected status', False),
- ('unexpected crashes', False),
- ('exit status', False),
- ('Finished in', False),
- ('expect magic', True),
+ ("Return code:", False),
+ ("unexpected status", False),
+ ("unexpected crashes", False),
+ ("exit status", False),
+ ("Finished in", False),
+ ("expect magic", True),
],
)
def test_is_valid_failure_line(line, expected):
diff --git a/tests/sample_data_generator.py b/tests/sample_data_generator.py
index 61bc9286250..f7d1efc4032 100644
--- a/tests/sample_data_generator.py
+++ b/tests/sample_data_generator.py
@@ -10,22 +10,22 @@ def job_data(**kwargs):
jobs_obj = {
"revision": kwargs.get("revision", "24fd64b8251fac5cf60b54a915bffa7e51f636b5"),
"job": {
- u"build_platform": build_platform(**kwargs.pop("build_platform", {})),
- u"submit_timestamp": kwargs.pop("submit_timestamp", submit_timestamp()),
- u"start_timestamp": kwargs.pop("start_timestamp", start_timestamp()),
- u"name": kwargs.pop("name", u"mochitest-5"),
- u"option_collection": option_collection(**kwargs.pop("option_collection", {})),
- u"log_references": log_references(kwargs.pop("log_references", [])),
- u"who": kwargs.pop("who", u"sendchange-unittest"),
- u"reason": kwargs.pop("reason", u"scheduler"),
- u"artifact": kwargs.pop("artifact", {}),
- u"machine_platform": machine_platform(**kwargs.pop("machine_platform", {})),
- u"machine": kwargs.pop("machine", u"talos-r3-xp-088"),
- u"state": kwargs.pop("state", u"completed"),
- u"result": kwargs.pop("result", 0),
- u"job_guid": kwargs.pop(u"job_guid", u"f3e3a9e6526881c39a3b2b6ff98510f213b3d4ed"),
- u"product_name": kwargs.pop("product_name", u"firefox"),
- u"end_timestamp": kwargs.pop("end_timestamp", end_timestamp()),
+ "build_platform": build_platform(**kwargs.pop("build_platform", {})),
+ "submit_timestamp": kwargs.pop("submit_timestamp", submit_timestamp()),
+ "start_timestamp": kwargs.pop("start_timestamp", start_timestamp()),
+ "name": kwargs.pop("name", "mochitest-5"),
+ "option_collection": option_collection(**kwargs.pop("option_collection", {})),
+ "log_references": log_references(kwargs.pop("log_references", [])),
+ "who": kwargs.pop("who", "sendchange-unittest"),
+ "reason": kwargs.pop("reason", "scheduler"),
+ "artifact": kwargs.pop("artifact", {}),
+ "machine_platform": machine_platform(**kwargs.pop("machine_platform", {})),
+ "machine": kwargs.pop("machine", "talos-r3-xp-088"),
+ "state": kwargs.pop("state", "completed"),
+ "result": kwargs.pop("result", 0),
+ "job_guid": kwargs.pop("job_guid", "f3e3a9e6526881c39a3b2b6ff98510f213b3d4ed"),
+ "product_name": kwargs.pop("product_name", "firefox"),
+ "end_timestamp": kwargs.pop("end_timestamp", end_timestamp()),
},
}
@@ -63,7 +63,7 @@ def option_collection(**kwargs):
Return a sample data structure, with default values.
"""
- defaults = {u"debug": True}
+ defaults = {"debug": True}
defaults.update(kwargs)
@@ -72,7 +72,7 @@ def option_collection(**kwargs):
def log_references(log_refs=None):
if not log_refs:
- log_refs = [{u"url": u"http://ftp.mozilla.org/pub/...", u"name": u"unittest"}]
+ log_refs = [{"url": "http://ftp.mozilla.org/pub/...", "name": "unittest"}]
return log_refs
@@ -82,9 +82,9 @@ def build_platform(**kwargs):
"""
defaults = {
- u"platform": u"WINNT5.1",
- u"os_name": u"win",
- u"architecture": u"x86",
+ "platform": "WINNT5.1",
+ "os_name": "win",
+ "architecture": "x86",
}
defaults.update(kwargs)
@@ -98,9 +98,9 @@ def machine_platform(**kwargs):
"""
defaults = {
- u"platform": u"WINNT5.1",
- u"os_name": u"win",
- u"architecture": u"x86",
+ "platform": "WINNT5.1",
+ "os_name": "win",
+ "architecture": "x86",
}
defaults.update(kwargs)
diff --git a/tests/services/pulse/test_consumers.py b/tests/services/pulse/test_consumers.py
index 0ae5567c0e1..61a3e9235ba 100644
--- a/tests/services/pulse/test_consumers.py
+++ b/tests/services/pulse/test_consumers.py
@@ -58,9 +58,9 @@ def mock_store_pulse_tasks_classification(args, queue):
nonlocal mock_called
mock_called = True
- monkeypatch.setattr(store_pulse_tasks, 'apply_async', lambda args, queue: None)
+ monkeypatch.setattr(store_pulse_tasks, "apply_async", lambda args, queue: None)
monkeypatch.setattr(
- store_pulse_tasks_classification, 'apply_async', mock_store_pulse_tasks_classification
+ store_pulse_tasks_classification, "apply_async", mock_store_pulse_tasks_classification
)
consumer = JointConsumer(
@@ -76,10 +76,10 @@ def mock_store_pulse_tasks_classification(args, queue):
message = MagicMock()
monkeypatch.setattr(
message,
- 'delivery_info',
+ "delivery_info",
{
- 'exchange': 'exchange/taskcluster-queue/v1/task-completed',
- 'routing_key': 'primary.aaaaaaaaaaaaaaaaaaaaaa.0.us-east1.111111111111111111.proj-bugbug.compute-smaller.-.AAAAAAAAAAAAAAAAAAAAAA._',
+ "exchange": "exchange/taskcluster-queue/v1/task-completed",
+ "routing_key": "primary.aaaaaaaaaaaaaaaaaaaaaa.0.us-east1.111111111111111111.proj-bugbug.compute-smaller.-.AAAAAAAAAAAAAAAAAAAAAA._",
},
)
consumer.on_message(None, message)
@@ -94,9 +94,9 @@ def mock_store_pulse_tasks_classification(args, queue):
nonlocal mock_called
mock_called = True
- monkeypatch.setattr(store_pulse_tasks, 'apply_async', lambda args, queue: None)
+ monkeypatch.setattr(store_pulse_tasks, "apply_async", lambda args, queue: None)
monkeypatch.setattr(
- store_pulse_tasks_classification, 'apply_async', mock_store_pulse_tasks_classification
+ store_pulse_tasks_classification, "apply_async", mock_store_pulse_tasks_classification
)
consumer = JointConsumer(
@@ -112,10 +112,10 @@ def mock_store_pulse_tasks_classification(args, queue):
message = MagicMock()
monkeypatch.setattr(
message,
- 'delivery_info',
+ "delivery_info",
{
- 'exchange': 'exchange/taskcluster-queue/v1/task-completed',
- 'routing_key': 'primary.aaaaaaaaaaaaaaaaaaaaaa.0.us-east1.111111111111111111.proj-mozci.compute-smaller.-.AAAAAAAAAAAAAAAAAAAAAA._',
+ "exchange": "exchange/taskcluster-queue/v1/task-completed",
+ "routing_key": "primary.aaaaaaaaaaaaaaaaaaaaaa.0.us-east1.111111111111111111.proj-mozci.compute-smaller.-.AAAAAAAAAAAAAAAAAAAAAA._",
},
)
consumer.on_message(None, message)
diff --git a/tests/services/test_taskcluster.py b/tests/services/test_taskcluster.py
index 41ae1fdb3d2..d0244cb067b 100644
--- a/tests/services/test_taskcluster.py
+++ b/tests/services/test_taskcluster.py
@@ -10,32 +10,32 @@
TaskclusterModelNullObject,
)
-load_json_fixture = SampleDataJSONLoader('sherlock')
+load_json_fixture = SampleDataJSONLoader("sherlock")
@pytest.fixture(scope="module")
def actions_json():
- return load_json_fixture('initialActions.json')
+ return load_json_fixture("initialActions.json")
@pytest.fixture(scope="module")
def expected_actions_json():
- return load_json_fixture('reducedActions.json')
+ return load_json_fixture("reducedActions.json")
@pytest.fixture(scope="module")
def original_task():
- return load_json_fixture('originalTask.json')
+ return load_json_fixture("originalTask.json")
@pytest.fixture(scope="module")
def expected_backfill_task():
- return load_json_fixture('backfillTask.json')
+ return load_json_fixture("backfillTask.json")
class TestTaskclusterModelImpl:
- FAKE_ROOT_URL = 'https://fakerooturl.org'
- FAKE_OPTIONS = (FAKE_ROOT_URL, 'FAKE_CLIENT_ID', 'FAKE_ACCESS_TOKEN')
+ FAKE_ROOT_URL = "https://fakerooturl.org"
+ FAKE_OPTIONS = (FAKE_ROOT_URL, "FAKE_CLIENT_ID", "FAKE_ACCESS_TOKEN")
def test_can_instantiate_without_credentials(self):
try:
diff --git a/tests/settings.py b/tests/settings.py
index c41e71ea184..b52db70d8df 100644
--- a/tests/settings.py
+++ b/tests/settings.py
@@ -1,9 +1,9 @@
from treeherder.config.settings import * # noqa: F403
DATABASES["default"]["TEST"] = {"NAME": "test_treeherder"} # noqa: F405
-KEY_PREFIX = 'test'
+KEY_PREFIX = "test"
-TREEHERDER_TEST_REPOSITORY_NAME = 'mozilla-central'
+TREEHERDER_TEST_REPOSITORY_NAME = "mozilla-central"
# this makes celery calls synchronous, useful for unit testing
CELERY_TASK_ALWAYS_EAGER = True
@@ -22,7 +22,7 @@
# access. But if we use the defaults in config.settings, we also get the
# ``ModelBackend``, which will try to access the DB. This ensures we don't
# do that, since we don't have any tests that use the ``ModelBackend``.
-AUTHENTICATION_BACKENDS = ('treeherder.auth.backends.AuthBackend',)
+AUTHENTICATION_BACKENDS = ("treeherder.auth.backends.AuthBackend",)
# For Push Health Usage dashboard
NEW_RELIC_INSIGHTS_API_KEY = "123"
@@ -31,7 +31,7 @@
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#show-toolbar-callback
# "You can provide your own function callback(request) which returns True or False."
DEBUG_TOOLBAR_CONFIG = {
- 'SHOW_TOOLBAR_CALLBACK': lambda request: False,
+ "SHOW_TOOLBAR_CALLBACK": lambda request: False,
}
-INSTALLED_APPS.remove('django.contrib.staticfiles') # noqa: F405
+INSTALLED_APPS.remove("django.contrib.staticfiles") # noqa: F405
diff --git a/tests/test_middleware.py b/tests/test_middleware.py
index c6e1e15b8e2..3260b3dc161 100644
--- a/tests/test_middleware.py
+++ b/tests/test_middleware.py
@@ -6,41 +6,41 @@
URLS_IMMUTABLE = [
# Assets generated by Yarn.
- '/assets/2.379789df.css',
- '/assets/dancing_cat.fa5552a5.gif',
- '/assets/fontawesome-webfont.af7ae505.woff2',
- '/assets/fontawesome-webfont.fee66e71.woff',
- '/assets/index.1d85033a.js',
- '/assets/index.1d85033a.js.map',
- '/assets/perf.d7fea1e4.css',
- '/assets/perf.d7fea1e4.css.map',
- '/assets/treeherder-logo.3df97cff.png',
+ "/assets/2.379789df.css",
+ "/assets/dancing_cat.fa5552a5.gif",
+ "/assets/fontawesome-webfont.af7ae505.woff2",
+ "/assets/fontawesome-webfont.fee66e71.woff",
+ "/assets/index.1d85033a.js",
+ "/assets/index.1d85033a.js.map",
+ "/assets/perf.d7fea1e4.css",
+ "/assets/perf.d7fea1e4.css.map",
+ "/assets/treeherder-logo.3df97cff.png",
]
URLS_NOT_IMMUTABLE = [
- '/',
- '/contribute.json',
- '/perf.html',
- '/revision.txt',
- '/tree_open.png',
- '/docs/schema.js',
+ "/",
+ "/contribute.json",
+ "/perf.html",
+ "/revision.txt",
+ "/tree_open.png",
+ "/docs/schema.js",
# The unhashed Yarn/webpack output if using `yarn build --mode development`.
- '/assets/runtime.js',
- '/assets/vendors~index.js',
+ "/assets/runtime.js",
+ "/assets/vendors~index.js",
# The unhashed Django static asset originals (used in development).
- '/static/debug_toolbar/assets/toolbar.css',
- '/static/rest_framework/docs/js/jquery.json-view.min.js',
+ "/static/debug_toolbar/assets/toolbar.css",
+ "/static/rest_framework/docs/js/jquery.json-view.min.js",
]
-@pytest.mark.parametrize('url', URLS_IMMUTABLE)
+@pytest.mark.parametrize("url", URLS_IMMUTABLE)
def test_immutable_file_test_matches(url):
- assert CustomWhiteNoise().immutable_file_test('', url)
+ assert CustomWhiteNoise().immutable_file_test("", url)
-@pytest.mark.parametrize('url', URLS_NOT_IMMUTABLE)
+@pytest.mark.parametrize("url", URLS_NOT_IMMUTABLE)
def test_immutable_file_test_does_not_match(url):
- assert not CustomWhiteNoise().immutable_file_test('', url)
+ assert not CustomWhiteNoise().immutable_file_test("", url)
def test_content_security_policy_header(client):
@@ -48,7 +48,7 @@ def test_content_security_policy_header(client):
# however they won't exist unless `yarn build` has been run first.
# So instead we request an arbitrary static asset from django-rest-framework,
# which will be served with the same headers as our frontend HTML.
- response = client.get('/static/rest_framework/css/default.css')
- assert response.has_header('Content-Security-Policy')
+ response = client.get("/static/rest_framework/css/default.css")
+ assert response.has_header("Content-Security-Policy")
policy_regex = r"default-src 'none'; script-src 'self' 'unsafe-eval' 'report-sample'; .*; report-uri /api/csp-report/"
- assert re.match(policy_regex, response['Content-Security-Policy'])
+ assert re.match(policy_regex, response["Content-Security-Policy"])
diff --git a/tests/test_setup.py b/tests/test_setup.py
index 2198ae7bb90..92c8d3ab492 100644
--- a/tests/test_setup.py
+++ b/tests/test_setup.py
@@ -9,26 +9,26 @@
def test_block_unmocked_requests():
"""Ensure the `block_unmocked_requests` fixture prevents requests from hitting the network."""
- url = 'https://example.com'
+ url = "https://example.com"
- with pytest.raises(RuntimeError, match='Tests must mock all HTTP requests!'):
+ with pytest.raises(RuntimeError, match="Tests must mock all HTTP requests!"):
fetch_text(url)
with responses.RequestsMock() as rsps:
- rsps.add(responses.GET, url, body='Mocked requests still work')
+ rsps.add(responses.GET, url, body="Mocked requests still work")
text = fetch_text(url)
- assert text == 'Mocked requests still work'
+ assert text == "Mocked requests still work"
@pytest.mark.django_db
def test_no_missing_migrations():
"""Check no model changes have been made since the last `./manage.py makemigrations`."""
- call_command('makemigrations', interactive=False, dry_run=True, check_changes=True)
+ call_command("makemigrations", interactive=False, dry_run=True, check_changes=True)
def test_django_cache():
"""Test the Django cache backend & associated server are properly set up."""
- k, v = 'my_key', 'my_value'
+ k, v = "my_key", "my_value"
cache.set(k, v, 10)
assert cache.get(k) == v
@@ -49,4 +49,4 @@ def test_celery_setup():
def test_load_initial_data():
"Test load_initial_data executes properly"
- call_command('load_initial_data')
+ call_command("load_initial_data")
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 02e322deec1..81042a789f1 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -40,10 +40,10 @@ def do_job_ingestion(test_repository, job_data, sample_push, verify_data=True):
push_index = 0
# Modify job structure to sync with the push sample data
- if 'sources' in blob:
- del blob['sources']
+ if "sources" in blob:
+ del blob["sources"]
- blob['revision'] = sample_push[push_index]['revision']
+ blob["revision"] = sample_push[push_index]["revision"]
blobs.append(blob)
@@ -52,14 +52,14 @@ def do_job_ingestion(test_repository, job_data, sample_push, verify_data=True):
# Build data structures to confirm everything is stored
# as expected
if verify_data:
- job = blob['job']
+ job = blob["job"]
build_platforms_ref.add(
"-".join(
[
- job.get('build_platform', {}).get('os_name', 'unknown'),
- job.get('build_platform', {}).get('platform', 'unknown'),
- job.get('build_platform', {}).get('architecture', 'unknown'),
+ job.get("build_platform", {}).get("os_name", "unknown"),
+ job.get("build_platform", {}).get("platform", "unknown"),
+ job.get("build_platform", {}).get("architecture", "unknown"),
]
)
)
@@ -67,30 +67,30 @@ def do_job_ingestion(test_repository, job_data, sample_push, verify_data=True):
machine_platforms_ref.add(
"-".join(
[
- job.get('machine_platform', {}).get('os_name', 'unknown'),
- job.get('machine_platform', {}).get('platform', 'unknown'),
- job.get('machine_platform', {}).get('architecture', 'unknown'),
+ job.get("machine_platform", {}).get("os_name", "unknown"),
+ job.get("machine_platform", {}).get("platform", "unknown"),
+ job.get("machine_platform", {}).get("architecture", "unknown"),
]
)
)
- machines_ref.add(job.get('machine', 'unknown'))
+ machines_ref.add(job.get("machine", "unknown"))
- options_ref = options_ref.union(job.get('option_collection', []).keys())
+ options_ref = options_ref.union(job.get("option_collection", []).keys())
- job_types_ref.add(job.get('name', 'unknown'))
- products_ref.add(job.get('product_name', 'unknown'))
- pushes_ref.add(blob['revision'])
+ job_types_ref.add(job.get("name", "unknown"))
+ products_ref.add(job.get("product_name", "unknown"))
+ pushes_ref.add(blob["revision"])
- log_url_list = job.get('log_references', [])
+ log_url_list = job.get("log_references", [])
for log_data in log_url_list:
- log_urls_ref.add(log_data['url'])
+ log_urls_ref.add(log_data["url"])
- artifact_name = job.get('artifact', {}).get('name')
+ artifact_name = job.get("artifact", {}).get("name")
if artifact_name:
- artifacts_ref[artifact_name] = job.get('artifact')
+ artifacts_ref[artifact_name] = job.get("artifact")
- superseded = blob.get('superseded', [])
+ superseded = blob.get("superseded", [])
superseded_job_guids.update(superseded)
# Store the modified json blobs
@@ -132,40 +132,40 @@ def verify_machine_platforms(machine_platforms_ref):
def verify_machines(machines_ref):
- machines = models.Machine.objects.all().values_list('name', flat=True)
+ machines = models.Machine.objects.all().values_list("name", flat=True)
assert machines_ref.issubset(machines)
def verify_options(options_ref):
- options = models.Option.objects.all().values_list('name', flat=True)
+ options = models.Option.objects.all().values_list("name", flat=True)
assert options_ref.issubset(options)
def verify_job_types(job_types_ref):
- job_types = models.JobType.objects.all().values_list('name', flat=True)
+ job_types = models.JobType.objects.all().values_list("name", flat=True)
assert job_types_ref.issubset(job_types)
def verify_products(products_ref):
- products = models.Product.objects.all().values_list('name', flat=True)
+ products = models.Product.objects.all().values_list("name", flat=True)
assert products_ref.issubset(products)
def verify_pushes(pushes_ref):
- return pushes_ref.issubset(models.Push.objects.values_list('revision', flat=True))
+ return pushes_ref.issubset(models.Push.objects.values_list("revision", flat=True))
def verify_log_urls(log_urls_ref):
- log_urls = set(models.JobLog.objects.values_list('url', flat=True))
+ log_urls = set(models.JobLog.objects.values_list("url", flat=True))
assert log_urls_ref.issubset(log_urls)
def verify_superseded(expected_superseded_job_guids):
- super_seeded_guids = models.Job.objects.filter(result='superseded').values_list(
- 'guid', flat=True
+ super_seeded_guids = models.Job.objects.filter(result="superseded").values_list(
+ "guid", flat=True
)
assert set(super_seeded_guids) == expected_superseded_job_guids
@@ -197,10 +197,10 @@ def create_generic_job(guid, repository, push_id, generic_reference_data, tier=N
job_group=generic_reference_data.job_group,
product=generic_reference_data.product,
failure_classification_id=1,
- who='testuser@foo.com',
- reason='success',
- result='finished',
- state='completed',
+ who="testuser@foo.com",
+ reason="success",
+ result="finished",
+ state="completed",
submit_time=job_time,
start_time=job_time,
end_time=job_time,
@@ -215,15 +215,15 @@ def add_log_response(filename):
log_path = SampleData().get_log_path(filename)
log_url = "http://my-log.mozilla.org/{}".format(filename)
- with open(log_path, 'rb') as log_file:
+ with open(log_path, "rb") as log_file:
content = log_file.read()
responses.add(
responses.GET,
log_url,
body=content,
adding_headers={
- 'Content-Encoding': 'gzip',
- 'Content-Length': str(len(content)),
+ "Content-Encoding": "gzip",
+ "Content-Length": str(len(content)),
},
)
return log_url
diff --git a/tests/test_worker/test_stats.py b/tests/test_worker/test_stats.py
index 1796e742553..2ff9bff986d 100644
--- a/tests/test_worker/test_stats.py
+++ b/tests/test_worker/test_stats.py
@@ -7,7 +7,7 @@
@pytest.mark.django_db
-@patch('treeherder.workers.stats.get_stats_client')
+@patch("treeherder.workers.stats.get_stats_client")
def test_publish_stats_nothing_to_do(get_worker_mock, django_assert_num_queries, caplog):
statsd_client = MagicMock()
get_worker_mock.return_value = statsd_client
@@ -16,15 +16,15 @@ def test_publish_stats_nothing_to_do(get_worker_mock, django_assert_num_queries,
with django_assert_num_queries(2):
publish_stats()
assert [(level, message) for _, level, message in caplog.record_tuples] == [
- (20, 'Publishing runtime statistics to statsd'),
- (20, 'Ingested 0 pushes'),
- (20, 'Ingested 0 jobs in total'),
+ (20, "Publishing runtime statistics to statsd"),
+ (20, "Ingested 0 pushes"),
+ (20, "Ingested 0 jobs in total"),
]
assert statsd_client.call_args_list == []
@pytest.mark.django_db
-@patch('treeherder.workers.stats.get_stats_client')
+@patch("treeherder.workers.stats.get_stats_client")
def test_publish_stats(
get_worker_mock, eleven_jobs_stored_new_date, django_assert_num_queries, caplog, settings
):
@@ -40,13 +40,13 @@ def test_publish_stats(
with django_assert_num_queries(2):
publish_stats()
assert [(level, message) for _, level, message in caplog.record_tuples] == [
- (20, 'Publishing runtime statistics to statsd'),
- (20, 'Ingested 10 pushes'),
- (20, 'Ingested 11 jobs in total'),
+ (20, "Publishing runtime statistics to statsd"),
+ (20, "Ingested 10 pushes"),
+ (20, "Ingested 11 jobs in total"),
]
assert statsd_client.incr.call_args_list == [
- call('push', 10),
- call('jobs', 11),
- call('jobs_repo.mozilla-central', 11),
- call('jobs_state.completed', 11),
+ call("push", 10),
+ call("jobs", 11),
+ call("jobs_repo.mozilla-central", 11),
+ call("jobs_state.completed", 11),
]
diff --git a/tests/test_worker/test_task.py b/tests/test_worker/test_task.py
index 5b482fc4dac..783bd54d859 100644
--- a/tests/test_worker/test_task.py
+++ b/tests/test_worker/test_task.py
@@ -58,7 +58,7 @@ def test_retryable_task_throws_retry():
with pytest.raises(Retry) as e:
throwing_task_should_retry.delay()
- assert str(e.value) == 'Retry in 10s: OperationalError()'
+ assert str(e.value) == "Retry in 10s: OperationalError()"
# The task is only called once, the Retry() exception
# will signal to the worker that the task needs to be tried again later
diff --git a/tests/utils/test_taskcluster_download_artifact.py b/tests/utils/test_taskcluster_download_artifact.py
index dd3fb4175c8..86af0eb8433 100644
--- a/tests/utils/test_taskcluster_download_artifact.py
+++ b/tests/utils/test_taskcluster_download_artifact.py
@@ -7,32 +7,32 @@
@responses.activate
@pytest.mark.parametrize(
- 'path, response_config, expected_result',
+ "path, response_config, expected_result",
[
[
- 'my_file.json',
- {'json': {'key': 'value'}, 'content_type': 'application/json'},
- {'key': 'value'},
+ "my_file.json",
+ {"json": {"key": "value"}, "content_type": "application/json"},
+ {"key": "value"},
],
[
- 'my_file.yml',
- {'body': 'key:\n - value1\n - value2', 'content_type': 'text/plain'},
- {'key': ['value1', 'value2']},
+ "my_file.yml",
+ {"body": "key:\n - value1\n - value2", "content_type": "text/plain"},
+ {"key": ["value1", "value2"]},
],
[
- 'my_file.txt',
- {'body': 'some text from a file', 'content_type': 'text/plain'},
- 'some text from a file',
+ "my_file.txt",
+ {"body": "some text from a file", "content_type": "text/plain"},
+ "some text from a file",
],
],
)
def test_download_artifact(path, response_config, expected_result):
- root_url = 'https://taskcluster.net'
- task_id = 'A35mWTRuQmyj88yMnIF0fA'
+ root_url = "https://taskcluster.net"
+ task_id = "A35mWTRuQmyj88yMnIF0fA"
responses.add(
responses.GET,
- f'{root_url}/api/queue/v1/task/{task_id}/artifacts/{path}',
+ f"{root_url}/api/queue/v1/task/{task_id}/artifacts/{path}",
**response_config,
status=200,
)
diff --git a/tests/utils/test_taskcluster_lib_scopes.py b/tests/utils/test_taskcluster_lib_scopes.py
index c881a740867..0bcf5d288de 100644
--- a/tests/utils/test_taskcluster_lib_scopes.py
+++ b/tests/utils/test_taskcluster_lib_scopes.py
@@ -5,31 +5,31 @@
# satisfiesExpression()
@pytest.mark.parametrize(
- 'scopeset, expression',
+ "scopeset, expression",
[
- [[], {'AllOf': []}],
- [['A'], {'AllOf': ['A']}],
- [['A', 'B'], 'A'],
- [['a*', 'b*', 'c*'], 'abc'],
- [['abc'], {'AnyOf': ['abc', 'def']}],
- [['def'], {'AnyOf': ['abc', 'def']}],
- [['abc', 'def'], {'AnyOf': ['abc', 'def']}],
- [['abc*'], {'AnyOf': ['abc', 'def']}],
- [['abc*'], {'AnyOf': ['abc']}],
- [['abc*', 'def*'], {'AnyOf': ['abc', 'def']}],
- [['foo'], {'AllOf': [{'AnyOf': [{'AllOf': ['foo']}, {'AllOf': ['bar']}]}]}],
- [['a*', 'b*', 'c*'], {'AnyOf': ['cfoo', 'dfoo']}],
- [['a*', 'b*', 'c*'], {'AnyOf': ['bx', 'by']}],
- [['a*', 'b*', 'c*'], {'AllOf': ['bx', 'cx']}],
+ [[], {"AllOf": []}],
+ [["A"], {"AllOf": ["A"]}],
+ [["A", "B"], "A"],
+ [["a*", "b*", "c*"], "abc"],
+ [["abc"], {"AnyOf": ["abc", "def"]}],
+ [["def"], {"AnyOf": ["abc", "def"]}],
+ [["abc", "def"], {"AnyOf": ["abc", "def"]}],
+ [["abc*"], {"AnyOf": ["abc", "def"]}],
+ [["abc*"], {"AnyOf": ["abc"]}],
+ [["abc*", "def*"], {"AnyOf": ["abc", "def"]}],
+ [["foo"], {"AllOf": [{"AnyOf": [{"AllOf": ["foo"]}, {"AllOf": ["bar"]}]}]}],
+ [["a*", "b*", "c*"], {"AnyOf": ["cfoo", "dfoo"]}],
+ [["a*", "b*", "c*"], {"AnyOf": ["bx", "by"]}],
+ [["a*", "b*", "c*"], {"AllOf": ["bx", "cx"]}],
# complex expression with only
# some AnyOf branches matching
[
- ['a*', 'b*', 'c*'],
+ ["a*", "b*", "c*"],
{
- 'AnyOf': [
- {'AllOf': ['ax', 'jx']}, # doesn't match
- {'AllOf': ['bx', 'cx']}, # does match
- 'bbb',
+ "AnyOf": [
+ {"AllOf": ["ax", "jx"]}, # doesn't match
+ {"AllOf": ["bx", "cx"]}, # does match
+ "bbb",
]
},
],
@@ -40,21 +40,21 @@ def test_expression_is_satisfied(scopeset, expression):
@pytest.mark.parametrize(
- 'scopeset, expression',
+ "scopeset, expression",
[
- [[], {'AnyOf': []}],
- [[], 'missing-scope'],
- [['wrong-scope'], 'missing-scope'],
- [['ghi'], {'AnyOf': ['abc', 'def']}],
- [['ghi*'], {'AnyOf': ['abc', 'def']}],
- [['ghi', 'fff'], {'AnyOf': ['abc', 'def']}],
- [['ghi*', 'fff*'], {'AnyOf': ['abc', 'def']}],
- [['abc'], {'AnyOf': ['ghi']}],
- [['abc*'], {'AllOf': ['abc', 'ghi']}],
- [[''], {'AnyOf': ['abc', 'def']}],
- [['abc:def'], {'AnyOf': ['abc', 'def']}],
- [['xyz', 'abc'], {'AllOf': [{'AnyOf': [{'AllOf': ['foo']}, {'AllOf': ['bar']}]}]}],
- [['a*', 'b*', 'c*'], {'AllOf': ['bx', 'cx', {'AnyOf': ['xxx', 'yyyy']}]}],
+ [[], {"AnyOf": []}],
+ [[], "missing-scope"],
+ [["wrong-scope"], "missing-scope"],
+ [["ghi"], {"AnyOf": ["abc", "def"]}],
+ [["ghi*"], {"AnyOf": ["abc", "def"]}],
+ [["ghi", "fff"], {"AnyOf": ["abc", "def"]}],
+ [["ghi*", "fff*"], {"AnyOf": ["abc", "def"]}],
+ [["abc"], {"AnyOf": ["ghi"]}],
+ [["abc*"], {"AllOf": ["abc", "ghi"]}],
+ [[""], {"AnyOf": ["abc", "def"]}],
+ [["abc:def"], {"AnyOf": ["abc", "def"]}],
+ [["xyz", "abc"], {"AllOf": [{"AnyOf": [{"AllOf": ["foo"]}, {"AllOf": ["bar"]}]}]}],
+ [["a*", "b*", "c*"], {"AllOf": ["bx", "cx", {"AnyOf": ["xxx", "yyyy"]}]}],
],
)
def test_expression_is_not_satisfied(scopeset, expression):
@@ -62,34 +62,34 @@ def test_expression_is_not_satisfied(scopeset, expression):
@pytest.mark.parametrize(
- 'scopeset',
+ "scopeset",
[
None,
- 'scopeset_argument',
- ('scopeset', 'argument'),
- {'scopeset', 'argument'},
+ "scopeset_argument",
+ ("scopeset", "argument"),
+ {"scopeset", "argument"},
],
)
def test_wrong_scopeset_type_raises_exception(scopeset):
with pytest.raises(TypeError):
- satisfiesExpression(scopeset, 'in-tree:hook-action:{hook_group_id}/{hook_id}')
+ satisfiesExpression(scopeset, "in-tree:hook-action:{hook_group_id}/{hook_id}")
# patternMatch()
def test_identical_scope_and_pattern_are_matching():
- assert patternMatch('mock:scope', 'mock:scope') is True
+ assert patternMatch("mock:scope", "mock:scope") is True
@pytest.mark.parametrize(
- 'pattern, scope', [('matching*', 'matching'), ('matching*', 'matching/scope')]
+ "pattern, scope", [("matching*", "matching"), ("matching*", "matching/scope")]
)
def test_starred_patterns_are_matching(pattern, scope):
assert patternMatch(pattern, scope) is True
@pytest.mark.parametrize(
- 'pattern, scope',
- [('matching*', 'mismatching'), ('match*ing', 'matching'), ('*matching', 'matching')],
+ "pattern, scope",
+ [("matching*", "mismatching"), ("match*ing", "matching"), ("*matching", "matching")],
)
def test_starred_patterns_dont_matching(pattern, scope):
assert not patternMatch(pattern, scope)
diff --git a/tests/webapp/api/test_auth.py b/tests/webapp/api/test_auth.py
index 22b0cac4bf3..f598e6d330a 100644
--- a/tests/webapp/api/test_auth.py
+++ b/tests/webapp/api/test_auth.py
@@ -19,14 +19,14 @@ class AuthenticatedView(APIView):
"""This inherits `IsAuthenticatedOrReadOnly` due to `DEFAULT_PERMISSION_CLASSES`."""
def get(self, request, *args, **kwargs):
- return Response({'foo': 'bar'})
+ return Response({"foo": "bar"})
def post(self, request, *args, **kwargs):
- return Response({'foo': 'bar'})
+ return Response({"foo": "bar"})
factory = APIRequestFactory()
-url = 'http://testserver/'
+url = "http://testserver/"
def test_get_no_auth():
@@ -34,7 +34,7 @@ def test_get_no_auth():
view = AuthenticatedView.as_view()
response = view(request)
assert response.status_code == status.HTTP_200_OK
- assert response.data == {'foo': 'bar'}
+ assert response.data == {"foo": "bar"}
def test_post_no_auth():
@@ -42,7 +42,7 @@ def test_post_no_auth():
view = AuthenticatedView.as_view()
response = view(request)
assert response.status_code == status.HTTP_403_FORBIDDEN
- assert response.data == {'detail': 'Authentication credentials were not provided.'}
+ assert response.data == {"detail": "Authentication credentials were not provided."}
# Auth Login and Logout Tests
@@ -50,13 +50,13 @@ def test_post_no_auth():
@pytest.mark.django_db
@pytest.mark.parametrize(
- ('id_token_sub', 'id_token_email', 'expected_username'),
+ ("id_token_sub", "id_token_email", "expected_username"),
[
- ('ad|Mozilla-LDAP|biped', 'biped@mozilla.com', 'mozilla-ldap/biped@mozilla.com'),
- ('email', 'biped@mozilla.com', 'email/biped@mozilla.com'),
- ('oauth2|biped', 'biped@mozilla.com', 'oauth2/biped@mozilla.com'),
- ('github|0000', 'biped@gmail.com', 'github/biped@gmail.com'),
- ('google-oauth2|0000', 'biped@mozilla.com', 'google/biped@mozilla.com'),
+ ("ad|Mozilla-LDAP|biped", "biped@mozilla.com", "mozilla-ldap/biped@mozilla.com"),
+ ("email", "biped@mozilla.com", "email/biped@mozilla.com"),
+ ("oauth2|biped", "biped@mozilla.com", "oauth2/biped@mozilla.com"),
+ ("github|0000", "biped@gmail.com", "github/biped@gmail.com"),
+ ("google-oauth2|0000", "biped@mozilla.com", "google/biped@mozilla.com"),
],
)
def test_login_logout_relogin(client, monkeypatch, id_token_sub, id_token_email, expected_username):
@@ -69,9 +69,9 @@ def test_login_logout_relogin(client, monkeypatch, id_token_sub, id_token_email,
access_token_expiration_timestamp = now_in_seconds + one_hour_in_seconds
def userinfo_mock(*args, **kwargs):
- return {'sub': id_token_sub, 'email': id_token_email, 'exp': id_token_expiration_timestamp}
+ return {"sub": id_token_sub, "email": id_token_email, "exp": id_token_expiration_timestamp}
- monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
+ monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)
assert auth_session_key not in client.session
assert User.objects.count() == 0
@@ -80,17 +80,17 @@ def userinfo_mock(*args, **kwargs):
# which is then associated with their Django session.
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer meh',
- HTTP_ID_TOKEN='meh',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer meh",
+ HTTP_ID_TOKEN="meh",
HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
)
assert resp.status_code == 200
assert resp.json() == {
- 'username': expected_username,
- 'email': id_token_email,
- 'is_staff': False,
- 'is_superuser': False,
+ "username": expected_username,
+ "email": id_token_email,
+ "is_staff": False,
+ "is_superuser": False,
}
assert auth_session_key in client.session
# Uses a tolerance of up to 5 seconds to account for rounding/the time the test takes to run.
@@ -104,20 +104,20 @@ def userinfo_mock(*args, **kwargs):
# Logging out should disassociate the user from the Django session.
- resp = client.get(reverse('auth-logout'))
+ resp = client.get(reverse("auth-logout"))
assert resp.status_code == 200
assert auth_session_key not in client.session
# Logging in again should associate the existing user with the Django session.
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer meh',
- HTTP_ID_TOKEN='meh',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer meh",
+ HTTP_ID_TOKEN="meh",
HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
)
assert resp.status_code == 200
- assert resp.json()['username'] == expected_username
+ assert resp.json()["username"] == expected_username
assert auth_session_key in client.session
assert client.session.get_expiry_age() == pytest.approx(one_hour_in_seconds, abs=5)
assert User.objects.count() == 1
@@ -134,19 +134,19 @@ def test_login_same_email_different_provider(test_ldap_user, client, monkeypatch
access_token_expiration_timestamp = now_in_seconds + one_hour_in_seconds
def userinfo_mock(*args, **kwargs):
- return {'sub': 'email', 'email': test_ldap_user.email, 'exp': id_token_expiration_timestamp}
+ return {"sub": "email", "email": test_ldap_user.email, "exp": id_token_expiration_timestamp}
- monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
+ monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer meh',
- HTTP_ID_TOKEN='meh',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer meh",
+ HTTP_ID_TOKEN="meh",
HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
)
assert resp.status_code == 200
- assert resp.json()['username'] == 'email/user@foo.com'
- assert resp.json()['email'] == test_ldap_user.email
+ assert resp.json()["username"] == "email/user@foo.com"
+ assert resp.json()["email"] == test_ldap_user.email
def test_login_unknown_identity_provider(client, monkeypatch):
@@ -156,9 +156,9 @@ def test_login_unknown_identity_provider(client, monkeypatch):
access_token_expiration_timestamp = now_in_seconds + one_hour_in_seconds
def userinfo_mock(*args, **kwargs):
- return {'sub': 'bad', 'email': 'foo@bar.com', 'exp': id_token_expiration_timestamp}
+ return {"sub": "bad", "email": "foo@bar.com", "exp": id_token_expiration_timestamp}
- monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
+ monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)
resp = client.get(
reverse("auth-login"),
@@ -179,12 +179,12 @@ def test_login_not_active(test_ldap_user, client, monkeypatch):
def userinfo_mock(*args, **kwargs):
return {
- 'sub': 'Mozilla-LDAP',
- 'email': test_ldap_user.email,
- 'exp': id_token_expiration_timestamp,
+ "sub": "Mozilla-LDAP",
+ "email": test_ldap_user.email,
+ "exp": id_token_expiration_timestamp,
}
- monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
+ monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)
test_ldap_user.is_active = False
test_ldap_user.save()
@@ -206,45 +206,45 @@ def test_login_authorization_header_missing(client):
@pytest.mark.parametrize(
- 'auth_header_value',
+ "auth_header_value",
[
- 'foo',
- 'Bearer ',
- 'Bearer foo bar',
+ "foo",
+ "Bearer ",
+ "Bearer foo bar",
],
)
def test_login_authorization_header_malformed(client, auth_header_value):
resp = client.get(
- reverse('auth-login'),
+ reverse("auth-login"),
HTTP_AUTHORIZATION=auth_header_value,
)
assert resp.status_code == 403
- assert resp.json()['detail'] == "Authorization header must be of form 'Bearer {token}'"
+ assert resp.json()["detail"] == "Authorization header must be of form 'Bearer {token}'"
def test_login_id_token_header_missing(client):
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer abc',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer abc",
)
assert resp.status_code == 403
- assert resp.json()['detail'] == 'Id-Token header is expected'
+ assert resp.json()["detail"] == "Id-Token header is expected"
def test_login_id_token_malformed(client):
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer abc',
- HTTP_ID_TOKEN='aaa',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer abc",
+ HTTP_ID_TOKEN="aaa",
)
assert resp.status_code == 403
- assert resp.json()['detail'] == 'Unable to decode the Id token header'
+ assert resp.json()["detail"] == "Unable to decode the Id token header"
def test_login_id_token_missing_rsa_key_id(client):
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer abc',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer abc",
HTTP_ID_TOKEN=(
# Token generated using:
# https://jwt.io/#debugger-io
@@ -254,19 +254,19 @@ def test_login_id_token_missing_rsa_key_id(client):
# "typ": "JWT"
# }
# (and default payload)
- 'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.'
- + 'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.'
- + 'SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c'
+ "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9."
+ + "eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ."
+ + "SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c"
),
)
assert resp.status_code == 403
- assert resp.json()['detail'] == 'Id token header missing RSA key ID'
+ assert resp.json()["detail"] == "Id token header missing RSA key ID"
def test_login_id_token_unknown_rsa_key_id(client):
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer abc',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer abc",
HTTP_ID_TOKEN=(
# Token generated using:
# https://jwt.io/#debugger-io
@@ -277,19 +277,19 @@ def test_login_id_token_unknown_rsa_key_id(client):
# "kid": "1234"
# }
# (and default payload)
- 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjEyMzQifQ.'
- + 'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.'
- + 'Fghd96rsPbzEOGv0mMn4DDBf86PiW_ztPcAbDQoeA6s'
+ "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IjEyMzQifQ."
+ + "eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ."
+ + "Fghd96rsPbzEOGv0mMn4DDBf86PiW_ztPcAbDQoeA6s"
),
)
assert resp.status_code == 403
- assert resp.json()['detail'] == 'Id token using unrecognised RSA key ID'
+ assert resp.json()["detail"] == "Id token using unrecognised RSA key ID"
def test_login_id_token_invalid_signature(client):
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer foo',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer foo",
HTTP_ID_TOKEN=(
# Token generated using:
# https://jwt.io/#debugger-io
@@ -300,14 +300,14 @@ def test_login_id_token_invalid_signature(client):
# "kid": "MkZDNDcyRkNGRTFDNjlBNjZFOEJBN0ZBNzJBQTNEMDhCMEEwNkFGOA"
# }
# (and default payload)
- 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6Ik1rWkRORGN5UmtOR1JURkROamxCTmp'
- + 'aRk9FSkJOMFpCTnpKQlFUTkVNRGhDTUVFd05rRkdPQSJ9.'
- + 'eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.'
- + 'this_signature_is_not_valid'
+ "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6Ik1rWkRORGN5UmtOR1JURkROamxCTmp"
+ + "aRk9FSkJOMFpCTnpKQlFUTkVNRGhDTUVFd05rRkdPQSJ9."
+ + "eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ."
+ + "this_signature_is_not_valid"
),
)
assert resp.status_code == 403
- assert resp.json()['detail'] == 'Invalid header: Unable to parse authentication'
+ assert resp.json()["detail"] == "Invalid header: Unable to parse authentication"
def test_login_access_token_expiry_header_missing(client, monkeypatch):
@@ -315,17 +315,17 @@ def test_login_access_token_expiry_header_missing(client, monkeypatch):
id_token_expiration_timestamp = now_in_seconds + one_day_in_seconds
def userinfo_mock(*args, **kwargs):
- return {'sub': 'Mozilla-LDAP', 'email': 'x@y.z', 'exp': id_token_expiration_timestamp}
+ return {"sub": "Mozilla-LDAP", "email": "x@y.z", "exp": id_token_expiration_timestamp}
- monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
+ monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer foo',
- HTTP_ID_TOKEN='bar',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer foo",
+ HTTP_ID_TOKEN="bar",
)
assert resp.status_code == 403
- assert resp.json()['detail'] == 'Access-Token-Expires-At header is expected'
+ assert resp.json()["detail"] == "Access-Token-Expires-At header is expected"
def test_login_access_token_expiry_header_malformed(client, monkeypatch):
@@ -333,18 +333,18 @@ def test_login_access_token_expiry_header_malformed(client, monkeypatch):
id_token_expiration_timestamp = now_in_seconds + one_day_in_seconds
def userinfo_mock(*args, **kwargs):
- return {'sub': 'Mozilla-LDAP', 'email': 'x@y.z', 'exp': id_token_expiration_timestamp}
+ return {"sub": "Mozilla-LDAP", "email": "x@y.z", "exp": id_token_expiration_timestamp}
- monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
+ monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer foo',
- HTTP_ID_TOKEN='bar',
- HTTP_ACCESS_TOKEN_EXPIRES_AT='aaa',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer foo",
+ HTTP_ID_TOKEN="bar",
+ HTTP_ACCESS_TOKEN_EXPIRES_AT="aaa",
)
assert resp.status_code == 403
- assert resp.json()['detail'] == 'Access-Token-Expires-At header value is invalid'
+ assert resp.json()["detail"] == "Access-Token-Expires-At header value is invalid"
def test_login_access_token_expired(client, monkeypatch):
@@ -353,18 +353,18 @@ def test_login_access_token_expired(client, monkeypatch):
access_token_expiration_timestamp = now_in_seconds - 30
def userinfo_mock(*args, **kwargs):
- return {'sub': 'Mozilla-LDAP', 'email': 'x@y.z', 'exp': id_token_expiration_timestamp}
+ return {"sub": "Mozilla-LDAP", "email": "x@y.z", "exp": id_token_expiration_timestamp}
- monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
+ monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer foo',
- HTTP_ID_TOKEN='bar',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer foo",
+ HTTP_ID_TOKEN="bar",
HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
)
assert resp.status_code == 403
- assert resp.json()['detail'] == 'Session expiry time has already passed!'
+ assert resp.json()["detail"] == "Session expiry time has already passed!"
def test_login_id_token_expires_before_access_token(test_ldap_user, client, monkeypatch):
@@ -377,14 +377,14 @@ def test_login_id_token_expires_before_access_token(test_ldap_user, client, monk
access_token_expiration_timestamp = now_in_seconds + one_day_in_seconds
def userinfo_mock(*args, **kwargs):
- return {'sub': 'email', 'email': test_ldap_user.email, 'exp': id_token_expiration_timestamp}
+ return {"sub": "email", "email": test_ldap_user.email, "exp": id_token_expiration_timestamp}
- monkeypatch.setattr(AuthBackend, '_get_user_info', userinfo_mock)
+ monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)
resp = client.get(
- reverse('auth-login'),
- HTTP_AUTHORIZATION='Bearer meh',
- HTTP_ID_TOKEN='meh',
+ reverse("auth-login"),
+ HTTP_AUTHORIZATION="Bearer meh",
+ HTTP_ID_TOKEN="meh",
HTTP_ACCESS_TOKEN_EXPIRES_AT=str(access_token_expiration_timestamp),
)
assert resp.status_code == 200
diff --git a/tests/webapp/api/test_bug_creation.py b/tests/webapp/api/test_bug_creation.py
index 5a55b409d38..7206d6f504c 100644
--- a/tests/webapp/api/test_bug_creation.py
+++ b/tests/webapp/api/test_bug_creation.py
@@ -10,149 +10,149 @@
def test_bugzilla_components_for_path(client, test_job):
- BugzillaComponent.objects.create(product='Mock Product 1', component='Mock Component 1')
+ BugzillaComponent.objects.create(product="Mock Product 1", component="Mock Component 1")
FilesBugzillaMap.objects.create(
- path='mock/folder/file_1.extension',
- file_name='file_1.extension',
+ path="mock/folder/file_1.extension",
+ file_name="file_1.extension",
bugzilla_component=BugzillaComponent.objects.last(),
)
- URL_BASE = reverse('bugzilla-component-list')
+ URL_BASE = reverse("bugzilla-component-list")
- EXPECTED_MOCK1 = [{'product': 'Mock Product 1', 'component': 'Mock Component 1'}]
+ EXPECTED_MOCK1 = [{"product": "Mock Product 1", "component": "Mock Component 1"}]
- resp = client.get(URL_BASE + '?path=file_1.extension')
+ resp = client.get(URL_BASE + "?path=file_1.extension")
assert resp.status_code == 200
assert resp.json() == EXPECTED_MOCK1
- resp = client.get(URL_BASE + '?path=file_2.extension')
+ resp = client.get(URL_BASE + "?path=file_2.extension")
assert resp.json() == []
- resp = client.get(URL_BASE + '?path=ile_2.extension')
+ resp = client.get(URL_BASE + "?path=ile_2.extension")
assert resp.json() == []
- resp = client.get(URL_BASE + '?path=file_1')
+ resp = client.get(URL_BASE + "?path=file_1")
assert resp.json() == EXPECTED_MOCK1
- resp = client.get(URL_BASE + '?path=mock/folder/file_1.extension')
+ resp = client.get(URL_BASE + "?path=mock/folder/file_1.extension")
assert resp.json() == EXPECTED_MOCK1
- resp = client.get(URL_BASE + '?path=other_mock/other_folder/file_1.extension')
+ resp = client.get(URL_BASE + "?path=other_mock/other_folder/file_1.extension")
# Should also pass because search falls back to file name if no match for path.
assert resp.json() == EXPECTED_MOCK1
- resp = client.get(URL_BASE + '?path=folder/file_1.extension')
+ resp = client.get(URL_BASE + "?path=folder/file_1.extension")
assert resp.json() == EXPECTED_MOCK1
- resp = client.get(URL_BASE + '?path=folder/file_1.other_extension')
+ resp = client.get(URL_BASE + "?path=folder/file_1.other_extension")
assert resp.json() == EXPECTED_MOCK1
- resp = client.get(URL_BASE + '?path=completely.unrelated')
+ resp = client.get(URL_BASE + "?path=completely.unrelated")
assert resp.json() == []
- BugzillaComponent.objects.create(product='Mock Product 1', component='Mock Component 2')
+ BugzillaComponent.objects.create(product="Mock Product 1", component="Mock Component 2")
FilesBugzillaMap.objects.create(
- path='mock/folder_2/file_1.extension',
- file_name='file_1.extension',
+ path="mock/folder_2/file_1.extension",
+ file_name="file_1.extension",
bugzilla_component=BugzillaComponent.objects.last(),
)
- EXPECTED_MOCK2 = [{'product': 'Mock Product 1', 'component': 'Mock Component 2'}]
+ EXPECTED_MOCK2 = [{"product": "Mock Product 1", "component": "Mock Component 2"}]
EXPECTED_MOCK1_MOCK2 = [
- {'product': 'Mock Product 1', 'component': 'Mock Component 1'},
- {'product': 'Mock Product 1', 'component': 'Mock Component 2'},
+ {"product": "Mock Product 1", "component": "Mock Component 1"},
+ {"product": "Mock Product 1", "component": "Mock Component 2"},
]
- resp = client.get(URL_BASE + '?path=file_1.extension')
+ resp = client.get(URL_BASE + "?path=file_1.extension")
assert resp.json() == EXPECTED_MOCK1_MOCK2
- resp = client.get(URL_BASE + '?path=mock/folder/file_1.extension')
+ resp = client.get(URL_BASE + "?path=mock/folder/file_1.extension")
assert resp.json() == EXPECTED_MOCK1
- resp = client.get(URL_BASE + '?path=mock/folder_2/file_1.extension')
+ resp = client.get(URL_BASE + "?path=mock/folder_2/file_1.extension")
assert resp.json() == EXPECTED_MOCK2
- resp = client.get(URL_BASE + '?path=other_mock/other_folder/file_1.extension')
+ resp = client.get(URL_BASE + "?path=other_mock/other_folder/file_1.extension")
# Should also pass because search falls back to file name if no match for path.
assert resp.json() == EXPECTED_MOCK1_MOCK2
- BugzillaComponent.objects.create(product='Mock Product 3', component='Mock Component 3')
+ BugzillaComponent.objects.create(product="Mock Product 3", component="Mock Component 3")
FilesBugzillaMap.objects.create(
- path='mock_3/folder_3/other.file.js',
- file_name='other.file.js',
+ path="mock_3/folder_3/other.file.js",
+ file_name="other.file.js",
bugzilla_component=BugzillaComponent.objects.last(),
)
- EXPECTED_MOCK3 = [{'product': 'Mock Product 3', 'component': 'Mock Component 3'}]
+ EXPECTED_MOCK3 = [{"product": "Mock Product 3", "component": "Mock Component 3"}]
- resp = client.get(URL_BASE + '?path=other.file.js')
+ resp = client.get(URL_BASE + "?path=other.file.js")
assert resp.json() == EXPECTED_MOCK3
- resp = client.get(URL_BASE + '?path=other.file')
+ resp = client.get(URL_BASE + "?path=other.file")
assert resp.json() == EXPECTED_MOCK3
- resp = client.get(URL_BASE + '?path=other')
+ resp = client.get(URL_BASE + "?path=other")
assert resp.json() == EXPECTED_MOCK3
- BugzillaComponent.objects.create(product='Mock Product 4', component='Mock Component 4')
+ BugzillaComponent.objects.create(product="Mock Product 4", component="Mock Component 4")
FilesBugzillaMap.objects.create(
- path='mock_3/folder_3/other.extension',
- file_name='other.extension',
+ path="mock_3/folder_3/other.extension",
+ file_name="other.extension",
bugzilla_component=BugzillaComponent.objects.last(),
)
- EXPECTED_MOCK4 = [{'product': 'Mock Product 4', 'component': 'Mock Component 4'}]
+ EXPECTED_MOCK4 = [{"product": "Mock Product 4", "component": "Mock Component 4"}]
EXPECTED_MOCK3_MOCK4 = [
- {'product': 'Mock Product 3', 'component': 'Mock Component 3'},
- {'product': 'Mock Product 4', 'component': 'Mock Component 4'},
+ {"product": "Mock Product 3", "component": "Mock Component 3"},
+ {"product": "Mock Product 4", "component": "Mock Component 4"},
]
- resp = client.get(URL_BASE + '?path=other.file.js')
+ resp = client.get(URL_BASE + "?path=other.file.js")
assert resp.json() == EXPECTED_MOCK3
- resp = client.get(URL_BASE + '?path=other.extension')
+ resp = client.get(URL_BASE + "?path=other.extension")
assert resp.json() == EXPECTED_MOCK4
- resp = client.get(URL_BASE + '?path=other')
+ resp = client.get(URL_BASE + "?path=other")
assert resp.json() == EXPECTED_MOCK3_MOCK4
- resp = client.get(URL_BASE + '?path=another')
+ resp = client.get(URL_BASE + "?path=another")
assert resp.json() == []
BugzillaComponent.objects.create(
- product='Mock Product org.mozilla.*.', component='Mock Component File Match'
+ product="Mock Product org.mozilla.*.", component="Mock Component File Match"
)
FilesBugzillaMap.objects.create(
- path='parent/folder/org/mozilla/geckoview/test/MockTestName.kt',
- file_name='MockTestName.kt',
+ path="parent/folder/org/mozilla/geckoview/test/MockTestName.kt",
+ file_name="MockTestName.kt",
bugzilla_component=BugzillaComponent.objects.last(),
)
BugzillaComponent.objects.create(
- product='Mock Product org.mozilla.*.', component='Mock Component No File Match'
+ product="Mock Product org.mozilla.*.", component="Mock Component No File Match"
)
FilesBugzillaMap.objects.create(
- path='parent/folder/org/mozilla/geckoview/test/OtherName.kt',
- file_name='OtherName.kt',
+ path="parent/folder/org/mozilla/geckoview/test/OtherName.kt",
+ file_name="OtherName.kt",
bugzilla_component=BugzillaComponent.objects.last(),
)
BugzillaComponent.objects.create(
- product='Mock Product org.mozilla.*.',
- component='Mock Component No File Match For Subtest',
+ product="Mock Product org.mozilla.*.",
+ component="Mock Component No File Match For Subtest",
)
FilesBugzillaMap.objects.create(
- path='parent/folder/org/mozilla/geckoview/test/Subtest.kt',
- file_name='Subtest.kt',
+ path="parent/folder/org/mozilla/geckoview/test/Subtest.kt",
+ file_name="Subtest.kt",
bugzilla_component=BugzillaComponent.objects.last(),
)
@@ -161,33 +161,33 @@ def test_bugzilla_components_for_path(client, test_job):
)
FilesBugzillaMap.objects.create(
- path='other/folder/org.html',
- file_name='org.html',
+ path="other/folder/org.html",
+ file_name="org.html",
bugzilla_component=BugzillaComponent.objects.last(),
)
EXPECTED_MOCK_ORG_MOZILLA = [
{
- 'product': 'Mock Product org.mozilla.*.',
- 'component': 'Mock Component File Match',
+ "product": "Mock Product org.mozilla.*.",
+ "component": "Mock Component File Match",
}
]
- resp = client.get(URL_BASE + '?path=org.mozilla.geckoview.test.MockTestName#Subtest')
+ resp = client.get(URL_BASE + "?path=org.mozilla.geckoview.test.MockTestName#Subtest")
assert resp.json() == EXPECTED_MOCK_ORG_MOZILLA
# Only take test name into account.
- resp = client.get(URL_BASE + '?path=org.mozilla.otherproduct.otherfolder.MockTestName')
+ resp = client.get(URL_BASE + "?path=org.mozilla.otherproduct.otherfolder.MockTestName")
assert resp.json() == EXPECTED_MOCK_ORG_MOZILLA
- BugzillaComponent.objects.create(product='Testing', component='Mochitest')
+ BugzillaComponent.objects.create(product="Testing", component="Mochitest")
FilesBugzillaMap.objects.create(
- path='mock/mochitest/mochitest.test',
- file_name='mochitest.test',
+ path="mock/mochitest/mochitest.test",
+ file_name="mochitest.test",
bugzilla_component=BugzillaComponent.objects.last(),
)
# Respect the ignore list of product and component combinations.
- resp = client.get(URL_BASE + '?path=mock/mochitest/mochitest.test')
+ resp = client.get(URL_BASE + "?path=mock/mochitest/mochitest.test")
assert resp.json() == []
diff --git a/tests/webapp/api/test_bug_job_map_api.py b/tests/webapp/api/test_bug_job_map_api.py
index 264314d11c0..a937b8b25f6 100644
--- a/tests/webapp/api/test_bug_job_map_api.py
+++ b/tests/webapp/api/test_bug_job_map_api.py
@@ -7,7 +7,7 @@
@pytest.mark.parametrize(
- 'test_no_auth,test_duplicate_handling', [(True, False), (False, False), (False, True)]
+ "test_no_auth,test_duplicate_handling", [(True, False), (False, False), (False, True)]
)
def test_create_bug_job_map(
client, test_job, test_user, bugs, test_no_auth, test_duplicate_handling
@@ -19,7 +19,7 @@ def test_create_bug_job_map(
if not test_no_auth:
client.force_authenticate(user=test_user)
- submit_obj = {u"job_id": test_job.id, u"bug_id": bug.id, u"type": u"manual"}
+ submit_obj = {"job_id": test_job.id, "bug_id": bug.id, "type": "manual"}
# if testing duplicate handling, submit twice
if test_duplicate_handling:
@@ -40,8 +40,8 @@ def test_create_bug_job_map(
assert BugJobMap.objects.count() == 1
bug_job_map = BugJobMap.objects.first()
- assert bug_job_map.job_id == submit_obj['job_id']
- assert bug_job_map.bug_id == submit_obj['bug_id']
+ assert bug_job_map.job_id == submit_obj["job_id"]
+ assert bug_job_map.bug_id == submit_obj["bug_id"]
assert bug_job_map.user == test_user
@@ -73,10 +73,10 @@ def test_bug_job_map_list(client, test_repository, eleven_jobs_stored, test_user
for job_range in [(0, 1), (0, 2), (0, 9)]:
resp = client.get(
reverse("bug-job-map-list", kwargs={"project": test_repository.name}),
- data={'job_id': [job.id for job in jobs[job_range[0] : job_range[1]]]},
+ data={"job_id": [job.id for job in jobs[job_range[0] : job_range[1]]]},
)
assert resp.status_code == 200
- buglist = sorted(resp.json(), key=lambda i: i['bug_id'])
+ buglist = sorted(resp.json(), key=lambda i: i["bug_id"])
assert buglist == expected[job_range[0] : job_range[1]]
@@ -111,7 +111,7 @@ def test_bug_job_map_detail(client, eleven_jobs_stored, test_repository, test_us
assert resp.json() == expected
-@pytest.mark.parametrize('test_no_auth', [True, False])
+@pytest.mark.parametrize("test_no_auth", [True, False])
def test_bug_job_map_delete(
client, eleven_jobs_stored, test_repository, test_user, test_no_auth, bugs
):
@@ -153,8 +153,8 @@ def test_bug_job_map_bad_job_id(client, test_repository):
resp = client.get(
reverse("bug-job-map-list", kwargs={"project": test_repository.name}),
- data={'job_id': bad_job_id},
+ data={"job_id": bad_job_id},
)
assert resp.status_code == 400
- assert resp.json() == {'message': 'Valid job_id required'}
+ assert resp.json() == {"message": "Valid job_id required"}
diff --git a/tests/webapp/api/test_bugzilla.py b/tests/webapp/api/test_bugzilla.py
index d8686ad523f..c02ca9fcf88 100644
--- a/tests/webapp/api/test_bugzilla.py
+++ b/tests/webapp/api/test_bugzilla.py
@@ -15,17 +15,17 @@ def request_callback(request):
headers = {}
requestdata = json.loads(request.body)
requestheaders = request.headers
- assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
- assert requestdata['type'] == "defect"
- assert requestdata['product'] == "Bugzilla"
- assert requestdata['description'] == u"**Filed by:** {}\nIntermittent Description".format(
- test_user.email.replace('@', " [at] ")
+ assert requestheaders["x-bugzilla-api-key"] == "12345helloworld"
+ assert requestdata["type"] == "defect"
+ assert requestdata["product"] == "Bugzilla"
+ assert requestdata["description"] == "**Filed by:** {}\nIntermittent Description".format(
+ test_user.email.replace("@", " [at] ")
)
- assert requestdata['component'] == "Administration"
- assert requestdata['summary'] == u"Intermittent summary"
- assert requestdata['comment_tags'] == "treeherder"
- assert requestdata['version'] == "4.0.17"
- assert requestdata['keywords'] == ["intermittent-failure"]
+ assert requestdata["component"] == "Administration"
+ assert requestdata["summary"] == "Intermittent summary"
+ assert requestdata["comment_tags"] == "treeherder"
+ assert requestdata["version"] == "4.0.17"
+ assert requestdata["keywords"] == ["intermittent-failure"]
resp_body = {"id": 323}
return (200, headers, json.dumps(resp_body))
@@ -44,17 +44,17 @@ def request_callback(request):
"type": "defect",
"product": "Bugzilla",
"component": "Administration",
- "summary": u"Intermittent summary",
+ "summary": "Intermittent summary",
"version": "4.0.17",
- "comment": u"Intermittent Description",
+ "comment": "Intermittent Description",
"comment_tags": "treeherder",
"keywords": ["intermittent-failure"],
"is_security_issue": False,
},
)
assert resp.status_code == 200
- assert resp.json()['id'] == 323
- assert resp.json()['url'] == 'https://thisisnotbugzilla.org/show_bug.cgi?id=323'
+ assert resp.json()["id"] == 323
+ assert resp.json()["url"] == "https://thisisnotbugzilla.org/show_bug.cgi?id=323"
def test_create_bug_with_unicode(client, eleven_jobs_stored, activate_responses, test_user):
@@ -66,19 +66,19 @@ def request_callback(request):
headers = {}
requestdata = json.loads(request.body)
requestheaders = request.headers
- assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
- assert requestdata['type'] == "defect"
- assert requestdata['product'] == "Bugzilla"
+ assert requestheaders["x-bugzilla-api-key"] == "12345helloworld"
+ assert requestdata["type"] == "defect"
+ assert requestdata["product"] == "Bugzilla"
assert requestdata[
- 'description'
- ] == u"**Filed by:** {}\nIntermittent “description” string".format(
- test_user.email.replace('@', " [at] ")
+ "description"
+ ] == "**Filed by:** {}\nIntermittent “description” string".format(
+ test_user.email.replace("@", " [at] ")
)
- assert requestdata['component'] == "Administration"
- assert requestdata['summary'] == u"Intermittent “summary”"
- assert requestdata['comment_tags'] == "treeherder"
- assert requestdata['version'] == "4.0.17"
- assert requestdata['keywords'] == ["intermittent-failure"]
+ assert requestdata["component"] == "Administration"
+ assert requestdata["summary"] == "Intermittent “summary”"
+ assert requestdata["comment_tags"] == "treeherder"
+ assert requestdata["version"] == "4.0.17"
+ assert requestdata["keywords"] == ["intermittent-failure"]
resp_body = {"id": 323}
return (200, headers, json.dumps(resp_body))
@@ -97,16 +97,16 @@ def request_callback(request):
"type": "defect",
"product": "Bugzilla",
"component": "Administration",
- "summary": u"Intermittent “summary”",
+ "summary": "Intermittent “summary”",
"version": "4.0.17",
- "comment": u"Intermittent “description” string",
+ "comment": "Intermittent “description” string",
"comment_tags": "treeherder",
"keywords": ["intermittent-failure"],
"is_security_issue": False,
},
)
assert resp.status_code == 200
- assert resp.json()['id'] == 323
+ assert resp.json()["id"] == 323
def test_create_crash_bug(client, eleven_jobs_stored, activate_responses, test_user):
@@ -118,19 +118,19 @@ def request_callback(request):
headers = {}
requestdata = json.loads(request.body)
requestheaders = request.headers
- assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
- assert requestdata['type'] == "defect"
- assert requestdata['product'] == "Bugzilla"
- assert requestdata['description'] == u"**Filed by:** {}\nIntermittent Description".format(
- test_user.email.replace('@', " [at] ")
+ assert requestheaders["x-bugzilla-api-key"] == "12345helloworld"
+ assert requestdata["type"] == "defect"
+ assert requestdata["product"] == "Bugzilla"
+ assert requestdata["description"] == "**Filed by:** {}\nIntermittent Description".format(
+ test_user.email.replace("@", " [at] ")
)
- assert requestdata['component'] == "Administration"
- assert requestdata['summary'] == u"Intermittent summary"
- assert requestdata['comment_tags'] == "treeherder"
- assert requestdata['version'] == "4.0.17"
- assert requestdata['keywords'] == ["intermittent-failure", "crash"]
- assert requestdata['cf_crash_signature'] == "[@crashsig]"
- assert requestdata['priority'] == '--'
+ assert requestdata["component"] == "Administration"
+ assert requestdata["summary"] == "Intermittent summary"
+ assert requestdata["comment_tags"] == "treeherder"
+ assert requestdata["version"] == "4.0.17"
+ assert requestdata["keywords"] == ["intermittent-failure", "crash"]
+ assert requestdata["cf_crash_signature"] == "[@crashsig]"
+ assert requestdata["priority"] == "--"
resp_body = {"id": 323}
return (200, headers, json.dumps(resp_body))
@@ -149,9 +149,9 @@ def request_callback(request):
"type": "defect",
"product": "Bugzilla",
"component": "Administration",
- "summary": u"Intermittent summary",
+ "summary": "Intermittent summary",
"version": "4.0.17",
- "comment": u"Intermittent Description",
+ "comment": "Intermittent Description",
"comment_tags": "treeherder",
"crash_signature": "[@crashsig]",
"priority": "--",
@@ -160,7 +160,7 @@ def request_callback(request):
},
)
assert resp.status_code == 200
- assert resp.json()['id'] == 323
+ assert resp.json()["id"] == 323
def test_create_unauthenticated_bug(client, eleven_jobs_stored, activate_responses):
@@ -172,16 +172,16 @@ def request_callback(request):
headers = {}
requestdata = json.loads(request.body)
requestheaders = request.headers
- assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
- assert requestdata['type'] == "defect"
- assert requestdata['product'] == "Bugzilla"
- assert requestdata['description'] == u"**Filed by:** MyName\nIntermittent Description"
- assert requestdata['component'] == "Administration"
- assert requestdata['summary'] == u"Intermittent summary"
- assert requestdata['comment_tags'] == "treeherder"
- assert requestdata['version'] == "4.0.17"
- assert requestdata['keywords'] == ["intermittent-failure"]
- assert requestdata['see_also'] == "12345"
+ assert requestheaders["x-bugzilla-api-key"] == "12345helloworld"
+ assert requestdata["type"] == "defect"
+ assert requestdata["product"] == "Bugzilla"
+ assert requestdata["description"] == "**Filed by:** MyName\nIntermittent Description"
+ assert requestdata["component"] == "Administration"
+ assert requestdata["summary"] == "Intermittent summary"
+ assert requestdata["comment_tags"] == "treeherder"
+ assert requestdata["version"] == "4.0.17"
+ assert requestdata["keywords"] == ["intermittent-failure"]
+ assert requestdata["see_also"] == "12345"
resp_body = {"id": 323}
return (200, headers, json.dumps(resp_body))
@@ -198,9 +198,9 @@ def request_callback(request):
"type": "defect",
"product": "Bugzilla",
"component": "Administration",
- "summary": u"Intermittent summary",
+ "summary": "Intermittent summary",
"version": "4.0.17",
- "comment": u"Intermittent Description",
+ "comment": "Intermittent Description",
"comment_tags": "treeherder",
"keywords": ["intermittent-failure"],
"see_also": "12345",
@@ -208,7 +208,7 @@ def request_callback(request):
},
)
assert resp.status_code == 403
- assert resp.json()['detail'] == "Authentication credentials were not provided."
+ assert resp.json()["detail"] == "Authentication credentials were not provided."
def test_create_bug_with_long_crash_signature(
@@ -222,18 +222,18 @@ def request_callback(request):
headers = {}
requestdata = json.loads(request.body)
requestheaders = request.headers
- assert requestheaders['x-bugzilla-api-key'] == "12345helloworld"
- assert requestdata['type'] == "defect"
- assert requestdata['product'] == "Bugzilla"
- assert requestdata['description'] == u"**Filed by:** MyName\nIntermittent Description"
- assert requestdata['component'] == "Administration"
- assert requestdata['summary'] == u"Intermittent summary"
- assert requestdata['comment_tags'] == "treeherder"
- assert requestdata['version'] == "4.0.17"
- assert requestdata['keywords'] == ["intermittent-failure", "regression"]
- assert requestdata['cf_crash_signature'] == "[@crashsig]"
- assert requestdata['regressed_by'] == "123"
- assert requestdata['see_also'] == "12345"
+ assert requestheaders["x-bugzilla-api-key"] == "12345helloworld"
+ assert requestdata["type"] == "defect"
+ assert requestdata["product"] == "Bugzilla"
+ assert requestdata["description"] == "**Filed by:** MyName\nIntermittent Description"
+ assert requestdata["component"] == "Administration"
+ assert requestdata["summary"] == "Intermittent summary"
+ assert requestdata["comment_tags"] == "treeherder"
+ assert requestdata["version"] == "4.0.17"
+ assert requestdata["keywords"] == ["intermittent-failure", "regression"]
+ assert requestdata["cf_crash_signature"] == "[@crashsig]"
+ assert requestdata["regressed_by"] == "123"
+ assert requestdata["see_also"] == "12345"
resp_body = {"id": 323}
return (200, headers, json.dumps(resp_body))
@@ -246,16 +246,16 @@ def request_callback(request):
client.force_authenticate(user=test_user)
- crashsig = 'x' * 2050
+ crashsig = "x" * 2050
resp = client.post(
reverse("bugzilla-create-bug"),
{
"type": "defect",
"product": "Bugzilla",
"component": "Administration",
- "summary": u"Intermittent summary",
+ "summary": "Intermittent summary",
"version": "4.0.17",
- "comment": u"Intermittent Description",
+ "comment": "Intermittent Description",
"comment_tags": "treeherder",
"keywords": ["intermittent-failure", "regression"],
"crash_signature": crashsig,
@@ -265,4 +265,4 @@ def request_callback(request):
},
)
assert resp.status_code == 400
- assert resp.json()['failure'] == "Crash signature can't be more than 2048 characters."
+ assert resp.json()["failure"] == "Crash signature can't be more than 2048 characters."
diff --git a/tests/webapp/api/test_csp_report.py b/tests/webapp/api/test_csp_report.py
index cbf9d9d65b7..590ad508d36 100644
--- a/tests/webapp/api/test_csp_report.py
+++ b/tests/webapp/api/test_csp_report.py
@@ -6,28 +6,28 @@
def test_valid_report(client):
"""Tests that a correctly formed CSP violation report is accepted when unauthenticated."""
valid_report = {
- 'csp-report': {
- 'blocked-uri': 'https://treestatus.mozilla-releng.net/trees/autoland',
- 'document-uri': 'http://localhost:8000/',
- 'original-policy': '...',
- 'referrer': '',
- 'violated-directive': 'connect-src',
+ "csp-report": {
+ "blocked-uri": "https://treestatus.mozilla-releng.net/trees/autoland",
+ "document-uri": "http://localhost:8000/",
+ "original-policy": "...",
+ "referrer": "",
+ "violated-directive": "connect-src",
}
}
response = client.post(
- reverse('csp-report'),
+ reverse("csp-report"),
data=json.dumps(valid_report),
- content_type='application/csp-report',
+ content_type="application/csp-report",
)
assert response.status_code == 200
def test_invalid_report(client):
"""Test that badly formed reports are gracefully handled."""
- invalid_report = 'bad'
+ invalid_report = "bad"
response = client.post(
- reverse('csp-report'),
+ reverse("csp-report"),
data=json.dumps(invalid_report),
- content_type='application/csp-report',
+ content_type="application/csp-report",
)
assert response.status_code == 400
diff --git a/tests/webapp/api/test_groupsummary_api.py b/tests/webapp/api/test_groupsummary_api.py
index e1ba70c73f0..a04b9a16811 100644
--- a/tests/webapp/api/test_groupsummary_api.py
+++ b/tests/webapp/api/test_groupsummary_api.py
@@ -8,7 +8,7 @@ def test_future_date(group_data, client):
today = datetime.datetime.today().date()
tomorrow = today + datetime.timedelta(days=1)
- url = reverse('groupsummary') + "?startdate=%s" % tomorrow
+ url = reverse("groupsummary") + "?startdate=%s" % tomorrow
resp = client.get(url)
assert resp.status_code == 200
assert resp.json() == expected
@@ -18,7 +18,7 @@ def test_future_date(group_data, client):
def test_default_date(group_data, client):
expected = {"job_type_names": [], "manifests": []}
- url = reverse('groupsummary')
+ url = reverse("groupsummary")
resp = client.get(url)
assert resp.status_code == 200
assert resp.json() == expected
@@ -27,8 +27,8 @@ def test_default_date(group_data, client):
# test data, summarized by manifest
# test jobname chunk removal and aggregation
def test_summarized(group_data, client):
- expected = group_data['expected']
- url = reverse('groupsummary') + "?startdate=%s" % str(group_data['date']).split(' ')[0]
+ expected = group_data["expected"]
+ url = reverse("groupsummary") + "?startdate=%s" % str(group_data["date"]).split(" ")[0]
resp = client.get(url)
assert resp.status_code == 200
assert resp.json() == expected
diff --git a/tests/webapp/api/test_intermittent_failures_api.py b/tests/webapp/api/test_intermittent_failures_api.py
index 10198d60e3d..f9bffd4903a 100644
--- a/tests/webapp/api/test_intermittent_failures_api.py
+++ b/tests/webapp/api/test_intermittent_failures_api.py
@@ -4,9 +4,9 @@
def test_failures(bug_data, client):
- expected = [{'bug_count': 1, 'bug_id': bug_data['bug_id']}]
+ expected = [{"bug_count": 1, "bug_id": bug_data["bug_id"]}]
- resp = client.get(reverse('failures') + bug_data['query_string'])
+ resp = client.get(reverse("failures") + bug_data["query_string"])
assert resp.status_code == 200
assert resp.json() == expected
@@ -14,21 +14,21 @@ def test_failures(bug_data, client):
def test_failures_by_bug(bug_data, client):
expected = [
{
- 'bug_id': bug_data['bug_id'],
- 'build_type': bug_data['option'].name,
- 'job_id': bug_data['job'].id,
- 'push_time': bug_data['job'].push.time.strftime('%Y-%m-%d %H:%M:%S'),
- 'platform': bug_data['job'].machine_platform.platform,
- 'revision': bug_data['job'].push.revision,
- 'test_suite': bug_data['job'].signature.job_type_name,
- 'tree': bug_data['job'].repository.name,
- 'machine_name': bug_data['job'].machine.name,
- 'lines': [],
+ "bug_id": bug_data["bug_id"],
+ "build_type": bug_data["option"].name,
+ "job_id": bug_data["job"].id,
+ "push_time": bug_data["job"].push.time.strftime("%Y-%m-%d %H:%M:%S"),
+ "platform": bug_data["job"].machine_platform.platform,
+ "revision": bug_data["job"].push.revision,
+ "test_suite": bug_data["job"].signature.job_type_name,
+ "tree": bug_data["job"].repository.name,
+ "machine_name": bug_data["job"].machine.name,
+ "lines": [],
}
]
resp = client.get(
- reverse('failures-by-bug') + bug_data['query_string'] + '&bug={}'.format(bug_data['bug_id'])
+ reverse("failures-by-bug") + bug_data["query_string"] + "&bug={}".format(bug_data["bug_id"])
)
assert resp.status_code == 200
assert resp.json() == expected
@@ -40,20 +40,20 @@ def test_failure_count_by_bug(bug_data, client, test_run_data):
for bug in bugs:
if (
- bug.job.repository.name == bug_data['tree']
- and bug.bug_id == bug_data['bug_id']
- and bug.job.push.time.strftime('%Y-%m-%d') == test_run_data['push_time']
+ bug.job.repository.name == bug_data["tree"]
+ and bug.bug_id == bug_data["bug_id"]
+ and bug.job.push.time.strftime("%Y-%m-%d") == test_run_data["push_time"]
):
failure_count += 1
expected = {
- 'date': test_run_data['push_time'],
- 'test_runs': test_run_data['test_runs'],
- 'failure_count': failure_count,
+ "date": test_run_data["push_time"],
+ "test_runs": test_run_data["test_runs"],
+ "failure_count": failure_count,
}
resp = client.get(
- reverse('failure-count') + bug_data['query_string'] + '&bug={}'.format(bug_data['bug_id'])
+ reverse("failure-count") + bug_data["query_string"] + "&bug={}".format(bug_data["bug_id"])
)
assert resp.status_code == 200
assert resp.json()[0] == expected
@@ -62,20 +62,20 @@ def test_failure_count_by_bug(bug_data, client, test_run_data):
def test_failure_count(bug_data, client, test_run_data):
failure_count = 0
- for job in list(bug_data['jobs']):
+ for job in list(bug_data["jobs"]):
if (
- job.repository.name == bug_data['tree']
+ job.repository.name == bug_data["tree"]
and job.failure_classification_id == 4
- and job.push.time.strftime('%Y-%m-%d') == test_run_data['push_time']
+ and job.push.time.strftime("%Y-%m-%d") == test_run_data["push_time"]
):
failure_count += 1
expected = {
- 'date': test_run_data['push_time'],
- 'test_runs': test_run_data['test_runs'],
- 'failure_count': failure_count,
+ "date": test_run_data["push_time"],
+ "test_runs": test_run_data["test_runs"],
+ "failure_count": failure_count,
}
- resp = client.get(reverse('failure-count') + bug_data['query_string'])
+ resp = client.get(reverse("failure-count") + bug_data["query_string"])
assert resp.status_code == 200
assert resp.json()[0] == expected
diff --git a/tests/webapp/api/test_job_log_url_api.py b/tests/webapp/api/test_job_log_url_api.py
index 10c1017e140..c5237d6163e 100644
--- a/tests/webapp/api/test_job_log_url_api.py
+++ b/tests/webapp/api/test_job_log_url_api.py
@@ -7,23 +7,23 @@
def test_get_job_log_urls(
test_repository, push_stored, failure_classifications, generic_reference_data, client
):
- job1 = create_generic_job('1234', test_repository, 1, generic_reference_data)
- job2 = create_generic_job('5678', test_repository, 1, generic_reference_data)
+ job1 = create_generic_job("1234", test_repository, 1, generic_reference_data)
+ job2 = create_generic_job("5678", test_repository, 1, generic_reference_data)
JobLog.objects.create(
- job=job1, name='test_log_1', url='http://google.com', status=JobLog.PENDING
+ job=job1, name="test_log_1", url="http://google.com", status=JobLog.PENDING
)
- JobLog.objects.create(job=job1, name='test_log_2', url='http://yahoo.com', status=JobLog.PARSED)
- JobLog.objects.create(job=job2, name='test_log_3', url='http://yahoo.com', status=JobLog.PARSED)
+ JobLog.objects.create(job=job1, name="test_log_2", url="http://yahoo.com", status=JobLog.PARSED)
+ JobLog.objects.create(job=job2, name="test_log_3", url="http://yahoo.com", status=JobLog.PARSED)
resp = client.get(
- reverse('job-log-url-list', kwargs={"project": test_repository.name}) + '?job_id=1'
+ reverse("job-log-url-list", kwargs={"project": test_repository.name}) + "?job_id=1"
)
assert resp.status_code == 200
assert len(resp.json()) == 2
resp = client.get(
- reverse('job-log-url-list', kwargs={"project": test_repository.name}) + '?job_id=1&job_id=2'
+ reverse("job-log-url-list", kwargs={"project": test_repository.name}) + "?job_id=1&job_id=2"
)
assert resp.status_code == 200
assert len(resp.json()) == 3
diff --git a/tests/webapp/api/test_jobs_api.py b/tests/webapp/api/test_jobs_api.py
index 6c8ef154f8b..4a2e453a7e7 100644
--- a/tests/webapp/api/test_jobs_api.py
+++ b/tests/webapp/api/test_jobs_api.py
@@ -9,7 +9,7 @@
@pytest.mark.parametrize(
- ('offset', 'count', 'expected_num'),
+ ("offset", "count", "expected_num"),
[(None, None, 10), (None, 5, 5), (5, None, 6), (0, 5, 5), (10, 10, 1)],
)
def test_job_list(client, eleven_jobs_stored, test_repository, offset, count, expected_num):
@@ -18,11 +18,11 @@ def test_job_list(client, eleven_jobs_stored, test_repository, offset, count, ex
endpoint.
"""
url = reverse("jobs-list", kwargs={"project": test_repository.name})
- params = '&'.join(
- ['{}={}'.format(k, v) for k, v in [('offset', offset), ('count', count)] if v]
+ params = "&".join(
+ ["{}={}".format(k, v) for k, v in [("offset", offset), ("count", count)] if v]
)
if params:
- url += '?{}'.format(params)
+ url += "?{}".format(params)
resp = client.get(url)
assert resp.status_code == 200
response_dict = resp.json()
@@ -91,47 +91,47 @@ def test_job_list_equals_filter(client, eleven_jobs_stored, test_repository):
resp = client.get(final_url)
assert resp.status_code == 200
- assert len(resp.json()['results']) == 1
+ assert len(resp.json()["results"]) == 1
job_filter_values = [
- (u'build_architecture', u'x86_64'),
- (u'build_os', u'mac'),
- (u'build_platform', u'osx-10-7'),
- (u'build_platform_id', 3),
- (u'build_system_type', u'buildbot'),
- (u'end_timestamp', 1384364849),
- (u'failure_classification_id', 1),
- (u'id', 4),
- (u'job_group_id', 2),
- (u'job_group_name', u'Mochitest'),
- (u'job_group_symbol', u'M'),
- (u'job_guid', u'ab952a4bbbc74f1d9fb3cf536073b371029dbd02'),
- (u'job_type_id', 2),
- (u'job_type_name', u'Mochitest Browser Chrome'),
- (u'job_type_symbol', u'bc'),
- (u'machine_name', u'talos-r4-lion-011'),
- (u'machine_platform_architecture', u'x86_64'),
- (u'machine_platform_os', u'mac'),
- (u'option_collection_hash', u'32faaecac742100f7753f0c1d0aa0add01b4046b'),
- (u'platform', u'osx-10-7'),
- (u'reason', u'scheduler'),
+ ("build_architecture", "x86_64"),
+ ("build_os", "mac"),
+ ("build_platform", "osx-10-7"),
+ ("build_platform_id", 3),
+ ("build_system_type", "buildbot"),
+ ("end_timestamp", 1384364849),
+ ("failure_classification_id", 1),
+ ("id", 4),
+ ("job_group_id", 2),
+ ("job_group_name", "Mochitest"),
+ ("job_group_symbol", "M"),
+ ("job_guid", "ab952a4bbbc74f1d9fb3cf536073b371029dbd02"),
+ ("job_type_id", 2),
+ ("job_type_name", "Mochitest Browser Chrome"),
+ ("job_type_symbol", "bc"),
+ ("machine_name", "talos-r4-lion-011"),
+ ("machine_platform_architecture", "x86_64"),
+ ("machine_platform_os", "mac"),
+ ("option_collection_hash", "32faaecac742100f7753f0c1d0aa0add01b4046b"),
+ ("platform", "osx-10-7"),
+ ("reason", "scheduler"),
(
- u'ref_data_name',
- u'Rev4 MacOSX Lion 10.7 mozilla-release debug test mochitest-browser-chrome',
+ "ref_data_name",
+ "Rev4 MacOSX Lion 10.7 mozilla-release debug test mochitest-browser-chrome",
),
- (u'result', u'success'),
- (u'result_set_id', 4),
- (u'signature', u'b4a4be709b937853b4ea1a49fc21bf43bf6d6406'),
- (u'start_timestamp', 1384356880),
- (u'state', u'completed'),
- (u'submit_timestamp', 1384356854),
- (u'tier', 1),
- (u'who', u'tests-mozilla-release-lion-debug-unittest'),
+ ("result", "success"),
+ ("result_set_id", 4),
+ ("signature", "b4a4be709b937853b4ea1a49fc21bf43bf6d6406"),
+ ("start_timestamp", 1384356880),
+ ("state", "completed"),
+ ("submit_timestamp", 1384356854),
+ ("tier", 1),
+ ("who", "tests-mozilla-release-lion-debug-unittest"),
]
-@pytest.mark.parametrize(('fieldname', 'expected'), job_filter_values)
+@pytest.mark.parametrize(("fieldname", "expected"), job_filter_values)
def test_job_list_filter_fields(client, eleven_jobs_stored, test_repository, fieldname, expected):
"""
test retrieving a job list with a querystring filter.
@@ -146,7 +146,7 @@ def test_job_list_filter_fields(client, eleven_jobs_stored, test_repository, fie
final_url = url + "?{}={}".format(fieldname, expected)
resp = client.get(final_url)
assert resp.status_code == 200
- first = resp.json()['results'][0]
+ first = resp.json()["results"][0]
assert first[fieldname] == expected
@@ -163,7 +163,7 @@ def test_job_list_in_filter(client, eleven_jobs_stored, test_repository):
resp = client.get(final_url)
assert resp.status_code == 200
- assert len(resp.json()['results']) == 2
+ assert len(resp.json()["results"]) == 2
def test_job_detail(client, test_job):
@@ -183,7 +183,7 @@ def test_job_detail(client, test_job):
)
assert resp.status_code == 200
assert resp.json()["taskcluster_metadata"] == {
- "task_id": 'V3SVuxO8TFy37En_6HcXLs',
+ "task_id": "V3SVuxO8TFy37En_6HcXLs",
"retry_id": 0,
}
@@ -210,8 +210,8 @@ def test_job_detail_not_found(client, test_repository):
def test_text_log_errors(client, test_job):
- TextLogError.objects.create(job=test_job, line='failure 1', line_number=101)
- TextLogError.objects.create(job=test_job, line='failure 2', line_number=102)
+ TextLogError.objects.create(job=test_job, line="failure 1", line_number=101)
+ TextLogError.objects.create(job=test_job, line="failure 2", line_number=102)
resp = client.get(
reverse(
"jobs-text-log-errors", kwargs={"project": test_job.repository.name, "pk": test_job.id}
@@ -220,22 +220,22 @@ def test_text_log_errors(client, test_job):
assert resp.status_code == 200
assert resp.json() == [
{
- 'id': 1,
- 'job': 1,
- 'line': 'failure 1',
- 'line_number': 101,
+ "id": 1,
+ "job": 1,
+ "line": "failure 1",
+ "line_number": 101,
},
{
- 'id': 2,
- 'job': 1,
- 'line': 'failure 2',
- 'line_number': 102,
+ "id": 2,
+ "job": 1,
+ "line": "failure 2",
+ "line_number": 102,
},
]
@pytest.mark.parametrize(
- ('offset', 'count', 'expected_num'),
+ ("offset", "count", "expected_num"),
[(None, None, 3), (None, 2, 2), (1, None, 2), (0, 1, 1), (2, 10, 1)],
)
def test_list_similar_jobs(client, eleven_jobs_stored, offset, count, expected_num):
@@ -245,26 +245,26 @@ def test_list_similar_jobs(client, eleven_jobs_stored, offset, count, expected_n
job = Job.objects.get(id=1)
url = reverse("jobs-similar-jobs", kwargs={"project": job.repository.name, "pk": job.id})
- params = '&'.join(
- ['{}={}'.format(k, v) for k, v in [('offset', offset), ('count', count)] if v]
+ params = "&".join(
+ ["{}={}".format(k, v) for k, v in [("offset", offset), ("count", count)] if v]
)
if params:
- url += '?{}'.format(params)
+ url += "?{}".format(params)
resp = client.get(url)
assert resp.status_code == 200
similar_jobs = resp.json()
- assert 'results' in similar_jobs
+ assert "results" in similar_jobs
- assert isinstance(similar_jobs['results'], list)
+ assert isinstance(similar_jobs["results"], list)
- assert len(similar_jobs['results']) == expected_num
+ assert len(similar_jobs["results"]) == expected_num
@pytest.mark.parametrize(
- 'lm_key,lm_value,exp_status, exp_job_count',
+ "lm_key,lm_value,exp_status, exp_job_count",
[
("last_modified__gt", "2016-07-18T22:16:58.000", 200, 8),
("last_modified__lt", "2016-07-18T22:16:58.000", 200, 3),
diff --git a/tests/webapp/api/test_note_api.py b/tests/webapp/api/test_note_api.py
index 19a0fb77b1a..2bc6a539ccb 100644
--- a/tests/webapp/api/test_note_api.py
+++ b/tests/webapp/api/test_note_api.py
@@ -75,7 +75,7 @@ def test_note_detail_bad_project(client, test_repository):
assert resp.status_code == 404
-@pytest.mark.parametrize('test_no_auth', [True, False])
+@pytest.mark.parametrize("test_no_auth", [True, False])
def test_create_note(client, test_job, test_user, test_no_auth):
"""
test creating a single note via endpoint when authenticated
@@ -100,23 +100,23 @@ def test_create_note(client, test_job, test_user, test_no_auth):
assert resp.status_code == 200
content = json.loads(resp.content)
- assert content['message'] == 'note stored for job %s' % test_job.id
+ assert content["message"] == "note stored for job %s" % test_job.id
note_list = JobNote.objects.filter(job=test_job)
assert len(note_list) == 1
assert note_list[0].user == test_user
assert note_list[0].failure_classification.id == 2
- assert note_list[0].text == 'you look like a man-o-lantern'
+ assert note_list[0].text == "you look like a man-o-lantern"
# verify that the job's last_modified field got updated
old_last_modified = test_job.last_modified
- assert old_last_modified < Job.objects.values_list('last_modified', flat=True).get(
+ assert old_last_modified < Job.objects.values_list("last_modified", flat=True).get(
id=test_job.id
)
-@pytest.mark.parametrize('test_no_auth', [True, False])
+@pytest.mark.parametrize("test_no_auth", [True, False])
def test_delete_note(client, test_job_with_notes, test_repository, test_sheriff, test_no_auth):
"""
test deleting a single note via endpoint
@@ -169,16 +169,16 @@ def test_push_notes(client, test_job_with_notes):
"text": "you look like a man-o-lantern",
},
{
- 'failure_classification_name': 'expected fail',
- 'id': 2,
- 'job': {
- 'duration': 191,
- 'job_type_name': 'B2G Emulator Image Build',
- 'result': 'success',
+ "failure_classification_name": "expected fail",
+ "id": 2,
+ "job": {
+ "duration": 191,
+ "job_type_name": "B2G Emulator Image Build",
+ "result": "success",
"task_id": notes[1].job.taskcluster_metadata.task_id,
},
"who": notes[1].user.email,
"created": notes[1].created.isoformat(),
- 'text': 'you look like a man-o-lantern',
+ "text": "you look like a man-o-lantern",
},
]
diff --git a/tests/webapp/api/test_option_collection_hash.py b/tests/webapp/api/test_option_collection_hash.py
index 72d25866b83..0ffacbe9a39 100644
--- a/tests/webapp/api/test_option_collection_hash.py
+++ b/tests/webapp/api/test_option_collection_hash.py
@@ -2,7 +2,7 @@
def test_option_collection_list(client, sample_option_collections):
- resp = client.get(reverse("optioncollectionhash-list") + '?')
+ resp = client.get(reverse("optioncollectionhash-list") + "?")
assert resp.status_code == 200
response = resp.json()
@@ -11,6 +11,6 @@ def test_option_collection_list(client, sample_option_collections):
assert len(response) == 2
assert response == [
- {'option_collection_hash': 'option_hash1', 'options': [{'name': 'opt1'}]},
- {'option_collection_hash': 'option_hash2', 'options': [{'name': 'opt2'}]},
+ {"option_collection_hash": "option_hash1", "options": [{"name": "opt1"}]},
+ {"option_collection_hash": "option_hash2", "options": [{"name": "opt2"}]},
]
diff --git a/tests/webapp/api/test_perfcompare_api.py b/tests/webapp/api/test_perfcompare_api.py
index 7d5678fe09f..80c1f42056c 100644
--- a/tests/webapp/api/test_perfcompare_api.py
+++ b/tests/webapp/api/test_perfcompare_api.py
@@ -27,7 +27,7 @@ def test_perfcompare_results_against_no_base(
test_linux_platform,
test_option_collection,
):
- perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by('push__time').all()
+ perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by("push__time").all()
test_perfcomp_push.time = THREE_DAYS_AGO
test_perfcomp_push.repository = try_repository
@@ -35,15 +35,15 @@ def test_perfcompare_results_against_no_base(
test_perfcomp_push_2.time = datetime.datetime.now()
test_perfcomp_push_2.save()
- suite = 'a11yr'
- test = 'dhtml.html'
- extra_options = 'e10s fission stylo webrender'
- measurement_unit = 'ms'
- base_application = 'firefox'
- new_application = 'geckoview'
+ suite = "a11yr"
+ test = "dhtml.html"
+ extra_options = "e10s fission stylo webrender"
+ measurement_unit = "ms"
+ base_application = "firefox"
+ new_application = "geckoview"
base_sig = create_signature(
- signature_hash=(20 * 't1'),
+ signature_hash=(20 * "t1"),
extra_options=extra_options,
platform=test_linux_platform,
measurement_unit=measurement_unit,
@@ -72,7 +72,7 @@ def test_perfcompare_results_against_no_base(
perf_datum.push.save()
new_sig = create_signature(
- signature_hash=(20 * 't2'),
+ signature_hash=(20 * "t2"),
extra_options=extra_options,
platform=test_linux_platform,
measurement_unit=measurement_unit,
@@ -103,59 +103,59 @@ def test_perfcompare_results_against_no_base(
expected = [
{
- 'base_rev': None,
- 'new_rev': test_perfcomp_push_2.revision,
- 'framework_id': base_sig.framework.id,
- 'platform': base_sig.platform.platform,
- 'suite': base_sig.suite,
- 'is_empty': False,
- 'header_name': response['header_name'],
- 'base_repository_name': base_sig.repository.name,
- 'new_repository_name': new_sig.repository.name,
- 'base_app': 'firefox',
- 'new_app': 'geckoview',
- 'is_complete': response['is_complete'],
- 'base_measurement_unit': base_sig.measurement_unit,
- 'new_measurement_unit': new_sig.measurement_unit,
- 'base_retriggerable_job_ids': [1],
- 'new_retriggerable_job_ids': [4],
- 'base_runs': base_perf_data_values,
- 'new_runs': new_perf_data_values,
- 'base_avg_value': round(response['base_avg_value'], 2),
- 'new_avg_value': round(response['new_avg_value'], 2),
- 'base_median_value': round(response['base_median_value'], 2),
- 'new_median_value': round(response['new_median_value'], 2),
- 'test': base_sig.test,
- 'option_name': response['option_name'],
- 'extra_options': base_sig.extra_options,
- 'base_stddev': round(response['base_stddev'], 2),
- 'new_stddev': round(response['new_stddev'], 2),
- 'base_stddev_pct': round(response['base_stddev_pct'], 2),
- 'new_stddev_pct': round(response['new_stddev_pct'], 2),
- 'confidence': round(response['confidence'], 2),
- 'confidence_text': response['confidence_text'],
- 'delta_value': round(response['delta_value'], 2),
- 'delta_percentage': round(response['delta_pct'], 2),
- 'magnitude': round(response['magnitude'], 2),
- 'new_is_better': response['new_is_better'],
- 'lower_is_better': response['lower_is_better'],
- 'is_confident': response['is_confident'],
- 'more_runs_are_needed': response['more_runs_are_needed'],
- 'noise_metric': False,
- 'graphs_link': f'https://treeherder.mozilla.org/perfherder/graphs?'
- f'highlightedRevisions={test_perfcomp_push_2.revision}&'
- f'series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&'
- f'series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&'
- f'timerange=86400',
- 'is_improvement': response['is_improvement'],
- 'is_regression': response['is_regression'],
- 'is_meaningful': response['is_meaningful'],
+ "base_rev": None,
+ "new_rev": test_perfcomp_push_2.revision,
+ "framework_id": base_sig.framework.id,
+ "platform": base_sig.platform.platform,
+ "suite": base_sig.suite,
+ "is_empty": False,
+ "header_name": response["header_name"],
+ "base_repository_name": base_sig.repository.name,
+ "new_repository_name": new_sig.repository.name,
+ "base_app": "firefox",
+ "new_app": "geckoview",
+ "is_complete": response["is_complete"],
+ "base_measurement_unit": base_sig.measurement_unit,
+ "new_measurement_unit": new_sig.measurement_unit,
+ "base_retriggerable_job_ids": [1],
+ "new_retriggerable_job_ids": [4],
+ "base_runs": base_perf_data_values,
+ "new_runs": new_perf_data_values,
+ "base_avg_value": round(response["base_avg_value"], 2),
+ "new_avg_value": round(response["new_avg_value"], 2),
+ "base_median_value": round(response["base_median_value"], 2),
+ "new_median_value": round(response["new_median_value"], 2),
+ "test": base_sig.test,
+ "option_name": response["option_name"],
+ "extra_options": base_sig.extra_options,
+ "base_stddev": round(response["base_stddev"], 2),
+ "new_stddev": round(response["new_stddev"], 2),
+ "base_stddev_pct": round(response["base_stddev_pct"], 2),
+ "new_stddev_pct": round(response["new_stddev_pct"], 2),
+ "confidence": round(response["confidence"], 2),
+ "confidence_text": response["confidence_text"],
+ "delta_value": round(response["delta_value"], 2),
+ "delta_percentage": round(response["delta_pct"], 2),
+ "magnitude": round(response["magnitude"], 2),
+ "new_is_better": response["new_is_better"],
+ "lower_is_better": response["lower_is_better"],
+ "is_confident": response["is_confident"],
+ "more_runs_are_needed": response["more_runs_are_needed"],
+ "noise_metric": False,
+ "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?"
+ f"highlightedRevisions={test_perfcomp_push_2.revision}&"
+ f"series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&"
+ f"series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&"
+ f"timerange=86400",
+ "is_improvement": response["is_improvement"],
+ "is_regression": response["is_regression"],
+ "is_meaningful": response["is_meaningful"],
},
]
query_params = (
- '?base_repository={}&new_repository={}&new_revision={}&framework={'
- '}&interval={}&no_subtests=true'.format(
+ "?base_repository={}&new_repository={}&new_revision={}&framework={"
+ "}&interval={}&no_subtests=true".format(
try_repository.name,
test_repository.name,
test_perfcomp_push_2.revision,
@@ -164,7 +164,7 @@ def test_perfcompare_results_against_no_base(
)
)
- response = client.get(reverse('perfcompare-results') + query_params)
+ response = client.get(reverse("perfcompare-results") + query_params)
assert response.status_code == 200
assert expected[0] == response.json()[0]
@@ -183,7 +183,7 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
test_linux_platform,
test_option_collection,
):
- perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by('push__time').all()
+ perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by("push__time").all()
test_perfcomp_push.time = THREE_DAYS_AGO
test_perfcomp_push.repository = try_repository
@@ -191,15 +191,15 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
test_perfcomp_push_2.time = datetime.datetime.now()
test_perfcomp_push_2.save()
- suite = 'a11yr'
- test = 'dhtml.html'
- extra_options = 'e10s fission stylo webrender'
- measurement_unit = 'ms'
- base_application = 'firefox'
- new_application = 'geckoview'
+ suite = "a11yr"
+ test = "dhtml.html"
+ extra_options = "e10s fission stylo webrender"
+ measurement_unit = "ms"
+ base_application = "firefox"
+ new_application = "geckoview"
base_sig = create_signature(
- signature_hash=(20 * 't1'),
+ signature_hash=(20 * "t1"),
extra_options=extra_options,
platform=test_linux_platform,
measurement_unit=measurement_unit,
@@ -228,7 +228,7 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
perf_datum.push.save()
new_sig = create_signature(
- signature_hash=(20 * 't2'),
+ signature_hash=(20 * "t2"),
extra_options=extra_options,
platform=test_linux_platform,
measurement_unit=measurement_unit,
@@ -259,59 +259,59 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
expected = [
{
- 'base_rev': test_perfcomp_push.revision,
- 'new_rev': test_perfcomp_push_2.revision,
- 'framework_id': base_sig.framework.id,
- 'platform': base_sig.platform.platform,
- 'suite': base_sig.suite,
- 'is_empty': False,
- 'header_name': response['header_name'],
- 'base_repository_name': base_sig.repository.name,
- 'new_repository_name': new_sig.repository.name,
- 'base_app': 'firefox',
- 'new_app': 'geckoview',
- 'is_complete': response['is_complete'],
- 'base_measurement_unit': base_sig.measurement_unit,
- 'new_measurement_unit': new_sig.measurement_unit,
- 'base_retriggerable_job_ids': [1],
- 'new_retriggerable_job_ids': [4],
- 'base_runs': base_perf_data_values,
- 'new_runs': new_perf_data_values,
- 'base_avg_value': round(response['base_avg_value'], 2),
- 'new_avg_value': round(response['new_avg_value'], 2),
- 'base_median_value': round(response['base_median_value'], 2),
- 'new_median_value': round(response['new_median_value'], 2),
- 'test': base_sig.test,
- 'option_name': response['option_name'],
- 'extra_options': base_sig.extra_options,
- 'base_stddev': round(response['base_stddev'], 2),
- 'new_stddev': round(response['new_stddev'], 2),
- 'base_stddev_pct': round(response['base_stddev_pct'], 2),
- 'new_stddev_pct': round(response['new_stddev_pct'], 2),
- 'confidence': round(response['confidence'], 2),
- 'confidence_text': response['confidence_text'],
- 'delta_value': round(response['delta_value'], 2),
- 'delta_percentage': round(response['delta_pct'], 2),
- 'magnitude': round(response['magnitude'], 2),
- 'new_is_better': response['new_is_better'],
- 'lower_is_better': response['lower_is_better'],
- 'is_confident': response['is_confident'],
- 'more_runs_are_needed': response['more_runs_are_needed'],
- 'noise_metric': False,
- 'graphs_link': f'https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&'
- f'highlightedRevisions={test_perfcomp_push_2.revision}&'
- f'series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&'
- f'series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&'
- f'timerange=604800',
- 'is_improvement': response['is_improvement'],
- 'is_regression': response['is_regression'],
- 'is_meaningful': response['is_meaningful'],
+ "base_rev": test_perfcomp_push.revision,
+ "new_rev": test_perfcomp_push_2.revision,
+ "framework_id": base_sig.framework.id,
+ "platform": base_sig.platform.platform,
+ "suite": base_sig.suite,
+ "is_empty": False,
+ "header_name": response["header_name"],
+ "base_repository_name": base_sig.repository.name,
+ "new_repository_name": new_sig.repository.name,
+ "base_app": "firefox",
+ "new_app": "geckoview",
+ "is_complete": response["is_complete"],
+ "base_measurement_unit": base_sig.measurement_unit,
+ "new_measurement_unit": new_sig.measurement_unit,
+ "base_retriggerable_job_ids": [1],
+ "new_retriggerable_job_ids": [4],
+ "base_runs": base_perf_data_values,
+ "new_runs": new_perf_data_values,
+ "base_avg_value": round(response["base_avg_value"], 2),
+ "new_avg_value": round(response["new_avg_value"], 2),
+ "base_median_value": round(response["base_median_value"], 2),
+ "new_median_value": round(response["new_median_value"], 2),
+ "test": base_sig.test,
+ "option_name": response["option_name"],
+ "extra_options": base_sig.extra_options,
+ "base_stddev": round(response["base_stddev"], 2),
+ "new_stddev": round(response["new_stddev"], 2),
+ "base_stddev_pct": round(response["base_stddev_pct"], 2),
+ "new_stddev_pct": round(response["new_stddev_pct"], 2),
+ "confidence": round(response["confidence"], 2),
+ "confidence_text": response["confidence_text"],
+ "delta_value": round(response["delta_value"], 2),
+ "delta_percentage": round(response["delta_pct"], 2),
+ "magnitude": round(response["magnitude"], 2),
+ "new_is_better": response["new_is_better"],
+ "lower_is_better": response["lower_is_better"],
+ "is_confident": response["is_confident"],
+ "more_runs_are_needed": response["more_runs_are_needed"],
+ "noise_metric": False,
+ "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&"
+ f"highlightedRevisions={test_perfcomp_push_2.revision}&"
+ f"series={try_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&"
+ f"series={test_repository.name}%2C{base_sig.signature_hash}%2C1%2C{base_sig.framework.id}&"
+ f"timerange=604800",
+ "is_improvement": response["is_improvement"],
+ "is_regression": response["is_regression"],
+ "is_meaningful": response["is_meaningful"],
},
]
query_params = (
- '?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={'
- '}&no_subtests=true'.format(
+ "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={"
+ "}&no_subtests=true".format(
try_repository.name,
test_repository.name,
test_perfcomp_push.revision,
@@ -320,7 +320,7 @@ def test_perfcompare_results_with_only_one_run_and_diff_repo(
)
)
- response = client.get(reverse('perfcompare-results') + query_params)
+ response = client.get(reverse("perfcompare-results") + query_params)
assert response.status_code == 200
assert expected[0] == response.json()[0]
@@ -340,19 +340,19 @@ def test_perfcompare_results_multiple_runs(
test_macosx_platform,
test_option_collection,
):
- perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by('push__time').all()
+ perf_jobs = Job.objects.filter(pk__in=range(1, 11)).order_by("push__time").all()
test_perfcomp_push.time = SEVEN_DAYS_AGO
test_perfcomp_push.save()
    test_perfcomp_push_2.time = datetime.datetime.now()
    test_perfcomp_push_2.save()
- suite = 'a11yr'
- test = 'dhtml.html'
- extra_options = 'e10s fission stylo webrender'
- measurement_unit = 'ms'
+ suite = "a11yr"
+ test = "dhtml.html"
+ extra_options = "e10s fission stylo webrender"
+ measurement_unit = "ms"
sig1 = create_signature(
- signature_hash=(20 * 't1'),
+ signature_hash=(20 * "t1"),
extra_options=extra_options,
platform=test_linux_platform,
measurement_unit=measurement_unit,
@@ -371,7 +371,7 @@ def test_perfcompare_results_multiple_runs(
create_perf_datum(index, job, test_perfcomp_push, sig1, sig1_val)
sig2 = create_signature(
- signature_hash=(20 * 't2'),
+ signature_hash=(20 * "t2"),
extra_options=extra_options,
platform=test_linux_platform,
measurement_unit=measurement_unit,
@@ -385,7 +385,7 @@ def test_perfcompare_results_multiple_runs(
create_perf_datum(index, job, test_perfcomp_push_2, sig2, sig2_val)
sig3 = create_signature(
- signature_hash=(20 * 't3'),
+ signature_hash=(20 * "t3"),
extra_options=extra_options,
platform=test_macosx_platform,
measurement_unit=measurement_unit,
@@ -399,7 +399,7 @@ def test_perfcompare_results_multiple_runs(
create_perf_datum(index, job, test_perfcomp_push, sig3, sig3_val)
sig4 = create_signature(
- signature_hash=(20 * 't4'),
+ signature_hash=(20 * "t4"),
extra_options=extra_options,
platform=test_macosx_platform,
measurement_unit=measurement_unit,
@@ -418,104 +418,104 @@ def test_perfcompare_results_multiple_runs(
expected = [
{
- 'base_rev': test_perfcomp_push.revision,
- 'new_rev': test_perfcomp_push_2.revision,
- 'framework_id': sig1.framework.id,
- 'platform': sig1.platform.platform,
- 'suite': sig1.suite,
- 'is_empty': False,
- 'header_name': first_row['header_name'],
- 'base_repository_name': sig1.repository.name,
- 'new_repository_name': sig2.repository.name,
- 'base_app': '',
- 'new_app': '',
- 'is_complete': first_row['is_complete'],
- 'base_measurement_unit': sig1.measurement_unit,
- 'new_measurement_unit': sig2.measurement_unit,
- 'base_retriggerable_job_ids': [1, 2, 4],
- 'new_retriggerable_job_ids': [7, 8],
- 'base_runs': sig1_val,
- 'new_runs': sig2_val,
- 'base_avg_value': round(first_row['base_avg_value'], 2),
- 'new_avg_value': round(first_row['new_avg_value'], 2),
- 'base_median_value': round(first_row['base_median_value'], 2),
- 'new_median_value': round(first_row['new_median_value'], 2),
- 'test': sig1.test,
- 'option_name': first_row['option_name'],
- 'extra_options': sig1.extra_options,
- 'base_stddev': round(first_row['base_stddev'], 2),
- 'new_stddev': round(first_row['new_stddev'], 2),
- 'base_stddev_pct': round(first_row['base_stddev_pct'], 2),
- 'new_stddev_pct': round(first_row['new_stddev_pct'], 2),
- 'confidence': round(first_row['confidence'], 2),
- 'confidence_text': first_row['confidence_text'],
- 'delta_value': round(first_row['delta_value'], 2),
- 'delta_percentage': round(first_row['delta_pct'], 2),
- 'magnitude': round(first_row['magnitude'], 2),
- 'new_is_better': first_row['new_is_better'],
- 'lower_is_better': first_row['lower_is_better'],
- 'is_confident': first_row['is_confident'],
- 'more_runs_are_needed': first_row['more_runs_are_needed'],
- 'noise_metric': False,
- 'graphs_link': f'https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&'
- f'highlightedRevisions={test_perfcomp_push_2.revision}&'
- f'series={test_repository.name}%2C{sig1.signature_hash}%2C1%2C{sig1.framework.id}&timerange=1209600',
- 'is_improvement': first_row['is_improvement'],
- 'is_regression': first_row['is_regression'],
- 'is_meaningful': first_row['is_meaningful'],
+ "base_rev": test_perfcomp_push.revision,
+ "new_rev": test_perfcomp_push_2.revision,
+ "framework_id": sig1.framework.id,
+ "platform": sig1.platform.platform,
+ "suite": sig1.suite,
+ "is_empty": False,
+ "header_name": first_row["header_name"],
+ "base_repository_name": sig1.repository.name,
+ "new_repository_name": sig2.repository.name,
+ "base_app": "",
+ "new_app": "",
+ "is_complete": first_row["is_complete"],
+ "base_measurement_unit": sig1.measurement_unit,
+ "new_measurement_unit": sig2.measurement_unit,
+ "base_retriggerable_job_ids": [1, 2, 4],
+ "new_retriggerable_job_ids": [7, 8],
+ "base_runs": sig1_val,
+ "new_runs": sig2_val,
+ "base_avg_value": round(first_row["base_avg_value"], 2),
+ "new_avg_value": round(first_row["new_avg_value"], 2),
+ "base_median_value": round(first_row["base_median_value"], 2),
+ "new_median_value": round(first_row["new_median_value"], 2),
+ "test": sig1.test,
+ "option_name": first_row["option_name"],
+ "extra_options": sig1.extra_options,
+ "base_stddev": round(first_row["base_stddev"], 2),
+ "new_stddev": round(first_row["new_stddev"], 2),
+ "base_stddev_pct": round(first_row["base_stddev_pct"], 2),
+ "new_stddev_pct": round(first_row["new_stddev_pct"], 2),
+ "confidence": round(first_row["confidence"], 2),
+ "confidence_text": first_row["confidence_text"],
+ "delta_value": round(first_row["delta_value"], 2),
+ "delta_percentage": round(first_row["delta_pct"], 2),
+ "magnitude": round(first_row["magnitude"], 2),
+ "new_is_better": first_row["new_is_better"],
+ "lower_is_better": first_row["lower_is_better"],
+ "is_confident": first_row["is_confident"],
+ "more_runs_are_needed": first_row["more_runs_are_needed"],
+ "noise_metric": False,
+ "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&"
+ f"highlightedRevisions={test_perfcomp_push_2.revision}&"
+ f"series={test_repository.name}%2C{sig1.signature_hash}%2C1%2C{sig1.framework.id}&timerange=1209600",
+ "is_improvement": first_row["is_improvement"],
+ "is_regression": first_row["is_regression"],
+ "is_meaningful": first_row["is_meaningful"],
},
{
- 'base_rev': test_perfcomp_push.revision,
- 'new_rev': test_perfcomp_push_2.revision,
- 'framework_id': sig3.framework.id,
- 'platform': sig3.platform.platform,
- 'suite': sig3.suite,
- 'is_empty': False,
- 'header_name': second_row['header_name'],
- 'base_repository_name': sig3.repository.name,
- 'new_repository_name': sig4.repository.name,
- 'base_app': '',
- 'new_app': '',
- 'is_complete': second_row['is_complete'],
- 'base_measurement_unit': sig3.measurement_unit,
- 'new_measurement_unit': sig4.measurement_unit,
- 'base_retriggerable_job_ids': [1, 2],
- 'new_retriggerable_job_ids': [4, 7],
- 'base_runs': sig3_val,
- 'new_runs': sig4_val,
- 'base_avg_value': round(second_row['base_avg_value'], 2),
- 'new_avg_value': round(second_row['new_avg_value'], 2),
- 'base_median_value': round(second_row['base_median_value'], 2),
- 'new_median_value': round(second_row['new_median_value'], 2),
- 'test': sig3.test,
- 'option_name': second_row['option_name'],
- 'extra_options': sig3.extra_options,
- 'base_stddev': round(second_row['base_stddev'], 2),
- 'new_stddev': round(second_row['new_stddev'], 2),
- 'base_stddev_pct': round(second_row['base_stddev_pct'], 2),
- 'new_stddev_pct': round(second_row['new_stddev_pct'], 2),
- 'confidence': round(second_row['confidence'], 2),
- 'confidence_text': second_row['confidence_text'],
- 'delta_value': round(second_row['delta_value'], 2),
- 'delta_percentage': round(second_row['delta_pct'], 2),
- 'magnitude': round(second_row['magnitude'], 2),
- 'new_is_better': second_row['new_is_better'],
- 'lower_is_better': second_row['lower_is_better'],
- 'is_confident': second_row['is_confident'],
- 'more_runs_are_needed': second_row['more_runs_are_needed'],
- 'noise_metric': False,
- 'graphs_link': f'https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&'
- f'highlightedRevisions={test_perfcomp_push_2.revision}&'
- f'series={test_repository.name}%2C{sig3.signature_hash}%2C1%2C{sig1.framework.id}&timerange=1209600',
- 'is_improvement': second_row['is_improvement'],
- 'is_regression': second_row['is_regression'],
- 'is_meaningful': second_row['is_meaningful'],
+ "base_rev": test_perfcomp_push.revision,
+ "new_rev": test_perfcomp_push_2.revision,
+ "framework_id": sig3.framework.id,
+ "platform": sig3.platform.platform,
+ "suite": sig3.suite,
+ "is_empty": False,
+ "header_name": second_row["header_name"],
+ "base_repository_name": sig3.repository.name,
+ "new_repository_name": sig4.repository.name,
+ "base_app": "",
+ "new_app": "",
+ "is_complete": second_row["is_complete"],
+ "base_measurement_unit": sig3.measurement_unit,
+ "new_measurement_unit": sig4.measurement_unit,
+ "base_retriggerable_job_ids": [1, 2],
+ "new_retriggerable_job_ids": [4, 7],
+ "base_runs": sig3_val,
+ "new_runs": sig4_val,
+ "base_avg_value": round(second_row["base_avg_value"], 2),
+ "new_avg_value": round(second_row["new_avg_value"], 2),
+ "base_median_value": round(second_row["base_median_value"], 2),
+ "new_median_value": round(second_row["new_median_value"], 2),
+ "test": sig3.test,
+ "option_name": second_row["option_name"],
+ "extra_options": sig3.extra_options,
+ "base_stddev": round(second_row["base_stddev"], 2),
+ "new_stddev": round(second_row["new_stddev"], 2),
+ "base_stddev_pct": round(second_row["base_stddev_pct"], 2),
+ "new_stddev_pct": round(second_row["new_stddev_pct"], 2),
+ "confidence": round(second_row["confidence"], 2),
+ "confidence_text": second_row["confidence_text"],
+ "delta_value": round(second_row["delta_value"], 2),
+ "delta_percentage": round(second_row["delta_pct"], 2),
+ "magnitude": round(second_row["magnitude"], 2),
+ "new_is_better": second_row["new_is_better"],
+ "lower_is_better": second_row["lower_is_better"],
+ "is_confident": second_row["is_confident"],
+ "more_runs_are_needed": second_row["more_runs_are_needed"],
+ "noise_metric": False,
+ "graphs_link": f"https://treeherder.mozilla.org/perfherder/graphs?highlightedRevisions={test_perfcomp_push.revision}&"
+ f"highlightedRevisions={test_perfcomp_push_2.revision}&"
+ f"series={test_repository.name}%2C{sig3.signature_hash}%2C1%2C{sig1.framework.id}&timerange=1209600",
+ "is_improvement": second_row["is_improvement"],
+ "is_regression": second_row["is_regression"],
+ "is_meaningful": second_row["is_meaningful"],
},
]
query_params = (
- '?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={'
- '}&no_subtests=true'.format(
+ "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={"
+ "}&no_subtests=true".format(
test_perf_signature.repository.name,
test_perf_signature.repository.name,
test_perfcomp_push.revision,
@@ -524,7 +524,7 @@ def test_perfcompare_results_multiple_runs(
)
)
- response = client.get(reverse('perfcompare-results') + query_params)
+ response = client.get(reverse("perfcompare-results") + query_params)
assert response.status_code == 200
for result in expected:
assert result in response.json()
@@ -533,8 +533,8 @@ def test_perfcompare_results_multiple_runs(
def test_revision_is_not_found(client, test_perf_signature, test_perfcomp_push):
non_existent_revision = "nonexistentrevision"
query_params = (
- '?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={'
- '}&no_subtests=true'.format(
+ "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={"
+ "}&no_subtests=true".format(
test_perf_signature.repository.name,
test_perf_signature.repository.name,
non_existent_revision,
@@ -543,15 +543,15 @@ def test_revision_is_not_found(client, test_perf_signature, test_perfcomp_push):
)
)
- response = client.get(reverse('perfcompare-results') + query_params)
+ response = client.get(reverse("perfcompare-results") + query_params)
assert response.status_code == 400
assert response.json() == "No base push with revision {} from repo {}.".format(
non_existent_revision, test_perf_signature.repository.name
)
query_params = (
- '?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={'
- '}&no_subtests=true'.format(
+ "?base_repository={}&new_repository={}&base_revision={}&new_revision={}&framework={"
+ "}&no_subtests=true".format(
test_perf_signature.repository.name,
test_perf_signature.repository.name,
test_perfcomp_push.revision,
@@ -560,7 +560,7 @@ def test_revision_is_not_found(client, test_perf_signature, test_perfcomp_push):
)
)
- response = client.get(reverse('perfcompare-results') + query_params)
+ response = client.get(reverse("perfcompare-results") + query_params)
assert response.status_code == 400
assert response.json() == "No new push with revision {} from repo {}.".format(
non_existent_revision, test_perf_signature.repository.name
@@ -572,8 +572,8 @@ def test_interval_is_required_when_comparing_without_base(
):
non_existent_revision = "nonexistentrevision"
query_params = (
- '?base_repository={}&new_repository={}&new_revision={}&framework={'
- '}&no_subtests=true'.format(
+ "?base_repository={}&new_repository={}&new_revision={}&framework={"
+ "}&no_subtests=true".format(
test_perf_signature.repository.name,
test_perf_signature.repository.name,
non_existent_revision,
@@ -581,68 +581,68 @@ def test_interval_is_required_when_comparing_without_base(
)
)
- response = client.get(reverse('perfcompare-results') + query_params)
+ response = client.get(reverse("perfcompare-results") + query_params)
assert response.status_code == 400
- assert response.json() == {'non_field_errors': ['Field required: interval.']}
+ assert response.json() == {"non_field_errors": ["Field required: interval."]}
def get_expected(
base_sig, extra_options, test_option_collection, new_perf_data_values, base_perf_data_values
):
- response = {'option_name': test_option_collection.get(base_sig.option_collection_id, '')}
+ response = {"option_name": test_option_collection.get(base_sig.option_collection_id, "")}
test_suite = perfcompare_utils.get_test_suite(base_sig.suite, base_sig.test)
- response['header_name'] = perfcompare_utils.get_header_name(
- extra_options, response['option_name'], test_suite
+ response["header_name"] = perfcompare_utils.get_header_name(
+ extra_options, response["option_name"], test_suite
)
- response['base_avg_value'] = perfcompare_utils.get_avg(
- base_perf_data_values, response['header_name']
+ response["base_avg_value"] = perfcompare_utils.get_avg(
+ base_perf_data_values, response["header_name"]
)
- response['new_avg_value'] = perfcompare_utils.get_avg(
- new_perf_data_values, response['header_name']
+ response["new_avg_value"] = perfcompare_utils.get_avg(
+ new_perf_data_values, response["header_name"]
)
- response['base_median_value'] = perfcompare_utils.get_median(base_perf_data_values)
- response['new_median_value'] = perfcompare_utils.get_median(new_perf_data_values)
- response['delta_value'] = perfcompare_utils.get_delta_value(
- response['new_avg_value'], response.get('base_avg_value')
+ response["base_median_value"] = perfcompare_utils.get_median(base_perf_data_values)
+ response["new_median_value"] = perfcompare_utils.get_median(new_perf_data_values)
+ response["delta_value"] = perfcompare_utils.get_delta_value(
+ response["new_avg_value"], response.get("base_avg_value")
)
- response['delta_pct'] = perfcompare_utils.get_delta_percentage(
- response['delta_value'], response['base_avg_value']
+ response["delta_pct"] = perfcompare_utils.get_delta_percentage(
+ response["delta_value"], response["base_avg_value"]
)
- response['base_stddev'] = perfcompare_utils.get_stddev(
- base_perf_data_values, response['header_name']
+ response["base_stddev"] = perfcompare_utils.get_stddev(
+ base_perf_data_values, response["header_name"]
)
- response['new_stddev'] = perfcompare_utils.get_stddev(
- new_perf_data_values, response['header_name']
+ response["new_stddev"] = perfcompare_utils.get_stddev(
+ new_perf_data_values, response["header_name"]
)
- response['base_stddev_pct'] = perfcompare_utils.get_stddev_pct(
- response['base_avg_value'], response['base_stddev']
+ response["base_stddev_pct"] = perfcompare_utils.get_stddev_pct(
+ response["base_avg_value"], response["base_stddev"]
)
- response['new_stddev_pct'] = perfcompare_utils.get_stddev_pct(
- response['new_avg_value'], response['new_stddev']
+ response["new_stddev_pct"] = perfcompare_utils.get_stddev_pct(
+ response["new_avg_value"], response["new_stddev"]
)
- response['magnitude'] = perfcompare_utils.get_magnitude(response['delta_pct'])
- response['new_is_better'] = perfcompare_utils.is_new_better(
- response['delta_value'], base_sig.lower_is_better
+ response["magnitude"] = perfcompare_utils.get_magnitude(response["delta_pct"])
+ response["new_is_better"] = perfcompare_utils.is_new_better(
+ response["delta_value"], base_sig.lower_is_better
)
- response['lower_is_better'] = base_sig.lower_is_better
- response['confidence'] = perfcompare_utils.get_abs_ttest_value(
+ response["lower_is_better"] = base_sig.lower_is_better
+ response["confidence"] = perfcompare_utils.get_abs_ttest_value(
base_perf_data_values, new_perf_data_values
)
- response['is_confident'] = perfcompare_utils.is_confident(
- len(base_perf_data_values), len(new_perf_data_values), response['confidence']
+ response["is_confident"] = perfcompare_utils.is_confident(
+ len(base_perf_data_values), len(new_perf_data_values), response["confidence"]
)
- response['confidence_text'] = perfcompare_utils.get_confidence_text(response['confidence'])
- response['is_complete'] = True
- response['more_runs_are_needed'] = perfcompare_utils.more_runs_are_needed(
- response['is_complete'], response['is_confident'], len(base_perf_data_values)
+ response["confidence_text"] = perfcompare_utils.get_confidence_text(response["confidence"])
+ response["is_complete"] = True
+ response["more_runs_are_needed"] = perfcompare_utils.more_runs_are_needed(
+ response["is_complete"], response["is_confident"], len(base_perf_data_values)
)
class_name = perfcompare_utils.get_class_name(
- response['new_is_better'],
- response['base_avg_value'],
- response['new_avg_value'],
- response['confidence'],
- )
- response['is_improvement'] = class_name == 'success'
- response['is_regression'] = class_name == 'danger'
- response['is_meaningful'] = class_name == ''
+ response["new_is_better"],
+ response["base_avg_value"],
+ response["new_avg_value"],
+ response["confidence"],
+ )
+ response["is_improvement"] = class_name == "success"
+ response["is_regression"] = class_name == "danger"
+ response["is_meaningful"] = class_name == ""
return response
diff --git a/tests/webapp/api/test_performance_alerts_api.py b/tests/webapp/api/test_performance_alerts_api.py
index 5ea9e5ecad2..aeb87ce93d0 100644
--- a/tests/webapp/api/test_performance_alerts_api.py
+++ b/tests/webapp/api/test_performance_alerts_api.py
@@ -17,44 +17,44 @@ def test_alerts_get(
test_taskcluster_metadata,
test_taskcluster_metadata_2,
):
- resp = client.get(reverse('performance-alerts-list'))
+ resp = client.get(reverse("performance-alerts-list"))
assert resp.status_code == 200
# should just have the one alert
- assert resp.json()['next'] is None
- assert resp.json()['previous'] is None
- assert len(resp.json()['results']) == 1
- assert set(resp.json()['results'][0].keys()) == {
- 'amount_pct',
- 'amount_abs',
- 'id',
- 'is_regression',
- 'starred',
- 'manually_created',
- 'new_value',
- 'prev_value',
- 'related_summary_id',
- 'series_signature',
- 'taskcluster_metadata',
- 'prev_taskcluster_metadata',
- 'profile_url',
- 'prev_profile_url',
- 'summary_id',
- 'status',
- 't_value',
- 'classifier',
- 'classifier_email',
- 'backfill_record',
- 'noise_profile',
+ assert resp.json()["next"] is None
+ assert resp.json()["previous"] is None
+ assert len(resp.json()["results"]) == 1
+ assert set(resp.json()["results"][0].keys()) == {
+ "amount_pct",
+ "amount_abs",
+ "id",
+ "is_regression",
+ "starred",
+ "manually_created",
+ "new_value",
+ "prev_value",
+ "related_summary_id",
+ "series_signature",
+ "taskcluster_metadata",
+ "prev_taskcluster_metadata",
+ "profile_url",
+ "prev_profile_url",
+ "summary_id",
+ "status",
+ "t_value",
+ "classifier",
+ "classifier_email",
+ "backfill_record",
+ "noise_profile",
}
- assert resp.json()['results'][0]['related_summary_id'] is None
- assert set(resp.json()['results'][0]['taskcluster_metadata'].keys()) == {
- 'task_id',
- 'retry_id',
+ assert resp.json()["results"][0]["related_summary_id"] is None
+ assert set(resp.json()["results"][0]["taskcluster_metadata"].keys()) == {
+ "task_id",
+ "retry_id",
}
- assert set(resp.json()['results'][0]['prev_taskcluster_metadata'].keys()) == {
- 'task_id',
- 'retry_id',
+ assert set(resp.json()["results"][0]["prev_taskcluster_metadata"].keys()) == {
+ "task_id",
+ "retry_id",
}
@@ -71,14 +71,14 @@ def test_alerts_put(
test_user,
test_sheriff,
):
- resp = client.get(reverse('performance-alerts-list'))
+ resp = client.get(reverse("performance-alerts-list"))
assert resp.status_code == 200
- assert resp.json()['results'][0]['related_summary_id'] is None
+ assert resp.json()["results"][0]["related_summary_id"] is None
# verify that we fail if not authenticated
resp = client.put(
- reverse('performance-alerts-list') + '1/',
- {'related_summary_id': 2, 'status': PerformanceAlert.DOWNSTREAM},
+ reverse("performance-alerts-list") + "1/",
+ {"related_summary_id": 2, "status": PerformanceAlert.DOWNSTREAM},
)
assert resp.status_code == 403
assert PerformanceAlert.objects.get(id=1).related_summary_id is None
@@ -86,8 +86,8 @@ def test_alerts_put(
# verify that we fail if authenticated, but not staff
client.force_authenticate(user=test_user)
resp = client.put(
- reverse('performance-alerts-list') + '1/',
- {'related_summary_id': 2, 'status': PerformanceAlert.DOWNSTREAM},
+ reverse("performance-alerts-list") + "1/",
+ {"related_summary_id": 2, "status": PerformanceAlert.DOWNSTREAM},
)
assert resp.status_code == 403
assert PerformanceAlert.objects.get(id=1).related_summary_id is None
@@ -95,8 +95,8 @@ def test_alerts_put(
# verify that we succeed if authenticated + staff
client.force_authenticate(user=test_sheriff)
resp = client.put(
- reverse('performance-alerts-list') + '1/',
- {'related_summary_id': 2, 'status': PerformanceAlert.DOWNSTREAM},
+ reverse("performance-alerts-list") + "1/",
+ {"related_summary_id": 2, "status": PerformanceAlert.DOWNSTREAM},
)
assert resp.status_code == 200
assert PerformanceAlert.objects.get(id=1).related_summary_id == 2
@@ -104,8 +104,8 @@ def test_alerts_put(
# verify that we can unset it too
resp = client.put(
- reverse('performance-alerts-list') + '1/',
- {'related_summary_id': None, 'status': PerformanceAlert.UNTRIAGED},
+ reverse("performance-alerts-list") + "1/",
+ {"related_summary_id": None, "status": PerformanceAlert.UNTRIAGED},
)
assert resp.status_code == 200
assert PerformanceAlert.objects.get(id=1).related_summary_id is None
@@ -136,8 +136,8 @@ def test_reassign_different_repository(
# mark downstream of summary with different repository,
# should succeed
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '1/',
- {'related_summary_id': test_perf_alert_summary_2.id, 'status': PerformanceAlert.DOWNSTREAM},
+ reverse("performance-alerts-list") + "1/",
+ {"related_summary_id": test_perf_alert_summary_2.id, "status": PerformanceAlert.DOWNSTREAM},
)
assert resp.status_code == 200
test_perf_alert.refresh_from_db()
@@ -155,7 +155,7 @@ def test_reassign_different_framework(
):
# verify that we can't reassign to another performance alert summary
# with a different framework
- framework_2 = PerformanceFramework.objects.create(name='test_talos_2', enabled=True)
+ framework_2 = PerformanceFramework.objects.create(name="test_talos_2", enabled=True)
test_perf_alert_summary_2.framework = framework_2
test_perf_alert_summary_2.save()
@@ -168,8 +168,8 @@ def assert_incompatible_alert_assignment_fails(
authorized_sheriff_client, perf_alert, incompatible_summary
):
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '1/',
- {'related_summary_id': incompatible_summary.id, 'status': PerformanceAlert.REASSIGNED},
+ reverse("performance-alerts-list") + "1/",
+ {"related_summary_id": incompatible_summary.id, "status": PerformanceAlert.REASSIGNED},
)
assert resp.status_code == 400
perf_alert.refresh_from_db()
@@ -181,25 +181,25 @@ def assert_incompatible_alert_assignment_fails(
def alert_create_post_blob(test_perf_alert_summary, test_perf_signature):
# this blob should be sufficient to create a new alert (assuming
# the user of this API is authorized to do so!)
- return {'summary_id': test_perf_alert_summary.id, 'signature_id': test_perf_signature.id}
+ return {"summary_id": test_perf_alert_summary.id, "signature_id": test_perf_signature.id}
def test_alerts_post(
client, alert_create_post_blob, test_user, test_sheriff, generate_enough_perf_datum
):
# verify that we fail if not authenticated
- resp = client.post(reverse('performance-alerts-list'), alert_create_post_blob)
+ resp = client.post(reverse("performance-alerts-list"), alert_create_post_blob)
assert resp.status_code == 403
# verify that we fail if authenticated, but not staff
client.force_authenticate(user=test_user)
- resp = client.post(reverse('performance-alerts-list'), alert_create_post_blob)
+ resp = client.post(reverse("performance-alerts-list"), alert_create_post_blob)
assert resp.status_code == 403
assert PerformanceAlert.objects.count() == 0
# verify that we succeed if staff + authenticated
client.force_authenticate(user=test_sheriff)
- resp = client.post(reverse('performance-alerts-list'), alert_create_post_blob)
+ resp = client.post(reverse("performance-alerts-list"), alert_create_post_blob)
assert resp.status_code == 200
assert PerformanceAlert.objects.count() == 1
@@ -222,11 +222,11 @@ def test_alerts_post_insufficient_data(
alert_create_post_blob,
):
# we should not succeed if insufficient data is passed through
- for removed_key in ['summary_id', 'signature_id']:
+ for removed_key in ["summary_id", "signature_id"]:
new_post_blob = copy.copy(alert_create_post_blob)
del new_post_blob[removed_key]
- resp = authorized_sheriff_client.post(reverse('performance-alerts-list'), new_post_blob)
+ resp = authorized_sheriff_client.post(reverse("performance-alerts-list"), new_post_blob)
assert resp.status_code == 400
assert PerformanceAlert.objects.count() == 0
@@ -239,7 +239,7 @@ def test_nudge_alert_towards_conflicting_one(
old_conflicting_update = test_conflicting_perf_alert.last_updated
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '1/', {'prev_push_id': 2, 'push_id': 3}
+ reverse("performance-alerts-list") + "1/", {"prev_push_id": 2, "push_id": 3}
)
assert resp.status_code == 200
test_conflicting_perf_alert.refresh_from_db()
@@ -257,7 +257,7 @@ def test_nudge_alert_towards_conflicting_one(
@pytest.mark.xfail
@pytest.mark.parametrize(
"perf_datum_id, towards_push_ids",
- [(3, {'prev_push_id': 1, 'push_id': 2}), (2, {'prev_push_id': 2, 'push_id': 3})],
+ [(3, {"prev_push_id": 1, "push_id": 2}), (2, {"prev_push_id": 2, "push_id": 3})],
)
def test_nudge_alert_to_changeset_without_alert_summary(
authorized_sheriff_client, test_perf_alert, test_perf_data, perf_datum_id, towards_push_ids
@@ -267,7 +267,7 @@ def test_nudge_alert_to_changeset_without_alert_summary(
old_alert_summary_id = test_perf_alert.summary.id
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '1/', towards_push_ids
+ reverse("performance-alerts-list") + "1/", towards_push_ids
)
assert resp.status_code == 200
@@ -276,8 +276,8 @@ def test_nudge_alert_to_changeset_without_alert_summary(
new_alert_summary = test_perf_alert.summary
assert new_alert_summary.id != old_alert_summary_id
- assert 'alert_summary_id' in resp.json()
- assert resp.json()['alert_summary_id'] == new_alert_summary.id
+ assert "alert_summary_id" in resp.json()
+ assert resp.json()["alert_summary_id"] == new_alert_summary.id
# new summary has correct push ids
assert new_alert_summary.prev_push_id == towards_push_ids["prev_push_id"]
@@ -291,7 +291,7 @@ def test_nudge_alert_to_changeset_without_alert_summary(
@pytest.mark.xfail
@pytest.mark.parametrize(
"perf_datum_ids, alert_id_to_move, towards_push_ids",
- [((2, 3), 2, {'push_id': 2, 'prev_push_id': 1}), (None, 1, {'push_id': 3, 'prev_push_id': 2})],
+ [((2, 3), 2, {"push_id": 2, "prev_push_id": 1}), (None, 1, {"push_id": 3, "prev_push_id": 2})],
)
def test_nudge_alert_to_changeset_with_an_alert_summary(
authorized_sheriff_client,
@@ -325,7 +325,7 @@ def test_nudge_alert_to_changeset_with_an_alert_summary(
assert target_summary.first_triaged is None
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + str(alert_id_to_move) + '/', towards_push_ids
+ reverse("performance-alerts-list") + str(alert_id_to_move) + "/", towards_push_ids
)
assert resp.status_code == 200
@@ -335,8 +335,8 @@ def test_nudge_alert_to_changeset_with_an_alert_summary(
target_summary.refresh_from_db()
assert alert_to_move.summary.id != old_alert_summary_id
- assert 'alert_summary_id' in resp.json()
- assert resp.json()['alert_summary_id'] == alert_to_move.summary.id
+ assert "alert_summary_id" in resp.json()
+ assert resp.json()["alert_summary_id"] == alert_to_move.summary.id
# old alert summary gets deleted
assert not PerformanceAlertSummary.objects.filter(pk=old_alert_summary_id).exists()
@@ -377,7 +377,7 @@ def test_nudge_left_alert_from_alert_summary_with_more_alerts(
test_perf_alert.save()
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '2/', {'push_id': 2, 'prev_push_id': 1}
+ reverse("performance-alerts-list") + "2/", {"push_id": 2, "prev_push_id": 1}
)
assert resp.status_code == 200
@@ -387,8 +387,8 @@ def test_nudge_left_alert_from_alert_summary_with_more_alerts(
test_perf_alert_summary_2.refresh_from_db()
assert test_perf_alert_2.summary.id != old_alert_summary_id
- assert 'alert_summary_id' in resp.json()
- assert resp.json()['alert_summary_id'] == test_perf_alert_2.summary.id
+ assert "alert_summary_id" in resp.json()
+ assert resp.json()["alert_summary_id"] == test_perf_alert_2.summary.id
# old alert summary still there
old_alert_summary = PerformanceAlertSummary.objects.filter(pk=old_alert_summary_id).first()
@@ -425,7 +425,7 @@ def test_nudge_right_alert_from_alert_summary_with_more_alerts(
test_perf_alert_2.save()
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '1/', {'push_id': 3, 'prev_push_id': 2}
+ reverse("performance-alerts-list") + "1/", {"push_id": 3, "prev_push_id": 2}
)
assert resp.status_code == 200
@@ -436,8 +436,8 @@ def test_nudge_right_alert_from_alert_summary_with_more_alerts(
test_perf_alert_summary_2.refresh_from_db()
assert test_perf_alert.summary.id != old_alert_summary_id
- assert 'alert_summary_id' in resp.json()
- assert resp.json()['alert_summary_id'] == test_perf_alert.summary.id
+ assert "alert_summary_id" in resp.json()
+ assert resp.json()["alert_summary_id"] == test_perf_alert.summary.id
# old alert summary still there
assert PerformanceAlertSummary.objects.filter(pk=old_alert_summary_id).count() == 1
@@ -458,7 +458,7 @@ def test_nudge_raises_exception_when_no_perf_data(
initial_alert_count = PerformanceAlert.objects.all().count()
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '1/', {'push_id': 3, 'prev_push_id': 2}
+ reverse("performance-alerts-list") + "1/", {"push_id": 3, "prev_push_id": 2}
)
assert resp.status_code == 400
@@ -471,7 +471,7 @@ def test_nudge_recalculates_alert_properties(
authorized_sheriff_client, test_perf_alert, test_perf_alert_summary, test_perf_data
):
def _get_alert_properties(perf_alert):
- prop_names = ['amount_pct', 'amount_abs', 'prev_value', 'new_value', 't_value']
+ prop_names = ["amount_pct", "amount_abs", "prev_value", "new_value", "t_value"]
return [getattr(perf_alert, prop_name) for prop_name in prop_names]
# let's update the performance data
@@ -481,7 +481,7 @@ def _get_alert_properties(perf_alert):
perf_datum.save()
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '1/', {'push_id': 3, 'prev_push_id': 2}
+ reverse("performance-alerts-list") + "1/", {"push_id": 3, "prev_push_id": 2}
)
assert resp.status_code == 200
test_perf_alert.refresh_from_db()
@@ -531,11 +531,11 @@ def test_timestamps_on_manual_created_alert_via_their_endpoints(
# created <= last_updated, created <= first_triaged
# BUT manually_created is True
resp = authorized_sheriff_client.post(
- reverse('performance-alerts-list'), alert_create_post_blob
+ reverse("performance-alerts-list"), alert_create_post_blob
)
assert resp.status_code == 200
- manual_alert_id = resp.json()['alert_id']
+ manual_alert_id = resp.json()["alert_id"]
manual_alert = PerformanceAlert.objects.get(pk=manual_alert_id)
assert manual_alert.manually_created is True
assert manual_alert.summary.first_triaged is not None
@@ -560,7 +560,7 @@ def test_alert_timestamps_via_endpoint(
old_last_updated = test_perf_alert.last_updated
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '1/', {'starred': True}
+ reverse("performance-alerts-list") + "1/", {"starred": True}
)
assert resp.status_code == 200
test_perf_alert.refresh_from_db()
@@ -577,7 +577,7 @@ def test_alert_timestamps_via_endpoint(
# keeps first_triaged the same
authorized_sheriff_client.force_authenticate(user=test_sheriff)
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '1/', {'status': PerformanceAlert.ACKNOWLEDGED}
+ reverse("performance-alerts-list") + "1/", {"status": PerformanceAlert.ACKNOWLEDGED}
)
assert resp.status_code == 200
test_perf_alert.refresh_from_db()
@@ -586,7 +586,7 @@ def test_alert_timestamps_via_endpoint(
assert test_perf_alert.last_updated > old_last_updated
-@pytest.mark.parametrize('relation', [PerformanceAlert.DOWNSTREAM, PerformanceAlert.REASSIGNED])
+@pytest.mark.parametrize("relation", [PerformanceAlert.DOWNSTREAM, PerformanceAlert.REASSIGNED])
def test_related_alerts_timestamps_via_endpoint(
authorized_sheriff_client,
test_sheriff,
@@ -609,8 +609,8 @@ def test_related_alerts_timestamps_via_endpoint(
old_summary_last_updated_2 = test_perf_alert_summary_2.last_updated
resp = authorized_sheriff_client.put(
- reverse('performance-alerts-list') + '1/',
- {'status': relation, 'related_summary_id': test_perf_alert_summary_2.id},
+ reverse("performance-alerts-list") + "1/",
+ {"status": relation, "related_summary_id": test_perf_alert_summary_2.id},
)
assert resp.status_code == 200
test_perf_alert.refresh_from_db()
@@ -673,4 +673,4 @@ def dump(an_alert):
for alert in alerts:
dump(alert)
for perf_datum in perf_data:
- pprint('PerfData(id={0.push_id}, push_timestamp={0.push_timestamp})'.format(perf_datum))
+ pprint("PerfData(id={0.push_id}, push_timestamp={0.push_timestamp})".format(perf_datum))
diff --git a/tests/webapp/api/test_performance_alertsummary_api.py b/tests/webapp/api/test_performance_alertsummary_api.py
index 5fd0238a1e6..32b31b314e3 100644
--- a/tests/webapp/api/test_performance_alertsummary_api.py
+++ b/tests/webapp/api/test_performance_alertsummary_api.py
@@ -31,8 +31,8 @@ def test_perf_alert_summary_onhold(test_repository_onhold, test_perf_framework):
for i in range(2):
Push.objects.create(
repository=test_repository_onhold,
- revision='1234abcd{}'.format(i),
- author='foo@bar.com',
+ revision="1234abcd{}".format(i),
+ author="foo@bar.com",
time=datetime.now(),
)
@@ -63,69 +63,69 @@ def test_alert_summaries_get(
test_taskcluster_metadata_2,
):
# verify that we get the performance summary + alert on GET
- resp = client.get(reverse('performance-alert-summaries-list'))
+ resp = client.get(reverse("performance-alert-summaries-list"))
assert resp.status_code == 200
# should just have the one alert summary (with one alert)
- assert resp.json()['next'] is None
- assert resp.json()['previous'] is None
- assert len(resp.json()['results']) == 1
- assert set(resp.json()['results'][0].keys()) == {
- 'alerts',
- 'bug_number',
- 'bug_updated',
- 'bug_due_date',
- 'issue_tracker',
- 'notes',
- 'assignee_username',
- 'assignee_email',
- 'framework',
- 'id',
- 'created',
- 'first_triaged',
- 'triage_due_date',
- 'prev_push_id',
- 'related_alerts',
- 'repository',
- 'push_id',
- 'status',
- 'revision',
- 'push_timestamp',
- 'prev_push_revision',
- 'performance_tags',
+ assert resp.json()["next"] is None
+ assert resp.json()["previous"] is None
+ assert len(resp.json()["results"]) == 1
+ assert set(resp.json()["results"][0].keys()) == {
+ "alerts",
+ "bug_number",
+ "bug_updated",
+ "bug_due_date",
+ "issue_tracker",
+ "notes",
+ "assignee_username",
+ "assignee_email",
+ "framework",
+ "id",
+ "created",
+ "first_triaged",
+ "triage_due_date",
+ "prev_push_id",
+ "related_alerts",
+ "repository",
+ "push_id",
+ "status",
+ "revision",
+ "push_timestamp",
+ "prev_push_revision",
+ "performance_tags",
}
- assert len(resp.json()['results'][0]['alerts']) == 1
- assert set(resp.json()['results'][0]['alerts'][0].keys()) == {
- 'id',
- 'status',
- 'series_signature',
- 'taskcluster_metadata',
- 'prev_taskcluster_metadata',
- 'profile_url',
- 'prev_profile_url',
- 'is_regression',
- 'starred',
- 'manually_created',
- 'prev_value',
- 'new_value',
- 't_value',
- 'amount_abs',
- 'amount_pct',
- 'summary_id',
- 'related_summary_id',
- 'classifier',
- 'classifier_email',
- 'backfill_record',
- 'noise_profile',
+ assert len(resp.json()["results"][0]["alerts"]) == 1
+ assert set(resp.json()["results"][0]["alerts"][0].keys()) == {
+ "id",
+ "status",
+ "series_signature",
+ "taskcluster_metadata",
+ "prev_taskcluster_metadata",
+ "profile_url",
+ "prev_profile_url",
+ "is_regression",
+ "starred",
+ "manually_created",
+ "prev_value",
+ "new_value",
+ "t_value",
+ "amount_abs",
+ "amount_pct",
+ "summary_id",
+ "related_summary_id",
+ "classifier",
+ "classifier_email",
+ "backfill_record",
+ "noise_profile",
}
- assert resp.json()['results'][0]['related_alerts'] == []
- assert set(resp.json()['results'][0]['alerts'][0]['taskcluster_metadata'].keys()) == {
- 'task_id',
- 'retry_id',
+ assert resp.json()["results"][0]["related_alerts"] == []
+ assert set(resp.json()["results"][0]["alerts"][0]["taskcluster_metadata"].keys()) == {
+ "task_id",
+ "retry_id",
}
- assert set(resp.json()['results'][0]['alerts'][0]['prev_taskcluster_metadata'].keys()) == {
- 'task_id',
- 'retry_id',
+ assert set(resp.json()["results"][0]["alerts"][0]["prev_taskcluster_metadata"].keys()) == {
+ "task_id",
+ "retry_id",
}
@@ -142,69 +142,69 @@ def test_alert_summaries_get_onhold(
test_repository_onhold,
):
# verify that we get the performance summary + alert on GET
- resp = client.get(reverse('performance-alert-summaries-list'))
+ resp = client.get(reverse("performance-alert-summaries-list"))
assert resp.status_code == 200
# should just have the one alert summary (with one alert)
- assert resp.json()['next'] is None
- assert resp.json()['previous'] is None
- assert len(resp.json()['results']) == 1
- assert set(resp.json()['results'][0].keys()) == {
- 'alerts',
- 'bug_number',
- 'bug_updated',
- 'bug_due_date',
- 'issue_tracker',
- 'notes',
- 'assignee_username',
- 'assignee_email',
- 'framework',
- 'id',
- 'created',
- 'first_triaged',
- 'triage_due_date',
- 'prev_push_id',
- 'related_alerts',
- 'repository',
- 'push_id',
- 'status',
- 'revision',
- 'push_timestamp',
- 'prev_push_revision',
- 'performance_tags',
+ assert resp.json()["next"] is None
+ assert resp.json()["previous"] is None
+ assert len(resp.json()["results"]) == 1
+ assert set(resp.json()["results"][0].keys()) == {
+ "alerts",
+ "bug_number",
+ "bug_updated",
+ "bug_due_date",
+ "issue_tracker",
+ "notes",
+ "assignee_username",
+ "assignee_email",
+ "framework",
+ "id",
+ "created",
+ "first_triaged",
+ "triage_due_date",
+ "prev_push_id",
+ "related_alerts",
+ "repository",
+ "push_id",
+ "status",
+ "revision",
+ "push_timestamp",
+ "prev_push_revision",
+ "performance_tags",
}
- assert len(resp.json()['results'][0]['alerts']) == 1
- assert set(resp.json()['results'][0]['alerts'][0].keys()) == {
- 'id',
- 'status',
- 'series_signature',
- 'taskcluster_metadata',
- 'prev_taskcluster_metadata',
- 'profile_url',
- 'prev_profile_url',
- 'is_regression',
- 'starred',
- 'manually_created',
- 'prev_value',
- 'new_value',
- 't_value',
- 'amount_abs',
- 'amount_pct',
- 'summary_id',
- 'related_summary_id',
- 'classifier',
- 'classifier_email',
- 'backfill_record',
- 'noise_profile',
+ assert len(resp.json()["results"][0]["alerts"]) == 1
+ assert set(resp.json()["results"][0]["alerts"][0].keys()) == {
+ "id",
+ "status",
+ "series_signature",
+ "taskcluster_metadata",
+ "prev_taskcluster_metadata",
+ "profile_url",
+ "prev_profile_url",
+ "is_regression",
+ "starred",
+ "manually_created",
+ "prev_value",
+ "new_value",
+ "t_value",
+ "amount_abs",
+ "amount_pct",
+ "summary_id",
+ "related_summary_id",
+ "classifier",
+ "classifier_email",
+ "backfill_record",
+ "noise_profile",
}
- assert resp.json()['results'][0]['related_alerts'] == []
- assert set(resp.json()['results'][0]['alerts'][0]['taskcluster_metadata'].keys()) == {
- 'task_id',
- 'retry_id',
+ assert resp.json()["results"][0]["related_alerts"] == []
+ assert set(resp.json()["results"][0]["alerts"][0]["taskcluster_metadata"].keys()) == {
+ "task_id",
+ "retry_id",
}
- assert set(resp.json()['results'][0]['alerts'][0]['prev_taskcluster_metadata'].keys()) == {
- 'task_id',
- 'retry_id',
+ assert set(resp.json()["results"][0]["alerts"][0]["prev_taskcluster_metadata"].keys()) == {
+ "task_id",
+ "retry_id",
}
@@ -212,27 +212,27 @@ def test_alert_summaries_put(
client, test_repository, test_perf_signature, test_perf_alert_summary, test_user, test_sheriff
):
# verify that we fail if not authenticated
- resp = client.put(reverse('performance-alert-summaries-list') + '1/', {'status': 1})
+ resp = client.put(reverse("performance-alert-summaries-list") + "1/", {"status": 1})
assert resp.status_code == 403
assert PerformanceAlertSummary.objects.get(id=1).status == 0
# verify that we fail if authenticated, but not staff
client.force_authenticate(user=test_user)
- resp = client.put(reverse('performance-alert-summaries-list') + '1/', {'status': 1})
+ resp = client.put(reverse("performance-alert-summaries-list") + "1/", {"status": 1})
assert resp.status_code == 403
assert PerformanceAlertSummary.objects.get(id=1).status == 0
# verify that we succeed if authenticated + staff
client.force_authenticate(user=test_sheriff)
- resp = client.put(reverse('performance-alert-summaries-list') + '1/', {'status': 1})
+ resp = client.put(reverse("performance-alert-summaries-list") + "1/", {"status": 1})
assert resp.status_code == 200
assert PerformanceAlertSummary.objects.get(id=1).status == 1
# verify we can set assignee
client.force_authenticate(user=test_sheriff)
resp = client.put(
- reverse('performance-alert-summaries-list') + '1/',
- {'assignee_username': test_user.username},
+ reverse("performance-alert-summaries-list") + "1/",
+ {"assignee_username": test_user.username},
)
assert resp.status_code == 200
assert PerformanceAlertSummary.objects.get(id=1).assignee == test_user
@@ -248,20 +248,20 @@ def test_auth_for_alert_summary_post(
test_sheriff,
):
post_blob = {
- 'repository_id': test_repository.id,
- 'framework_id': test_perf_signature.framework.id,
- 'prev_push_id': 1,
- 'push_id': 2,
+ "repository_id": test_repository.id,
+ "framework_id": test_perf_signature.framework.id,
+ "prev_push_id": 1,
+ "push_id": 2,
}
# verify that we fail if not authenticated
- resp = client.post(reverse('performance-alert-summaries-list'), post_blob)
+ resp = client.post(reverse("performance-alert-summaries-list"), post_blob)
assert resp.status_code == 403
assert PerformanceAlertSummary.objects.count() == 0
# verify that we fail if authenticated, but not staff
client.force_authenticate(user=test_user)
- resp = client.post(reverse('performance-alert-summaries-list'), post_blob)
+ resp = client.post(reverse("performance-alert-summaries-list"), post_blob)
assert resp.status_code == 403
assert PerformanceAlertSummary.objects.count() == 0
@@ -276,27 +276,27 @@ def test_alert_summary_post(
test_sheriff,
):
post_blob = {
- 'repository_id': test_repository.id,
- 'framework_id': test_perf_signature.framework.id,
- 'prev_push_id': 1,
- 'push_id': 2,
+ "repository_id": test_repository.id,
+ "framework_id": test_perf_signature.framework.id,
+ "prev_push_id": 1,
+ "push_id": 2,
}
# verify that we succeed if authenticated + staff
- resp = authorized_sheriff_client.post(reverse('performance-alert-summaries-list'), post_blob)
+ resp = authorized_sheriff_client.post(reverse("performance-alert-summaries-list"), post_blob)
assert resp.status_code == 200
assert PerformanceAlertSummary.objects.count() == 1
alert_summary = PerformanceAlertSummary.objects.first()
assert alert_summary.repository == test_repository
assert alert_summary.framework == test_perf_signature.framework
- assert alert_summary.prev_push_id == post_blob['prev_push_id']
- assert alert_summary.push_id == post_blob['push_id']
- assert resp.data['alert_summary_id'] == alert_summary.id
+ assert alert_summary.prev_push_id == post_blob["prev_push_id"]
+ assert alert_summary.push_id == post_blob["push_id"]
+ assert resp.data["alert_summary_id"] == alert_summary.id
# verify that we don't create a new performance alert summary if one
# already exists (but also don't throw an error)
- resp = authorized_sheriff_client.post(reverse('performance-alert-summaries-list'), post_blob)
+ resp = authorized_sheriff_client.post(reverse("performance-alert-summaries-list"), post_blob)
assert resp.status_code == 200
assert PerformanceAlertSummary.objects.count() == 1
@@ -312,21 +312,21 @@ def test_push_range_validation_for_alert_summary_post(
):
identical_push = 1
post_blob = {
- 'repository_id': test_repository.id,
- 'framework_id': test_perf_signature.framework.id,
- 'prev_push_id': identical_push,
- 'push_id': identical_push,
+ "repository_id": test_repository.id,
+ "framework_id": test_perf_signature.framework.id,
+ "prev_push_id": identical_push,
+ "push_id": identical_push,
}
# verify that we succeed if authenticated + staff
- resp = authorized_sheriff_client.post(reverse('performance-alert-summaries-list'), post_blob)
+ resp = authorized_sheriff_client.post(reverse("performance-alert-summaries-list"), post_blob)
assert resp.status_code == 400
assert PerformanceAlertSummary.objects.count() == 0
@pytest.mark.parametrize(
- 'modification', [{'notes': 'human created notes'}, {'bug_number': 123456, 'issue_tracker': 1}]
+ "modification", [{"notes": "human created notes"}, {"bug_number": 123456, "issue_tracker": 1}]
)
def test_alert_summary_timestamps_via_endpoints(
authorized_sheriff_client, test_perf_alert_summary, modification
@@ -335,7 +335,7 @@ def test_alert_summary_timestamps_via_endpoints(
# when editing notes & linking bugs
resp = authorized_sheriff_client.put(
- reverse('performance-alert-summaries-list') + '1/', modification
+ reverse("performance-alert-summaries-list") + "1/", modification
)
assert resp.status_code == 200
test_perf_alert_summary.refresh_from_db()
@@ -354,7 +354,7 @@ def test_bug_number_and_timestamp_on_setting_value(
# link a bug
resp = authorized_sheriff_client.put(
- reverse('performance-alert-summaries-list') + '1/', {'bug_number': 123456}
+ reverse("performance-alert-summaries-list") + "1/", {"bug_number": 123456}
)
assert resp.status_code == 200
test_perf_alert_summary.refresh_from_db()
@@ -374,7 +374,7 @@ def test_bug_number_and_timestamp_on_overriding(
# update the existing bug number
resp = authorized_sheriff_client.put(
- reverse('performance-alert-summaries-list') + '1/', {'bug_number': 987654}
+ reverse("performance-alert-summaries-list") + "1/", {"bug_number": 987654}
)
assert resp.status_code == 200
@@ -393,7 +393,7 @@ def test_bug_number_and_timestamp_dont_update_from_other_modifications(
# link a bug
resp = authorized_sheriff_client.put(
- reverse('performance-alert-summaries-list') + '1/', {'notes': 'human created notes'}
+ reverse("performance-alert-summaries-list") + "1/", {"notes": "human created notes"}
)
assert resp.status_code == 200
test_perf_alert_summary.refresh_from_db()
@@ -409,8 +409,8 @@ def test_add_multiple_tags_to_alert_summary(
assert test_perf_alert_summary.performance_tags.count() == 1
resp = authorized_sheriff_client.put(
- reverse('performance-alert-summaries-list') + '1/',
- {'performance_tags': [test_perf_tag.name, test_perf_tag_2.name]},
+ reverse("performance-alert-summaries-list") + "1/",
+ {"performance_tags": [test_perf_tag.name, test_perf_tag_2.name]},
)
assert resp.status_code == 200
test_perf_alert_summary.refresh_from_db()
@@ -422,7 +422,7 @@ def test_remove_a_tag_from_a_summary(authorized_sheriff_client, test_perf_alert_
assert test_perf_alert_summary.performance_tags.count() == 1
resp = authorized_sheriff_client.put(
- reverse('performance-alert-summaries-list') + '1/', {'performance_tags': []}
+ reverse("performance-alert-summaries-list") + "1/", {"performance_tags": []}
)
assert resp.status_code == 200
test_perf_alert_summary.refresh_from_db()
@@ -436,8 +436,8 @@ def test_cannot_add_unregistered_tag_to_a_summary(
assert test_perf_alert_summary.performance_tags.count() == 1
resp = authorized_sheriff_client.put(
- reverse('performance-alert-summaries-list') + '1/',
- {'performance_tags': ['unregistered-tag']},
+ reverse("performance-alert-summaries-list") + "1/",
+ {"performance_tags": ["unregistered-tag"]},
)
assert resp.status_code == 400
test_perf_alert_summary.refresh_from_db()
@@ -460,17 +460,17 @@ def test_timerange_with_summary_outside_range(
test_perf_alert_summary_2.push.save()
resp = client.get(
- reverse('performance-alert-summaries-list'),
+ reverse("performance-alert-summaries-list"),
data={
- 'framework': 1,
- 'timerange': timerange_to_test,
+ "framework": 1,
+ "timerange": timerange_to_test,
},
)
assert resp.status_code == 200
- retrieved_summaries = resp.json()['results']
- summary_ids = [summary['id'] for summary in retrieved_summaries]
+ retrieved_summaries = resp.json()["results"]
+ summary_ids = [summary["id"] for summary in retrieved_summaries]
assert test_perf_alert_summary_2.id in summary_ids
assert len(summary_ids) == 1
@@ -491,16 +491,16 @@ def test_timerange_with_all_summaries_in_range(
test_perf_alert_summary_2.push.save()
resp = client.get(
- reverse('performance-alert-summaries-list'),
+ reverse("performance-alert-summaries-list"),
data={
- 'framework': 1,
- 'timerange': timerange_to_test,
+ "framework": 1,
+ "timerange": timerange_to_test,
},
)
assert resp.status_code == 200
- retrieved_summaries = resp.json()['results']
- summary_ids = [summary['id'] for summary in retrieved_summaries]
+ retrieved_summaries = resp.json()["results"]
+ summary_ids = [summary["id"] for summary in retrieved_summaries]
assert test_perf_alert_summary.id in summary_ids
assert test_perf_alert_summary_2.id in summary_ids
@@ -511,16 +511,16 @@ def test_pagesize_is_limited_from_params(
client, test_perf_alert_summary, test_perf_alert_summary_2
):
resp = client.get(
- reverse('performance-alert-summaries-list'),
+ reverse("performance-alert-summaries-list"),
data={
- 'framework': 1,
- 'limit': 1,
+ "framework": 1,
+ "limit": 1,
},
)
assert resp.status_code == 200
- retrieved_summaries = resp.json()['results']
- summary_ids = [summary['id'] for summary in retrieved_summaries]
+ retrieved_summaries = resp.json()["results"]
+ summary_ids = [summary["id"] for summary in retrieved_summaries]
assert test_perf_alert_summary_2.id in summary_ids
assert len(summary_ids) == 1
@@ -530,18 +530,18 @@ def test_pagesize_with_limit_higher_than_total_summaries(
client, test_perf_alert_summary, test_perf_alert_summary_2
):
resp = client.get(
- reverse('performance-alert-summaries-list'),
+ reverse("performance-alert-summaries-list"),
data={
- 'framework': 1,
- 'limit': 5,
+ "framework": 1,
+ "limit": 5,
},
)
assert resp.status_code == 200
resp_json = resp.json()
- assert resp_json['next'] is None
- assert resp_json['previous'] is None
- retrieved_summaries = resp_json['results']
- summary_ids = [summary['id'] for summary in retrieved_summaries]
+ assert resp_json["next"] is None
+ assert resp_json["previous"] is None
+ retrieved_summaries = resp_json["results"]
+ summary_ids = [summary["id"] for summary in retrieved_summaries]
assert test_perf_alert_summary.id in summary_ids
assert test_perf_alert_summary_2.id in summary_ids
@@ -559,8 +559,8 @@ def related_alert(test_perf_alert_summary, test_perf_alert_summary_2, test_perf_
@pytest.mark.parametrize(
- 'text_to_filter',
- ['mysuite2', 'mysuite2 mytest2', 'mytest2 win7', 'mysuite2 mytest2 win7 e10s opt'],
+ "text_to_filter",
+ ["mysuite2", "mysuite2 mytest2", "mytest2 win7", "mysuite2 mytest2 win7 e10s opt"],
)
def test_filter_text_accounts_for_related_alerts_also(
text_to_filter, client, test_perf_alert_summary, test_perf_alert, related_alert
@@ -568,17 +568,17 @@ def test_filter_text_accounts_for_related_alerts_also(
summary_id = test_perf_alert_summary.id
resp = client.get(
- reverse('performance-alert-summaries-list'),
+ reverse("performance-alert-summaries-list"),
data={
- 'framework': 1,
- 'page': 1,
- 'filter_text': text_to_filter,
+ "framework": 1,
+ "page": 1,
+ "filter_text": text_to_filter,
}, # excluded 'status' field to emulate 'all statuses'
)
assert resp.status_code == 200
- retrieved_summaries = resp.json()['results']
- summary_ids = [summary['id'] for summary in retrieved_summaries]
+ retrieved_summaries = resp.json()["results"]
+ summary_ids = [summary["id"] for summary in retrieved_summaries]
assert summary_id in summary_ids
# also ensure original & related summary are both fetched
diff --git a/tests/webapp/api/test_performance_bug_template_api.py b/tests/webapp/api/test_performance_bug_template_api.py
index e6ee7f4cc89..089853dfbc3 100644
--- a/tests/webapp/api/test_performance_bug_template_api.py
+++ b/tests/webapp/api/test_performance_bug_template_api.py
@@ -4,30 +4,30 @@
def test_perf_bug_template_api(client, test_perf_framework):
- framework2 = PerformanceFramework.objects.create(name='test_talos2', enabled=True)
+ framework2 = PerformanceFramework.objects.create(name="test_talos2", enabled=True)
template_dicts = []
for framework, i in zip((test_perf_framework, framework2), range(2)):
dict = {
- 'keywords': "keyword{}".format(i),
- 'status_whiteboard': "sw{}".format(i),
- 'default_component': "dfcom{}".format(i),
- 'default_product': "dfprod{}".format(i),
- 'cc_list': "foo{}@bar.com".format(i),
- 'text': "my great text {}".format(i),
+ "keywords": "keyword{}".format(i),
+ "status_whiteboard": "sw{}".format(i),
+ "default_component": "dfcom{}".format(i),
+ "default_product": "dfprod{}".format(i),
+ "cc_list": "foo{}@bar.com".format(i),
+ "text": "my great text {}".format(i),
}
PerformanceBugTemplate.objects.create(framework=framework, **dict)
- dict['framework'] = framework.id
+ dict["framework"] = framework.id
template_dicts.append(dict)
# test that we can get them all
- resp = client.get(reverse('performance-bug-template-list'))
+ resp = client.get(reverse("performance-bug-template-list"))
assert resp.status_code == 200
assert resp.json() == template_dicts
# test that we can get just one (the usual case, probably)
resp = client.get(
- reverse('performance-bug-template-list') + '?framework={}'.format(test_perf_framework.id)
+ reverse("performance-bug-template-list") + "?framework={}".format(test_perf_framework.id)
)
assert resp.status_code == 200
assert resp.json() == [template_dicts[0]]
diff --git a/tests/webapp/api/test_performance_data_api.py b/tests/webapp/api/test_performance_data_api.py
index 74746d3d440..126a04d7698 100644
--- a/tests/webapp/api/test_performance_data_api.py
+++ b/tests/webapp/api/test_performance_data_api.py
@@ -25,13 +25,13 @@ def summary_perf_signature(test_perf_signature):
# summary performance signatures don't have a test value
signature = PerformanceSignature.objects.create(
repository=test_perf_signature.repository,
- signature_hash=(40 * 's'),
+ signature_hash=(40 * "s"),
framework=test_perf_signature.framework,
platform=test_perf_signature.platform,
option_collection=test_perf_signature.option_collection,
- suite='mysuite',
- test='',
- extra_options='e10s shell',
+ suite="mysuite",
+ test="",
+ extra_options="e10s shell",
has_subtests=True,
last_updated=datetime.datetime.now(),
)
@@ -44,7 +44,7 @@ def summary_perf_signature(test_perf_signature):
def test_perf_signature_same_hash_different_framework(test_perf_signature):
# a new signature, same as the test_perf_signature in every
# way, except it belongs to a different "framework"
- new_framework = PerformanceFramework.objects.create(name='test_talos_2', enabled=True)
+ new_framework = PerformanceFramework.objects.create(name="test_talos_2", enabled=True)
new_signature = PerformanceSignature.objects.create(
repository=test_perf_signature.repository,
signature_hash=test_perf_signature.signature_hash,
@@ -61,23 +61,23 @@ def test_perf_signature_same_hash_different_framework(test_perf_signature):
def test_no_summary_performance_data(client, test_perf_signature, test_repository):
resp = client.get(
- reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+ reverse("performance-signatures-list", kwargs={"project": test_repository.name})
)
assert resp.status_code == 200
assert resp.json() == {
str(test_perf_signature.id): {
- 'id': test_perf_signature.id,
- 'signature_hash': test_perf_signature.signature_hash,
- 'test': test_perf_signature.test,
- 'application': test_perf_signature.application,
- 'suite': test_perf_signature.suite,
- 'tags': test_perf_signature.tags.split(' '),
- 'option_collection_hash': test_perf_signature.option_collection.option_collection_hash,
- 'framework_id': test_perf_signature.framework.id,
- 'machine_platform': test_perf_signature.platform.platform,
- 'extra_options': test_perf_signature.extra_options.split(' '),
- 'measurement_unit': test_perf_signature.measurement_unit,
- 'should_alert': test_perf_signature.should_alert,
+ "id": test_perf_signature.id,
+ "signature_hash": test_perf_signature.signature_hash,
+ "test": test_perf_signature.test,
+ "application": test_perf_signature.application,
+ "suite": test_perf_signature.suite,
+ "tags": test_perf_signature.tags.split(" "),
+ "option_collection_hash": test_perf_signature.option_collection.option_collection_hash,
+ "framework_id": test_perf_signature.framework.id,
+ "machine_platform": test_perf_signature.platform.platform,
+ "extra_options": test_perf_signature.extra_options.split(" "),
+ "measurement_unit": test_perf_signature.measurement_unit,
+ "should_alert": test_perf_signature.should_alert,
}
}
@@ -85,12 +85,12 @@ def test_no_summary_performance_data(client, test_perf_signature, test_repositor
def test_performance_platforms(client, test_perf_signature):
resp = client.get(
reverse(
- 'performance-signatures-platforms-list',
+ "performance-signatures-platforms-list",
kwargs={"project": test_perf_signature.repository.name},
)
)
assert resp.status_code == 200
- assert resp.json() == ['win7']
+ assert resp.json() == ["win7"]
def test_performance_platforms_expired_test(client, test_perf_signature):
@@ -99,10 +99,10 @@ def test_performance_platforms_expired_test(client, test_perf_signature):
test_perf_signature.save()
resp = client.get(
reverse(
- 'performance-signatures-platforms-list',
+ "performance-signatures-platforms-list",
kwargs={"project": test_perf_signature.repository.name},
)
- + '?interval={}'.format(86400)
+ + "?interval={}".format(86400)
)
assert resp.status_code == 200
assert resp.json() == []
@@ -110,8 +110,8 @@ def test_performance_platforms_expired_test(client, test_perf_signature):
def test_performance_platforms_framework_filtering(client, test_perf_signature):
# check framework filtering
- framework2 = PerformanceFramework.objects.create(name='test_talos2', enabled=True)
- platform2 = MachinePlatform.objects.create(os_name='win', platform='win7-a', architecture='x86')
+ framework2 = PerformanceFramework.objects.create(name="test_talos2", enabled=True)
+ platform2 = MachinePlatform.objects.create(os_name="win", platform="win7-a", architecture="x86")
PerformanceSignature.objects.create(
repository=test_perf_signature.repository,
signature_hash=test_perf_signature.signature_hash,
@@ -127,23 +127,23 @@ def test_performance_platforms_framework_filtering(client, test_perf_signature):
# by default should return both
resp = client.get(
reverse(
- 'performance-signatures-platforms-list',
+ "performance-signatures-platforms-list",
kwargs={"project": test_perf_signature.repository.name},
)
)
assert resp.status_code == 200
- assert sorted(resp.json()) == ['win7', 'win7-a']
+ assert sorted(resp.json()) == ["win7", "win7-a"]
# if we specify just one framework, should only return one
resp = client.get(
reverse(
- 'performance-signatures-platforms-list',
+ "performance-signatures-platforms-list",
kwargs={"project": test_perf_signature.repository.name},
)
- + '?framework={}'.format(framework2.id)
+ + "?framework={}".format(framework2.id)
)
assert resp.status_code == 200
- assert resp.json() == ['win7-a']
+ assert resp.json() == ["win7-a"]
def test_summary_performance_data(
@@ -151,12 +151,12 @@ def test_summary_performance_data(
):
summary_signature_id = summary_perf_signature.id
resp = client.get(
- reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+ reverse("performance-signatures-list", kwargs={"project": test_repository.name})
)
assert resp.status_code == 200
resp = client.get(
- reverse('performance-signatures-list', kwargs={"project": test_repository.name})
+ reverse("performance-signatures-list", kwargs={"project": test_repository.name})
)
assert resp.status_code == 200
@@ -165,30 +165,30 @@ def test_summary_performance_data(
for signature in [summary_perf_signature, test_perf_signature]:
expected = {
- 'id': signature.id,
- 'signature_hash': signature.signature_hash,
- 'suite': signature.suite,
- 'option_collection_hash': signature.option_collection.option_collection_hash,
- 'framework_id': signature.framework_id,
- 'machine_platform': signature.platform.platform,
- 'should_alert': signature.should_alert,
+ "id": signature.id,
+ "signature_hash": signature.signature_hash,
+ "suite": signature.suite,
+ "option_collection_hash": signature.option_collection.option_collection_hash,
+ "framework_id": signature.framework_id,
+ "machine_platform": signature.platform.platform,
+ "should_alert": signature.should_alert,
}
if signature.test:
- expected['test'] = signature.test
+ expected["test"] = signature.test
if signature.has_subtests:
- expected['has_subtests'] = True
+ expected["has_subtests"] = True
if signature.tags:
# tags stored as charField but api returns as list
- expected['tags'] = signature.tags.split(' ')
+ expected["tags"] = signature.tags.split(" ")
if signature.parent_signature:
- expected['parent_signature'] = signature.parent_signature.signature_hash
+ expected["parent_signature"] = signature.parent_signature.signature_hash
if signature.extra_options:
# extra_options stored as charField but api returns as list
- expected['extra_options'] = signature.extra_options.split(' ')
+ expected["extra_options"] = signature.extra_options.split(" ")
if signature.measurement_unit:
- expected['measurement_unit'] = signature.measurement_unit
+ expected["measurement_unit"] = signature.measurement_unit
if signature.application:
- expected['application'] = signature.application
+ expected["application"] = signature.application
assert resp.data[signature.id] == expected
@@ -199,21 +199,21 @@ def test_filter_signatures_by_framework(
# Filter by original framework
resp = client.get(
- reverse('performance-signatures-list', kwargs={"project": test_repository.name})
- + '?framework=%s' % test_perf_signature.framework.id,
+ reverse("performance-signatures-list", kwargs={"project": test_repository.name})
+ + "?framework=%s" % test_perf_signature.framework.id,
)
assert resp.status_code == 200
assert len(resp.data.keys()) == 1
- assert resp.data[test_perf_signature.id]['framework_id'] == test_perf_signature.framework.id
+ assert resp.data[test_perf_signature.id]["framework_id"] == test_perf_signature.framework.id
# Filter by new framework
resp = client.get(
- reverse('performance-signatures-list', kwargs={"project": test_repository.name})
- + '?framework=%s' % signature2.framework.id,
+ reverse("performance-signatures-list", kwargs={"project": test_repository.name})
+ + "?framework=%s" % signature2.framework.id,
)
assert resp.status_code == 200
assert len(resp.data.keys()) == 1
- assert resp.data[signature2.id]['framework_id'] == signature2.framework.id
+ assert resp.data[signature2.id]["framework_id"] == signature2.framework.id
def test_filter_data_by_no_retriggers(
@@ -258,17 +258,17 @@ def test_filter_data_by_no_retriggers(
)
resp = client.get(
- reverse('performance-data-list', kwargs={"project": test_repository.name})
- + '?signatures={}&no_retriggers=true'.format(test_perf_signature.signature_hash)
+ reverse("performance-data-list", kwargs={"project": test_repository.name})
+ + "?signatures={}&no_retriggers=true".format(test_perf_signature.signature_hash)
)
assert resp.status_code == 200
datums = resp.data[test_perf_signature.signature_hash]
assert len(datums) == 2
- assert set(datum['signature_id'] for datum in datums) == {
+ assert set(datum["signature_id"] for datum in datums) == {
test_perf_signature.id,
test_perf_signature_2.id,
}
- assert signature_for_retrigger_data.id not in set(datum['signature_id'] for datum in datums)
+ assert signature_for_retrigger_data.id not in set(datum["signature_id"] for datum in datums)
def test_filter_data_by_framework(
@@ -292,56 +292,56 @@ def test_filter_data_by_framework(
# No filtering, return two datapoints (this behaviour actually sucks,
# but it's "by design" for now, see bug 1265709)
resp = client.get(
- reverse('performance-data-list', kwargs={"project": test_repository.name})
- + '?signatures='
+ reverse("performance-data-list", kwargs={"project": test_repository.name})
+ + "?signatures="
+ test_perf_signature.signature_hash
)
assert resp.status_code == 200
datums = resp.data[test_perf_signature.signature_hash]
assert len(datums) == 2
- assert set(datum['signature_id'] for datum in datums) == {1, 2}
+ assert set(datum["signature_id"] for datum in datums) == {1, 2}
# Filtering by first framework
resp = client.get(
- reverse('performance-data-list', kwargs={"project": test_repository.name})
- + '?signatures={}&framework={}'.format(
+ reverse("performance-data-list", kwargs={"project": test_repository.name})
+ + "?signatures={}&framework={}".format(
test_perf_signature.signature_hash, test_perf_signature.framework.id
)
)
assert resp.status_code == 200
datums = resp.data[test_perf_signature.signature_hash]
assert len(datums) == 1
- assert datums[0]['signature_id'] == 1
+ assert datums[0]["signature_id"] == 1
# Filtering by second framework
resp = client.get(
- reverse('performance-data-list', kwargs={"project": test_repository.name})
- + '?signatures={}&framework={}'.format(
+ reverse("performance-data-list", kwargs={"project": test_repository.name})
+ + "?signatures={}&framework={}".format(
test_perf_signature.signature_hash, signature2.framework.id
)
)
assert resp.status_code == 200
datums = resp.data[test_perf_signature.signature_hash]
assert len(datums) == 1
- assert datums[0]['signature_id'] == 2
+ assert datums[0]["signature_id"] == 2
def test_filter_signatures_by_interval(client, test_perf_signature):
# interval of the last 24 hours; only one signature was last updated within that timeframe
resp = client.get(
reverse(
- 'performance-signatures-list', kwargs={"project": test_perf_signature.repository.name}
+ "performance-signatures-list", kwargs={"project": test_perf_signature.repository.name}
)
- + '?interval={}'.format(86400)
+ + "?interval={}".format(86400)
)
assert resp.status_code == 200
assert len(resp.json().keys()) == 1
- assert resp.json()[str(test_perf_signature.id)]['id'] == 1
+ assert resp.json()[str(test_perf_signature.id)]["id"] == 1
@pytest.mark.parametrize(
- 'start_date, end_date, exp_count, exp_id',
- [(SEVEN_DAYS_AGO, ONE_DAY_AGO, 1, 1), (THREE_DAYS_AGO, '', 1, 1), (ONE_DAY_AGO, '', 0, 0)],
+ "start_date, end_date, exp_count, exp_id",
+ [(SEVEN_DAYS_AGO, ONE_DAY_AGO, 1, 1), (THREE_DAYS_AGO, "", 1, 1), (ONE_DAY_AGO, "", 0, 0)],
)
def test_filter_signatures_by_range(
client, test_perf_signature, start_date, end_date, exp_count, exp_id
@@ -352,17 +352,17 @@ def test_filter_signatures_by_range(
resp = client.get(
reverse(
- 'performance-signatures-list', kwargs={"project": test_perf_signature.repository.name}
+ "performance-signatures-list", kwargs={"project": test_perf_signature.repository.name}
)
- + '?start_date={}&end_date={}'.format(start_date, end_date)
+ + "?start_date={}&end_date={}".format(start_date, end_date)
)
assert resp.status_code == 200
assert len(resp.json().keys()) == exp_count
if exp_count != 0:
- assert resp.json()[str(test_perf_signature.id)]['id'] == exp_id
+ assert resp.json()[str(test_perf_signature.id)]["id"] == exp_id
-@pytest.mark.parametrize('interval, exp_push_ids', [(86400, {1}), (86400 * 3, {2, 1})])
+@pytest.mark.parametrize("interval, exp_push_ids", [(86400, {1}), (86400 * 3, {2, 1})])
def test_filter_data_by_interval(
client, test_repository, test_perf_signature, interval, exp_push_ids
):
@@ -372,8 +372,8 @@ def test_filter_data_by_interval(
):
push = Push.objects.create(
repository=test_repository,
- revision='abcdefgh%s' % i,
- author='foo@bar.com',
+ revision="abcdefgh%s" % i,
+ author="foo@bar.com",
time=timestamp,
)
PerformanceDatum.objects.create(
@@ -386,20 +386,20 @@ def test_filter_data_by_interval(
# going back an interval of 1 day, we should find 1 item
resp = client.get(
- reverse('performance-data-list', kwargs={"project": test_repository.name})
- + '?signature_id={}&interval={}'.format(test_perf_signature.id, interval)
+ reverse("performance-data-list", kwargs={"project": test_repository.name})
+ + "?signature_id={}&interval={}".format(test_perf_signature.id, interval)
)
assert resp.status_code == 200
perf_data = resp.data[test_perf_signature.signature_hash]
- push_ids = {datum['push_id'] for datum in perf_data}
+ push_ids = {datum["push_id"] for datum in perf_data}
assert push_ids == exp_push_ids
@pytest.mark.parametrize(
- 'start_date, end_date, exp_push_ids',
- [(SEVEN_DAYS_AGO, THREE_DAYS_AGO, {3}), (THREE_DAYS_AGO, '', {2, 1})],
+ "start_date, end_date, exp_push_ids",
+ [(SEVEN_DAYS_AGO, THREE_DAYS_AGO, {3}), (THREE_DAYS_AGO, "", {2, 1})],
)
def test_filter_data_by_range(
client, test_repository, test_perf_signature, start_date, end_date, exp_push_ids
@@ -410,8 +410,8 @@ def test_filter_data_by_range(
):
push = Push.objects.create(
repository=test_repository,
- revision='abcdefgh%s' % i,
- author='foo@bar.com',
+ revision="abcdefgh%s" % i,
+ author="foo@bar.com",
time=timestamp,
)
PerformanceDatum.objects.create(
@@ -423,8 +423,8 @@ def test_filter_data_by_range(
)
resp = client.get(
- reverse('performance-data-list', kwargs={"project": test_repository.name})
- + '?signature_id={}&start_date={}&end_date={}'.format(
+ reverse("performance-data-list", kwargs={"project": test_repository.name})
+ + "?signature_id={}&start_date={}&end_date={}".format(
test_perf_signature.id, start_date, end_date
)
)
@@ -432,18 +432,18 @@ def test_filter_data_by_range(
assert resp.status_code == 200
perf_data = resp.data[test_perf_signature.signature_hash]
- push_ids = {datum['push_id'] for datum in perf_data}
+ push_ids = {datum["push_id"] for datum in perf_data}
assert push_ids == exp_push_ids
def test_job_ids_validity(client, test_repository):
resp = client.get(
- reverse('performance-data-list', kwargs={"project": test_repository.name}) + '?job_id=1'
+ reverse("performance-data-list", kwargs={"project": test_repository.name}) + "?job_id=1"
)
assert resp.status_code == 200
resp = client.get(
- reverse('performance-data-list', kwargs={"project": test_repository.name}) + '?job_id=foo'
+ reverse("performance-data-list", kwargs={"project": test_repository.name}) + "?job_id=foo"
)
assert resp.status_code == 400
@@ -452,7 +452,7 @@ def test_filter_data_by_signature(
client, test_repository, test_perf_signature, summary_perf_signature
):
push = Push.objects.create(
- repository=test_repository, revision='abcdefghi', author='foo@bar.com', time=NOW
+ repository=test_repository, revision="abcdefghi", author="foo@bar.com", time=NOW
)
for i, signature in enumerate([test_perf_signature, summary_perf_signature]):
PerformanceDatum.objects.create(
@@ -467,63 +467,63 @@ def test_filter_data_by_signature(
# passing in signature_id and signature hash
for i, signature in enumerate([test_perf_signature, summary_perf_signature]):
for param, value in [
- ('signatures', signature.signature_hash),
- ('signature_id', signature.id),
+ ("signatures", signature.signature_hash),
+ ("signature_id", signature.id),
]:
resp = client.get(
- reverse('performance-data-list', kwargs={"project": test_repository.name})
- + '?{}={}'.format(param, value)
+ reverse("performance-data-list", kwargs={"project": test_repository.name})
+ + "?{}={}".format(param, value)
)
assert resp.status_code == 200
assert len(resp.data.keys()) == 1
assert len(resp.data[signature.signature_hash]) == 1
- assert resp.data[signature.signature_hash][0]['signature_id'] == signature.id
- assert resp.data[signature.signature_hash][0]['value'] == float(i)
+ assert resp.data[signature.signature_hash][0]["signature_id"] == signature.id
+ assert resp.data[signature.signature_hash][0]["value"] == float(i)
def test_perf_summary(client, test_perf_signature, test_perf_data):
query_params1 = (
- '?repository={}&framework={}&interval=172800&no_subtests=true&revision={}'.format(
+ "?repository={}&framework={}&interval=172800&no_subtests=true&revision={}".format(
test_perf_signature.repository.name,
test_perf_signature.framework_id,
test_perf_data[0].push.revision,
)
)
- query_params2 = '?repository={}&framework={}&interval=172800&no_subtests=true&startday=2013-11-01T23%3A28%3A29&endday=2013-11-30T23%3A28%3A29'.format(
+ query_params2 = "?repository={}&framework={}&interval=172800&no_subtests=true&startday=2013-11-01T23%3A28%3A29&endday=2013-11-30T23%3A28%3A29".format(
test_perf_signature.repository.name, test_perf_signature.framework_id
)
expected = [
{
- 'signature_id': test_perf_signature.id,
- 'framework_id': test_perf_signature.framework_id,
- 'signature_hash': test_perf_signature.signature_hash,
- 'platform': test_perf_signature.platform.platform,
- 'test': test_perf_signature.test,
- 'application': test_perf_signature.application,
- 'lower_is_better': test_perf_signature.lower_is_better,
- 'has_subtests': test_perf_signature.has_subtests,
- 'tags': test_perf_signature.tags,
- 'measurement_unit': test_perf_signature.measurement_unit,
- 'values': [test_perf_data[0].value],
- 'name': 'mysuite mytest opt e10s opt',
- 'parent_signature': None,
- 'job_ids': [test_perf_data[0].job_id],
- 'suite': test_perf_signature.suite,
- 'repository_name': test_perf_signature.repository.name,
- 'repository_id': test_perf_signature.repository.id,
- 'data': [],
+ "signature_id": test_perf_signature.id,
+ "framework_id": test_perf_signature.framework_id,
+ "signature_hash": test_perf_signature.signature_hash,
+ "platform": test_perf_signature.platform.platform,
+ "test": test_perf_signature.test,
+ "application": test_perf_signature.application,
+ "lower_is_better": test_perf_signature.lower_is_better,
+ "has_subtests": test_perf_signature.has_subtests,
+ "tags": test_perf_signature.tags,
+ "measurement_unit": test_perf_signature.measurement_unit,
+ "values": [test_perf_data[0].value],
+ "name": "mysuite mytest opt e10s opt",
+ "parent_signature": None,
+ "job_ids": [test_perf_data[0].job_id],
+ "suite": test_perf_signature.suite,
+ "repository_name": test_perf_signature.repository.name,
+ "repository_id": test_perf_signature.repository.id,
+ "data": [],
}
]
- resp1 = client.get(reverse('performance-summary') + query_params1)
+ resp1 = client.get(reverse("performance-summary") + query_params1)
assert resp1.status_code == 200
assert resp1.json() == expected
- expected[0]['values'] = [item.value for item in test_perf_data]
- expected[0]['job_ids'] = [item.job_id for item in test_perf_data]
- resp2 = client.get(reverse('performance-summary') + query_params2)
+ expected[0]["values"] = [item.value for item in test_perf_data]
+ expected[0]["job_ids"] = [item.job_id for item in test_perf_data]
+ resp2 = client.get(reverse("performance-summary") + query_params2)
assert resp2.status_code == 200
assert resp2.json() == expected
@@ -539,14 +539,14 @@ def test_data_points_from_same_push_are_ordered_chronologically(
As job ids are auto-incremented, older jobs have smaller ids than newer ones.
Thus, these ids are sufficient to check for chronological order.
"""
- query_params = '?repository={}&framework={}&interval=172800&no_subtests=true&startday=2013-11-01T23%3A28%3A29&endday=2013-11-30T23%3A28%3A29'.format(
+ query_params = "?repository={}&framework={}&interval=172800&no_subtests=true&startday=2013-11-01T23%3A28%3A29&endday=2013-11-30T23%3A28%3A29".format(
test_perf_signature.repository.name, test_perf_signature.framework_id
)
- response = client.get(reverse('performance-summary') + query_params)
+ response = client.get(reverse("performance-summary") + query_params)
assert response.status_code == 200
- job_ids = response.json()[0]['job_ids']
+ job_ids = response.json()[0]["job_ids"]
assert job_ids == sorted(job_ids)
@@ -554,7 +554,7 @@ def test_no_retriggers_perf_summary(
client, push_stored, test_perf_signature, test_perf_signature_2, test_perf_data
):
push = Push.objects.get(id=1)
- query_params = '?repository={}&framework={}&no_subtests=true&revision={}&all_data=true&signature={}'.format(
+ query_params = "?repository={}&framework={}&no_subtests=true&revision={}&all_data=true&signature={}".format(
test_perf_signature.repository.name,
test_perf_signature.framework_id,
push.revision,
@@ -577,15 +577,15 @@ def test_no_retriggers_perf_summary(
push_timestamp=push.time,
)
- response = client.get(reverse('performance-summary') + query_params)
+ response = client.get(reverse("performance-summary") + query_params)
content = response.json()
assert response.status_code == 200
- assert len(content[0]['data']) == 2
+ assert len(content[0]["data"]) == 2
- response = client.get(reverse('performance-summary') + query_params + "&no_retriggers=true")
+ response = client.get(reverse("performance-summary") + query_params + "&no_retriggers=true")
content = response.json()
assert response.status_code == 200
- assert len(content[0]['data']) == 1
+ assert len(content[0]["data"]) == 1
def test_filter_out_retriggers():
@@ -662,12 +662,12 @@ def test_filter_out_retriggers():
filtered_data = PerformanceSummary._filter_out_retriggers(copy.deepcopy(input_data))
for perf_summary in filtered_data:
push_id_count = defaultdict(int)
- for idx, datum in enumerate(perf_summary['data']):
- push_id_count[datum['push_id']] += 1
+ for idx, datum in enumerate(perf_summary["data"]):
+ push_id_count[datum["push_id"]] += 1
for push_id in push_id_count:
assert push_id_count[push_id] == 1
- assert len(filtered_data[0]['data']) == 3
+ assert len(filtered_data[0]["data"]) == 3
no_retriggers_data = [
{
@@ -719,7 +719,7 @@ def test_alert_summary_tasks_get(client, test_perf_alert_summary, test_perf_data
status=PerformanceAlert.REASSIGNED,
)
resp = client.get(
- reverse('performance-alertsummary-tasks') + '?id={}'.format(test_perf_alert_summary.id)
+ reverse("performance-alertsummary-tasks") + "?id={}".format(test_perf_alert_summary.id)
)
assert resp.status_code == 200
assert resp.json() == {
@@ -738,12 +738,12 @@ def test_alert_summary_tasks_get_failure(client, test_perf_alert_summary):
not_exist_summary_id = test_perf_alert_summary.id
test_perf_alert_summary.delete()
resp = client.get(
- reverse('performance-alertsummary-tasks') + '?id={}'.format(not_exist_summary_id)
+ reverse("performance-alertsummary-tasks") + "?id={}".format(not_exist_summary_id)
)
assert resp.status_code == 400
assert resp.json() == {"message": ["PerformanceAlertSummary does not exist."]}
# verify that we fail if id does not exist as a query parameter
- resp = client.get(reverse('performance-alertsummary-tasks'))
+ resp = client.get(reverse("performance-alertsummary-tasks"))
assert resp.status_code == 400
assert resp.json() == {"id": ["This field is required."]}
diff --git a/tests/webapp/api/test_performance_tags.py b/tests/webapp/api/test_performance_tags.py
index c6c1504d3b0..2540f375770 100644
--- a/tests/webapp/api/test_performance_tags.py
+++ b/tests/webapp/api/test_performance_tags.py
@@ -2,13 +2,13 @@
def test_perf_tags_get(authorized_sheriff_client, test_perf_tag, test_perf_tag_2):
- resp = authorized_sheriff_client.get(reverse('performance-tags-list'))
+ resp = authorized_sheriff_client.get(reverse("performance-tags-list"))
assert resp.status_code == 200
assert len(resp.json()) == 2
- assert resp.json()[0]['id'] == test_perf_tag.id
- assert resp.json()[0]['name'] == test_perf_tag.name
+ assert resp.json()[0]["id"] == test_perf_tag.id
+ assert resp.json()[0]["name"] == test_perf_tag.name
- assert resp.json()[1]['id'] == test_perf_tag_2.id
- assert resp.json()[1]['name'] == test_perf_tag_2.name
+ assert resp.json()[1]["id"] == test_perf_tag_2.id
+ assert resp.json()[1]["name"] == test_perf_tag_2.name
diff --git a/tests/webapp/api/test_push_api.py b/tests/webapp/api/test_push_api.py
index e4448760932..ea2b41d04e2 100644
--- a/tests/webapp/api/test_push_api.py
+++ b/tests/webapp/api/test_push_api.py
@@ -16,8 +16,8 @@ def test_push_list_basic(client, eleven_jobs_stored, test_repository):
"""
resp = client.get(reverse("push-list", kwargs={"project": test_repository.name}))
data = resp.json()
- results = data['results']
- meta = data['meta']
+ results = data["results"]
+ meta = data["meta"]
assert resp.status_code == 200
assert isinstance(results, list)
@@ -25,19 +25,19 @@ def test_push_list_basic(client, eleven_jobs_stored, test_repository):
assert len(results) == 10
exp_keys = set(
[
- u'id',
- u'repository_id',
- u'author',
- u'revision',
- u'revisions',
- u'revision_count',
- u'push_timestamp',
+ "id",
+ "repository_id",
+ "author",
+ "revision",
+ "revisions",
+ "revision_count",
+ "push_timestamp",
]
)
for rs in results:
assert set(rs.keys()) == exp_keys
- assert meta == {u'count': 10, u'filter_params': {}, u'repository': test_repository.name}
+ assert meta == {"count": 10, "filter_params": {}, "repository": test_repository.name}
def test_push_list_bad_project(client, transactional_db):
@@ -63,7 +63,7 @@ def test_push_list_empty_push_still_show(client, sample_push, test_repository):
)
assert resp.status_code == 200
data = resp.json()
- assert len(data['results']) == 10
+ assert len(data["results"]) == 10
def test_push_list_single_short_revision(client, eleven_jobs_stored, test_repository):
@@ -75,15 +75,15 @@ def test_push_list_single_short_revision(client, eleven_jobs_stored, test_reposi
reverse("push-list", kwargs={"project": test_repository.name}), {"revision": "45f8637cb9f7"}
)
assert resp.status_code == 200
- results = resp.json()['results']
- meta = resp.json()['meta']
+ results = resp.json()["results"]
+ meta = resp.json()["meta"]
assert len(results) == 1
assert set([rs["revision"] for rs in results]) == {"45f8637cb9f78f19cb8463ff174e81756805d8cf"}
assert meta == {
- u'count': 1,
- u'revision': u'45f8637cb9f7',
- u'filter_params': {u'revisions_short_revision': "45f8637cb9f7"},
- u'repository': test_repository.name,
+ "count": 1,
+ "revision": "45f8637cb9f7",
+ "filter_params": {"revisions_short_revision": "45f8637cb9f7"},
+ "repository": test_repository.name,
}
@@ -97,15 +97,15 @@ def test_push_list_single_long_revision(client, eleven_jobs_stored, test_reposit
{"revision": "45f8637cb9f78f19cb8463ff174e81756805d8cf"},
)
assert resp.status_code == 200
- results = resp.json()['results']
- meta = resp.json()['meta']
+ results = resp.json()["results"]
+ meta = resp.json()["meta"]
assert len(results) == 1
assert set([rs["revision"] for rs in results]) == {"45f8637cb9f78f19cb8463ff174e81756805d8cf"}
assert meta == {
- u'count': 1,
- u'revision': u'45f8637cb9f78f19cb8463ff174e81756805d8cf',
- u'filter_params': {u'revisions_long_revision': u'45f8637cb9f78f19cb8463ff174e81756805d8cf'},
- u'repository': test_repository.name,
+ "count": 1,
+ "revision": "45f8637cb9f78f19cb8463ff174e81756805d8cf",
+ "filter_params": {"revisions_long_revision": "45f8637cb9f78f19cb8463ff174e81756805d8cf"},
+ "repository": test_repository.name,
}
@@ -121,21 +121,21 @@ def test_push_list_filter_by_revision(client, eleven_jobs_stored, test_repositor
)
assert resp.status_code == 200
data = resp.json()
- results = data['results']
- meta = data['meta']
+ results = data["results"]
+ meta = data["meta"]
assert len(results) == 4
assert set([rs["revision"] for rs in results]) == {
- u'130965d3df6c9a1093b4725f3b877eaef80d72bc',
- u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
- u'a69390334818373e2d7e6e9c8d626a328ed37d47',
- u'f361dcb60bbedaa01257fbca211452972f7a74b2',
+ "130965d3df6c9a1093b4725f3b877eaef80d72bc",
+ "7f417c3505e3d2599ac9540f02e3dbee307a3963",
+ "a69390334818373e2d7e6e9c8d626a328ed37d47",
+ "f361dcb60bbedaa01257fbca211452972f7a74b2",
}
assert meta == {
- u'count': 4,
- u'fromchange': u'130965d3df6c',
- u'filter_params': {u'push_timestamp__gte': 1384363842, u'push_timestamp__lte': 1384365942},
- u'repository': test_repository.name,
- u'tochange': u'f361dcb60bbe',
+ "count": 4,
+ "fromchange": "130965d3df6c",
+ "filter_params": {"push_timestamp__gte": 1384363842, "push_timestamp__lte": 1384365942},
+ "repository": test_repository.name,
+ "tochange": "f361dcb60bbe",
}
@@ -147,7 +147,7 @@ def test_push_list_filter_by_date(client, test_repository, sample_push):
for i, datestr in zip(
[3, 4, 5, 6, 7], ["2013-08-09", "2013-08-10", "2013-08-11", "2013-08-12", "2013-08-13"]
):
- sample_push[i]['push_timestamp'] = utils.to_timestamp(utils.to_datetime(datestr))
+ sample_push[i]["push_timestamp"] = utils.to_timestamp(utils.to_datetime(datestr))
store_push_data(test_repository, sample_push)
@@ -157,35 +157,35 @@ def test_push_list_filter_by_date(client, test_repository, sample_push):
)
assert resp.status_code == 200
data = resp.json()
- results = data['results']
- meta = data['meta']
+ results = data["results"]
+ meta = data["meta"]
assert len(results) == 4
assert set([rs["revision"] for rs in results]) == {
- u'ce17cad5d554cfffddee13d1d8421ae9ec5aad82',
- u'7f417c3505e3d2599ac9540f02e3dbee307a3963',
- u'a69390334818373e2d7e6e9c8d626a328ed37d47',
- u'f361dcb60bbedaa01257fbca211452972f7a74b2',
+ "ce17cad5d554cfffddee13d1d8421ae9ec5aad82",
+ "7f417c3505e3d2599ac9540f02e3dbee307a3963",
+ "a69390334818373e2d7e6e9c8d626a328ed37d47",
+ "f361dcb60bbedaa01257fbca211452972f7a74b2",
}
assert meta == {
- u'count': 4,
- u'enddate': u'2013-08-13',
- u'filter_params': {
- u'push_timestamp__gte': 1376092800.0,
- u'push_timestamp__lt': 1376438400.0,
+ "count": 4,
+ "enddate": "2013-08-13",
+ "filter_params": {
+ "push_timestamp__gte": 1376092800.0,
+ "push_timestamp__lt": 1376438400.0,
},
- u'repository': test_repository.name,
- u'startdate': u'2013-08-10',
+ "repository": test_repository.name,
+ "startdate": "2013-08-10",
}
@pytest.mark.parametrize(
- 'filter_param, exp_ids',
+ "filter_param, exp_ids",
[
- ('id__lt=2', [1]),
- ('id__lte=2', [1, 2]),
- ('id=2', [2]),
- ('id__gt=2', [3]),
- ('id__gte=2', [2, 3]),
+ ("id__lt=2", [1]),
+ ("id__lte=2", [1, 2]),
+ ("id=2", [2]),
+ ("id__gt=2", [3]),
+ ("id__gte=2", [2, 3]),
],
)
def test_push_list_filter_by_id(client, test_repository, filter_param, exp_ids):
@@ -193,9 +193,9 @@ def test_push_list_filter_by_id(client, test_repository, filter_param, exp_ids):
test filtering by id in various ways
"""
for revision, author in [
- ('1234abcd', 'foo@bar.com'),
- ('2234abcd', 'foo2@bar.com'),
- ('3234abcd', 'foo3@bar.com'),
+ ("1234abcd", "foo@bar.com"),
+ ("2234abcd", "foo2@bar.com"),
+ ("3234abcd", "foo3@bar.com"),
]:
Push.objects.create(
repository=test_repository,
@@ -204,11 +204,11 @@ def test_push_list_filter_by_id(client, test_repository, filter_param, exp_ids):
time=datetime.datetime.now(),
)
resp = client.get(
- reverse("push-list", kwargs={"project": test_repository.name}) + '?' + filter_param
+ reverse("push-list", kwargs={"project": test_repository.name}) + "?" + filter_param
)
assert resp.status_code == 200
- results = resp.json()['results']
- assert set([result['id'] for result in results]) == set(exp_ids)
+ results = resp.json()["results"]
+ assert set([result["id"] for result in results]) == set(exp_ids)
def test_push_list_id_in(client, test_repository):
@@ -216,9 +216,9 @@ def test_push_list_id_in(client, test_repository):
test the id__in parameter
"""
for revision, author in [
- ('1234abcd', 'foo@bar.com'),
- ('2234abcd', 'foo2@bar.com'),
- ('3234abcd', 'foo3@bar.com'),
+ ("1234abcd", "foo@bar.com"),
+ ("2234abcd", "foo2@bar.com"),
+ ("3234abcd", "foo3@bar.com"),
]:
Push.objects.create(
repository=test_repository,
@@ -227,17 +227,17 @@ def test_push_list_id_in(client, test_repository):
time=datetime.datetime.now(),
)
resp = client.get(
- reverse("push-list", kwargs={"project": test_repository.name}) + '?id__in=1,2'
+ reverse("push-list", kwargs={"project": test_repository.name}) + "?id__in=1,2"
)
assert resp.status_code == 200
- results = resp.json()['results']
+ results = resp.json()["results"]
assert len(results) == 2 # would have 3 if filter not applied
- assert set([result['id'] for result in results]) == set([1, 2])
+ assert set([result["id"] for result in results]) == set([1, 2])
# test that we do something sane if invalid list passed in
resp = client.get(
- reverse("push-list", kwargs={"project": test_repository.name}) + '?id__in=1,2,foobar',
+ reverse("push-list", kwargs={"project": test_repository.name}) + "?id__in=1,2,foobar",
)
assert resp.status_code == 400
@@ -249,11 +249,11 @@ def test_push_list_bad_count(client, test_repository):
bad_count = "ZAP%n%s%n%s"
resp = client.get(
- reverse("push-list", kwargs={"project": test_repository.name}), data={'count': bad_count}
+ reverse("push-list", kwargs={"project": test_repository.name}), data={"count": bad_count}
)
assert resp.status_code == 400
- assert resp.json() == {'detail': 'Valid count value required'}
+ assert resp.json() == {"detail": "Valid count value required"}
def test_push_author(client, test_repository):
@@ -261,9 +261,9 @@ def test_push_author(client, test_repository):
test the author parameter
"""
for revision, author in [
- ('1234abcd', 'foo@bar.com'),
- ('2234abcd', 'foo@bar.com'),
- ('3234abcd', 'foo2@bar.com'),
+ ("1234abcd", "foo@bar.com"),
+ ("2234abcd", "foo@bar.com"),
+ ("3234abcd", "foo2@bar.com"),
]:
Push.objects.create(
repository=test_repository,
@@ -273,31 +273,31 @@ def test_push_author(client, test_repository):
)
resp = client.get(
- reverse("push-list", kwargs={"project": test_repository.name}) + '?author=foo@bar.com'
+ reverse("push-list", kwargs={"project": test_repository.name}) + "?author=foo@bar.com"
)
assert resp.status_code == 200
- results = resp.json()['results']
+ results = resp.json()["results"]
assert len(results) == 2 # would have 3 if filter not applied
- assert set([result['id'] for result in results]) == set([1, 2])
+ assert set([result["id"] for result in results]) == set([1, 2])
resp = client.get(
- reverse("push-list", kwargs={"project": test_repository.name}) + '?author=foo2@bar.com'
+ reverse("push-list", kwargs={"project": test_repository.name}) + "?author=foo2@bar.com"
)
assert resp.status_code == 200
- results = resp.json()['results']
+ results = resp.json()["results"]
assert len(results) == 1 # would have 3 if filter not applied
- assert results[0]['id'] == 3
+ assert results[0]["id"] == 3
resp = client.get(
- reverse("push-list", kwargs={"project": test_repository.name}) + '?author=-foo2@bar.com'
+ reverse("push-list", kwargs={"project": test_repository.name}) + "?author=-foo2@bar.com"
)
assert resp.status_code == 200
- results = resp.json()['results']
+ results = resp.json()["results"]
assert len(results) == 2 # would have 3 if filter not applied
- assert set([result['id'] for result in results]) == set([1, 2])
+ assert set([result["id"] for result in results]) == set([1, 2])
def test_push_reviewbot(client, test_repository):
@@ -305,10 +305,10 @@ def test_push_reviewbot(client, test_repository):
test the reviewbot parameter
"""
for revision, author in [
- ('1234abcd', 'foo@bar.com'),
- ('2234abcd', 'foo2@bar.com'),
- ('3234abcd', 'reviewbot'),
- ('4234abcd', 'reviewbot'),
+ ("1234abcd", "foo@bar.com"),
+ ("2234abcd", "foo2@bar.com"),
+ ("3234abcd", "reviewbot"),
+ ("4234abcd", "reviewbot"),
]:
Push.objects.create(
repository=test_repository,
@@ -319,13 +319,13 @@ def test_push_reviewbot(client, test_repository):
resp = client.get(
reverse("push-list", kwargs={"project": test_repository.name})
- + '?hide_reviewbot_pushes=true'
+ + "?hide_reviewbot_pushes=true"
)
assert resp.status_code == 200
- results = resp.json()['results']
+ results = resp.json()["results"]
assert len(results) == 2
- assert set([result['id'] for result in results]) == set([1, 2])
+ assert set([result["id"] for result in results]) == set([1, 2])
def test_push_list_without_jobs(client, test_repository, sample_push):
@@ -337,16 +337,16 @@ def test_push_list_without_jobs(client, test_repository, sample_push):
resp = client.get(reverse("push-list", kwargs={"project": test_repository.name}))
assert resp.status_code == 200
data = resp.json()
- results = data['results']
+ results = data["results"]
assert len(results) == 10
- assert all([('platforms' not in result) for result in results])
+ assert all([("platforms" not in result) for result in results])
- meta = data['meta']
+ meta = data["meta"]
assert meta == {
- u'count': len(results),
- u'filter_params': {},
- u'repository': test_repository.name,
+ "count": len(results),
+ "filter_params": {},
+ "repository": test_repository.name,
}
@@ -400,13 +400,13 @@ def test_push_status(client, test_job, test_user):
)
assert resp.status_code == 200
assert isinstance(resp.json(), dict)
- assert resp.json() == {'success': 1, 'completed': 1, 'pending': 0, 'running': 0}
+ assert resp.json() == {"success": 1, "completed": 1, "pending": 0, "running": 0}
JobNote.objects.create(
job=test_job,
failure_classification=failure_classification,
user=test_user,
- text='A random note',
+ text="A random note",
)
resp = client.get(
@@ -414,4 +414,4 @@ def test_push_status(client, test_job, test_user):
)
assert resp.status_code == 200
assert isinstance(resp.json(), dict)
- assert resp.json() == {'completed': 0, 'pending': 0, 'running': 0}
+ assert resp.json() == {"completed": 0, "pending": 0, "running": 0}
diff --git a/tests/webapp/api/test_version.py b/tests/webapp/api/test_version.py
index 83475f60ab5..62d38c8d1a9 100644
--- a/tests/webapp/api/test_version.py
+++ b/tests/webapp/api/test_version.py
@@ -7,7 +7,7 @@
class RequestVersionView(APIView):
def get(self, request, *args, **kwargs):
- return Response({'version': request.version})
+ return Response({"version": request.version})
factory = APIRequestFactory()
@@ -15,25 +15,25 @@ def get(self, request, *args, **kwargs):
def test_unsupported_version():
view = RequestVersionView.as_view()
- request = factory.get('/endpoint/', HTTP_ACCEPT='application/json; version=foo.bar')
+ request = factory.get("/endpoint/", HTTP_ACCEPT="application/json; version=foo.bar")
try:
response = view(request)
except NotAcceptable:
pass
- assert response.data == {u'detail': u'Invalid version in "Accept" header.'}
+ assert response.data == {"detail": 'Invalid version in "Accept" header.'}
def test_correct_version():
view = RequestVersionView.as_view()
- version = settings.REST_FRAMEWORK['ALLOWED_VERSIONS'][0]
- request = factory.get('/endpoint/', HTTP_ACCEPT='application/json; version={0}'.format(version))
+ version = settings.REST_FRAMEWORK["ALLOWED_VERSIONS"][0]
+ request = factory.get("/endpoint/", HTTP_ACCEPT="application/json; version={0}".format(version))
response = view(request)
- assert response.data == {'version': version}
+ assert response.data == {"version": version}
def test_default_version():
view = RequestVersionView.as_view()
- request = factory.get('/endpoint/', HTTP_ACCEPT='application/json')
+ request = factory.get("/endpoint/", HTTP_ACCEPT="application/json")
response = view(request)
- version = settings.REST_FRAMEWORK['DEFAULT_VERSION']
- assert response.data == {'version': version}
+ version = settings.REST_FRAMEWORK["DEFAULT_VERSION"]
+ assert response.data == {"version": version}
diff --git a/treeherder/__init__.py b/treeherder/__init__.py
index 15d7c508511..5568b6d791f 100644
--- a/treeherder/__init__.py
+++ b/treeherder/__init__.py
@@ -2,4 +2,4 @@
# Django starts so that shared_task will use this app.
from .celery import app as celery_app
-__all__ = ('celery_app',)
+__all__ = ("celery_app",)
diff --git a/treeherder/auth/backends.py b/treeherder/auth/backends.py
index 5401d2b7b56..b9a228a4328 100644
--- a/treeherder/auth/backends.py
+++ b/treeherder/auth/backends.py
@@ -23,41 +23,41 @@
# with lots of notice in advance. In order to mitigate the additional HTTP request
# as well as the possibility of receiving a 503 status code, we use a static json file to
# read its content.
-with open('treeherder/auth/jwks.json') as f:
+with open("treeherder/auth/jwks.json") as f:
jwks = json.load(f)
class AuthBackend:
def _get_access_token_expiry(self, request):
- expiration_timestamp_in_seconds = request.META.get('HTTP_ACCESS_TOKEN_EXPIRES_AT')
+ expiration_timestamp_in_seconds = request.META.get("HTTP_ACCESS_TOKEN_EXPIRES_AT")
if not expiration_timestamp_in_seconds:
- raise AuthenticationFailed('Access-Token-Expires-At header is expected')
+ raise AuthenticationFailed("Access-Token-Expires-At header is expected")
try:
return int(expiration_timestamp_in_seconds)
except ValueError:
- raise AuthenticationFailed('Access-Token-Expires-At header value is invalid')
+ raise AuthenticationFailed("Access-Token-Expires-At header value is invalid")
def _get_access_token(self, request):
- auth = request.META.get('HTTP_AUTHORIZATION')
+ auth = request.META.get("HTTP_AUTHORIZATION")
if not auth:
- raise AuthenticationFailed('Authorization header is expected')
+ raise AuthenticationFailed("Authorization header is expected")
parts = auth.split()
- if len(parts) != 2 or parts[0].lower() != 'bearer':
+ if len(parts) != 2 or parts[0].lower() != "bearer":
raise AuthenticationFailed("Authorization header must be of form 'Bearer {token}'")
token = parts[1]
return token
def _get_id_token(self, request):
- id_token = request.META.get('HTTP_ID_TOKEN')
+ id_token = request.META.get("HTTP_ID_TOKEN")
if not id_token:
- raise AuthenticationFailed('Id-Token header is expected')
+ raise AuthenticationFailed("Id-Token header is expected")
return id_token
@@ -65,7 +65,7 @@ def _get_id_token_expiry(self, user_info):
# `exp` is the expiration of the ID token in seconds since the epoch:
# https://auth0.com/docs/tokens/id-token#id-token-payload
# https://openid.net/specs/openid-connect-core-1_0.html#IDToken
- return user_info['exp']
+ return user_info["exp"]
def _get_is_sheriff_from_userinfo(self, user_info):
"""
@@ -73,20 +73,20 @@ def _get_is_sheriff_from_userinfo(self, user_info):
"""
groups = (
- user_info['https://sso.mozilla.com/claim/groups']
- if 'https://sso.mozilla.com/claim/groups' in user_info
+ user_info["https://sso.mozilla.com/claim/groups"]
+ if "https://sso.mozilla.com/claim/groups" in user_info
else []
)
- return 1 if ('sheriff' in groups or 'perf_sheriff' in groups) else 0
+ return 1 if ("sheriff" in groups or "perf_sheriff" in groups) else 0
def _get_username_from_userinfo(self, user_info):
"""
Get the user's username from the jwt sub property
"""
- subject = user_info['sub']
- email = user_info['email']
+ subject = user_info["sub"]
+ email = user_info["email"]
if "Mozilla-LDAP" in subject:
return "mozilla-ldap/" + email
@@ -139,10 +139,10 @@ def _get_user_info(self, access_token, id_token):
try:
unverified_header = jwt.get_unverified_header(id_token)
except jwt.JWTError:
- raise AuthError('Unable to decode the Id token header')
+ raise AuthError("Unable to decode the Id token header")
- if 'kid' not in unverified_header:
- raise AuthError('Id token header missing RSA key ID')
+ if "kid" not in unverified_header:
+ raise AuthError("Id token header missing RSA key ID")
rsa_key = None
for key in jwks["keys"]:
@@ -157,21 +157,21 @@ def _get_user_info(self, access_token, id_token):
break
if not rsa_key:
- raise AuthError('Id token using unrecognised RSA key ID')
+ raise AuthError("Id token using unrecognised RSA key ID")
try:
# https://python-jose.readthedocs.io/en/latest/jwt/api.html#jose.jwt.decode
user_info = jwt.decode(
id_token,
rsa_key,
- algorithms=['RS256'],
+ algorithms=["RS256"],
audience=AUTH0_CLIENTID,
access_token=access_token,
issuer="https://" + AUTH0_DOMAIN + "/",
)
return user_info
except jwt.ExpiredSignatureError:
- raise AuthError('Id token is expired')
+ raise AuthError("Id token is expired")
except jwt.JWTClaimsError:
raise AuthError("Incorrect claims: please check the audience and issuer")
except jwt.JWTError:
@@ -190,7 +190,7 @@ def _calculate_session_expiry(self, request, user_info):
seconds_until_expiry = earliest_expiration_timestamp - now_in_seconds
if seconds_until_expiry <= 0:
- raise AuthError('Session expiry time has already passed!')
+ raise AuthError("Session expiry time has already passed!")
return seconds_until_expiry
@@ -203,7 +203,7 @@ def authenticate(self, request):
is_sheriff = self._get_is_sheriff_from_userinfo(user_info)
seconds_until_expiry = self._calculate_session_expiry(request, user_info)
- logger.debug('Updating session to expire in %i seconds', seconds_until_expiry)
+ logger.debug("Updating session to expire in %i seconds", seconds_until_expiry)
request.session.set_expiry(seconds_until_expiry)
try:
@@ -215,9 +215,9 @@ def authenticate(self, request):
except ObjectDoesNotExist:
# The user doesn't already exist, so create it since we allow
# anyone with SSO access to create an account on Treeherder.
- logger.debug('Creating new user: %s', username)
+ logger.debug("Creating new user: %s", username)
return User.objects.create_user(
- username, email=user_info['email'], password=None, is_staff=is_sheriff
+ username, email=user_info["email"], password=None, is_staff=is_sheriff
)
def get_user(self, user_id):
diff --git a/treeherder/celery.py b/treeherder/celery.py
index 4c5bd117a17..3a5f0852bd4 100644
--- a/treeherder/celery.py
+++ b/treeherder/celery.py
@@ -3,16 +3,16 @@
from celery import Celery
# set the default Django settings module for the 'celery' program.
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'treeherder.config.settings')
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "treeherder.config.settings")
-app = Celery('treeherder')
+app = Celery("treeherder")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
-app.config_from_object('django.conf:settings', namespace='CELERY')
+app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
-app.autodiscover_tasks(['treeherder.workers.stats'])
+app.autodiscover_tasks(["treeherder.workers.stats"])
diff --git a/treeherder/changelog/models.py b/treeherder/changelog/models.py
index 5222daeb90c..51626ac3b77 100644
--- a/treeherder/changelog/models.py
+++ b/treeherder/changelog/models.py
@@ -16,7 +16,7 @@ class Changelog(models.Model):
class Meta:
db_table = "changelog_entry"
- unique_together = ('id', 'remote_id', 'type')
+ unique_together = ("id", "remote_id", "type")
def __str__(self):
return "[%s] %s by %s" % (self.id, self.message, self.author)
diff --git a/treeherder/client/setup.py b/treeherder/client/setup.py
index 98b8bb03780..71cedb709c4 100644
--- a/treeherder/client/setup.py
+++ b/treeherder/client/setup.py
@@ -23,27 +23,27 @@ def find_version(*file_paths):
setup(
- name='treeherder-client',
- version=find_version('thclient', 'client.py'),
- description='Python library to retrieve data from the Treeherder API',
+ name="treeherder-client",
+ version=find_version("thclient", "client.py"),
+ description="Python library to retrieve data from the Treeherder API",
classifiers=[
- 'Environment :: Console',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
- 'Natural Language :: English',
- 'Operating System :: OS Independent',
- 'Programming Language :: Python',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.6',
- 'Programming Language :: Python :: 3.7',
- 'Topic :: Software Development :: Libraries :: Python Modules',
+ "Environment :: Console",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
+ "Natural Language :: English",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Topic :: Software Development :: Libraries :: Python Modules",
],
- keywords='',
- author='Mozilla Automation and Testing Team',
- author_email='tools@lists.mozilla.org',
- url='https://github.com/mozilla/treeherder',
- license='MPL',
- packages=['thclient'],
- python_requires='>=3',
- install_requires=['requests==2.31.0'],
+ keywords="",
+ author="Mozilla Automation and Testing Team",
+ author_email="tools@lists.mozilla.org",
+ url="https://github.com/mozilla/treeherder",
+ license="MPL",
+ packages=["thclient"],
+ python_requires=">=3",
+ install_requires=["requests==2.31.0"],
)
diff --git a/treeherder/client/thclient/client.py b/treeherder/client/thclient/client.py
index 91390636bba..890ef1214cd 100644
--- a/treeherder/client/thclient/client.py
+++ b/treeherder/client/thclient/client.py
@@ -5,7 +5,7 @@
# The Python client release process is documented here:
# https://treeherder.readthedocs.io/common_tasks.html#releasing-a-new-version-of-the-python-client
-__version__ = '5.0.0'
+__version__ = "5.0.0"
logger = logging.getLogger(__name__)
@@ -15,21 +15,21 @@ class TreeherderClient:
Treeherder client class
"""
- API_VERSION = '1.1'
+ API_VERSION = "1.1"
REQUEST_HEADERS = {
- 'Accept': 'application/json; version={}'.format(API_VERSION),
- 'User-Agent': 'treeherder-pyclient/{}'.format(__version__),
+ "Accept": "application/json; version={}".format(API_VERSION),
+ "User-Agent": "treeherder-pyclient/{}".format(__version__),
}
- PUSH_ENDPOINT = 'push'
- JOBS_ENDPOINT = 'jobs'
- JOB_LOG_URL_ENDPOINT = 'job-log-url'
- OPTION_COLLECTION_HASH_ENDPOINT = 'optioncollectionhash'
- REPOSITORY_ENDPOINT = 'repository'
- FAILURE_CLASSIFICATION_ENDPOINT = 'failureclassification'
+ PUSH_ENDPOINT = "push"
+ JOBS_ENDPOINT = "jobs"
+ JOB_LOG_URL_ENDPOINT = "job-log-url"
+ OPTION_COLLECTION_HASH_ENDPOINT = "optioncollectionhash"
+ REPOSITORY_ENDPOINT = "repository"
+ FAILURE_CLASSIFICATION_ENDPOINT = "failureclassification"
MAX_COUNT = 2000
- def __init__(self, server_url='https://treeherder.mozilla.org', timeout=30):
+ def __init__(self, server_url="https://treeherder.mozilla.org", timeout=30):
"""
:param server_url: The site URL of the Treeherder instance (defaults to production)
:param timeout: maximum time it can take for a request to complete
@@ -43,9 +43,9 @@ def __init__(self, server_url='https://treeherder.mozilla.org', timeout=30):
def _get_endpoint_url(self, endpoint, project=None):
if project:
- return '{}/api/project/{}/{}/'.format(self.server_url, project, endpoint)
+ return "{}/api/project/{}/{}/".format(self.server_url, project, endpoint)
- return '{}/api/{}/'.format(self.server_url, endpoint)
+ return "{}/api/{}/".format(self.server_url, endpoint)
def _get_json_list(self, endpoint, project=None, **params):
if "count" in params and (params["count"] is None or params["count"] > self.MAX_COUNT):
@@ -97,7 +97,7 @@ def get_option_collection_hash(self):
resp = self._get_json(self.OPTION_COLLECTION_HASH_ENDPOINT)
ret = {}
for result in resp:
- ret[result['option_collection_hash']] = result['options']
+ ret[result["option_collection_hash"]] = result["options"]
return ret
diff --git a/treeherder/client/thclient/perfherder.py b/treeherder/client/thclient/perfherder.py
index b0c245e5d1e..38a40a64e2a 100644
--- a/treeherder/client/thclient/perfherder.py
+++ b/treeherder/client/thclient/perfherder.py
@@ -107,8 +107,8 @@ def __getitem__(self, key):
class PerfherderClient(TreeherderClient):
- PERFORMANCE_SIGNATURES_ENDPOINT = 'performance/signatures'
- PERFORMANCE_DATA_ENDPOINT = 'performance/data'
+ PERFORMANCE_SIGNATURES_ENDPOINT = "performance/signatures"
+ PERFORMANCE_DATA_ENDPOINT = "performance/data"
def get_performance_signatures(self, project, **params):
"""
diff --git a/treeherder/config/settings.py b/treeherder/config/settings.py
index 146157dcdcc..206552b4e4c 100644
--- a/treeherder/config/settings.py
+++ b/treeherder/config/settings.py
@@ -25,12 +25,12 @@
LOGGING_LEVEL = env("LOGGING_LEVEL", default="INFO")
NEW_RELIC_INSIGHTS_API_KEY = env("NEW_RELIC_INSIGHTS_API_KEY", default=None)
-NEW_RELIC_INSIGHTS_API_URL = 'https://insights-api.newrelic.com/v1/accounts/677903/query'
+NEW_RELIC_INSIGHTS_API_URL = "https://insights-api.newrelic.com/v1/accounts/677903/query"
# Make this unique, and don't share it with anybody.
SECRET_KEY = env(
"TREEHERDER_DJANGO_SECRET_KEY",
- default='secret-key-of-at-least-50-characters-to-pass-check-deploy',
+ default="secret-key-of-at-least-50-characters-to-pass-check-deploy",
)
# Delete the Pulse automatically when no consumers left
@@ -41,16 +41,16 @@
PULSE_AUTO_DELETE_QUEUES = True
# Hosts
-SITE_URL = env("SITE_URL", default='http://localhost:8000')
+SITE_URL = env("SITE_URL", default="http://localhost:8000")
SITE_HOSTNAME = furl(SITE_URL).host
# Including localhost allows using the backend locally
-ALLOWED_HOSTS = [SITE_HOSTNAME, 'localhost', '127.0.0.1']
+ALLOWED_HOSTS = [SITE_HOSTNAME, "localhost", "127.0.0.1"]
# URL handling
APPEND_SLASH = False
ROOT_URLCONF = "treeherder.config.urls"
-WSGI_APPLICATION = 'treeherder.config.wsgi.application'
+WSGI_APPLICATION = "treeherder.config.wsgi.application"
# Send full URL within origin but only origin for cross-origin requests
SECURE_REFERRER_POLICY = "origin-when-cross-origin"
@@ -61,29 +61,29 @@
# We can't set X_FRAME_OPTIONS to DENY since renewal of an Auth0 token
# requires opening the auth handler page in an invisible iframe with the
# same origin.
-X_FRAME_OPTIONS = 'SAMEORIGIN'
+X_FRAME_OPTIONS = "SAMEORIGIN"
# Application definition
INSTALLED_APPS = [
- 'django.contrib.auth',
- 'django.contrib.contenttypes',
+ "django.contrib.auth",
+ "django.contrib.contenttypes",
# Disable Django's own staticfiles handling in favour of WhiteNoise, for
# greater consistency between gunicorn and `./manage.py runserver`.
- 'whitenoise.runserver_nostatic',
- 'django.contrib.staticfiles',
+ "whitenoise.runserver_nostatic",
+ "django.contrib.staticfiles",
# 3rd party apps
- 'rest_framework',
- 'corsheaders',
- 'django_filters',
- 'dockerflow.django',
+ "rest_framework",
+ "corsheaders",
+ "django_filters",
+ "dockerflow.django",
# treeherder apps
- 'treeherder.model',
- 'treeherder.webapp',
- 'treeherder.log_parser',
- 'treeherder.etl',
- 'treeherder.perf',
- 'treeherder.intermittents_commenter',
- 'treeherder.changelog',
+ "treeherder.model",
+ "treeherder.webapp",
+ "treeherder.log_parser",
+ "treeherder.etl",
+ "treeherder.perf",
+ "treeherder.intermittents_commenter",
+ "treeherder.changelog",
]
# Docker/outside-of-Docker/CircleCI
@@ -93,31 +93,31 @@
# https://django-debug-toolbar.readthedocs.io/en/latest/configuration.html#show-toolbar-callback
# "You can provide your own function callback(request) which returns True or False."
DEBUG_TOOLBAR_CONFIG = {
- 'SHOW_TOOLBAR_CALLBACK': lambda request: DEBUG,
+ "SHOW_TOOLBAR_CALLBACK": lambda request: DEBUG,
}
- INSTALLED_APPS.append('debug_toolbar')
- INSTALLED_APPS.append('django_extensions')
+ INSTALLED_APPS.append("debug_toolbar")
+ INSTALLED_APPS.append("django_extensions")
# Middleware
MIDDLEWARE = [
middleware
for middleware in [
# Adds custom New Relic annotations. Must be first so all transactions are annotated.
- 'treeherder.middleware.NewRelicMiddleware',
+ "treeherder.middleware.NewRelicMiddleware",
# Redirect to HTTPS/set HSTS and other security headers.
- 'django.middleware.security.SecurityMiddleware',
- 'django.middleware.clickjacking.XFrameOptionsMiddleware',
- 'corsheaders.middleware.CorsMiddleware',
+ "django.middleware.security.SecurityMiddleware",
+ "django.middleware.clickjacking.XFrameOptionsMiddleware",
+ "corsheaders.middleware.CorsMiddleware",
# Allows both Django static files and those specified via `WHITENOISE_ROOT`
# to be served by WhiteNoise.
- 'treeherder.middleware.CustomWhiteNoise',
- 'django.middleware.gzip.GZipMiddleware',
- 'debug_toolbar.middleware.DebugToolbarMiddleware' if DEBUG else False,
- 'django.contrib.sessions.middleware.SessionMiddleware',
- 'django.middleware.common.CommonMiddleware',
- 'django.middleware.csrf.CsrfViewMiddleware',
- 'django.contrib.auth.middleware.AuthenticationMiddleware',
- 'dockerflow.django.middleware.DockerflowMiddleware',
+ "treeherder.middleware.CustomWhiteNoise",
+ "django.middleware.gzip.GZipMiddleware",
+ "debug_toolbar.middleware.DebugToolbarMiddleware" if DEBUG else False,
+ "django.contrib.sessions.middleware.SessionMiddleware",
+ "django.middleware.common.CommonMiddleware",
+ "django.middleware.csrf.CsrfViewMiddleware",
+ "django.contrib.auth.middleware.AuthenticationMiddleware",
+ "dockerflow.django.middleware.DockerflowMiddleware",
]
if middleware
]
@@ -128,59 +128,59 @@
# 'mysql://username:password@host:optional_port/database_name'
#
# which django-environ converts into the Django DB settings dict format.
-LOCALHOST_MYSQL_HOST = 'mysql://root@{}:3306/treeherder'.format(
- 'localhost' if IS_WINDOWS else '127.0.0.1'
+LOCALHOST_MYSQL_HOST = "mysql://root@{}:3306/treeherder".format(
+ "localhost" if IS_WINDOWS else "127.0.0.1"
)
DATABASES = {
- 'default': env.db_url('DATABASE_URL', default=LOCALHOST_MYSQL_HOST),
+ "default": env.db_url("DATABASE_URL", default=LOCALHOST_MYSQL_HOST),
}
# Only used when syncing local database with production replicas
-UPSTREAM_DATABASE_URL = env('UPSTREAM_DATABASE_URL', default=None)
+UPSTREAM_DATABASE_URL = env("UPSTREAM_DATABASE_URL", default=None)
if UPSTREAM_DATABASE_URL:
- DATABASES['upstream'] = env.db_url_config(UPSTREAM_DATABASE_URL)
+ DATABASES["upstream"] = env.db_url_config(UPSTREAM_DATABASE_URL)
# We're intentionally not using django-environ's query string options feature,
# since it hides configuration outside of the repository, plus could lead to
# drift between environments.
for alias, db in DATABASES.items():
# Persist database connections for 5 minutes, to avoid expensive reconnects.
- db['CONN_MAX_AGE'] = 300
+ db["CONN_MAX_AGE"] = 300
# These options are only valid for mysql
- if db['ENGINE'] != 'django.db.backends.mysql':
+ if db["ENGINE"] != "django.db.backends.mysql":
continue
- db['OPTIONS'] = {
+ db["OPTIONS"] = {
# Override Django's default connection charset of 'utf8', otherwise it's
# still not possible to insert non-BMP unicode into utf8mb4 tables.
- 'charset': 'utf8mb4',
+ "charset": "utf8mb4",
# From MySQL 5.7 onwards and on fresh installs of MySQL 5.6, the default value of the sql_mode
# option contains STRICT_TRANS_TABLES. That option escalates warnings into errors when data are
# truncated upon insertion, so Django highly recommends activating a strict mode for MySQL to
# prevent data loss (either STRICT_TRANS_TABLES or STRICT_ALL_TABLES).
- 'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
+ "init_command": "SET sql_mode='STRICT_TRANS_TABLES'",
}
# To use the stage replica, set the 'deployment/gcp/ca-cert.pem' path in your local env file
# or pass the variable to the docker-compose command; additional certs are in the deployment directory.
- if connection_should_use_tls(db['HOST']):
- db['OPTIONS']['ssl'] = {
- 'ca': env("TLS_CERT_PATH", default=None),
+ if connection_should_use_tls(db["HOST"]):
+ db["OPTIONS"]["ssl"] = {
+ "ca": env("TLS_CERT_PATH", default=None),
}
# Since Django 3.2, the default AutoField must be configured
-DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
+DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Caches
-REDIS_URL = env('REDIS_URL', default='redis://localhost:6379')
+REDIS_URL = env("REDIS_URL", default="redis://localhost:6379")
CACHES = {
- 'default': {
- 'BACKEND': 'django_redis.cache.RedisCache',
- 'LOCATION': REDIS_URL,
- 'OPTIONS': {
+ "default": {
+ "BACKEND": "django_redis.cache.RedisCache",
+ "LOCATION": REDIS_URL,
+ "OPTIONS": {
# Override the default of no timeout, to avoid connection hangs.
- 'SOCKET_CONNECT_TIMEOUT': 5,
+ "SOCKET_CONNECT_TIMEOUT": 5,
},
},
}
@@ -199,69 +199,69 @@
# Create hashed+gzipped versions of assets during collectstatic,
# which will then be served by WhiteNoise with a suitable max-age.
# https://whitenoise.readthedocs.io/en/stable/django.html#add-compression-and-caching-support
-STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
+STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Authentication
AUTHENTICATION_BACKENDS = [
- 'django.contrib.auth.backends.ModelBackend',
- 'treeherder.auth.backends.AuthBackend',
+ "django.contrib.auth.backends.ModelBackend",
+ "treeherder.auth.backends.AuthBackend",
]
# Use the cache-based backend rather than the default of database.
-SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
+SESSION_ENGINE = "django.contrib.sessions.backends.cache"
# Path to redirect to on successful login.
-LOGIN_REDIRECT_URL = '/'
+LOGIN_REDIRECT_URL = "/"
# Path to redirect to on unsuccessful login attempt.
-LOGIN_REDIRECT_URL_FAILURE = '/'
+LOGIN_REDIRECT_URL_FAILURE = "/"
# Path to redirect to on logout.
-LOGOUT_REDIRECT_URL = '/'
+LOGOUT_REDIRECT_URL = "/"
# Logging
LOGGING = {
- 'version': 1,
- 'disable_existing_loggers': False,
- 'filters': {
- 'require_debug_true': {
- '()': 'django.utils.log.RequireDebugTrue',
+ "version": 1,
+ "disable_existing_loggers": False,
+ "filters": {
+ "require_debug_true": {
+ "()": "django.utils.log.RequireDebugTrue",
},
},
- 'formatters': {
- 'standard': {
- 'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
+ "formatters": {
+ "standard": {
+ "format": "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
},
- 'json': {'()': 'dockerflow.logging.JsonLogFormatter', 'logger_name': 'treeherder'},
+ "json": {"()": "dockerflow.logging.JsonLogFormatter", "logger_name": "treeherder"},
},
- 'handlers': {
- 'console': {'class': 'logging.StreamHandler', 'formatter': 'standard'},
- 'json': {'class': 'logging.StreamHandler', 'formatter': 'json', 'level': 'DEBUG'},
+ "handlers": {
+ "console": {"class": "logging.StreamHandler", "formatter": "standard"},
+ "json": {"class": "logging.StreamHandler", "formatter": "json", "level": "DEBUG"},
},
- 'loggers': {
- 'django': {
- 'filters': ['require_debug_true'],
- 'handlers': ['console'],
- 'level': 'INFO',
- 'propagate': True,
+ "loggers": {
+ "django": {
+ "filters": ["require_debug_true"],
+ "handlers": ["console"],
+ "level": "INFO",
+ "propagate": True,
},
- 'django.request': {
- 'handlers': ['console'],
- 'level': 'WARNING',
- 'propagate': True,
+ "django.request": {
+ "handlers": ["console"],
+ "level": "WARNING",
+ "propagate": True,
},
- 'treeherder': {
- 'handlers': ['console'],
- 'level': LOGGING_LEVEL,
- 'propagate': LOGGING_LEVEL != 'WARNING',
+ "treeherder": {
+ "handlers": ["console"],
+ "level": LOGGING_LEVEL,
+ "propagate": LOGGING_LEVEL != "WARNING",
},
- 'kombu': {
- 'handlers': ['console'],
- 'level': 'WARNING',
+ "kombu": {
+ "handlers": ["console"],
+ "level": "WARNING",
},
- 'request.summary': {
- 'handlers': ['json'],
- 'level': 'DEBUG',
+ "request.summary": {
+ "handlers": ["json"],
+ "level": "DEBUG",
},
},
}
@@ -269,13 +269,13 @@
# SECURITY
USE_X_FORWARDED_HOST = True
USE_X_FORWARDED_PORT = True
-SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
+SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
CSRF_TRUSTED_ORIGINS = env.list(
- 'CSRF_TRUSTED_ORIGINS', default=['http://localhost:8000', 'http://localhost:5000']
+ "CSRF_TRUSTED_ORIGINS", default=["http://localhost:8000", "http://localhost:5000"]
)
-if SITE_URL.startswith('https://'):
+if SITE_URL.startswith("https://"):
SECURE_SSL_REDIRECT = True
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
@@ -291,30 +291,30 @@
SILENCED_SYSTEM_CHECKS = [
# We can't set CSRF_COOKIE_HTTPONLY to True since the requests to the API
# made using Angular's `httpProvider` require access to the cookie.
- 'security.W017',
- 'security.W019',
+ "security.W017",
+ "security.W019",
]
# User Agents
# User agents which will be blocked from making requests to the site.
DISALLOWED_USER_AGENTS = (
- re.compile(r'^Go-http-client/'),
+ re.compile(r"^Go-http-client/"),
# This was the old Go http package user agent prior to Go-http-client/*
# https://github.com/golang/go/commit/0d1ceef9452c495b6f6d60e578886689184e5e4b
- re.compile(r'^Go 1.1 package http'),
+ re.compile(r"^Go 1.1 package http"),
# Note: This intentionally does not match the command line curl
# tool's default User Agent, only the library used by eg PHP.
- re.compile(r'^libcurl/'),
- re.compile(r'^Python-urllib/'),
- re.compile(r'^python-requests/'),
+ re.compile(r"^libcurl/"),
+ re.compile(r"^Python-urllib/"),
+ re.compile(r"^python-requests/"),
)
# THIRD PARTY APPS
# Auth0 setup
-AUTH0_DOMAIN = env('AUTH0_DOMAIN', default="auth.mozilla.auth0.com")
-AUTH0_CLIENTID = env('AUTH0_CLIENTID', default="q8fZZFfGEmSB2c5uSI8hOkKdDGXnlo5z")
+AUTH0_DOMAIN = env("AUTH0_DOMAIN", default="auth.mozilla.auth0.com")
+AUTH0_CLIENTID = env("AUTH0_CLIENTID", default="q8fZZFfGEmSB2c5uSI8hOkKdDGXnlo5z")
# Celery
@@ -324,26 +324,26 @@
# to simplify the queue configuration, by using the recommended CELERY_TASK_ROUTES instead:
# http://docs.celeryproject.org/en/latest/userguide/routing.html#automatic-routing
CELERY_TASK_QUEUES = [
- Queue('default', Exchange('default'), routing_key='default'),
- Queue('log_parser', Exchange('default'), routing_key='log_parser.normal'),
- Queue('log_parser_fail_raw_sheriffed', Exchange('default'), routing_key='log_parser.failures'),
+ Queue("default", Exchange("default"), routing_key="default"),
+ Queue("log_parser", Exchange("default"), routing_key="log_parser.normal"),
+ Queue("log_parser_fail_raw_sheriffed", Exchange("default"), routing_key="log_parser.failures"),
Queue(
- 'log_parser_fail_raw_unsheriffed', Exchange('default'), routing_key='log_parser.failures'
+ "log_parser_fail_raw_unsheriffed", Exchange("default"), routing_key="log_parser.failures"
),
- Queue('log_parser_fail_json_sheriffed', Exchange('default'), routing_key='log_parser.failures'),
+ Queue("log_parser_fail_json_sheriffed", Exchange("default"), routing_key="log_parser.failures"),
Queue(
- 'log_parser_fail_json_unsheriffed', Exchange('default'), routing_key='log_parser.failures'
+ "log_parser_fail_json_unsheriffed", Exchange("default"), routing_key="log_parser.failures"
),
- Queue('pushlog', Exchange('default'), routing_key='pushlog'),
- Queue('generate_perf_alerts', Exchange('default'), routing_key='generate_perf_alerts'),
- Queue('store_pulse_tasks', Exchange('default'), routing_key='store_pulse_tasks'),
+ Queue("pushlog", Exchange("default"), routing_key="pushlog"),
+ Queue("generate_perf_alerts", Exchange("default"), routing_key="generate_perf_alerts"),
+ Queue("store_pulse_tasks", Exchange("default"), routing_key="store_pulse_tasks"),
Queue(
- 'store_pulse_tasks_classification',
- Exchange('default'),
- routing_key='store_pulse_tasks_classification',
+ "store_pulse_tasks_classification",
+ Exchange("default"),
+ routing_key="store_pulse_tasks_classification",
),
- Queue('store_pulse_pushes', Exchange('default'), routing_key='store_pulse_pushes'),
- Queue('statsd', Exchange('default'), routing_key='statsd'),
+ Queue("store_pulse_pushes", Exchange("default"), routing_key="store_pulse_pushes"),
+ Queue("statsd", Exchange("default"), routing_key="statsd"),
]
# Force all queues to be explicitly listed in `CELERY_TASK_QUEUES` to help prevent typos
@@ -351,7 +351,7 @@
CELERY_TASK_CREATE_MISSING_QUEUES = False
# Celery broker setup
-CELERY_BROKER_URL = env('BROKER_URL', default='amqp://guest:guest@localhost:5672//')
+CELERY_BROKER_URL = env("BROKER_URL", default="amqp://guest:guest@localhost:5672//")
# Force Celery to use TLS when appropriate (ie if not localhost),
# rather than relying on `CELERY_BROKER_URL` having `amqps://` or `?ssl=` set.
@@ -367,7 +367,7 @@
CELERY_BROKER_HEARTBEAT = None
# default value when no task routing info is specified
-CELERY_TASK_DEFAULT_QUEUE = 'default'
+CELERY_TASK_DEFAULT_QUEUE = "default"
# Make Celery defer the acknowledgment of a task until after the task has completed,
# to prevent data loss in the case of celery master process crashes or infra failures.
@@ -388,17 +388,17 @@
CELERY_BEAT_SCHEDULE = {
# this is just a failsafe in case the Pulse ingestion misses something
- 'fetch-push-logs-every-5-minutes': {
- 'task': 'fetch-push-logs',
- 'schedule': timedelta(minutes=5),
- 'relative': True,
- 'options': {"queue": "pushlog"},
+ "fetch-push-logs-every-5-minutes": {
+ "task": "fetch-push-logs",
+ "schedule": timedelta(minutes=5),
+ "relative": True,
+ "options": {"queue": "pushlog"},
},
- 'publish_stats': {
- 'task': 'publish-stats',
- 'schedule': crontab(minute=f'*/{CELERY_STATS_PUBLICATION_DELAY}'),
- 'relative': True,
- 'options': {'queue': 'statsd'},
+ "publish_stats": {
+ "task": "publish-stats",
+ "schedule": crontab(minute=f"*/{CELERY_STATS_PUBLICATION_DELAY}"),
+ "relative": True,
+ "options": {"queue": "statsd"},
},
}
@@ -408,16 +408,16 @@
# Rest Framework
REST_FRAMEWORK = {
- 'ALLOWED_VERSIONS': ('1.0',),
- 'DEFAULT_AUTHENTICATION_CLASSES': ('rest_framework.authentication.SessionAuthentication',),
- 'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',),
- 'DEFAULT_PARSER_CLASSES': ('rest_framework.parsers.JSONParser',),
- 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAuthenticatedOrReadOnly',),
- 'DEFAULT_RENDERER_CLASSES': ('rest_framework.renderers.JSONRenderer',),
- 'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.openapi.AutoSchema',
- 'DEFAULT_VERSION': '1.0',
- 'DEFAULT_VERSIONING_CLASS': 'rest_framework.versioning.AcceptHeaderVersioning',
- 'TEST_REQUEST_DEFAULT_FORMAT': 'json',
+ "ALLOWED_VERSIONS": ("1.0",),
+ "DEFAULT_AUTHENTICATION_CLASSES": ("rest_framework.authentication.SessionAuthentication",),
+ "DEFAULT_FILTER_BACKENDS": ("django_filters.rest_framework.DjangoFilterBackend",),
+ "DEFAULT_PARSER_CLASSES": ("rest_framework.parsers.JSONParser",),
+ "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticatedOrReadOnly",),
+ "DEFAULT_RENDERER_CLASSES": ("rest_framework.renderers.JSONRenderer",),
+ "DEFAULT_SCHEMA_CLASS": "rest_framework.schemas.openapi.AutoSchema",
+ "DEFAULT_VERSION": "1.0",
+ "DEFAULT_VERSIONING_CLASS": "rest_framework.versioning.AcceptHeaderVersioning",
+ "TEST_REQUEST_DEFAULT_FORMAT": "json",
}
# Whitenoise
@@ -435,9 +435,9 @@
# Templating
TEMPLATES = [
{
- 'BACKEND': 'django.template.backends.django.DjangoTemplates',
- 'APP_DIRS': True,
- 'DIRS': [WHITENOISE_ROOT],
+ "BACKEND": "django.template.backends.django.DjangoTemplates",
+ "APP_DIRS": True,
+ "DIRS": [WHITENOISE_ROOT],
}
]
@@ -451,7 +451,7 @@
BZ_API_URL = "https://bugzilla.mozilla.org"
BUGFILER_API_URL = env("BUGZILLA_API_URL", default=BZ_API_URL)
BUGFILER_API_KEY = env("BUG_FILER_API_KEY", default=None)
-BZ_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
+BZ_DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
# For intermittents commenter
COMMENTER_API_KEY = env("BUG_COMMENTER_API_KEY", default=None)
@@ -478,36 +478,36 @@
# From the same job's log, ingest (or not) multiple PERFHERDER_DATA dumps
# pertaining to the same performance signature
PERFHERDER_ENABLE_MULTIDATA_INGESTION = env.bool(
- 'PERFHERDER_ENABLE_MULTIDATA_INGESTION', default=True
+ "PERFHERDER_ENABLE_MULTIDATA_INGESTION", default=True
)
# Sherlock's settings (the performance sheriff robot)
-SUPPORTED_PLATFORMS = ['windows', 'linux', 'osx']
+SUPPORTED_PLATFORMS = ["windows", "linux", "osx"]
MAX_BACKFILLS_PER_PLATFORM = {
- 'windows': 200,
- 'linux': 200,
- 'osx': 20,
+ "windows": 200,
+ "linux": 200,
+ "osx": 20,
}
RESET_BACKFILL_LIMITS = timedelta(hours=24)
TIME_TO_MATURE = timedelta(hours=4)
# Taskcluster credentials for Sherlock
# TODO: rename PERF_SHERIFF_BOT prefixes to SHERLOCK
-PERF_SHERIFF_BOT_CLIENT_ID = env('PERF_SHERIFF_BOT_CLIENT_ID', default=None)
-PERF_SHERIFF_BOT_ACCESS_TOKEN = env('PERF_SHERIFF_BOT_ACCESS_TOKEN', default=None)
+PERF_SHERIFF_BOT_CLIENT_ID = env("PERF_SHERIFF_BOT_CLIENT_ID", default=None)
+PERF_SHERIFF_BOT_ACCESS_TOKEN = env("PERF_SHERIFF_BOT_ACCESS_TOKEN", default=None)
# Taskcluster credentials for Notification Service
-NOTIFY_CLIENT_ID = env('NOTIFY_CLIENT_ID', default=None)
-NOTIFY_ACCESS_TOKEN = env('NOTIFY_ACCESS_TOKEN', default=None)
+NOTIFY_CLIENT_ID = env("NOTIFY_CLIENT_ID", default=None)
+NOTIFY_ACCESS_TOKEN = env("NOTIFY_ACCESS_TOKEN", default=None)
# This is only used for removing the rate limiting. You can create your own here:
# https://github.com/settings/tokens
GITHUB_TOKEN = env("GITHUB_TOKEN", default=None)
# Statsd server configuration
-STATSD_HOST = env('STATSD_HOST', default='statsd')
-STATSD_PORT = env('STATSD_PORT', default=8124)
-STATSD_PREFIX = env('STATSD_PREFIX', default='treeherder')
+STATSD_HOST = env("STATSD_HOST", default="statsd")
+STATSD_PORT = env("STATSD_PORT", default=8124)
+STATSD_PREFIX = env("STATSD_PREFIX", default="treeherder")
# For dockerflow
BASE_DIR = SRC_DIR
diff --git a/treeherder/config/urls.py b/treeherder/config/urls.py
index 2503580c984..440de282ff9 100644
--- a/treeherder/config/urls.py
+++ b/treeherder/config/urls.py
@@ -12,10 +12,10 @@
import debug_toolbar
urlpatterns += [
- re_path(r'^__debug__/', include(debug_toolbar.urls)),
+ re_path(r"^__debug__/", include(debug_toolbar.urls)),
]
urlpatterns += [
- re_path(r'^api/', include(api_urls)),
- re_path(r'', TemplateView.as_view(template_name='index.html')),
+ re_path(r"^api/", include(api_urls)),
+ re_path(r"", TemplateView.as_view(template_name="index.html")),
]
diff --git a/treeherder/config/utils.py b/treeherder/config/utils.py
index d4183b6e243..ce331cd25d1 100644
--- a/treeherder/config/utils.py
+++ b/treeherder/config/utils.py
@@ -4,4 +4,4 @@
def connection_should_use_tls(url):
# Ensure use of celery workers for local development
host = furl(url).host or url # The url passed is already just the hostname.
- return host not in ('127.0.0.1', 'localhost', 'mysql', 'rabbitmq')
+ return host not in ("127.0.0.1", "localhost", "mysql", "rabbitmq")
diff --git a/treeherder/config/wsgi.py b/treeherder/config/wsgi.py
index 0ff50cdc4bf..2427eb3a21e 100644
--- a/treeherder/config/wsgi.py
+++ b/treeherder/config/wsgi.py
@@ -10,6 +10,6 @@
from django.core.wsgi import get_wsgi_application
-os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'treeherder.config.settings')
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "treeherder.config.settings")
application = get_wsgi_application()
diff --git a/treeherder/etl/artifact.py b/treeherder/etl/artifact.py
index bb3e653679a..7facdd002d5 100644
--- a/treeherder/etl/artifact.py
+++ b/treeherder/etl/artifact.py
@@ -15,14 +15,14 @@ def store_text_log_summary_artifact(job, text_log_summary_artifact):
"""
Store the contents of the text log summary artifact
"""
- errors = json.loads(text_log_summary_artifact['blob'])['errors']
+ errors = json.loads(text_log_summary_artifact["blob"])["errors"]
log_errors = TextLogError.objects.bulk_create(
[
TextLogError(
job=job,
- line_number=error['linenumber'],
- line=astral_filter(error['line']),
+ line_number=error["linenumber"],
+ line=astral_filter(error["line"]),
)
for error in errors
],
@@ -34,11 +34,11 @@ def store_text_log_summary_artifact(job, text_log_summary_artifact):
# Conflicts may have occurred during the insert, but we pass the queryset for performance
bugs = error_summary.get_error_summary(job, queryset=log_errors)
for suggestion in bugs:
- if (suggestion['failure_new_in_rev'] or suggestion['counter'] == 0) and job.result not in [
- 'success',
- 'unknown',
- 'usercancel',
- 'retry',
+ if (suggestion["failure_new_in_rev"] or suggestion["counter"] == 0) and job.result not in [
+ "success",
+ "unknown",
+ "usercancel",
+ "retry",
]:
# classify job as `new failure` - for filtering, etc.
job.failure_classification_id = 6
@@ -62,11 +62,11 @@ def store_job_artifacts(artifact_data):
for artifact in artifact_data:
# Determine what type of artifact we have received
if artifact:
- artifact_name = artifact.get('name')
+ artifact_name = artifact.get("name")
if not artifact_name:
logger.error("load_job_artifacts: Unnamed job artifact, skipping")
continue
- job_guid = artifact.get('job_guid')
+ job_guid = artifact.get("job_guid")
if not job_guid:
logger.error(
"load_job_artifacts: Artifact '%s' with no " "job guid set, skipping",
@@ -77,12 +77,12 @@ def store_job_artifacts(artifact_data):
try:
job = Job.objects.get(guid=job_guid)
except Job.DoesNotExist:
- logger.error('load_job_artifacts: No job_id for guid %s', job_guid)
+ logger.error("load_job_artifacts: No job_id for guid %s", job_guid)
continue
- if artifact_name == 'performance_data':
+ if artifact_name == "performance_data":
store_performance_artifact(job, artifact)
- elif artifact_name == 'text_log_summary':
+ elif artifact_name == "text_log_summary":
try:
store_text_log_summary_artifact(job, artifact)
except IntegrityError:
@@ -97,7 +97,7 @@ def store_job_artifacts(artifact_data):
"Unknown artifact type: %s submitted with job %s", artifact_name, job.guid
)
else:
- logger.error('store_job_artifacts: artifact type %s not understood', artifact_name)
+ logger.error("store_job_artifacts: artifact type %s not understood", artifact_name)
def serialize_artifact_json_blobs(artifacts):
@@ -105,8 +105,8 @@ def serialize_artifact_json_blobs(artifacts):
Ensure that JSON artifact blobs passed as dicts are converted to JSON
"""
for artifact in artifacts:
- blob = artifact['blob']
- if artifact['type'].lower() == 'json' and not isinstance(blob, str):
- artifact['blob'] = json.dumps(blob)
+ blob = artifact["blob"]
+ if artifact["type"].lower() == "json" and not isinstance(blob, str):
+ artifact["blob"] = json.dumps(blob)
return artifacts
diff --git a/treeherder/etl/bugzilla.py b/treeherder/etl/bugzilla.py
index 71f230720e1..535cf34b564 100644
--- a/treeherder/etl/bugzilla.py
+++ b/treeherder/etl/bugzilla.py
@@ -23,14 +23,14 @@ def reopen_intermittent_bugs():
return
incomplete_bugs = set(
- Bugscache.objects.filter(resolution='INCOMPLETE').values_list('id', flat=True)
+ Bugscache.objects.filter(resolution="INCOMPLETE").values_list("id", flat=True)
)
# Intermittent bugs get closed after 3 weeks of inactivity if other conditions don't apply:
# https://github.com/mozilla/relman-auto-nag/blob/c7439e247677333c1cd8c435234b3ef3adc49680/auto_nag/scripts/close_intermittents.py#L17
RECENT_DAYS = 7
recently_used_bugs = set(
BugJobMap.objects.filter(created__gt=datetime.now() - timedelta(RECENT_DAYS)).values_list(
- 'bug_id', flat=True
+ "bug_id", flat=True
)
)
bugs_to_reopen = incomplete_bugs & recently_used_bugs
@@ -38,72 +38,72 @@ def reopen_intermittent_bugs():
for bug_id in bugs_to_reopen:
bug_data = (
BugJobMap.objects.filter(bug_id=bug_id)
- .select_related('job__repository')
- .order_by('-created')
- .values('job_id', 'job__repository__name')[0]
+ .select_related("job__repository")
+ .order_by("-created")
+ .values("job_id", "job__repository__name")[0]
)
- job_id = bug_data.get('job_id')
- repository = bug_data.get('job__repository__name')
+ job_id = bug_data.get("job_id")
+ repository = bug_data.get("job__repository__name")
log_url = f"https://treeherder.mozilla.org/logviewer?job_id={job_id}&repo={repository}"
- comment = {'body': "New failure instance: " + log_url}
+ comment = {"body": "New failure instance: " + log_url}
url = settings.BUGFILER_API_URL + "/rest/bug/" + str(bug_id)
- headers = {'x-bugzilla-api-key': settings.BUGFILER_API_KEY, 'Accept': 'application/json'}
+ headers = {"x-bugzilla-api-key": settings.BUGFILER_API_KEY, "Accept": "application/json"}
data = {
- 'status': 'REOPENED',
- 'comment': comment,
- 'comment_tags': "treeherder",
+ "status": "REOPENED",
+ "comment": comment,
+ "comment_tags": "treeherder",
}
try:
- reopen_request(url, method='PUT', headers=headers, json=data)
+ reopen_request(url, method="PUT", headers=headers, json=data)
except requests.exceptions.HTTPError as e:
try:
- message = e.response.json()['message']
+ message = e.response.json()["message"]
except (ValueError, KeyError):
message = e.response.text
logger.error(f"Reopening bug {str(bug_id)} failed: {message}")
def fetch_intermittent_bugs(additional_params, limit, duplicate_chain_length):
- url = settings.BZ_API_URL + '/rest/bug'
+ url = settings.BZ_API_URL + "/rest/bug"
params = {
- 'include_fields': ','.join(
+ "include_fields": ",".join(
[
- 'id',
- 'summary',
- 'status',
- 'resolution',
- 'dupe_of',
- 'duplicates',
- 'cf_crash_signature',
- 'keywords',
- 'last_change_time',
- 'whiteboard',
+ "id",
+ "summary",
+ "status",
+ "resolution",
+ "dupe_of",
+ "duplicates",
+ "cf_crash_signature",
+ "keywords",
+ "last_change_time",
+ "whiteboard",
]
),
- 'limit': limit,
+ "limit": limit,
}
params.update(additional_params)
response = fetch_json(url, params=params)
- return response.get('bugs', [])
+ return response.get("bugs", [])
class BzApiBugProcess:
def run(self):
year_ago = datetime.utcnow() - timedelta(days=365)
last_change_time_max = (
- Bugscache.objects.all().aggregate(Max('modified'))['modified__max'] or None
+ Bugscache.objects.all().aggregate(Max("modified"))["modified__max"] or None
)
if last_change_time_max:
last_change_time_max -= timedelta(minutes=10)
else:
last_change_time_max = year_ago
- max_summary_length = Bugscache._meta.get_field('summary').max_length
- max_whiteboard_length = Bugscache._meta.get_field('whiteboard').max_length
+ max_summary_length = Bugscache._meta.get_field("summary").max_length
+ max_whiteboard_length = Bugscache._meta.get_field("whiteboard").max_length
- last_change_time_string = last_change_time_max.strftime('%Y-%m-%dT%H:%M:%SZ')
+ last_change_time_string = last_change_time_max.strftime("%Y-%m-%dT%H:%M:%SZ")
bugs_to_duplicates = {}
duplicates_to_bugs = {}
@@ -134,7 +134,7 @@ def run(self):
bugs_to_process = list(
bugs_to_process
- set(
- Bugscache.objects.filter(processed_update=True).values_list('id', flat=True)
+ Bugscache.objects.filter(processed_update=True).values_list("id", flat=True)
)
)
if len(bugs_to_process) == 0:
@@ -148,13 +148,13 @@ def run(self):
while True:
if duplicate_chain_length == 0:
additional_params = {
- 'keywords': 'intermittent-failure',
- 'last_change_time': last_change_time_string,
- 'offset': bugs_offset,
+ "keywords": "intermittent-failure",
+ "last_change_time": last_change_time_string,
+ "offset": bugs_offset,
}
else:
additional_params = {
- 'id': ','.join(
+ "id": ",".join(
list(
map(
str,
@@ -185,21 +185,21 @@ def run(self):
# just ignore it when importing/updating the bug to avoid
# a ValueError
try:
- dupe_of = bug.get('dupe_of', None)
+ dupe_of = bug.get("dupe_of", None)
Bugscache.objects.update_or_create(
- id=bug['id'],
+ id=bug["id"],
defaults={
- 'status': bug.get('status', ''),
- 'resolution': bug.get('resolution', ''),
- 'summary': bug.get('summary', '')[:max_summary_length],
- 'dupe_of': dupe_of,
- 'crash_signature': bug.get('cf_crash_signature', ''),
- 'keywords': ",".join(bug['keywords']),
- 'modified': dateutil.parser.parse(
- bug['last_change_time'], ignoretz=True
+ "status": bug.get("status", ""),
+ "resolution": bug.get("resolution", ""),
+ "summary": bug.get("summary", "")[:max_summary_length],
+ "dupe_of": dupe_of,
+ "crash_signature": bug.get("cf_crash_signature", ""),
+ "keywords": ",".join(bug["keywords"]),
+ "modified": dateutil.parser.parse(
+ bug["last_change_time"], ignoretz=True
),
- 'whiteboard': bug.get('whiteboard', '')[:max_whiteboard_length],
- 'processed_update': True,
+ "whiteboard": bug.get("whiteboard", "")[:max_whiteboard_length],
+ "processed_update": True,
},
)
except Exception as e:
@@ -212,16 +212,16 @@ def run(self):
if dupe_of in duplicates_to_bugs
else dupe_of
)
- duplicates_to_bugs[bug['id']] = openish
+ duplicates_to_bugs[bug["id"]] = openish
if openish not in bugs_to_duplicates:
bugs_to_process_next.add(openish)
bugs_to_duplicates[openish] = set()
- bugs_to_duplicates[openish].add(bug['id'])
- if bug['id'] in bugs_to_duplicates:
- for duplicate_id in bugs_to_duplicates[bug['id']]:
+ bugs_to_duplicates[openish].add(bug["id"])
+ if bug["id"] in bugs_to_duplicates:
+ for duplicate_id in bugs_to_duplicates[bug["id"]]:
duplicates_to_bugs[duplicate_id] = openish
- bugs_to_duplicates[openish] |= bugs_to_duplicates[bug['id']]
- duplicates = bug.get('duplicates')
+ bugs_to_duplicates[openish] |= bugs_to_duplicates[bug["id"]]
+ duplicates = bug.get("duplicates")
if len(duplicates) > 0:
duplicates_to_check |= set(duplicates)
@@ -231,10 +231,10 @@ def run(self):
# typo) but they don't cause issues.
# distinct('bug_id') is not supported by Django + MySQL 5.7
bugs_to_process_next |= set(
- BugJobMap.objects.all().values_list('bug_id', flat=True)
+ BugJobMap.objects.all().values_list("bug_id", flat=True)
)
bugs_to_process = bugs_to_process_next - set(
- Bugscache.objects.filter(processed_update=True).values_list('id', flat=True)
+ Bugscache.objects.filter(processed_update=True).values_list("id", flat=True)
)
if duplicate_chain_length == 5 and len(bugs_to_process):
logger.warn(
@@ -249,7 +249,7 @@ def run(self):
bugs_to_process_next = duplicates_to_check
duplicates_to_check = set()
bugs_to_process = bugs_to_process_next - set(
- Bugscache.objects.filter(processed_update=True).values_list('id', flat=True)
+ Bugscache.objects.filter(processed_update=True).values_list("id", flat=True)
)
if len(bugs_to_process) == 0:
break
@@ -274,15 +274,15 @@ def run(self):
# Switch classifications from duplicate bugs to open ones.
duplicates_db = set(
- Bugscache.objects.filter(dupe_of__isnull=False).values_list('id', flat=True)
+ Bugscache.objects.filter(dupe_of__isnull=False).values_list("id", flat=True)
)
- bugs_used = set(BugJobMap.objects.all().values_list('bug_id', flat=True))
+ bugs_used = set(BugJobMap.objects.all().values_list("bug_id", flat=True))
duplicates_used = duplicates_db & bugs_used
for bug_id in duplicates_used:
dupe_of = Bugscache.objects.get(id=bug_id).dupe_of
# Jobs both already classified with new duplicate and its open bug.
jobs_openish = list(
- BugJobMap.objects.filter(bug_id=dupe_of).values_list('job_id', flat=True)
+ BugJobMap.objects.filter(bug_id=dupe_of).values_list("job_id", flat=True)
)
BugJobMap.objects.filter(bug_id=bug_id, job_id__in=jobs_openish).delete()
BugJobMap.objects.filter(bug_id=bug_id).update(bug_id=dupe_of)
diff --git a/treeherder/etl/classification_loader.py b/treeherder/etl/classification_loader.py
index 94ad98661fd..10144cae567 100644
--- a/treeherder/etl/classification_loader.py
+++ b/treeherder/etl/classification_loader.py
@@ -85,13 +85,13 @@ def process(self, pulse_job, root_url):
)
def get_push(self, task_route):
- mozci_env = env('PULSE_MOZCI_ENVIRONMENT', default='production')
- if mozci_env == 'testing':
+ mozci_env = env("PULSE_MOZCI_ENVIRONMENT", default="production")
+ if mozci_env == "testing":
route_regex = CLASSIFICATION_TESTING_ROUTE_REGEX
else:
- if mozci_env != 'production':
+ if mozci_env != "production":
logger.warning(
- f'PULSE_MOZCI_ENVIRONMENT should be testing or production not {mozci_env}, defaulting to production'
+ f"PULSE_MOZCI_ENVIRONMENT should be testing or production not {mozci_env}, defaulting to production"
)
route_regex = CLASSIFICATION_PRODUCTION_ROUTE_REGEX
@@ -116,8 +116,8 @@ def get_push(self, task_route):
try:
newrelic.agent.add_custom_attribute("revision", revision)
- revision_field = 'revision__startswith' if len(revision) < 40 else 'revision'
- filter_kwargs = {'repository': repository, revision_field: revision}
+ revision_field = "revision__startswith" if len(revision) < 40 else "revision"
+ filter_kwargs = {"repository": repository, revision_field: revision}
push = Push.objects.get(**filter_kwargs)
except Push.DoesNotExist:
diff --git a/treeherder/etl/files_bugzilla_map.py b/treeherder/etl/files_bugzilla_map.py
index 9d4599894a0..5227e7a8c28 100644
--- a/treeherder/etl/files_bugzilla_map.py
+++ b/treeherder/etl/files_bugzilla_map.py
@@ -15,9 +15,9 @@
class FilesBugzillaMapProcess:
bugzilla_components = {}
- max_path_length = FilesBugzillaMap._meta.get_field('path').max_length
- max_product_length = BugzillaComponent._meta.get_field('product').max_length
- max_component_length = BugzillaComponent._meta.get_field('component').max_length
+ max_path_length = FilesBugzillaMap._meta.get_field("path").max_length
+ max_product_length = BugzillaComponent._meta.get_field("product").max_length
+ max_component_length = BugzillaComponent._meta.get_field("component").max_length
run_id = None
@@ -76,16 +76,16 @@ def get_or_add_bugzilla_component(self, files_bugzilla_data, path):
def get_projects_to_import(self):
return list(
- Repository.objects.filter(codebase='gecko')
- .filter(active_status='active')
+ Repository.objects.filter(codebase="gecko")
+ .filter(active_status="active")
.filter(life_cycle_order__isnull=False)
- .values_list('name', flat=True)
- .order_by('life_cycle_order')
+ .values_list("name", flat=True)
+ .order_by("life_cycle_order")
)
def fetch_data(self, project):
url = (
- 'https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.%s.latest.source.source-bugzilla-info/artifacts/public/components.json'
+ "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.%s.latest.source.source-bugzilla-info/artifacts/public/components.json"
% project
)
files_bugzilla_data = None
@@ -131,14 +131,14 @@ def run(self):
paths_ingested_all |= paths_ingested_this_project
paths_bugzilla_ingested_all |= paths_bugzilla_ingested_project
- paths_old = set(FilesBugzillaMap.objects.values_list('path', flat=True))
+ paths_old = set(FilesBugzillaMap.objects.values_list("path", flat=True))
paths_removed = paths_old - paths_ingested_all
FilesBugzillaMap.objects.filter(path__in=paths_removed).delete()
paths_bugzilla_old = set(
- FilesBugzillaMap.objects.select_related('bugzilla_component').values_list(
- 'path', 'bugzilla_component__product', 'bugzilla_component__component'
+ FilesBugzillaMap.objects.select_related("bugzilla_component").values_list(
+ "path", "bugzilla_component__product", "bugzilla_component__component"
)
)
paths_bugzilla_unchanged = paths_bugzilla_old.intersection(paths_bugzilla_ingested_all)
@@ -164,12 +164,12 @@ def run(self):
if not bugzilla_component_data:
continue
path_bugzilla_update_needed = FilesBugzillaMap.objects.select_related(
- 'bugzilla_component'
+ "bugzilla_component"
).filter(path=path)[0]
path_bugzilla_update_needed.bugzilla_component_id = bugzilla_component_data.id
paths_bugzilla_update_needed.append(path_bugzilla_update_needed)
FilesBugzillaMap.objects.bulk_update(
- paths_bugzilla_update_needed, ['bugzilla_component_id'], batch_size=1000
+ paths_bugzilla_update_needed, ["bugzilla_component_id"], batch_size=1000
)
paths_bugzilla_addition_needed = []
@@ -177,7 +177,7 @@ def run(self):
bugzilla_component_data = self.get_or_add_bugzilla_component(path_bugzilla_data, path)
if not bugzilla_component_data:
continue
- file_name = (path.rsplit('/', 1))[-1]
+ file_name = (path.rsplit("/", 1))[-1]
paths_bugzilla_addition_needed.append(
FilesBugzillaMap(
path=path,
@@ -188,21 +188,21 @@ def run(self):
FilesBugzillaMap.objects.bulk_create(paths_bugzilla_addition_needed, batch_size=1000)
bugzilla_components_used = set(
- FilesBugzillaMap.objects.values_list('bugzilla_component_id', flat=True).distinct()
+ FilesBugzillaMap.objects.values_list("bugzilla_component_id", flat=True).distinct()
)
bugzilla_components_all = set(
- BugzillaComponent.objects.all().values_list('id', flat=True).distinct()
+ BugzillaComponent.objects.all().values_list("id", flat=True).distinct()
)
bugzilla_components_unused = bugzilla_components_all.difference(bugzilla_components_used)
(BugzillaComponent.objects.filter(id__in=bugzilla_components_unused).delete())
class ProductSecurityGroupProcess:
- max_product_length = BugzillaSecurityGroup._meta.get_field('product').max_length
- max_security_group_length = BugzillaSecurityGroup._meta.get_field('security_group').max_length
+ max_product_length = BugzillaSecurityGroup._meta.get_field("product").max_length
+ max_security_group_length = BugzillaSecurityGroup._meta.get_field("security_group").max_length
def fetch_data(self):
- url = 'https://bugzilla.mozilla.org/latest/configuration'
+ url = "https://bugzilla.mozilla.org/latest/configuration"
product_security_group_data = None
exception = None
try:
diff --git a/treeherder/etl/job_loader.py b/treeherder/etl/job_loader.py
index 050deac511e..69a3aff9768 100644
--- a/treeherder/etl/job_loader.py
+++ b/treeherder/etl/job_loader.py
@@ -25,7 +25,7 @@
# properly, when it's available:
# https://bugzilla.mozilla.org/show_bug.cgi?id=1323110#c7
def task_and_retry_ids(job_guid):
- (decoded_task_id, retry_id) = job_guid.split('/')
+ (decoded_task_id, retry_id) = job_guid.split("/")
# As of slugid v2, slugid.encode() returns a string not bytestring under Python 3.
real_task_id = slugid.encode(uuid.UUID(decoded_task_id))
return (real_task_id, retry_id)
@@ -64,7 +64,7 @@ def process_job(self, pulse_job, root_url):
newrelic.agent.add_custom_attribute("project", project)
repository = Repository.objects.get(name=project)
- if repository.active_status != 'active':
+ if repository.active_status != "active":
(real_task_id, _) = task_and_retry_ids(pulse_job["taskId"])
logger.debug(
"Task %s belongs to a repository that is not active.", real_task_id
@@ -90,13 +90,13 @@ def validate_revision(self, repository, pulse_job):
# check the revision for this job has an existing push
# If it doesn't, then except out so that the celery task will
# retry till it DOES exist.
- revision_field = 'revision__startswith' if len(revision) < 40 else 'revision'
- filter_kwargs = {'repository': repository, revision_field: revision}
+ revision_field = "revision__startswith" if len(revision) < 40 else "revision"
+ filter_kwargs = {"repository": repository, revision_field: revision}
- if revision_field == 'revision__startswith':
+ if revision_field == "revision__startswith":
newrelic.agent.record_custom_event(
- 'short_revision_job_loader',
- {'error': 'Revision <40 chars', 'revision': revision, 'job': pulse_job},
+ "short_revision_job_loader",
+ {"error": "Revision <40 chars", "revision": revision, "job": pulse_job},
)
if not Push.objects.filter(**filter_kwargs).exists():
diff --git a/treeherder/etl/jobs.py b/treeherder/etl/jobs.py
index 3a11af14bf5..c98ca196d41 100644
--- a/treeherder/etl/jobs.py
+++ b/treeherder/etl/jobs.py
@@ -49,22 +49,22 @@ def _remove_existing_jobs(data):
"""
new_data = []
- guids = [datum['job']['job_guid'] for datum in data]
+ guids = [datum["job"]["job_guid"] for datum in data]
state_map = {
guid: state
- for (guid, state) in Job.objects.filter(guid__in=guids).values_list('guid', 'state')
+ for (guid, state) in Job.objects.filter(guid__in=guids).values_list("guid", "state")
}
for datum in data:
- job = datum['job']
- if not state_map.get(job['job_guid']):
+ job = datum["job"]
+ if not state_map.get(job["job_guid"]):
new_data.append(datum)
else:
# should not transition from running to pending,
# or completed to any other state
- current_state = state_map[job['job_guid']]
- if current_state == 'completed' or (
- job['state'] == 'pending' and current_state == 'running'
+ current_state = state_map[job["job_guid"]]
+ if current_state == "completed" or (
+ job["state"] == "pending" and current_state == "running"
):
continue
new_data.append(datum)
@@ -84,18 +84,18 @@ def _load_job(repository, job_datum, push_id):
``pending``/``running`` job and update it with this ``retry`` job.
"""
build_platform, _ = BuildPlatform.objects.get_or_create(
- os_name=job_datum.get('build_platform', {}).get('os_name', 'unknown'),
- platform=job_datum.get('build_platform', {}).get('platform', 'unknown'),
- architecture=job_datum.get('build_platform', {}).get('architecture', 'unknown'),
+ os_name=job_datum.get("build_platform", {}).get("os_name", "unknown"),
+ platform=job_datum.get("build_platform", {}).get("platform", "unknown"),
+ architecture=job_datum.get("build_platform", {}).get("architecture", "unknown"),
)
machine_platform, _ = MachinePlatform.objects.get_or_create(
- os_name=job_datum.get('machine_platform', {}).get('os_name', 'unknown'),
- platform=job_datum.get('machine_platform', {}).get('platform', 'unknown'),
- architecture=job_datum.get('machine_platform', {}).get('architecture', 'unknown'),
+ os_name=job_datum.get("machine_platform", {}).get("os_name", "unknown"),
+ platform=job_datum.get("machine_platform", {}).get("platform", "unknown"),
+ architecture=job_datum.get("machine_platform", {}).get("architecture", "unknown"),
)
- option_names = job_datum.get('option_collection', [])
+ option_names = job_datum.get("option_collection", [])
option_collection_hash = OptionCollection.calculate_hash(option_names)
if not OptionCollection.objects.filter(option_collection_hash=option_collection_hash).exists():
# in the unlikely event that we haven't seen this set of options
@@ -109,43 +109,43 @@ def _load_job(repository, job_datum, push_id):
option_collection_hash=option_collection_hash, option=option
)
- machine, _ = Machine.objects.get_or_create(name=job_datum.get('machine', 'unknown'))
+ machine, _ = Machine.objects.get_or_create(name=job_datum.get("machine", "unknown"))
job_type, _ = JobType.objects.get_or_create(
- symbol=job_datum.get('job_symbol') or 'unknown', name=job_datum.get('name') or 'unknown'
+ symbol=job_datum.get("job_symbol") or "unknown", name=job_datum.get("name") or "unknown"
)
job_group, _ = JobGroup.objects.get_or_create(
- name=job_datum.get('group_name') or 'unknown',
- symbol=job_datum.get('group_symbol') or 'unknown',
+ name=job_datum.get("group_name") or "unknown",
+ symbol=job_datum.get("group_symbol") or "unknown",
)
- product_name = job_datum.get('product_name', 'unknown')
+ product_name = job_datum.get("product_name", "unknown")
if not product_name.strip():
- product_name = 'unknown'
+ product_name = "unknown"
product, _ = Product.objects.get_or_create(name=product_name)
- job_guid = job_datum['job_guid']
+ job_guid = job_datum["job_guid"]
job_guid = job_guid[0:50]
- who = job_datum.get('who') or 'unknown'
+ who = job_datum.get("who") or "unknown"
who = who[0:50]
- reason = job_datum.get('reason') or 'unknown'
+ reason = job_datum.get("reason") or "unknown"
reason = reason[0:125]
- state = job_datum.get('state') or 'unknown'
+ state = job_datum.get("state") or "unknown"
state = state[0:25]
- build_system_type = job_datum.get('build_system_type', 'buildbot')
+ build_system_type = job_datum.get("build_system_type", "buildbot")
- reference_data_name = job_datum.get('reference_data_name', None)
+ reference_data_name = job_datum.get("reference_data_name", None)
- default_failure_classification = FailureClassification.objects.get(name='not classified')
+ default_failure_classification = FailureClassification.objects.get(name="not classified")
sh = sha1()
sh.update(
- ''.join(
+ "".join(
map(
str,
[
@@ -165,7 +165,7 @@ def _load_job(repository, job_datum, push_id):
reference_data_name,
],
)
- ).encode('utf-8')
+ ).encode("utf-8")
)
signature_hash = sh.hexdigest()
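For context, the signature_hash computed above is a SHA-1 over the concatenated string forms of the job's descriptive fields (platforms, job type and group, option collection hash, reference data name, ...), so identical reference data always maps to the same hash. A reduced sketch with an abbreviated, made-up field list:

    # Reduced sketch of the signature computation; the real field list is longer.
    from hashlib import sha1

    def reference_signature(fields):
        """fields: ordered descriptive values (platform, job type, options, ...)."""
        return sha1("".join(map(str, fields)).encode("utf-8")).hexdigest()

    # Same inputs in the same order always yield the same signature hash.
    assert reference_signature(["linux1804-64", "opt", "mochitest-devtools"]) == \
        reference_signature(["linux1804-64", "opt", "mochitest-devtools"])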
@@ -180,28 +180,28 @@ def _load_job(repository, job_datum, push_id):
build_system_type=build_system_type,
repository=repository.name,
defaults={
- 'first_submission_timestamp': time.time(),
- 'build_os_name': build_platform.os_name,
- 'build_platform': build_platform.platform,
- 'build_architecture': build_platform.architecture,
- 'machine_os_name': machine_platform.os_name,
- 'machine_platform': machine_platform.platform,
- 'machine_architecture': machine_platform.architecture,
- 'job_group_name': job_group.name,
- 'job_group_symbol': job_group.symbol,
- 'job_type_name': job_type.name,
- 'job_type_symbol': job_type.symbol,
- 'option_collection_hash': option_collection_hash,
+ "first_submission_timestamp": time.time(),
+ "build_os_name": build_platform.os_name,
+ "build_platform": build_platform.platform,
+ "build_architecture": build_platform.architecture,
+ "machine_os_name": machine_platform.os_name,
+ "machine_platform": machine_platform.platform,
+ "machine_architecture": machine_platform.architecture,
+ "job_group_name": job_group.name,
+ "job_group_symbol": job_group.symbol,
+ "job_type_name": job_type.name,
+ "job_type_symbol": job_type.symbol,
+ "option_collection_hash": option_collection_hash,
},
)
- tier = job_datum.get('tier') or 1
+ tier = job_datum.get("tier") or 1
- result = job_datum.get('result', 'unknown')
+ result = job_datum.get("result", "unknown")
- submit_time = datetime.fromtimestamp(_get_number(job_datum.get('submit_timestamp')))
- start_time = datetime.fromtimestamp(_get_number(job_datum.get('start_timestamp')))
- end_time = datetime.fromtimestamp(_get_number(job_datum.get('end_timestamp')))
+ submit_time = datetime.fromtimestamp(_get_number(job_datum.get("submit_timestamp")))
+ start_time = datetime.fromtimestamp(_get_number(job_datum.get("start_timestamp")))
+ end_time = datetime.fromtimestamp(_get_number(job_datum.get("end_timestamp")))
# first, try to create the job with the given guid (if it doesn't
# exist yet)
@@ -246,12 +246,12 @@ def _load_job(repository, job_datum, push_id):
job = Job.objects.get(guid=job_guid)
# add taskcluster metadata if applicable
- if all([k in job_datum for k in ['taskcluster_task_id', 'taskcluster_retry_id']]):
+ if all([k in job_datum for k in ["taskcluster_task_id", "taskcluster_retry_id"]]):
try:
TaskclusterMetadata.objects.create(
job=job,
- task_id=job_datum['taskcluster_task_id'],
- retry_id=job_datum['taskcluster_retry_id'],
+ task_id=job_datum["taskcluster_task_id"],
+ retry_id=job_datum["taskcluster_retry_id"],
)
except IntegrityError:
pass
@@ -277,25 +277,25 @@ def _load_job(repository, job_datum, push_id):
push_id=push_id,
)
- log_refs = job_datum.get('log_references', [])
+ log_refs = job_datum.get("log_references", [])
job_logs = []
if log_refs:
for log in log_refs:
- name = log.get('name') or 'unknown'
+ name = log.get("name") or "unknown"
name = name[0:50]
- url = log.get('url') or 'unknown'
+ url = log.get("url") or "unknown"
url = url[0:255]
parse_status_map = dict([(k, v) for (v, k) in JobLog.STATUSES])
- mapped_status = parse_status_map.get(log.get('parse_status'))
+ mapped_status = parse_status_map.get(log.get("parse_status"))
if mapped_status:
parse_status = mapped_status
else:
parse_status = JobLog.PENDING
jl, _ = JobLog.objects.get_or_create(
- job=job, name=name, url=url, defaults={'status': parse_status}
+ job=job, name=name, url=url, defaults={"status": parse_status}
)
job_logs.append(jl)
@@ -345,7 +345,7 @@ def _schedule_log_parsing(job, job_logs, result, repository):
# TODO: Replace the use of different queues for failures vs not with the
# RabbitMQ priority feature (since the idea behind separate queues was
# only to ensure failures are dealt with first if there is a backlog).
- if result != 'success':
+ if result != "success":
if job_log.name == "errorsummary_json":
queue = "log_parser_fail_json"
priority = "failures"
@@ -357,7 +357,7 @@ def _schedule_log_parsing(job, job_logs, result, repository):
else:
queue += "_unsheriffed"
else:
- queue = 'log_parser'
+ queue = "log_parser"
priority = "normal"
parse_logs.apply_async(queue=queue, args=[job.id, [job_log.id], priority])
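The branch above routes logs from non-successful jobs to dedicated failure queues so a backlog of green jobs cannot delay failure parsing, with errorsummary logs split from the rest and a further sheriffed/unsheriffed split. A simplified sketch of that idea; the "log_parser_fail_raw" name and the boolean sheriffed flag are assumptions standing in for branches these hunks do not show:

    # Simplified sketch of the queue routing; some names are assumed, see above.
    def pick_queue(result, log_name, sheriffed):
        if result != "success":
            queue = "log_parser_fail_json" if log_name == "errorsummary_json" else "log_parser_fail_raw"
            queue += "_sheriffed" if sheriffed else "_unsheriffed"
            priority = "failures"
        else:
            queue, priority = "log_parser", "normal"
        return queue, priority

    print(pick_queue("testfailed", "errorsummary_json", sheriffed=False))
    # ('log_parser_fail_json_unsheriffed', 'failures')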
@@ -434,13 +434,13 @@ def store_job_data(repository, originalData):
# being said, if/when we transition to only using the pulse
# job consumer, then the data will always be vetted with a
# JSON schema before we get to this point.
- job = datum['job']
- revision = datum['revision']
- superseded = datum.get('superseded', [])
+ job = datum["job"]
+ revision = datum["revision"]
+ superseded = datum.get("superseded", [])
- revision_field = 'revision__startswith' if len(revision) < 40 else 'revision'
- filter_kwargs = {'repository': repository, revision_field: revision}
- push_id = Push.objects.values_list('id', flat=True).get(**filter_kwargs)
+ revision_field = "revision__startswith" if len(revision) < 40 else "revision"
+ filter_kwargs = {"repository": repository, revision_field: revision}
+ push_id = Push.objects.values_list("id", flat=True).get(**filter_kwargs)
# load job
job_guid = _load_job(repository, job, push_id)
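The push lookup above accepts either a full 40-character changeset or an abbreviated prefix; anything shorter than 40 characters switches the ORM lookup to revision__startswith. A sketch of just that dispatch (repository and revision values are made up):

    # Sketch: build the same Django-style lookup kwargs for abbreviated vs. full revisions.
    def push_filter_kwargs(repository, revision):
        field = "revision__startswith" if len(revision) < 40 else "revision"
        return {"repository": repository, field: revision}

    print(push_filter_kwargs("autoland", "0123456789ab"))
    # {'repository': 'autoland', 'revision__startswith': '0123456789ab'}
    print(push_filter_kwargs("autoland", "0123456789abcdef0123456789abcdef01234567"))
    # {'repository': 'autoland', 'revision': '0123456789abcdef0123456789abcdef01234567'}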
@@ -455,7 +455,7 @@ def store_job_data(repository, originalData):
# rather report it on New Relic and not block storing the remaining jobs.
# TODO: Once buildbot support is removed, remove this as part of
# refactoring this method to process just one job at a time.
- if 'DYNO' not in os.environ:
+ if "DYNO" not in os.environ:
raise
logger.exception(e)
@@ -471,5 +471,5 @@ def store_job_data(repository, originalData):
if superseded_job_guid_placeholders:
for job_guid, superseded_by_guid in superseded_job_guid_placeholders:
Job.objects.filter(guid=superseded_by_guid).update(
- result='superseded', state='completed'
+ result="superseded", state="completed"
)
diff --git a/treeherder/etl/management/commands/ingest.py b/treeherder/etl/management/commands/ingest.py
index c062012f1fb..f86c75b7c14 100644
--- a/treeherder/etl/management/commands/ingest.py
+++ b/treeherder/etl/management/commands/ingest.py
@@ -71,20 +71,20 @@ def ingest_hg_push(options):
project = options["project"]
commit = options["commit"]
- if not options['last_n_pushes'] and not commit:
- raise CommandError('must specify --last_n_pushes or a positional commit argument')
- elif options['last_n_pushes'] and options['ingest_all_tasks']:
- raise CommandError('Can\'t specify last_n_pushes and ingest_all_tasks at same time')
- elif options['last_n_pushes'] and options['commit']:
- raise CommandError('Can\'t specify last_n_pushes and commit/revision at the same time')
+ if not options["last_n_pushes"] and not commit:
+ raise CommandError("must specify --last_n_pushes or a positional commit argument")
+ elif options["last_n_pushes"] and options["ingest_all_tasks"]:
+ raise CommandError("Can't specify last_n_pushes and ingest_all_tasks at same time")
+ elif options["last_n_pushes"] and options["commit"]:
+ raise CommandError("Can't specify last_n_pushes and commit/revision at the same time")
repo = Repository.objects.get(name=project, active_status="active")
fetch_push_id = None
- if options['last_n_pushes']:
+ if options["last_n_pushes"]:
last_push_id = last_push_id_from_server(repo)
- fetch_push_id = max(1, last_push_id - options['last_n_pushes'])
+ fetch_push_id = max(1, last_push_id - options["last_n_pushes"])
logger.info(
- 'last server push id: %d; fetching push %d and newer',
+ "last server push id: %d; fetching push %d and newer",
last_push_id,
fetch_push_id,
)
@@ -227,21 +227,21 @@ def process_job_with_threads(pulse_job, root_url):
try:
JobLoader().process_job(pulse_job, root_url)
except MissingPushException:
- logger.warning('The push was not in the DB. We are going to try that first')
+ logger.warning("The push was not in the DB. We are going to try that first")
ingest_push(pulse_job["origin"]["project"], pulse_job["origin"]["revision"])
JobLoader().process_job(pulse_job, root_url)
def find_task_id(index_path, root_url):
- index_url = liburls.api(root_url, 'index', 'v1', 'task/{}'.format(index_path))
+ index_url = liburls.api(root_url, "index", "v1", "task/{}".format(index_path))
response = requests.get(index_url)
if response.status_code == 404:
raise Exception("Index URL {} not found".format(index_url))
- return response.json()['taskId']
+ return response.json()["taskId"]
def get_decision_task_id(project, revision, root_url):
- index_fmt = 'gecko.v2.{}.revision.{}.taskgraph.decision'
+ index_fmt = "gecko.v2.{}.revision.{}.taskgraph.decision"
index_path = index_fmt.format(project, revision)
return find_task_id(index_path, root_url)
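find_task_id and get_decision_task_id above resolve a push's Decision task through the Taskcluster index using a deterministic path. A usage sketch showing only how the index path and URL are assembled (the project and revision values are illustrative and no request is made):

    # Illustrative values; mirrors the index path / URL shape used above.
    root_url = "https://firefox-ci-tc.services.mozilla.com"
    project = "autoland"
    revision = "0123456789abcdef0123456789abcdef01234567"

    index_path = "gecko.v2.{}.revision.{}.taskgraph.decision".format(project, revision)
    index_url = "{}/api/index/v1/task/{}".format(root_url, index_path)
    print(index_url)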
@@ -343,7 +343,7 @@ def github_push_to_pulse(repo_meta, commit):
def ingest_push(project, revision, fetch_push_id=None):
_repo = repo_meta(project)
- if _repo['url'].startswith('https://github.com'):
+ if _repo["url"].startswith("https://github.com"):
pulse = github_push_to_pulse(_repo, revision)
PushLoader().process(pulse["payload"], pulse["exchange"], _repo["tc_root_url"])
else:
@@ -482,4 +482,4 @@ def handle(self, *args, **options):
elif typeOfIngestion == "push":
ingest_hg_push(options)
else:
- raise Exception('Please check the code for valid ingestion types.')
+ raise Exception("Please check the code for valid ingestion types.")
diff --git a/treeherder/etl/management/commands/publish_to_pulse.py b/treeherder/etl/management/commands/publish_to_pulse.py
index c42ffcbc0d2..33e3c54a32c 100644
--- a/treeherder/etl/management/commands/publish_to_pulse.py
+++ b/treeherder/etl/management/commands/publish_to_pulse.py
@@ -20,12 +20,12 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- 'routing_key', help="The routing key for publishing. Ex: 'autoland.staging'"
+ "routing_key", help="The routing key for publishing. Ex: 'autoland.staging'"
)
parser.add_argument(
- 'connection_url', help="The Pulse url. Ex: 'amqp://guest:guest@localhost:5672/'"
+ "connection_url", help="The Pulse url. Ex: 'amqp://guest:guest@localhost:5672/'"
)
- parser.add_argument('payload_file', help="Path to the file that holds the job payload JSON")
+ parser.add_argument("payload_file", help="Path to the file that holds the job payload JSON")
def handle(self, *args, **options):
routing_key = options["routing_key"]
diff --git a/treeherder/etl/management/commands/pulse_listener_pushes.py b/treeherder/etl/management/commands/pulse_listener_pushes.py
index c59b91587b8..fdc383eb422 100644
--- a/treeherder/etl/management/commands/pulse_listener_pushes.py
+++ b/treeherder/etl/management/commands/pulse_listener_pushes.py
@@ -17,7 +17,7 @@ class Command(BaseCommand):
help = "Read pushes from a set of pulse exchanges and queue for ingestion"
def handle(self, *args, **options):
- if env.bool('SKIP_INGESTION', default=False):
+ if env.bool("SKIP_INGESTION", default=False):
self.stdout.write("Skipping ingestion of Pulse Pushes")
return
# Specifies the Pulse services from which Treeherder will ingest push
diff --git a/treeherder/etl/management/commands/pulse_listener_tasks.py b/treeherder/etl/management/commands/pulse_listener_tasks.py
index 605a6aadb30..000321189a2 100644
--- a/treeherder/etl/management/commands/pulse_listener_tasks.py
+++ b/treeherder/etl/management/commands/pulse_listener_tasks.py
@@ -17,7 +17,7 @@ class Command(BaseCommand):
help = "Read jobs from a set of pulse exchanges and queue for ingestion"
def handle(self, *args, **options):
- if env.bool('SKIP_INGESTION', default=False):
+ if env.bool("SKIP_INGESTION", default=False):
self.stdout.write("Skipping ingestion of Pulse Tasks")
return
# Specifies the Pulse services from which Treeherder will consume task
diff --git a/treeherder/etl/management/commands/pulse_listener_tasks_classification.py b/treeherder/etl/management/commands/pulse_listener_tasks_classification.py
index 4f0a2c0a994..e0768515264 100644
--- a/treeherder/etl/management/commands/pulse_listener_tasks_classification.py
+++ b/treeherder/etl/management/commands/pulse_listener_tasks_classification.py
@@ -18,7 +18,7 @@ class Command(BaseCommand):
help = "Read mozci classification jobs from a set of pulse exchanges and queue for ingestion"
def handle(self, *args, **options):
- if env.bool('SKIP_INGESTION', default=False):
+ if env.bool("SKIP_INGESTION", default=False):
self.stdout.write("Skipping ingestion of Pulse Mozci Classification Tasks")
return
# Specifies the Pulse services from which Treeherder will consume task
diff --git a/treeherder/etl/perf.py b/treeherder/etl/perf.py
index 9a26ce3f6c1..182f943a6c8 100644
--- a/treeherder/etl/perf.py
+++ b/treeherder/etl/perf.py
@@ -23,16 +23,16 @@
def _get_application_name(validated_perf_datum: dict):
try:
- return validated_perf_datum['application']['name']
+ return validated_perf_datum["application"]["name"]
except KeyError:
- return ''
+ return ""
def _get_application_version(validated_perf_datum: dict):
try:
- return validated_perf_datum['application']['version']
+ return validated_perf_datum["application"]["version"]
except KeyError:
- return ''
+ return ""
def _get_signature_hash(signature_properties):
@@ -46,13 +46,13 @@ def _get_signature_hash(signature_properties):
signature_prop_values.extend(str_values)
sha = sha1()
- sha.update(''.join(map(str, sorted(signature_prop_values))).encode('utf-8'))
+ sha.update("".join(map(str, sorted(signature_prop_values))).encode("utf-8"))
return sha.hexdigest()
def _order_and_concat(words: List) -> str:
- return ' '.join(sorted(words))
+ return " ".join(sorted(words))
def _create_or_update_signature(repository, signature_hash, framework, application, defaults):
@@ -64,8 +64,8 @@ def _create_or_update_signature(repository, signature_hash, framework, applicati
defaults=defaults,
)
if not created:
- if signature.last_updated > defaults['last_updated']:
- defaults['last_updated'] = signature.last_updated
+ if signature.last_updated > defaults["last_updated"]:
+ defaults["last_updated"] = signature.last_updated
signature, _ = PerformanceSignature.objects.update_or_create(
repository=repository,
signature_hash=signature_hash,
@@ -82,7 +82,7 @@ def _deduce_push_timestamp(perf_datum: dict, job_push_time: datetime) -> Tuple[d
# the old way of ingestion
return job_push_time, is_multi_commit
- multidata_timestamp = perf_datum.get('pushTimestamp', None)
+ multidata_timestamp = perf_datum.get("pushTimestamp", None)
if multidata_timestamp:
multidata_timestamp = datetime.fromtimestamp(multidata_timestamp)
is_multi_commit = True
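Perf data that carries its own pushTimestamp is flagged as a multi-commit datum and keeps that timestamp; otherwise the job's push time is used. A compact sketch of the same decision, assuming perf_datum is a plain dict:

    # Sketch of the timestamp choice above; perf_datum is a plain dict here.
    from datetime import datetime

    def deduce_push_timestamp(perf_datum, job_push_time):
        multidata_timestamp = perf_datum.get("pushTimestamp")
        if multidata_timestamp:
            return datetime.fromtimestamp(multidata_timestamp), True  # multi-commit datum
        return job_push_time, False  # old-style ingestion

    print(deduce_push_timestamp({"pushTimestamp": 1705600000}, datetime(2024, 1, 18)))
    print(deduce_push_timestamp({}, datetime(2024, 1, 18)))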
@@ -111,7 +111,7 @@ def _test_should_alert_based_on(
property)
"""
return (
- (signature.should_alert or (signature.should_alert is None and suite.get('value') is None))
+ (signature.should_alert or (signature.should_alert is None and suite.get("value") is None))
and new_datum_ingested
and job.repository.performance_alerts_enabled
and job.tier_is_sheriffable
@@ -140,8 +140,8 @@ def _load_perf_datum(job: Job, perf_datum: dict):
extra_properties = {}
reference_data = {
- 'option_collection_hash': job.signature.option_collection_hash,
- 'machine_platform': job.signature.machine_platform,
+ "option_collection_hash": job.signature.option_collection_hash,
+ "machine_platform": job.signature.machine_platform,
}
option_collection = OptionCollection.objects.get(
@@ -149,38 +149,38 @@ def _load_perf_datum(job: Job, perf_datum: dict):
)
try:
- framework = PerformanceFramework.objects.get(name=perf_datum['framework']['name'])
+ framework = PerformanceFramework.objects.get(name=perf_datum["framework"]["name"])
except PerformanceFramework.DoesNotExist:
- if perf_datum['framework']['name'] == "job_resource_usage":
+ if perf_datum["framework"]["name"] == "job_resource_usage":
return
logger.warning(
"Performance framework %s does not exist, skipping " "load of performance artifacts",
- perf_datum['framework']['name'],
+ perf_datum["framework"]["name"],
)
return
if not framework.enabled:
logger.info(
- "Performance framework %s is not enabled, skipping", perf_datum['framework']['name']
+ "Performance framework %s is not enabled, skipping", perf_datum["framework"]["name"]
)
return
application = _get_application_name(perf_datum)
application_version = _get_application_version(perf_datum)
- for suite in perf_datum['suites']:
+ for suite in perf_datum["suites"]:
suite_extra_properties = copy.copy(extra_properties)
- ordered_tags = _order_and_concat(suite.get('tags', []))
+ ordered_tags = _order_and_concat(suite.get("tags", []))
deduced_timestamp, is_multi_commit = _deduce_push_timestamp(perf_datum, job.push.time)
- suite_extra_options = ''
+ suite_extra_options = ""
- if suite.get('extraOptions'):
- suite_extra_properties = {'test_options': sorted(suite['extraOptions'])}
- suite_extra_options = _order_and_concat(suite['extraOptions'])
+ if suite.get("extraOptions"):
+ suite_extra_properties = {"test_options": sorted(suite["extraOptions"])}
+ suite_extra_options = _order_and_concat(suite["extraOptions"])
summary_signature_hash = None
# if we have a summary value, create or get its signature by all its subtest
# properties.
- if suite.get('value') is not None:
+ if suite.get("value") is not None:
# summary series
- summary_properties = {'suite': suite['name']}
+ summary_properties = {"suite": suite["name"]}
summary_properties.update(reference_data)
summary_properties.update(suite_extra_properties)
summary_signature_hash = _get_signature_hash(summary_properties)
@@ -190,27 +190,27 @@ def _load_perf_datum(job: Job, perf_datum: dict):
framework,
application,
{
- 'test': '',
- 'suite': suite['name'],
- 'suite_public_name': suite.get('publicName'),
- 'option_collection': option_collection,
- 'platform': job.machine_platform,
- 'tags': ordered_tags,
- 'extra_options': suite_extra_options,
- 'measurement_unit': suite.get('unit'),
- 'lower_is_better': suite.get('lowerIsBetter', True),
- 'has_subtests': True,
+ "test": "",
+ "suite": suite["name"],
+ "suite_public_name": suite.get("publicName"),
+ "option_collection": option_collection,
+ "platform": job.machine_platform,
+ "tags": ordered_tags,
+ "extra_options": suite_extra_options,
+ "measurement_unit": suite.get("unit"),
+ "lower_is_better": suite.get("lowerIsBetter", True),
+ "has_subtests": True,
# these properties below can be either True, False, or null
# (None). Null indicates no preference has been set.
- 'should_alert': suite.get('shouldAlert'),
- 'alert_change_type': PerformanceSignature._get_alert_change_type(
- suite.get('alertChangeType')
+ "should_alert": suite.get("shouldAlert"),
+ "alert_change_type": PerformanceSignature._get_alert_change_type(
+ suite.get("alertChangeType")
),
- 'alert_threshold': suite.get('alertThreshold'),
- 'min_back_window': suite.get('minBackWindow'),
- 'max_back_window': suite.get('maxBackWindow'),
- 'fore_window': suite.get('foreWindow'),
- 'last_updated': job.push.time,
+ "alert_threshold": suite.get("alertThreshold"),
+ "min_back_window": suite.get("minBackWindow"),
+ "max_back_window": suite.get("maxBackWindow"),
+ "fore_window": suite.get("foreWindow"),
+ "last_updated": job.push.time,
},
)
@@ -220,23 +220,23 @@ def _load_perf_datum(job: Job, perf_datum: dict):
push=job.push,
signature=signature,
push_timestamp=deduced_timestamp,
- defaults={'value': suite['value'], 'application_version': application_version},
+ defaults={"value": suite["value"], "application_version": application_version},
)
if suite_datum.should_mark_as_multi_commit(is_multi_commit, datum_created):
# keep a register with all multi commit perf data
MultiCommitDatum.objects.create(perf_datum=suite_datum)
if _suite_should_alert_based_on(signature, job, datum_created):
- generate_alerts.apply_async(args=[signature.id], queue='generate_perf_alerts')
+ generate_alerts.apply_async(args=[signature.id], queue="generate_perf_alerts")
- for subtest in suite['subtests']:
- subtest_properties = {'suite': suite['name'], 'test': subtest['name']}
+ for subtest in suite["subtests"]:
+ subtest_properties = {"suite": suite["name"], "test": subtest["name"]}
subtest_properties.update(reference_data)
subtest_properties.update(suite_extra_properties)
summary_signature = None
if summary_signature_hash is not None:
- subtest_properties.update({'parent_signature': summary_signature_hash})
+ subtest_properties.update({"parent_signature": summary_signature_hash})
summary_signature = PerformanceSignature.objects.get(
repository=job.repository,
framework=framework,
@@ -245,9 +245,9 @@ def _load_perf_datum(job: Job, perf_datum: dict):
)
subtest_signature_hash = _get_signature_hash(subtest_properties)
value = list(
- subtest['value']
- for subtest in suite['subtests']
- if subtest['name'] == subtest_properties['test']
+ subtest["value"]
+ for subtest in suite["subtests"]
+ if subtest["name"] == subtest_properties["test"]
)
signature = _create_or_update_signature(
job.repository,
@@ -255,30 +255,30 @@ def _load_perf_datum(job: Job, perf_datum: dict):
framework,
application,
{
- 'test': subtest_properties['test'],
- 'suite': suite['name'],
- 'test_public_name': subtest.get('publicName'),
- 'suite_public_name': suite.get('publicName'),
- 'option_collection': option_collection,
- 'platform': job.machine_platform,
- 'tags': ordered_tags,
- 'extra_options': suite_extra_options,
- 'measurement_unit': subtest.get('unit'),
- 'lower_is_better': subtest.get('lowerIsBetter', True),
- 'has_subtests': False,
+ "test": subtest_properties["test"],
+ "suite": suite["name"],
+ "test_public_name": subtest.get("publicName"),
+ "suite_public_name": suite.get("publicName"),
+ "option_collection": option_collection,
+ "platform": job.machine_platform,
+ "tags": ordered_tags,
+ "extra_options": suite_extra_options,
+ "measurement_unit": subtest.get("unit"),
+ "lower_is_better": subtest.get("lowerIsBetter", True),
+ "has_subtests": False,
# these properties below can be either True, False, or
# null (None). Null indicates no preference has been
# set.
- 'should_alert': subtest.get('shouldAlert'),
- 'alert_change_type': PerformanceSignature._get_alert_change_type(
- subtest.get('alertChangeType')
+ "should_alert": subtest.get("shouldAlert"),
+ "alert_change_type": PerformanceSignature._get_alert_change_type(
+ subtest.get("alertChangeType")
),
- 'alert_threshold': subtest.get('alertThreshold'),
- 'min_back_window': subtest.get('minBackWindow'),
- 'max_back_window': subtest.get('maxBackWindow'),
- 'fore_window': subtest.get('foreWindow'),
- 'parent_signature': summary_signature,
- 'last_updated': job.push.time,
+ "alert_threshold": subtest.get("alertThreshold"),
+ "min_back_window": subtest.get("minBackWindow"),
+ "max_back_window": subtest.get("maxBackWindow"),
+ "fore_window": subtest.get("foreWindow"),
+ "parent_signature": summary_signature,
+ "last_updated": job.push.time,
},
)
(subtest_datum, datum_created) = PerformanceDatum.objects.get_or_create(
@@ -287,7 +287,7 @@ def _load_perf_datum(job: Job, perf_datum: dict):
push=job.push,
signature=signature,
push_timestamp=deduced_timestamp,
- defaults={'value': value[0], 'application_version': application_version},
+ defaults={"value": value[0], "application_version": application_version},
)
if _test_should_gather_replicates_based_on(
@@ -313,12 +313,12 @@ def _load_perf_datum(job: Job, perf_datum: dict):
MultiCommitDatum.objects.create(perf_datum=subtest_datum)
if _test_should_alert_based_on(signature, job, datum_created, suite):
- generate_alerts.apply_async(args=[signature.id], queue='generate_perf_alerts')
+ generate_alerts.apply_async(args=[signature.id], queue="generate_perf_alerts")
def store_performance_artifact(job, artifact):
- blob = json.loads(artifact['blob'])
- performance_data = blob['performance_data']
+ blob = json.loads(artifact["blob"])
+ performance_data = blob["performance_data"]
if isinstance(performance_data, list):
for perfdatum in performance_data:
diff --git a/treeherder/etl/push.py b/treeherder/etl/push.py
index 3d3ea7b2a72..0cbe7199b14 100644
--- a/treeherder/etl/push.py
+++ b/treeherder/etl/push.py
@@ -9,23 +9,23 @@
def store_push(repository, push_dict):
- push_revision = push_dict.get('revision')
- if not push_dict.get('revision'):
+ push_revision = push_dict.get("revision")
+ if not push_dict.get("revision"):
raise ValueError("Push must have a revision " "associated with it!")
with transaction.atomic():
push, _ = Push.objects.update_or_create(
repository=repository,
revision=push_revision,
defaults={
- 'author': push_dict['author'],
- 'time': datetime.utcfromtimestamp(push_dict['push_timestamp']),
+ "author": push_dict["author"],
+ "time": datetime.utcfromtimestamp(push_dict["push_timestamp"]),
},
)
- for revision in push_dict['revisions']:
+ for revision in push_dict["revisions"]:
Commit.objects.update_or_create(
push=push,
- revision=revision['revision'],
- defaults={'author': revision['author'], 'comments': revision['comment']},
+ revision=revision["revision"],
+ defaults={"author": revision["author"], "comments": revision["comment"]},
)
diff --git a/treeherder/etl/push_loader.py b/treeherder/etl/push_loader.py
index 5b05c3ead86..4d64b419f15 100644
--- a/treeherder/etl/push_loader.py
+++ b/treeherder/etl/push_loader.py
@@ -99,7 +99,7 @@ def process_push(self, push_data):
revisions.append(
{
"comment": commit["commit"]["message"],
- "author": u"{} <{}>".format(
+ "author": "{} <{}>".format(
commit["commit"]["author"]["name"], commit["commit"]["author"]["email"]
),
"revision": commit["sha"],
@@ -266,7 +266,7 @@ def fetch_push(self, url, repository, sha=None):
commits = []
# we only want to ingest the last 200 commits for each push,
# to protect against the 5000+ commit merges on release day uplift.
- for commit in push['changesets'][-200:]:
+ for commit in push["changesets"][-200:]:
commits.append(
{
"revision": commit["node"],
diff --git a/treeherder/etl/pushlog.py b/treeherder/etl/pushlog.py
index 4cdd5b09c26..49e703bca6f 100644
--- a/treeherder/etl/pushlog.py
+++ b/treeherder/etl/pushlog.py
@@ -16,9 +16,9 @@
def last_push_id_from_server(repo):
"""Obtain the last push ID from a ``Repository`` instance."""
- url = '%s/json-pushes/?version=2' % repo.url
+ url = "%s/json-pushes/?version=2" % repo.url
data = fetch_json(url)
- return data['lastpushid']
+ return data["lastpushid"]
class HgPushlogProcess:
@@ -36,24 +36,24 @@ def transform_push(self, push):
commits = []
# we only want to ingest the last 200 commits for each push,
# to protect against the 5000+ commit merges on release day uplift.
- for commit in push['changesets'][-200:]:
+ for commit in push["changesets"][-200:]:
commits.append(
{
- 'revision': commit['node'],
- 'author': commit['author'],
- 'comment': commit['desc'],
+ "revision": commit["node"],
+ "author": commit["author"],
+ "comment": commit["desc"],
}
)
return {
- 'revision': commits[-1]["revision"],
- 'author': push['user'],
- 'push_timestamp': push['date'],
- 'revisions': commits,
+ "revision": commits[-1]["revision"],
+ "author": push["user"],
+ "push_timestamp": push["date"],
+ "revisions": commits,
}
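transform_push above keeps only the newest 200 changesets of a push (guarding against 5000+ commit merges on uplift days) and reports the last of them as the push revision. A small sketch with a fabricated two-changeset push payload:

    # Fabricated hg push payload; mirrors the transformation above.
    push = {
        "user": "sheriff@example.com",
        "date": 1705600000,
        "changesets": [
            {"node": "aaa111", "author": "a@example.com", "desc": "Bug 1 - first patch"},
            {"node": "bbb222", "author": "b@example.com", "desc": "Bug 2 - second patch"},
        ],
    }
    commits = [
        {"revision": c["node"], "author": c["author"], "comment": c["desc"]}
        for c in push["changesets"][-200:]
    ]
    transformed = {
        "revision": commits[-1]["revision"],  # the newest changeset names the push
        "author": push["user"],
        "push_timestamp": push["date"],
        "revisions": commits,
    }
    print(transformed["revision"])  # bbb222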
def run(self, source_url, repository_name, changeset=None, last_push_id=None):
- cache_key = '{}:last_push_id'.format(repository_name)
+ cache_key = "{}:last_push_id".format(repository_name)
if not last_push_id:
# get the last object seen from cache. this will
# reduce the number of pushes processed every time
@@ -73,7 +73,7 @@ def run(self, source_url, repository_name, changeset=None, last_push_id=None):
# ``startID`` to get all new pushes from that point forward.
extracted_content = self.extract(startid_url)
- if extracted_content['lastpushid'] < last_push_id:
+ if extracted_content["lastpushid"] < last_push_id:
# Push IDs from Mercurial are incremental. If we cached a value
# from one call to this API, and a subsequent call told us that
# the ``lastpushid`` is LOWER than the one we have cached, then
@@ -83,7 +83,7 @@ def run(self, source_url, repository_name, changeset=None, last_push_id=None):
logger.warning(
"Got a ``lastpushid`` value of %s lower than the cached value of %s "
"due to Mercurial repo reset. Getting latest changes for '%s' instead",
- extracted_content['lastpushid'],
+ extracted_content["lastpushid"],
last_push_id,
repository_name,
)
@@ -104,7 +104,7 @@ def run(self, source_url, repository_name, changeset=None, last_push_id=None):
)
extracted_content = self.extract(source_url)
- pushes = extracted_content['pushes']
+ pushes = extracted_content["pushes"]
# `pushes` could be empty if there are no new ones since we last fetched
if not pushes:
@@ -118,7 +118,7 @@ def run(self, source_url, repository_name, changeset=None, last_push_id=None):
repository = Repository.objects.get(name=repository_name)
for push in pushes.values():
- if not push['changesets']:
+ if not push["changesets"]:
# A push without commits means it was marked as obsolete (see bug 1286426).
# Without them it's not possible to calculate the push revision required for ingestion.
continue
diff --git a/treeherder/etl/runnable_jobs.py b/treeherder/etl/runnable_jobs.py
index 59a413f3db4..4fcad728b1f 100644
--- a/treeherder/etl/runnable_jobs.py
+++ b/treeherder/etl/runnable_jobs.py
@@ -8,8 +8,8 @@
logger = logging.getLogger(__name__)
-RUNNABLE_JOBS_URL = 'https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/{task_id}/runs/{run_number}/artifacts/public/runnable-jobs.json'
-TASKCLUSTER_INDEX_URL = 'https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.%s.latest.taskgraph.decision'
+RUNNABLE_JOBS_URL = "https://firefox-ci-tc.services.mozilla.com/api/queue/v1/task/{task_id}/runs/{run_number}/artifacts/public/runnable-jobs.json"
+TASKCLUSTER_INDEX_URL = "https://firefox-ci-tc.services.mozilla.com/api/index/v1/task/gecko.v2.%s.latest.taskgraph.decision"
def _taskcluster_runnable_jobs(project):
@@ -24,30 +24,30 @@ def _taskcluster_runnable_jobs(project):
try:
validate(tc_graph_url)
except ValidationError:
- logger.warning('Failed to validate %s', tc_graph_url)
+ logger.warning("Failed to validate %s", tc_graph_url)
return []
try:
tc_graph = fetch_json(tc_graph_url)
except requests.exceptions.HTTPError as e:
logger.info(
- 'HTTPError %s when getting taskgraph at %s', e.response.status_code, tc_graph_url
+ "HTTPError %s when getting taskgraph at %s", e.response.status_code, tc_graph_url
)
continue
return [
{
- 'build_platform': node.get('platform', ''),
- 'build_system_type': 'taskcluster',
- 'job_group_name': node.get('groupName', ''),
- 'job_group_symbol': node.get('groupSymbol', ''),
- 'job_type_name': label,
- 'job_type_symbol': node['symbol'],
- 'platform': node.get('platform'),
- 'platform_option': ' '.join(node.get('collection', {}).keys()),
- 'ref_data_name': label,
- 'state': 'runnable',
- 'result': 'runnable',
+ "build_platform": node.get("platform", ""),
+ "build_system_type": "taskcluster",
+ "job_group_name": node.get("groupName", ""),
+ "job_group_symbol": node.get("groupSymbol", ""),
+ "job_type_name": label,
+ "job_type_symbol": node["symbol"],
+ "platform": node.get("platform"),
+ "platform_option": " ".join(node.get("collection", {}).keys()),
+ "ref_data_name": label,
+ "state": "runnable",
+ "result": "runnable",
}
for label, node in tc_graph.items()
]
@@ -61,15 +61,15 @@ def list_runnable_jobs(project):
def query_latest_gecko_decision_task_id(project):
url = TASKCLUSTER_INDEX_URL % project
- logger.info('Fetching %s', url)
+ logger.info("Fetching %s", url)
try:
latest_task = fetch_json(url)
- task_id = latest_task['taskId']
- logger.info('For %s we found the task id: %s', project, task_id)
+ task_id = latest_task["taskId"]
+ logger.info("For %s we found the task id: %s", project, task_id)
except requests.exceptions.HTTPError as e:
# Specifically handle 404 errors, as it means there's no decision task on this push
if e.response.status_code == 404:
- logger.info('For %s we did not find a task id', project)
+ logger.info("For %s we did not find a task id", project)
task_id = None
else:
raise
diff --git a/treeherder/etl/taskcluster_pulse/handler.py b/treeherder/etl/taskcluster_pulse/handler.py
index 248b2860007..57f9944f862 100644
--- a/treeherder/etl/taskcluster_pulse/handler.py
+++ b/treeherder/etl/taskcluster_pulse/handler.py
@@ -102,16 +102,16 @@ def ignore_task(task, taskId, rootUrl, project):
ignore = False
# This logic is useful to reduce the number of tasks we ingest, requiring
# fewer dynos and fewer database writes. You can adjust PROJECTS_TO_INGEST on the app to meet your needs
- if projectsToIngest and project not in projectsToIngest.split(','):
+ if projectsToIngest and project not in projectsToIngest.split(","):
logger.debug("Ignoring tasks not matching PROJECTS_TO_INGEST (Task id: %s)", taskId)
return True
mobile_repos = (
- 'fenix',
- 'firefox-android',
- 'reference-browser',
- 'mozilla-vpn-client',
- 'mozilla-vpn-client-release',
+ "fenix",
+ "firefox-android",
+ "reference-browser",
+ "mozilla-vpn-client",
+ "mozilla-vpn-client-release",
)
if project in mobile_repos:
envs = task["payload"].get("env", {})
@@ -144,19 +144,19 @@ def ignore_task(task, taskId, rootUrl, project):
ignore = True
for scope in scopes:
# e.g. assume:repo:github.com/mozilla-mobile/fenix:branch:master
- if scope.find('branch:master') != -1 or scope.find('branch:main') != -1:
+ if scope.find("branch:master") != -1 or scope.find("branch:main") != -1:
ignore = False
break
# This handles nightly tasks
# e.g. index.mobile.v2.fenix.branch.master.latest.taskgraph.decision-nightly
for route in decision_task["routes"]:
- if route.find('master') != -1 or route.find('main') != -1:
+ if route.find("master") != -1 or route.find("main") != -1:
ignore = False
break
if ignore:
- logger.debug('Task to be ignored ({})'.format(taskId))
+ logger.debug("Task to be ignored ({})".format(taskId))
return ignore
@@ -179,7 +179,7 @@ async def handleMessage(message, taskDefinition=None):
logger.debug("%s", str(e))
return jobs
- if ignore_task(task, taskId, message["root_url"], parsedRoute['project']):
+ if ignore_task(task, taskId, message["root_url"], parsedRoute["project"]):
return jobs
logger.debug("Message received for task %s", taskId)
@@ -290,12 +290,12 @@ def buildMessage(pushInfo, task, runId, payload):
def handleTaskPending(pushInfo, task, message):
- payload = message['payload']
+ payload = message["payload"]
return buildMessage(pushInfo, task, payload["runId"], payload)
async def handleTaskRerun(pushInfo, task, message, session):
- payload = message['payload']
+ payload = message["payload"]
job = buildMessage(pushInfo, task, payload["runId"] - 1, payload)
job["state"] = "completed"
job["result"] = "fail"
@@ -310,21 +310,21 @@ async def handleTaskRerun(pushInfo, task, message, session):
def handleTaskRunning(pushInfo, task, message):
- payload = message['payload']
+ payload = message["payload"]
job = buildMessage(pushInfo, task, payload["runId"], payload)
job["timeStarted"] = payload["status"]["runs"][payload["runId"]]["started"]
return job
async def handleTaskCompleted(pushInfo, task, message, session):
- payload = message['payload']
+ payload = message["payload"]
jobRun = payload["status"]["runs"][payload["runId"]]
job = buildMessage(pushInfo, task, payload["runId"], payload)
job["timeStarted"] = jobRun["started"]
job["timeCompleted"] = jobRun["resolved"]
job["logs"] = [
- createLogReference(message['root_url'], payload["status"]["taskId"], jobRun["runId"]),
+ createLogReference(message["root_url"], payload["status"]["taskId"], jobRun["runId"]),
]
job = await addArtifactUploadedLinks(
message["root_url"], payload["status"]["taskId"], payload["runId"], job, session
@@ -333,7 +333,7 @@ async def handleTaskCompleted(pushInfo, task, message, session):
async def handleTaskException(pushInfo, task, message, session):
- payload = message['payload']
+ payload = message["payload"]
jobRun = payload["status"]["runs"][payload["runId"]]
# Do not report runs that were created as an exception. Such cases
# are deadline-exceeded
diff --git a/treeherder/etl/taskcluster_pulse/parse_route.py b/treeherder/etl/taskcluster_pulse/parse_route.py
index aa0950ba8fc..49a95f2977e 100644
--- a/treeherder/etl/taskcluster_pulse/parse_route.py
+++ b/treeherder/etl/taskcluster_pulse/parse_route.py
@@ -15,10 +15,10 @@ def parseRoute(route):
id = None
owner = None
parsedProject = None
- parsedRoute = route.split('.')
+ parsedRoute = route.split(".")
project = parsedRoute[2]
- if len(project.split('/')) == 2:
- [owner, parsedProject] = project.split('/')
+ if len(project.split("/")) == 2:
+ [owner, parsedProject] = project.split("/")
else:
parsedProject = project
@@ -34,8 +34,8 @@ def parseRoute(route):
if owner and parsedProject:
pushInfo["owner"] = owner
- pushInfo["origin"] = 'github.com'
+ pushInfo["origin"] = "github.com"
else:
- pushInfo["origin"] = 'hg.mozilla.org'
+ pushInfo["origin"] = "hg.mozilla.org"
return pushInfo
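parseRoute above derives the project, owner, and origin from the third dot-separated segment of a pulse route: a segment containing a slash is treated as a GitHub owner/project pair, anything else as an hg.mozilla.org project. A sketch of just that dispatch; the route strings are illustrative, not taken from the patch:

    # Only the third dot-separated segment matters for project/owner detection.
    def parse_project(route):
        segment = route.split(".")[2]
        if "/" in segment:
            owner, project = segment.split("/")
            return {"owner": owner, "project": project, "origin": "github.com"}
        return {"project": segment, "origin": "hg.mozilla.org"}

    print(parse_project("tc-treeherder.v2.autoland.0123456789ab.12345"))
    print(parse_project("tc-treeherder.v2.mozilla-mobile/fenix.0123456789ab.67"))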
diff --git a/treeherder/etl/tasks/pulse_tasks.py b/treeherder/etl/tasks/pulse_tasks.py
index c7a6c0fb91b..7b39b3cb9c7 100644
--- a/treeherder/etl/tasks/pulse_tasks.py
+++ b/treeherder/etl/tasks/pulse_tasks.py
@@ -15,9 +15,9 @@
# that parameter have been processed
-@retryable_task(name='store-pulse-tasks', max_retries=10)
+@retryable_task(name="store-pulse-tasks", max_retries=10)
def store_pulse_tasks(
- pulse_job, exchange, routing_key, root_url='https://firefox-ci-tc.services.mozilla.com'
+ pulse_job, exchange, routing_key, root_url="https://firefox-ci-tc.services.mozilla.com"
):
"""
Fetches tasks from Taskcluster
@@ -40,9 +40,9 @@ def store_pulse_tasks(
JobLoader().process_job(run, root_url)
-@retryable_task(name='store-pulse-pushes', max_retries=10)
+@retryable_task(name="store-pulse-pushes", max_retries=10)
def store_pulse_pushes(
- body, exchange, routing_key, root_url='https://firefox-ci-tc.services.mozilla.com'
+ body, exchange, routing_key, root_url="https://firefox-ci-tc.services.mozilla.com"
):
"""
Fetches the pushes pending from pulse exchanges and loads them.
@@ -53,9 +53,9 @@ def store_pulse_pushes(
PushLoader().process(body, exchange, root_url)
-@retryable_task(name='store-pulse-pushes-classification', max_retries=10)
+@retryable_task(name="store-pulse-pushes-classification", max_retries=10)
def store_pulse_tasks_classification(
- pulse_job, exchange, routing_key, root_url='https://community-tc.services.mozilla.com'
+ pulse_job, exchange, routing_key, root_url="https://community-tc.services.mozilla.com"
):
"""
Fetches the Mozci classification associated to a task from Taskcluster
diff --git a/treeherder/etl/tasks/pushlog_tasks.py b/treeherder/etl/tasks/pushlog_tasks.py
index 22de93b0007..20ff3f8cf71 100644
--- a/treeherder/etl/tasks/pushlog_tasks.py
+++ b/treeherder/etl/tasks/pushlog_tasks.py
@@ -5,20 +5,20 @@
from treeherder.model.models import Repository
-@shared_task(name='fetch-push-logs')
+@shared_task(name="fetch-push-logs")
def fetch_push_logs():
"""
Run several fetch_hg_push_log subtasks, one per repository
"""
- for repo in Repository.objects.filter(dvcs_type='hg', active_status="active"):
- fetch_hg_push_log.apply_async(args=(repo.name, repo.url), queue='pushlog')
+ for repo in Repository.objects.filter(dvcs_type="hg", active_status="active"):
+ fetch_hg_push_log.apply_async(args=(repo.name, repo.url), queue="pushlog")
-@shared_task(name='fetch-hg-push-logs', soft_time_limit=10 * 60)
+@shared_task(name="fetch-hg-push-logs", soft_time_limit=10 * 60)
def fetch_hg_push_log(repo_name, repo_url):
"""
Run a HgPushlog etl process
"""
newrelic.agent.add_custom_attribute("repo_name", repo_name)
process = HgPushlogProcess()
- process.run(repo_url + '/json-pushes/?full=1&version=2', repo_name)
+ process.run(repo_url + "/json-pushes/?full=1&version=2", repo_name)
diff --git a/treeherder/etl/text.py b/treeherder/etl/text.py
index c88c2a1ad68..e327600f372 100644
--- a/treeherder/etl/text.py
+++ b/treeherder/etl/text.py
@@ -19,7 +19,7 @@ def convert_unicode_character_to_ascii_repr(match_obj):
hex_value = hex_code_point.zfill(6).upper()
- return '<U+{}>'.format(hex_value)
+ return "<U+{}>".format(hex_value)
def astral_filter(text):
diff --git a/treeherder/intermittents_commenter/commenter.py b/treeherder/intermittents_commenter/commenter.py
index b2bcf3bec0f..c7c597c6a83 100644
--- a/treeherder/intermittents_commenter/commenter.py
+++ b/treeherder/intermittents_commenter/commenter.py
@@ -48,12 +48,12 @@ def generate_bug_changes(self, startday, endday, alt_startday, alt_endday):
bug_info = self.fetch_all_bug_details(bug_ids)
all_bug_changes = []
- template = Template(self.open_file('comment.template', False))
+ template = Template(self.open_file("comment.template", False))
if self.weekly_mode:
top_bugs = [
bug[0]
- for bug in sorted(bug_stats.items(), key=lambda x: x[1]['total'], reverse=True)
+ for bug in sorted(bug_stats.items(), key=lambda x: x[1]["total"], reverse=True)
][:50]
for bug_id, counts in bug_stats.items():
@@ -72,8 +72,8 @@ def generate_bug_changes(self, startday, endday, alt_startday, alt_endday):
# change [stockwell needswork] to [stockwell unknown] when failures drop below 20 failures/week
# if this block is true, it implies a priority of 0 (mutually exclusive to previous block)
- if counts['total'] < 20:
- change_whiteboard = self.check_needswork(bug_info[bug_id]['whiteboard'])
+ if counts["total"] < 20:
+ change_whiteboard = self.check_needswork(bug_info[bug_id]["whiteboard"])
else:
change_priority, change_whiteboard = self.check_needswork_owner(
@@ -83,39 +83,39 @@ def generate_bug_changes(self, startday, endday, alt_startday, alt_endday):
# recommend disabling when more than 150 failures tracked over 21 days and
# takes precedence over any previous change_whiteboard assignments
if bug_id in alt_date_bug_totals and not self.check_whiteboard_status(
- bug_info[bug_id]['whiteboard']
+ bug_info[bug_id]["whiteboard"]
):
priority = 3
- change_whiteboard = bug_info[bug_id]['whiteboard'].replace(
- '[stockwell unknown]', ''
+ change_whiteboard = bug_info[bug_id]["whiteboard"].replace(
+ "[stockwell unknown]", ""
)
change_whiteboard = re.sub(
- r'\s*\[stockwell needswork[^\]]*\]\s*', '', change_whiteboard
+ r"\s*\[stockwell needswork[^\]]*\]\s*", "", change_whiteboard
).strip()
- change_whiteboard += '[stockwell disable-recommended]'
+ change_whiteboard += "[stockwell disable-recommended]"
comment = template.render(
bug_id=bug_id,
- total=counts['total'],
+ total=counts["total"],
test_run_count=test_run_count,
rank=rank,
priority=priority,
- failure_rate=round(counts['total'] / float(test_run_count), 3),
- repositories=counts['per_repository'],
- platforms=counts['per_platform'],
+ failure_rate=round(counts["total"] / float(test_run_count), 3),
+ repositories=counts["per_repository"],
+ platforms=counts["per_platform"],
counts=counts,
startday=startday,
endday=endday.split()[0],
weekly_mode=self.weekly_mode,
)
- bug_changes = {'bug_id': bug_id, 'changes': {'comment': {'body': comment}}}
+ bug_changes = {"bug_id": bug_id, "changes": {"comment": {"body": comment}}}
if change_whiteboard:
- bug_changes['changes']['whiteboard'] = change_whiteboard
+ bug_changes["changes"]["whiteboard"] = change_whiteboard
if change_priority:
- bug_changes['changes']['priority'] = change_priority
+ bug_changes["changes"]["priority"] = change_priority
all_bug_changes.append(bug_changes)
@@ -126,32 +126,32 @@ def check_needswork_owner(self, bug_info):
change_whiteboard = None
if (
- [bug_info['product'], bug_info['component']] in COMPONENTS
- ) and not self.check_whiteboard_status(bug_info['whiteboard']):
- if bug_info['priority'] not in ['--', 'P1', 'P2', 'P3']:
- change_priority = '--'
+ [bug_info["product"], bug_info["component"]] in COMPONENTS
+ ) and not self.check_whiteboard_status(bug_info["whiteboard"]):
+ if bug_info["priority"] not in ["--", "P1", "P2", "P3"]:
+ change_priority = "--"
- stockwell_labels = re.findall(r'(\[stockwell .+?\])', bug_info['whiteboard'])
+ stockwell_labels = re.findall(r"(\[stockwell .+?\])", bug_info["whiteboard"])
# update whiteboard text unless it already contains WHITEBOARD_NEEDSWORK_OWNER
if WHITEBOARD_NEEDSWORK_OWNER not in stockwell_labels:
- change_whiteboard = bug_info['whiteboard'] + WHITEBOARD_NEEDSWORK_OWNER
+ change_whiteboard = bug_info["whiteboard"] + WHITEBOARD_NEEDSWORK_OWNER
return change_priority, change_whiteboard
def check_needswork(self, whiteboard):
- stockwell_labels = re.findall(r'\[stockwell needswork[^\]]*\]', whiteboard)
+ stockwell_labels = re.findall(r"\[stockwell needswork[^\]]*\]", whiteboard)
if len(stockwell_labels) == 0:
return None
# update all [stockwell needswork] bugs (including all 'needswork' possibilities,
# ie 'needswork:owner') and update whiteboard to [stockwell unknown]
- change_whiteboard = re.sub(r'\s*\[stockwell needswork[^\]]*\]\s*', '', whiteboard).strip()
- return change_whiteboard + '[stockwell unknown]'
+ change_whiteboard = re.sub(r"\s*\[stockwell needswork[^\]]*\]\s*", "", whiteboard).strip()
+ return change_whiteboard + "[stockwell unknown]"
def assign_priority(self, counts):
priority = 0
- if counts['total'] >= 75:
+ if counts["total"] >= 75:
priority = 1
- elif counts['total'] >= 30:
+ elif counts["total"] >= 30:
priority = 2
return priority
@@ -159,23 +159,23 @@ def assign_priority(self, counts):
def print_or_submit_changes(self, all_bug_changes):
for bug in all_bug_changes:
if self.dry_run:
- logger.info('\n' + bug['changes']['comment']['body'] + '\n')
+ logger.info("\n" + bug["changes"]["comment"]["body"] + "\n")
elif settings.COMMENTER_API_KEY is None:
# prevent duplicate comments when on stage/dev
pass
else:
- self.submit_bug_changes(bug['changes'], bug['bug_id'])
+ self.submit_bug_changes(bug["changes"], bug["bug_id"])
# sleep between comment submissions to avoid overwhelming servers
time.sleep(0.5)
logger.warning(
- 'There were {} comments for this {} task.'.format(
- len(all_bug_changes), 'weekly' if self.weekly_mode else 'daily'
+ "There were {} comments for this {} task.".format(
+ len(all_bug_changes), "weekly" if self.weekly_mode else "daily"
)
)
def open_file(self, filename, load):
- with open('treeherder/intermittents_commenter/{}'.format(filename), 'r') as myfile:
+ with open("treeherder/intermittents_commenter/{}".format(filename), "r") as myfile:
if load:
return json.load(myfile)
else:
@@ -193,17 +193,17 @@ def calculate_date_strings(self, mode, numDays):
# daily mode
startday = yesterday
- return startday.isoformat(), endday.strftime('%Y-%m-%d %H:%M:%S.%f')
+ return startday.isoformat(), endday.strftime("%Y-%m-%d %H:%M:%S.%f")
def check_whiteboard_status(self, whiteboard):
"""Extracts stockwell text from a bug's whiteboard status to
determine whether it matches specified stockwell text;
returns a boolean."""
- stockwell_text = re.search(r'\[stockwell (.+?)\]', whiteboard)
+ stockwell_text = re.search(r"\[stockwell (.+?)\]", whiteboard)
if stockwell_text is not None:
- text = stockwell_text.group(1).split(':')[0]
- if text == 'fixed' or text == 'infra' or 'disable' in text:
+ text = stockwell_text.group(1).split(":")[0]
+ if text == "fixed" or text == "infra" or "disable" in text:
return True
return False
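check_whiteboard_status above treats a bug as already handled when its [stockwell ...] tag is fixed, infra, or any disable variant; such bugs are skipped when recommending disabling. The same logic as a standalone snippet (renamed to avoid implying it is the class method):

    # Standalone version of the whiteboard check above, with illustrative inputs.
    import re

    def whiteboard_is_terminal(whiteboard):
        stockwell_text = re.search(r"\[stockwell (.+?)\]", whiteboard)
        if stockwell_text is not None:
            text = stockwell_text.group(1).split(":")[0]
            return text == "fixed" or text == "infra" or "disable" in text
        return False

    print(whiteboard_is_terminal("[stockwell disable-recommended]"))  # True
    print(whiteboard_is_terminal("[stockwell needswork:owner]"))      # False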
@@ -212,9 +212,9 @@ def new_request(self):
# Use a custom HTTP adapter, so we can set a non-zero max_retries value.
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=3))
session.headers = {
- 'User-Agent': 'treeherder/{}'.format(settings.SITE_HOSTNAME),
- 'x-bugzilla-api-key': settings.COMMENTER_API_KEY,
- 'Accept': 'application/json',
+ "User-Agent": "treeherder/{}".format(settings.SITE_HOSTNAME),
+ "x-bugzilla-api-key": settings.COMMENTER_API_KEY,
+ "Accept": "application/json",
}
return session
@@ -222,43 +222,43 @@ def fetch_bug_details(self, bug_ids):
"""Fetches bug metadata from bugzilla and returns an encoded
dict if successful, otherwise returns None."""
- params = {'include_fields': 'product, component, priority, whiteboard, id'}
- params['id'] = bug_ids
+ params = {"include_fields": "product, component, priority, whiteboard, id"}
+ params["id"] = bug_ids
try:
response = self.session.get(
- settings.BZ_API_URL + '/rest/bug',
+ settings.BZ_API_URL + "/rest/bug",
headers=self.session.headers,
params=params,
timeout=30,
)
response.raise_for_status()
except RequestException as e:
- logger.warning('error fetching bugzilla metadata for bugs due to {}'.format(e))
+ logger.warning("error fetching bugzilla metadata for bugs due to {}".format(e))
return None
- if response.headers['Content-Type'] == 'text/html; charset=UTF-8':
+ if response.headers["Content-Type"] == "text/html; charset=UTF-8":
return None
data = response.json()
- if 'bugs' not in data:
+ if "bugs" not in data:
return None
- return data['bugs']
+ return data["bugs"]
def submit_bug_changes(self, changes, bug_id):
- url = '{}/rest/bug/{}'.format(settings.BZ_API_URL, str(bug_id))
+ url = "{}/rest/bug/{}".format(settings.BZ_API_URL, str(bug_id))
try:
response = self.session.put(url, headers=self.session.headers, json=changes, timeout=30)
response.raise_for_status()
except RequestException as e:
- logger.error('error posting comment to bugzilla for bug {} due to {}'.format(bug_id, e))
+ logger.error("error posting comment to bugzilla for bug {} due to {}".format(bug_id, e))
def get_test_runs(self, startday, endday):
"""Returns an aggregate of pushes for specified date range and
repository."""
- test_runs = Push.objects.filter(time__range=(startday, endday)).aggregate(Count('author'))
- return test_runs['author__count']
+ test_runs = Push.objects.filter(time__range=(startday, endday)).aggregate(Count("author"))
+ return test_runs["author__count"]
def get_bug_stats(self, startday, endday):
"""Get all intermittent failures per specified date range and repository,
@@ -292,20 +292,20 @@ def get_bug_stats(self, startday, endday):
threshold = 1 if self.weekly_mode else 15
bug_ids = (
BugJobMap.failures.by_date(startday, endday)
- .values('bug_id')
- .annotate(total=Count('bug_id'))
+ .values("bug_id")
+ .annotate(total=Count("bug_id"))
.filter(total__gte=threshold)
- .values_list('bug_id', flat=True)
+ .values_list("bug_id", flat=True)
)
bugs = (
BugJobMap.failures.by_date(startday, endday)
.filter(bug_id__in=bug_ids)
.values(
- 'job__repository__name',
- 'job__machine_platform__platform',
- 'bug_id',
- 'job__option_collection_hash',
+ "job__repository__name",
+ "job__machine_platform__platform",
+ "bug_id",
+ "job__option_collection_hash",
)
)
@@ -313,17 +313,17 @@ def get_bug_stats(self, startday, endday):
bug_map = dict()
for bug in bugs:
- platform = bug['job__machine_platform__platform']
- repo = bug['job__repository__name']
- bug_id = bug['bug_id']
+ platform = bug["job__machine_platform__platform"]
+ repo = bug["job__repository__name"]
+ bug_id = bug["bug_id"]
build_type = option_collection_map.get(
- bug['job__option_collection_hash'], 'unknown build'
+ bug["job__option_collection_hash"], "unknown build"
)
if bug_id in bug_map:
- bug_map[bug_id]['total'] += 1
- bug_map[bug_id]['per_repository'][repo] += 1
- bug_map[bug_id]['per_platform'][platform] += 1
+ bug_map[bug_id]["total"] += 1
+ bug_map[bug_id]["per_repository"][repo] += 1
+ bug_map[bug_id]["per_platform"][platform] += 1
if bug_map[bug_id].get(platform):
bug_map[bug_id][platform][build_type] += 1
else:
@@ -331,10 +331,10 @@ def get_bug_stats(self, startday, endday):
else:
bug_map[bug_id] = {}
- bug_map[bug_id]['total'] = 1
- bug_map[bug_id]['per_platform'] = Counter([platform])
+ bug_map[bug_id]["total"] = 1
+ bug_map[bug_id]["per_platform"] = Counter([platform])
bug_map[bug_id][platform] = Counter([build_type])
- bug_map[bug_id]['per_repository'] = Counter([repo])
+ bug_map[bug_id]["per_repository"] = Counter([repo])
return bug_map, bug_ids
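Each bug_map entry built above aggregates a bug's failures several ways: a running total, per-repository and per-platform Counters, and, keyed by platform name, a Counter of build types. A sketch of what one entry looks like after two failures (the bug id and platform names are made up):

    # Sketch of the aggregation above for one bug seen twice on the same platform.
    from collections import Counter

    bug_map = {}
    for platform, repo, build_type in [
        ("linux1804-64", "autoland", "debug"),
        ("linux1804-64", "mozilla-central", "opt"),
    ]:
        entry = bug_map.setdefault(1234567, {
            "total": 0,
            "per_platform": Counter(),
            "per_repository": Counter(),
        })
        entry["total"] += 1
        entry["per_platform"][platform] += 1
        entry["per_repository"][repo] += 1
        entry.setdefault(platform, Counter())[build_type] += 1

    print(bug_map[1234567]["total"])         # 2
    print(bug_map[1234567]["linux1804-64"])  # Counter({'debug': 1, 'opt': 1})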
@@ -344,12 +344,12 @@ def get_alt_date_bug_totals(self, startday, endday, bug_ids):
bugs = (
BugJobMap.failures.by_date(startday, endday)
.filter(bug_id__in=bug_ids)
- .values('bug_id')
- .annotate(total=Count('id'))
- .values('bug_id', 'total')
+ .values("bug_id")
+ .annotate(total=Count("id"))
+ .values("bug_id", "total")
)
- return {bug['bug_id']: bug['total'] for bug in bugs if bug['total'] >= 150}
+ return {bug["bug_id"]: bug["total"] for bug in bugs if bug["total"] >= 150}
def fetch_all_bug_details(self, bug_ids):
"""batch requests for bugzilla data in groups of 1200 (which is the safe
@@ -366,4 +366,4 @@ def fetch_all_bug_details(self, bug_ids):
min = max
max = max + 600
- return {bug['id']: bug for bug in bugs_list} if len(bugs_list) else None
+ return {bug["id"]: bug for bug in bugs_list} if len(bugs_list) else None
diff --git a/treeherder/intermittents_commenter/constants.py b/treeherder/intermittents_commenter/constants.py
index 0110bd5c7da..e9a46546756 100644
--- a/treeherder/intermittents_commenter/constants.py
+++ b/treeherder/intermittents_commenter/constants.py
@@ -1,71 +1,71 @@
-WHITEBOARD_NEEDSWORK_OWNER = '[stockwell needswork:owner]'
+WHITEBOARD_NEEDSWORK_OWNER = "[stockwell needswork:owner]"
COMPONENTS = [
- ['Core', 'Canvas: 2D'],
- ['Core', 'Canvas: WebGL'],
- ['Core', 'DOM'],
- ['Core', 'DOM: Core & HTML'],
- ['Core', 'DOM: Device Interfaces'],
- ['Core', 'DOM: Events'],
- ['Core', 'DOM: IndexedDB'],
- ['Core', 'DOM: Push Notifications'],
- ['Core', 'DOM: Quota Manager'],
- ['Core', 'DOM: Service Workers'],
- ['Core', 'DOM: Workers'],
- ['Core', 'DOM:Content Processes'],
- ['Core', 'Document Navigation'],
- ['Core', 'Event Handling'],
- ['Core', 'GFX: Color Management'],
- ['Core', 'Graphics'],
- ['Core', 'Graphics: Layers'],
- ['Core', 'Graphics: Text'],
- ['Core', 'Graphics: WebRender'],
- ['Core', 'HTML: Form Submission'],
- ['Core', 'HTML: Parser'],
- ['Core', 'IPC'],
- ['Core', 'Image Blocking'],
- ['Core', 'ImageLib'],
- ['Core', 'Javascript Engine'],
- ['Core', 'Javascript Engine: JIT'],
- ['Core', 'Javascript: GC'],
- ['Core', 'Javascript: Internationalization API'],
- ['Core', 'Javascript: Standard Library'],
- ['Core', 'Keyboard: Navigation'],
- ['Core', 'Networking'],
- ['Core', 'Networking: Cache'],
- ['Core', 'Networking: Cookies'],
- ['Core', 'Networking: DNS'],
- ['Core', 'Networking: Domain Lists'],
- ['Core', 'Networking: FTP'],
- ['Core', 'Networking: File'],
- ['Core', 'Networking: HTTP'],
- ['Core', 'Networking: JAR'],
- ['Core', 'Networking: WebSockets'],
- ['Core', 'Plug-ins'],
- ['Core', 'Security: Sandboxing Process'],
- ['Core', 'Serializers'],
- ['Core', 'Widget'],
- ['Core', 'Widget: Win32'],
- ['Core', 'Widget: WinRT'],
- ['Core', 'XBL'],
- ['Core', 'XML'],
- ['Core', 'XPConnect'],
- ['Core', 'XSLT'],
- ['Core', 'js-ctypes'],
- ['Firefox for Android', 'Add-ons Manager'],
- ['Firefox for Android', 'Testing'],
- ['Firefox', 'Disability Access'],
- ['Firefox', 'Toolbars and Customization'],
- ['Toolkit', 'Add-ons Manager'],
- ['Toolkit', 'Reader Mode'],
- ['Toolkit', 'Toolbars and Toolbar Customization'],
- ['Toolkit', 'WebExtensions: Android'],
- ['Toolkit', 'WebExtensions: Android'],
- ['Toolkit', 'WebExtensions: Compatibility'],
- ['Toolkit', 'WebExtensions: Developer Tools'],
- ['Toolkit', 'WebExtensions: Experiments'],
- ['Toolkit', 'WebExtensions: Frontend'],
- ['Toolkit', 'WebExtensions: General'],
- ['Toolkit', 'WebExtensions: Request Handling'],
- ['Toolkit', 'WebExtensions: Untriaged'],
+ ["Core", "Canvas: 2D"],
+ ["Core", "Canvas: WebGL"],
+ ["Core", "DOM"],
+ ["Core", "DOM: Core & HTML"],
+ ["Core", "DOM: Device Interfaces"],
+ ["Core", "DOM: Events"],
+ ["Core", "DOM: IndexedDB"],
+ ["Core", "DOM: Push Notifications"],
+ ["Core", "DOM: Quota Manager"],
+ ["Core", "DOM: Service Workers"],
+ ["Core", "DOM: Workers"],
+ ["Core", "DOM:Content Processes"],
+ ["Core", "Document Navigation"],
+ ["Core", "Event Handling"],
+ ["Core", "GFX: Color Management"],
+ ["Core", "Graphics"],
+ ["Core", "Graphics: Layers"],
+ ["Core", "Graphics: Text"],
+ ["Core", "Graphics: WebRender"],
+ ["Core", "HTML: Form Submission"],
+ ["Core", "HTML: Parser"],
+ ["Core", "IPC"],
+ ["Core", "Image Blocking"],
+ ["Core", "ImageLib"],
+ ["Core", "Javascript Engine"],
+ ["Core", "Javascript Engine: JIT"],
+ ["Core", "Javascript: GC"],
+ ["Core", "Javascript: Internationalization API"],
+ ["Core", "Javascript: Standard Library"],
+ ["Core", "Keyboard: Navigation"],
+ ["Core", "Networking"],
+ ["Core", "Networking: Cache"],
+ ["Core", "Networking: Cookies"],
+ ["Core", "Networking: DNS"],
+ ["Core", "Networking: Domain Lists"],
+ ["Core", "Networking: FTP"],
+ ["Core", "Networking: File"],
+ ["Core", "Networking: HTTP"],
+ ["Core", "Networking: JAR"],
+ ["Core", "Networking: WebSockets"],
+ ["Core", "Plug-ins"],
+ ["Core", "Security: Sandboxing Process"],
+ ["Core", "Serializers"],
+ ["Core", "Widget"],
+ ["Core", "Widget: Win32"],
+ ["Core", "Widget: WinRT"],
+ ["Core", "XBL"],
+ ["Core", "XML"],
+ ["Core", "XPConnect"],
+ ["Core", "XSLT"],
+ ["Core", "js-ctypes"],
+ ["Firefox for Android", "Add-ons Manager"],
+ ["Firefox for Android", "Testing"],
+ ["Firefox", "Disability Access"],
+ ["Firefox", "Toolbars and Customization"],
+ ["Toolkit", "Add-ons Manager"],
+ ["Toolkit", "Reader Mode"],
+ ["Toolkit", "Toolbars and Toolbar Customization"],
+ ["Toolkit", "WebExtensions: Android"],
+ ["Toolkit", "WebExtensions: Android"],
+ ["Toolkit", "WebExtensions: Compatibility"],
+ ["Toolkit", "WebExtensions: Developer Tools"],
+ ["Toolkit", "WebExtensions: Experiments"],
+ ["Toolkit", "WebExtensions: Frontend"],
+ ["Toolkit", "WebExtensions: General"],
+ ["Toolkit", "WebExtensions: Request Handling"],
+ ["Toolkit", "WebExtensions: Untriaged"],
]
diff --git a/treeherder/intermittents_commenter/management/commands/run_intermittents_commenter.py b/treeherder/intermittents_commenter/management/commands/run_intermittents_commenter.py
index 005de8933d1..5349c8965e4 100644
--- a/treeherder/intermittents_commenter/management/commands/run_intermittents_commenter.py
+++ b/treeherder/intermittents_commenter/management/commands/run_intermittents_commenter.py
@@ -12,25 +12,25 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- '-m',
- '--mode',
- dest='mode',
- nargs='?',
- choices=['weekly', 'auto'],
+ "-m",
+ "--mode",
+ dest="mode",
+ nargs="?",
+ choices=["weekly", "auto"],
default=False,
- help='generate comment summaries based on auto or weekly mode; defaults to daily',
+ help="generate comment summaries based on auto or weekly mode; defaults to daily",
)
parser.add_argument(
- '--dry-run',
- action='store_true',
- dest='dry_run',
- help='output comments to stdout rather than submitting to Bugzilla',
+ "--dry-run",
+ action="store_true",
+ dest="dry_run",
+ help="output comments to stdout rather than submitting to Bugzilla",
)
def handle(self, *args, **options):
- mode = options['mode']
- is_monday = calendar.day_name[date.today().weekday()] == 'Monday'
- weekly_mode = (mode == 'weekly') or (mode == 'auto' and is_monday)
+ mode = options["mode"]
+ is_monday = calendar.day_name[date.today().weekday()] == "Monday"
+ weekly_mode = (mode == "weekly") or (mode == "auto" and is_monday)
- process = Commenter(weekly_mode=weekly_mode, dry_run=options['dry_run'])
+ process = Commenter(weekly_mode=weekly_mode, dry_run=options["dry_run"])
process.run()
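The mode handling converted above reduces to: weekly summaries are generated when --mode weekly is passed, or when --mode auto happens to run on a Monday. A minimal standalone sketch of that resolution (the function name and sample dates are illustrative, not part of the command):

    import calendar
    from datetime import date

    def resolve_weekly_mode(mode, today=None):
        # Weekly summaries run on explicit request, or automatically on Mondays.
        today = today or date.today()
        is_monday = calendar.day_name[today.weekday()] == "Monday"
        return (mode == "weekly") or (mode == "auto" and is_monday)

    assert resolve_weekly_mode("weekly") is True
    assert resolve_weekly_mode("auto", date(2024, 1, 15)) is True   # a Monday
    assert resolve_weekly_mode("auto", date(2024, 1, 16)) is False  # a Tuesday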
diff --git a/treeherder/log_parser/artifactbuildercollection.py b/treeherder/log_parser/artifactbuildercollection.py
index a90eb620931..c178ffce932 100644
--- a/treeherder/log_parser/artifactbuildercollection.py
+++ b/treeherder/log_parser/artifactbuildercollection.py
@@ -83,17 +83,17 @@ def parse(self):
building the ``artifact`` as we go.
"""
with make_request(self.url, stream=True) as response:
- download_size_in_bytes = int(response.headers.get('Content-Length', -1))
+ download_size_in_bytes = int(response.headers.get("Content-Length", -1))
# Temporary annotation of log size to help set thresholds in bug 1295997.
- newrelic.agent.add_custom_attribute('unstructured_log_size', download_size_in_bytes)
+ newrelic.agent.add_custom_attribute("unstructured_log_size", download_size_in_bytes)
newrelic.agent.add_custom_attribute(
- 'unstructured_log_encoding', response.headers.get('Content-Encoding', 'None')
+ "unstructured_log_encoding", response.headers.get("Content-Encoding", "None")
)
if download_size_in_bytes > MAX_DOWNLOAD_SIZE_IN_BYTES:
raise LogSizeException(
- 'Download size of %i bytes exceeds limit' % download_size_in_bytes
+ "Download size of %i bytes exceeds limit" % download_size_in_bytes
)
# Lines must be explicitly decoded since `iter_lines()` returns bytes by default
@@ -105,7 +105,7 @@ def parse(self):
try:
# Using `replace` to prevent malformed unicode (which might possibly exist
# in test message output) from breaking parsing of the rest of the log.
- builder.parse_line(line.decode('utf-8', 'replace'))
+ builder.parse_line(line.decode("utf-8", "replace"))
except EmptyPerformanceData:
logger.warning("We have parsed an empty PERFHERDER_DATA for %s", self.url)
@@ -116,7 +116,7 @@ def parse(self):
builder.finish_parse()
name = builder.name
artifact = builder.get_artifact()
- if name == 'performance_data' and not artifact[name]:
+ if name == "performance_data" and not artifact[name]:
continue
self.artifacts[name] = artifact
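The parse() changes above keep the same flow: stream the log, reject it early if Content-Length exceeds the cap, and decode each line with lossy UTF-8 so bad bytes cannot abort parsing. A rough standalone sketch of that pattern, assuming the requests library (make_request, the builders, and the real size limit are Treeherder specifics not reproduced here):

    import requests

    MAX_DOWNLOAD_SIZE_IN_BYTES = 5 * 1024 * 1024  # illustrative cap only

    class LogSizeError(Exception):
        """Raised when the advertised log size exceeds the cap."""

    def iter_log_lines(url):
        # Stream so the whole log never has to sit in memory at once.
        with requests.get(url, stream=True) as response:
            size = int(response.headers.get("Content-Length", -1))
            if size > MAX_DOWNLOAD_SIZE_IN_BYTES:
                raise LogSizeError("Download size of %i bytes exceeds limit" % size)
            for line in response.iter_lines():
                # iter_lines() yields bytes; decode with "replace" so malformed
                # unicode in test output cannot break the rest of the parse.
                yield line.decode("utf-8", "replace")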
diff --git a/treeherder/log_parser/artifactbuilders.py b/treeherder/log_parser/artifactbuilders.py
index e758ebd5603..82415663fa1 100644
--- a/treeherder/log_parser/artifactbuilders.py
+++ b/treeherder/log_parser/artifactbuilders.py
@@ -42,7 +42,7 @@ def parse_line(self, line):
# Perf data is stored in a json structure contained in a single line,
# if the MAX_LINE_LENGTH is applied the data structure could be
# truncated, preventing it from being ingested.
- if 'PERFHERDER_DATA' not in line:
+ if "PERFHERDER_DATA" not in line:
line = line[: self.MAX_LINE_LENGTH]
self.parser.parse_line(line, self.lineno)
diff --git a/treeherder/log_parser/failureline.py b/treeherder/log_parser/failureline.py
index a99ba0e488d..e72d0a660c9 100644
--- a/treeherder/log_parser/failureline.py
+++ b/treeherder/log_parser/failureline.py
@@ -46,7 +46,7 @@ def write_failure_lines(job_log, log_iter):
if len(log_list) > failure_lines_cutoff:
# Alter the N+1th log line to indicate the list was truncated.
- log_list[-1].update(action='truncated')
+ log_list[-1].update(action="truncated")
transformer = None
with transaction.atomic():
@@ -132,7 +132,7 @@ def create_group_result(job_log, line):
)
else:
group, _ = Group.objects.get_or_create(name=group_path[:255])
- duration = line.get('duration', 0)
+ duration = line.get("duration", 0)
if type(duration) not in [float, int]:
duration = 0
else:
@@ -144,7 +144,7 @@ def create_group_result(job_log, line):
GroupStatus.objects.create(
job_log=job_log,
group=group,
- status=GroupStatus.get_status(line['status']),
+ status=GroupStatus.get_status(line["status"]),
duration=duration,
)
@@ -155,15 +155,15 @@ def create(job_log, log_list):
group_results = []
failure_lines = []
for line in log_list:
- action = line['action']
+ action = line["action"]
if action not in FailureLine.ACTION_LIST:
newrelic.agent.record_custom_event("unsupported_failure_line_action", line)
# Unfortunately, these errors flood the logs, but we want to report any
# others that we didn't expect. We know about the following action we choose
# to ignore.
- if action != 'test_groups':
- logger.exception(ValueError(f'Unsupported FailureLine ACTION: {action}'))
- elif action == 'group_result':
+ if action != "test_groups":
+ logger.exception(ValueError(f"Unsupported FailureLine ACTION: {action}"))
+ elif action == "group_result":
group_results.append(line)
else:
failure_lines.append(line)
@@ -190,15 +190,15 @@ def get_group_results(push):
groups = Group.objects.filter(
job_logs__job__push=push, group_result__status__in=[GroupStatus.OK, GroupStatus.ERROR]
).values(
- 'group_result__status',
- 'name',
- 'job_logs__job__taskcluster_metadata__task_id',
+ "group_result__status",
+ "name",
+ "job_logs__job__taskcluster_metadata__task_id",
)
by_task_id = defaultdict(dict)
for group in groups:
- by_task_id[group['job_logs__job__taskcluster_metadata__task_id']][group['name']] = bool(
- GroupStatus.STATUS_LOOKUP[group['group_result__status']] == "OK"
+ by_task_id[group["job_logs__job__taskcluster_metadata__task_id"]][group["name"]] = bool(
+ GroupStatus.STATUS_LOOKUP[group["group_result__status"]] == "OK"
)
return by_task_id
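get_group_results above flattens the Group query into rows and then folds them into a two-level mapping keyed by task id and group name, with a boolean for OK versus ERROR. The shaping step on its own (row field names shortened for the sketch; the real code uses the Django values() keys shown above):

    from collections import defaultdict

    rows = [
        {"task_id": "abc123", "name": "dom/base/test", "status": "OK"},
        {"task_id": "abc123", "name": "dom/events/test", "status": "ERROR"},
    ]

    by_task_id = defaultdict(dict)
    for row in rows:
        # True when the group passed, False otherwise.
        by_task_id[row["task_id"]][row["name"]] = row["status"] == "OK"

    assert by_task_id["abc123"]["dom/base/test"] is True
    assert by_task_id["abc123"]["dom/events/test"] is False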
diff --git a/treeherder/log_parser/management/commands/test_parse_log.py b/treeherder/log_parser/management/commands/test_parse_log.py
index 73a61681c75..534f84fba90 100644
--- a/treeherder/log_parser/management/commands/test_parse_log.py
+++ b/treeherder/log_parser/management/commands/test_parse_log.py
@@ -15,34 +15,34 @@ class Command(BaseCommand):
"""
def add_arguments(self, parser):
- parser.add_argument('log_url')
+ parser.add_argument("log_url")
parser.add_argument(
- '--profile',
- action='store',
- dest='profile',
+ "--profile",
+ action="store",
+ dest="profile",
type=int,
default=None,
- help='Profile running command a number of times',
+ help="Profile running command a number of times",
)
def handle(self, *args, **options):
- if options['profile']:
- num_runs = options['profile']
+ if options["profile"]:
+ num_runs = options["profile"]
else:
num_runs = 1
times = []
for _ in range(num_runs):
start = time.time()
- artifact_bc = ArtifactBuilderCollection(options['log_url'])
+ artifact_bc = ArtifactBuilderCollection(options["log_url"])
artifact_bc.parse()
times.append(time.time() - start)
- if not options['profile']:
+ if not options["profile"]:
for name, artifact in artifact_bc.artifacts.items():
print("%s, %s" % (name, json.dumps(artifact, indent=2)))
- if options['profile']:
+ if options["profile"]:
print("Timings: %s" % times)
print("Average: %s" % (sum(times) / len(times)))
print("Total: %s" % sum(times))
diff --git a/treeherder/log_parser/parsers.py b/treeherder/log_parser/parsers.py
index f5730258685..2c9e5c18509 100644
--- a/treeherder/log_parser/parsers.py
+++ b/treeherder/log_parser/parsers.py
@@ -152,7 +152,7 @@ def parse_line(self, line, lineno):
# log prefix if we know we're in a TaskCluster log.
# First line of TaskCluster logs almost certainly has this.
- if line.startswith('[taskcluster '):
+ if line.startswith("[taskcluster "):
self.is_taskcluster = True
# For performance reasons, only do this if we have identified as
@@ -190,7 +190,7 @@ class PerformanceParser(ParserBase):
# Using $ in the regex as an end of line bounds causes the
# regex to fail on windows logs. This is likely due to the
# ^M character representation of the windows end of line.
- RE_PERFORMANCE = re.compile(r'.*?PERFHERDER_DATA:\s+({.*})')
+ RE_PERFORMANCE = re.compile(r".*?PERFHERDER_DATA:\s+({.*})")
def __init__(self):
super().__init__("performance_data")
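RE_PERFORMANCE deliberately avoids a $ anchor because Windows logs end lines with ^M; the single capture group is the raw JSON payload handed to Perfherder. A small sketch of the extraction against a fabricated log line:

    import json
    import re

    RE_PERFORMANCE = re.compile(r".*?PERFHERDER_DATA:\s+({.*})")

    line = '12:34:56 INFO - PERFHERDER_DATA: {"framework": {"name": "browsertime"}}\r'
    match = RE_PERFORMANCE.match(line)
    if match:
        # Group 1 is the JSON blob; the trailing carriage return stays outside it.
        data = json.loads(match.group(1))
        assert data["framework"]["name"] == "browsertime"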
diff --git a/treeherder/log_parser/tasks.py b/treeherder/log_parser/tasks.py
index 4e2dde85081..9efad3784cf 100644
--- a/treeherder/log_parser/tasks.py
+++ b/treeherder/log_parser/tasks.py
@@ -18,7 +18,7 @@
logger = logging.getLogger(__name__)
-@retryable_task(name='log-parser', max_retries=10)
+@retryable_task(name="log-parser", max_retries=10)
def parse_logs(job_id, job_log_ids, priority):
newrelic.agent.add_custom_attribute("job_id", str(job_id))
@@ -45,7 +45,7 @@ def parse_logs(job_id, job_log_ids, priority):
# Only parse logs which haven't yet been processed or else failed on the last attempt.
if job_log.status not in (JobLog.PENDING, JobLog.FAILED):
logger.info(
- f'Skipping parsing for job %s since log already processed. Log Status: {job_log.status}',
+ f"Skipping parsing for job %s since log already processed. Log Status: {job_log.status}",
job_log.id,
)
continue
@@ -79,7 +79,7 @@ def parse_logs(job_id, job_log_ids, priority):
def store_failure_lines(job_log):
"""Store the failure lines from a log corresponding to the structured
errorsummary file."""
- logger.info('Running store_failure_lines for job %s', job_log.job.id)
+ logger.info("Running store_failure_lines for job %s", job_log.job.id)
failureline.store_failure_lines(job_log)
@@ -91,7 +91,7 @@ def post_log_artifacts(job_log):
artifact_list = extract_text_log_artifacts(job_log)
except LogSizeException as e:
job_log.update_status(JobLog.SKIPPED_SIZE)
- logger.warning('Skipping parsing log for %s: %s', job_log.id, e)
+ logger.warning("Skipping parsing log for %s: %s", job_log.id, e)
return
except Exception as e:
job_log.update_status(JobLog.FAILED)
@@ -130,7 +130,7 @@ def extract_text_log_artifacts(job_log):
{
"job_guid": job_log.job.guid,
"name": name,
- "type": 'json',
+ "type": "json",
"blob": json.dumps(artifact),
}
)
diff --git a/treeherder/log_parser/utils.py b/treeherder/log_parser/utils.py
index d449496fcbb..b81fe765edd 100644
--- a/treeherder/log_parser/utils.py
+++ b/treeherder/log_parser/utils.py
@@ -8,7 +8,7 @@ def _lookup_extra_options_max(schema):
return schema["definitions"]["suite_schema"]["properties"]["extraOptions"]["items"]["maxLength"]
-with open(os.path.join('schemas', 'performance-artifact.json')) as f:
+with open(os.path.join("schemas", "performance-artifact.json")) as f:
PERFHERDER_SCHEMA = json.load(f)
MAX_LENGTH = _lookup_extra_options_max(PERFHERDER_SCHEMA)
SECOND_MAX_LENGTH = 45
diff --git a/treeherder/middleware.py b/treeherder/middleware.py
index 4546232983b..9dd983b7a35 100644
--- a/treeherder/middleware.py
+++ b/treeherder/middleware.py
@@ -33,12 +33,12 @@ def add_headers_function(headers, path, url):
"""
from django.urls import reverse
- report_uri = "report-uri {}".format(reverse('csp-report'))
+ report_uri = "report-uri {}".format(reverse("csp-report"))
if report_uri not in CSP_DIRECTIVES:
CSP_DIRECTIVES.append(report_uri)
- CSP_HEADER = '; '.join(CSP_DIRECTIVES)
- headers['Content-Security-Policy'] = CSP_HEADER
+ CSP_HEADER = "; ".join(CSP_DIRECTIVES)
+ headers["Content-Security-Policy"] = CSP_HEADER
class CustomWhiteNoise(WhiteNoiseMiddleware):
@@ -55,7 +55,7 @@ class CustomWhiteNoise(WhiteNoiseMiddleware):
# /assets/index.1d85033a.js
# /assets/2.379789df.css.map
# /assets/fontawesome-webfont.af7ae505.woff2
- IMMUTABLE_FILE_RE = re.compile(r'^/assets/.*\.[a-f0-9]{8}\..*')
+ IMMUTABLE_FILE_RE = re.compile(r"^/assets/.*\.[a-f0-9]{8}\..*")
def immutable_file_test(self, path, url):
"""
@@ -76,5 +76,5 @@ class NewRelicMiddleware(MiddlewareMixin):
def process_request(self, request):
# The New Relic Python agent only submits the User Agent to APM (for exceptions and
# slow transactions), so for use in Insights we have to add it as a custom parameter.
- if 'HTTP_USER_AGENT' in request.META:
- newrelic.agent.add_custom_attribute('user_agent', request.META['HTTP_USER_AGENT'])
+ if "HTTP_USER_AGENT" in request.META:
+ newrelic.agent.add_custom_attribute("user_agent", request.META["HTTP_USER_AGENT"])
diff --git a/treeherder/model/data_cycling/cyclers.py b/treeherder/model/data_cycling/cyclers.py
index d91e9749f95..a4ef1599177 100644
--- a/treeherder/model/data_cycling/cyclers.py
+++ b/treeherder/model/data_cycling/cyclers.py
@@ -32,8 +32,8 @@
logger = logging.getLogger(__name__)
-TREEHERDER = 'treeherder'
-PERFHERDER = 'perfherder'
+TREEHERDER = "treeherder"
+PERFHERDER = "perfherder"
class DataCycler(ABC):
@@ -76,29 +76,29 @@ def cycle(self):
self._remove_leftovers()
def _remove_leftovers(self):
- logger.warning('Pruning ancillary data: job types, groups and machines')
+ logger.warning("Pruning ancillary data: job types, groups and machines")
def prune(reference_model, id_name, model):
- logger.warning('Pruning {}s'.format(model.__name__))
+ logger.warning("Pruning {}s".format(model.__name__))
used_ids = (
reference_model.objects.only(id_name).values_list(id_name, flat=True).distinct()
)
- unused_ids = model.objects.exclude(id__in=used_ids).values_list('id', flat=True)
+ unused_ids = model.objects.exclude(id__in=used_ids).values_list("id", flat=True)
- logger.warning('Removing {} records from {}'.format(len(unused_ids), model.__name__))
+ logger.warning("Removing {} records from {}".format(len(unused_ids), model.__name__))
while len(unused_ids):
delete_ids = unused_ids[: self.chunk_size]
- logger.warning('deleting {} of {}'.format(len(delete_ids), len(unused_ids)))
+ logger.warning("deleting {} of {}".format(len(delete_ids), len(unused_ids)))
model.objects.filter(id__in=delete_ids).delete()
unused_ids = unused_ids[self.chunk_size :]
- prune(Job, 'job_type_id', JobType)
- prune(Job, 'job_group_id', JobGroup)
- prune(Job, 'machine_id', Machine)
- prune(GroupStatus, 'group_id', Group)
- prune(Job, 'build_platform_id', BuildPlatform)
- prune(Job, 'machine_platform_id', MachinePlatform)
+ prune(Job, "job_type_id", JobType)
+ prune(Job, "job_group_id", JobGroup)
+ prune(Job, "machine_id", Machine)
+ prune(GroupStatus, "group_id", Group)
+ prune(Job, "build_platform_id", BuildPlatform)
+ prune(Job, "machine_platform_id", MachinePlatform)
class PerfherderCycler(DataCycler):
@@ -139,7 +139,7 @@ def cycle(self):
try:
for strategy in self.strategies:
try:
- logger.warning(f'Cycling data using {strategy.name}...')
+ logger.warning(f"Cycling data using {strategy.name}...")
self._delete_in_chunks(strategy)
except NoDataCyclingAtAll as ex:
logger.warning(str(ex))
@@ -179,10 +179,10 @@ def __remove_too_old_alerts(self):
def __remove_empty_alert_summaries(self):
logger.warning("Removing alert summaries which no longer have any alerts...")
(
- PerformanceAlertSummary.objects.prefetch_related('alerts', 'related_alerts')
+ PerformanceAlertSummary.objects.prefetch_related("alerts", "related_alerts")
.annotate(
- total_alerts=Count('alerts'),
- total_related_alerts=Count('related_alerts'),
+ total_alerts=Count("alerts"),
+ total_related_alerts=Count("related_alerts"),
)
.filter(
total_alerts=0,
@@ -200,7 +200,7 @@ def __remove_empty_backfill_reports(self):
logger.warning("Removing backfill reports which no longer have any records...")
four_months_ago = datetime.now() - timedelta(days=120)
- BackfillReport.objects.annotate(total_records=Count('records')).filter(
+ BackfillReport.objects.annotate(total_records=Count("records")).filter(
created__lt=four_months_ago, total_records=0
).delete()
@@ -224,19 +224,19 @@ def _delete_in_chunks(self, strategy: RemovalStrategy):
else:
any_successful_attempt = True
logger.debug(
- 'Successfully deleted {} performance datum rows'.format(deleted_rows)
+ "Successfully deleted {} performance datum rows".format(deleted_rows)
)
def __handle_chunk_removal_exception(
self, exception, cursor: CursorWrapper, any_successful_attempt: bool
):
- msg = 'Failed to delete performance data chunk'
- if hasattr(cursor, '_last_executed'):
+ msg = "Failed to delete performance data chunk"
+ if hasattr(cursor, "_last_executed"):
msg = f'{msg}, while running "{cursor._last_executed}" query'
if any_successful_attempt:
# an intermittent error may have occurred
- logger.warning(f'{msg}: (Exception: {exception})')
+ logger.warning(f"{msg}: (Exception: {exception})")
else:
logger.warning(msg)
raise NoDataCyclingAtAll() from exception
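The prune helper and _delete_in_chunks above share one shape: take the ids that are no longer referenced (or the rows past retention) and delete them a fixed-size chunk at a time, so no single DELETE grows unbounded. The loop, detached from the ORM:

    def delete_in_chunks(unused_ids, delete_func, chunk_size=100):
        # Peel off a chunk, delete it, repeat until nothing is left.
        while unused_ids:
            delete_ids = unused_ids[:chunk_size]
            delete_func(delete_ids)
            unused_ids = unused_ids[chunk_size:]

    deleted = []
    delete_in_chunks(list(range(250)), deleted.extend, chunk_size=100)
    assert len(deleted) == 250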
diff --git a/treeherder/model/data_cycling/max_runtime.py b/treeherder/model/data_cycling/max_runtime.py
index c79c95a75aa..d4dad8172d9 100644
--- a/treeherder/model/data_cycling/max_runtime.py
+++ b/treeherder/model/data_cycling/max_runtime.py
@@ -16,7 +16,7 @@ def quit_on_timeout(self):
elapsed_runtime = datetime.now() - self.started_at
if self.max_runtime < elapsed_runtime:
- raise MaxRuntimeExceeded('Max runtime for performance data cycling exceeded')
+ raise MaxRuntimeExceeded("Max runtime for performance data cycling exceeded")
def start_timer(self):
self.started_at = datetime.now()
diff --git a/treeherder/model/data_cycling/removal_strategies.py b/treeherder/model/data_cycling/removal_strategies.py
index 272dbec6d6d..2f9fc6469cb 100644
--- a/treeherder/model/data_cycling/removal_strategies.py
+++ b/treeherder/model/data_cycling/removal_strategies.py
@@ -82,7 +82,7 @@ def max_timestamp(self):
def remove(self, using: CursorWrapper):
chunk_size = self._find_ideal_chunk_size()
- if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
+ if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
# Django's queryset API doesn't support MySQL's
# DELETE statements with LIMIT constructs,
# even though this database is capable of doing that.
@@ -90,30 +90,30 @@ def remove(self, using: CursorWrapper):
# If ever this support is added in Django, replace
# raw SQL below with equivalent queryset commands.
using.execute(
- '''
+ """
DELETE FROM `performance_datum`
WHERE push_timestamp <= %s
LIMIT %s
- ''',
+ """,
[self._max_timestamp, chunk_size],
)
else:
deleted, _ = PerformanceDatum.objects.filter(
id__in=PerformanceDatum.objects.filter(
push_timestamp__lte=self._max_timestamp
- ).values_list('id')[:chunk_size]
+ ).values_list("id")[:chunk_size]
).delete()
using.rowcount = deleted
@property
def name(self) -> str:
- return 'main removal strategy'
+ return "main removal strategy"
def _find_ideal_chunk_size(self) -> int:
- max_id = self._manager.filter(push_timestamp__gt=self._max_timestamp).order_by('-id')[0].id
+ max_id = self._manager.filter(push_timestamp__gt=self._max_timestamp).order_by("-id")[0].id
older_ids = self._manager.filter(
push_timestamp__lte=self._max_timestamp, id__lte=max_id
- ).order_by('id')[: self._chunk_size]
+ ).order_by("id")[: self._chunk_size]
return len(older_ids) or self._chunk_size
@@ -147,7 +147,7 @@ def max_timestamp(self):
@property
def try_repo(self):
if self.__try_repo_id is None:
- self.__try_repo_id = Repository.objects.get(name='try').id
+ self.__try_repo_id = Repository.objects.get(name="try").id
return self.__try_repo_id
@property
@@ -155,7 +155,7 @@ def target_signatures(self):
if self.__target_signatures is None:
self.__target_signatures = self.try_signatures[: self.SIGNATURE_BULK_SIZE]
if len(self.__target_signatures) == 0:
- msg = 'No try signatures found.'
+ msg = "No try signatures found."
logger.warning(msg) # no try data is not normal
raise LookupError(msg)
return self.__target_signatures
@@ -165,8 +165,8 @@ def try_signatures(self):
if self.__try_signatures is None:
self.__try_signatures = list(
PerformanceSignature.objects.filter(repository=self.try_repo)
- .order_by('-id')
- .values_list('id', flat=True)
+ .order_by("-id")
+ .values_list("id", flat=True)
)
return self.__try_signatures
@@ -185,15 +185,15 @@ def remove(self, using: CursorWrapper):
self.__lookup_new_signature() # to remove data from
except LookupError as ex:
- logger.debug(f'Could not target any (new) try signature to delete data from. {ex}')
+ logger.debug(f"Could not target any (new) try signature to delete data from. {ex}")
break
@property
def name(self) -> str:
- return 'try data removal strategy'
+ return "try data removal strategy"
def __attempt_remove(self, using):
- if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
+ if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
# Django's queryset API doesn't support MySQL's
# DELETE statements with LIMIT constructs,
# even though this database is capable of doing that.
@@ -201,13 +201,13 @@ def __attempt_remove(self, using):
# If ever this support is added in Django, replace
# raw SQL below with equivalent queryset commands.
total_signatures = len(self.target_signatures)
- from_target_signatures = ' OR '.join(['signature_id = %s'] * total_signatures)
+ from_target_signatures = " OR ".join(["signature_id = %s"] * total_signatures)
- delete_try_data = f'''
+ delete_try_data = f"""
DELETE FROM `performance_datum`
WHERE repository_id = %s AND push_timestamp <= %s AND ({from_target_signatures})
LIMIT %s
- '''
+ """
using.execute(
delete_try_data,
@@ -219,7 +219,7 @@ def __attempt_remove(self, using):
repository_id=self.try_repo,
push_timestamp__lte=self._max_timestamp,
signature_id__in=self.target_signatures,
- ).values_list('id')[: self._chunk_size]
+ ).values_list("id")[: self._chunk_size]
).delete()
using.rowcount = deleted
@@ -228,7 +228,7 @@ def __lookup_new_signature(self):
del self.__try_signatures[: self.SIGNATURE_BULK_SIZE]
if len(self.__target_signatures) == 0:
- raise LookupError('Exhausted all signatures originating from try repository.')
+ raise LookupError("Exhausted all signatures originating from try repository.")
class IrrelevantDataRemoval(RemovalStrategy):
@@ -239,11 +239,11 @@ class IrrelevantDataRemoval(RemovalStrategy):
"""
RELEVANT_REPO_NAMES = [
- 'autoland',
- 'mozilla-central',
- 'mozilla-beta',
- 'fenix',
- 'reference-browser',
+ "autoland",
+ "mozilla-central",
+ "mozilla-beta",
+ "fenix",
+ "reference-browser",
]
@property
@@ -268,7 +268,7 @@ def irrelevant_repositories(self):
if self.__irrelevant_repos is None:
self.__irrelevant_repos = list(
Repository.objects.exclude(name__in=self.RELEVANT_REPO_NAMES).values_list(
- 'id', flat=True
+ "id", flat=True
)
)
return self.__irrelevant_repos
@@ -281,12 +281,12 @@ def irrelevant_repo(self):
@property
def name(self) -> str:
- return 'irrelevant data removal strategy'
+ return "irrelevant data removal strategy"
def remove(self, using: CursorWrapper):
chunk_size = self._find_ideal_chunk_size()
- if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
+ if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
# Django's queryset API doesn't support MySQL's
# DELETE statements with LIMIT constructs,
# even though this database is capable of doing that.
@@ -294,11 +294,11 @@ def remove(self, using: CursorWrapper):
# If ever this support is added in Django, replace
# raw SQL below with equivalent queryset commands.
using.execute(
- '''
+ """
DELETE FROM `performance_datum`
WHERE repository_id = %s AND push_timestamp <= %s
LIMIT %s
- ''',
+ """,
[
self.irrelevant_repo,
self._max_timestamp,
@@ -309,7 +309,7 @@ def remove(self, using: CursorWrapper):
deleted, _ = PerformanceDatum.objects.filter(
id__in=PerformanceDatum.objects.filter(
repository_id=self.irrelevant_repo, push_timestamp__lte=self._max_timestamp
- ).values_list('id')[:chunk_size]
+ ).values_list("id")[:chunk_size]
).delete()
using.rowcount = deleted
@@ -317,7 +317,7 @@ def _find_ideal_chunk_size(self) -> int:
max_id_of_non_expired_row = (
self._manager.filter(push_timestamp__gt=self._max_timestamp)
.filter(repository_id__in=self.irrelevant_repositories)
- .order_by('-id')[0]
+ .order_by("-id")[0]
.id
)
older_perf_data_rows = (
@@ -325,7 +325,7 @@ def _find_ideal_chunk_size(self) -> int:
push_timestamp__lte=self._max_timestamp, id__lte=max_id_of_non_expired_row
)
.filter(repository_id__in=self.irrelevant_repositories)
- .order_by('id')[: self._chunk_size]
+ .order_by("id")[: self._chunk_size]
)
return len(older_perf_data_rows) or self._chunk_size
@@ -358,7 +358,7 @@ def target_signature(self) -> PerformanceSignature:
if self._target_signature is None:
self._target_signature = self.removable_signatures.pop()
except IndexError:
- msg = 'No stalled signature found.'
+ msg = "No stalled signature found."
logger.warning(msg) # no stalled data is not normal
raise LookupError(msg)
return self._target_signature
@@ -368,7 +368,7 @@ def removable_signatures(self) -> List[PerformanceSignature]:
if self._removable_signatures is None:
self._removable_signatures = list(
PerformanceSignature.objects.filter(last_updated__lte=self._max_timestamp).order_by(
- 'last_updated'
+ "last_updated"
)
)
self._removable_signatures = [
@@ -390,7 +390,7 @@ def remove(self, using: CursorWrapper):
self.__lookup_new_signature() # to remove data from
except LookupError as ex:
logger.debug(
- f'Could not target any (new) stalled signature to delete data from. {ex}'
+ f"Could not target any (new) stalled signature to delete data from. {ex}"
)
break
@@ -400,10 +400,10 @@ def max_timestamp(self) -> datetime:
@property
def name(self) -> str:
- return 'stalled data removal strategy'
+ return "stalled data removal strategy"
def __attempt_remove(self, using: CursorWrapper):
- if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
+ if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
# Django's queryset API doesn't support MySQL's
# DELETE statements with LIMIT constructs,
# even though this database is capable of doing that.
@@ -411,11 +411,11 @@ def __attempt_remove(self, using: CursorWrapper):
# If ever this support is added in Django, replace
# raw SQL below with equivalent queryset commands.
using.execute(
- '''
+ """
DELETE FROM `performance_datum`
WHERE repository_id = %s AND signature_id = %s AND push_timestamp <= %s
LIMIT %s
- ''',
+ """,
[
self.target_signature.repository_id,
self.target_signature.id,
@@ -429,7 +429,7 @@ def __attempt_remove(self, using: CursorWrapper):
repository_id=self.target_signature.repository_id,
signature_id=self.target_signature.id,
push_timestamp__lte=self._max_timestamp,
- ).values_list('id')[: self._chunk_size]
+ ).values_list("id")[: self._chunk_size]
).delete()
using.rowcount = deleted
@@ -437,4 +437,4 @@ def __lookup_new_signature(self):
try:
self._target_signature = self._removable_signatures.pop()
except IndexError:
- raise LookupError('Exhausted all stalled signatures.')
+ raise LookupError("Exhausted all stalled signatures.")
diff --git a/treeherder/model/data_cycling/signature_remover.py b/treeherder/model/data_cycling/signature_remover.py
index 4896fa94e04..605f764d8be 100644
--- a/treeherder/model/data_cycling/signature_remover.py
+++ b/treeherder/model/data_cycling/signature_remover.py
@@ -64,7 +64,7 @@ def remove_in_chunks(self, potentially_empty_signatures: QuerySet):
@staticmethod
def _remove_empty_try_signatures(signatures: QuerySet):
- try_signatures = signatures.filter(repository__name='try')
+ try_signatures = signatures.filter(repository__name="try")
for perf_signature in try_signatures:
if not perf_signature.has_performance_data():
perf_signature.delete()
@@ -98,7 +98,7 @@ def __delete_and_notify(self, signatures: List[PerformanceSignature]) -> bool:
self._send_notification()
except TaskclusterRestFailure as ex:
logger.warning(
- f'Failed to atomically delete perf signatures & notify about this. (Reason: {ex})'
+ f"Failed to atomically delete perf signatures & notify about this. (Reason: {ex})"
)
return False
diff --git a/treeherder/model/data_cycling/utils.py b/treeherder/model/data_cycling/utils.py
index bdbd2dedd3d..7e05c05e166 100644
--- a/treeherder/model/data_cycling/utils.py
+++ b/treeherder/model/data_cycling/utils.py
@@ -1,8 +1,8 @@
def has_valid_explicit_days(func):
def wrapper(*args, **kwargs):
- days = kwargs.get('days')
+ days = kwargs.get("days")
if days is not None:
- raise ValueError('Cannot override performance data retention parameters.')
+ raise ValueError("Cannot override performance data retention parameters.")
func(*args, **kwargs)
return wrapper
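has_valid_explicit_days exists so a Perfherder cycling entry point can never be handed an ad-hoc retention window: any days keyword raises before the wrapped function runs. Usage looks like this (the decorated function is a placeholder):

    def has_valid_explicit_days(func):
        def wrapper(*args, **kwargs):
            if kwargs.get("days") is not None:
                raise ValueError("Cannot override performance data retention parameters.")
            func(*args, **kwargs)

        return wrapper

    @has_valid_explicit_days
    def cycle(**kwargs):
        pass

    cycle()  # allowed: retention stays at the configured default
    try:
        cycle(days=30)  # rejected
    except ValueError:
        pass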
diff --git a/treeherder/model/error_summary.py b/treeherder/model/error_summary.py
index 340dd92746d..00b42671515 100644
--- a/treeherder/model/error_summary.py
+++ b/treeherder/model/error_summary.py
@@ -16,13 +16,13 @@
LINE_CACHE_TIMEOUT_DAYS = 21
LINE_CACHE_TIMEOUT = 86400 * LINE_CACHE_TIMEOUT_DAYS
-LEAK_RE = re.compile(r'\d+ bytes leaked \((.+)\)$|leak at (.+)$')
-CRASH_RE = re.compile(r'.+ application crashed \[@ (.+)\] \|.+')
-MOZHARNESS_RE = re.compile(r'^\d+:\d+:\d+[ ]+(?:DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL) - [ ]?')
-MARIONETTE_RE = re.compile(r'.+marionette([_harness/]?).*/test_.+.py ([A-Za-z]+).+')
+LEAK_RE = re.compile(r"\d+ bytes leaked \((.+)\)$|leak at (.+)$")
+CRASH_RE = re.compile(r".+ application crashed \[@ (.+)\] \|.+")
+MOZHARNESS_RE = re.compile(r"^\d+:\d+:\d+[ ]+(?:DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL) - [ ]?")
+MARIONETTE_RE = re.compile(r".+marionette([_harness/]?).*/test_.+.py ([A-Za-z]+).+")
PROCESS_ID_RE = re.compile(r"(?:PID \d+|GECKO\(\d+\)) \| +")
-REFTEST_RE = re.compile(r'\s+[=!]=\s+.*')
-PREFIX_PATTERN = r'^(TEST-UNEXPECTED-\S+|PROCESS-CRASH)\s+\|\s+'
+REFTEST_RE = re.compile(r"\s+[=!]=\s+.*")
+PREFIX_PATTERN = r"^(TEST-UNEXPECTED-\S+|PROCESS-CRASH)\s+\|\s+"
def get_error_summary(job, queryset=None):
@@ -32,15 +32,15 @@ def get_error_summary(job, queryset=None):
Caches the results if there are any.
"""
- cache_key = 'error-summary-{}'.format(job.id)
+ cache_key = "error-summary-{}".format(job.id)
cached_error_summary = cache.get(cache_key)
if cached_error_summary is not None:
return cached_error_summary
# add support for error line caching
- line_cache_key = 'mc_error_lines'
+ line_cache_key = "mc_error_lines"
if job.repository == "comm-central":
- line_cache_key = 'cc_error_lines'
+ line_cache_key = "cc_error_lines"
line_cache = cache.get(line_cache_key)
if line_cache is None:
line_cache = {str(job.submit_time.date()): {}}
@@ -48,7 +48,7 @@ def get_error_summary(job, queryset=None):
dates = list(line_cache.keys())
dates.sort()
for d in dates:
- dTime = datetime.datetime.strptime(d, '%Y-%m-%d')
+ dTime = datetime.datetime.strptime(d, "%Y-%m-%d")
if dTime <= (job.submit_time - datetime.timedelta(days=LINE_CACHE_TIMEOUT_DAYS)):
del line_cache[d]
else:
@@ -79,14 +79,14 @@ def get_error_summary(job, queryset=None):
try:
cache.set(cache_key, error_summary, BUG_SUGGESTION_CACHE_TIMEOUT)
except Exception as e:
- newrelic.agent.record_custom_event('error caching error_summary for job', job.id)
- logger.error('error caching error_summary for job %s: %s', job.id, e, exc_info=True)
+ newrelic.agent.record_custom_event("error caching error_summary for job", job.id)
+ logger.error("error caching error_summary for job %s: %s", job.id, e, exc_info=True)
try:
cache.set(line_cache_key, line_cache, LINE_CACHE_TIMEOUT)
except Exception as e:
- newrelic.agent.record_custom_event('error caching error_lines for job', job.id)
- logger.error('error caching error_lines for job %s: %s', job.id, e, exc_info=True)
+ newrelic.agent.record_custom_event("error caching error_lines for job", job.id)
+ logger.error("error caching error_lines for job %s: %s", job.id, e, exc_info=True)
return error_summary
@@ -125,7 +125,7 @@ def bug_suggestions_line(
for day in line_cache.keys():
counter += line_cache[day].get(cache_clean_line, 0)
- count_branches = ['autoland', 'mozilla-central', 'comm-central']
+ count_branches = ["autoland", "mozilla-central", "comm-central"]
if project and str(project.name) in count_branches:
if cache_clean_line not in line_cache[today].keys():
line_cache[today][cache_clean_line] = 0
@@ -152,22 +152,22 @@ def bug_suggestions_line(
continue
if term not in term_cache:
term_cache[term] = Bugscache.search(term)
- bugs['open_recent'].extend(
+ bugs["open_recent"].extend(
[
bug_to_check
- for bug_to_check in term_cache[term]['open_recent']
- if bug_to_check['id'] not in [bug['id'] for bug in bugs['open_recent']]
+ for bug_to_check in term_cache[term]["open_recent"]
+ if bug_to_check["id"] not in [bug["id"] for bug in bugs["open_recent"]]
]
)
- bugs['all_others'].extend(
+ bugs["all_others"].extend(
[
bug_to_check
- for bug_to_check in term_cache[term]['all_others']
- if bug_to_check['id'] not in [bug['id'] for bug in bugs['all_others']]
+ for bug_to_check in term_cache[term]["all_others"]
+ if bug_to_check["id"] not in [bug["id"] for bug in bugs["all_others"]]
]
)
- if not bugs or not (bugs['open_recent'] or bugs['all_others']):
+ if not bugs or not (bugs["open_recent"] or bugs["all_others"]):
# no suggestions, try to use
# the crash signature as search term
crash_signature = get_crash_signature(clean_line)
@@ -197,18 +197,18 @@ def bug_suggestions_line(
def get_cleaned_line(line):
"""Strip possible mozharness bits from the given line."""
- line_to_clean = MOZHARNESS_RE.sub('', line).strip()
- return PROCESS_ID_RE.sub('', line_to_clean)
+ line_to_clean = MOZHARNESS_RE.sub("", line).strip()
+ return PROCESS_ID_RE.sub("", line_to_clean)
def cache_clean_error_line(line):
- cache_clean_line = re.sub(r' [0-9]+\.[0-9]+ ', ' X ', line)
- cache_clean_line = re.sub(r' leaked [0-9]+ window(s)', ' leaked X window(s)', cache_clean_line)
- cache_clean_line = re.sub(r' [0-9]+ bytes leaked', ' X bytes leaked', cache_clean_line)
- cache_clean_line = re.sub(r' value=[0-9]+', ' value=*', cache_clean_line)
- cache_clean_line = re.sub(r'ot [0-9]+, expected [0-9]+', 'ot X, expected Y', cache_clean_line)
+ cache_clean_line = re.sub(r" [0-9]+\.[0-9]+ ", " X ", line)
+ cache_clean_line = re.sub(r" leaked [0-9]+ window(s)", " leaked X window(s)", cache_clean_line)
+ cache_clean_line = re.sub(r" [0-9]+ bytes leaked", " X bytes leaked", cache_clean_line)
+ cache_clean_line = re.sub(r" value=[0-9]+", " value=*", cache_clean_line)
+ cache_clean_line = re.sub(r"ot [0-9]+, expected [0-9]+", "ot X, expected Y", cache_clean_line)
cache_clean_line = re.sub(
- r' http://localhost:[0-9]+/', ' http://localhost:X/', cache_clean_line
+ r" http://localhost:[0-9]+/", " http://localhost:X/", cache_clean_line
)
return cache_clean_line
@@ -231,7 +231,7 @@ def get_error_search_term_and_path(error_line):
path_end = None
if len(tokens) >= 3:
- is_crash = 'PROCESS-CRASH' in tokens[0]
+ is_crash = "PROCESS-CRASH" in tokens[0]
# it's in the "FAILURE-TYPE | testNameOrFilePath | message" type format.
test_name_or_path = tokens[1]
message = tokens[2]
@@ -276,14 +276,14 @@ def get_error_search_term_and_path(error_line):
# false positives, but means we're more susceptible to false negatives due to
# run-to-run variances in the error messages (eg paths, process IDs).
if search_term:
- search_term = re.sub(PREFIX_PATTERN, '', search_term)
+ search_term = re.sub(PREFIX_PATTERN, "", search_term)
search_term = search_term[:100]
# for wpt tests we have testname.html?params, we need to add a search term
# for just testname.html.
# we will now return an array
- if search_term and '?' in search_term:
- search_name = search_term.split('?')[0]
+ if search_term and "?" in search_term:
+ search_name = search_term.split("?")[0]
search_term = [search_term, search_name]
else:
search_term = [search_term]
@@ -317,24 +317,24 @@ def is_helpful_search_term(search_term):
search_term = search_term.strip()
blacklist = [
- 'automation.py',
- 'remoteautomation.py',
- 'Shutdown',
- 'undefined',
- 'Main app process exited normally',
- 'Traceback (most recent call last):',
- 'Return code: 0',
- 'Return code: 1',
- 'Return code: 2',
- 'Return code: 10',
- 'mozalloc_abort(char const*)',
- 'mozalloc_abort',
- 'CrashingThread(void *)',
- 'gtest',
- 'Last test finished',
- 'leakcheck',
- 'LeakSanitizer',
- '# TBPL FAILURE #',
+ "automation.py",
+ "remoteautomation.py",
+ "Shutdown",
+ "undefined",
+ "Main app process exited normally",
+ "Traceback (most recent call last):",
+ "Return code: 0",
+ "Return code: 1",
+ "Return code: 2",
+ "Return code: 10",
+ "mozalloc_abort(char const*)",
+ "mozalloc_abort",
+ "CrashingThread(void *)",
+ "gtest",
+ "Last test finished",
+ "leakcheck",
+ "LeakSanitizer",
+ "# TBPL FAILURE #",
]
return len(search_term) > 4 and search_term not in blacklist
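The cache_clean_error_line substitutions converted above exist so failure lines that differ only in volatile numbers (timings, leaked byte counts, local ports) share one cache key and therefore one counter. A reduced copy of three of those substitutions showing the effect:

    import re

    def cache_clean_error_line(line):
        # Replace volatile numbers so near-identical failures share one cache entry.
        line = re.sub(r" [0-9]+\.[0-9]+ ", " X ", line)
        line = re.sub(r" [0-9]+ bytes leaked", " X bytes leaked", line)
        line = re.sub(r" http://localhost:[0-9]+/", " http://localhost:X/", line)
        return line

    a = cache_clean_error_line("TEST-UNEXPECTED-FAIL | leak at foo | 2048 bytes leaked")
    b = cache_clean_error_line("TEST-UNEXPECTED-FAIL | leak at foo | 4096 bytes leaked")
    assert a == b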
diff --git a/treeherder/model/management/commands/backfill_text_log_error_jobs.py b/treeherder/model/management/commands/backfill_text_log_error_jobs.py
index 1ecfe2f65d4..0fe42587758 100644
--- a/treeherder/model/management/commands/backfill_text_log_error_jobs.py
+++ b/treeherder/model/management/commands/backfill_text_log_error_jobs.py
@@ -14,20 +14,20 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- '--chunk-size',
- action='store',
- dest='chunk_size',
+ "--chunk-size",
+ action="store",
+ dest="chunk_size",
default=1000,
type=int,
- help=('Define the size of the chunks for querying the TextLogError table'),
+ help=("Define the size of the chunks for querying the TextLogError table"),
)
def handle(self, *args, **options):
- queryset = TextLogError.objects.select_related('step').filter(job__isnull=True)
- chunk_size = options['chunk_size']
+ queryset = TextLogError.objects.select_related("step").filter(job__isnull=True)
+ chunk_size = options["chunk_size"]
for chunked_queryset in chunked_qs(
- queryset, chunk_size=chunk_size, fields=['id', 'step', 'job']
+ queryset, chunk_size=chunk_size, fields=["id", "step", "job"]
):
if not chunked_queryset:
return
@@ -35,12 +35,12 @@ def handle(self, *args, **options):
for row in chunked_queryset:
row.job_id = row.step.job_id
- TextLogError.objects.bulk_update(chunked_queryset, ['job'])
+ TextLogError.objects.bulk_update(chunked_queryset, ["job"])
logger.warning(
- 'successfully added job_id in TextLogError table to rows {} to {}'.format(
+ "successfully added job_id in TextLogError table to rows {} to {}".format(
chunked_queryset[0].id, chunked_queryset[-1].id
)
)
- logger.warning('successfully finished backfilling job_ids in the TextLogError table')
+ logger.warning("successfully finished backfilling job_ids in the TextLogError table")
diff --git a/treeherder/model/management/commands/cache_failure_history.py b/treeherder/model/management/commands/cache_failure_history.py
index 402012ae675..7925da70990 100644
--- a/treeherder/model/management/commands/cache_failure_history.py
+++ b/treeherder/model/management/commands/cache_failure_history.py
@@ -18,41 +18,41 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- '--debug',
- action='store_true',
- dest='debug',
+ "--debug",
+ action="store_true",
+ dest="debug",
default=False,
- help='Write debug messages to stdout',
+ help="Write debug messages to stdout",
)
parser.add_argument(
- '--days',
- action='store',
- dest='days',
+ "--days",
+ action="store",
+ dest="days",
default=5,
type=int,
- help='Number of history sets to store (one for each day prior to today)',
+ help="Number of history sets to store (one for each day prior to today)",
)
def handle(self, *args, **options):
- self.is_debug = options['debug']
- days = options['days']
+ self.is_debug = options["debug"]
+ days = options["days"]
self.debug("Fetching {} sets of history...".format(days))
option_map = OptionCollection.objects.get_option_collection_map()
- repository_ids = REPO_GROUPS['trunk']
+ repository_ids = REPO_GROUPS["trunk"]
for day in range(days):
push_date = datetime.datetime.now().date() - datetime.timedelta(days=day)
get_history(4, push_date, intermittent_history_days, option_map, repository_ids, True)
- self.debug(f'Cached failure history for {CACHE_KEY_ROOT}:{4}:{push_date}')
+ self.debug(f"Cached failure history for {CACHE_KEY_ROOT}:{4}:{push_date}")
get_history(
2, push_date, fixed_by_commit_history_days, option_map, repository_ids, True
)
- self.debug(f'Cached failure history for {CACHE_KEY_ROOT}:{2}:{push_date}')
+ self.debug(f"Cached failure history for {CACHE_KEY_ROOT}:{2}:{push_date}")
def debug(self, msg):
if self.is_debug:
diff --git a/treeherder/model/management/commands/cycle_data.py b/treeherder/model/management/commands/cycle_data.py
index dbf74a86c57..c55cdf22508 100644
--- a/treeherder/model/management/commands/cycle_data.py
+++ b/treeherder/model/management/commands/cycle_data.py
@@ -4,10 +4,10 @@
from treeherder.model.data_cycling import TreeherderCycler, PerfherderCycler, TREEHERDER, PERFHERDER
-logging.basicConfig(format='%(levelname)s:%(message)s')
+logging.basicConfig(format="%(levelname)s:%(message)s")
-TREEHERDER_SUBCOMMAND = 'from:treeherder'
-PERFHERDER_SUBCOMMAND = 'from:perfherder'
+TREEHERDER_SUBCOMMAND = "from:treeherder"
+PERFHERDER_SUBCOMMAND = "from:perfherder"
logger = logging.getLogger(__name__)
@@ -21,39 +21,39 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- '--debug',
- action='store_true',
- dest='is_debug',
+ "--debug",
+ action="store_true",
+ dest="is_debug",
default=False,
- help='Write debug messages to stdout',
+ help="Write debug messages to stdout",
)
parser.add_argument(
- '--days',
- action='store',
- dest='days',
+ "--days",
+ action="store",
+ dest="days",
type=int,
help=("Data cycle interval expressed in days. This only applies to Treeherder"),
)
parser.add_argument(
- '--chunk-size',
- action='store',
- dest='chunk_size',
+ "--chunk-size",
+ action="store",
+ dest="chunk_size",
default=100,
type=int,
help=(
- 'Define the size of the chunks ' 'Split the job deletes into chunks of this size'
+ "Define the size of the chunks " "Split the job deletes into chunks of this size"
),
)
parser.add_argument(
- '--sleep-time',
- action='store',
- dest='sleep_time',
+ "--sleep-time",
+ action="store",
+ dest="sleep_time",
default=0,
type=int,
- help='How many seconds to pause between each query. Ignored when cycling performance data.',
+ help="How many seconds to pause between each query. Ignored when cycling performance data.",
)
subparsers = parser.add_subparsers(
- description='Data producers from which to expire data', dest='data_source'
+ description="Data producers from which to expire data", dest="data_source"
)
subparsers.add_parser(TREEHERDER_SUBCOMMAND) # default subcommand even if not provided
@@ -65,8 +65,8 @@ def handle(self, *args, **options):
data_cycler.cycle()
def fabricate_data_cycler(self, options):
- data_source = options.pop('data_source') or TREEHERDER_SUBCOMMAND
- data_source = data_source.split(':')[1]
+ data_source = options.pop("data_source") or TREEHERDER_SUBCOMMAND
+ data_source = data_source.split(":")[1]
cls = self.CYCLER_CLASSES[data_source]
return cls(**options)
diff --git a/treeherder/model/management/commands/import_reference_data.py b/treeherder/model/management/commands/import_reference_data.py
index 6b6cfa33a0d..9dcb58bfa51 100644
--- a/treeherder/model/management/commands/import_reference_data.py
+++ b/treeherder/model/management/commands/import_reference_data.py
@@ -21,101 +21,101 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- '--server',
- action='store',
- dest='server',
- default='https://treeherder.mozilla.org',
- help='Server to get data from, default https://treeherder.mozilla.org',
+ "--server",
+ action="store",
+ dest="server",
+ default="https://treeherder.mozilla.org",
+ help="Server to get data from, default https://treeherder.mozilla.org",
)
def handle(self, *args, **options):
- c = TreeherderClient(server_url=options['server'])
+ c = TreeherderClient(server_url=options["server"])
# options / option collection hashes
for uuid, props in c.get_option_collection_hash().items():
for prop in props:
- option, _ = Option.objects.get_or_create(name=prop['name'])
+ option, _ = Option.objects.get_or_create(name=prop["name"])
OptionCollection.objects.get_or_create(option_collection_hash=uuid, option=option)
# machine platforms
for machine_platform in c.get_machine_platforms():
MachinePlatform.objects.get_or_create(
- os_name=machine_platform['os_name'],
- platform=machine_platform['platform'],
- architecture=machine_platform['architecture'],
+ os_name=machine_platform["os_name"],
+ platform=machine_platform["platform"],
+ architecture=machine_platform["architecture"],
)
# machine
for machine in c.get_machines():
Machine.objects.get_or_create(
- id=machine['id'],
- name=machine['name'],
+ id=machine["id"],
+ name=machine["name"],
defaults={
- 'first_timestamp': machine['first_timestamp'],
- 'last_timestamp': machine['last_timestamp'],
+ "first_timestamp": machine["first_timestamp"],
+ "last_timestamp": machine["last_timestamp"],
},
)
# job group
for job_group in c.get_job_groups():
JobGroup.objects.get_or_create(
- id=job_group['id'],
- symbol=job_group['symbol'],
- name=job_group['name'],
- defaults={'description': job_group['description']},
+ id=job_group["id"],
+ symbol=job_group["symbol"],
+ name=job_group["name"],
+ defaults={"description": job_group["description"]},
)
# job type
for job_type in c.get_job_types():
JobType.objects.get_or_create(
- id=job_type['id'],
- symbol=job_type['symbol'],
- name=job_type['name'],
- defaults={'description': job_type['description']},
+ id=job_type["id"],
+ symbol=job_type["symbol"],
+ name=job_type["name"],
+ defaults={"description": job_type["description"]},
)
# product
for product in c.get_products():
Product.objects.get_or_create(
- id=product['id'],
- name=product['name'],
- defaults={'description': product['description']},
+ id=product["id"],
+ name=product["name"],
+ defaults={"description": product["description"]},
)
# failure classification
for failure_classification in c.get_failure_classifications():
FailureClassification.objects.get_or_create(
- id=failure_classification['id'],
- name=failure_classification['name'],
- defaults={'description': failure_classification['description']},
+ id=failure_classification["id"],
+ name=failure_classification["name"],
+ defaults={"description": failure_classification["description"]},
)
# build platform
for build_platform in c.get_build_platforms():
BuildPlatform.objects.get_or_create(
- id=build_platform['id'],
- os_name=build_platform['os_name'],
+ id=build_platform["id"],
+ os_name=build_platform["os_name"],
defaults={
- 'platform': build_platform['platform'],
- 'architecture': build_platform['architecture'],
+ "platform": build_platform["platform"],
+ "architecture": build_platform["architecture"],
},
)
# repository and repository group
for repository in c.get_repositories():
rgroup, _ = RepositoryGroup.objects.get_or_create(
- name=repository['repository_group']['name'],
- description=repository['repository_group']['description'],
+ name=repository["repository_group"]["name"],
+ description=repository["repository_group"]["description"],
)
Repository.objects.get_or_create(
- id=repository['id'],
+ id=repository["id"],
repository_group=rgroup,
- name=repository['name'],
- dvcs_type=repository['dvcs_type'],
- url=repository['url'],
+ name=repository["name"],
+ dvcs_type=repository["dvcs_type"],
+ url=repository["url"],
defaults={
- 'codebase': repository['codebase'],
- 'description': repository['description'],
- 'active_status': repository['active_status'],
+ "codebase": repository["codebase"],
+ "description": repository["description"],
+ "active_status": repository["active_status"],
},
)
diff --git a/treeherder/model/management/commands/load_initial_data.py b/treeherder/model/management/commands/load_initial_data.py
index f2bb577cdb0..a389c6bb2d8 100644
--- a/treeherder/model/management/commands/load_initial_data.py
+++ b/treeherder/model/management/commands/load_initial_data.py
@@ -7,12 +7,12 @@ class Command(BaseCommand):
def handle(self, *args, **options):
call_command(
- 'loaddata',
- 'repository_group',
- 'repository',
- 'failure_classification',
- 'issue_tracker',
- 'performance_framework',
- 'performance_bug_templates',
- 'performance_tag',
+ "loaddata",
+ "repository_group",
+ "repository",
+ "failure_classification",
+ "issue_tracker",
+ "performance_framework",
+ "performance_bug_templates",
+ "performance_tag",
)
diff --git a/treeherder/model/models.py b/treeherder/model/models.py
index ba2f753ad40..4adfe7baab9 100644
--- a/treeherder/model/models.py
+++ b/treeherder/model/models.py
@@ -8,7 +8,7 @@
import warnings
-warnings.filterwarnings('ignore', category=DeprecationWarning, module='newrelic')
+warnings.filterwarnings("ignore", category=DeprecationWarning, module="newrelic")
import newrelic.agent
from django.conf import settings
@@ -33,7 +33,7 @@ def by_bug(self, bug_id):
return self.filter(bug_id=int(bug_id))
def by_date(self, startday, endday):
- return self.select_related('push', 'job').filter(job__push__time__range=(startday, endday))
+ return self.select_related("push", "job").filter(job__push__time__range=(startday, endday))
def by_repo(self, name, bugjobmap=True):
if name in REPO_GROUPS:
@@ -43,7 +43,7 @@ def by_repo(self, name, bugjobmap=True):
if bugjobmap
else self.filter(repository_id__in=repo)
)
- elif name == 'all':
+ elif name == "all":
return self
else:
return (
@@ -66,7 +66,7 @@ def __str__(self):
class Product(NamedModel):
class Meta:
- db_table = 'product'
+ db_table = "product"
class BuildPlatform(models.Model):
@@ -76,7 +76,7 @@ class BuildPlatform(models.Model):
architecture = models.CharField(max_length=25, blank=True, db_index=True)
class Meta:
- db_table = 'build_platform'
+ db_table = "build_platform"
unique_together = ("os_name", "platform", "architecture")
def __str__(self):
@@ -85,7 +85,7 @@ def __str__(self):
class Option(NamedModel):
class Meta:
- db_table = 'option'
+ db_table = "option"
class RepositoryGroup(NamedModel):
@@ -93,19 +93,19 @@ class RepositoryGroup(NamedModel):
description = models.TextField(blank=True)
class Meta:
- db_table = 'repository_group'
+ db_table = "repository_group"
class Repository(models.Model):
id = models.AutoField(primary_key=True)
- repository_group = models.ForeignKey('RepositoryGroup', on_delete=models.CASCADE)
+ repository_group = models.ForeignKey("RepositoryGroup", on_delete=models.CASCADE)
name = models.CharField(max_length=50, unique=True, db_index=True)
dvcs_type = models.CharField(max_length=25, db_index=True)
url = models.CharField(max_length=255)
branch = models.CharField(max_length=255, null=True, db_index=True)
codebase = models.CharField(max_length=50, blank=True, db_index=True)
description = models.TextField(blank=True)
- active_status = models.CharField(max_length=7, blank=True, default='active', db_index=True)
+ active_status = models.CharField(max_length=7, blank=True, default="active", db_index=True)
life_cycle_order = models.PositiveIntegerField(null=True, default=None)
performance_alerts_enabled = models.BooleanField(default=False)
expire_performance_data = models.BooleanField(default=True)
@@ -113,12 +113,12 @@ class Repository(models.Model):
tc_root_url = models.CharField(max_length=255, null=False, db_index=True)
class Meta:
- db_table = 'repository'
- verbose_name_plural = 'repositories'
+ db_table = "repository"
+ verbose_name_plural = "repositories"
@classmethod
def fetch_all_names(cls) -> List[str]:
- return cls.objects.values_list('name', flat=True)
+ return cls.objects.values_list("name", flat=True)
def __str__(self):
return "{0} {1}".format(self.name, self.repository_group)
@@ -141,8 +141,8 @@ class Push(models.Model):
objects = models.Manager()
class Meta:
- db_table = 'push'
- unique_together = ('repository', 'revision')
+ db_table = "push"
+ unique_together = ("repository", "revision")
def __str__(self):
return "{0} {1}".format(self.repository.name, self.revision)
@@ -158,23 +158,23 @@ def get_status(self):
Job.objects.filter(push=self)
.filter(
Q(failure_classification__isnull=True)
- | Q(failure_classification__name='not classified')
+ | Q(failure_classification__name="not classified")
)
.exclude(tier=3)
)
- status_dict = {'completed': 0, 'pending': 0, 'running': 0}
- for state, result, total in jobs.values_list('state', 'result').annotate(
- total=Count('result')
+ status_dict = {"completed": 0, "pending": 0, "running": 0}
+ for state, result, total in jobs.values_list("state", "result").annotate(
+ total=Count("result")
):
- if state == 'completed':
+ if state == "completed":
status_dict[result] = total
status_dict[state] += total
else:
status_dict[state] = total
- if 'superseded' in status_dict:
+ if "superseded" in status_dict:
# backward compatibility for API consumers
- status_dict['coalesced'] = status_dict['superseded']
+ status_dict["coalesced"] = status_dict["superseded"]
return status_dict
@@ -184,14 +184,14 @@ class Commit(models.Model):
A single commit in a push
"""
- push = models.ForeignKey(Push, on_delete=models.CASCADE, related_name='commits')
+ push = models.ForeignKey(Push, on_delete=models.CASCADE, related_name="commits")
revision = models.CharField(max_length=40, db_index=True)
author = models.CharField(max_length=150)
comments = models.TextField()
class Meta:
- db_table = 'commit'
- unique_together = ('push', 'revision')
+ db_table = "commit"
+ unique_together = ("push", "revision")
def __str__(self):
return "{0} {1}".format(self.push.repository.name, self.revision)
@@ -204,7 +204,7 @@ class MachinePlatform(models.Model):
architecture = models.CharField(max_length=25, blank=True, db_index=True)
class Meta:
- db_table = 'machine_platform'
+ db_table = "machine_platform"
unique_together = ("os_name", "platform", "architecture")
def __str__(self):
@@ -221,14 +221,14 @@ class Bugscache(models.Model):
crash_signature = models.TextField(blank=True)
keywords = models.TextField(blank=True)
modified = models.DateTimeField()
- whiteboard = models.CharField(max_length=100, blank=True, default='')
+ whiteboard = models.CharField(max_length=100, blank=True, default="")
processed_update = models.BooleanField(default=True)
class Meta:
- db_table = 'bugscache'
- verbose_name_plural = 'bugscache'
+ db_table = "bugscache"
+ verbose_name_plural = "bugscache"
indexes = [
- models.Index(fields=['summary']),
+ models.Index(fields=["summary"]),
]
def __str__(self):
@@ -253,14 +253,14 @@ def search(self, search_term):
# see https://bugzilla.mozilla.org/show_bug.cgi?id=1704311
search_term_fulltext = self.sanitized_search_term(search_term)
- if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
+ if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
# Substitute escape and wildcard characters, so the search term is used
# literally in the LIKE statement.
search_term_like = (
- search_term.replace('=', '==')
- .replace('%', '=%')
- .replace('_', '=_')
- .replace('\\"', '')
+ search_term.replace("=", "==")
+ .replace("%", "=%")
+ .replace("_", "=_")
+ .replace('\\"', "")
)
recent_qs = self.objects.raw(
@@ -297,12 +297,12 @@ def search(self, search_term):
or "\\" + search_term in match["summary"]
or "," + search_term in match["summary"]
]
- open_recent = [x for x in all_data if x["resolution"] == '']
- all_others = [x for x in all_data if x["resolution"] != '']
+ open_recent = [x for x in all_data if x["resolution"] == ""]
+ all_others = [x for x in all_data if x["resolution"] != ""]
except ProgrammingError as e:
newrelic.agent.notice_error()
logger.error(
- 'Failed to execute FULLTEXT search on Bugscache, error={}, SQL={}'.format(
+ "Failed to execute FULLTEXT search on Bugscache, error={}, SQL={}".format(
e, recent_qs.query.__str__()
)
)
@@ -317,8 +317,8 @@ class BugzillaComponent(models.Model):
component = models.CharField(max_length=60)
class Meta:
- db_table = 'bugzilla_component'
- verbose_name_plural = 'bugzilla_components'
+ db_table = "bugzilla_component"
+ verbose_name_plural = "bugzilla_components"
unique_together = ("product", "component")
def __str__(self):
@@ -328,11 +328,11 @@ def __str__(self):
class FilesBugzillaMap(models.Model):
path = models.CharField(max_length=255, unique=True, db_index=True)
file_name = models.CharField(max_length=255, db_index=True)
- bugzilla_component = models.ForeignKey('BugzillaComponent', on_delete=models.CASCADE)
+ bugzilla_component = models.ForeignKey("BugzillaComponent", on_delete=models.CASCADE)
class Meta:
- db_table = 'file_bugzilla_component'
- verbose_name_plural = 'files_bugzilla_components'
+ db_table = "file_bugzilla_component"
+ verbose_name_plural = "files_bugzilla_components"
def __str__(self):
return "{0}".format(self.path)
@@ -343,34 +343,34 @@ class BugzillaSecurityGroup(models.Model):
security_group = models.CharField(max_length=60)
class Meta:
- db_table = 'bugzilla_security_group'
- verbose_name_plural = 'bugzilla_security_groups'
+ db_table = "bugzilla_security_group"
+ verbose_name_plural = "bugzilla_security_groups"
class Machine(NamedModel):
class Meta:
- db_table = 'machine'
+ db_table = "machine"
class JobGroup(models.Model):
id = models.AutoField(primary_key=True)
- symbol = models.CharField(max_length=25, default='?', db_index=True)
+ symbol = models.CharField(max_length=25, default="?", db_index=True)
name = models.CharField(max_length=100)
description = models.TextField(blank=True)
class Meta:
- db_table = 'job_group'
- unique_together = ('name', 'symbol')
+ db_table = "job_group"
+ unique_together = ("name", "symbol")
def __str__(self):
return "{0} ({1})".format(self.name, self.symbol)
class OptionCollectionManager(models.Manager):
- cache_key = 'option_collection_map'
- '''
+ cache_key = "option_collection_map"
+ """
Convenience function to determine the option collection map
- '''
+ """
def get_option_collection_map(self):
option_collection_map = cache.get(self.cache_key)
@@ -380,12 +380,12 @@ def get_option_collection_map(self):
option_collection_map = {}
for hash, option_name in OptionCollection.objects.values_list(
- 'option_collection_hash', 'option__name'
+ "option_collection_hash", "option__name"
):
if not option_collection_map.get(hash):
option_collection_map[hash] = option_name
else:
- option_collection_map[hash] += ' ' + option_name
+ option_collection_map[hash] += " " + option_name
# Caches for the default of 5 minutes.
cache.set(self.cache_key, option_collection_map)
@@ -405,12 +405,12 @@ def calculate_hash(options):
options = sorted(list(options))
sha_hash = sha1()
# equivalent to loop over the options and call sha_hash.update()
- sha_hash.update(''.join(options).encode('utf-8'))
+ sha_hash.update("".join(options).encode("utf-8"))
return sha_hash.hexdigest()
class Meta:
- db_table = 'option_collection'
- unique_together = ('option_collection_hash', 'option')
+ db_table = "option_collection"
+ unique_together = ("option_collection_hash", "option")
def __str__(self):
return "{0}".format(self.option)
@@ -418,13 +418,13 @@ def __str__(self):
class JobType(models.Model):
id = models.AutoField(primary_key=True)
- symbol = models.CharField(max_length=25, default='?', db_index=True)
+ symbol = models.CharField(max_length=25, default="?", db_index=True)
name = models.CharField(max_length=140)
description = models.TextField(blank=True)
class Meta:
- db_table = 'job_type'
- unique_together = (('name', 'symbol'),)
+ db_table = "job_type"
+ unique_together = (("name", "symbol"),)
def __str__(self):
return "{0} ({1})".format(self.name, self.symbol)
@@ -432,7 +432,7 @@ def __str__(self):
class FailureClassification(NamedModel):
class Meta:
- db_table = 'failure_classification'
+ db_table = "failure_classification"
class ReferenceDataSignatures(models.Model):
@@ -463,10 +463,10 @@ class ReferenceDataSignatures(models.Model):
first_submission_timestamp = models.IntegerField(db_index=True)
class Meta:
- db_table = 'reference_data_signatures'
+ db_table = "reference_data_signatures"
# Remove if/when the model is renamed to 'ReferenceDataSignature'.
- verbose_name_plural = 'reference data signatures'
- unique_together = ('name', 'signature', 'build_system_type', 'repository')
+ verbose_name_plural = "reference data signatures"
+ unique_together = ("name", "signature", "build_system_type", "repository")
class JobManager(models.Manager):
@@ -537,11 +537,11 @@ class Job(models.Model):
FAILED = 255
AUTOCLASSIFY_STATUSES = (
- (PENDING, 'pending'),
- (CROSSREFERENCED, 'crossreferenced'),
- (AUTOCLASSIFIED, 'autoclassified'),
- (SKIPPED, 'skipped'),
- (FAILED, 'failed'),
+ (PENDING, "pending"),
+ (CROSSREFERENCED, "crossreferenced"),
+ (AUTOCLASSIFIED, "autoclassified"),
+ (SKIPPED, "skipped"),
+ (FAILED, "failed"),
)
repository = models.ForeignKey(Repository, on_delete=models.CASCADE)
@@ -553,15 +553,15 @@ class Job(models.Model):
# TODO: Remove coalesced_to_guid next time the jobs table is modified (bug 1402992)
coalesced_to_guid = models.CharField(max_length=50, null=True, default=None)
signature = models.ForeignKey(ReferenceDataSignatures, on_delete=models.CASCADE)
- build_platform = models.ForeignKey(BuildPlatform, on_delete=models.CASCADE, related_name='jobs')
+ build_platform = models.ForeignKey(BuildPlatform, on_delete=models.CASCADE, related_name="jobs")
machine_platform = models.ForeignKey(MachinePlatform, on_delete=models.CASCADE)
machine = models.ForeignKey(Machine, on_delete=models.CASCADE)
option_collection_hash = models.CharField(max_length=64)
- job_type = models.ForeignKey(JobType, on_delete=models.CASCADE, related_name='jobs')
- job_group = models.ForeignKey(JobGroup, on_delete=models.CASCADE, related_name='jobs')
+ job_type = models.ForeignKey(JobType, on_delete=models.CASCADE, related_name="jobs")
+ job_group = models.ForeignKey(JobGroup, on_delete=models.CASCADE, related_name="jobs")
product = models.ForeignKey(Product, on_delete=models.CASCADE)
failure_classification = models.ForeignKey(
- FailureClassification, on_delete=models.CASCADE, related_name='jobs'
+ FailureClassification, on_delete=models.CASCADE, related_name="jobs"
)
who = models.CharField(max_length=50)
reason = models.CharField(max_length=125)
@@ -576,22 +576,22 @@ class Job(models.Model):
running_eta = models.PositiveIntegerField(null=True, default=None)
tier = models.PositiveIntegerField()
- push = models.ForeignKey(Push, on_delete=models.CASCADE, related_name='jobs')
+ push = models.ForeignKey(Push, on_delete=models.CASCADE, related_name="jobs")
class Meta:
- db_table = 'job'
+ db_table = "job"
index_together = [
# these speed up the various permutations of the "similar jobs"
# queries
- ('repository', 'job_type', 'start_time'),
- ('repository', 'build_platform', 'job_type', 'start_time'),
- ('repository', 'option_collection_hash', 'job_type', 'start_time'),
- ('repository', 'build_platform', 'option_collection_hash', 'job_type', 'start_time'),
+ ("repository", "job_type", "start_time"),
+ ("repository", "build_platform", "job_type", "start_time"),
+ ("repository", "option_collection_hash", "job_type", "start_time"),
+ ("repository", "build_platform", "option_collection_hash", "job_type", "start_time"),
# this is intended to speed up queries for specific platform /
# option collections on a push
- ('machine_platform', 'option_collection_hash', 'push'),
+ ("machine_platform", "option_collection_hash", "push"),
# speed up cycle data
- ('repository', 'submit_time'),
+ ("repository", "submit_time"),
]
@property
@@ -605,8 +605,8 @@ def __str__(self):
return "{0} {1} {2}".format(self.id, self.repository, self.guid)
def get_platform_option(self, option_collection_map=None):
- if not hasattr(self, 'platform_option'):
- self.platform_option = ''
+ if not hasattr(self, "platform_option"):
+ self.platform_option = ""
option_hash = self.option_collection_hash
if option_hash:
if not option_collection_map:
@@ -666,7 +666,7 @@ def fetch_associated_decision_job(self):
decision_type = JobType.objects.filter(name="Gecko Decision Task", symbol="D")
return Job.objects.get(
repository_id=self.repository_id,
- job_type_id=Subquery(decision_type.values('id')[:1]),
+ job_type_id=Subquery(decision_type.values("id")[:1]),
push_id=self.push_id,
)
@@ -684,7 +684,7 @@ class TaskclusterMetadata(models.Model):
"""
job = models.OneToOneField(
- Job, on_delete=models.CASCADE, primary_key=True, related_name='taskcluster_metadata'
+ Job, on_delete=models.CASCADE, primary_key=True, related_name="taskcluster_metadata"
)
task_id = models.CharField(max_length=22, validators=[MinLengthValidator(22)], db_index=True)
@@ -707,10 +707,10 @@ class JobLog(models.Model):
SKIPPED_SIZE = 3
STATUSES = (
- (PENDING, 'pending'),
- (PARSED, 'parsed'),
- (FAILED, 'failed'),
- (SKIPPED_SIZE, 'skipped-size'),
+ (PENDING, "pending"),
+ (PARSED, "parsed"),
+ (FAILED, "failed"),
+ (SKIPPED_SIZE, "skipped-size"),
)
job = models.ForeignKey(Job, on_delete=models.CASCADE, related_name="job_log")
@@ -720,14 +720,14 @@ class JobLog(models.Model):
class Meta:
db_table = "job_log"
- unique_together = ('job', 'name', 'url')
+ unique_together = ("job", "name", "url")
def __str__(self):
return "{0} {1} {2} {3}".format(self.id, self.job.guid, self.name, self.status)
def update_status(self, status):
self.status = status
- self.save(update_fields=['status'])
+ self.save(update_fields=["status"])
class BugJobMap(models.Model):
@@ -750,7 +750,7 @@ class BugJobMap(models.Model):
class Meta:
db_table = "bug_job_map"
- unique_together = ('job', 'bug_id')
+ unique_together = ("job", "bug_id")
@property
def who(self):
@@ -832,12 +832,12 @@ def _update_failure_type(self):
been denormalised onto Job.
"""
# update the job classification
- note = JobNote.objects.filter(job=self.job).order_by('-created').first()
+ note = JobNote.objects.filter(job=self.job).order_by("-created").first()
if note:
self.job.failure_classification_id = note.failure_classification.id
else:
self.job.failure_classification_id = FailureClassification.objects.get(
- name='not classified'
+ name="not classified"
).id
self.job.save()
@@ -869,11 +869,11 @@ def _ensure_classification(self):
existing_bugs = list(
ClassifiedFailure.objects.filter(
error_matches__text_log_error=text_log_error
- ).values_list('bug_number', flat=True)
+ ).values_list("bug_number", flat=True)
)
new_bugs = self.job.bugjobmap_set.exclude(bug_id__in=existing_bugs).values_list(
- 'bug_id', flat=True
+ "bug_id", flat=True
)
if not new_bugs:
@@ -907,7 +907,7 @@ def __str__(self):
class FailureLine(models.Model):
# We make use of prefix indices for several columns in this table which
# can't be expressed in django syntax so are created with raw sql in migrations.
- STATUS_LIST = ('PASS', 'FAIL', 'OK', 'ERROR', 'TIMEOUT', 'CRASH', 'ASSERT', 'SKIP', 'NOTRUN')
+ STATUS_LIST = ("PASS", "FAIL", "OK", "ERROR", "TIMEOUT", "CRASH", "ASSERT", "SKIP", "NOTRUN")
# Truncated is a special action that we use to indicate that the list of failure lines
# was truncated according to settings.FAILURE_LINES_CUTOFF.
ACTION_LIST = ("test_result", "log", "crash", "truncated", "group_result")
@@ -954,9 +954,9 @@ class FailureLine(models.Model):
modified = models.DateTimeField(auto_now=True)
class Meta:
- db_table = 'failure_line'
- index_together = (('job_guid', 'repository'),)
- unique_together = ('job_log', 'line')
+ db_table = "failure_line"
+ index_together = (("job_guid", "repository"),)
+ unique_together = ("job_log", "line")
def __str__(self):
return "{0} {1}".format(self.id, Job.objects.get(guid=self.job_guid).id)
@@ -1006,26 +1006,26 @@ def to_dict(self):
metadata = None
return {
- 'id': self.id,
- 'job_guid': self.job_guid,
- 'repository': self.repository_id,
- 'job_log': self.job_log_id,
- 'action': self.action,
- 'line': self.line,
- 'test': self.test,
- 'subtest': self.subtest,
- 'status': self.status,
- 'expected': self.expected,
- 'message': self.message,
- 'signature': self.signature,
- 'level': self.level,
- 'stack': self.stack,
- 'stackwalk_stdout': self.stackwalk_stdout,
- 'stackwalk_stderr': self.stackwalk_stderr,
- 'best_classification': metadata.best_classification_id if metadata else None,
- 'best_is_verified': metadata.best_is_verified if metadata else False,
- 'created': self.created,
- 'modified': self.modified,
+ "id": self.id,
+ "job_guid": self.job_guid,
+ "repository": self.repository_id,
+ "job_log": self.job_log_id,
+ "action": self.action,
+ "line": self.line,
+ "test": self.test,
+ "subtest": self.subtest,
+ "status": self.status,
+ "expected": self.expected,
+ "message": self.message,
+ "signature": self.signature,
+ "level": self.level,
+ "stack": self.stack,
+ "stackwalk_stdout": self.stackwalk_stdout,
+ "stackwalk_stderr": self.stackwalk_stderr,
+ "best_classification": metadata.best_classification_id if metadata else None,
+ "best_is_verified": metadata.best_is_verified if metadata else False,
+ "created": self.created,
+ "modified": self.modified,
}
def to_mozlog_format(self):
@@ -1064,13 +1064,13 @@ class Group(models.Model):
id = models.BigAutoField(primary_key=True)
name = models.CharField(max_length=255, unique=True)
- job_logs = models.ManyToManyField("JobLog", through='GroupStatus', related_name='groups')
+ job_logs = models.ManyToManyField("JobLog", through="GroupStatus", related_name="groups")
def __str__(self):
return self.name
class Meta:
- db_table = 'group'
+ db_table = "group"
class GroupStatus(models.Model):
@@ -1095,7 +1095,7 @@ def get_status(status_str):
)
class Meta:
- db_table = 'group_status'
+ db_table = "group_status"
class ClassifiedFailure(models.Model):
@@ -1107,7 +1107,7 @@ class ClassifiedFailure(models.Model):
id = models.BigAutoField(primary_key=True)
text_log_errors = models.ManyToManyField(
- "TextLogError", through='TextLogErrorMatch', related_name='classified_failures'
+ "TextLogError", through="TextLogErrorMatch", related_name="classified_failures"
)
# Note that we use a bug number of 0 as a sentinel value to indicate lines that
# are not actually symptomatic of a real bug
@@ -1136,7 +1136,7 @@ def set_bug(self, bug_number):
other = ClassifiedFailure.objects.filter(bug_number=bug_number).first()
if not other:
self.bug_number = bug_number
- self.save(update_fields=['bug_number'])
+ self.save(update_fields=["bug_number"])
return self
self.replace_with(other)
@@ -1177,7 +1177,7 @@ def update_matches(self, other):
if not other_matches:
match.classified_failure = other
- match.save(update_fields=['classified_failure'])
+ match.save(update_fields=["classified_failure"])
continue
# if any of our matches have higher scores than other's matches,
@@ -1187,7 +1187,7 @@ def update_matches(self, other):
yield match.id # for deletion
class Meta:
- db_table = 'classified_failure'
+ db_table = "classified_failure"
# TODO delete table once backfill of jobs in TextLogError table has been completed
@@ -1213,15 +1213,15 @@ class TextLogStep(models.Model):
SUPERSEDED = 8
RESULTS = (
- (SUCCESS, 'success'),
- (TEST_FAILED, 'testfailed'),
- (BUSTED, 'busted'),
- (SKIPPED, 'skipped'),
- (EXCEPTION, 'exception'),
- (RETRY, 'retry'),
- (USERCANCEL, 'usercancel'),
- (UNKNOWN, 'unknown'),
- (SUPERSEDED, 'superseded'),
+ (SUCCESS, "success"),
+ (TEST_FAILED, "testfailed"),
+ (BUSTED, "busted"),
+ (SKIPPED, "skipped"),
+ (EXCEPTION, "exception"),
+ (RETRY, "retry"),
+ (USERCANCEL, "usercancel"),
+ (UNKNOWN, "unknown"),
+ (SUPERSEDED, "superseded"),
)
name = models.CharField(max_length=200)
@@ -1233,7 +1233,7 @@ class TextLogStep(models.Model):
class Meta:
db_table = "text_log_step"
- unique_together = ('job', 'started_line_number', 'finished_line_number')
+ unique_together = ("job", "started_line_number", "finished_line_number")
class TextLogError(models.Model):
@@ -1242,18 +1242,18 @@ class TextLogError(models.Model):
"""
id = models.BigAutoField(primary_key=True)
- job = models.ForeignKey(Job, on_delete=models.CASCADE, related_name='text_log_error', null=True)
+ job = models.ForeignKey(Job, on_delete=models.CASCADE, related_name="text_log_error", null=True)
line = models.TextField()
line_number = models.PositiveIntegerField()
# TODO delete this field and unique_together once backfill of jobs in TextLogError table has been completed
step = models.ForeignKey(
- TextLogStep, on_delete=models.CASCADE, related_name='errors', null=True
+ TextLogStep, on_delete=models.CASCADE, related_name="errors", null=True
)
class Meta:
db_table = "text_log_error"
- unique_together = (('step', 'line_number'), ('job', 'line_number'))
+ unique_together = (("step", "line_number"), ("job", "line_number"))
def __str__(self):
return "{0} {1}".format(self.id, self.job.id)
@@ -1309,7 +1309,7 @@ def verify_classification(self, classification):
else:
self.metadata.best_classification = classification
self.metadata.best_is_verified = True
- self.metadata.save(update_fields=['best_classification', 'best_is_verified'])
+ self.metadata.save(update_fields=["best_classification", "best_is_verified"])
# Send event to NewRelic when verifying an autoclassified failure.
match = self.matches.filter(classified_failure=classification).first()
@@ -1317,10 +1317,10 @@ def verify_classification(self, classification):
return
newrelic.agent.record_custom_event(
- 'user_verified_classification',
+ "user_verified_classification",
{
- 'matcher': match.matcher_name,
- 'job_id': self.id,
+ "matcher": match.matcher_name,
+ "job_id": self.id,
},
)
@@ -1362,7 +1362,7 @@ class Meta:
def __str__(self):
args = (self.text_log_error_id, self.failure_line_id)
- return 'TextLogError={} FailureLine={}'.format(*args)
+ return "TextLogError={} FailureLine={}".format(*args)
class TextLogErrorMatch(models.Model):
@@ -1382,9 +1382,9 @@ class TextLogErrorMatch(models.Model):
score = models.DecimalField(max_digits=3, decimal_places=2, blank=True, null=True)
class Meta:
- db_table = 'text_log_error_match'
- verbose_name_plural = 'text log error matches'
- unique_together = ('text_log_error', 'classified_failure', 'matcher_name')
+ db_table = "text_log_error_match"
+ verbose_name_plural = "text log error matches"
+ unique_together = ("text_log_error", "classified_failure", "matcher_name")
def __str__(self):
return "{0} {1}".format(self.text_log_error.id, self.classified_failure.id)
@@ -1401,7 +1401,7 @@ class InvestigatedTests(models.Model):
class Meta:
unique_together = ["job_type", "test", "push"]
- db_table = 'investigated_tests'
+ db_table = "investigated_tests"
class MozciClassification(models.Model):
@@ -1409,14 +1409,14 @@ class MozciClassification(models.Model):
Automated classification of a Push provided by mozci
"""
- BAD = 'BAD'
- GOOD = 'GOOD'
- UNKNOWN = 'UNKNOWN'
+ BAD = "BAD"
+ GOOD = "GOOD"
+ UNKNOWN = "UNKNOWN"
CLASSIFICATION_RESULT = (
- (BAD, 'bad'),
- (GOOD, 'good'),
- (UNKNOWN, 'unknown'),
+ (BAD, "bad"),
+ (GOOD, "good"),
+ (UNKNOWN, "unknown"),
)
id = models.BigAutoField(primary_key=True)
@@ -1426,4 +1426,4 @@ class MozciClassification(models.Model):
task_id = models.CharField(max_length=22, validators=[MinLengthValidator(22)])
class Meta:
- db_table = 'mozci_classification'
+ db_table = "mozci_classification"
diff --git a/treeherder/perf/alerts.py b/treeherder/perf/alerts.py
index 1be99d456d1..7b39b68982f 100644
--- a/treeherder/perf/alerts.py
+++ b/treeherder/perf/alerts.py
@@ -28,7 +28,7 @@ def geomean(iterable):
def get_alert_properties(prev_value, new_value, lower_is_better):
AlertProperties = namedtuple(
- 'AlertProperties', 'pct_change delta is_regression prev_value new_value'
+ "AlertProperties", "pct_change delta is_regression prev_value new_value"
)
if prev_value != 0:
pct_change = 100.0 * abs(new_value - prev_value) / float(prev_value)
@@ -51,9 +51,9 @@ def generate_new_alerts_in_series(signature):
series = PerformanceDatum.objects.filter(signature=signature, push_timestamp__gte=max_alert_age)
latest_alert_timestamp = (
PerformanceAlert.objects.filter(series_signature=signature)
- .select_related('summary__push__time')
- .order_by('-summary__push__time')
- .values_list('summary__push__time', flat=True)[:1]
+ .select_related("summary__push__time")
+ .order_by("-summary__push__time")
+ .values_list("summary__push__time", flat=True)[:1]
)
if latest_alert_timestamp:
series = series.filter(push_timestamp__gt=latest_alert_timestamp[0])
@@ -90,8 +90,8 @@ def generate_new_alerts_in_series(signature):
with transaction.atomic():
for prev, cur in zip(analyzed_series, analyzed_series[1:]):
if cur.change_detected:
- prev_value = cur.historical_stats['avg']
- new_value = cur.forward_stats['avg']
+ prev_value = cur.historical_stats["avg"]
+ new_value = cur.forward_stats["avg"]
alert_properties = get_alert_properties(
prev_value, new_value, signature.lower_is_better
)
@@ -139,27 +139,27 @@ def generate_new_alerts_in_series(signature):
push_id=cur.push_id,
prev_push_id=prev.push_id,
defaults={
- 'manually_created': False,
- 'created': datetime.utcfromtimestamp(cur.push_timestamp),
+ "manually_created": False,
+ "created": datetime.utcfromtimestamp(cur.push_timestamp),
},
)
# django/mysql doesn't understand "inf", so just use some
# arbitrarily high value for that case
t_value = cur.t
- if t_value == float('inf'):
+ if t_value == float("inf"):
t_value = 1000
PerformanceAlert.objects.update_or_create(
summary=summary,
series_signature=signature,
defaults={
- 'noise_profile': noise_profile,
- 'is_regression': alert_properties.is_regression,
- 'amount_pct': alert_properties.pct_change,
- 'amount_abs': alert_properties.delta,
- 'prev_value': prev_value,
- 'new_value': new_value,
- 't_value': t_value,
+ "noise_profile": noise_profile,
+ "is_regression": alert_properties.is_regression,
+ "amount_pct": alert_properties.pct_change,
+ "amount_abs": alert_properties.delta,
+ "prev_value": prev_value,
+ "new_value": new_value,
+ "t_value": t_value,
},
)
diff --git a/treeherder/perf/auto_perf_sheriffing/backfill_reports.py b/treeherder/perf/auto_perf_sheriffing/backfill_reports.py
index ad92feddf5f..f80c79c6308 100644
--- a/treeherder/perf/auto_perf_sheriffing/backfill_reports.py
+++ b/treeherder/perf/auto_perf_sheriffing/backfill_reports.py
@@ -41,9 +41,9 @@ def __init__(
insufficiently specific will alter the correct order of alerts
"""
if max_alerts <= 0 or max_improvements <= 0:
- raise ValueError('Use positive values.')
+ raise ValueError("Use positive values.")
if len(platforms_of_interest) == 0:
- raise ValueError('Provide at least one platform name.')
+ raise ValueError("Provide at least one platform name.")
self.max_alerts = max_alerts
self.max_improvements = max_improvements
@@ -51,7 +51,7 @@ def __init__(
def extract_important_alerts(self, alerts: Tuple[PerformanceAlert, ...]):
if any(not isinstance(alert, PerformanceAlert) for alert in alerts):
- raise ValueError('Provided parameter does not contain only PerformanceAlert objects.')
+ raise ValueError("Provided parameter does not contain only PerformanceAlert objects.")
relevant_alerts = self._extract_by_relevant_platforms(alerts)
alerts_with_distinct_jobs = self._ensure_distinct_jobs(relevant_alerts)
sorted_alerts = self._multi_criterion_sort(alerts_with_distinct_jobs)
@@ -140,7 +140,7 @@ def _os_relevance(self, alert_platform: str):
return len(
self.ordered_platforms_of_interest
) - self.ordered_platforms_of_interest.index(platform_of_interest)
- raise ValueError('Unknown platform.')
+ raise ValueError("Unknown platform.")
def _noise_profile_is_ok(self, noise_profile: str):
"""
@@ -181,11 +181,11 @@ def _multi_criterion_sort(self, relevant_alerts):
class IdentifyAlertRetriggerables:
def __init__(self, max_data_points: int, time_interval: timedelta, logger=None):
if max_data_points < 1:
- raise ValueError('Cannot set range width less than 1')
+ raise ValueError("Cannot set range width less than 1")
if max_data_points % 2 == 0:
- raise ValueError('Must provide odd range width')
+ raise ValueError("Must provide odd range width")
if not isinstance(time_interval, timedelta):
- raise TypeError('Must provide time interval as timedelta')
+ raise TypeError("Must provide time interval as timedelta")
self._range_width = max_data_points
self._time_interval = time_interval
@@ -219,7 +219,7 @@ def _fetch_suspect_data_points(self, alert: PerformanceAlert) -> QuerySet:
startday = self.min_timestamp(alert.summary.push.time)
endday = self.max_timestamp(alert.summary.push.time)
- data = PerformanceDatum.objects.select_related('push').filter(
+ data = PerformanceDatum.objects.select_related("push").filter(
repository_id=alert.series_signature.repository_id, # leverage compound index
signature_id=alert.series_signature_id,
push_timestamp__gt=startday,
@@ -230,11 +230,11 @@ def _fetch_suspect_data_points(self, alert: PerformanceAlert) -> QuerySet:
data
# JSONs are more self explanatory
# with perf_datum_id instead of id
- .extra(select={'perf_datum_id': 'performance_datum.id'})
+ .extra(select={"perf_datum_id": "performance_datum.id"})
.values(
- 'value', 'job_id', 'perf_datum_id', 'push_id', 'push_timestamp', 'push__revision'
+ "value", "job_id", "perf_datum_id", "push_id", "push_timestamp", "push__revision"
)
- .order_by('push_timestamp')
+ .order_by("push_timestamp")
)
return annotated_data_points
@@ -244,14 +244,14 @@ def _one_data_point_per_push(self, annotated_data_points: QuerySet) -> List[dict
return [
data_point
for data_point in annotated_data_points
- if not (data_point['push_id'] in seen_push_ids or seen_add(data_point['push_id']))
+ if not (data_point["push_id"] in seen_push_ids or seen_add(data_point["push_id"]))
]
def _find_push_id_index(self, push_id: int, flattened_data_points: List[dict]) -> int:
for index, data_point in enumerate(flattened_data_points):
- if data_point['push_id'] == push_id:
+ if data_point["push_id"] == push_id:
return index
- raise LookupError(f'Could not find push id {push_id}')
+ raise LookupError(f"Could not find push id {push_id}")
def __compute_window_slices(self, center_index: int) -> slice:
side = self._range_width // 2
@@ -265,7 +265,7 @@ def _glance_over_retrigger_range(self, data_points_to_retrigger: List[dict]):
retrigger_range = len(data_points_to_retrigger)
if retrigger_range < self._range_width:
self.log.warning(
- 'Found small backfill range (of size {} instead of {})'.format(
+ "Found small backfill range (of size {} instead of {})".format(
retrigger_range, self._range_width
)
)
@@ -334,7 +334,7 @@ def __fetch_summaries_to_retrigger(
self, since: datetime, frameworks: List[str], repositories: List[str]
) -> QuerySet:
no_reports_yet = Q(last_updated__gte=since, backfill_report__isnull=True)
- with_outdated_reports = Q(last_updated__gt=F('backfill_report__last_updated'))
+ with_outdated_reports = Q(last_updated__gt=F("backfill_report__last_updated"))
filters = no_reports_yet | with_outdated_reports
if frameworks:
@@ -343,8 +343,8 @@ def __fetch_summaries_to_retrigger(
filters = filters & Q(repository__name__in=repositories)
return (
- PerformanceAlertSummary.objects.prefetch_related('backfill_report')
- .select_related('framework', 'repository')
+ PerformanceAlertSummary.objects.prefetch_related("backfill_report")
+ .select_related("framework", "repository")
.filter(filters)
)
@@ -367,9 +367,9 @@ def _associate_retrigger_context(self, important_alerts: List[PerformanceAlert])
if incomplete_mapping:
expected = len(important_alerts)
missing = expected - len(retrigger_map)
- raise MissingRecords(f'{missing} out of {expected} records are missing!')
+ raise MissingRecords(f"{missing} out of {expected} records are missing!")
return retrigger_map
def _doesnt_have_report(self, summary):
- return not hasattr(summary, 'backfill_report')
+ return not hasattr(summary, "backfill_report")
diff --git a/treeherder/perf/auto_perf_sheriffing/backfill_tool.py b/treeherder/perf/auto_perf_sheriffing/backfill_tool.py
index eb61a1b5994..dcbc4b480b7 100644
--- a/treeherder/perf/auto_perf_sheriffing/backfill_tool.py
+++ b/treeherder/perf/auto_perf_sheriffing/backfill_tool.py
@@ -28,7 +28,7 @@ def backfill_job(self, job: Union[Job, str]) -> str:
if "browsertime" in job.job_group.name.lower():
logger.debug(f"Requesting side_by_side for task {task_id_to_backfill}...")
side_by_side_task_id = self.__taskcluster.trigger_action(
- action='side-by-side',
+ action="side-by-side",
task_id=task_id_to_backfill,
decision_task_id=decision_task_id,
input={},
@@ -39,7 +39,7 @@ def backfill_job(self, job: Union[Job, str]) -> str:
)
logger.debug(f"Requesting backfill for task {task_id_to_backfill}...")
task_id = self.__taskcluster.trigger_action(
- action='backfill',
+ action="backfill",
task_id=task_id_to_backfill,
decision_task_id=decision_task_id,
input={
diff --git a/treeherder/perf/auto_perf_sheriffing/factories.py b/treeherder/perf/auto_perf_sheriffing/factories.py
index 517bfcb5014..561b55fb219 100644
--- a/treeherder/perf/auto_perf_sheriffing/factories.py
+++ b/treeherder/perf/auto_perf_sheriffing/factories.py
@@ -24,11 +24,11 @@ def __report_maintainer_factory(days_to_lookup: timedelta) -> BackfillReportMain
max_alerts=5,
max_improvements=2,
platforms_of_interest=(
- 'windows10',
- 'linux',
- 'osx',
- 'android',
- 'windows7',
+ "windows10",
+ "linux",
+ "osx",
+ "android",
+ "windows7",
), # windows7 lost its relevance due to lower alert rate on this platform
)
backfill_context_fetcher = IdentifyAlertRetriggerables(
diff --git a/treeherder/perf/auto_perf_sheriffing/outcome_checker.py b/treeherder/perf/auto_perf_sheriffing/outcome_checker.py
index a6e8cef303c..92a63e90203 100644
--- a/treeherder/perf/auto_perf_sheriffing/outcome_checker.py
+++ b/treeherder/perf/auto_perf_sheriffing/outcome_checker.py
@@ -23,8 +23,8 @@ def check(self, record: BackfillRecord) -> OutcomeStatus:
if record.job_type is None:
raise ValueError(f"No job_type for record {record.alert.id}.")
of_type = record.job_type
- with_successful_results = 'success' # state is "completed"
- with_unknown_results = 'unknown' # state is "running" or "pending"
+ with_successful_results = "success" # state is "completed"
+ with_unknown_results = "unknown" # state is "running" or "pending"
total_backfills_in_progress = 0
total_backfills_failed = 0
total_backfills_successful = 0
diff --git a/treeherder/perf/auto_perf_sheriffing/secretary.py b/treeherder/perf/auto_perf_sheriffing/secretary.py
index 983d97bab3c..8c4558049b6 100644
--- a/treeherder/perf/auto_perf_sheriffing/secretary.py
+++ b/treeherder/perf/auto_perf_sheriffing/secretary.py
@@ -94,7 +94,7 @@ def backfills_left(self, on_platform: str) -> int:
perf_sheriff_settings = PerformanceSettings.objects.get(name="perf_sheriff_bot")
settings = json.loads(perf_sheriff_settings.settings)
- return settings['limits'][on_platform]
+ return settings["limits"][on_platform]
def consume_backfills(self, on_platform: str, amount: int) -> int:
self.__assert_platform_is_supported(on_platform)
@@ -103,10 +103,10 @@ def consume_backfills(self, on_platform: str, amount: int) -> int:
settings = json.loads(perf_sheriff_settings.settings)
- _backfills_left = left = settings['limits'][on_platform] - amount
+ _backfills_left = left = settings["limits"][on_platform] - amount
_backfills_left = left if left > 0 else 0
- settings['limits'][on_platform] = _backfills_left
+ settings["limits"][on_platform] = _backfills_left
perf_sheriff_settings.set_settings(settings)
perf_sheriff_settings.save()
@@ -137,8 +137,8 @@ def __assert_platform_is_supported(self, on_platform: str):
@classmethod
def _get_default_settings(cls, as_json=True):
default_settings = {
- 'limits': django_settings.MAX_BACKFILLS_PER_PLATFORM,
- 'last_reset_date': datetime.utcnow(),
+ "limits": django_settings.MAX_BACKFILLS_PER_PLATFORM,
+ "last_reset_date": datetime.utcnow(),
}
return (
diff --git a/treeherder/perf/auto_perf_sheriffing/sherlock.py b/treeherder/perf/auto_perf_sheriffing/sherlock.py
index 6d1e3faacfa..dcaa5cb6efd 100644
--- a/treeherder/perf/auto_perf_sheriffing/sherlock.py
+++ b/treeherder/perf/auto_perf_sheriffing/sherlock.py
@@ -113,11 +113,11 @@ def __fetch_records_requiring_backfills_on(
platform: str, frameworks: List[str], repositories: List[str]
) -> QuerySet:
records_to_backfill = BackfillRecord.objects.select_related(
- 'alert',
- 'alert__series_signature',
- 'alert__series_signature__platform',
- 'alert__summary__framework',
- 'alert__summary__repository',
+ "alert",
+ "alert__series_signature",
+ "alert__series_signature__platform",
+ "alert__summary__framework",
+ "alert__summary__repository",
).filter(
status=BackfillRecord.READY_FOR_PROCESSING,
alert__series_signature__platform__platform__icontains=platform,
@@ -132,7 +132,7 @@ def _backfill_record(self, record: BackfillRecord, left: int) -> Tuple[int, int]
try:
context = record.get_context()
except JSONDecodeError:
- logger.warning(f'Failed to backfill record {record.alert.id}: invalid JSON context.')
+ logger.warning(f"Failed to backfill record {record.alert.id}: invalid JSON context.")
record.status = BackfillRecord.FAILED
record.save()
else:
@@ -141,11 +141,11 @@ def _backfill_record(self, record: BackfillRecord, left: int) -> Tuple[int, int]
if left <= 0 or self.runtime_exceeded():
break
try:
- using_job_id = data_point['job_id']
+ using_job_id = data_point["job_id"]
self.backfill_tool.backfill_job(using_job_id)
left, consumed = left - 1, consumed + 1
except (KeyError, CannotBackfill, Exception) as ex:
- logger.debug(f'Failed to backfill record {record.alert.id}: {ex}')
+ logger.debug(f"Failed to backfill record {record.alert.id}: {ex}")
else:
record.try_remembering_job_properties(using_job_id)
@@ -153,7 +153,7 @@ def _backfill_record(self, record: BackfillRecord, left: int) -> Tuple[int, int]
record, len(data_points_to_backfill), consumed
)
log_level = INFO if success else WARNING
- logger.log(log_level, f'{outcome} (for backfill record {record.alert.id})')
+ logger.log(log_level, f"{outcome} (for backfill record {record.alert.id})")
return left, consumed
@@ -168,19 +168,19 @@ def _note_backfill_outcome(
if actually_backfilled == to_backfill:
record.status = BackfillRecord.BACKFILLED
success = True
- outcome = 'Backfilled all data points'
+ outcome = "Backfilled all data points"
else:
record.status = BackfillRecord.FAILED
if actually_backfilled == 0:
- outcome = 'Backfill attempts on all data points failed right upon request.'
+ outcome = "Backfill attempts on all data points failed right upon request."
elif actually_backfilled < to_backfill:
- outcome = 'Backfill attempts on some data points failed right upon request.'
+ outcome = "Backfill attempts on some data points failed right upon request."
else:
raise ValueError(
- f'Cannot have backfilled more than available attempts ({actually_backfilled} out of {to_backfill}).'
+ f"Cannot have backfilled more than available attempts ({actually_backfilled} out of {to_backfill})."
)
- record.set_log_details({'action': 'BACKFILL', 'outcome': outcome})
+ record.set_log_details({"action": "BACKFILL", "outcome": outcome})
record.save()
return success, outcome
@@ -191,11 +191,11 @@ def _is_queue_overloaded(provisioner_id: str, worker_type: str, acceptable_limit
Usage example: _is_queue_overloaded('gecko-3', 'b-linux')
:return: True/False
"""
- tc = TaskclusterConfig('https://firefox-ci-tc.services.mozilla.com')
+ tc = TaskclusterConfig("https://firefox-ci-tc.services.mozilla.com")
tc.auth(client_id=CLIENT_ID, access_token=ACCESS_TOKEN)
- queue = tc.get_service('queue')
+ queue = tc.get_service("queue")
- pending_tasks_count = queue.pendingTasks(provisioner_id, worker_type).get('pendingTasks')
+ pending_tasks_count = queue.pendingTasks(provisioner_id, worker_type).get("pendingTasks")
return pending_tasks_count > acceptable_limit
diff --git a/treeherder/perf/email.py b/treeherder/perf/email.py
index fb2f6137a5a..f959e7a14d5 100644
--- a/treeherder/perf/email.py
+++ b/treeherder/perf/email.py
@@ -105,7 +105,7 @@ def _initialize_report_intro(self):
def __prepare_report_description(self) -> str:
title_case_platforms = map(lambda platf: platf.title(), settings.SUPPORTED_PLATFORMS)
- platform_enumeration = ', '.join(title_case_platforms)
+ platform_enumeration = ", ".join(title_case_platforms)
description = self.DESCRIPTION.format(supported_platforms=platform_enumeration)
return description
@@ -117,7 +117,7 @@ def _include_in_report(self, record: BackfillRecord):
def _build_table_row(self, record: BackfillRecord) -> str:
alert_summary = record.alert.summary
alert = record.alert
- job_symbol = self.__escape_markdown(record.job_symbol) or 'N/A'
+ job_symbol = self.__escape_markdown(record.job_symbol) or "N/A"
total_backfills = (
record.total_backfills_failed
+ record.total_backfills_successful
@@ -152,7 +152,7 @@ def __build_push_range_md_link(self, record: BackfillRecord) -> str:
return f"[{text_to_link}]({hyperlink})"
except Exception:
- return 'N/A'
+ return "N/A"
def __build_push_range_link(self, record: BackfillRecord) -> str:
repo = record.repository.name
@@ -255,7 +255,7 @@ def _include_in_report(self, signature: PerformanceSignature):
def _build_table_row(self, signature: PerformanceSignature) -> str:
props = self.__extract_properties(signature)
- return '| {repository} | {framework} | {platform} | {suite} | {application} | {last_updated} |'.format(
+ return "| {repository} | {framework} | {platform} | {suite} | {application} | {last_updated} |".format(
repository=props["repository"],
framework=props["framework"],
platform=props["platform"],
diff --git a/treeherder/perf/exceptions.py b/treeherder/perf/exceptions.py
index 262cd2c54df..1cf50fd4de3 100644
--- a/treeherder/perf/exceptions.py
+++ b/treeherder/perf/exceptions.py
@@ -1,8 +1,8 @@
class NoDataCyclingAtAll(Exception):
def __str__(self):
- msg = 'No data cycling could be performed.'
+ msg = "No data cycling could be performed."
if self.__cause__:
- msg = f'{msg} (Reason: {self.__cause__})'
+ msg = f"{msg} (Reason: {self.__cause__})"
return msg
diff --git a/treeherder/perf/management/commands/backfill_perf_jobs.py b/treeherder/perf/management/commands/backfill_perf_jobs.py
index 2b632aa8486..b253e021293 100644
--- a/treeherder/perf/management/commands/backfill_perf_jobs.py
+++ b/treeherder/perf/management/commands/backfill_perf_jobs.py
@@ -17,15 +17,15 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- 'job',
- action='store',
+ "job",
+ action="store",
type=str,
help="Performance job to backfill from",
- metavar='JOB_ID',
+ metavar="JOB_ID",
)
def handle(self, *args, **options):
- job_id = options['job']
+ job_id = options["job"]
backfill_tool = backfill_tool_factory()
task_id = backfill_tool.backfill_job(job_id)
diff --git a/treeherder/perf/management/commands/compute_criteria_formulas.py b/treeherder/perf/management/commands/compute_criteria_formulas.py
index 14d4779f8f3..aaf22ba7bb3 100644
--- a/treeherder/perf/management/commands/compute_criteria_formulas.py
+++ b/treeherder/perf/management/commands/compute_criteria_formulas.py
@@ -16,76 +16,76 @@
def pretty_enumerated(formulas: List[str]) -> str:
- comma = ', '
- return ' & '.join(comma.join(formulas).rsplit(comma, maxsplit=1))
+ comma = ", "
+ return " & ".join(comma.join(formulas).rsplit(comma, maxsplit=1))
class Command(BaseCommand):
- ENGINEER_TRACTION = 'engineer traction'
- FIX_RATIO = 'fix ratio'
+ ENGINEER_TRACTION = "engineer traction"
+ FIX_RATIO = "fix ratio"
FORMULAS = [ENGINEER_TRACTION, FIX_RATIO] # register new formulas here
- help = f'''
+ help = f"""
Compute the {pretty_enumerated(FORMULAS)} for multiple framework/suite combinations,
according to the Perf Sheriffing Criteria specification.\nRequires "{criteria_tracking.CRITERIA_FILENAME}" to be provided for both program input & output.
- '''
+ """
- INITIAL_PROMPT_MSG = 'Computing Perf Sheriffing Criteria... (may take some time)'
- PRECISION = '.1f'
+ INITIAL_PROMPT_MSG = "Computing Perf Sheriffing Criteria... (may take some time)"
+ PRECISION = ".1f"
def add_arguments(self, parser):
parser.add_argument(
- '--quantifying-period',
- '-qp',
+ "--quantifying-period",
+ "-qp",
default=settings.QUANTIFYING_PERIOD,
type=self.parse_time_interval,
- help='''How far back to look for gathering formula's input data, from now.
+ help="""How far back to look for gathering formula's input data, from now.
Expressed in a humanized form.
Examples: 1year, 6month, 2weeks etc.
- More details about accepted forms: https://github.com/mozilla/ActiveData/blob/dev/docs/jx_time.md#duration''',
- metavar='QUANTIFYING_PERIOD',
+ More details about accepted forms: https://github.com/mozilla/ActiveData/blob/dev/docs/jx_time.md#duration""",
+ metavar="QUANTIFYING_PERIOD",
)
parser.add_argument(
- '--bug-cooldown',
- '-bc',
+ "--bug-cooldown",
+ "-bc",
default=settings.BUG_COOLDOWN_TIME,
type=self.parse_time_interval,
- help='''How old Bugzilla bugs should be to be taken into consideration.
+ help="""How old Bugzilla bugs should be to be taken into consideration.
Expressed in a humanized form.
Examples: 1year, 6month, 2weeks etc.
- More details about accepted forms: https://github.com/mozilla/ActiveData/blob/dev/docs/jx_time.md#duration''',
- metavar='BUG_COOLDOWN',
+ More details about accepted forms: https://github.com/mozilla/ActiveData/blob/dev/docs/jx_time.md#duration""",
+ metavar="BUG_COOLDOWN",
)
parser.add_argument(
- '--multiprocessing',
- '-mp',
- action='store_true',
- help='''Experimental! Whether to use a process pool instead of a thread pool''',
+ "--multiprocessing",
+ "-mp",
+ action="store_true",
+ help="""Experimental! Whether to use a process pool instead of a thread pool""",
)
- subparser = parser.add_subparsers(dest='individually')
+ subparser = parser.add_subparsers(dest="individually")
individual_parser = subparser.add_parser(
- 'individually',
- help='Compute perf sheriffing criteria for individual framework/suite combo (no CSV file required)',
+ "individually",
+ help="Compute perf sheriffing criteria for individual framework/suite combo (no CSV file required)",
)
- individual_parser.add_argument('framework', action='store')
- individual_parser.add_argument('suite', action='store')
- individual_parser.add_argument('--test', default=None)
+ individual_parser.add_argument("framework", action="store")
+ individual_parser.add_argument("suite", action="store")
+ individual_parser.add_argument("--test", default=None)
def handle(self, *args, **options):
- if options.get('individually'):
+ if options.get("individually"):
return self._handle_individually(options)
- quant_period = options['quantifying_period']
- bug_cooldown = options['bug_cooldown']
- multiprocessed = options['multiprocessing']
+ quant_period = options["quantifying_period"]
+ bug_cooldown = options["bug_cooldown"]
+ multiprocessed = options["multiprocessing"]
init_params = (None, quant_period, bug_cooldown)
formula_map = {
- 'EngineerTraction': EngineerTractionFormula(*init_params),
- 'FixRatio': FixRatioFormula(*init_params),
- 'TotalAlerts': TotalAlertsFormula(quant_period),
+ "EngineerTraction": EngineerTractionFormula(*init_params),
+ "FixRatio": FixRatioFormula(*init_params),
+ "TotalAlerts": TotalAlertsFormula(quant_period),
}
tracker = CriteriaTracker(formula_map, multiprocessed=multiprocessed)
@@ -94,18 +94,18 @@ def handle(self, *args, **options):
tracker.update_records()
duration = time.time() - start
- print(f'{self.INITIAL_PROMPT_MSG}', end='')
+ print(f"{self.INITIAL_PROMPT_MSG}", end="")
for record in tracker:
print(record)
print(f"Took {duration:.1f} seconds")
def _handle_individually(self, options):
- framework = options['framework']
- suite = options['suite']
- test = options['test']
- quant_period = options['quantifying_period']
- bug_cooldown = options['bug_cooldown']
+ framework = options["framework"]
+ suite = options["suite"]
+ test = options["test"]
+ quant_period = options["quantifying_period"]
+ bug_cooldown = options["bug_cooldown"]
init_params = (None, quant_period, bug_cooldown)
targetted_test = (framework, suite, test)
@@ -113,7 +113,7 @@ def _handle_individually(self, options):
engineer_traction = EngineerTractionFormula(*init_params)
fix_ratio = FixRatioFormula(*init_params)
- print(f'\r{self.INITIAL_PROMPT_MSG}', end='')
+ print(f"\r{self.INITIAL_PROMPT_MSG}", end="")
compute_start = time.time()
eng_traction_result = engineer_traction(*targetted_test)
@@ -125,9 +125,9 @@ def _handle_individually(self, options):
fix_ratio_result *= 100
# display results (inline)
- test_moniker = ' '.join(filter(None, (suite, test)))
- title = f'Perf Sheriffing Criteria for {framework} - {test_moniker}'
- big_underline = '-' * len(title)
+ test_moniker = " ".join(filter(None, (suite, test)))
+ title = f"Perf Sheriffing Criteria for {framework} - {test_moniker}"
+ big_underline = "-" * len(title)
# & results headers
eng_traction_head = self.ENGINEER_TRACTION.capitalize()
@@ -135,7 +135,7 @@ def _handle_individually(self, options):
justify_head = self.__get_head_justification(eng_traction_head, fix_ratio_head)
# let's update 1st prompt line
- print(f"\r{' ' * len(self.INITIAL_PROMPT_MSG)}", end='')
+ print(f"\r{' ' * len(self.INITIAL_PROMPT_MSG)}", end="")
print(
f"\rComputing Perf Sheriffing Criteria... (took {compute_duration:{self.PRECISION}} seconds)"
)
@@ -146,8 +146,8 @@ def _handle_individually(self, options):
print(big_underline)
# & actual results
- print(f'{eng_traction_head:<{justify_head}}: {eng_traction_result:{self.PRECISION}}%')
- print(f'{fix_ratio_head:<{justify_head}}: {fix_ratio_result:{self.PRECISION}}%')
+ print(f"{eng_traction_head:<{justify_head}}: {eng_traction_result:{self.PRECISION}}%")
+ print(f"{fix_ratio_head:<{justify_head}}: {fix_ratio_result:{self.PRECISION}}%")
print(big_underline)
def __get_head_justification(self, *result_heads):
diff --git a/treeherder/perf/management/commands/create_test_perf_data.py b/treeherder/perf/management/commands/create_test_perf_data.py
index 8a8a3db385b..79c0d2514d4 100644
--- a/treeherder/perf/management/commands/create_test_perf_data.py
+++ b/treeherder/perf/management/commands/create_test_perf_data.py
@@ -22,7 +22,7 @@ def handle(self, *args, **options):
if confirm != "yes":
return
- call_command('loaddata', 'test_performance_data')
+ call_command("loaddata", "test_performance_data")
# generating a test performance series by hand is a little overly
# verbose, so let's do that programmatically
@@ -34,8 +34,8 @@ def handle(self, *args, **options):
# create a push first as need a push_id
Push.objects.create(
repository=s.repository,
- revision='1234abcd',
- author='foo@bar.com',
+ revision="1234abcd",
+ author="foo@bar.com",
time=datetime.datetime.now(),
)
diff --git a/treeherder/perf/management/commands/generate_alerts.py b/treeherder/perf/management/commands/generate_alerts.py
index 7a443a1426c..5e33ec89c09 100644
--- a/treeherder/perf/management/commands/generate_alerts.py
+++ b/treeherder/perf/management/commands/generate_alerts.py
@@ -14,26 +14,26 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- '--project',
- action='append',
- help='Project to get signatures from (specify multiple times to get multiple projects',
+ "--project",
+ action="append",
+ help="Project to get signatures from (specify multiple times to get multiple projects",
)
parser.add_argument(
- '--signature',
- action='append',
- help='Signature hashes to process, defaults to all non-subtests',
+ "--signature",
+ action="append",
+ help="Signature hashes to process, defaults to all non-subtests",
)
def handle(self, *args, **options):
- if not options['project']:
+ if not options["project"]:
raise CommandError("Must specify at least one project with " "--project")
- for project in options['project']:
+ for project in options["project"]:
repository = models.Repository.objects.get(name=project)
signatures = PerformanceSignature.objects.filter(repository=repository)
- if options['signature']:
- signatures_to_process = signatures.filter(signature_hash__in=options['signature'])
+ if options["signature"]:
+ signatures_to_process = signatures.filter(signature_hash__in=options["signature"])
else:
hashes_to_ignore = set()
# if doing everything, only handle series which are not a
@@ -42,7 +42,7 @@ def handle(self, *args, **options):
for signature in signatures:
# Don't alert on subtests which have a summary
hashes_to_ignore.update(
- signature.extra_properties.get('subtest_signatures', [])
+ signature.extra_properties.get("subtest_signatures", [])
)
signatures_to_process = [
signature
diff --git a/treeherder/perf/management/commands/import_perf_data.py b/treeherder/perf/management/commands/import_perf_data.py
index ee731585e0d..607c671f7ae 100644
--- a/treeherder/perf/management/commands/import_perf_data.py
+++ b/treeherder/perf/management/commands/import_perf_data.py
@@ -50,14 +50,14 @@ def progress_notifier(
tabs_no=0,
):
total_items = len(iterable)
- print('{0}Fetching {1} {2} item(s)...'.format('\t' * tabs_no, total_items, item_name))
+ print("{0}Fetching {1} {2} item(s)...".format("\t" * tabs_no, total_items, item_name))
prev_percentage = None
for idx, item in enumerate(iterable):
item_processor(item)
percentage = int((idx + 1) * 100 / total_items)
if percentage % 10 == 0 and percentage != prev_percentage:
- print('{0}Fetched {1}% of {2} item(s)'.format('\t' * tabs_no, percentage, item_name))
+ print("{0}Fetched {1}% of {2} item(s)".format("\t" * tabs_no, percentage, item_name))
prev_percentage = percentage
@@ -70,8 +70,8 @@ def _ignore_assignee(table_name, model):
SENSITIVE_TABLES_MAP = {
- 'performance_alert': _ignore_classifier,
- 'performance_alert_summary': _ignore_assignee,
+ "performance_alert": _ignore_classifier,
+ "performance_alert_summary": _ignore_assignee,
}
@@ -86,14 +86,14 @@ def fillup_target(self, **filters):
def show_progress(self, queryset, map, table_name):
total_rows = int(queryset.count())
- print('Fetching {0} {1}(s)...'.format(total_rows, table_name))
+ print("Fetching {0} {1}(s)...".format(total_rows, table_name))
prev_percentage = None
for idx, obj in enumerate(list(queryset)):
map(obj)
percentage = int((idx + 1) * 100 / total_rows)
if percentage % 10 == 0 and percentage != prev_percentage:
- print('Fetched {0}% of alert summaries'.format(percentage))
+ print("Fetched {0}% of alert summaries".format(percentage))
prev_percentage = percentage
@@ -112,20 +112,20 @@ class DecentSizedData(Data):
def delete_local_data(self):
for model in self.DECENT_SIZED_TABLES:
- print('Removing elements from {0} table... '.format(model._meta.db_table))
+ print("Removing elements from {0} table... ".format(model._meta.db_table))
model.objects.using(self.target).all().delete()
def save_local_data(self):
for model in self.DECENT_SIZED_TABLES:
- print('Fetching from {0} table...'.format(model._meta.db_table))
+ print("Fetching from {0} table...".format(model._meta.db_table))
model.objects.using(self.target).bulk_create(model.objects.using(self.source).all())
def fillup_target(self, **filters):
- print('Fetching all affordable data...\n')
+ print("Fetching all affordable data...\n")
# TODO: JSON dump the list
print(
- 'From tables {0}'.format(
- ', '.join([model._meta.db_table for model in self.DECENT_SIZED_TABLES])
+ "From tables {0}".format(
+ ", ".join([model._meta.db_table for model in self.DECENT_SIZED_TABLES])
)
)
@@ -151,19 +151,19 @@ class MassiveData(Data):
]
priority_dict = {
- 'reference_data_signature': {'download_order': 1, 'model': ReferenceDataSignatures},
- 'push': {'download_order': 1, 'model': Push},
- 'build_platform': {'download_order': 1, 'model': BuildPlatform},
- 'machine': {'download_order': 1, 'model': Machine},
- 'job_group': {'download_order': 1, 'model': JobGroup},
- 'job_type': {'download_order': 1, 'model': JobType},
- 'performance_signature': {'download_order': 2, 'model': PerformanceSignature},
- 'job': {'download_order': 2, 'model': Job},
- 'performance_alert_summary': {'download_order': 2, 'model': PerformanceAlertSummary},
- 'performance_datum': {'download_order': 3, 'model': PerformanceDatum},
- 'performance_alert': {'download_order': 3, 'model': PerformanceAlert},
- 'backfill_report': {'download_order': 3, 'model': BackfillReport},
- 'backfill_record': {'download_order': 4, 'model': BackfillRecord},
+ "reference_data_signature": {"download_order": 1, "model": ReferenceDataSignatures},
+ "push": {"download_order": 1, "model": Push},
+ "build_platform": {"download_order": 1, "model": BuildPlatform},
+ "machine": {"download_order": 1, "model": Machine},
+ "job_group": {"download_order": 1, "model": JobGroup},
+ "job_type": {"download_order": 1, "model": JobType},
+ "performance_signature": {"download_order": 2, "model": PerformanceSignature},
+ "job": {"download_order": 2, "model": Job},
+ "performance_alert_summary": {"download_order": 2, "model": PerformanceAlertSummary},
+ "performance_datum": {"download_order": 3, "model": PerformanceDatum},
+ "performance_alert": {"download_order": 3, "model": PerformanceAlert},
+ "backfill_report": {"download_order": 3, "model": BackfillReport},
+ "backfill_record": {"download_order": 4, "model": BackfillRecord},
}
def __init__(
@@ -184,7 +184,7 @@ def __init__(
oldest_day = datetime.datetime.now() - self.time_window
self.query_set = (
PerformanceAlertSummary.objects.using(self.source)
- .select_related('framework', 'repository')
+ .select_related("framework", "repository")
.filter(created__gte=oldest_day)
)
@@ -197,50 +197,50 @@ def __init__(
frameworks
if frameworks is not None
else list(
- PerformanceFramework.objects.using(self.source).values_list('name', flat=True)
+ PerformanceFramework.objects.using(self.source).values_list("name", flat=True)
)
)
self.repositories = (
repositories
if repositories is not None
- else list(Repository.objects.using(self.source).values_list('name', flat=True))
+ else list(Repository.objects.using(self.source).values_list("name", flat=True))
)
interproc_instance = interproc()
self.models_instances = {
- 'reference_data_signature': interproc_instance.list(),
- 'performance_alert': interproc_instance.list(),
- 'job': interproc_instance.list(),
- 'job_type': interproc_instance.list(),
- 'job_group': interproc_instance.list(),
- 'performance_datum': interproc_instance.list(),
- 'performance_alert_summary': interproc_instance.list(),
- 'push': interproc_instance.list(),
- 'build_platform': interproc_instance.list(),
- 'machine': interproc_instance.list(),
- 'performance_signature': interproc_instance.list(),
- 'backfill_report': interproc_instance.list(),
- 'backfill_record': interproc_instance.list(),
+ "reference_data_signature": interproc_instance.list(),
+ "performance_alert": interproc_instance.list(),
+ "job": interproc_instance.list(),
+ "job_type": interproc_instance.list(),
+ "job_group": interproc_instance.list(),
+ "performance_datum": interproc_instance.list(),
+ "performance_alert_summary": interproc_instance.list(),
+ "push": interproc_instance.list(),
+ "build_platform": interproc_instance.list(),
+ "machine": interproc_instance.list(),
+ "performance_signature": interproc_instance.list(),
+ "backfill_report": interproc_instance.list(),
+ "backfill_record": interproc_instance.list(),
}
def delete_local_data(self):
for model in self.BIG_SIZED_TABLES:
- print('Removing elements from {0} table... '.format(model._meta.db_table))
+ print("Removing elements from {0} table... ".format(model._meta.db_table))
model.objects.using(self.target).all().delete()
def save_local_data(self):
priority_dict = collections.OrderedDict(
- sorted(self.priority_dict.items(), key=lambda item: item[1]['download_order'])
+ sorted(self.priority_dict.items(), key=lambda item: item[1]["download_order"])
)
for table_name, properties in priority_dict.items():
- print('Saving {0} data...'.format(table_name))
+ print("Saving {0} data...".format(table_name))
model_values = (
- properties['model']
+ properties["model"]
.objects.using(self.source)
.filter(pk__in=self.models_instances[table_name])
)
self._ignore_sensitive_fields(table_name, model_values)
- properties['model'].objects.using(self.target).bulk_create(model_values)
+ properties["model"].objects.using(self.target).bulk_create(model_values)
def _ignore_sensitive_fields(self, table_name, model_values):
"""
@@ -257,7 +257,7 @@ def fillup_target(self, **filters):
# fetch all alert summaries & alerts
# with only a subset of the datum & jobs
oldest_day = datetime.datetime.now() - self.time_window
- print('\nFetching data subset no older than {0}...'.format(str(oldest_day)))
+ print("\nFetching data subset no older than {0}...".format(str(oldest_day)))
self.delete_local_data()
alert_summaries = list(self.query_set)
@@ -272,7 +272,7 @@ def fillup_target(self, **filters):
try:
stop_idx = step_size = math.ceil(alert_summaries_len / num_workers)
except ZeroDivisionError:
- raise RuntimeError('No alert summaries to fetch.')
+ raise RuntimeError("No alert summaries to fetch.")
start_idx = 0
for idx in range(num_workers):
@@ -293,51 +293,51 @@ def fillup_target(self, **filters):
self.save_local_data()
def db_worker(self, process_no, alert_summaries):
- print('Process no {0} up and running...'.format(process_no))
- self.progress_notifier(self.bring_in_alert_summary, alert_summaries, 'alert summary', 1)
+ print("Process no {0} up and running...".format(process_no))
+ self.progress_notifier(self.bring_in_alert_summary, alert_summaries, "alert summary", 1)
def bring_in_alert_summary(self, alert_summary):
- self.update_list('push', alert_summary.push)
- self.update_list('push', alert_summary.prev_push)
- self.update_list('performance_alert_summary', alert_summary)
- self.update_list('backfill_report', alert_summary)
+ self.update_list("push", alert_summary.push)
+ self.update_list("push", alert_summary.prev_push)
+ self.update_list("performance_alert_summary", alert_summary)
+ self.update_list("backfill_report", alert_summary)
# bring in all its alerts
alerts = list(
PerformanceAlert.objects.using(self.source)
- .select_related('series_signature')
+ .select_related("series_signature")
.filter(summary=alert_summary)
)
- self.progress_notifier(self.bring_in_alert, alerts, 'alert', 2)
+ self.progress_notifier(self.bring_in_alert, alerts, "alert", 2)
def bring_in_alert(self, alert):
- if alert.id in self.models_instances['performance_alert']:
+ if alert.id in self.models_instances["performance_alert"]:
return
- print('{0}Fetching alert #{1}...'.format('\t' * 2, alert.id))
+ print("{0}Fetching alert #{1}...".format("\t" * 2, alert.id))
if alert.related_summary:
- if alert.related_summary not in self.models_instances['performance_alert_summary']:
+ if alert.related_summary not in self.models_instances["performance_alert_summary"]:
# if the alert summary identified isn't registered yet
# register it with all its alerts
self.progress_notifier(
- self.bring_in_alert_summary, [alert.related_summary], 'alert summary', 1
+ self.bring_in_alert_summary, [alert.related_summary], "alert summary", 1
)
# pull parent signature first
parent_signature = alert.series_signature.parent_signature
if parent_signature:
self.bring_in_performance_data(alert.created, parent_signature)
- self.update_list('performance_signature', parent_signature)
+ self.update_list("performance_signature", parent_signature)
# then signature itself
self.bring_in_performance_data(alert.created, alert.series_signature)
- self.update_list('performance_signature', alert.series_signature)
+ self.update_list("performance_signature", alert.series_signature)
# then alert itself
# we don't have access to user table...
alert.classifier = None
- self.models_instances['performance_alert'].append(alert.id)
- self.models_instances['backfill_record'].append(alert.id)
+ self.models_instances["performance_alert"].append(alert.id)
+ self.models_instances["backfill_record"].append(alert.id)
def bring_in_performance_data(self, time_of_alert, performance_signature):
performance_data = list(
@@ -349,32 +349,32 @@ def bring_in_performance_data(self, time_of_alert, performance_signature):
)
self.progress_notifier(
- self.bring_in_performance_datum, performance_data, 'performance datum', 3
+ self.bring_in_performance_datum, performance_data, "performance datum", 3
)
def bring_in_performance_datum(self, performance_datum):
- if performance_datum.id in self.models_instances['performance_datum']:
+ if performance_datum.id in self.models_instances["performance_datum"]:
return
- self.update_list('push', performance_datum.push)
+ self.update_list("push", performance_datum.push)
self.bring_in_job(performance_datum.job)
- self.models_instances['performance_datum'].append(performance_datum.id)
+ self.models_instances["performance_datum"].append(performance_datum.id)
def bring_in_job(self, job):
- if job.id in self.models_instances['job']:
+ if job.id in self.models_instances["job"]:
return
- occasional_log('{0}Fetching job #{1}'.format('\t' * 4, job.id))
+ occasional_log("{0}Fetching job #{1}".format("\t" * 4, job.id))
- self.update_list('reference_data_signature', job.signature)
- self.update_list('build_platform', job.build_platform)
- self.update_list('machine', job.machine)
- self.update_list('job_group', job.job_group)
- self.update_list('job_type', job.job_type)
- self.update_list('push', job.push)
+ self.update_list("reference_data_signature", job.signature)
+ self.update_list("build_platform", job.build_platform)
+ self.update_list("machine", job.machine)
+ self.update_list("job_group", job.job_group)
+ self.update_list("job_type", job.job_type)
+ self.update_list("push", job.push)
- self.models_instances['job'].append(job.id)
+ self.models_instances["job"].append(job.id)
def update_list(self, database_key, element):
if element.id in self.models_instances[database_key]:
@@ -387,25 +387,25 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- '--num-workers', action='store', dest='num_workers', type=int, default=4
+ "--num-workers", action="store", dest="num_workers", type=int, default=4
)
- parser.add_argument('--time-window', action='store', type=int, default=1)
+ parser.add_argument("--time-window", action="store", type=int, default=1)
- parser.add_argument('--frameworks', nargs='+', default=None)
+ parser.add_argument("--frameworks", nargs="+", default=None)
- parser.add_argument('--repositories', nargs='+', default=None)
+ parser.add_argument("--repositories", nargs="+", default=None)
def handle(self, *args, **options):
- time_window = datetime.timedelta(days=options['time_window'])
- num_workers = options['num_workers']
- frameworks = options['frameworks']
- repositories = options['repositories']
+ time_window = datetime.timedelta(days=options["time_window"])
+ num_workers = options["num_workers"]
+ frameworks = options["frameworks"]
+ repositories = options["repositories"]
- affordable_data = DecentSizedData(source='upstream', target='default')
+ affordable_data = DecentSizedData(source="upstream", target="default")
subseted_data = MassiveData(
- source='upstream',
- target='default',
+ source="upstream",
+ target="default",
progress_notifier=progress_notifier,
time_window=time_window,
num_workers=num_workers,
diff --git a/treeherder/perf/management/commands/perf_sheriff.py b/treeherder/perf/management/commands/perf_sheriff.py
index 0aee328fd67..eff2eba1df9 100644
--- a/treeherder/perf/management/commands/perf_sheriff.py
+++ b/treeherder/perf/management/commands/perf_sheriff.py
@@ -17,38 +17,38 @@ class Command(BaseCommand):
AVAILABLE_REPOS = Repository.fetch_all_names()
SHERIFFED_FRAMEWORKS = [
- 'browsertime',
- 'raptor',
- 'talos',
- 'awsy',
- 'build_metrics',
- 'js-bench',
- 'devtools',
+ "browsertime",
+ "raptor",
+ "talos",
+ "awsy",
+ "build_metrics",
+ "js-bench",
+ "devtools",
]
- SHERIFFED_REPOS = ['autoland', 'mozilla-beta']
+ SHERIFFED_REPOS = ["autoland", "mozilla-beta"]
help = "Select most relevant alerts and identify jobs to retrigger."
def add_arguments(self, parser):
parser.add_argument(
- '--time-window',
- action='store',
+ "--time-window",
+ action="store",
type=int,
default=60,
help="How far back to look for alerts to retrigger (expressed in minutes).",
)
parser.add_argument(
- '--frameworks',
- nargs='+',
+ "--frameworks",
+ nargs="+",
default=self.SHERIFFED_FRAMEWORKS,
choices=self.AVAILABLE_FRAMEWORKS,
help="Defaults to all registered performance frameworks.",
)
parser.add_argument(
- '--repositories',
- nargs='+',
+ "--repositories",
+ nargs="+",
default=self.SHERIFFED_REPOS,
choices=self.AVAILABLE_REPOS,
help=f"Defaults to {self.SHERIFFED_REPOS}.",
@@ -67,8 +67,8 @@ def handle(self, *args, **options):
def _parse_args(self, **options) -> Tuple[List, List, datetime, timedelta]:
return (
- options['frameworks'],
- options['repositories'],
- datetime.now() - timedelta(minutes=options['time_window']),
+ options["frameworks"],
+ options["repositories"],
+ datetime.now() - timedelta(minutes=options["time_window"]),
timedelta(days=1),
)
diff --git a/treeherder/perf/management/commands/reassign_perf_data.py b/treeherder/perf/management/commands/reassign_perf_data.py
index 14ed742015c..8b824fb313b 100644
--- a/treeherder/perf/management/commands/reassign_perf_data.py
+++ b/treeherder/perf/management/commands/reassign_perf_data.py
@@ -3,7 +3,7 @@
from treeherder.perf.models import PerformanceAlert, PerformanceDatum, PerformanceSignature
-RAPTOR_TP6_SUBTESTS = 'raptor-tp6-subtests'
+RAPTOR_TP6_SUBTESTS = "raptor-tp6-subtests"
USE_CASES = [RAPTOR_TP6_SUBTESTS]
@@ -24,38 +24,38 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- '--from',
- action='append',
- help='Original signature (specify multiple times to get multiple signatures)',
+ "--from",
+ action="append",
+ help="Original signature (specify multiple times to get multiple signatures)",
)
parser.add_argument(
- '--to',
- action='append',
- help='New signature we want to move performance data to '
- '(specify multiple times to get multiple signatures)',
+ "--to",
+ action="append",
+ help="New signature we want to move performance data to "
+ "(specify multiple times to get multiple signatures)",
)
parser.add_argument(
- '--for',
- action='store',
+ "--for",
+ action="store",
choices=USE_CASES,
- metavar='USE CASE',
- help='''Rename "old" Raptor tp6 subtests, by pointing perf alerts & datum to new signatures.
+ metavar="USE CASE",
+ help="""Rename "old" Raptor tp6 subtests, by pointing perf alerts & datum to new signatures.
Cannot be used in conjunction with --from/--to arguments.
- Available use cases: {}'''.format(
- ','.join(USE_CASES)
+ Available use cases: {}""".format(
+ ",".join(USE_CASES)
),
)
parser.add_argument(
- '--keep-leftovers',
- action='store_true',
- help='Keep database rows even if they become useless after the script runs',
+ "--keep-leftovers",
+ action="store_true",
+ help="Keep database rows even if they become useless after the script runs",
)
def handle(self, *args, **options):
- from_signatures = options['from']
- to_signatures = options['to']
- use_case = options['for']
- keep_leftovers = options['keep_leftovers']
+ from_signatures = options["from"]
+ to_signatures = options["to"]
+ use_case = options["for"]
+ keep_leftovers = options["keep_leftovers"]
self.validate_arguments(from_signatures, to_signatures, use_case)
@@ -111,7 +111,7 @@ def fetch_tp6_signature_pairs(self):
old_signature.extra_options = new_signature.extra_options AND
old_signature.lower_is_better = new_signature.lower_is_better AND
old_signature.has_subtests = new_signature.has_subtests""".format(
- tp6_name_pattern='raptor-tp6%',
+ tp6_name_pattern="raptor-tp6%",
mozilla_central=self.mozilla_central,
mozilla_inbound=self.mozilla_inbound,
mozilla_beta=self.mozilla_beta,
diff --git a/treeherder/perf/management/commands/remove_multi_commit_data.py b/treeherder/perf/management/commands/remove_multi_commit_data.py
index 18431195623..71e5f2ae7ea 100644
--- a/treeherder/perf/management/commands/remove_multi_commit_data.py
+++ b/treeherder/perf/management/commands/remove_multi_commit_data.py
@@ -14,25 +14,25 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- '--chunk-size',
+ "--chunk-size",
default=40,
type=int,
help="How many rows to delete at a time (this won't remove all rows in a single query, "
"but in multiple, smaller ones).",
- metavar='CHUNK-SIZE',
+ metavar="CHUNK-SIZE",
)
def handle(self, *args, **options):
- data_to_delete = MultiCommitDatum.objects.all().values_list('perf_datum', flat=True)
- chunk_size = options['chunk_size']
+ data_to_delete = MultiCommitDatum.objects.all().values_list("perf_datum", flat=True)
+ chunk_size = options["chunk_size"]
if not data_to_delete:
- print('No data to delete')
+ print("No data to delete")
return
- print('Removing `performance_datum` rows ingested as multi commit data...')
+ print("Removing `performance_datum` rows ingested as multi commit data...")
while data_to_delete:
delete_now, data_to_delete = data_to_delete[:chunk_size], data_to_delete[chunk_size:]
PerformanceDatum.objects.filter(id__in=delete_now).delete()
- print(f'\r{len(data_to_delete)} `performance_datum` rows left to delete', end='')
+ print(f"\r{len(data_to_delete)} `performance_datum` rows left to delete", end="")
print()
diff --git a/treeherder/perf/management/commands/remove_vcs_data.py b/treeherder/perf/management/commands/remove_vcs_data.py
index 727bd321369..6bc60af3d6d 100644
--- a/treeherder/perf/management/commands/remove_vcs_data.py
+++ b/treeherder/perf/management/commands/remove_vcs_data.py
@@ -26,7 +26,7 @@ def __init__(self, *args, **kwargs):
self.__timer.start_timer()
def handle(self, *args, **options):
- vcs_signatures = PerformanceSignature.objects.filter(framework__name='vcs')
+ vcs_signatures = PerformanceSignature.objects.filter(framework__name="vcs")
for signature in vcs_signatures:
signature.delete() # intentionally cascades to data points also
self._maybe_take_small_break() # so database won't cripple; blocking call
diff --git a/treeherder/perf/management/commands/report_backfill_outcome.py b/treeherder/perf/management/commands/report_backfill_outcome.py
index a917ad56652..a22c12c8323 100644
--- a/treeherder/perf/management/commands/report_backfill_outcome.py
+++ b/treeherder/perf/management/commands/report_backfill_outcome.py
@@ -13,7 +13,7 @@
class Command(BaseCommand):
help = (
- 'Command used for reporting the outcome of the automatic backfilling process once per day.'
+ "Command used for reporting the outcome of the automatic backfilling process once per day."
)
def handle(self, *args, **options):
@@ -39,7 +39,7 @@ def handle(self, *args, **options):
logger.debug(
f"Sherlock Notify Service: Email notification service replied with `{notification_outcome}`."
)
- if notification_outcome['response'].status_code == SUCCESS_STATUS:
+ if notification_outcome["response"].status_code == SUCCESS_STATUS:
logger.debug(
"Sherlock Notify Service: Removing notified records from helper table."
)
diff --git a/treeherder/perf/management/commands/test_analyze_perf.py b/treeherder/perf/management/commands/test_analyze_perf.py
index eb74c240e65..aeb031f643c 100644
--- a/treeherder/perf/management/commands/test_analyze_perf.py
+++ b/treeherder/perf/management/commands/test_analyze_perf.py
@@ -13,50 +13,50 @@ class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
- '--server',
- action='store',
- dest='server',
+ "--server",
+ action="store",
+ dest="server",
default=settings.SITE_URL,
- help='Server to get data from, default to local instance',
+ help="Server to get data from, default to local instance",
)
parser.add_argument(
- '--time-interval',
- action='store',
+ "--time-interval",
+ action="store",
default=PerformanceTimeInterval.WEEK,
type=int,
- help='Time interval to test alert code on (defaults to one week)',
+ help="Time interval to test alert code on (defaults to one week)",
)
parser.add_argument(
- '--project',
- action='append',
- help='Project to get signatures from (specify multiple time to get multiple projects',
+ "--project",
+ action="append",
+            help="Project to get signatures from (specify multiple times to get multiple projects)",
)
parser.add_argument(
- '--signature',
- action='store',
- help='Signature hash to process, defaults to all non-subtests',
+ "--signature",
+ action="store",
+ help="Signature hash to process, defaults to all non-subtests",
)
@staticmethod
def _get_series_description(option_collection_hash, series_properties):
- testname = series_properties.get('test', 'summary')
+ testname = series_properties.get("test", "summary")
option_hash_strs = [
- o['name'] for o in option_collection_hash[series_properties['option_collection_hash']]
+ o["name"] for o in option_collection_hash[series_properties["option_collection_hash"]]
]
- test_options = series_properties.get('test_options', []) + option_hash_strs
- return " ".join([str(s) for s in [series_properties['suite'], testname] + test_options])
+ test_options = series_properties.get("test_options", []) + option_hash_strs
+ return " ".join([str(s) for s in [series_properties["suite"], testname] + test_options])
def handle(self, *args, **options):
- if not options['project']:
+ if not options["project"]:
raise CommandError("Must specify at least one project with " "--project")
- pc = PerfherderClient(server_url=options['server'])
+ pc = PerfherderClient(server_url=options["server"])
option_collection_hash = pc.get_option_collection_hash()
# print csv header
print(
- ','.join(
+ ",".join(
[
"project",
"platform",
@@ -72,46 +72,46 @@ def handle(self, *args, **options):
)
)
- for project in options['project']:
- if options['signature']:
- signatures = [options['signature']]
+ for project in options["project"]:
+ if options["signature"]:
+ signatures = [options["signature"]]
signature_data = pc.get_performance_signatures(
- project, signatures=signatures, interval=options['time_interval']
+ project, signatures=signatures, interval=options["time_interval"]
)
else:
signature_data = pc.get_performance_signatures(
- project, interval=options['time_interval']
+ project, interval=options["time_interval"]
)
signatures = []
signatures_to_ignore = set()
# if doing everything, only handle summary series
for signature, properties in signature_data.items():
signatures.append(signature)
- if 'subtest_signatures' in properties:
+ if "subtest_signatures" in properties:
# Don't alert on subtests which have a summary
- signatures_to_ignore.update(properties['subtest_signatures'])
+ signatures_to_ignore.update(properties["subtest_signatures"])
signatures = [
signature for signature in signatures if signature not in signatures_to_ignore
]
for signature in signatures:
series = pc.get_performance_data(
- project, signatures=signature, interval=options['time_interval']
+ project, signatures=signature, interval=options["time_interval"]
)[signature]
series_properties = signature_data.get(signature)
data = []
- for timestamp, value in zip(series['push_timestamp'], series['value']):
+ for timestamp, value in zip(series["push_timestamp"], series["value"]):
data.append(RevisionDatum(timestamp, value))
for r in detect_changes(data):
- if r.state == 'regression':
+ if r.state == "regression":
pushes = pc.get_pushes(project, id=r.testrun_id)
- revision = pushes[0]['revision'] if pushes else ''
- initial_value = r.historical_stats['avg']
- new_value = r.forward_stats['avg']
+ revision = pushes[0]["revision"] if pushes else ""
+ initial_value = r.historical_stats["avg"]
+ new_value = r.forward_stats["avg"]
if initial_value != 0:
pct_change = (
100.0 * abs(new_value - initial_value) / float(initial_value)
@@ -120,12 +120,12 @@ def handle(self, *args, **options):
pct_change = 0.0
delta = new_value - initial_value
print(
- ','.join(
+ ",".join(
map(
str,
[
project,
- series_properties['machine_platform'],
+ series_properties["machine_platform"],
signature,
self._get_series_description(
option_collection_hash, series_properties
diff --git a/treeherder/perf/models.py b/treeherder/perf/models.py
index d8a74c9b54a..2751cfabcc4 100644
--- a/treeherder/perf/models.py
+++ b/treeherder/perf/models.py
@@ -32,11 +32,11 @@ class PerformanceFramework(models.Model):
enabled = models.BooleanField(default=False)
class Meta:
- db_table = 'performance_framework'
+ db_table = "performance_framework"
@classmethod
def fetch_all_names(cls) -> List[str]:
- return cls.objects.values_list('name', flat=True)
+ return cls.objects.values_list("name", flat=True)
def __str__(self):
return self.name
@@ -55,14 +55,14 @@ class PerformanceSignature(models.Model):
test = models.CharField(max_length=80, blank=True)
application = models.CharField(
max_length=10,
- default='',
+ default="",
help_text="Application that runs the signature's tests. "
"Generally used to record browser's name, but not necessarily.",
)
lower_is_better = models.BooleanField(default=True)
last_updated = models.DateTimeField(db_index=True)
parent_signature = models.ForeignKey(
- 'self', on_delete=models.CASCADE, related_name='subtests', null=True, blank=True
+ "self", on_delete=models.CASCADE, related_name="subtests", null=True, blank=True
)
has_subtests = models.BooleanField()
@@ -90,7 +90,7 @@ class PerformanceSignature(models.Model):
# generation works
ALERT_PCT = 0
ALERT_ABS = 1
- ALERT_CHANGE_TYPES = ((ALERT_PCT, 'percentage'), (ALERT_ABS, 'absolute'))
+ ALERT_CHANGE_TYPES = ((ALERT_PCT, "percentage"), (ALERT_ABS, "absolute"))
should_alert = models.BooleanField(null=True)
alert_change_type = models.IntegerField(choices=ALERT_CHANGE_TYPES, null=True)
@@ -135,7 +135,7 @@ def has_performance_data(self):
).exists()
def has_data_with_historical_value(self):
- repositories = ['autoland', 'mozilla-central']
+ repositories = ["autoland", "mozilla-central"]
if self.repository.name in repositories:
perf_data = list(
PerformanceDatum.objects.filter(
@@ -148,36 +148,36 @@ def has_data_with_historical_value(self):
return False
class Meta:
- db_table = 'performance_signature'
+ db_table = "performance_signature"
unique_together = (
# ensure there is only one signature per repository with a
# particular set of properties
(
- 'repository',
- 'suite',
- 'test',
- 'framework',
- 'platform',
- 'option_collection',
- 'extra_options',
- 'last_updated',
- 'application',
+ "repository",
+ "suite",
+ "test",
+ "framework",
+ "platform",
+ "option_collection",
+ "extra_options",
+ "last_updated",
+ "application",
),
# suite_public_name/test_public_name must be unique
# and different than suite/test
(
- 'repository',
- 'suite_public_name',
- 'test_public_name',
- 'framework',
- 'platform',
- 'option_collection',
- 'extra_options',
+ "repository",
+ "suite_public_name",
+ "test_public_name",
+ "framework",
+ "platform",
+ "option_collection",
+ "extra_options",
),
# ensure there is only one signature of any hash per
# repository (same hash in different repositories is allowed)
- ('repository', 'framework', 'application', 'signature_hash'),
+ ("repository", "framework", "application", "signature_hash"),
)
def __str__(self):
@@ -203,15 +203,15 @@ class PerformanceDatum(models.Model):
push = models.ForeignKey(Push, on_delete=models.CASCADE)
class Meta:
- db_table = 'performance_datum'
+ db_table = "performance_datum"
index_together = [
# Speeds up the typical "get a range of performance datums" query
- ('repository', 'signature', 'push_timestamp'),
+ ("repository", "signature", "push_timestamp"),
# Speeds up the compare view in treeherder (we only index on
# repository because we currently filter on it in the query)
- ('repository', 'signature', 'push'),
+ ("repository", "signature", "push"),
]
- unique_together = ('repository', 'job', 'push', 'push_timestamp', 'signature')
+ unique_together = ("repository", "job", "push", "push_timestamp", "signature")
@staticmethod
def should_mark_as_multi_commit(is_multi_commit: bool, was_created: bool) -> bool:
@@ -233,7 +233,7 @@ class PerformanceDatumReplicate(models.Model):
value = models.FloatField()
class Meta:
- db_table = 'performance_datum_replicate'
+ db_table = "performance_datum_replicate"
class MultiCommitDatum(models.Model):
@@ -241,7 +241,7 @@ class MultiCommitDatum(models.Model):
PerformanceDatum,
on_delete=models.CASCADE,
primary_key=True,
- related_name='multi_commit_datum',
+ related_name="multi_commit_datum",
)
@@ -271,14 +271,14 @@ class PerformanceAlertSummary(models.Model):
repository = models.ForeignKey(Repository, on_delete=models.CASCADE)
framework = models.ForeignKey(PerformanceFramework, on_delete=models.CASCADE)
- prev_push = models.ForeignKey(Push, on_delete=models.CASCADE, related_name='+')
- push = models.ForeignKey(Push, on_delete=models.CASCADE, related_name='+')
+ prev_push = models.ForeignKey(Push, on_delete=models.CASCADE, related_name="+")
+ push = models.ForeignKey(Push, on_delete=models.CASCADE, related_name="+")
manually_created = models.BooleanField(default=False)
notes = models.TextField(null=True, blank=True)
assignee = models.ForeignKey(
- User, on_delete=models.SET_NULL, null=True, related_name='assigned_alerts'
+ User, on_delete=models.SET_NULL, null=True, related_name="assigned_alerts"
)
created = models.DateTimeField(auto_now_add=True, db_index=True)
@@ -297,15 +297,15 @@ class PerformanceAlertSummary(models.Model):
BACKED_OUT = 8
STATUSES = (
- (UNTRIAGED, 'Untriaged'),
- (DOWNSTREAM, 'Downstream'),
- (REASSIGNED, 'Reassigned'),
- (INVALID, 'Invalid'),
- (IMPROVEMENT, 'Improvement'),
- (INVESTIGATING, 'Investigating'),
- (WONTFIX, 'Won\'t fix'),
- (FIXED, 'Fixed'),
- (BACKED_OUT, 'Backed out'),
+ (UNTRIAGED, "Untriaged"),
+ (DOWNSTREAM, "Downstream"),
+ (REASSIGNED, "Reassigned"),
+ (INVALID, "Invalid"),
+ (IMPROVEMENT, "Improvement"),
+ (INVESTIGATING, "Investigating"),
+ (WONTFIX, "Won't fix"),
+ (FIXED, "Fixed"),
+ (BACKED_OUT, "Backed out"),
)
status = models.IntegerField(choices=STATUSES, default=UNTRIAGED)
@@ -415,7 +415,7 @@ def timestamp_first_triage(self):
class Meta:
db_table = "performance_alert_summary"
- unique_together = ('repository', 'framework', 'prev_push', 'push')
+ unique_together = ("repository", "framework", "prev_push", "push")
def __str__(self):
return "{} {} {}-{}".format(
@@ -438,10 +438,10 @@ class PerformanceAlert(models.Model):
id = models.AutoField(primary_key=True)
summary = models.ForeignKey(
- PerformanceAlertSummary, on_delete=models.CASCADE, related_name='alerts'
+ PerformanceAlertSummary, on_delete=models.CASCADE, related_name="alerts"
)
related_summary = models.ForeignKey(
- PerformanceAlertSummary, on_delete=models.CASCADE, related_name='related_alerts', null=True
+ PerformanceAlertSummary, on_delete=models.CASCADE, related_name="related_alerts", null=True
)
series_signature = models.ForeignKey(PerformanceSignature, on_delete=models.CASCADE)
is_regression = models.BooleanField()
@@ -469,11 +469,11 @@ class PerformanceAlert(models.Model):
UNRELATIONAL_STATUS_IDS = (UNTRIAGED, INVALID, ACKNOWLEDGED)
STATUSES = (
- (UNTRIAGED, 'Untriaged'),
- (DOWNSTREAM, 'Downstream'),
- (REASSIGNED, 'Reassigned'),
- (INVALID, 'Invalid'),
- (ACKNOWLEDGED, 'Acknowledged'),
+ (UNTRIAGED, "Untriaged"),
+ (DOWNSTREAM, "Downstream"),
+ (REASSIGNED, "Reassigned"),
+ (INVALID, "Invalid"),
+ (ACKNOWLEDGED, "Acknowledged"),
)
status = models.IntegerField(choices=STATUSES, default=UNTRIAGED)
@@ -512,7 +512,7 @@ class PerformanceAlert(models.Model):
@property
def initial_culprit_job(self) -> Optional[Job]:
- if hasattr(self, '__initial_culprit_job'):
+ if hasattr(self, "__initial_culprit_job"):
return self.__initial_culprit_job
try:
@@ -522,7 +522,7 @@ def initial_culprit_job(self) -> Optional[Job]:
repository=self.series_signature.repository,
signature=self.series_signature,
push=self.summary.push,
- ).order_by('id')[0]
+ ).order_by("id")[0]
self.__initial_culprit_job = culprit_data_point.job
except IndexError:
logger.debug(f"Could not find the initial culprit job for alert {self.id}.")
@@ -566,7 +566,7 @@ def save(self, *args, **kwargs):
# just forward the explicit database
# so the summary properly updates there
- using = kwargs.get('using', None)
+ using = kwargs.get("using", None)
self.summary.update_status(using=using)
if self.related_summary:
self.related_summary.update_status(using=using)
@@ -581,7 +581,7 @@ def timestamp_first_triage(self):
class Meta:
db_table = "performance_alert"
- unique_together = ('summary', 'series_signature')
+ unique_together = ("summary", "series_signature")
def __str__(self):
return "{} {} {}%".format(self.summary, self.series_signature, self.amount_pct)
@@ -591,7 +591,7 @@ class PerformanceTag(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=30, unique=True)
alert_summaries = models.ManyToManyField(
- PerformanceAlertSummary, related_name='performance_tags'
+ PerformanceAlertSummary, related_name="performance_tags"
)
class Meta:
@@ -617,7 +617,7 @@ class Meta:
db_table = "performance_bug_template"
def __str__(self):
- return '{} bug template'.format(self.framework.name)
+ return "{} bug template".format(self.framework.name)
# TODO: we actually need this name for the Sherlock's hourly report
@@ -631,7 +631,7 @@ class BackfillReport(models.Model):
PerformanceAlertSummary,
on_delete=models.CASCADE,
primary_key=True,
- related_name='backfill_report',
+ related_name="backfill_report",
)
created = models.DateTimeField(auto_now_add=True)
@@ -658,10 +658,10 @@ def __str__(self):
class BackfillRecord(models.Model):
alert = models.OneToOneField(
- PerformanceAlert, on_delete=models.CASCADE, primary_key=True, related_name='backfill_record'
+ PerformanceAlert, on_delete=models.CASCADE, primary_key=True, related_name="backfill_record"
)
- report = models.ForeignKey(BackfillReport, on_delete=models.CASCADE, related_name='records')
+ report = models.ForeignKey(BackfillReport, on_delete=models.CASCADE, related_name="records")
# all data required to retrigger/backfill
# associated perf alert, as JSON dump
@@ -676,11 +676,11 @@ class BackfillRecord(models.Model):
FAILED = 4
STATUSES = (
- (PRELIMINARY, 'Preliminary'),
- (READY_FOR_PROCESSING, 'Ready for processing'),
- (BACKFILLED, 'Backfilled'),
- (SUCCESSFUL, 'Successful'),
- (FAILED, 'Failed'),
+ (PRELIMINARY, "Preliminary"),
+ (READY_FOR_PROCESSING, "Ready for processing"),
+ (BACKFILLED, "Backfilled"),
+ (SUCCESSFUL, "Successful"),
+ (FAILED, "Failed"),
)
status = models.IntegerField(choices=STATUSES, default=PRELIMINARY)
@@ -688,10 +688,10 @@ class BackfillRecord(models.Model):
# Backfill outcome
log_details = models.TextField() # JSON expected, not supported by Django
job_type = models.ForeignKey(
- JobType, null=True, on_delete=models.SET_NULL, related_name='backfill_records'
+ JobType, null=True, on_delete=models.SET_NULL, related_name="backfill_records"
)
job_group = models.ForeignKey(
- JobGroup, null=True, on_delete=models.SET_NULL, related_name='backfill_records'
+ JobGroup, null=True, on_delete=models.SET_NULL, related_name="backfill_records"
)
job_tier = models.PositiveIntegerField(null=True)
job_platform_option = models.CharField(max_length=100, null=True)
@@ -718,7 +718,7 @@ def job_symbol(self) -> Optional[str]:
if not all([self.job_tier, self.job_group, self.job_type]):
return None
- tier_label = ''
+ tier_label = ""
if self.job_tier > 1:
tier_label = f"[tier {self.job_tier}]"
@@ -763,23 +763,23 @@ def get_context_border_info(self, context_property: str) -> Tuple[str, str]:
return from_info, to_info
def get_pushes_in_context_range(self) -> List[Push]:
- from_time, to_time = self.get_context_border_info('push_timestamp')
+ from_time, to_time = self.get_context_border_info("push_timestamp")
return Push.objects.filter(
repository=self.repository, time__gte=from_time, time__lte=to_time
).all()
def get_job_search_str(self) -> str:
- platform = deepgetattr(self, 'platform.platform')
- platform_option = deepgetattr(self, 'job_platform_option')
- job_group_name = deepgetattr(self, 'job_group.name')
- job_type_name = deepgetattr(self, 'job_type.name')
- job_type_symbol = deepgetattr(self, 'job_type.symbol')
+ platform = deepgetattr(self, "platform.platform")
+ platform_option = deepgetattr(self, "job_platform_option")
+ job_group_name = deepgetattr(self, "job_group.name")
+ job_type_name = deepgetattr(self, "job_type.name")
+ job_type_symbol = deepgetattr(self, "job_type.symbol")
search_terms = [platform, platform_option, job_group_name, job_type_name, job_type_symbol]
search_terms = list(filter(None, search_terms))
- return ','.join(search_terms)
+ return ",".join(search_terms)
def get_context(self) -> List[dict]:
return json.loads(self.context)
@@ -793,7 +793,7 @@ def set_log_details(self, value: dict):
def save(self, *args, **kwargs):
# refresh parent's latest update time
super().save(*args, **kwargs)
- self.report.save(using=kwargs.get('using'))
+ self.report.save(using=kwargs.get("using"))
def delete(self, using=None, keep_parents=False):
super().delete(using, keep_parents)
@@ -815,7 +815,7 @@ class BackfillNotificationRecord(models.Model):
record = models.OneToOneField(
BackfillRecord,
on_delete=models.CASCADE,
- related_name='backfill_notification_record',
+ related_name="backfill_notification_record",
)
created = models.DateTimeField(auto_now_add=True)
last_updated = models.DateTimeField(auto_now=True)
@@ -846,7 +846,7 @@ def deepgetattr(obj: object, attr_chain: str) -> Optional[object]:
@return: None if any attribute within chain does not exist.
"""
try:
- return reduce(getattr, attr_chain.split('.'), obj)
+ return reduce(getattr, attr_chain.split("."), obj)
except AttributeError:
logger.debug(
f"Failed to access deeply nested attribute `{attr_chain}` on object of type {type(obj)}."
diff --git a/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py b/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py
index 542b27a4782..bb3f4cca95b 100644
--- a/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py
+++ b/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py
@@ -13,10 +13,10 @@
# Google Doc specification
PERF_SHERIFFING_CRITERIA = (
- 'https://docs.google.com/document/d/11WPIPFeq-i1IAVOQhBR-SzIMOPSqBVjLepgOWCrz_S4'
+ "https://docs.google.com/document/d/11WPIPFeq-i1IAVOQhBR-SzIMOPSqBVjLepgOWCrz_S4"
)
-ENGINEER_TRACTION_SPECIFICATION = f'{PERF_SHERIFFING_CRITERIA}#heading=h.8th4thm4twvx'
-FIX_RATIO_SPECIFICATION = f'{PERF_SHERIFFING_CRITERIA}#heading=h.8sevd69iqfz9'
+ENGINEER_TRACTION_SPECIFICATION = f"{PERF_SHERIFFING_CRITERIA}#heading=h.8th4thm4twvx"
+FIX_RATIO_SPECIFICATION = f"{PERF_SHERIFFING_CRITERIA}#heading=h.8sevd69iqfz9"
class NonBlockableSession(Session):
@@ -31,9 +31,9 @@ def __init__(self, referer=None):
# will be more likely to contact us before blocking our
# IP when making many queries with this
self.headers = {
- 'Referer': f'{referer}',
- 'User-Agent': 'treeherder/{}'.format(settings.SITE_HOSTNAME),
- 'Accept': 'application/json',
+ "Referer": f"{referer}",
+ "User-Agent": "treeherder/{}".format(settings.SITE_HOSTNAME),
+ "Accept": "application/json",
}
@@ -55,7 +55,7 @@ def __init__(
if not isinstance(self._session, NonBlockableSession):
raise TypeError(
- 'Engineer traction formula should only query using an non blockable HTTP session'
+                "Engineer traction formula should only query using a non-blockable HTTP session"
) # otherwise Bugzilla OPS will block us by IP
# for breakdown
@@ -94,15 +94,15 @@ def __call__(self, framework: str, suite: str, test: str = None) -> float:
def breakdown(self) -> Tuple[list, list]:
breakdown_items = (self._denominator_bugs, self._numerator_bugs)
if None in breakdown_items:
- raise RuntimeError('Cannot breakdown results without running calculus first')
+ raise RuntimeError("Cannot breakdown results without running calculus first")
return tuple(deepcopy(item) for item in breakdown_items)
def has_cooled_down(self, bug: dict) -> bool:
try:
- creation_time = self.__get_datetime(bug['creation_time'])
+ creation_time = self.__get_datetime(bug["creation_time"])
except (KeyError, ValueError) as ex:
- raise ValueError('Bug has unexpected JSON body') from ex
+ raise ValueError("Bug has unexpected JSON body") from ex
else:
return creation_time <= datetime.now() - self._bug_cooldown
@@ -126,32 +126,32 @@ def __fetch_cooled_down_bugs(self, framework: str, suite: str, test: str = None)
return cooled_bugs
def __fetch_quantified_bugs(self, framework: str, suite: str, test: str = None) -> List[dict]:
- test_moniker = ' '.join(filter(None, (suite, test)))
+ test_moniker = " ".join(filter(None, (suite, test)))
test_id_fragments = filter(None, [framework, test_moniker])
creation_time = datetime.strftime(self.oldest_timestamp, BZ_DATETIME_FORMAT)
params = {
- 'longdesc': ','.join(test_id_fragments),
- 'longdesc_type': 'allwordssubstr',
- 'longdesc_initial': 1,
- 'keywords': 'perf,perf-alert',
- 'keywords_type': 'anywords',
- 'creation_time': creation_time,
- 'query_format': 'advanced',
- 'include_fields': 'id,type,resolution,last_change_time,is_open,creation_time,summary,whiteboard,status,keywords',
+ "longdesc": ",".join(test_id_fragments),
+ "longdesc_type": "allwordssubstr",
+ "longdesc_initial": 1,
+ "keywords": "perf,perf-alert",
+ "keywords_type": "anywords",
+ "creation_time": creation_time,
+ "query_format": "advanced",
+ "include_fields": "id,type,resolution,last_change_time,is_open,creation_time,summary,whiteboard,status,keywords",
}
try:
bugs_resp = self._session.get(
- f'{self._bugzilla_url}/rest/bug',
- headers={'Accept': 'application/json'},
+ f"{self._bugzilla_url}/rest/bug",
+ headers={"Accept": "application/json"},
params=params,
timeout=90, # query is demanding; give it a bit more patience
)
except Exception as ex:
raise BugzillaEndpointError from ex
else:
- return bugs_resp.json()['bugs']
+ return bugs_resp.json()["bugs"]
def __filter_cooled_down_bugs(self, bugs: List[dict]) -> List[dict]:
return [bug for bug in bugs if self.has_cooled_down(bug)]
@@ -168,9 +168,9 @@ class EngineerTractionFormula(BugzillaFormula):
def _filter_numerator_bugs(self, cooled_bugs: List[dict]) -> List[dict]:
tracted_bugs = []
for bug in cooled_bugs:
- bug_history = self._fetch_history(bug['id'])
+ bug_history = self._fetch_history(bug["id"])
up_to_date = (
- datetime.strptime(bug['creation_time'], BZ_DATETIME_FORMAT) + self._bug_cooldown
+ datetime.strptime(bug["creation_time"], BZ_DATETIME_FORMAT) + self._bug_cooldown
)
if self._notice_any_status_change_in(bug_history, up_to_date):
tracted_bugs.append(bug)
@@ -183,19 +183,19 @@ def _filter_denominator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
def _fetch_history(self, bug_id: int) -> list:
try:
history_resp = self._session.get(
- f'{self._bugzilla_url}/rest/bug/{bug_id}/history',
- headers={'Accept': 'application/json'},
+ f"{self._bugzilla_url}/rest/bug/{bug_id}/history",
+ headers={"Accept": "application/json"},
timeout=60,
)
except Exception as ex:
raise BugzillaEndpointError from ex
else:
body = history_resp.json()
- return body['bugs'][0]['history']
+ return body["bugs"][0]["history"]
def _notice_any_status_change_in(self, bug_history: List[dict], up_to: datetime) -> bool:
def during_interval(change: dict) -> bool:
- when = datetime.strptime(change['when'], BZ_DATETIME_FORMAT)
+ when = datetime.strptime(change["when"], BZ_DATETIME_FORMAT)
return when <= up_to
# filter changes that occurred during bug cool down
@@ -203,13 +203,13 @@ def during_interval(change: dict) -> bool:
# return on any changes WRT 'status' or 'resolution'
for compound_change in relevant_changes:
- for change in compound_change['changes']:
- if change['field_name'] in {'status', 'resolution'}:
+ for change in compound_change["changes"]:
+ if change["field_name"] in {"status", "resolution"}:
return True
return False
def _create_default_session(self) -> NonBlockableSession:
- return NonBlockableSession(referer=f'{ENGINEER_TRACTION_SPECIFICATION}')
+ return NonBlockableSession(referer=f"{ENGINEER_TRACTION_SPECIFICATION}")
class FixRatioFormula(BugzillaFormula):
@@ -218,15 +218,15 @@ def _filter_numerator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
return [
bug
for bug in all_filed_bugs
- if bug.get('status') == "RESOLVED" and bug.get('resolution') == 'FIXED'
+ if bug.get("status") == "RESOLVED" and bug.get("resolution") == "FIXED"
]
def _filter_denominator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
# select RESOLVED bugs, no matter what resolution they have
- return [bug for bug in all_filed_bugs if bug.get('status') == "RESOLVED"]
+ return [bug for bug in all_filed_bugs if bug.get("status") == "RESOLVED"]
def _create_default_session(self) -> NonBlockableSession:
- return NonBlockableSession(referer=f'{FIX_RATIO_SPECIFICATION}')
+ return NonBlockableSession(referer=f"{FIX_RATIO_SPECIFICATION}")
class TotalAlertsFormula:
@@ -249,13 +249,13 @@ def oldest_timestamp(self):
return datetime.now() - (self._quant_period + self.MAX_INVESTIGATION_TIME)
def __call__(self, framework: str, suite: str, test: str = None) -> int:
- filters = {'series_signature__framework__name': framework, 'series_signature__suite': suite}
+ filters = {"series_signature__framework__name": framework, "series_signature__suite": suite}
if test is not None:
- filters['series_signature__test'] = test
+ filters["series_signature__test"] = test
return (
PerformanceAlert.objects.select_related(
- 'series_signature', 'series_signature__framework'
+ "series_signature", "series_signature__framework"
)
.filter(**filters, last_updated__gte=self.oldest_timestamp)
.count()
diff --git a/treeherder/perf/sheriffing_criteria/criteria_tracking.py b/treeherder/perf/sheriffing_criteria/criteria_tracking.py
index cb34e789d17..ddd449f4ab6 100644
--- a/treeherder/perf/sheriffing_criteria/criteria_tracking.py
+++ b/treeherder/perf/sheriffing_criteria/criteria_tracking.py
@@ -12,7 +12,7 @@
from .bugzilla_formulas import BugzillaFormula, EngineerTractionFormula, FixRatioFormula
from treeherder.utils import PROJECT_ROOT
-CRITERIA_FILENAME = 'perf-sheriffing-criteria.csv'
+CRITERIA_FILENAME = "perf-sheriffing-criteria.csv"
LOGGER = logging.getLogger(__name__)
@@ -29,20 +29,20 @@ class CriteriaRecord:
AllowSync: bool
def __post_init__(self):
- if self.EngineerTraction not in ('', 'N/A'):
+ if self.EngineerTraction not in ("", "N/A"):
self.EngineerTraction = float(self.EngineerTraction)
- if self.FixRatio not in ('', 'N/A'):
+ if self.FixRatio not in ("", "N/A"):
self.FixRatio = float(self.FixRatio)
- if self.TotalAlerts not in ('', 'N/A'):
+ if self.TotalAlerts not in ("", "N/A"):
self.TotalAlerts = int(self.TotalAlerts)
- if self.LastUpdatedOn != '':
+ if self.LastUpdatedOn != "":
if isinstance(self.LastUpdatedOn, str):
self.LastUpdatedOn = datetime.fromisoformat(self.LastUpdatedOn)
- if self.AllowSync in ('', 'True'):
+ if self.AllowSync in ("", "True"):
self.AllowSync = True
- elif self.AllowSync == 'False':
+ elif self.AllowSync == "False":
self.AllowSync = False
@@ -69,7 +69,7 @@ def should_update(self, record: CriteriaRecord) -> bool:
return False
# missing data
- if '' in (engineer_traction, fix_ratio, last_updated_on):
+ if "" in (engineer_traction, fix_ratio, last_updated_on):
return True
# expired data
@@ -84,12 +84,12 @@ def apply_formulas(self, record: CriteriaRecord) -> CriteriaRecord:
try:
result = formula(record.Framework, record.Suite, record.Test)
except (NoFiledBugs, Exception) as ex:
- result = 'N/A'
+ result = "N/A"
self.__log_unexpected(ex, form_name, record)
record = replace(
record,
- **{form_name: result, 'LastUpdatedOn': datetime.utcnow().isoformat()},
+ **{form_name: result, "LastUpdatedOn": datetime.utcnow().isoformat()},
)
self.__let_web_service_rest_a_bit()
return record
@@ -101,7 +101,7 @@ def __log_unexpected(self, exception: Exception, formula_name: str, record: Crit
elif type(exception) is Exception:
# maybe web service problem
self.log.warning(
- f'Unexpected exception when applying {formula_name} formula over {record.Framework} - {record.Suite}: {exception}'
+ f"Unexpected exception when applying {formula_name} formula over {record.Framework} - {record.Suite}: {exception}"
)
def __let_web_service_rest_a_bit(self):
@@ -127,15 +127,15 @@ def __init__(
self.log = logger or LOGGER
if not issubclass(self._pool_class, Pool):
- raise TypeError(f'Expected Pool (sub)class parameter. Got {self._pool_class} instead')
+ raise TypeError(f"Expected Pool (sub)class parameter. Got {self._pool_class} instead")
if type(thread_wait) is not timedelta:
- raise TypeError('Expected timedelta parameter.')
+ raise TypeError("Expected timedelta parameter.")
if type(check_interval) is not timedelta:
- raise TypeError('Expected timedelta parameter.')
+ raise TypeError("Expected timedelta parameter.")
def pool(self):
size = self.figure_out_pool_size()
- self.log.debug(f'Preparing a {self._pool_class.__name__} of size {size}...')
+ self.log.debug(f"Preparing a {self._pool_class.__name__} of size {size}...")
return self._pool_class(size)
def figure_out_pool_size(self) -> int:
@@ -168,7 +168,7 @@ def wait_for_results(self, results: List[AsyncResult]):
while True:
last_check_on = time.time()
if all(r.ready() for r in results):
- self.log.info('Finished computing updates for all records.')
+ self.log.info("Finished computing updates for all records.")
break
time.sleep(self._check_interval.total_seconds())
@@ -204,8 +204,8 @@ def __reset_change_track(self, last_change=None):
class CriteriaTracker:
TIME_UNTIL_EXPIRES = timedelta(days=3)
- ENGINEER_TRACTION = 'EngineerTraction'
- FIX_RATIO = 'FixRatio'
+ ENGINEER_TRACTION = "EngineerTraction"
+ FIX_RATIO = "FixRatio"
FIELDNAMES = [field.name for field in fields(CriteriaRecord)]
# Instance defaults
@@ -234,7 +234,7 @@ def __init__(
for formula in self._formula_map.values():
if not callable(formula):
- raise TypeError('Must provide callable as sheriffing criteria formula')
+ raise TypeError("Must provide callable as sheriffing criteria formula")
def get_test_moniker(self, record: CriteriaRecord) -> Tuple[str, str, str]:
return record.Framework, record.Suite, record.Test
@@ -244,18 +244,18 @@ def __iter__(self):
return iter(self._records_map.values())
def load_records(self):
- self.log.info(f'Loading records from {self._record_path}...')
+ self.log.info(f"Loading records from {self._record_path}...")
self._records_map = {} # reset them
- with open(self._record_path, 'r') as csv_file:
+ with open(self._record_path, "r") as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
- test_moniker = row.get('Framework'), row.get('Suite'), row.get('Test')
+ test_moniker = row.get("Framework"), row.get("Suite"), row.get("Test")
self._records_map[test_moniker] = CriteriaRecord(**row)
- self.log.debug(f'Loaded {len(self._records_map)} records')
+ self.log.debug(f"Loaded {len(self._records_map)} records")
def update_records(self):
- self.log.info('Updating records...')
+ self.log.info("Updating records...")
result_checker = ResultsChecker(self.__check_interval(), timeout_after=timedelta(minutes=5))
with self.fetch_strategy.pool() as pool:
@@ -274,11 +274,11 @@ def update_records(self):
self._records_map[test_moniker] = record
self.log.debug("Updated all records internally")
- self.log.info(f'Updating CSV file at {self._record_path}...')
+ self.log.info(f"Updating CSV file at {self._record_path}...")
self.__dump_records()
def compute_record_update(self, record: CriteriaRecord) -> CriteriaRecord:
- self.log.info(f'Computing update for record {record}...')
+ self.log.info(f"Computing update for record {record}...")
if self.__should_update(record):
record = self._computer.apply_formulas(record)
return record
@@ -291,21 +291,21 @@ def create_formula_map(self) -> Dict[str, BugzillaFormula]:
def create_fetch_strategy(self, multiprocessed: bool) -> ConcurrencyStrategy:
options = { # thread pool defaults
- 'pool_class': ThreadPool,
- 'thread_wait': timedelta(seconds=10),
- 'check_interval': timedelta(seconds=10),
- 'cpu_allocation': 0.75,
- 'threads_per_cpu': 12,
- 'logger': self.log,
+ "pool_class": ThreadPool,
+ "thread_wait": timedelta(seconds=10),
+ "check_interval": timedelta(seconds=10),
+ "cpu_allocation": 0.75,
+ "threads_per_cpu": 12,
+ "logger": self.log,
}
if multiprocessed:
options = { # process pool defaults (overrides upper ones)
- 'pool_class': Pool,
- 'thread_wait': timedelta(seconds=1.5),
- 'check_interval': timedelta(seconds=4),
- 'cpu_allocation': 0.8,
- 'threads_per_cpu': 12,
- 'logger': self.log,
+ "pool_class": Pool,
+ "thread_wait": timedelta(seconds=1.5),
+ "check_interval": timedelta(seconds=4),
+ "cpu_allocation": 0.8,
+ "threads_per_cpu": 12,
+ "logger": self.log,
}
return ConcurrencyStrategy(**options)
@@ -323,7 +323,7 @@ def __check_interval(self):
return wait_time
def __dump_records(self):
- with open(self._record_path, 'w') as csv_file:
+ with open(self._record_path, "w") as csv_file:
writer = csv.DictWriter(csv_file, self.FIELDNAMES)
writer.writeheader()
diff --git a/treeherder/perf/tasks.py b/treeherder/perf/tasks.py
index 7a437e505aa..53a0ec49d5f 100644
--- a/treeherder/perf/tasks.py
+++ b/treeherder/perf/tasks.py
@@ -5,7 +5,7 @@
from treeherder.workers.task import retryable_task
-@retryable_task(name='generate-alerts', max_retries=10)
+@retryable_task(name="generate-alerts", max_retries=10)
def generate_alerts(signature_id):
newrelic.agent.add_custom_attribute("signature_id", str(signature_id))
signature = PerformanceSignature.objects.get(id=signature_id)
diff --git a/treeherder/perfalert/perfalert/__init__.py b/treeherder/perfalert/perfalert/__init__.py
index eeed90549d7..a6d1dbe75f8 100644
--- a/treeherder/perfalert/perfalert/__init__.py
+++ b/treeherder/perfalert/perfalert/__init__.py
@@ -66,14 +66,14 @@ def calc_t(w1, w2, weight_fn=None):
s1 = analyze(w1, weight_fn)
s2 = analyze(w2, weight_fn)
- delta_s = s2['avg'] - s1['avg']
+ delta_s = s2["avg"] - s1["avg"]
if delta_s == 0:
return 0
- if s1['variance'] == 0 and s2['variance'] == 0:
- return float('inf')
+ if s1["variance"] == 0 and s2["variance"] == 0:
+ return float("inf")
- return delta_s / (((s1['variance'] / s1['n']) + (s2['variance'] / s2['n'])) ** 0.5)
+ return delta_s / (((s1["variance"] / s1["n"]) + (s2["variance"] / s2["n"])) ** 0.5)
@functools.total_ordering
@@ -106,7 +106,7 @@ def __lt__(self, o):
return self.push_timestamp < o.push_timestamp
def __repr__(self):
- values_str = '[ %s ]' % ', '.join(['%.3f' % value for value in self.values])
+ values_str = "[ %s ]" % ", ".join(["%.3f" % value for value in self.values])
return "<%s: %s, %s, %.3f, %s>" % (
self.push_timestamp,
self.push_id,
diff --git a/treeherder/perfalert/setup.py b/treeherder/perfalert/setup.py
index a1659fd0171..db9eb17ae4e 100644
--- a/treeherder/perfalert/setup.py
+++ b/treeherder/perfalert/setup.py
@@ -1,26 +1,26 @@
from setuptools import setup
-version = '0.1'
+version = "0.1"
setup(
- name='perfalert',
+ name="perfalert",
version=version,
description="Automated regression detection for performance data",
classifiers=[
- 'Environment :: Console',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
- 'Natural Language :: English',
- 'Operating System :: OS Independent',
- 'Programming Language :: Python',
- 'Topic :: Software Development :: Libraries :: Python Modules',
+ "Environment :: Console",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
+ "Natural Language :: English",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Topic :: Software Development :: Libraries :: Python Modules",
],
- keywords='',
- author='Mozilla Automation and Testing Team & others',
- author_email='tools@lists.mozilla.org',
- url='https://github.com/mozilla/treeherder',
- license='MPL',
- packages=['perfalert'],
+ keywords="",
+ author="Mozilla Automation and Testing Team & others",
+ author_email="tools@lists.mozilla.org",
+ url="https://github.com/mozilla/treeherder",
+ license="MPL",
+ packages=["perfalert"],
zip_safe=False,
install_requires=[],
)
diff --git a/treeherder/push_health/builds.py b/treeherder/push_health/builds.py
index e54934ab776..020c59ba5c9 100644
--- a/treeherder/push_health/builds.py
+++ b/treeherder/push_health/builds.py
@@ -5,14 +5,14 @@
def get_build_failures(push):
# icontains doesn't work with mysql unless collation settings are adjusted: https://code.djangoproject.com/ticket/9682
- build_types = JobType.objects.filter(Q(name__contains='Build') | Q(name__contains='build'))
+ build_types = JobType.objects.filter(Q(name__contains="Build") | Q(name__contains="build"))
build_results = Job.objects.filter(
push=push,
tier__lte=2,
job_type__in=build_types,
- ).select_related('machine_platform', 'taskcluster_metadata')
+ ).select_related("machine_platform", "taskcluster_metadata")
- result, failures, in_progress_count = get_job_results(build_results, 'busted')
+ result, failures, in_progress_count = get_job_results(build_results, "busted")
return (result, failures, in_progress_count)
diff --git a/treeherder/push_health/classification.py b/treeherder/push_health/classification.py
index e603ee2c105..13d564c5add 100644
--- a/treeherder/push_health/classification.py
+++ b/treeherder/push_health/classification.py
@@ -1,6 +1,6 @@
# Grouping names/keys for failures.
-KNOWN_ISSUES = 'knownIssues'
-NEED_INVESTIGATION = 'needInvestigation'
+KNOWN_ISSUES = "knownIssues"
+NEED_INVESTIGATION = "needInvestigation"
def set_classifications(failures, intermittent_history, fixed_by_commit_history):
@@ -12,10 +12,10 @@ def set_classifications(failures, intermittent_history, fixed_by_commit_history)
def set_fixed_by_commit(failure, fixed_by_commit_history):
# Not perfect, could have intermittent that is cause of fbc
if (
- failure['testName'] in fixed_by_commit_history.keys()
- and not failure['isClassifiedIntermittent']
+ failure["testName"] in fixed_by_commit_history.keys()
+ and not failure["isClassifiedIntermittent"]
):
- failure['suggestedClassification'] = 'fixedByCommit'
+ failure["suggestedClassification"] = "fixedByCommit"
return True
return False
@@ -25,10 +25,10 @@ def set_intermittent(failure, previous_failures):
# TODO: if there is >1 failure for platforms/config, increase pct
# TODO: if >1 failures in the same dir or platform, increase pct
- name = failure['testName']
- platform = failure['platform']
- config = failure['config']
- job_name = failure['jobName']
+ name = failure["testName"]
+ platform = failure["platform"]
+ config = failure["config"]
+ job_name = failure["jobName"]
confidence = 0
if name in previous_failures:
@@ -42,26 +42,26 @@ def set_intermittent(failure, previous_failures):
# Marking all win7 reftest failures as int, too many font issues
if (
confidence == 0
- and platform == 'windows7-32'
- and ('opt-reftest' in job_name or 'debug-reftest' in job_name)
+ and platform == "windows7-32"
+ and ("opt-reftest" in job_name or "debug-reftest" in job_name)
):
confidence = 50
- if failure['isClassifiedIntermittent']:
+ if failure["isClassifiedIntermittent"]:
confidence = 100
if confidence:
- failure['confidence'] = confidence
- failure['suggestedClassification'] = 'intermittent'
+ failure["confidence"] = confidence
+ failure["suggestedClassification"] = "intermittent"
return True
return False
def get_log_lines(failure):
messages = []
- for line in failure['logLines']:
- line = line.encode('ascii', 'ignore')
- parts = line.split(b'|')
+ for line in failure["logLines"]:
+ line = line.encode("ascii", "ignore")
+ parts = line.split(b"|")
if len(parts) == 3:
messages.append(parts[2].strip())
return messages
@@ -74,15 +74,15 @@ def get_grouped(failures):
}
for failure in failures:
- is_intermittent = failure['suggestedClassification'] == 'intermittent'
+ is_intermittent = failure["suggestedClassification"] == "intermittent"
- if (is_intermittent and failure['confidence'] == 100) or failure['totalFailures'] / failure[
- 'totalJobs'
+ if (is_intermittent and failure["confidence"] == 100) or failure["totalFailures"] / failure[
+ "totalJobs"
] <= 0.5:
classified[KNOWN_ISSUES].append(failure)
else:
classified[NEED_INVESTIGATION].append(failure)
# If it needs investigation, we, by definition, don't have 100% confidence.
- failure['confidence'] = min(failure['confidence'], 90)
+ failure["confidence"] = min(failure["confidence"], 90)
return classified
diff --git a/treeherder/push_health/compare.py b/treeherder/push_health/compare.py
index 3e8115dfb66..31da22e8708 100644
--- a/treeherder/push_health/compare.py
+++ b/treeherder/push_health/compare.py
@@ -28,27 +28,27 @@ def get_commit_history(repository, revision, push):
parent_push = parents[0] if len(parents) else None
resp = {
- 'parentSha': parent_sha,
- 'exactMatch': False,
- 'parentPushRevision': None,
- 'parentRepository': not parent_repo or RepositorySerializer(parent_repo).data,
- 'id': None,
- 'jobCounts': None,
- 'revisions': [
- CommitSerializer(commit).data for commit in push.commits.all().order_by('-id')
+ "parentSha": parent_sha,
+ "exactMatch": False,
+ "parentPushRevision": None,
+ "parentRepository": not parent_repo or RepositorySerializer(parent_repo).data,
+ "id": None,
+ "jobCounts": None,
+ "revisions": [
+ CommitSerializer(commit).data for commit in push.commits.all().order_by("-id")
],
- 'revisionCount': push.commits.count(),
- 'currentPush': PushSerializer(push).data,
+ "revisionCount": push.commits.count(),
+ "currentPush": PushSerializer(push).data,
}
if parent_push:
resp.update(
{
# This will be the revision of the Parent, as long as we could find a Push in
# Treeherder for it.
- 'parentPushRevision': parent_push.revision,
- 'id': parent_push.id,
- 'jobCounts': parent_push.get_status(),
- 'exactMatch': parent_sha == parent_push.revision,
+ "parentPushRevision": parent_push.revision,
+ "id": parent_push.id,
+ "jobCounts": parent_push.get_status(),
+ "exactMatch": parent_sha == parent_push.revision,
}
)
diff --git a/treeherder/push_health/filter.py b/treeherder/push_health/filter.py
index 6f918989224..ba94b08f5cb 100644
--- a/treeherder/push_health/filter.py
+++ b/treeherder/push_health/filter.py
@@ -9,9 +9,9 @@ def filter_failure(failure):
def filter_job_type_names(failure):
- name = failure['jobName']
+ name = failure["jobName"]
return (
- not name.startswith(('build', 'repackage', 'hazard', 'valgrind', 'spidermonkey'))
- and 'test-verify' not in name
+ not name.startswith(("build", "repackage", "hazard", "valgrind", "spidermonkey"))
+ and "test-verify" not in name
)
diff --git a/treeherder/push_health/linting.py b/treeherder/push_health/linting.py
index c624b1af335..45925530eb8 100644
--- a/treeherder/push_health/linting.py
+++ b/treeherder/push_health/linting.py
@@ -6,11 +6,11 @@
def get_lint_failures(push):
lint_results = Job.objects.filter(
- Q(machine_platform__platform='lint') | Q(job_type__symbol='mozlint'),
+ Q(machine_platform__platform="lint") | Q(job_type__symbol="mozlint"),
push=push,
tier__lte=2,
- ).select_related('machine_platform', 'taskcluster_metadata')
+ ).select_related("machine_platform", "taskcluster_metadata")
- result, failures, in_progress_count = get_job_results(lint_results, 'testfailed')
+ result, failures, in_progress_count = get_job_results(lint_results, "testfailed")
return (result, failures, in_progress_count)
diff --git a/treeherder/push_health/performance.py b/treeherder/push_health/performance.py
index c88bbe8f9ce..8c449c4033f 100644
--- a/treeherder/push_health/performance.py
+++ b/treeherder/push_health/performance.py
@@ -3,9 +3,9 @@
def get_perf_failures(push):
- perf_groups = JobGroup.objects.filter(name__contains='performance')
+ perf_groups = JobGroup.objects.filter(name__contains="performance")
perf_failures = Job.objects.filter(
- push=push, tier__lte=2, result='testfailed', job_group__in=perf_groups
- ).select_related('machine_platform', 'taskcluster_metadata')
+ push=push, tier__lte=2, result="testfailed", job_group__in=perf_groups
+ ).select_related("machine_platform", "taskcluster_metadata")
return [job_to_dict(job) for job in perf_failures]
diff --git a/treeherder/push_health/tests.py b/treeherder/push_health/tests.py
index 31f84428c7c..6ad411e9893 100644
--- a/treeherder/push_health/tests.py
+++ b/treeherder/push_health/tests.py
@@ -15,15 +15,15 @@
logger = logging.getLogger(__name__)
-CACHE_KEY_ROOT = 'failure_history'
+CACHE_KEY_ROOT = "failure_history"
ONE_WEEK_IN_SECONDS = 604800
intermittent_history_days = 14
fixed_by_commit_history_days = 30
ignored_log_lines = [
- 'Return code: 1',
- 'exit status 1',
- 'unexpected status',
- 'Force-terminating active process(es)',
+ "Return code: 1",
+ "exit status 1",
+ "unexpected status",
+ "Force-terminating active process(es)",
]
@@ -32,13 +32,13 @@ def get_history(
):
start_date = push_date - datetime.timedelta(days=num_days)
end_date = push_date - datetime.timedelta(days=2)
- cache_key = f'{CACHE_KEY_ROOT}:{failure_classification_id}:{push_date}'
+ cache_key = f"{CACHE_KEY_ROOT}:{failure_classification_id}:{push_date}"
previous_failures_json = cache.get(cache_key)
if not previous_failures_json or force_update:
failure_lines = (
FailureLine.objects.filter(
- job_log__job__result='testfailed',
+ job_log__job__result="testfailed",
job_log__job__tier__lte=2,
job_log__job__failure_classification_id=failure_classification_id,
job_log__job__push__repository_id__in=repository_ids,
@@ -46,22 +46,22 @@ def get_history(
job_log__job__push__time__lt=end_date,
)
.exclude(test=None)
- .select_related('job_log__job__machine_platform', 'job_log__job__push')
+ .select_related("job_log__job__machine_platform", "job_log__job__push")
.values(
- 'action',
- 'test',
- 'signature',
- 'message',
- 'job_log__job__machine_platform__platform',
- 'job_log__job__option_collection_hash',
+ "action",
+ "test",
+ "signature",
+ "message",
+ "job_log__job__machine_platform__platform",
+ "job_log__job__option_collection_hash",
)
.distinct()
)
previous_failures = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
for line in failure_lines:
- previous_failures[clean_test(line['test'], line['signature'], line['message'])][
- clean_platform(line['job_log__job__machine_platform__platform'])
- ][clean_config(option_map[line['job_log__job__option_collection_hash']])] += 1
+ previous_failures[clean_test(line["test"], line["signature"], line["message"])][
+ clean_platform(line["job_log__job__machine_platform__platform"])
+ ][clean_config(option_map[line["job_log__job__option_collection_hash"]])] += 1
cache.set(cache_key, json.dumps(previous_failures), ONE_WEEK_IN_SECONDS)
else:
@@ -78,15 +78,15 @@ def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
# for the same job (with different sub-tests), but it's only supported by
# postgres. Just using .distinct() has no effect.
new_failure_lines = FailureLine.objects.filter(
- action__in=['test_result', 'log', 'crash'],
+ action__in=["test_result", "log", "crash"],
job_log__job__push=push,
- job_log__job__result='testfailed',
+ job_log__job__result="testfailed",
job_log__job__tier__lte=2,
).select_related(
- 'job_log__job__job_type',
- 'job_log__job__job_group',
- 'job_log__job__machine_platform',
- 'job_log__job__taskcluster_metadata',
+ "job_log__job__job_type",
+ "job_log__job__job_group",
+ "job_log__job__machine_platform",
+ "job_log__job__taskcluster_metadata",
)
# using a dict here to avoid duplicates due to multiple failure_lines for
# each job.
@@ -103,14 +103,14 @@ def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
job_symbol = job.job_type.symbol
job_group = job.job_group.name
job_group_symbol = job.job_group.symbol
- job.job_key = '{}{}{}{}'.format(config, platform, job_name, job_group)
+ job.job_key = "{}{}{}{}".format(config, platform, job_name, job_group)
all_failed_jobs[job.id] = job
# The 't' ensures the key starts with a character, as required for a query selector
test_key = re.sub(
- r'\W+', '', 't{}{}{}{}{}'.format(test_name, config, platform, job_name, job_group)
+ r"\W+", "", "t{}{}{}{}{}".format(test_name, config, platform, job_name, job_group)
)
isClassifiedIntermittent = any(
- job['failure_classification_id'] == 4 for job in jobs[job_name]
+ job["failure_classification_id"] == 4 for job in jobs[job_name]
)
isInvestigated = False
@@ -126,46 +126,46 @@ def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
if test_key not in tests:
line = {
- 'testName': test_name,
- 'action': failure_line.action.split('_')[0],
- 'jobName': job_name,
- 'jobSymbol': job_symbol,
- 'jobGroup': job_group,
- 'jobGroupSymbol': job_group_symbol,
- 'platform': platform,
- 'config': config,
- 'key': test_key,
- 'jobKey': job.job_key,
- 'suggestedClassification': 'New Failure',
- 'confidence': 0,
- 'tier': job.tier,
- 'totalFailures': 0,
- 'totalJobs': 0,
- 'failedInParent': False,
- 'isClassifiedIntermittent': isClassifiedIntermittent,
- 'isInvestigated': isInvestigated,
- 'investigatedTestId': investigatedTestId,
+ "testName": test_name,
+ "action": failure_line.action.split("_")[0],
+ "jobName": job_name,
+ "jobSymbol": job_symbol,
+ "jobGroup": job_group,
+ "jobGroupSymbol": job_group_symbol,
+ "platform": platform,
+ "config": config,
+ "key": test_key,
+ "jobKey": job.job_key,
+ "suggestedClassification": "New Failure",
+ "confidence": 0,
+ "tier": job.tier,
+ "totalFailures": 0,
+ "totalJobs": 0,
+ "failedInParent": False,
+ "isClassifiedIntermittent": isClassifiedIntermittent,
+ "isInvestigated": isInvestigated,
+ "investigatedTestId": investigatedTestId,
}
tests[test_key] = line
countJobs = len(
- list(filter(lambda x: x['result'] in ['success', 'testfailed'], jobs[job_name]))
+ list(filter(lambda x: x["result"] in ["success", "testfailed"], jobs[job_name]))
)
- tests[test_key]['totalFailures'] += 1
- tests[test_key]['totalJobs'] = countJobs
+ tests[test_key]["totalFailures"] += 1
+ tests[test_key]["totalJobs"] = countJobs
# Each line of the sorted list that is returned here represents one test file per platform/
# config. Each line will have at least one failing job, but may have several
# passing/failing jobs associated with it.
- return sorted(tests.values(), key=lambda k: k['testName'])
+ return sorted(tests.values(), key=lambda k: k["testName"])
def has_job(job, job_list):
- return next((find_job for find_job in job_list if find_job['id'] == job.id), False)
+ return next((find_job for find_job in job_list if find_job["id"] == job.id), False)
def has_line(failure_line, log_line_list):
return next(
- (find_line for find_line in log_line_list if find_line['line_number'] == failure_line.line),
+ (find_line for find_line in log_line_list if find_line["line_number"] == failure_line.line),
False,
)
@@ -175,20 +175,20 @@ def get_test_failure_jobs(push):
Job.objects.filter(
push=push,
tier__lte=2,
- result='testfailed',
+ result="testfailed",
)
.exclude(
- Q(machine_platform__platform='lint')
- | Q(job_type__symbol='mozlint')
- | Q(job_type__name__contains='build'),
+ Q(machine_platform__platform="lint")
+ | Q(job_type__symbol="mozlint")
+ | Q(job_type__name__contains="build"),
)
- .select_related('job_type', 'machine_platform', 'taskcluster_metadata')
+ .select_related("job_type", "machine_platform", "taskcluster_metadata")
)
failed_job_types = [job.job_type.name for job in testfailed_jobs]
passing_jobs = Job.objects.filter(
- push=push, job_type__name__in=failed_job_types, result__in=['success', 'unknown']
- ).select_related('job_type', 'machine_platform', 'taskcluster_metadata')
+ push=push, job_type__name__in=failed_job_types, result__in=["success", "unknown"]
+ ).select_related("job_type", "machine_platform", "taskcluster_metadata")
jobs = {}
result_status = set()
@@ -205,7 +205,7 @@ def add_jobs(job_list):
add_jobs(passing_jobs)
for job in jobs:
- (jobs[job]).sort(key=lambda x: x['start_time'])
+ (jobs[job]).sort(key=lambda x: x["start_time"])
return (result_status, jobs)
@@ -215,16 +215,16 @@ def get_test_failures(
jobs,
result_status=set(),
):
- logger.debug('Getting test failures for push: {}'.format(push.id))
+ logger.debug("Getting test failures for push: {}".format(push.id))
# query for jobs for the last two weeks excluding today
# find tests that have failed in the last 14 days
# this is very cache-able for reuse on other pushes.
- result = 'pass'
+ result = "pass"
if not len(jobs):
- return ('none', get_grouped([]))
+ return ("none", get_grouped([]))
- repository_ids = REPO_GROUPS['trunk']
+ repository_ids = REPO_GROUPS["trunk"]
# option_map is used to map platforms for the job.option_collection_hash
option_map = OptionCollection.objects.get_option_collection_map()
push_date = push.time.date()
@@ -253,10 +253,10 @@ def get_test_failures(
failures = get_grouped(filtered_push_failures)
- if len(failures['needInvestigation']):
- result = 'fail'
- elif 'unknown' in result_status:
- result = 'unknown'
+ if len(failures["needInvestigation"]):
+ result = "fail"
+ elif "unknown" in result_status:
+ result = "unknown"
return (result, failures)
@@ -264,16 +264,16 @@ def get_test_failures(
def get_test_in_progress_count(push):
test_types = JobType.objects.exclude(
name__contains="build",
- symbol='mozlint',
+ symbol="mozlint",
)
return (
Job.objects.filter(
push=push,
tier__lte=2,
- result='unknown',
+ result="unknown",
job_type__in=test_types,
)
- .exclude(machine_platform__platform='lint')
- .select_related('machine_platform')
+ .exclude(machine_platform__platform="lint")
+ .select_related("machine_platform")
.count()
)
diff --git a/treeherder/push_health/usage.py b/treeherder/push_health/usage.py
index d6cffabcb8d..8fe14445b7b 100644
--- a/treeherder/push_health/usage.py
+++ b/treeherder/push_health/usage.py
@@ -12,55 +12,55 @@
def get_peak(facet):
peak = 0
date = 0
- for item in facet['timeSeries']:
- max = item['results'][-1]['max']
- if item['inspectedCount'] > 0 and max > peak:
+ for item in facet["timeSeries"]:
+ max = item["results"][-1]["max"]
+ if item["inspectedCount"] > 0 and max > peak:
peak = max
- date = item['endTimeSeconds']
+ date = item["endTimeSeconds"]
- return {NEED_INVESTIGATION: peak, 'time': date}
+ return {NEED_INVESTIGATION: peak, "time": date}
def get_latest(facet):
- for item in reversed(facet['timeSeries']):
- if item['inspectedCount'] > 0:
- latest = item['results'][-1]
- return {NEED_INVESTIGATION: latest['max'], 'time': item['endTimeSeconds']}
+ for item in reversed(facet["timeSeries"]):
+ if item["inspectedCount"] > 0:
+ latest = item["results"][-1]
+ return {NEED_INVESTIGATION: latest["max"], "time": item["endTimeSeconds"]}
def jobs_retriggered(push):
- retrigger_jobs = Job.objects.filter(push=push, job_type__name='Action: Retrigger')
+ retrigger_jobs = Job.objects.filter(push=push, job_type__name="Action: Retrigger")
return len(retrigger_jobs)
def get_usage():
nrql = "SELECT%20max(needInvestigation)%20FROM%20push_health_need_investigation%20FACET%20revision%20SINCE%201%20DAY%20AGO%20TIMESERIES%20where%20repo%3D'{}'%20AND%20appName%3D'{}'".format(
- 'try', 'treeherder-prod'
+ "try", "treeherder-prod"
)
- new_relic_url = '{}?nrql={}'.format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)
+ new_relic_url = "{}?nrql={}".format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)
headers = {
- 'Accept': 'application/json',
- 'Content-Type': 'application/json',
- 'X-Query-Key': settings.NEW_RELIC_INSIGHTS_API_KEY,
+ "Accept": "application/json",
+ "Content-Type": "application/json",
+ "X-Query-Key": settings.NEW_RELIC_INSIGHTS_API_KEY,
}
# TODO: make this check happen during deploy or setup? Not here.
if not settings.NEW_RELIC_INSIGHTS_API_KEY:
- logger.error('NEW_RELIC_INSIGHTS_API_KEY not set.')
+ logger.error("NEW_RELIC_INSIGHTS_API_KEY not set.")
resp = make_request(new_relic_url, headers=headers)
data = resp.json()
- push_revisions = [facet['name'] for facet in data['facets']]
+ push_revisions = [facet["name"] for facet in data["facets"]]
pushes = Push.objects.filter(revision__in=push_revisions)
results = [
{
- 'push': PushSerializer(pushes.get(revision=facet['name'])).data,
- 'peak': get_peak(facet),
- 'latest': get_latest(facet),
- 'retriggers': jobs_retriggered(pushes.get(revision=facet['name'])),
+ "push": PushSerializer(pushes.get(revision=facet["name"])).data,
+ "peak": get_peak(facet),
+ "latest": get_latest(facet),
+ "retriggers": jobs_retriggered(pushes.get(revision=facet["name"])),
}
- for facet in data['facets']
+ for facet in data["facets"]
]
return results
diff --git a/treeherder/push_health/utils.py b/treeherder/push_health/utils.py
index 24579bec0bc..132af648867 100644
--- a/treeherder/push_health/utils.py
+++ b/treeherder/push_health/utils.py
@@ -1,53 +1,53 @@
# These strings will be omitted from test paths to more easily correlate
# them to other test paths.
trim_parts = [
- 'TEST-UNEXPECTED-FAIL',
- 'REFTEST TEST-UNEXPECTED-FAIL',
- 'TEST-UNEXPECTED-PASS',
- 'REFTEST TEST-UNEXPECTED-PASS',
+ "TEST-UNEXPECTED-FAIL",
+ "REFTEST TEST-UNEXPECTED-FAIL",
+ "TEST-UNEXPECTED-PASS",
+ "REFTEST TEST-UNEXPECTED-PASS",
]
def clean_test(test, signature, message):
try:
- clean_name = test or signature or message or 'Non-Test Error'
+ clean_name = test or signature or message or "Non-Test Error"
except UnicodeEncodeError:
- return ''
+ return ""
- if clean_name.startswith('pid:'):
+ if clean_name.startswith("pid:"):
return None
- if ' == ' in clean_name or ' != ' in clean_name:
- splitter = ' == ' if ' == ' in clean_name else ' != '
+ if " == " in clean_name or " != " in clean_name:
+ splitter = " == " if " == " in clean_name else " != "
left, right, *rest = clean_name.split(splitter)
- if 'tests/layout/' in left and 'tests/layout/' in right:
- left = 'layout%s' % left.split('tests/layout')[1]
- right = 'layout%s' % right.split('tests/layout')[1]
- elif 'build/tests/reftest/tests/' in left and 'build/tests/reftest/tests/' in right:
- left = '%s' % left.split('build/tests/reftest/tests/')[1]
- right = '%s' % right.split('build/tests/reftest/tests/')[1]
- elif clean_name.startswith('http://10.0'):
- left = '/tests/'.join(left.split('/tests/')[1:])
- right = '/tests/'.join(right.split('/tests/')[1:])
+ if "tests/layout/" in left and "tests/layout/" in right:
+ left = "layout%s" % left.split("tests/layout")[1]
+ right = "layout%s" % right.split("tests/layout")[1]
+ elif "build/tests/reftest/tests/" in left and "build/tests/reftest/tests/" in right:
+ left = "%s" % left.split("build/tests/reftest/tests/")[1]
+ right = "%s" % right.split("build/tests/reftest/tests/")[1]
+ elif clean_name.startswith("http://10.0"):
+ left = "/tests/".join(left.split("/tests/")[1:])
+ right = "/tests/".join(right.split("/tests/")[1:])
clean_name = "%s%s%s" % (left, splitter, right)
- if 'test_end for' in clean_name:
+ if "test_end for" in clean_name:
clean_name = clean_name.split()[2]
- if 'build/tests/reftest/tests/' in clean_name:
- clean_name = clean_name.split('build/tests/reftest/tests/')[1]
+ if "build/tests/reftest/tests/" in clean_name:
+ clean_name = clean_name.split("build/tests/reftest/tests/")[1]
- if 'jsreftest.html' in clean_name:
- clean_name = clean_name.split('test=')[1]
+ if "jsreftest.html" in clean_name:
+ clean_name = clean_name.split("test=")[1]
- if clean_name.startswith('http://10.0'):
- clean_name = '/tests/'.join(clean_name.split('/tests/')[1:])
+ if clean_name.startswith("http://10.0"):
+ clean_name = "/tests/".join(clean_name.split("/tests/")[1:])
# http://localhost:50462/1545303666006/4/41276-1.html
- if clean_name.startswith('http://localhost:'):
- parts = clean_name.split('/')
+ if clean_name.startswith("http://localhost:"):
+ parts = clean_name.split("/")
clean_name = parts[-1]
if " (finished)" in clean_name:
@@ -55,17 +55,17 @@ def clean_test(test, signature, message):
# Now that we don't bail on a blank test_name, these filters
# may sometimes apply.
- if clean_name in ['Last test finished', '(SimpleTest/TestRunner.js)']:
+ if clean_name in ["Last test finished", "(SimpleTest/TestRunner.js)"]:
return None
clean_name = clean_name.strip()
- clean_name = clean_name.replace('\\', '/')
- clean_name = clean_name.lstrip('/')
+ clean_name = clean_name.replace("\\", "/")
+ clean_name = clean_name.lstrip("/")
- if '|' in clean_name:
- parts = clean_name.split('|')
+ if "|" in clean_name:
+ parts = clean_name.split("|")
clean_parts = filter(lambda x: x.strip() not in trim_parts, parts)
- clean_name = '|'.join(clean_parts)
+ clean_name = "|".join(clean_parts)
return clean_name
@@ -73,48 +73,48 @@ def clean_test(test, signature, message):
def clean_config(config):
# We have found that pgo ~= opt for our needs, so this helps us get a
# more representative sample size of data.
- if config in ['pgo', 'shippable']:
- config = 'opt'
+ if config in ["pgo", "shippable"]:
+ config = "opt"
- return config.encode('ascii', 'ignore').decode('utf-8')
+ return config.encode("ascii", "ignore").decode("utf-8")
def clean_platform(platform):
# This is needed because of macosx-qr
- if platform.startswith('macosx64'):
- platform = platform.replace('macosx64', 'osx-10-10')
+ if platform.startswith("macosx64"):
+ platform = platform.replace("macosx64", "osx-10-10")
- return platform.encode('ascii', 'ignore').decode('utf-8')
+ return platform.encode("ascii", "ignore").decode("utf-8")
def is_valid_failure_line(line):
skip_lines = [
- 'Return code:',
- 'unexpected status',
- 'unexpected crashes',
- 'exit status',
- 'Finished in',
+ "Return code:",
+ "unexpected status",
+ "unexpected crashes",
+ "exit status",
+ "Finished in",
]
return not any(skip_line in line for skip_line in skip_lines)
job_fields = [
- 'id',
- 'machine_platform_id',
- 'option_collection_hash',
- 'job_type_id',
- 'job_group_id',
- 'result',
- 'state',
- 'failure_classification_id',
- 'push_id',
- 'start_time',
+ "id",
+ "machine_platform_id",
+ "option_collection_hash",
+ "job_type_id",
+ "job_group_id",
+ "result",
+ "state",
+ "failure_classification_id",
+ "push_id",
+ "start_time",
]
def get_job_key(job):
- return '{}-{}-{}'.format(
- job['machine_platform_id'], job['option_collection_hash'], job['job_type_id']
+ return "{}-{}-{}".format(
+ job["machine_platform_id"], job["option_collection_hash"], job["job_type_id"]
)
@@ -122,11 +122,11 @@ def job_to_dict(job):
job_dict = {field: getattr(job, field) for field in job_fields}
job_dict.update(
{
- 'job_type_name': job.job_type.name,
- 'job_type_symbol': job.job_type.symbol,
- 'platform': job.machine_platform.platform,
- 'task_id': job.taskcluster_metadata.task_id,
- 'run_id': job.taskcluster_metadata.retry_id,
+ "job_type_name": job.job_type.name,
+ "job_type_symbol": job.job_type.symbol,
+ "platform": job.machine_platform.platform,
+ "task_id": job.taskcluster_metadata.task_id,
+ "run_id": job.taskcluster_metadata.retry_id,
}
)
return job_dict
@@ -134,23 +134,23 @@ def job_to_dict(job):
def get_job_results(results, failure_type):
result_status = set()
- result = 'pass'
+ result = "pass"
failures = []
count_in_progress = 0
if not len(results):
- return ('none', failures, count_in_progress)
+ return ("none", failures, count_in_progress)
for job in results:
result_status.add(job.result)
if job.result == failure_type:
failures.append(job_to_dict(job))
- elif job.result == 'unknown':
+ elif job.result == "unknown":
count_in_progress += 1
if len(failures):
- result = 'fail'
- elif 'unknown' in result_status:
- result = 'unknown'
+ result = "fail"
+ elif "unknown" in result_status:
+ result = "unknown"
return (result, failures, count_in_progress)
diff --git a/treeherder/services/elasticsearch/__init__.py b/treeherder/services/elasticsearch/__init__.py
index 4b363f98ea0..5065aa6dadc 100644
--- a/treeherder/services/elasticsearch/__init__.py
+++ b/treeherder/services/elasticsearch/__init__.py
@@ -11,13 +11,13 @@
)
__all__ = [
- 'all_documents',
- 'bulk',
- 'count_index',
- 'es_conn',
- 'get_document',
- 'index',
- 'refresh_index',
- 'reinit_index',
- 'search',
+ "all_documents",
+ "bulk",
+ "count_index",
+ "es_conn",
+ "get_document",
+ "index",
+ "refresh_index",
+ "reinit_index",
+ "search",
]
diff --git a/treeherder/services/elasticsearch/mapping.py b/treeherder/services/elasticsearch/mapping.py
index 2e3d3f37729..a17e8282b83 100644
--- a/treeherder/services/elasticsearch/mapping.py
+++ b/treeherder/services/elasticsearch/mapping.py
@@ -1,44 +1,44 @@
-boolean = {'type': 'boolean'}
-integer = {'type': 'integer'}
-keyword = {'type': 'keyword'}
+boolean = {"type": "boolean"}
+integer = {"type": "integer"}
+keyword = {"type": "keyword"}
-DOC_TYPE = 'failure-line'
-INDEX_NAME = 'failure-lines'
+DOC_TYPE = "failure-line"
+INDEX_NAME = "failure-lines"
INDEX_SETTINGS = {
- 'failure-lines': {
- 'mappings': {
- 'failure-line': {
- 'properties': {
- 'job_guid': keyword,
- 'test': keyword,
- 'subtest': keyword,
- 'status': keyword,
- 'expected': keyword,
- 'best_classification': integer,
- 'best_is_verified': boolean,
- 'message': {
- 'type': 'text',
- 'analyzer': 'message_analyzer',
- 'search_analyzer': 'message_analyzer',
+ "failure-lines": {
+ "mappings": {
+ "failure-line": {
+ "properties": {
+ "job_guid": keyword,
+ "test": keyword,
+ "subtest": keyword,
+ "status": keyword,
+ "expected": keyword,
+ "best_classification": integer,
+ "best_is_verified": boolean,
+ "message": {
+ "type": "text",
+ "analyzer": "message_analyzer",
+ "search_analyzer": "message_analyzer",
},
},
},
},
- 'settings': {
- 'number_of_shards': 1,
- 'analysis': {
- 'analyzer': {
- 'message_analyzer': {
- 'type': 'custom',
- 'tokenizer': 'message_tokenizer',
- 'filters': [],
+ "settings": {
+ "number_of_shards": 1,
+ "analysis": {
+ "analyzer": {
+ "message_analyzer": {
+ "type": "custom",
+ "tokenizer": "message_tokenizer",
+ "filters": [],
},
},
- 'tokenizer': {
- 'message_tokenizer': {
- 'type': 'pattern',
- 'pattern': r'0x[0-9a-fA-F]+|[\W0-9]+?',
+ "tokenizer": {
+ "message_tokenizer": {
+ "type": "pattern",
+ "pattern": r"0x[0-9a-fA-F]+|[\W0-9]+?",
},
},
},
diff --git a/treeherder/services/elasticsearch/utils.py b/treeherder/services/elasticsearch/utils.py
index 7b7a8d7f59a..fb9828cc637 100644
--- a/treeherder/services/elasticsearch/utils.py
+++ b/treeherder/services/elasticsearch/utils.py
@@ -1,23 +1,23 @@
-def dict_to_op(d, index_name, doc_type, op_type='index'):
+def dict_to_op(d, index_name, doc_type, op_type="index"):
"""
Create a bulk-indexing operation from the given dictionary.
"""
if d is None:
return d
- op_types = ('create', 'delete', 'index', 'update')
+ op_types = ("create", "delete", "index", "update")
if op_type not in op_types:
msg = 'Unknown operation type "{}", must be one of: {}'
- raise Exception(msg.format(op_type, ', '.join(op_types)))
+ raise Exception(msg.format(op_type, ", ".join(op_types)))
- if 'id' not in d:
+ if "id" not in d:
raise Exception('"id" key not found')
operation = {
- '_op_type': op_type,
- '_index': index_name,
- '_type': doc_type,
- '_id': d.pop('id'),
+ "_op_type": op_type,
+ "_index": index_name,
+ "_type": doc_type,
+ "_id": d.pop("id"),
}
operation.update(d)
@@ -38,15 +38,15 @@ def to_dict(obj):
return
keys = [
- 'id',
- 'job_guid',
- 'test',
- 'subtest',
- 'status',
- 'expected',
- 'message',
- 'best_classification',
- 'best_is_verified',
+ "id",
+ "job_guid",
+ "test",
+ "subtest",
+ "status",
+ "expected",
+ "message",
+ "best_classification",
+ "best_is_verified",
]
all_fields = obj.to_dict()
diff --git a/treeherder/services/pulse/consumers.py b/treeherder/services/pulse/consumers.py
index cf3c27cce5d..8d176d0bce5 100644
--- a/treeherder/services/pulse/consumers.py
+++ b/treeherder/services/pulse/consumers.py
@@ -56,11 +56,11 @@ class PulseConsumer(ConsumerMixin):
"""
def __init__(self, source, build_routing_key):
- self.connection = Connection(source['pulse_url'], virtual_host=source.get('vhost', '/'))
+ self.connection = Connection(source["pulse_url"], virtual_host=source.get("vhost", "/"))
self.consumers = []
self.queue = None
self.queue_name = "queue/{}/{}".format(self.connection.userid, self.queue_suffix)
- self.root_url = source['root_url']
+ self.root_url = source["root_url"]
self.source = source
self.build_routing_key = build_routing_key
@@ -76,13 +76,13 @@ def prepare(self):
bindings = []
for binding in self.bindings():
# split source string into exchange and routing key sections
- exchange, _, routing_keys = binding.partition('.')
+ exchange, _, routing_keys = binding.partition(".")
# build an exchange object with our connection and exchange name
exchange = get_exchange(self.connection, exchange)
# split the routing keys up using the delimiter
- for routing_key in routing_keys.split(':'):
+ for routing_key in routing_keys.split(":"):
if self.build_routing_key is not None: # build routing key
routing_key = self.build_routing_key(routing_key)
@@ -159,13 +159,13 @@ class TaskConsumer(PulseConsumer):
def bindings(self):
return TASKCLUSTER_TASK_BINDINGS
- @newrelic.agent.background_task(name='pulse-listener-tasks.on_message', group='Pulse Listener')
+ @newrelic.agent.background_task(name="pulse-listener-tasks.on_message", group="Pulse Listener")
def on_message(self, body, message):
- exchange = message.delivery_info['exchange']
- routing_key = message.delivery_info['routing_key']
- logger.debug('received job message from %s#%s', exchange, routing_key)
+ exchange = message.delivery_info["exchange"]
+ routing_key = message.delivery_info["routing_key"]
+ logger.debug("received job message from %s#%s", exchange, routing_key)
store_pulse_tasks.apply_async(
- args=[body, exchange, routing_key, self.root_url], queue='store_pulse_tasks'
+ args=[body, exchange, routing_key, self.root_url], queue="store_pulse_tasks"
)
message.ack()
@@ -174,26 +174,26 @@ class MozciClassificationConsumer(PulseConsumer):
queue_suffix = env("PULSE_MOZCI_CLASSIFICATION_QUEUE_NAME", default="tasksclassification")
def bindings(self):
- mozci_env = env('PULSE_MOZCI_ENVIRONMENT', default='production')
- if mozci_env == 'testing':
+ mozci_env = env("PULSE_MOZCI_ENVIRONMENT", default="production")
+ if mozci_env == "testing":
return MOZCI_CLASSIFICATION_TESTING_BINDINGS
- if mozci_env != 'production':
+ if mozci_env != "production":
logger.warning(
- f'PULSE_MOZCI_ENVIRONMENT should be testing or production not {mozci_env}, defaulting to production'
+ f"PULSE_MOZCI_ENVIRONMENT should be testing or production not {mozci_env}, defaulting to production"
)
return MOZCI_CLASSIFICATION_PRODUCTION_BINDINGS
@newrelic.agent.background_task(
- name='pulse-listener-tasks-classification.on_message', group='Pulse Listener'
+ name="pulse-listener-tasks-classification.on_message", group="Pulse Listener"
)
def on_message(self, body, message):
- exchange = message.delivery_info['exchange']
- routing_key = message.delivery_info['routing_key']
- logger.debug('received mozci classification job message from %s#%s', exchange, routing_key)
+ exchange = message.delivery_info["exchange"]
+ routing_key = message.delivery_info["routing_key"]
+ logger.debug("received mozci classification job message from %s#%s", exchange, routing_key)
store_pulse_tasks_classification.apply_async(
args=[body, exchange, routing_key, self.root_url],
- queue='store_pulse_tasks_classification',
+ queue="store_pulse_tasks_classification",
)
message.ack()
@@ -203,19 +203,19 @@ class PushConsumer(PulseConsumer):
def bindings(self):
rv = []
- if self.source.get('hgmo'):
+ if self.source.get("hgmo"):
rv += HGMO_PUSH_BINDINGS
- if self.source.get('github'):
+ if self.source.get("github"):
rv += GITHUB_PUSH_BINDINGS
return rv
- @newrelic.agent.background_task(name='pulse-listener-pushes.on_message', group='Pulse Listener')
+ @newrelic.agent.background_task(name="pulse-listener-pushes.on_message", group="Pulse Listener")
def on_message(self, body, message):
- exchange = message.delivery_info['exchange']
- routing_key = message.delivery_info['routing_key']
- logger.info('received push message from %s#%s', exchange, routing_key)
+ exchange = message.delivery_info["exchange"]
+ routing_key = message.delivery_info["routing_key"]
+ logger.info("received push message from %s#%s", exchange, routing_key)
store_pulse_pushes.apply_async(
- args=[body, exchange, routing_key, self.root_url], queue='store_pulse_pushes'
+ args=[body, exchange, routing_key, self.root_url], queue="store_pulse_pushes"
)
message.ack()
@@ -231,42 +231,42 @@ class JointConsumer(PulseConsumer):
def bindings(self):
rv = []
- if self.source.get('hgmo'):
+ if self.source.get("hgmo"):
rv += HGMO_PUSH_BINDINGS
- if self.source.get('github'):
+ if self.source.get("github"):
rv += GITHUB_PUSH_BINDINGS
- if self.source.get('tasks'):
+ if self.source.get("tasks"):
rv += TASKCLUSTER_TASK_BINDINGS
- if self.source.get('mozci-classification'):
- mozci_env = env('PULSE_MOZCI_ENVIRONMENT', default='production')
- if mozci_env == 'testing':
+ if self.source.get("mozci-classification"):
+ mozci_env = env("PULSE_MOZCI_ENVIRONMENT", default="production")
+ if mozci_env == "testing":
rv += MOZCI_CLASSIFICATION_TESTING_BINDINGS
else:
- if mozci_env != 'production':
+ if mozci_env != "production":
logger.warning(
- f'PULSE_MOZCI_ENVIRONMENT should be testing or production not {mozci_env}, defaulting to production'
+ f"PULSE_MOZCI_ENVIRONMENT should be testing or production not {mozci_env}, defaulting to production"
)
rv += MOZCI_CLASSIFICATION_PRODUCTION_BINDINGS
return rv
- @newrelic.agent.background_task(name='pulse-joint-listener.on_message', group='Pulse Listener')
+ @newrelic.agent.background_task(name="pulse-joint-listener.on_message", group="Pulse Listener")
def on_message(self, body, message):
- exchange = message.delivery_info['exchange']
- routing_key = message.delivery_info['routing_key']
- logger.debug('received job message from %s#%s', exchange, routing_key)
- if exchange.startswith('exchange/taskcluster-queue/v1/'):
+ exchange = message.delivery_info["exchange"]
+ routing_key = message.delivery_info["routing_key"]
+ logger.debug("received job message from %s#%s", exchange, routing_key)
+ if exchange.startswith("exchange/taskcluster-queue/v1/"):
store_pulse_tasks.apply_async(
- args=[body, exchange, routing_key, self.root_url], queue='store_pulse_tasks'
+ args=[body, exchange, routing_key, self.root_url], queue="store_pulse_tasks"
)
- if 'task-completed' in exchange and '.proj-mozci.' in routing_key:
+ if "task-completed" in exchange and ".proj-mozci." in routing_key:
store_pulse_tasks_classification.apply_async(
args=[body, exchange, routing_key, self.root_url],
- queue='store_pulse_tasks_classification',
+ queue="store_pulse_tasks_classification",
)
else:
store_pulse_pushes.apply_async(
- args=[body, exchange, routing_key, self.root_url], queue='store_pulse_pushes'
+ args=[body, exchange, routing_key, self.root_url], queue="store_pulse_pushes"
)
message.ack()
diff --git a/treeherder/services/taskcluster.py b/treeherder/services/taskcluster.py
index 271323f4768..98e3865ff24 100644
--- a/treeherder/services/taskcluster.py
+++ b/treeherder/services/taskcluster.py
@@ -12,7 +12,7 @@
logger = logging.getLogger(__name__)
-DEFAULT_ROOT_URL = 'https://firefox-ci-tc.services.mozilla.com'
+DEFAULT_ROOT_URL = "https://firefox-ci-tc.services.mozilla.com"
class TaskclusterModel(ABC):
@@ -32,16 +32,16 @@ class TaskclusterModelImpl(TaskclusterModel):
"""Javascript -> Python rewrite of frontend' s TaskclusterModel"""
def __init__(self, root_url, client_id=None, access_token=None):
- options = {'rootUrl': root_url}
+ options = {"rootUrl": root_url}
credentials = {}
if client_id:
- credentials['clientId'] = client_id
+ credentials["clientId"] = client_id
if access_token:
- credentials['accessToken'] = access_token
+ credentials["accessToken"] = access_token
# Taskcluster APIs
- self.hooks = taskcluster.Hooks({**options, 'credentials': credentials})
+ self.hooks = taskcluster.Hooks({**options, "credentials": credentials})
# Following least-privilege principle, as services
# below don't really need authorization credentials.
@@ -55,43 +55,43 @@ def trigger_action(
self.__set_root_url(root_url)
actions_context = self._load(decision_task_id, task_id)
- action_to_trigger = self._get_action(actions_context['actions'], action)
+ action_to_trigger = self._get_action(actions_context["actions"], action)
return self._submit(
action=action_to_trigger,
decision_task_id=decision_task_id,
task_id=task_id,
input=input,
- static_action_variables=actions_context['staticActionVariables'],
+ static_action_variables=actions_context["staticActionVariables"],
)
def __set_root_url(self, root_url):
for service in (self.hooks, self.queue, self.auth):
- service.options['rootUrl'] = root_url
+ service.options["rootUrl"] = root_url
def _load(self, decision_task_id: str, task_id: str) -> dict:
if not decision_task_id:
raise ValueError("No decision task, can't find taskcluster actions")
# fetch
- logger.debug('Fetching actions.json...')
+ logger.debug("Fetching actions.json...")
actions_url = self.queue.buildUrl(
- self.queue.funcinfo['getLatestArtifact']['name'],
+ self.queue.funcinfo["getLatestArtifact"]["name"],
decision_task_id,
- 'public/actions.json',
+ "public/actions.json",
)
- response = requests.request('GET', actions_url)
+ response = requests.request("GET", actions_url)
actions_json = response.json()
task_definition = self.queue.task(task_id)
- if actions_json['version'] != 1:
- raise RuntimeError('Wrong version of actions.json, unable to continue')
+ if actions_json["version"] != 1:
+ raise RuntimeError("Wrong version of actions.json, unable to continue")
return {
- 'staticActionVariables': actions_json['variables'],
- 'actions': self._filter_relevant_actions(actions_json, task_definition),
+ "staticActionVariables": actions_json["variables"],
+ "actions": self._filter_relevant_actions(actions_json, task_definition),
}
def _submit(
@@ -132,16 +132,16 @@ def _submit(
def _filter_relevant_actions(cls, actions_json: dict, original_task: dict) -> list:
relevant_actions = {}
- for action in actions_json['actions']:
- action_name = action['name']
+ for action in actions_json["actions"]:
+ action_name = action["name"]
if action_name in relevant_actions:
continue
- no_context_or_task_to_check = (not len(action['context'])) and (not original_task)
+ no_context_or_task_to_check = (not len(action["context"])) and (not original_task)
task_is_in_context = (
original_task
- and original_task.get('tags')
- and cls._task_in_context(action['context'], original_task['tags'])
+ and original_task.get("tags")
+ and cls._task_in_context(action["context"], original_task["tags"])
)
if no_context_or_task_to_check or task_is_in_context:
@@ -242,10 +242,10 @@ def notify_client_factory(
if client_id and access_token: # we're on production
options = {
- 'rootUrl': root_url or DEFAULT_ROOT_URL,
- 'credentials': {
- 'clientId': client_id,
- 'accessToken': access_token,
+ "rootUrl": root_url or DEFAULT_ROOT_URL,
+ "credentials": {
+ "clientId": client_id,
+ "accessToken": access_token,
},
}
return NotifyAdapter(options)
diff --git a/treeherder/utils/__init__.py b/treeherder/utils/__init__.py
index aa2f9a04127..bbe4f491ed5 100644
--- a/treeherder/utils/__init__.py
+++ b/treeherder/utils/__init__.py
@@ -1,7 +1,7 @@
from datetime import datetime
from pathlib import Path
-PROJECT_ROOT = Path(__file__) / '..' / '..' / '..'
+PROJECT_ROOT = Path(__file__) / ".." / ".." / ".."
def default_serializer(val):
diff --git a/treeherder/utils/http.py b/treeherder/utils/http.py
index 17810a368d2..455bb59daec 100644
--- a/treeherder/utils/http.py
+++ b/treeherder/utils/http.py
@@ -3,18 +3,18 @@
from django.conf import settings
-def make_request(url, method='GET', headers=None, timeout=30, **kwargs):
+def make_request(url, method="GET", headers=None, timeout=30, **kwargs):
"""A wrapper around requests to set defaults & call raise_for_status()."""
headers = headers or {}
- headers['User-Agent'] = 'treeherder/{}'.format(settings.SITE_HOSTNAME)
+ headers["User-Agent"] = "treeherder/{}".format(settings.SITE_HOSTNAME)
response = requests.request(method, url, headers=headers, timeout=timeout, **kwargs)
if response.history:
params = {
- 'url': url,
- 'redirects': len(response.history),
- 'duration': sum(r.elapsed.total_seconds() for r in response.history),
+ "url": url,
+ "redirects": len(response.history),
+ "duration": sum(r.elapsed.total_seconds() for r in response.history),
}
- newrelic.agent.record_custom_event('RedirectedRequest', params=params)
+ newrelic.agent.record_custom_event("RedirectedRequest", params=params)
response.raise_for_status()
return response
@@ -22,9 +22,9 @@ def make_request(url, method='GET', headers=None, timeout=30, **kwargs):
def fetch_json(url, params=None, headers=None):
if headers is None:
- headers = {'Accept': 'application/json'}
+ headers = {"Accept": "application/json"}
else:
- headers['Accept'] = 'application/json'
+ headers["Accept"] = "application/json"
response = make_request(url, params=params, headers=headers)
return response.json()
diff --git a/treeherder/utils/queryset.py b/treeherder/utils/queryset.py
index c178c69b2ca..885060f89b5 100644
--- a/treeherder/utils/queryset.py
+++ b/treeherder/utils/queryset.py
@@ -18,7 +18,7 @@ def chunked_qs(qs, chunk_size=10000, fields=None):
min_id = 0
while True:
- chunk = qs.filter(id__gt=min_id).order_by('id')
+ chunk = qs.filter(id__gt=min_id).order_by("id")
if fields is not None:
chunk = chunk.only(*fields)
@@ -56,7 +56,7 @@ def chunked_qs_reverse(qs, chunk_size=10000):
if not qs:
return
- qs = qs.order_by('-id')
+ qs = qs.order_by("-id")
# Can't use .only() here in case the query used select_related
max_id = qs.first().id
diff --git a/treeherder/utils/taskcluster.py b/treeherder/utils/taskcluster.py
index 9baf07d619b..4f15423170a 100644
--- a/treeherder/utils/taskcluster.py
+++ b/treeherder/utils/taskcluster.py
@@ -5,7 +5,7 @@
def get_task_definition(root_url, task_id):
- task_url = taskcluster_urls.api(root_url, 'queue', 'v1', 'task/{}'.format(task_id))
+ task_url = taskcluster_urls.api(root_url, "queue", "v1", "task/{}".format(task_id))
return fetch_json(task_url)
@@ -17,7 +17,7 @@ def download_artifact(root_url, task_id, path):
Returns either the parsed json, the parsed yaml or the plain response.
"""
artifact_url = taskcluster_urls.api(
- root_url, 'queue', 'v1', 'task/{}/artifacts/{}'.format(task_id, path)
+ root_url, "queue", "v1", "task/{}/artifacts/{}".format(task_id, path)
)
if path.endswith(".json"):
diff --git a/treeherder/utils/taskcluster_lib_scopes.py b/treeherder/utils/taskcluster_lib_scopes.py
index 3f005fd2369..0f8d9db9f24 100644
--- a/treeherder/utils/taskcluster_lib_scopes.py
+++ b/treeherder/utils/taskcluster_lib_scopes.py
@@ -26,7 +26,7 @@ def patternMatch(pattern: str, scope):
if scope == pattern:
return True
- if pattern.endswith('*'):
+ if pattern.endswith("*"):
return scope.startswith(pattern[:-1])
return False
diff --git a/treeherder/webapp/api/bug.py b/treeherder/webapp/api/bug.py
index f98044f02ee..69406ede36c 100644
--- a/treeherder/webapp/api/bug.py
+++ b/treeherder/webapp/api/bug.py
@@ -11,8 +11,8 @@
class BugJobMapViewSet(viewsets.ViewSet):
def create(self, request, project):
"""Add a new relation between a job and a bug."""
- job_id = int(request.data['job_id'])
- bug_id = int(request.data['bug_id'])
+ job_id = int(request.data["job_id"])
+ bug_id = int(request.data["bug_id"])
try:
BugJobMap.create(
@@ -56,14 +56,14 @@ def list(self, request, project):
try:
# Casting to list since Python 3's `map` returns an iterator,
# which would hide any ValueError until used by the ORM below.
- job_ids = list(map(int, request.query_params.getlist('job_id')))
+ job_ids = list(map(int, request.query_params.getlist("job_id")))
except ValueError:
return Response({"message": "Valid job_id required"}, status=400)
if not job_ids:
return Response({"message": "At least one job_id is required"}, status=400)
jobs = Job.objects.filter(repository__name=project, id__in=job_ids)
- bug_job_maps = BugJobMap.objects.filter(job__in=jobs).select_related('user')
+ bug_job_maps = BugJobMap.objects.filter(job__in=jobs).select_related("user")
serializer = BugJobMapSerializer(bug_job_maps, many=True)
return Response(serializer.data)
diff --git a/treeherder/webapp/api/bug_creation.py b/treeherder/webapp/api/bug_creation.py
index bbdb3e36abd..55b9b12e63d 100644
--- a/treeherder/webapp/api/bug_creation.py
+++ b/treeherder/webapp/api/bug_creation.py
@@ -13,67 +13,67 @@
class FilesBugzillaMapViewSet(viewsets.ReadOnlyModelViewSet):
def filter_product_component(self, queryset):
filtered_queryset = []
- product = 'bugzilla_component__product'
- component = 'bugzilla_component__component'
+ product = "bugzilla_component__product"
+ component = "bugzilla_component__component"
# Don't suggest these. While a file associated with one of these
# combinations can be in the failure line, it might not be a test and
# the real issue gets logged earlier but not detected as failure line.
# Require user input for the product and component to use.
IGNORE_LIST_PRODUCT_COMPONENT = [
- {product: 'Testing', component: 'Mochitest'},
+ {product: "Testing", component: "Mochitest"},
]
for product_component in queryset:
if product_component not in IGNORE_LIST_PRODUCT_COMPONENT:
filtered_queryset.append(product_component)
return filtered_queryset[:5]
- @action(detail=True, methods=['get'])
+ @action(detail=True, methods=["get"])
def get_queryset(self):
"""
Gets a set of bug suggestions for this job
"""
- path = self.request.query_params.get('path')
- if path.startswith('org.mozilla.'):
- path = (path.split('#'))[0]
- path = (path.split('.'))[-1]
- path = path.replace('\\', '/')
+ path = self.request.query_params.get("path")
+ if path.startswith("org.mozilla."):
+ path = (path.split("#"))[0]
+ path = (path.split("."))[-1]
+ path = path.replace("\\", "/")
# Drop parameters
- path = (path.split('?'))[0]
- file = (path.split('/'))[-1]
- fileNameParts = file.split('.')
- file_without_extension = fileNameParts[0] + ('.' if len(fileNameParts) > 1 else '')
+ path = (path.split("?"))[0]
+ file = (path.split("/"))[-1]
+ fileNameParts = file.split(".")
+ file_without_extension = fileNameParts[0] + ("." if len(fileNameParts) > 1 else "")
queryset = (
- FilesBugzillaMap.objects.select_related('bugzilla_component')
+ FilesBugzillaMap.objects.select_related("bugzilla_component")
.filter(path__endswith=path)
- .exclude(path__startswith='testing/web-platform/meta/')
- .values('bugzilla_component__product', 'bugzilla_component__component')
+ .exclude(path__startswith="testing/web-platform/meta/")
+ .values("bugzilla_component__product", "bugzilla_component__component")
.distinct()
)
if len(queryset) == 0:
# E.g. web-platform-tests ("wpt") can use test files generated from
# other files which just have different file extensions.
- path_without_extension = (path.rsplit('/', 1))[0] + '/' + file_without_extension
+ path_without_extension = (path.rsplit("/", 1))[0] + "/" + file_without_extension
queryset = (
- FilesBugzillaMap.objects.select_related('bugzilla_component')
+ FilesBugzillaMap.objects.select_related("bugzilla_component")
.filter(path__contains=path_without_extension)
- .exclude(path__startswith='testing/web-platform/meta/')
- .values('bugzilla_component__product', 'bugzilla_component__component')
+ .exclude(path__startswith="testing/web-platform/meta/")
+ .values("bugzilla_component__product", "bugzilla_component__component")
.distinct()
)
if len(queryset) > 0:
return self.filter_product_component(queryset)
queryset = (
- FilesBugzillaMap.objects.select_related('bugzilla_component')
+ FilesBugzillaMap.objects.select_related("bugzilla_component")
.filter(file_name=file)
- .values('bugzilla_component__product', 'bugzilla_component__component')
+ .values("bugzilla_component__product", "bugzilla_component__component")
.distinct()
)
if len(queryset) > 0:
return self.filter_product_component(queryset)
queryset = (
- FilesBugzillaMap.objects.select_related('bugzilla_component')
+ FilesBugzillaMap.objects.select_related("bugzilla_component")
.filter(file_name__startswith=file_without_extension)
- .values('bugzilla_component__product', 'bugzilla_component__component')
+ .values("bugzilla_component__product", "bugzilla_component__component")
.distinct()
)
return self.filter_product_component(queryset)
diff --git a/treeherder/webapp/api/bugzilla.py b/treeherder/webapp/api/bugzilla.py
index 01d9d5a7419..62fe545b4cd 100644
--- a/treeherder/webapp/api/bugzilla.py
+++ b/treeherder/webapp/api/bugzilla.py
@@ -13,7 +13,7 @@
class BugzillaViewSet(viewsets.ViewSet):
- @action(detail=False, methods=['post'])
+ @action(detail=False, methods=["post"])
def create_bug(self, request):
"""
Create a bugzilla bug with passed params
@@ -31,27 +31,27 @@ def create_bug(self, request):
status=HTTP_400_BAD_REQUEST,
)
- description = u"**Filed by:** {}\n{}".format(
- request.user.email.replace('@', " [at] "), params.get("comment", "")
+ description = "**Filed by:** {}\n{}".format(
+ request.user.email.replace("@", " [at] "), params.get("comment", "")
).encode("utf-8")
summary = params.get("summary").encode("utf-8").strip()
url = settings.BUGFILER_API_URL + "/rest/bug"
- headers = {'x-bugzilla-api-key': settings.BUGFILER_API_KEY, 'Accept': 'application/json'}
+ headers = {"x-bugzilla-api-key": settings.BUGFILER_API_KEY, "Accept": "application/json"}
data = {
- 'type': "defect",
- 'product': params.get("product"),
- 'component': params.get("component"),
- 'summary': summary,
- 'keywords': params.get("keywords"),
- 'whiteboard': params.get("whiteboard"),
- 'regressed_by': params.get("regressed_by"),
- 'see_also': params.get("see_also"),
- 'version': params.get("version"),
- 'cf_crash_signature': params.get("crash_signature"),
- 'severity': params.get("severity"),
- 'priority': params.get("priority"),
- 'description': description,
- 'comment_tags': "treeherder",
+ "type": "defect",
+ "product": params.get("product"),
+ "component": params.get("component"),
+ "summary": summary,
+ "keywords": params.get("keywords"),
+ "whiteboard": params.get("whiteboard"),
+ "regressed_by": params.get("regressed_by"),
+ "see_also": params.get("see_also"),
+ "version": params.get("version"),
+ "cf_crash_signature": params.get("crash_signature"),
+ "severity": params.get("severity"),
+ "priority": params.get("priority"),
+ "description": description,
+ "comment_tags": "treeherder",
}
if params.get("is_security_issue"):
security_group_list = list(
@@ -69,10 +69,10 @@ def create_bug(self, request):
data["groups"] = security_group_list
try:
- response = make_request(url, method='POST', headers=headers, json=data)
+ response = make_request(url, method="POST", headers=headers, json=data)
except requests.exceptions.HTTPError as e:
try:
- message = e.response.json()['message']
+ message = e.response.json()["message"]
except (ValueError, KeyError):
message = e.response.text
return Response({"failure": message}, status=HTTP_400_BAD_REQUEST)
diff --git a/treeherder/webapp/api/changelog.py b/treeherder/webapp/api/changelog.py
index 60b251369a7..8c216db8dbc 100644
--- a/treeherder/webapp/api/changelog.py
+++ b/treeherder/webapp/api/changelog.py
@@ -15,8 +15,8 @@ def list(self, request):
"""
GET method implementation for list view
"""
- start_date = request.query_params.get('startdate') # YYYY-MM-DD
- end_date = request.query_params.get('enddate') # YYYY-MM-DD
+ start_date = request.query_params.get("startdate") # YYYY-MM-DD
+ end_date = request.query_params.get("enddate") # YYYY-MM-DD
serializer = ChangelogSerializer(get_changes(start_date, end_date), many=True)
return Response(serializer.data)
diff --git a/treeherder/webapp/api/classification.py b/treeherder/webapp/api/classification.py
index ffa4ba9f9a2..9100841380a 100644
--- a/treeherder/webapp/api/classification.py
+++ b/treeherder/webapp/api/classification.py
@@ -19,7 +19,7 @@ def delete(self, request, project, format=None):
return Response("Must be logged in", status=HTTP_401_UNAUTHORIZED)
if not request.user.is_staff:
return Response("Must be staff or in sheriffing group", status=HTTP_403_FORBIDDEN)
- job_ids = [job['id'] for job in request.data]
+ job_ids = [job["id"] for job in request.data]
if not job_ids:
return Response("Must provide job IDs", status=HTTP_404_NOT_FOUND)
Job.objects.filter(id__in=job_ids).update(failure_classification_id=1)
diff --git a/treeherder/webapp/api/csp_report.py b/treeherder/webapp/api/csp_report.py
index 926f7b3de3f..99ce55a6dc9 100644
--- a/treeherder/webapp/api/csp_report.py
+++ b/treeherder/webapp/api/csp_report.py
@@ -24,10 +24,10 @@ def csp_report_collector(request):
permission_classes.
"""
try:
- report = json.loads(request.body)['csp-report']
+ report = json.loads(request.body)["csp-report"]
except (KeyError, TypeError, ValueError):
- return HttpResponseBadRequest('Invalid CSP violation report')
+ return HttpResponseBadRequest("Invalid CSP violation report")
- logger.warning('CSP violation: %s', report)
- newrelic.agent.record_custom_event('CSP violation', report)
+ logger.warning("CSP violation: %s", report)
+ newrelic.agent.record_custom_event("CSP violation", report)
return HttpResponse()
diff --git a/treeherder/webapp/api/exceptions.py b/treeherder/webapp/api/exceptions.py
index 30f16d79018..260254aab7f 100644
--- a/treeherder/webapp/api/exceptions.py
+++ b/treeherder/webapp/api/exceptions.py
@@ -3,4 +3,4 @@
class InsufficientAlertCreationData(APIException):
status_code = 400
- default_detail = 'Insufficient data to create an alert'
+ default_detail = "Insufficient data to create an alert"
diff --git a/treeherder/webapp/api/groups.py b/treeherder/webapp/api/groups.py
index efa2371721b..ccd3b1f157e 100644
--- a/treeherder/webapp/api/groups.py
+++ b/treeherder/webapp/api/groups.py
@@ -25,18 +25,18 @@ class SummaryByGroupName(generics.ListAPIView):
def list(self, request):
startdate = None
enddate = None
- if 'startdate' in request.query_params:
- startdate = request.query_params['startdate']
+ if "startdate" in request.query_params:
+ startdate = request.query_params["startdate"]
- if not startdate or not re.match(r'^[0-9]{4}-[0-9]{2}-[0-9]{2}$', startdate):
+ if not startdate or not re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$", startdate):
startdate = datetime.datetime.today()
else:
startdate = datetime.datetime.strptime(startdate, "%Y-%m-%d")
- if 'enddate' in request.query_params:
- enddate = request.query_params['enddate']
+ if "enddate" in request.query_params:
+ enddate = request.query_params["enddate"]
- if not enddate or not re.match(r'^[0-9]{4}-[0-9]{2}-[0-9]{2}$', enddate):
+ if not enddate or not re.match(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$", enddate):
enddate = startdate + datetime.timedelta(days=1)
else:
enddate = datetime.datetime.strptime(enddate, "%Y-%m-%d")
@@ -50,58 +50,58 @@ def list(self, request):
)
.filter(repository_id__in=(1, 77))
.values(
- 'job_log__groups__name',
- 'job_type__name',
- 'job_log__group_result__status',
- 'failure_classification_id',
+ "job_log__groups__name",
+ "job_type__name",
+ "job_log__group_result__status",
+ "failure_classification_id",
)
- .annotate(job_count=Count('id'))
- .order_by('job_log__groups__name')
+ .annotate(job_count=Count("id"))
+ .order_by("job_log__groups__name")
)
self.queryset = q
serializer = self.get_serializer(self.queryset, many=True)
summary = {}
job_type_names = []
for item in serializer.data:
- if not item['group_name'] or not item['job_type_name']:
+ if not item["group_name"] or not item["job_type_name"]:
continue
- if not item['job_type_name'].startswith('test-'):
+ if not item["job_type_name"].startswith("test-"):
continue
- if int(item['group_status']) == 1: # ok
- result = 'passed'
- elif int(item['group_status']) == 2: # testfailed
- result = 'testfailed'
+ if int(item["group_status"]) == 1: # ok
+ result = "passed"
+ elif int(item["group_status"]) == 2: # testfailed
+ result = "testfailed"
else:
# other: 3 (skipped), 10 (unsupported (i.e. crashed))
# we don't want to count this at all
continue
# TODO: consider stripping out some types; mostly care about FBC vs Intermittent
- classification = item['failure_classification']
-
- if item['job_type_name'] not in job_type_names:
- job_type_names.append(item['job_type_name'])
- if item['group_name'] not in summary:
- summary[item['group_name']] = {}
- if item['job_type_name'] not in summary[item['group_name']]:
- summary[item['group_name']][item['job_type_name']] = {}
- if result not in summary[item['group_name']][item['job_type_name']]:
- summary[item['group_name']][item['job_type_name']][result] = {}
- if classification not in summary[item['group_name']][item['job_type_name']][result]:
- summary[item['group_name']][item['job_type_name']][result][classification] = 0
- summary[item['group_name']][item['job_type_name']][result][classification] += item[
- 'job_count'
+ classification = item["failure_classification"]
+
+ if item["job_type_name"] not in job_type_names:
+ job_type_names.append(item["job_type_name"])
+ if item["group_name"] not in summary:
+ summary[item["group_name"]] = {}
+ if item["job_type_name"] not in summary[item["group_name"]]:
+ summary[item["group_name"]][item["job_type_name"]] = {}
+ if result not in summary[item["group_name"]][item["job_type_name"]]:
+ summary[item["group_name"]][item["job_type_name"]][result] = {}
+ if classification not in summary[item["group_name"]][item["job_type_name"]][result]:
+ summary[item["group_name"]][item["job_type_name"]][result][classification] = 0
+ summary[item["group_name"]][item["job_type_name"]][result][classification] += item[
+ "job_count"
]
- data = {'job_type_names': job_type_names, 'manifests': []}
+ data = {"job_type_names": job_type_names, "manifests": []}
for m in summary.keys():
mdata = []
for d in summary[m]:
for r in summary[m][d]:
for c in summary[m][d][r]:
mdata.append([job_type_names.index(d), r, int(c), summary[m][d][r][c]])
- data['manifests'].append({m: mdata})
+ data["manifests"].append({m: mdata})
return Response(data=data)
diff --git a/treeherder/webapp/api/infra_compare.py b/treeherder/webapp/api/infra_compare.py
index c66969cf9b3..56ec759da90 100644
--- a/treeherder/webapp/api/infra_compare.py
+++ b/treeherder/webapp/api/infra_compare.py
@@ -25,11 +25,11 @@ def list(self, request):
if not query_params.is_valid():
return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)
- startday = query_params.validated_data['startday']
- endday = query_params.validated_data['endday']
- project = query_params.validated_data['project']
- revision = query_params.validated_data['revision']
- interval = query_params.validated_data['interval']
+ startday = query_params.validated_data["startday"]
+ endday = query_params.validated_data["endday"]
+ project = query_params.validated_data["project"]
+ revision = query_params.validated_data["revision"]
+ interval = query_params.validated_data["interval"]
repository = models.Repository.objects.get(name=project)
if revision:
@@ -47,7 +47,7 @@ def list(self, request):
)
# division by 1000000 is done to convert it to seconds
- jobs = jobs.annotate(duration=(F('end_time') - F('start_time')) / 1000000)
+ jobs = jobs.annotate(duration=(F("end_time") - F("start_time")) / 1000000)
self.queryset = jobs.values("id", "job_type__name", "duration", "result")
serializer = self.get_serializer(self.queryset, many=True)
return Response(data=serializer.data)
diff --git a/treeherder/webapp/api/infra_serializers.py b/treeherder/webapp/api/infra_serializers.py
index 48cf2762b65..f2fc9cf5cc2 100644
--- a/treeherder/webapp/api/infra_serializers.py
+++ b/treeherder/webapp/api/infra_serializers.py
@@ -22,12 +22,12 @@ class InfraCompareQuerySerializers(serializers.Serializer):
def validate(self, data):
         # At least one of revision, interval, or startday with endday must be present
if (
- data['revision'] is None
- and data['interval'] is None
- and (data['startday'] is None or data['endday'] is None)
+ data["revision"] is None
+ and data["interval"] is None
+ and (data["startday"] is None or data["endday"] is None)
):
raise serializers.ValidationError(
- 'Required: revision, startday and endday, or interval.'
+ "Required: revision, startday and endday, or interval."
)
return data
@@ -37,6 +37,6 @@ def validate_repository(self, project):
Repository.objects.get(name=project)
except ObjectDoesNotExist:
- raise serializers.ValidationError('{} does not exist.'.format(project))
+ raise serializers.ValidationError("{} does not exist.".format(project))
return project
diff --git a/treeherder/webapp/api/intermittents_view.py b/treeherder/webapp/api/intermittents_view.py
index 2f8a4b090db..5faa568bfaa 100644
--- a/treeherder/webapp/api/intermittents_view.py
+++ b/treeherder/webapp/api/intermittents_view.py
@@ -27,17 +27,17 @@ def list(self, request):
if not query_params.is_valid():
return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)
- startday = query_params.validated_data['startday']
- endday = get_end_of_day(query_params.validated_data['endday'])
- repo = query_params.validated_data['tree']
+ startday = query_params.validated_data["startday"]
+ endday = get_end_of_day(query_params.validated_data["endday"])
+ repo = query_params.validated_data["tree"]
self.queryset = (
BugJobMap.failures.by_date(startday, endday)
.by_repo(repo)
- .values('bug_id')
- .annotate(bug_count=Count('job_id'))
- .values('bug_id', 'bug_count')
- .order_by('-bug_count')
+ .values("bug_id")
+ .annotate(bug_count=Count("job_id"))
+ .values("bug_id", "bug_count")
+ .order_by("-bug_count")
)
serializer = self.get_serializer(self.queryset, many=True)
@@ -52,38 +52,38 @@ class FailuresByBug(generics.ListAPIView):
def list(self, request):
query_params = FailuresQueryParamsSerializer(
- data=request.query_params, context='requireBug'
+ data=request.query_params, context="requireBug"
)
if not query_params.is_valid():
return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)
- startday = query_params.validated_data['startday']
- endday = get_end_of_day(query_params.validated_data['endday'])
- repo = query_params.validated_data['tree']
- bug_id = query_params.validated_data['bug']
+ startday = query_params.validated_data["startday"]
+ endday = get_end_of_day(query_params.validated_data["endday"])
+ repo = query_params.validated_data["tree"]
+ bug_id = query_params.validated_data["bug"]
self.queryset = (
BugJobMap.failures.by_date(startday, endday)
.by_repo(repo)
.by_bug(bug_id)
.values(
- 'job__repository__name',
- 'job__machine_platform__platform',
- 'bug_id',
- 'job_id',
- 'job__push__time',
- 'job__push__revision',
- 'job__signature__job_type_name',
- 'job__option_collection_hash',
- 'job__machine__name',
+ "job__repository__name",
+ "job__machine_platform__platform",
+ "bug_id",
+ "job_id",
+ "job__push__time",
+ "job__push__revision",
+ "job__signature__job_type_name",
+ "job__option_collection_hash",
+ "job__machine__name",
)
- .order_by('-job__push__time')
+ .order_by("-job__push__time")
)
lines = TextLogError.objects.filter(
- job_id__in=self.queryset.values_list('job_id', flat=True),
- line__contains='TEST-UNEXPECTED-FAIL',
- ).values_list('job_id', 'line')
+ job_id__in=self.queryset.values_list("job_id", flat=True),
+ line__contains="TEST-UNEXPECTED-FAIL",
+ ).values_list("job_id", "line")
grouped_lines = defaultdict(list)
for job_id, line in lines:
@@ -93,25 +93,25 @@ def list(self, request):
hash_list = set()
for item in self.queryset:
- item['lines'] = grouped_lines.get(item['job_id'], [])
- hash_list.add(item['job__option_collection_hash'])
+ item["lines"] = grouped_lines.get(item["job_id"], [])
+ hash_list.add(item["job__option_collection_hash"])
hash_query = (
OptionCollection.objects.filter(option_collection_hash__in=hash_list)
- .select_related('option')
- .values('option__name', 'option_collection_hash')
+ .select_related("option")
+ .values("option__name", "option_collection_hash")
)
for item in self.queryset:
match = [
- x['option__name']
+ x["option__name"]
for x in hash_query
- if x['option_collection_hash'] == item['job__option_collection_hash']
+ if x["option_collection_hash"] == item["job__option_collection_hash"]
]
if match:
- item['build_type'] = match[0]
+ item["build_type"] = match[0]
else:
- item['build_type'] = 'unknown'
+ item["build_type"] = "unknown"
serializer = self.get_serializer(self.queryset, many=True)
return Response(data=serializer.data)
@@ -128,18 +128,18 @@ def list(self, request):
if not query_params.is_valid():
return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)
- startday = query_params.validated_data['startday']
- endday = get_end_of_day(query_params.validated_data['endday'])
- repo = query_params.validated_data['tree']
- bug_id = query_params.validated_data['bug']
+ startday = query_params.validated_data["startday"]
+ endday = get_end_of_day(query_params.validated_data["endday"])
+ repo = query_params.validated_data["tree"]
+ bug_id = query_params.validated_data["bug"]
push_query = (
Push.failures.filter(time__range=(startday, endday))
.by_repo(repo, False)
- .annotate(date=TruncDate('time'))
- .values('date')
- .annotate(test_runs=Count('author'))
- .values('date', 'test_runs')
+ .annotate(date=TruncDate("time"))
+ .values("date")
+ .annotate(test_runs=Count("author"))
+ .values("date", "test_runs")
)
if bug_id:
@@ -147,10 +147,10 @@ def list(self, request):
BugJobMap.failures.by_date(startday, endday)
.by_repo(repo)
.by_bug(bug_id)
- .annotate(date=TruncDate('job__push__time'))
- .values('date')
- .annotate(failure_count=Count('id'))
- .values('date', 'failure_count')
+ .annotate(date=TruncDate("job__push__time"))
+ .values("date")
+ .annotate(failure_count=Count("id"))
+ .values("date", "failure_count")
)
else:
job_query = (
@@ -158,11 +158,11 @@ def list(self, request):
push__time__range=(startday, endday), failure_classification_id=4
)
.by_repo(repo, False)
- .select_related('push')
- .annotate(date=TruncDate('push__time'))
- .values('date')
- .annotate(failure_count=Count('id'))
- .values('date', 'failure_count')
+ .select_related("push")
+ .annotate(date=TruncDate("push__time"))
+ .values("date")
+ .annotate(failure_count=Count("id"))
+ .values("date", "failure_count")
)
# merges the push_query and job_query results into a list; if a date is found in both queries,
@@ -170,13 +170,13 @@ def list(self, request):
# add a new object with push_query data and a default for failure_count
self.queryset = []
for push in push_query:
- match = [job for job in job_query if push['date'] == job['date']]
+ match = [job for job in job_query if push["date"] == job["date"]]
if match:
- match[0]['test_runs'] = push['test_runs']
+ match[0]["test_runs"] = push["test_runs"]
self.queryset.append(match[0])
else:
self.queryset.append(
- {'date': push['date'], 'test_runs': push['test_runs'], 'failure_count': 0}
+ {"date": push["date"], "test_runs": push["test_runs"], "failure_count": 0}
)
serializer = self.get_serializer(self.queryset, many=True)
diff --git a/treeherder/webapp/api/investigated_test.py b/treeherder/webapp/api/investigated_test.py
index b92b130c671..1857810681a 100644
--- a/treeherder/webapp/api/investigated_test.py
+++ b/treeherder/webapp/api/investigated_test.py
@@ -13,11 +13,11 @@ class InvestigatedViewSet(viewsets.ModelViewSet):
"""
serializer_class = InvestigatedTestsSerializers
- allowed_methods = ['GET', 'POST', 'DELETE']
+ allowed_methods = ["GET", "POST", "DELETE"]
def get_queryset(self):
- revision = self.request.GET['revision']
- project = self.kwargs['project']
+ revision = self.request.GET["revision"]
+ project = self.kwargs["project"]
try:
repository = Repository.objects.get(name=project)
@@ -36,11 +36,11 @@ def get_queryset(self):
)
def create(self, request, *args, **kwargs):
- project = kwargs['project']
- revision = request.query_params.get('revision')
- test = request.data['test']
- jobName = request.data['jobName']
- jobSymbol = request.data['jobSymbol']
+ project = kwargs["project"]
+ revision = request.query_params.get("revision")
+ test = request.data["test"]
+ jobName = request.data["jobName"]
+ jobSymbol = request.data["jobSymbol"]
try:
repository = Repository.objects.get(name=project)
diff --git a/treeherder/webapp/api/job_log_url.py b/treeherder/webapp/api/job_log_url.py
index edefae5dc9f..eee0634d404 100644
--- a/treeherder/webapp/api/job_log_url.py
+++ b/treeherder/webapp/api/job_log_url.py
@@ -13,11 +13,11 @@ class JobLogUrlViewSet(viewsets.ViewSet):
@staticmethod
def _log_as_dict(log):
return {
- 'id': log.id,
- 'job_id': log.job_id,
- 'name': log.name,
- 'url': log.url,
- 'parse_status': log.get_status_display(),
+ "id": log.id,
+ "job_id": log.job_id,
+ "name": log.name,
+ "url": log.url,
+ "parse_status": log.get_status_display(),
}
def retrieve(self, request, project, pk=None):
@@ -32,7 +32,7 @@ def list(self, request, project):
GET method implementation for list view
         job_id -- Mandatory filter indicating which job these logs belong to.
"""
- job_ids = request.query_params.getlist('job_id')
+ job_ids = request.query_params.getlist("job_id")
if not job_ids:
raise ParseError(detail="The job_id parameter is mandatory for this endpoint")
try:
diff --git a/treeherder/webapp/api/jobs.py b/treeherder/webapp/api/jobs.py
index 196dfe7ce4f..f17da7d86ca 100644
--- a/treeherder/webapp/api/jobs.py
+++ b/treeherder/webapp/api/jobs.py
@@ -32,54 +32,54 @@ class JobFilter(django_filters.FilterSet):
as the previous jobs API
"""
- id = django_filters.NumberFilter(field_name='id')
- id__in = NumberInFilter(field_name='id', lookup_expr='in')
- tier__in = NumberInFilter(field_name='tier', lookup_expr='in')
- push_id__in = NumberInFilter(field_name='push_id', lookup_expr='in')
- job_guid = django_filters.CharFilter(field_name='guid')
- job_guid__in = CharInFilter(field_name='guid', lookup_expr='in')
- build_architecture = django_filters.CharFilter(field_name='build_platform__architecture')
- build_os = django_filters.CharFilter(field_name='build_platform__os_name')
- build_platform = django_filters.CharFilter(field_name='build_platform__platform')
- build_system_type = django_filters.CharFilter(field_name='signature__build_system_type')
- job_group_id = django_filters.NumberFilter(field_name='job_group_id')
- job_group_name = django_filters.CharFilter(field_name='job_group__name')
- job_group_symbol = django_filters.CharFilter(field_name='job_group__symbol')
- job_type_name = django_filters.CharFilter(field_name='job_type__name')
- job_type_symbol = django_filters.CharFilter(field_name='job_type__symbol')
- machine_name = django_filters.CharFilter(field_name='machine__name')
+ id = django_filters.NumberFilter(field_name="id")
+ id__in = NumberInFilter(field_name="id", lookup_expr="in")
+ tier__in = NumberInFilter(field_name="tier", lookup_expr="in")
+ push_id__in = NumberInFilter(field_name="push_id", lookup_expr="in")
+ job_guid = django_filters.CharFilter(field_name="guid")
+ job_guid__in = CharInFilter(field_name="guid", lookup_expr="in")
+ build_architecture = django_filters.CharFilter(field_name="build_platform__architecture")
+ build_os = django_filters.CharFilter(field_name="build_platform__os_name")
+ build_platform = django_filters.CharFilter(field_name="build_platform__platform")
+ build_system_type = django_filters.CharFilter(field_name="signature__build_system_type")
+ job_group_id = django_filters.NumberFilter(field_name="job_group_id")
+ job_group_name = django_filters.CharFilter(field_name="job_group__name")
+ job_group_symbol = django_filters.CharFilter(field_name="job_group__symbol")
+ job_type_name = django_filters.CharFilter(field_name="job_type__name")
+ job_type_symbol = django_filters.CharFilter(field_name="job_type__symbol")
+ machine_name = django_filters.CharFilter(field_name="machine__name")
machine_platform_architecture = django_filters.CharFilter(
- field_name='machine_platform__architecture'
+ field_name="machine_platform__architecture"
)
- machine_platform_os = django_filters.CharFilter(field_name='machine_platform__os_name')
- platform = django_filters.CharFilter(field_name='machine_platform__platform')
- ref_data_name = django_filters.CharFilter(field_name='signature__name')
- signature = django_filters.CharFilter(field_name='signature__signature')
- task_id = django_filters.CharFilter(field_name='taskcluster_metadata__task_id')
- retry_id = django_filters.NumberFilter(field_name='taskcluster_metadata__retry_id')
+ machine_platform_os = django_filters.CharFilter(field_name="machine_platform__os_name")
+ platform = django_filters.CharFilter(field_name="machine_platform__platform")
+ ref_data_name = django_filters.CharFilter(field_name="signature__name")
+ signature = django_filters.CharFilter(field_name="signature__signature")
+ task_id = django_filters.CharFilter(field_name="taskcluster_metadata__task_id")
+ retry_id = django_filters.NumberFilter(field_name="taskcluster_metadata__retry_id")
class Meta:
model = Job
fields = {
- 'option_collection_hash': ['exact'],
- 'build_platform_id': ['exact'],
- 'failure_classification_id': ['exact'],
- 'job_type_id': ['exact'],
- 'job_group_id': ['exact'],
- 'reason': ['exact'],
- 'state': ['exact'],
- 'result': ['exact'],
- 'who': ['exact'],
- 'tier': ['lt', 'lte', 'exact', 'gt', 'gte'],
- 'id': ['lt', 'lte', 'exact', 'gt', 'gte'],
- 'push_id': ['lt', 'lte', 'exact', 'gt', 'gte'],
- 'last_modified': ['lt', 'lte', 'exact', 'gt', 'gte'],
- 'submit_time': ['lt', 'lte', 'exact', 'gt', 'gte'],
- 'start_time': ['lt', 'lte', 'exact', 'gt', 'gte'],
- 'end_time': ['lt', 'lte', 'exact', 'gt', 'gte'],
+ "option_collection_hash": ["exact"],
+ "build_platform_id": ["exact"],
+ "failure_classification_id": ["exact"],
+ "job_type_id": ["exact"],
+ "job_group_id": ["exact"],
+ "reason": ["exact"],
+ "state": ["exact"],
+ "result": ["exact"],
+ "who": ["exact"],
+ "tier": ["lt", "lte", "exact", "gt", "gte"],
+ "id": ["lt", "lte", "exact", "gt", "gte"],
+ "push_id": ["lt", "lte", "exact", "gt", "gte"],
+ "last_modified": ["lt", "lte", "exact", "gt", "gte"],
+ "submit_time": ["lt", "lte", "exact", "gt", "gte"],
+ "start_time": ["lt", "lte", "exact", "gt", "gte"],
+ "end_time": ["lt", "lte", "exact", "gt", "gte"],
}
filter_overrides = {
- django_models.DateTimeField: {'filter_class': django_filters.IsoDateTimeFilter}
+ django_models.DateTimeField: {"filter_class": django_filters.IsoDateTimeFilter}
}
@@ -89,59 +89,59 @@ class JobsViewSet(viewsets.ReadOnlyModelViewSet):
"""
_default_select_related = [
- 'job_type',
- 'job_group',
- 'machine_platform',
- 'signature',
- 'taskcluster_metadata',
- 'push',
+ "job_type",
+ "job_group",
+ "machine_platform",
+ "signature",
+ "taskcluster_metadata",
+ "push",
]
_query_field_names = [
- 'submit_time',
- 'start_time',
- 'end_time',
- 'failure_classification_id',
- 'id',
- 'job_group__name',
- 'job_group__symbol',
- 'job_type__name',
- 'job_type__symbol',
- 'last_modified',
- 'option_collection_hash',
- 'machine_platform__platform',
- 'option_collection_hash',
- 'push_id',
- 'push__revision',
- 'result',
- 'signature__signature',
- 'state',
- 'tier',
- 'taskcluster_metadata__task_id',
- 'taskcluster_metadata__retry_id',
+ "submit_time",
+ "start_time",
+ "end_time",
+ "failure_classification_id",
+ "id",
+ "job_group__name",
+ "job_group__symbol",
+ "job_type__name",
+ "job_type__symbol",
+ "last_modified",
+ "option_collection_hash",
+ "machine_platform__platform",
+ "option_collection_hash",
+ "push_id",
+ "push__revision",
+ "result",
+ "signature__signature",
+ "state",
+ "tier",
+ "taskcluster_metadata__task_id",
+ "taskcluster_metadata__retry_id",
]
_output_field_names = [
- 'failure_classification_id',
- 'id',
- 'job_group_name',
- 'job_group_symbol',
- 'job_type_name',
- 'job_type_symbol',
- 'last_modified',
- 'platform',
- 'push_id',
- 'push_revision',
- 'result',
- 'signature',
- 'state',
- 'tier',
- 'task_id',
- 'retry_id',
- 'duration',
- 'platform_option',
+ "failure_classification_id",
+ "id",
+ "job_group_name",
+ "job_group_symbol",
+ "job_type_name",
+ "job_type_symbol",
+ "last_modified",
+ "platform",
+ "push_id",
+ "push_revision",
+ "result",
+ "signature",
+ "state",
+ "tier",
+ "task_id",
+ "retry_id",
+ "duration",
+ "platform_option",
]
queryset = (
Job.objects.all()
- .order_by('id')
+ .order_by("id")
.select_related(*_default_select_related)
.values(*_query_field_names)
)
@@ -151,11 +151,11 @@ class JobsViewSet(viewsets.ReadOnlyModelViewSet):
def get_serializer_context(self):
option_collection_map = OptionCollection.objects.get_option_collection_map()
- return {'option_collection_map': option_collection_map}
+ return {"option_collection_map": option_collection_map}
def list(self, request, *args, **kwargs):
resp = super().list(request, *args, **kwargs)
- resp.data['job_property_names'] = self._output_field_names
+ resp.data["job_property_names"] = self._output_field_names
return Response(resp.data)
@@ -167,57 +167,57 @@ class JobsProjectViewSet(viewsets.ViewSet):
# data that we want to do select_related on when returning job objects
# (so we don't have a zillion db queries)
_default_select_related = [
- 'build_platform',
- 'job_type',
- 'job_group',
- 'machine_platform',
- 'machine',
- 'signature',
- 'repository',
- 'taskcluster_metadata',
+ "build_platform",
+ "job_type",
+ "job_group",
+ "machine_platform",
+ "machine",
+ "signature",
+ "repository",
+ "taskcluster_metadata",
]
_property_query_mapping = [
- ('build_architecture', 'build_platform__architecture', None),
- ('build_os', 'build_platform__os_name', None),
- ('build_platform', 'build_platform__platform', None),
- ('build_platform_id', 'build_platform_id', None),
- ('build_system_type', 'signature__build_system_type', None),
- ('end_timestamp', 'end_time', to_timestamp),
- ('failure_classification_id', 'failure_classification_id', None),
- ('id', 'id', None),
- ('job_group_description', 'job_group__description', None),
- ('job_group_id', 'job_group_id', None),
- ('job_group_name', 'job_group__name', None),
- ('job_group_symbol', 'job_group__symbol', None),
- ('job_guid', 'guid', None),
- ('job_type_description', 'job_type__description', None),
- ('job_type_id', 'job_type_id', None),
- ('job_type_name', 'job_type__name', None),
- ('job_type_symbol', 'job_type__symbol', None),
- ('last_modified', 'last_modified', None),
- ('machine_name', 'machine__name', None),
- ('machine_platform_architecture', 'machine_platform__architecture', None),
- ('machine_platform_os', 'machine_platform__os_name', None),
- ('option_collection_hash', 'option_collection_hash', None),
- ('platform', 'machine_platform__platform', None),
- ('push_id', 'push_id', None),
- ('reason', 'reason', None),
- ('ref_data_name', 'signature__name', None),
- ('result', 'result', None),
- ('result_set_id', 'push_id', None),
- ('signature', 'signature__signature', None),
- ('start_timestamp', 'start_time', to_timestamp),
- ('state', 'state', None),
- ('submit_timestamp', 'submit_time', to_timestamp),
- ('tier', 'tier', None),
- ('who', 'who', None),
- ('task_id', 'taskcluster_metadata__task_id', None),
- ('retry_id', 'taskcluster_metadata__retry_id', None),
+ ("build_architecture", "build_platform__architecture", None),
+ ("build_os", "build_platform__os_name", None),
+ ("build_platform", "build_platform__platform", None),
+ ("build_platform_id", "build_platform_id", None),
+ ("build_system_type", "signature__build_system_type", None),
+ ("end_timestamp", "end_time", to_timestamp),
+ ("failure_classification_id", "failure_classification_id", None),
+ ("id", "id", None),
+ ("job_group_description", "job_group__description", None),
+ ("job_group_id", "job_group_id", None),
+ ("job_group_name", "job_group__name", None),
+ ("job_group_symbol", "job_group__symbol", None),
+ ("job_guid", "guid", None),
+ ("job_type_description", "job_type__description", None),
+ ("job_type_id", "job_type_id", None),
+ ("job_type_name", "job_type__name", None),
+ ("job_type_symbol", "job_type__symbol", None),
+ ("last_modified", "last_modified", None),
+ ("machine_name", "machine__name", None),
+ ("machine_platform_architecture", "machine_platform__architecture", None),
+ ("machine_platform_os", "machine_platform__os_name", None),
+ ("option_collection_hash", "option_collection_hash", None),
+ ("platform", "machine_platform__platform", None),
+ ("push_id", "push_id", None),
+ ("reason", "reason", None),
+ ("ref_data_name", "signature__name", None),
+ ("result", "result", None),
+ ("result_set_id", "push_id", None),
+ ("signature", "signature__signature", None),
+ ("start_timestamp", "start_time", to_timestamp),
+ ("state", "state", None),
+ ("submit_timestamp", "submit_time", to_timestamp),
+ ("tier", "tier", None),
+ ("who", "who", None),
+ ("task_id", "taskcluster_metadata__task_id", None),
+ ("retry_id", "taskcluster_metadata__retry_id", None),
]
_option_collection_hash_idx = [pq[0] for pq in _property_query_mapping].index(
- 'option_collection_hash'
+ "option_collection_hash"
)
def _get_job_list_response(self, job_qs, offset, count, return_type):
@@ -244,11 +244,11 @@ def _get_job_list_response(self, job_qs, offset, count, return_type):
values[i] = func(values[i])
# append results differently depending on if we are returning
# a dictionary or a list
- if return_type == 'dict':
+ if return_type == "dict":
results.append(
dict(
zip(
- [pq[0] for pq in self._property_query_mapping] + ['platform_option'],
+ [pq[0] for pq in self._property_query_mapping] + ["platform_option"],
values + [platform_option],
)
)
@@ -256,12 +256,12 @@ def _get_job_list_response(self, job_qs, offset, count, return_type):
else:
results.append(values + [platform_option])
- response_dict = {'results': results}
- if return_type == 'list':
+ response_dict = {"results": results}
+ if return_type == "list":
response_dict.update(
{
- 'job_property_names': [pq[0] for pq in self._property_query_mapping]
- + ['platform_option']
+ "job_property_names": [pq[0] for pq in self._property_query_mapping]
+ + ["platform_option"]
}
)
@@ -285,20 +285,20 @@ def retrieve(self, request, project, pk=None):
resp["resource_uri"] = reverse("jobs-detail", kwargs={"project": project, "pk": pk})
resp["logs"] = []
- for name, url in JobLog.objects.filter(job=job).values_list('name', 'url'):
- resp["logs"].append({'name': name, 'url': url})
+ for name, url in JobLog.objects.filter(job=job).values_list("name", "url"):
+ resp["logs"].append({"name": name, "url": url})
platform_option = job.get_platform_option()
if platform_option:
resp["platform_option"] = platform_option
try:
- resp['task_id'] = job.taskcluster_metadata.task_id
- resp['retry_id'] = job.taskcluster_metadata.retry_id
+ resp["task_id"] = job.taskcluster_metadata.task_id
+ resp["retry_id"] = job.taskcluster_metadata.retry_id
             # Keep for backwards compatibility
- resp['taskcluster_metadata'] = {
- 'task_id': job.taskcluster_metadata.task_id,
- 'retry_id': job.taskcluster_metadata.retry_id,
+ resp["taskcluster_metadata"] = {
+ "task_id": job.taskcluster_metadata.task_id,
+ "retry_id": job.taskcluster_metadata.retry_id,
}
except ObjectDoesNotExist:
pass
@@ -320,15 +320,15 @@ def list(self, request, project):
# various hacks to ensure API backwards compatibility
for param_key, param_value in request.query_params.items():
# replace `result_set_id` with `push_id`
- if param_key.startswith('result_set_id'):
- new_param_key = param_key.replace('result_set_id', 'push_id')
+ if param_key.startswith("result_set_id"):
+ new_param_key = param_key.replace("result_set_id", "push_id")
filter_params[new_param_key] = param_value
# convert legacy timestamp parameters to time ones
- elif param_key in ['submit_timestamp', 'start_timestamp', 'end_timestamp']:
- new_param_key = param_key.replace('timestamp', 'time')
+ elif param_key in ["submit_timestamp", "start_timestamp", "end_timestamp"]:
+ new_param_key = param_key.replace("timestamp", "time")
filter_params[new_param_key] = datetime.datetime.fromtimestamp(float(param_value))
# sanity check 'last modified'
- elif param_key.startswith('last_modified'):
+ elif param_key.startswith("last_modified"):
try:
parser.parse(param_value)
except ValueError:
@@ -371,7 +371,7 @@ def list(self, request, project):
return Response(response_body)
# TODO remove
- @action(detail=True, methods=['get'])
+ @action(detail=True, methods=["get"])
def text_log_steps(self, request, project, pk=None):
"""
Gets a list of steps associated with this job
@@ -383,14 +383,14 @@ def text_log_steps(self, request, project, pk=None):
textlog_steps = (
TextLogStep.objects.filter(job=job)
- .order_by('started_line_number')
- .prefetch_related('errors')
+ .order_by("started_line_number")
+ .prefetch_related("errors")
)
return Response(
serializers.TextLogStepSerializer(textlog_steps, many=True, read_only=True).data
)
- @action(detail=True, methods=['get'])
+ @action(detail=True, methods=["get"])
def text_log_errors(self, request, project, pk=None):
"""
Gets a list of error lines associated with this job
@@ -403,13 +403,13 @@ def text_log_errors(self, request, project, pk=None):
TextLogError.objects.filter(job=job)
.select_related("_metadata", "_metadata__failure_line")
.prefetch_related("classified_failures", "matches")
- .order_by('id')
+ .order_by("id")
)
return Response(
serializers.TextLogErrorSerializer(textlog_errors, many=True, read_only=True).data
)
- @action(detail=True, methods=['get'])
+ @action(detail=True, methods=["get"])
def bug_suggestions(self, request, project, pk=None):
"""
Gets a set of bug suggestions for this job
@@ -421,7 +421,7 @@ def bug_suggestions(self, request, project, pk=None):
return Response(get_error_summary(job))
- @action(detail=True, methods=['get'])
+ @action(detail=True, methods=["get"])
def similar_jobs(self, request, project, pk=None):
"""
Get a list of jobs similar to the one selected.
@@ -456,12 +456,12 @@ def similar_jobs(self, request, project, pk=None):
{k: v for (k, v) in filter_params.items()},
queryset=Job.objects.filter(job_type_id=job.job_type_id, repository=repository)
.exclude(id=job.id)
- .exclude(result=('success' if nosuccess is not False else None))
+ .exclude(result=("success" if nosuccess is not False else None))
.select_related(*self._default_select_related),
).qs
# similar jobs we want in descending order from most recent
- jobs = jobs.order_by('-push_id', '-start_time')
+ jobs = jobs.order_by("-push_id", "-start_time")
response_body = self._get_job_list_response(jobs, offset, count, return_type)
response_body["meta"] = dict(offset=offset, count=count, repository=project)
diff --git a/treeherder/webapp/api/note.py b/treeherder/webapp/api/note.py
index 53242bb29f2..94c6d40a04e 100644
--- a/treeherder/webapp/api/note.py
+++ b/treeherder/webapp/api/note.py
@@ -42,7 +42,7 @@ def list(self, request, project):
job_id -- Mandatory filter indicating which job these notes belong to.
"""
- job_id = request.query_params.get('job_id')
+ job_id = request.query_params.get("job_id")
if not job_id:
raise ParseError(detail="The job_id parameter is mandatory for this endpoint")
try:
@@ -54,7 +54,7 @@ def list(self, request, project):
serializer = JobNoteSerializer(JobNote.objects.filter(job=job), many=True)
return Response(serializer.data)
- @action(detail=False, methods=['get'])
+ @action(detail=False, methods=["get"])
def push_notes(self, request, project):
"""
GET method to get all classifications for a push revision with some
@@ -63,13 +63,13 @@ def push_notes(self, request, project):
:param project: Repository of the revision
:return:
"""
- revision = request.query_params.get('revision')
+ revision = request.query_params.get("revision")
if not revision:
raise ParseError(detail="The revision parameter is mandatory for this endpoint")
push = Push.objects.get(repository__name=project, revision=revision)
notes = JobNote.objects.filter(job__push=push).select_related(
- 'job', 'job__push', 'job__job_type', 'job__taskcluster_metadata'
+ "job", "job__push", "job__job_type", "job__taskcluster_metadata"
)
serializer = JobNoteDetailSerializer(notes, many=True)
return Response(serializer.data)
@@ -78,21 +78,21 @@ def create(self, request, project):
"""
POST method implementation
"""
- current_job = Job.objects.get(repository__name=project, id=int(request.data['job_id']))
- fc_id = int(request.data['failure_classification_id'])
+ current_job = Job.objects.get(repository__name=project, id=int(request.data["job_id"]))
+ fc_id = int(request.data["failure_classification_id"])
revision = None
- if 'text' in request.data:
- revision = request.data['text']
+ if "text" in request.data:
+ revision = request.data["text"]
JobNote.objects.create(
job=current_job,
failure_classification_id=fc_id,
user=request.user,
- text=request.data.get('text', ''),
+ text=request.data.get("text", ""),
)
if fc_id == 2: # this is for fixed_by_commit (backout | follow_up_commit)
# remove cached failure line counts
- line_cache_key = 'error_lines'
+ line_cache_key = "error_lines"
line_cache = cache.get(line_cache_key)
date = current_job.submit_time.date().isoformat()
if line_cache and date in line_cache.keys():
@@ -110,13 +110,13 @@ def create(self, request, project):
cache.set(line_cache_key, line_cache, LINE_CACHE_TIMEOUT)
except Exception as e:
logger.error(
- 'error caching error_lines for job %s: %s',
+ "error caching error_lines for job %s: %s",
current_job.id,
e,
exc_info=True,
)
- return Response({'message': 'note stored for job {0}'.format(request.data['job_id'])})
+ return Response({"message": "note stored for job {0}".format(request.data["job_id"])})
def destroy(self, request, project, pk=None):
"""
diff --git a/treeherder/webapp/api/pagination.py b/treeherder/webapp/api/pagination.py
index 86b96f1defd..3259b6e4f36 100644
--- a/treeherder/webapp/api/pagination.py
+++ b/treeherder/webapp/api/pagination.py
@@ -4,7 +4,7 @@
class IdPagination(pagination.CursorPagination):
- ordering = '-id'
+ ordering = "-id"
page_size = 100
@@ -21,6 +21,6 @@ def count(self):
class JobPagination(pagination.PageNumberPagination):
page_size = 2000
- page_size_query_param = 'count'
+ page_size_query_param = "count"
max_page_size = 2000
django_paginator_class = CustomPaginator
diff --git a/treeherder/webapp/api/perfcompare_utils.py b/treeherder/webapp/api/perfcompare_utils.py
index bbf8eb9c100..89d3d3ad80f 100644
--- a/treeherder/webapp/api/perfcompare_utils.py
+++ b/treeherder/webapp/api/perfcompare_utils.py
@@ -7,7 +7,7 @@
""" Constants """
-NOISE_METRIC_HEADER = 'noise metric'
+NOISE_METRIC_HEADER = "noise metric"
"""
Default stddev is used for get_ttest_value if both sets have only a single value - 15%.
 Should be a rare case and it's unreliable, but at least we have something.
@@ -16,37 +16,37 @@
T_VALUE_CARE_MIN = 3 # Anything below this is "low" in confidence
T_VALUE_CONFIDENCE = 5 # Anything above this is "high" in confidence
PERFHERDER_TIMERANGES = [
- {'value': 86400, 'text': 'Last day'},
- {'value': 86400 * 2, 'text': 'Last 2 days'},
- {'value': 604800, 'text': 'Last 7 days'},
- {'value': 1209600, 'text': 'Last 14 days'},
- {'value': 2592000, 'text': 'Last 30 days'},
- {'value': 5184000, 'text': 'Last 60 days'},
- {'value': 7776000, 'text': 'Last 90 days'},
- {'value': 31536000, 'text': 'Last year'},
+ {"value": 86400, "text": "Last day"},
+ {"value": 86400 * 2, "text": "Last 2 days"},
+ {"value": 604800, "text": "Last 7 days"},
+ {"value": 1209600, "text": "Last 14 days"},
+ {"value": 2592000, "text": "Last 30 days"},
+ {"value": 5184000, "text": "Last 60 days"},
+ {"value": 7776000, "text": "Last 90 days"},
+ {"value": 31536000, "text": "Last year"},
]
""" Helpers """
def get_test_suite(suite, test):
- return suite if test == '' or test == suite else '{} {}'.format(suite, test)
+ return suite if test == "" or test == suite else "{} {}".format(suite, test)
def get_header_name(extra_options, option_name, test_suite):
- name = '{} {} {}'.format(test_suite, option_name, extra_options)
+ name = "{} {} {}".format(test_suite, option_name, extra_options)
return name
def get_sig_identifier(header, platform):
- return '{} {}'.format(header, platform)
+ return "{} {}".format(header, platform)
def get_option_collection_map():
- option_collection = OptionCollection.objects.select_related('option').values(
- 'id', 'option__name'
+ option_collection = OptionCollection.objects.select_related("option").values(
+ "id", "option__name"
)
- option_collection_map = {item['id']: item['option__name'] for item in list(option_collection)}
+ option_collection_map = {item["id"]: item["option__name"] for item in list(option_collection)}
return option_collection_map
@@ -149,13 +149,13 @@ def get_abs_ttest_value(control_values, test_values):
def get_confidence_text(abs_tvalue):
if abs_tvalue == 0 or abs_tvalue is None:
- return ''
+ return ""
if abs_tvalue < T_VALUE_CARE_MIN:
- confidence_text = 'Low'
+ confidence_text = "Low"
elif abs_tvalue < T_VALUE_CONFIDENCE:
- confidence_text = 'Medium'
+ confidence_text = "Medium"
else:
- confidence_text = 'High'
+ confidence_text = "High"
return confidence_text
@@ -204,21 +204,21 @@ def more_runs_are_needed(is_complete, is_confident, base_runs_count):
def get_class_name(new_is_better, base_avg_value, new_avg_value, abs_t_value):
# Returns a class name, if any, based on a relative change in the absolute value
if not base_avg_value or not new_avg_value:
- return ''
+ return ""
ratio = new_avg_value / base_avg_value
if ratio < 1:
ratio = 1 / ratio # Direction agnostic and always >= 1
if ratio < 1.02 or abs_t_value < T_VALUE_CARE_MIN:
- return ''
+ return ""
if abs_t_value < T_VALUE_CONFIDENCE:
if new_is_better:
- return ''
- return 'warning'
+ return ""
+ return "warning"
if new_is_better:
- return 'success'
+ return "success"
else:
- return 'danger'
+ return "danger"
diff --git a/treeherder/webapp/api/performance_data.py b/treeherder/webapp/api/performance_data.py
index 45dde17cf16..e26d72791f8 100644
--- a/treeherder/webapp/api/performance_data.py
+++ b/treeherder/webapp/api/performance_data.py
@@ -56,20 +56,20 @@ def list(self, request, project):
repository = models.Repository.objects.get(name=project)
signature_data = PerformanceSignature.objects.filter(repository=repository).select_related(
- 'parent_signature__signature_hash', 'option_collection', 'platform'
+ "parent_signature__signature_hash", "option_collection", "platform"
)
- parent_signature_hashes = request.query_params.getlist('parent_signature')
+ parent_signature_hashes = request.query_params.getlist("parent_signature")
if parent_signature_hashes:
parent_signatures = PerformanceSignature.objects.filter(
repository=repository, signature_hash__in=parent_signature_hashes
)
signature_data = signature_data.filter(parent_signature__in=parent_signatures)
- if not int(request.query_params.get('subtests', True)):
+ if not int(request.query_params.get("subtests", True)):
signature_data = signature_data.filter(parent_signature__isnull=True)
- signature_ids = request.query_params.getlist('id')
+ signature_ids = request.query_params.getlist("id")
if signature_ids:
try:
signature_data = signature_data.filter(id__in=map(int, signature_ids))
@@ -79,17 +79,17 @@ def list(self, request, project):
status=HTTP_400_BAD_REQUEST,
)
- signature_hashes = request.query_params.getlist('signature')
+ signature_hashes = request.query_params.getlist("signature")
if signature_hashes:
signature_data = signature_data.filter(signature_hash__in=signature_hashes)
- frameworks = request.query_params.getlist('framework')
+ frameworks = request.query_params.getlist("framework")
if frameworks:
signature_data = signature_data.filter(framework__in=frameworks)
- interval = request.query_params.get('interval')
- start_date = request.query_params.get('start_date') # YYYY-MM-DDTHH:MM:SS
- end_date = request.query_params.get('end_date') # YYYY-MM-DDTHH:MM:SS
+ interval = request.query_params.get("interval")
+ start_date = request.query_params.get("start_date") # YYYY-MM-DDTHH:MM:SS
+ end_date = request.query_params.get("end_date") # YYYY-MM-DDTHH:MM:SS
if interval and (start_date or end_date):
return Response(
{"message": "Provide either interval only -or- start (and end) date"},
@@ -108,7 +108,7 @@ def list(self, request, project):
if end_date:
signature_data = signature_data.filter(last_updated__lte=end_date)
- platform = request.query_params.get('platform')
+ platform = request.query_params.get("platform")
if platform:
platforms = models.MachinePlatform.objects.filter(platform=platform)
signature_data = signature_data.filter(platform__in=platforms)
@@ -131,56 +131,56 @@ def list(self, request, project):
parent_signature_hash,
should_alert,
) in signature_data.values_list(
- 'id',
- 'signature_hash',
- 'option_collection__option_collection_hash',
- 'platform__platform',
- 'framework',
- 'suite',
- 'test',
- 'application',
- 'lower_is_better',
- 'extra_options',
- 'measurement_unit',
- 'has_subtests',
- 'tags',
- 'parent_signature__signature_hash',
- 'should_alert',
+ "id",
+ "signature_hash",
+ "option_collection__option_collection_hash",
+ "platform__platform",
+ "framework",
+ "suite",
+ "test",
+ "application",
+ "lower_is_better",
+ "extra_options",
+ "measurement_unit",
+ "has_subtests",
+ "tags",
+ "parent_signature__signature_hash",
+ "should_alert",
).distinct():
signature_map[id] = signature_props = {
- 'id': id,
- 'signature_hash': signature_hash,
- 'framework_id': framework,
- 'option_collection_hash': option_collection_hash,
- 'machine_platform': platform,
- 'suite': suite,
- 'should_alert': should_alert,
+ "id": id,
+ "signature_hash": signature_hash,
+ "framework_id": framework,
+ "option_collection_hash": option_collection_hash,
+ "machine_platform": platform,
+ "suite": suite,
+ "should_alert": should_alert,
}
if not lower_is_better:
# almost always true, save some bandwidth by assuming that by
# default
- signature_props['lower_is_better'] = False
+ signature_props["lower_is_better"] = False
if test:
# test may be empty in case of a summary test, leave it empty
# then
- signature_props['test'] = test
+ signature_props["test"] = test
if application:
- signature_props['application'] = application
+ signature_props["application"] = application
if has_subtests:
- signature_props['has_subtests'] = True
+ signature_props["has_subtests"] = True
if tags:
# tags stored as charField but api returns as list
- signature_props['tags'] = tags.split(' ')
+ signature_props["tags"] = tags.split(" ")
if parent_signature_hash:
# this value is often null, save some bandwidth by excluding
# it if not present
- signature_props['parent_signature'] = parent_signature_hash
+ signature_props["parent_signature"] = parent_signature_hash
if extra_options:
# extra_options stored as charField but api returns as list
- signature_props['extra_options'] = extra_options.split(' ')
+ signature_props["extra_options"] = extra_options.split(" ")
if measurement_unit:
- signature_props['measurement_unit'] = measurement_unit
+ signature_props["measurement_unit"] = measurement_unit
return Response(signature_map)
@@ -192,7 +192,7 @@ class PerformancePlatformViewSet(viewsets.ViewSet):
def list(self, request, project):
signature_data = PerformanceSignature.objects.filter(repository__name=project)
- interval = request.query_params.get('interval')
+ interval = request.query_params.get("interval")
if interval:
signature_data = signature_data.filter(
last_updated__gte=datetime.datetime.utcfromtimestamp(
@@ -200,18 +200,18 @@ def list(self, request, project):
)
)
- frameworks = request.query_params.getlist('framework')
+ frameworks = request.query_params.getlist("framework")
if frameworks:
signature_data = signature_data.filter(framework__in=frameworks)
- return Response(signature_data.values_list('platform__platform', flat=True).distinct())
+ return Response(signature_data.values_list("platform__platform", flat=True).distinct())
class PerformanceFrameworkViewSet(viewsets.ReadOnlyModelViewSet):
queryset = PerformanceFramework.objects.filter(enabled=True)
serializer_class = PerformanceFrameworkSerializer
filter_backends = [filters.OrderingFilter]
- ordering = 'id'
+ ordering = "id"
class PerformanceDatumViewSet(viewsets.ViewSet):
@@ -237,21 +237,21 @@ def list(self, request, project):
if not (signature_ids or signature_hashes or push_ids or job_ids):
raise exceptions.ValidationError(
- 'Need to specify either ' 'signature_id, signatures, ' 'push_id, or job_id'
+ "Need to specify either " "signature_id, signatures, " "push_id, or job_id"
)
if signature_ids and signature_hashes:
raise exceptions.ValidationError(
- 'Can\'t specify both signature_id ' 'and signatures in same query'
+ "Can't specify both signature_id " "and signatures in same query"
)
datums = PerformanceDatum.objects.filter(repository=repository).select_related(
- 'signature', 'push'
+ "signature", "push"
)
if signature_hashes:
signature_ids = PerformanceSignature.objects.filter(
repository=repository, signature_hash__in=signature_hashes
- ).values_list('id', flat=True)
+ ).values_list("id", flat=True)
datums = datums.filter(signature__id__in=list(signature_ids))
elif signature_ids:
@@ -261,13 +261,13 @@ def list(self, request, project):
if job_ids:
datums = datums.filter(job_id__in=job_ids)
- frameworks = request.query_params.getlist('framework')
+ frameworks = request.query_params.getlist("framework")
if frameworks:
datums = datums.filter(signature__framework__in=frameworks)
- interval = request.query_params.get('interval')
- start_date = request.query_params.get('start_date') # 'YYYY-MM-DDTHH:MM:SS
- end_date = request.query_params.get('end_date') # 'YYYY-MM-DDTHH:MM:SS'
+ interval = request.query_params.get("interval")
+ start_date = request.query_params.get("start_date") # 'YYYY-MM-DDTHH:MM:SS
+ end_date = request.query_params.get("end_date") # 'YYYY-MM-DDTHH:MM:SS'
if interval and (start_date or end_date):
return Response(
{"message": "Provide either interval only -or- start (and end) date"},
@@ -288,14 +288,14 @@ def list(self, request, project):
ret, seen_push_ids = defaultdict(list), defaultdict(set)
values_list = datums.values_list(
- 'id',
- 'signature_id',
- 'signature__signature_hash',
- 'job_id',
- 'push_id',
- 'push_timestamp',
- 'value',
- 'push__revision',
+ "id",
+ "signature_id",
+ "signature__signature_hash",
+ "job_id",
+ "push_id",
+ "push_timestamp",
+ "value",
+ "push__revision",
)
for (
id,
@@ -317,13 +317,13 @@ def list(self, request, project):
if should_include_datum:
ret[signature_hash].append(
{
- 'id': id,
- 'signature_id': signature_id,
- 'job_id': job_id,
- 'push_id': push_id,
- 'revision': push__revision,
- 'push_timestamp': int(time.mktime(push_timestamp.timetuple())),
- 'value': round(value, 2), # round to 2 decimal places
+ "id": id,
+ "signature_id": signature_id,
+ "job_id": job_id,
+ "push_id": push_id,
+ "revision": push__revision,
+ "push_timestamp": int(time.mktime(push_timestamp.timetuple())),
+ "value": round(value, 2), # round to 2 decimal places
}
)
@@ -331,27 +331,27 @@ def list(self, request, project):
class AlertSummaryPagination(pagination.PageNumberPagination):
- ordering = ('-created', '-id')
- page_size_query_param = 'limit'
+ ordering = ("-created", "-id")
+ page_size_query_param = "limit"
max_page_size = 100
page_size = 10
class PerformanceAlertSummaryFilter(django_filters.FilterSet):
- id = django_filters.NumberFilter(field_name='id')
- status = django_filters.NumberFilter(field_name='status')
- framework = django_filters.NumberFilter(field_name='framework')
- repository = django_filters.NumberFilter(field_name='repository')
- alerts__series_signature = django_filters.NumberFilter(field_name='alerts__series_signature')
- filter_text = django_filters.CharFilter(method='_filter_text')
- hide_improvements = django_filters.BooleanFilter(method='_hide_improvements')
- hide_related_and_invalid = django_filters.BooleanFilter(method='_hide_related_and_invalid')
- with_assignee = django_filters.CharFilter(method='_with_assignee')
- timerange = django_filters.NumberFilter(method='_timerange')
+ id = django_filters.NumberFilter(field_name="id")
+ status = django_filters.NumberFilter(field_name="status")
+ framework = django_filters.NumberFilter(field_name="framework")
+ repository = django_filters.NumberFilter(field_name="repository")
+ alerts__series_signature = django_filters.NumberFilter(field_name="alerts__series_signature")
+ filter_text = django_filters.CharFilter(method="_filter_text")
+ hide_improvements = django_filters.BooleanFilter(method="_hide_improvements")
+ hide_related_and_invalid = django_filters.BooleanFilter(method="_hide_related_and_invalid")
+ with_assignee = django_filters.CharFilter(method="_with_assignee")
+ timerange = django_filters.NumberFilter(method="_timerange")
def _filter_text(self, queryset, name, value):
- sep = Value(' ')
- words = value.split(' ')
+ sep = Value(" ")
+ words = value.split(" ")
contains_all_words = [
Q(full_name__contains=word) | Q(related_full_name__contains=word) for word in words
@@ -362,43 +362,43 @@ def _filter_text(self, queryset, name, value):
filtered_summaries = (
queryset.annotate(
full_name=Concat(
- 'alerts__series_signature__suite',
+ "alerts__series_signature__suite",
sep,
- 'alerts__series_signature__test',
+ "alerts__series_signature__test",
sep,
- 'alerts__series_signature__platform__platform',
+ "alerts__series_signature__platform__platform",
sep,
- 'alerts__series_signature__extra_options',
+ "alerts__series_signature__extra_options",
sep,
- 'bug_number',
+ "bug_number",
sep,
- 'push__revision',
+ "push__revision",
output_field=CharField(),
),
related_full_name=Concat(
- 'related_alerts__series_signature__suite',
+ "related_alerts__series_signature__suite",
sep,
- 'related_alerts__series_signature__test',
+ "related_alerts__series_signature__test",
sep,
- 'related_alerts__series_signature__platform__platform',
+ "related_alerts__series_signature__platform__platform",
sep,
- 'related_alerts__series_signature__extra_options',
+ "related_alerts__series_signature__extra_options",
sep,
- 'bug_number',
+ "bug_number",
sep,
- 'push__revision',
+ "push__revision",
output_field=CharField(),
),
)
.filter(*contains_all_words)
- .values('id')
+ .values("id")
.distinct()
)
return queryset.filter(id__in=Subquery(filtered_summaries))
def _hide_improvements(self, queryset, name, value):
- return queryset.annotate(total_regressions=Count('alerts__is_regression')).filter(
+ return queryset.annotate(total_regressions=Count("alerts__is_regression")).filter(
alerts__is_regression=True, total_regressions__gte=1
)
@@ -422,16 +422,16 @@ def _timerange(self, queryset, name, value):
class Meta:
model = PerformanceAlertSummary
fields = [
- 'id',
- 'status',
- 'framework',
- 'repository',
- 'alerts__series_signature',
- 'filter_text',
- 'hide_improvements',
- 'hide_related_and_invalid',
- 'with_assignee',
- 'timerange',
+ "id",
+ "status",
+ "framework",
+ "repository",
+ "alerts__series_signature",
+ "filter_text",
+ "hide_improvements",
+ "hide_related_and_invalid",
+ "with_assignee",
+ "timerange",
]
@@ -439,29 +439,29 @@ class PerformanceTagViewSet(viewsets.ReadOnlyModelViewSet):
queryset = PerformanceTag.objects.all()
serializer_class = PerformanceTagSerializer
filter_backends = [filters.OrderingFilter]
- ordering = 'id'
+ ordering = "id"
class PerformanceAlertSummaryViewSet(viewsets.ModelViewSet):
"""ViewSet for the performance alert summary model"""
queryset = (
- PerformanceAlertSummary.objects.filter(repository__active_status='active')
- .select_related('repository', 'push')
+ PerformanceAlertSummary.objects.filter(repository__active_status="active")
+ .select_related("repository", "push")
.prefetch_related(
- 'alerts',
- 'alerts__classifier',
- 'alerts__series_signature',
- 'alerts__series_signature__platform',
- 'alerts__series_signature__option_collection',
- 'alerts__series_signature__option_collection__option',
- 'related_alerts',
- 'related_alerts__classifier',
- 'related_alerts__series_signature',
- 'related_alerts__series_signature__platform',
- 'related_alerts__series_signature__option_collection',
- 'related_alerts__series_signature__option_collection__option',
- 'performance_tags',
+ "alerts",
+ "alerts__classifier",
+ "alerts__series_signature",
+ "alerts__series_signature__platform",
+ "alerts__series_signature__option_collection",
+ "alerts__series_signature__option_collection__option",
+ "related_alerts",
+ "related_alerts__classifier",
+ "related_alerts__series_signature",
+ "related_alerts__series_signature__platform",
+ "related_alerts__series_signature__option_collection",
+ "related_alerts__series_signature__option_collection__option",
+ "performance_tags",
)
)
permission_classes = (IsStaffOrReadOnly,)
@@ -470,12 +470,12 @@ class PerformanceAlertSummaryViewSet(viewsets.ModelViewSet):
filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
filterset_class = PerformanceAlertSummaryFilter
- ordering = ('-created', '-id')
+ ordering = ("-created", "-id")
pagination_class = AlertSummaryPagination
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.queryset)
- pk = request.query_params.get('id')
+ pk = request.query_params.get("id")
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
@@ -506,17 +506,17 @@ def list(self, request, *args, **kwargs):
def create(self, request, *args, **kwargs):
data = request.data
- if data['push_id'] == data['prev_push_id']:
+ if data["push_id"] == data["prev_push_id"]:
return Response(
"IDs of push & previous push cannot be identical", status=HTTP_400_BAD_REQUEST
)
alert_summary, _ = PerformanceAlertSummary.objects.get_or_create(
- repository_id=data['repository_id'],
- framework=PerformanceFramework.objects.get(id=data['framework_id']),
- push_id=data['push_id'],
- prev_push_id=data['prev_push_id'],
- defaults={'manually_created': True, 'created': datetime.datetime.now()},
+ repository_id=data["repository_id"],
+ framework=PerformanceFramework.objects.get(id=data["framework_id"]),
+ push_id=data["push_id"],
+ prev_push_id=data["prev_push_id"],
+ defaults={"manually_created": True, "created": datetime.datetime.now()},
)
return Response({"alert_summary_id": alert_summary.id})
@@ -536,24 +536,24 @@ class PerformanceAlertViewSet(viewsets.ModelViewSet):
serializer_class = PerformanceAlertSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
- filterset_fields = ['id']
- ordering = '-id'
+ filterset_fields = ["id"]
+ ordering = "-id"
class AlertPagination(pagination.CursorPagination):
- ordering = '-id'
+ ordering = "-id"
page_size = 10
pagination_class = AlertPagination
def update(self, request, *args, **kwargs):
- new_push_id = request.data.get('push_id')
- new_prev_push_id = request.data.get('prev_push_id')
+ new_push_id = request.data.get("push_id")
+ new_prev_push_id = request.data.get("prev_push_id")
if new_push_id is None and new_prev_push_id is None:
- request.data['classifier'] = request.user.username
+ request.data["classifier"] = request.user.username
return super().update(request, *args, **kwargs)
else:
- alert = PerformanceAlert.objects.get(pk=kwargs['pk'])
+ alert = PerformanceAlert.objects.get(pk=kwargs["pk"])
if all([new_push_id, new_prev_push_id]) and alert.summary.push.id != new_push_id:
return self.nudge(alert, new_push_id, new_prev_push_id)
@@ -561,14 +561,14 @@ def update(self, request, *args, **kwargs):
def create(self, request, *args, **kwargs):
data = request.data
- if 'summary_id' not in data or 'signature_id' not in data:
+ if "summary_id" not in data or "signature_id" not in data:
return Response(
{"message": "Summary and signature ids necessary " "to create alert"},
status=HTTP_400_BAD_REQUEST,
)
- summary = PerformanceAlertSummary.objects.get(id=data['summary_id'])
- signature = PerformanceSignature.objects.get(id=data['signature_id'])
+ summary = PerformanceAlertSummary.objects.get(id=data["summary_id"])
+ signature = PerformanceSignature.objects.get(id=data["signature_id"])
alert_properties = self.calculate_alert_properties(summary, signature)
@@ -576,13 +576,13 @@ def create(self, request, *args, **kwargs):
summary=summary,
series_signature=signature,
defaults={
- 'is_regression': alert_properties.is_regression,
- 'manually_created': True,
- 'amount_pct': alert_properties.pct_change,
- 'amount_abs': alert_properties.delta,
- 'prev_value': alert_properties.prev_value,
- 'new_value': alert_properties.new_value,
- 't_value': 1000,
+ "is_regression": alert_properties.is_regression,
+ "manually_created": True,
+ "amount_pct": alert_properties.pct_change,
+ "amount_abs": alert_properties.delta,
+ "prev_value": alert_properties.prev_value,
+ "new_value": alert_properties.new_value,
+ "t_value": 1000,
},
)
alert.timestamp_first_triage().save()
@@ -599,13 +599,13 @@ def calculate_alert_properties(self, alert_summary, series_signature):
prev_data = PerformanceDatum.objects.filter(
signature=series_signature, push_timestamp__lte=alert_summary.prev_push.time
- ).order_by('-push_timestamp')
- prev_values = prev_data.values_list('value', flat=True)[:prev_range]
+ ).order_by("-push_timestamp")
+ prev_values = prev_data.values_list("value", flat=True)[:prev_range]
new_data = PerformanceDatum.objects.filter(
signature=series_signature, push_timestamp__gt=alert_summary.prev_push.time
- ).order_by('push_timestamp')
- new_values = new_data.values_list('value', flat=True)[:new_range]
+ ).order_by("push_timestamp")
+ new_values = new_data.values_list("value", flat=True)[:new_range]
if not prev_data or not new_data:
raise InsufficientAlertCreationData
@@ -619,21 +619,21 @@ def calculate_alert_properties(self, alert_summary, series_signature):
def nudge(self, alert, new_push_id, new_prev_push_id):
# Bug 1532230 disabled nudging because it broke links
         # Bug 1532283 will re-enable a better version of it
- raise exceptions.APIException('Nudging has been disabled', 400)
+ raise exceptions.APIException("Nudging has been disabled", 400)
class PerformanceBugTemplateViewSet(viewsets.ReadOnlyModelViewSet):
queryset = PerformanceBugTemplate.objects.all()
serializer_class = PerformanceBugTemplateSerializer
filter_backends = (django_filters.rest_framework.DjangoFilterBackend, filters.OrderingFilter)
- filterset_fields = ['framework']
+ filterset_fields = ["framework"]
class PerformanceIssueTrackerViewSet(viewsets.ReadOnlyModelViewSet):
queryset = IssueTracker.objects.all()
serializer_class = IssueTrackerSerializer
filter_backends = [filters.OrderingFilter]
- ordering = 'id'
+ ordering = "id"
class PerformanceSummary(generics.ListAPIView):
@@ -645,21 +645,21 @@ def list(self, request):
if not query_params.is_valid():
return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)
- startday = query_params.validated_data['startday']
- endday = query_params.validated_data['endday']
- revision = query_params.validated_data['revision']
- repository_name = query_params.validated_data['repository']
- interval = query_params.validated_data['interval']
- frameworks = query_params.validated_data['framework']
- parent_signature = query_params.validated_data['parent_signature']
- signature = query_params.validated_data['signature']
- no_subtests = query_params.validated_data['no_subtests']
- all_data = query_params.validated_data['all_data']
- no_retriggers = query_params.validated_data['no_retriggers']
- replicates = query_params.validated_data['replicates']
+ startday = query_params.validated_data["startday"]
+ endday = query_params.validated_data["endday"]
+ revision = query_params.validated_data["revision"]
+ repository_name = query_params.validated_data["repository"]
+ interval = query_params.validated_data["interval"]
+ frameworks = query_params.validated_data["framework"]
+ parent_signature = query_params.validated_data["parent_signature"]
+ signature = query_params.validated_data["signature"]
+ no_subtests = query_params.validated_data["no_subtests"]
+ all_data = query_params.validated_data["all_data"]
+ no_retriggers = query_params.validated_data["no_retriggers"]
+ replicates = query_params.validated_data["replicates"]
signature_data = PerformanceSignature.objects.select_related(
- 'framework', 'repository', 'platform', 'push', 'job'
+ "framework", "repository", "platform", "push", "job"
).filter(repository__name=repository_name)
# TODO deprecate signature hash support
@@ -687,29 +687,29 @@ def list(self, request):
# TODO signature_hash is being returned for legacy support - should be removed at some point
self.queryset = signature_data.values(
- 'framework_id',
- 'id',
- 'lower_is_better',
- 'has_subtests',
- 'extra_options',
- 'suite',
- 'signature_hash',
- 'platform__platform',
- 'test',
- 'option_collection_id',
- 'parent_signature_id',
- 'repository_id',
- 'tags',
- 'measurement_unit',
- 'application',
+ "framework_id",
+ "id",
+ "lower_is_better",
+ "has_subtests",
+ "extra_options",
+ "suite",
+ "signature_hash",
+ "platform__platform",
+ "test",
+ "option_collection_id",
+ "parent_signature_id",
+ "repository_id",
+ "tags",
+ "measurement_unit",
+ "application",
)
- signature_ids = [item['id'] for item in list(self.queryset)]
+ signature_ids = [item["id"] for item in list(self.queryset)]
data = (
- PerformanceDatum.objects.select_related('push', 'repository', 'id')
+ PerformanceDatum.objects.select_related("push", "repository", "id")
.filter(signature_id__in=signature_ids, repository__name=repository_name)
- .order_by('job_id', 'id')
+ .order_by("job_id", "id")
)
if revision:
@@ -724,17 +724,17 @@ def list(self, request):
data = data.filter(push_timestamp__gt=startday, push_timestamp__lt=endday)
# more efficient than creating a join on option_collection and option
- option_collection = OptionCollection.objects.select_related('option').values(
- 'id', 'option__name'
+ option_collection = OptionCollection.objects.select_related("option").values(
+ "id", "option__name"
)
option_collection_map = {
- item['id']: item['option__name'] for item in list(option_collection)
+ item["id"]: item["option__name"] for item in list(option_collection)
}
if signature and all_data:
for item in self.queryset:
if replicates:
- item['data'] = list()
+ item["data"] = list()
for (
value,
job_id,
@@ -744,18 +744,18 @@ def list(self, request):
push_revision,
replicate_value,
) in data.values_list(
- 'value',
- 'job_id',
- 'id',
- 'push_id',
- 'push_timestamp',
- 'push__revision',
- 'performancedatumreplicate__value',
+ "value",
+ "job_id",
+ "id",
+ "push_id",
+ "push_timestamp",
+ "push__revision",
+ "performancedatumreplicate__value",
).order_by(
- 'push_timestamp', 'push_id', 'job_id'
+ "push_timestamp", "push_id", "job_id"
):
if replicate_value is not None:
- item['data'].append(
+ item["data"].append(
{
"value": replicate_value,
"job_id": job_id,
@@ -766,7 +766,7 @@ def list(self, request):
}
)
elif value is not None:
- item['data'].append(
+ item["data"].append(
{
"value": value,
"job_id": job_id,
@@ -777,19 +777,19 @@ def list(self, request):
}
)
else:
- item['data'] = data.values(
- 'value', 'job_id', 'id', 'push_id', 'push_timestamp', 'push__revision'
- ).order_by('push_timestamp', 'push_id', 'job_id')
+ item["data"] = data.values(
+ "value", "job_id", "id", "push_id", "push_timestamp", "push__revision"
+ ).order_by("push_timestamp", "push_id", "job_id")
- item['option_name'] = option_collection_map[item['option_collection_id']]
- item['repository_name'] = repository_name
+ item["option_name"] = option_collection_map[item["option_collection_id"]]
+ item["repository_name"] = repository_name
else:
grouped_values = defaultdict(list)
grouped_job_ids = defaultdict(list)
if replicates:
for signature_id, value, job_id, replicate_value in data.values_list(
- 'signature_id', 'value', 'job_id', 'performancedatumreplicate__value'
+ "signature_id", "value", "job_id", "performancedatumreplicate__value"
):
if replicate_value is not None:
grouped_values[signature_id].append(replicate_value)
@@ -799,7 +799,7 @@ def list(self, request):
grouped_job_ids[signature_id].append(job_id)
else:
for signature_id, value, job_id in data.values_list(
- 'signature_id', 'value', 'job_id'
+ "signature_id", "value", "job_id"
):
if value is not None:
grouped_values[signature_id].append(value)
@@ -807,10 +807,10 @@ def list(self, request):
# name field is created in the serializer
for item in self.queryset:
- item['values'] = grouped_values.get(item['id'], [])
- item['job_ids'] = grouped_job_ids.get(item['id'], [])
- item['option_name'] = option_collection_map[item['option_collection_id']]
- item['repository_name'] = repository_name
+ item["values"] = grouped_values.get(item["id"], [])
+ item["job_ids"] = grouped_job_ids.get(item["id"], [])
+ item["option_name"] = option_collection_map[item["option_collection_id"]]
+ item["repository_name"] = repository_name
serializer = self.get_serializer(self.queryset, many=True)
serialized_data = serializer.data
@@ -828,16 +828,16 @@ def _filter_out_retriggers(serialized_data: List[dict]) -> List[dict]:
for perf_summary in serialized_data:
retriggered_jobs, seen_push_id = set(), None
- for idx, datum in enumerate(perf_summary['data']):
- if seen_push_id == datum['push_id']:
+ for idx, datum in enumerate(perf_summary["data"]):
+ if seen_push_id == datum["push_id"]:
retriggered_jobs.add(idx)
else:
- seen_push_id = datum['push_id']
+ seen_push_id = datum["push_id"]
if retriggered_jobs:
- perf_summary['data'] = [
+ perf_summary["data"] = [
datum
- for idx, datum in enumerate(perf_summary['data'])
+ for idx, datum in enumerate(perf_summary["data"])
if idx not in retriggered_jobs
]
@@ -853,14 +853,14 @@ def list(self, request):
if not query_params.is_valid():
return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)
- alert_summary_id = query_params.validated_data['id']
+ alert_summary_id = query_params.validated_data["id"]
signature_ids = PerformanceAlertSummary.objects.filter(id=alert_summary_id).values_list(
- 'alerts__series_signature__id', 'related_alerts__series_signature__id'
+ "alerts__series_signature__id", "related_alerts__series_signature__id"
)
signature_ids = [id for id_set in signature_ids for id in id_set]
tasks = (
PerformanceDatum.objects.filter(signature__in=signature_ids)
- .values_list('job__job_type__name', flat=True)
+ .values_list("job__job_type__name", flat=True)
.order_by("job__job_type__name")
.distinct()
)
@@ -879,13 +879,13 @@ def list(self, request):
if not query_params.is_valid():
return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)
- base_rev = query_params.validated_data['base_revision']
- new_rev = query_params.validated_data['new_revision']
- base_repo_name = query_params.validated_data['base_repository']
- new_repo_name = query_params.validated_data['new_repository']
- interval = query_params.validated_data['interval']
- framework = query_params.validated_data['framework']
- no_subtests = query_params.validated_data['no_subtests']
+ base_rev = query_params.validated_data["base_revision"]
+ new_rev = query_params.validated_data["new_revision"]
+ base_repo_name = query_params.validated_data["base_repository"]
+ new_repo_name = query_params.validated_data["new_repository"]
+ interval = query_params.validated_data["interval"]
+ framework = query_params.validated_data["framework"]
+ no_subtests = query_params.validated_data["no_subtests"]
try:
new_push = models.Push.objects.get(revision=new_rev, repository__name=new_repo_name)
@@ -949,10 +949,10 @@ def list(self, request):
for platform in platforms:
sig_identifier = perfcompare_utils.get_sig_identifier(header, platform)
base_sig = base_signatures_map.get(sig_identifier, {})
- base_sig_id = base_sig.get('id', '')
+ base_sig_id = base_sig.get("id", "")
new_sig = new_signatures_map.get(sig_identifier, {})
- new_sig_id = new_sig.get('id', '')
- lower_is_better = base_sig.get('lower_is_better', '')
+ new_sig_id = new_sig.get("id", "")
+ lower_is_better = base_sig.get("lower_is_better", "")
is_empty = not (base_sig and new_sig)
if is_empty:
continue
@@ -977,9 +977,9 @@ def list(self, request):
)
confidence_text = perfcompare_utils.get_confidence_text(confidence)
sig_hash = (
- base_sig.get('signature_hash', '')
+ base_sig.get("signature_hash", "")
if base_sig
- else new_sig.get('signature_hash', '')
+ else new_sig.get("signature_hash", "")
)
delta_value = perfcompare_utils.get_delta_value(new_avg_value, base_avg_value)
delta_percentage = perfcompare_utils.get_delta_percentage(
@@ -997,53 +997,53 @@ def list(self, request):
new_is_better, base_avg_value, new_avg_value, confidence
)
- is_improvement = class_name == 'success'
- is_regression = class_name == 'danger'
- is_meaningful = class_name == ''
+ is_improvement = class_name == "success"
+ is_regression = class_name == "danger"
+ is_meaningful = class_name == ""
row_result = {
- 'base_rev': base_rev,
- 'new_rev': new_rev,
- 'header_name': header,
- 'platform': platform,
- 'base_app': base_sig.get('application', ''),
- 'new_app': new_sig.get('application', ''),
- 'suite': base_sig.get('suite', ''), # same suite for base_result and new_result
- 'test': base_sig.get('test', ''), # same test for base_result and new_result
- 'is_complete': is_complete,
- 'framework_id': framework,
- 'is_empty': is_empty,
- 'option_name': option_collection_map.get(
- base_sig.get('option_collection_id', ''), ''
+ "base_rev": base_rev,
+ "new_rev": new_rev,
+ "header_name": header,
+ "platform": platform,
+ "base_app": base_sig.get("application", ""),
+ "new_app": new_sig.get("application", ""),
+ "suite": base_sig.get("suite", ""), # same suite for base_result and new_result
+ "test": base_sig.get("test", ""), # same test for base_result and new_result
+ "is_complete": is_complete,
+ "framework_id": framework,
+ "is_empty": is_empty,
+ "option_name": option_collection_map.get(
+ base_sig.get("option_collection_id", ""), ""
),
- 'extra_options': base_sig.get('extra_options', ''),
- 'base_repository_name': base_repo_name,
- 'new_repository_name': new_repo_name,
- 'base_measurement_unit': base_sig.get('measurement_unit', ''),
- 'new_measurement_unit': new_sig.get('measurement_unit', ''),
- 'base_runs': sorted(base_perf_data_values),
- 'new_runs': sorted(new_perf_data_values),
- 'base_avg_value': base_avg_value,
- 'new_avg_value': new_avg_value,
- 'base_median_value': base_median_value,
- 'new_median_value': new_median_value,
- 'base_stddev': base_stddev,
- 'new_stddev': new_stddev,
- 'base_stddev_pct': base_stddev_pct,
- 'new_stddev_pct': new_stddev_pct,
- 'base_retriggerable_job_ids': base_grouped_job_ids.get(base_sig_id, []),
- 'new_retriggerable_job_ids': new_grouped_job_ids.get(new_sig_id, []),
- 'confidence': confidence,
- 'confidence_text': confidence_text,
- 'delta_value': delta_value,
- 'delta_percentage': delta_percentage,
- 'magnitude': magnitude,
- 'new_is_better': new_is_better,
- 'lower_is_better': lower_is_better,
- 'is_confident': is_confident,
- 'more_runs_are_needed': more_runs_are_needed,
+ "extra_options": base_sig.get("extra_options", ""),
+ "base_repository_name": base_repo_name,
+ "new_repository_name": new_repo_name,
+ "base_measurement_unit": base_sig.get("measurement_unit", ""),
+ "new_measurement_unit": new_sig.get("measurement_unit", ""),
+ "base_runs": sorted(base_perf_data_values),
+ "new_runs": sorted(new_perf_data_values),
+ "base_avg_value": base_avg_value,
+ "new_avg_value": new_avg_value,
+ "base_median_value": base_median_value,
+ "new_median_value": new_median_value,
+ "base_stddev": base_stddev,
+ "new_stddev": new_stddev,
+ "base_stddev_pct": base_stddev_pct,
+ "new_stddev_pct": new_stddev_pct,
+ "base_retriggerable_job_ids": base_grouped_job_ids.get(base_sig_id, []),
+ "new_retriggerable_job_ids": new_grouped_job_ids.get(new_sig_id, []),
+ "confidence": confidence,
+ "confidence_text": confidence_text,
+ "delta_value": delta_value,
+ "delta_percentage": delta_percentage,
+ "magnitude": magnitude,
+ "new_is_better": new_is_better,
+ "lower_is_better": lower_is_better,
+ "is_confident": is_confident,
+ "more_runs_are_needed": more_runs_are_needed,
# highlighted revisions is the base_revision and the other highlighted revisions is new_revision
- 'graphs_link': self._create_graph_links(
+ "graphs_link": self._create_graph_links(
base_repo_name,
new_repo_name,
base_rev,
@@ -1052,9 +1052,9 @@ def list(self, request):
push_timestamp,
str(sig_hash),
),
- 'is_improvement': is_improvement,
- 'is_regression': is_regression,
- 'is_meaningful': is_meaningful,
+ "is_improvement": is_improvement,
+ "is_regression": is_regression,
+ "is_meaningful": is_meaningful,
}
self.queryset.append(row_result)
@@ -1082,8 +1082,8 @@ def _get_push_timestamp(base_push, new_push):
for ts in timestamps:
ph_value = date_now - to_timestamp(str(ts))
for ph_range in timeranges:
- if ph_value < ph_range['value']:
- values.append(ph_range['value'])
+ if ph_value < ph_range["value"]:
+ values.append(ph_range["value"])
break
return max(values)
@@ -1122,33 +1122,33 @@ def _create_graph_links(
time_range,
signature,
):
- highlighted_revision_key = 'highlightedRevisions'
- time_range_key = 'timerange'
- series_key = 'series'
+ highlighted_revision_key = "highlightedRevisions"
+ time_range_key = "timerange"
+ series_key = "series"
highlighted_revisions_params = []
if base_revision:
highlighted_revisions_params.append((highlighted_revision_key, base_revision[:12]))
highlighted_revisions_params.append((highlighted_revision_key, new_revision[:12]))
- graph_link = 'graphs?%s' % urlencode(highlighted_revisions_params)
+ graph_link = "graphs?%s" % urlencode(highlighted_revisions_params)
if new_repo_name == base_repo_name:
# if repo for base and new are not the same then make diff
# series data one for each repo, else generate one
- repo_value = ','.join([new_repo_name, signature, '1', framework])
- graph_link = graph_link + '&%s' % urlencode({series_key: repo_value})
+ repo_value = ",".join([new_repo_name, signature, "1", framework])
+ graph_link = graph_link + "&%s" % urlencode({series_key: repo_value})
else:
# if repos selected are not the same
- base_repo_value = ','.join([base_repo_name, signature, '1', framework])
- new_repo_value = ','.join([new_repo_name, signature, '1', framework])
+ base_repo_value = ",".join([base_repo_name, signature, "1", framework])
+ new_repo_value = ",".join([new_repo_name, signature, "1", framework])
encoded = urlencode([(series_key, base_repo_value), (series_key, new_repo_value)])
- graph_link = graph_link + '&%s' % encoded
+ graph_link = graph_link + "&%s" % encoded
- graph_link = graph_link + '&%s' % urlencode({time_range_key: time_range})
+ graph_link = graph_link + "&%s" % urlencode({time_range_key: time_range})
- return 'https://treeherder.mozilla.org/perfherder/%s' % graph_link
+ return "https://treeherder.mozilla.org/perfherder/%s" % graph_link
@staticmethod
def _get_interval(base_push, new_push):
@@ -1161,15 +1161,15 @@ def _get_interval(base_push, new_push):
ph_ranges = perfcompare_utils.PERFHERDER_TIMERANGES
for ph_range in ph_ranges:
- if ph_range['value'] >= time_range:
- new_time_range = ph_range['value']
+ if ph_range["value"] >= time_range:
+ new_time_range = ph_range["value"]
break
return new_time_range
@staticmethod
def _get_perf_data_by_repo_and_signatures(repository_name, signatures):
- signature_ids = [signature['id'] for signature in list(signatures)]
- return PerformanceDatum.objects.select_related('push', 'repository', 'id').filter(
+ signature_ids = [signature["id"] for signature in list(signatures)]
+ return PerformanceDatum.objects.select_related("push", "repository", "id").filter(
signature_id__in=signature_ids,
repository__name=repository_name,
)
@@ -1183,31 +1183,31 @@ def _get_filtered_signatures_by_interval(signatures, interval):
@staticmethod
def _get_signatures_values(signatures: List[PerformanceSignature]):
return signatures.values(
- 'framework_id',
- 'id',
- 'extra_options',
- 'suite',
- 'platform__platform',
- 'test',
- 'option_collection_id',
- 'repository_id',
- 'measurement_unit',
- 'lower_is_better',
- 'signature_hash',
- 'application',
+ "framework_id",
+ "id",
+ "extra_options",
+ "suite",
+ "platform__platform",
+ "test",
+ "option_collection_id",
+ "repository_id",
+ "measurement_unit",
+ "lower_is_better",
+ "signature_hash",
+ "application",
)
@staticmethod
def _get_filtered_signatures_by_repo(repository_name):
return PerformanceSignature.objects.select_related(
- 'framework', 'repository', 'platform', 'push', 'job'
+ "framework", "repository", "platform", "push", "job"
).filter(repository__name=repository_name)
@staticmethod
def _get_grouped_perf_data(perf_data):
grouped_values = defaultdict(list)
grouped_job_ids = defaultdict(list)
- for signature_id, value, job_id in perf_data.values_list('signature_id', 'value', 'job_id'):
+ for signature_id, value, job_id in perf_data.values_list("signature_id", "value", "job_id"):
if value is not None:
grouped_values[signature_id].append(value)
grouped_job_ids[signature_id].append(job_id)
@@ -1223,18 +1223,18 @@ def _get_signatures_map(self, signatures, grouped_values, option_collection_map)
platforms = []
signatures_map = {}
for signature in signatures:
- suite = signature['suite']
- test = signature['test']
- extra_options = signature['extra_options']
- option_name = option_collection_map[signature['option_collection_id']]
+ suite = signature["suite"]
+ test = signature["test"]
+ extra_options = signature["extra_options"]
+ option_name = option_collection_map[signature["option_collection_id"]]
test_suite = perfcompare_utils.get_test_suite(suite, test)
- platform = signature['platform__platform']
+ platform = signature["platform__platform"]
header = perfcompare_utils.get_header_name(extra_options, option_name, test_suite)
sig_identifier = perfcompare_utils.get_sig_identifier(header, platform)
if sig_identifier not in signatures_map or (
sig_identifier in signatures_map
- and len(grouped_values.get(signature['id'], [])) != 0
+ and len(grouped_values.get(signature["id"], [])) != 0
):
signatures_map[sig_identifier] = signature
header_names.append(header)
@@ -1249,14 +1249,14 @@ def list(self, request):
if not query_params.is_valid():
return Response(data=query_params.errors, status=HTTP_400_BAD_REQUEST)
- framework_id = query_params.validated_data['framework']
+ framework_id = query_params.validated_data["framework"]
query_set = (
- PerformanceSignature.objects.prefetch_related('performancealert')
+ PerformanceSignature.objects.prefetch_related("performancealert")
.filter(framework_id=framework_id, parent_signature_id=None)
- .values('suite', 'test')
- .annotate(repositories=GroupConcat('repository_id', distinct=True))
- .annotate(platforms=GroupConcat('platform_id', distinct=True))
- .annotate(total_alerts=Count('performancealert'))
+ .values("suite", "test")
+ .annotate(repositories=GroupConcat("repository_id", distinct=True))
+ .annotate(platforms=GroupConcat("platform_id", distinct=True))
+ .annotate(total_alerts=Count("performancealert"))
.annotate(
total_regressions=Count(
Case(When(performancealert__is_regression=1, then=Value(1)))
@@ -1267,7 +1267,7 @@ def list(self, request):
Case(When(performancealert__status=PerformanceAlert.UNTRIAGED, then=Value(1)))
)
)
- .order_by('suite', 'test')
+ .order_by("suite", "test")
)
serializer = TestSuiteHealthSerializer(query_set, many=True)
diff --git a/treeherder/webapp/api/performance_serializers.py b/treeherder/webapp/api/performance_serializers.py
index 3727db33374..cbf422c9ed3 100644
--- a/treeherder/webapp/api/performance_serializers.py
+++ b/treeherder/webapp/api/performance_serializers.py
@@ -30,8 +30,8 @@ def get_tc_metadata(alert, push):
if datum:
metadata = TaskclusterMetadata.objects.get(job=datum.job)
return {
- 'task_id': metadata.task_id,
- 'retry_id': metadata.retry_id,
+ "task_id": metadata.task_id,
+ "retry_id": metadata.retry_id,
}
else:
return {}
@@ -39,24 +39,24 @@ def get_tc_metadata(alert, push):
class OptionalBooleanField(serializers.BooleanField):
def __init__(self, *args, **kwargs):
- kwargs['default'] = False
+ kwargs["default"] = False
super().__init__(*args, **kwargs)
class PerformanceDecimalField(serializers.DecimalField):
def __init__(self, *args, **kwargs):
- kwargs['max_digits'] = 20
- kwargs['decimal_places'] = 2
- kwargs['coerce_to_string'] = False
- kwargs['allow_null'] = True
+ kwargs["max_digits"] = 20
+ kwargs["decimal_places"] = 2
+ kwargs["coerce_to_string"] = False
+ kwargs["allow_null"] = True
super().__init__(*args, **kwargs)
class PerfCompareDecimalField(serializers.DecimalField):
def __init__(self, *args, **kwargs):
- kwargs['max_digits'] = None
- kwargs['decimal_places'] = 2
- kwargs['coerce_to_string'] = False
+ kwargs["max_digits"] = None
+ kwargs["decimal_places"] = 2
+ kwargs["coerce_to_string"] = False
super().__init__(*args, **kwargs)
@@ -69,7 +69,7 @@ class WordsField(serializers.CharField):
def to_representation(self, obj):
# if string's value is blank, just return nothing
if isinstance(obj, str):
- return obj.split(' ')
+ return obj.split(" ")
return []
@@ -84,20 +84,20 @@ class BackfillRecordSerializer(serializers.Serializer):
class Meta:
model = BackfillRecord
fields = (
- 'alert',
- 'context',
- 'status',
- 'total_actions_triggered',
- 'total_backfills_failed',
- 'total_backfills_successful',
- 'total_backfills_in_progress',
+ "alert",
+ "context",
+ "status",
+ "total_actions_triggered",
+ "total_backfills_failed",
+ "total_backfills_successful",
+ "total_backfills_in_progress",
)
class PerformanceFrameworkSerializer(serializers.ModelSerializer):
class Meta:
model = PerformanceFramework
- fields = ['id', 'name']
+ fields = ["id", "name"]
class PerformanceSignatureSerializer(serializers.ModelSerializer):
@@ -116,20 +116,20 @@ class PerformanceSignatureSerializer(serializers.ModelSerializer):
class Meta:
model = PerformanceSignature
fields = [
- 'id',
- 'framework_id',
- 'signature_hash',
- 'machine_platform',
- 'suite',
- 'test',
- 'lower_is_better',
- 'has_subtests',
- 'option_collection_hash',
- 'tags',
- 'extra_options',
- 'measurement_unit',
- 'suite_public_name',
- 'test_public_name',
+ "id",
+ "framework_id",
+ "signature_hash",
+ "machine_platform",
+ "suite",
+ "test",
+ "lower_is_better",
+ "has_subtests",
+ "option_collection_hash",
+ "tags",
+ "extra_options",
+ "measurement_unit",
+ "suite_public_name",
+ "test_public_name",
]
@@ -175,10 +175,10 @@ class PerformanceAlertSerializer(serializers.ModelSerializer):
def update(self, instance, validated_data):
# ensure the related summary, if set, has the same repository and
# framework as the original summary
- related_summary = validated_data.get('related_summary')
+ related_summary = validated_data.get("related_summary")
if related_summary:
if (
- validated_data.get('status', instance.status) != PerformanceAlert.DOWNSTREAM
+ validated_data.get("status", instance.status) != PerformanceAlert.DOWNSTREAM
and instance.summary.repository_id != related_summary.repository_id
):
raise exceptions.ValidationError(
@@ -195,7 +195,7 @@ def update(self, instance, validated_data):
)
)
- status = validated_data.get('status')
+ status = validated_data.get("status")
if status and status in PerformanceAlert.RELATIONAL_STATUS_IDS:
# we've caught a downstream/reassignment: timestamp it
related_summary.timestamp_first_triage().save()
@@ -228,32 +228,32 @@ def get_prev_profile_url(self, alert):
return "N/A"
def get_classifier_email(self, performance_alert):
- return getattr(performance_alert.classifier, 'email', None)
+ return getattr(performance_alert.classifier, "email", None)
class Meta:
model = PerformanceAlert
fields = [
- 'id',
- 'status',
- 'series_signature',
- 'taskcluster_metadata',
- 'prev_taskcluster_metadata',
- 'profile_url',
- 'prev_profile_url',
- 'is_regression',
- 'prev_value',
- 'new_value',
- 't_value',
- 'amount_abs',
- 'amount_pct',
- 'summary_id',
- 'related_summary_id',
- 'manually_created',
- 'classifier',
- 'starred',
- 'classifier_email',
- 'backfill_record',
- 'noise_profile',
+ "id",
+ "status",
+ "series_signature",
+ "taskcluster_metadata",
+ "prev_taskcluster_metadata",
+ "profile_url",
+ "prev_profile_url",
+ "is_regression",
+ "prev_value",
+ "new_value",
+ "t_value",
+ "amount_abs",
+ "amount_pct",
+ "summary_id",
+ "related_summary_id",
+ "manually_created",
+ "classifier",
+ "starred",
+ "classifier_email",
+ "backfill_record",
+ "noise_profile",
]
@@ -262,21 +262,21 @@ class PerformanceTagSerializer(serializers.ModelSerializer):
class Meta:
model = PerformanceTag
- fields = ['id', 'name']
+ fields = ["id", "name"]
class PerformanceAlertSummarySerializer(serializers.ModelSerializer):
alerts = PerformanceAlertSerializer(many=True, read_only=True)
related_alerts = PerformanceAlertSerializer(many=True, read_only=True)
performance_tags = serializers.SlugRelatedField(
- many=True, required=False, slug_field='name', queryset=PerformanceTag.objects.all()
+ many=True, required=False, slug_field="name", queryset=PerformanceTag.objects.all()
)
- repository = serializers.SlugRelatedField(read_only=True, slug_field='name')
- framework = serializers.SlugRelatedField(read_only=True, slug_field='id')
- revision = serializers.SlugRelatedField(read_only=True, slug_field='revision', source='push')
- push_timestamp = TimestampField(source='push', read_only=True)
+ repository = serializers.SlugRelatedField(read_only=True, slug_field="name")
+ framework = serializers.SlugRelatedField(read_only=True, slug_field="id")
+ revision = serializers.SlugRelatedField(read_only=True, slug_field="revision", source="push")
+ push_timestamp = TimestampField(source="push", read_only=True)
prev_push_revision = serializers.SlugRelatedField(
- read_only=True, slug_field='revision', source='prev_push'
+ read_only=True, slug_field="revision", source="prev_push"
)
assignee_username = serializers.SlugRelatedField(
slug_field="username",
@@ -301,59 +301,59 @@ def update(self, instance, validated_data):
return super().update(instance, validated_data)
def get_assignee_email(self, performance_alert_summary):
- return getattr(performance_alert_summary.assignee, 'email', None)
+ return getattr(performance_alert_summary.assignee, "email", None)
class Meta:
model = PerformanceAlertSummary
fields = [
- 'id',
- 'push_id',
- 'prev_push_id',
- 'created',
- 'first_triaged',
- 'triage_due_date',
- 'repository',
- 'framework',
- 'alerts',
- 'related_alerts',
- 'status',
- 'bug_number',
- 'bug_due_date',
- 'bug_updated',
- 'issue_tracker',
- 'notes',
- 'revision',
- 'push_timestamp',
- 'prev_push_revision',
- 'assignee_username',
- 'assignee_email',
- 'performance_tags',
+ "id",
+ "push_id",
+ "prev_push_id",
+ "created",
+ "first_triaged",
+ "triage_due_date",
+ "repository",
+ "framework",
+ "alerts",
+ "related_alerts",
+ "status",
+ "bug_number",
+ "bug_due_date",
+ "bug_updated",
+ "issue_tracker",
+ "notes",
+ "revision",
+ "push_timestamp",
+ "prev_push_revision",
+ "assignee_username",
+ "assignee_email",
+ "performance_tags",
]
class PerformanceBugTemplateSerializer(serializers.ModelSerializer):
- framework = serializers.SlugRelatedField(read_only=True, slug_field='id')
+ framework = serializers.SlugRelatedField(read_only=True, slug_field="id")
class Meta:
model = PerformanceBugTemplate
fields = [
- 'framework',
- 'keywords',
- 'status_whiteboard',
- 'default_component',
- 'default_product',
- 'cc_list',
- 'text',
+ "framework",
+ "keywords",
+ "status_whiteboard",
+ "default_component",
+ "default_product",
+ "cc_list",
+ "text",
]
class IssueTrackerSerializer(serializers.ModelSerializer):
- text = serializers.CharField(read_only=True, source='name')
- issueTrackerUrl = serializers.URLField(read_only=True, source='task_base_url')
+ text = serializers.CharField(read_only=True, source="name")
+ issueTrackerUrl = serializers.URLField(read_only=True, source="task_base_url")
class Meta:
model = IssueTracker
- fields = ['id', 'text', 'issueTrackerUrl']
+ fields = ["id", "text", "issueTrackerUrl"]
class PerformanceQueryParamsSerializer(serializers.Serializer):
@@ -372,12 +372,12 @@ class PerformanceQueryParamsSerializer(serializers.Serializer):
def validate(self, data):
if (
- data['revision'] is None
- and data['interval'] is None
- and (data['startday'] is None or data['endday'] is None)
+ data["revision"] is None
+ and data["interval"] is None
+ and (data["startday"] is None or data["endday"] is None)
):
raise serializers.ValidationError(
- 'Required: revision, startday and endday or interval.'
+ "Required: revision, startday and endday or interval."
)
return data
@@ -387,17 +387,17 @@ def validate_repository(self, repository):
Repository.objects.get(name=repository)
except ObjectDoesNotExist:
- raise serializers.ValidationError('{} does not exist.'.format(repository))
+ raise serializers.ValidationError("{} does not exist.".format(repository))
return repository
class PerformanceDatumSerializer(serializers.ModelSerializer):
- revision = serializers.CharField(source='push__revision')
+ revision = serializers.CharField(source="push__revision")
class Meta:
model = PerformanceDatum
- fields = ['job_id', 'id', 'value', 'push_timestamp', 'push_id', 'revision']
+ fields = ["job_id", "id", "value", "push_timestamp", "push_id", "revision"]
class PerformanceSummarySerializer(serializers.ModelSerializer):
@@ -422,31 +422,31 @@ class PerformanceSummarySerializer(serializers.ModelSerializer):
class Meta:
model = PerformanceSignature
fields = [
- 'signature_id',
- 'framework_id',
- 'signature_hash',
- 'platform',
- 'test',
- 'suite',
- 'lower_is_better',
- 'has_subtests',
- 'tags',
- 'values',
- 'name',
- 'parent_signature',
- 'job_ids',
- 'repository_name',
- 'repository_id',
- 'data',
- 'measurement_unit',
- 'application',
+ "signature_id",
+ "framework_id",
+ "signature_hash",
+ "platform",
+ "test",
+ "suite",
+ "lower_is_better",
+ "has_subtests",
+ "tags",
+ "values",
+ "name",
+ "parent_signature",
+ "job_ids",
+ "repository_name",
+ "repository_id",
+ "data",
+ "measurement_unit",
+ "application",
]
def get_name(self, value):
- test = value['test']
- suite = value['suite']
- test_suite = suite if test == '' or test == suite else '{} {}'.format(suite, test)
- return '{} {} {}'.format(test_suite, value['option_name'], value['extra_options'])
+ test = value["test"]
+ suite = value["suite"]
+ test_suite = suite if test == "" or test == suite else "{} {}".format(suite, test)
+ return "{} {} {}".format(test_suite, value["option_name"], value["extra_options"])
class PerfAlertSummaryTasksQueryParamSerializer(serializers.Serializer):
@@ -454,10 +454,10 @@ class PerfAlertSummaryTasksQueryParamSerializer(serializers.Serializer):
def validate(self, data):
try:
- PerformanceAlertSummary.objects.get(id=data['id'])
+ PerformanceAlertSummary.objects.get(id=data["id"])
except PerformanceAlertSummary.DoesNotExist:
raise serializers.ValidationError(
- {'message': 'PerformanceAlertSummary does not exist.'}
+ {"message": "PerformanceAlertSummary does not exist."}
)
return data
@@ -478,15 +478,15 @@ class PerfCompareResultsQueryParamsSerializer(serializers.Serializer):
no_subtests = serializers.BooleanField(required=False)
def validate(self, data):
- if data['base_revision'] is None and data['interval'] is None:
- raise serializers.ValidationError('Field required: interval.')
+ if data["base_revision"] is None and data["interval"] is None:
+ raise serializers.ValidationError("Field required: interval.")
try:
- Repository.objects.get(name=data['base_repository'])
- Repository.objects.get(name=data['new_repository'])
+ Repository.objects.get(name=data["base_repository"])
+ Repository.objects.get(name=data["new_repository"])
except ObjectDoesNotExist:
raise serializers.ValidationError(
- '{} or {} does not exist.'.format(data['base_repository'], data['new_repository'])
+ "{} or {} does not exist.".format(data["base_repository"], data["new_repository"])
)
return data
@@ -497,11 +497,11 @@ class PerfCompareResultsSerializer(serializers.ModelSerializer):
new_rev = serializers.CharField()
base_app = serializers.CharField(
max_length=10,
- default='',
+ default="",
)
new_app = serializers.CharField(
max_length=10,
- default='',
+ default="",
)
is_empty = serializers.BooleanField()
is_complete = serializers.BooleanField()
@@ -509,8 +509,8 @@ class PerfCompareResultsSerializer(serializers.ModelSerializer):
header_name = serializers.CharField()
base_repository_name = serializers.CharField()
new_repository_name = serializers.CharField()
- base_measurement_unit = serializers.CharField(default='')
- new_measurement_unit = serializers.CharField(default='')
+ base_measurement_unit = serializers.CharField(default="")
+ new_measurement_unit = serializers.CharField(default="")
base_retriggerable_job_ids = serializers.ListField(child=serializers.IntegerField(), default=[])
new_retriggerable_job_ids = serializers.ListField(child=serializers.IntegerField(), default=[])
option_name = serializers.CharField()
@@ -548,49 +548,49 @@ class PerfCompareResultsSerializer(serializers.ModelSerializer):
class Meta:
model = PerformanceSignature
fields = [
- 'base_rev',
- 'new_rev',
- 'base_app',
- 'new_app',
- 'framework_id',
- 'platform',
- 'suite',
- 'is_empty',
- 'header_name',
- 'base_repository_name',
- 'new_repository_name',
- 'is_complete',
- 'base_measurement_unit',
- 'new_measurement_unit',
- 'base_retriggerable_job_ids',
- 'new_retriggerable_job_ids',
- 'base_runs',
- 'new_runs',
- 'base_avg_value',
- 'new_avg_value',
- 'base_median_value',
- 'new_median_value',
- 'test',
- 'option_name',
- 'extra_options',
- 'base_stddev',
- 'new_stddev',
- 'base_stddev_pct',
- 'new_stddev_pct',
- 'confidence',
- 'confidence_text',
- 'graphs_link',
- 'delta_value',
- 'delta_percentage',
- 'magnitude',
- 'new_is_better',
- 'lower_is_better',
- 'is_confident',
- 'more_runs_are_needed',
- 'noise_metric',
- 'is_improvement',
- 'is_regression',
- 'is_meaningful',
+ "base_rev",
+ "new_rev",
+ "base_app",
+ "new_app",
+ "framework_id",
+ "platform",
+ "suite",
+ "is_empty",
+ "header_name",
+ "base_repository_name",
+ "new_repository_name",
+ "is_complete",
+ "base_measurement_unit",
+ "new_measurement_unit",
+ "base_retriggerable_job_ids",
+ "new_retriggerable_job_ids",
+ "base_runs",
+ "new_runs",
+ "base_avg_value",
+ "new_avg_value",
+ "base_median_value",
+ "new_median_value",
+ "test",
+ "option_name",
+ "extra_options",
+ "base_stddev",
+ "new_stddev",
+ "base_stddev_pct",
+ "new_stddev_pct",
+ "confidence",
+ "confidence_text",
+ "graphs_link",
+ "delta_value",
+ "delta_percentage",
+ "magnitude",
+ "new_is_better",
+ "lower_is_better",
+ "is_confident",
+ "more_runs_are_needed",
+ "noise_metric",
+ "is_improvement",
+ "is_regression",
+ "is_meaningful",
]
@@ -600,7 +600,7 @@ class TestSuiteHealthParamsSerializer(serializers.Serializer):
class CommaSeparatedField(serializers.Field):
def to_representation(self, value):
- return value.split(',')
+ return value.split(",")
class TestSuiteHealthSerializer(serializers.Serializer):
diff --git a/treeherder/webapp/api/push.py b/treeherder/webapp/api/push.py
index 9cf1b03777b..6b1dbb8d87e 100644
--- a/treeherder/webapp/api/push.py
+++ b/treeherder/webapp/api/push.py
@@ -57,9 +57,9 @@ def list(self, request, project):
del filter_params[param]
meta[param] = v
- all_repos = request.query_params.get('all_repos')
+ all_repos = request.query_params.get("all_repos")
- pushes = Push.objects.order_by('-time')
+ pushes = Push.objects.order_by("-time")
if not all_repos:
try:
@@ -72,52 +72,52 @@ def list(self, request, project):
pushes = pushes.filter(repository=repository)
for param, value in meta.items():
- if param == 'fromchange':
- revision_field = 'revision__startswith' if len(value) < 40 else 'revision'
- filter_kwargs = {revision_field: value, 'repository': repository}
- frompush_time = Push.objects.values_list('time', flat=True).get(**filter_kwargs)
+ if param == "fromchange":
+ revision_field = "revision__startswith" if len(value) < 40 else "revision"
+ filter_kwargs = {revision_field: value, "repository": repository}
+ frompush_time = Push.objects.values_list("time", flat=True).get(**filter_kwargs)
pushes = pushes.filter(time__gte=frompush_time)
filter_params.update({"push_timestamp__gte": to_timestamp(frompush_time)})
self.report_if_short_revision(param, value)
- elif param == 'tochange':
- revision_field = 'revision__startswith' if len(value) < 40 else 'revision'
- filter_kwargs = {revision_field: value, 'repository': repository}
- topush_time = Push.objects.values_list('time', flat=True).get(**filter_kwargs)
+ elif param == "tochange":
+ revision_field = "revision__startswith" if len(value) < 40 else "revision"
+ filter_kwargs = {revision_field: value, "repository": repository}
+ topush_time = Push.objects.values_list("time", flat=True).get(**filter_kwargs)
pushes = pushes.filter(time__lte=topush_time)
filter_params.update({"push_timestamp__lte": to_timestamp(topush_time)})
self.report_if_short_revision(param, value)
- elif param == 'startdate':
+ elif param == "startdate":
pushes = pushes.filter(time__gte=to_datetime(value))
filter_params.update({"push_timestamp__gte": to_timestamp(to_datetime(value))})
- elif param == 'enddate':
+ elif param == "enddate":
real_end_date = to_datetime(value) + datetime.timedelta(days=1)
pushes = pushes.filter(time__lte=real_end_date)
filter_params.update({"push_timestamp__lt": to_timestamp(real_end_date)})
- elif param == 'revision':
+ elif param == "revision":
# revision must be the tip revision of the push itself
- revision_field = 'revision__startswith' if len(value) < 40 else 'revision'
+ revision_field = "revision__startswith" if len(value) < 40 else "revision"
filter_kwargs = {revision_field: value}
pushes = pushes.filter(**filter_kwargs)
rev_key = (
"revisions_long_revision"
- if len(meta['revision']) == 40
+ if len(meta["revision"]) == 40
else "revisions_short_revision"
)
- filter_params.update({rev_key: meta['revision']})
+ filter_params.update({rev_key: meta["revision"]})
self.report_if_short_revision(param, value)
- elif param == 'commit_revision':
+ elif param == "commit_revision":
# revision can be either the revision of the push itself, or
# any of the commits it refers to
pushes = pushes.filter(commits__revision=value)
self.report_if_short_revision(param, value)
for param in [
- 'push_timestamp__lt',
- 'push_timestamp__lte',
- 'push_timestamp__gt',
- 'push_timestamp__gte',
+ "push_timestamp__lt",
+ "push_timestamp__lte",
+ "push_timestamp__gt",
+ "push_timestamp__gte",
]:
if filter_params.get(param):
# translate push timestamp directly into a filter
@@ -128,9 +128,9 @@ def list(self, request, project):
{"detail": "Invalid timestamp specified for {}".format(param)},
status=HTTP_400_BAD_REQUEST,
)
- pushes = pushes.filter(**{param.replace('push_timestamp', 'time'): value})
+ pushes = pushes.filter(**{param.replace("push_timestamp", "time"): value})
- for param in ['id__lt', 'id__lte', 'id__gt', 'id__gte', 'id']:
+ for param in ["id__lt", "id__lte", "id__gt", "id__gte", "id"]:
try:
value = int(filter_params.get(param, 0))
except ValueError:
@@ -144,7 +144,7 @@ def list(self, request, project):
id_in = filter_params.get("id__in")
if id_in:
try:
- id_in_list = [int(id) for id in id_in.split(',')]
+ id_in_list = [int(id) for id in id_in.split(",")]
except ValueError:
return Response(
{"detail": "Invalid id__in specification"}, status=HTTP_400_BAD_REQUEST
@@ -176,14 +176,14 @@ def list(self, request, project):
# false. however AFAIK no one ever used it (default was to fetch
# everything), so let's just leave it out. it doesn't break
# anything to send extra data when not required.
- pushes = pushes.select_related('repository').prefetch_related('commits')[:count]
+ pushes = pushes.select_related("repository").prefetch_related("commits")[:count]
serializer = PushSerializer(pushes, many=True)
- meta['count'] = len(pushes)
- meta['repository'] = 'all' if all_repos else project
- meta['filter_params'] = filter_params
+ meta["count"] = len(pushes)
+ meta["repository"] = "all" if all_repos else project
+ meta["filter_params"] = filter_params
- resp = {'meta': meta, 'results': serializer.data}
+ resp = {"meta": meta, "results": serializer.data}
return Response(resp)
@@ -215,17 +215,17 @@ def health_summary(self, request, project):
"""
Return a calculated summary of the health of this push.
"""
- revision = request.query_params.get('revision')
- author = request.query_params.get('author')
- count = request.query_params.get('count')
- all_repos = request.query_params.get('all_repos')
- with_history = request.query_params.get('with_history')
- with_in_progress_tests = request.query_params.get('with_in_progress_tests', False)
+ revision = request.query_params.get("revision")
+ author = request.query_params.get("author")
+ count = request.query_params.get("count")
+ all_repos = request.query_params.get("all_repos")
+ with_history = request.query_params.get("with_history")
+ with_in_progress_tests = request.query_params.get("with_in_progress_tests", False)
if revision:
try:
pushes = Push.objects.filter(
- revision__in=revision.split(','), repository__name=project
+ revision__in=revision.split(","), repository__name=project
)
except Push.DoesNotExist:
return Response(
@@ -235,9 +235,9 @@ def health_summary(self, request, project):
try:
pushes = (
Push.objects.filter(author=author)
- .select_related('repository')
- .prefetch_related('commits')
- .order_by('-time')
+ .select_related("repository")
+ .prefetch_related("commits")
+ .order_by("-time")
)
if not all_repos:
@@ -270,7 +270,7 @@ def health_summary(self, request, project):
push
)
- test_failure_count = len(push_health_test_failures['needInvestigation'])
+ test_failure_count = len(push_health_test_failures["needInvestigation"])
build_failure_count = len(push_health_build_failures)
lint_failure_count = len(push_health_lint_failures)
test_in_progress_count = 0
@@ -279,7 +279,7 @@ def health_summary(self, request, project):
total_failures = test_failure_count + build_failure_count + lint_failure_count
# Override the testfailed value added in push.get_status so that it aligns with how we detect lint, build and test failures
# for the push health API's (total_failures doesn't include known intermittent failures)
- status['testfailed'] = total_failures
+ status["testfailed"] = total_failures
if with_history:
serializer = PushSerializer([push], many=True)
@@ -289,31 +289,31 @@ def health_summary(self, request, project):
data.append(
{
- 'revision': push.revision,
- 'repository': push.repository.name,
- 'testFailureCount': test_failure_count,
- 'testInProgressCount': test_in_progress_count,
- 'buildFailureCount': build_failure_count,
- 'buildInProgressCount': builds_in_progress_count,
- 'lintFailureCount': lint_failure_count,
- 'lintingInProgressCount': linting_in_progress_count,
- 'needInvestigation': test_failure_count
+ "revision": push.revision,
+ "repository": push.repository.name,
+ "testFailureCount": test_failure_count,
+ "testInProgressCount": test_in_progress_count,
+ "buildFailureCount": build_failure_count,
+ "buildInProgressCount": builds_in_progress_count,
+ "lintFailureCount": lint_failure_count,
+ "lintingInProgressCount": linting_in_progress_count,
+ "needInvestigation": test_failure_count
+ build_failure_count
+ lint_failure_count,
- 'status': status,
- 'history': commit_history,
- 'metrics': {
- 'linting': {
- 'name': 'Linting',
- 'result': lint_result,
+ "status": status,
+ "history": commit_history,
+ "metrics": {
+ "linting": {
+ "name": "Linting",
+ "result": lint_result,
},
- 'tests': {
- 'name': 'Tests',
- 'result': test_result,
+ "tests": {
+ "name": "Tests",
+ "result": test_result,
},
- 'builds': {
- 'name': 'Builds',
- 'result': build_result,
+ "builds": {
+ "name": "Builds",
+ "result": build_result,
},
},
}
@@ -324,14 +324,14 @@ def health_summary(self, request, project):
@action(detail=False)
def health_usage(self, request, project):
usage = get_usage()
- return Response({'usage': usage})
+ return Response({"usage": usage})
@action(detail=False)
def health(self, request, project):
"""
Return a calculated assessment of the health of this push.
"""
- revision = request.query_params.get('revision')
+ revision = request.query_params.get("revision")
try:
repository = Repository.objects.get(name=project)
@@ -345,7 +345,7 @@ def health(self, request, project):
result_status, jobs = get_test_failure_jobs(push)
# Parent compare only supported for Hg at this time.
# Bug https://bugzilla.mozilla.org/show_bug.cgi?id=1612645
- if repository.dvcs_type == 'hg':
+ if repository.dvcs_type == "hg":
commit_history_details = get_commit_history(repository, revision, push)
test_result, push_health_test_failures = get_test_failures(
@@ -358,97 +358,97 @@ def health(self, request, project):
lint_result, lint_failures, _unused = get_lint_failures(push)
- push_result = 'pass'
+ push_result = "pass"
for metric_result in [test_result, lint_result, build_result]:
if (
- metric_result == 'indeterminate'
- or metric_result == 'unknown'
- and push_result != 'fail'
+ metric_result == "indeterminate"
+ or metric_result == "unknown"
+ and push_result != "fail"
):
push_result = metric_result
- elif metric_result == 'fail':
+ elif metric_result == "fail":
push_result = metric_result
status = push.get_status()
total_failures = (
- len(push_health_test_failures['needInvestigation'])
+ len(push_health_test_failures["needInvestigation"])
+ len(build_failures)
+ len(lint_failures)
)
# Override the testfailed value added in push.get_status so that it aligns with how we detect lint, build and test failures
# for the push health API's (total_failures doesn't include known intermittent failures)
- status['testfailed'] = total_failures
+ status["testfailed"] = total_failures
newrelic.agent.record_custom_event(
- 'push_health_need_investigation',
+ "push_health_need_investigation",
{
- 'revision': revision,
- 'repo': repository.name,
- 'needInvestigation': len(push_health_test_failures['needInvestigation']),
- 'author': push.author,
+ "revision": revision,
+ "repo": repository.name,
+ "needInvestigation": len(push_health_test_failures["needInvestigation"]),
+ "author": push.author,
},
)
return Response(
{
- 'revision': revision,
- 'id': push.id,
- 'result': push_result,
- 'jobs': jobs,
- 'metrics': {
- 'commitHistory': {
- 'name': 'Commit History',
- 'result': 'none',
- 'details': commit_history_details,
+ "revision": revision,
+ "id": push.id,
+ "result": push_result,
+ "jobs": jobs,
+ "metrics": {
+ "commitHistory": {
+ "name": "Commit History",
+ "result": "none",
+ "details": commit_history_details,
},
- 'linting': {
- 'name': 'Linting',
- 'result': lint_result,
- 'details': lint_failures,
+ "linting": {
+ "name": "Linting",
+ "result": lint_result,
+ "details": lint_failures,
},
- 'tests': {
- 'name': 'Tests',
- 'result': test_result,
- 'details': push_health_test_failures,
+ "tests": {
+ "name": "Tests",
+ "result": test_result,
+ "details": push_health_test_failures,
},
- 'builds': {
- 'name': 'Builds',
- 'result': build_result,
- 'details': build_failures,
+ "builds": {
+ "name": "Builds",
+ "result": build_result,
+ "details": build_failures,
},
},
- 'status': status,
+ "status": status,
}
)
@cache_memoize(60 * 60)
def get_decision_jobs(self, push_ids):
- job_types = JobType.objects.filter(name__endswith='Decision Task', symbol='D')
+ job_types = JobType.objects.filter(name__endswith="Decision Task", symbol="D")
return Job.objects.filter(
push_id__in=push_ids,
job_type__in=job_types,
- result='success',
- ).select_related('taskcluster_metadata')
+ result="success",
+ ).select_related("taskcluster_metadata")
@action(detail=False)
def decisiontask(self, request, project):
"""
Return the decision task ids for the pushes.
"""
- push_ids = self.request.query_params.get('push_ids', '').split(',')
+ push_ids = self.request.query_params.get("push_ids", "").split(",")
decision_jobs = self.get_decision_jobs(push_ids)
if decision_jobs:
return Response(
{
job.push_id: {
- 'id': job.taskcluster_metadata.task_id,
- 'run': job.guid.split('/')[1],
+ "id": job.taskcluster_metadata.task_id,
+ "run": job.guid.split("/")[1],
}
for job in decision_jobs
}
)
- logger.error('/decisiontask/ found no decision jobs for {}'.format(push_ids))
+ logger.error("/decisiontask/ found no decision jobs for {}".format(push_ids))
self.get_decision_jobs.invalidate(push_ids)
return Response(
"No decision tasks found for pushes: {}".format(push_ids), status=HTTP_404_NOT_FOUND
@@ -458,8 +458,8 @@ def decisiontask(self, request, project):
def report_if_short_revision(self, param, revision):
if len(revision) < 40:
newrelic.agent.record_custom_event(
- 'short_revision_push_api',
- {'error': 'Revision <40 chars', 'param': param, 'revision': revision},
+ "short_revision_push_api",
+ {"error": "Revision <40 chars", "param": param, "revision": revision},
)
@action(detail=False)
@@ -467,7 +467,7 @@ def group_results(self, request, project):
"""
Return the results of all the test groups for this push.
"""
- revision = request.query_params.get('revision')
+ revision = request.query_params.get("revision")
try:
repository = Repository.objects.get(name=project)
diff --git a/treeherder/webapp/api/refdata.py b/treeherder/webapp/api/refdata.py
index 804264c8a9d..ec9b0d3d009 100644
--- a/treeherder/webapp/api/refdata.py
+++ b/treeherder/webapp/api/refdata.py
@@ -14,8 +14,8 @@ class RepositoryViewSet(viewsets.ReadOnlyModelViewSet):
"""ViewSet for the refdata Repository model"""
- queryset = models.Repository.objects.filter(active_status='active').select_related(
- 'repository_group'
+ queryset = models.Repository.objects.filter(active_status="active").select_related(
+ "repository_group"
)
serializer_class = th_serializers.RepositorySerializer
@@ -31,8 +31,8 @@ def list(self, request):
for option_hash, option_names in option_collection_map.items():
ret.append(
{
- 'option_collection_hash': option_hash,
- 'options': [{'name': name} for name in option_names.split(' ')],
+ "option_collection_hash": option_hash,
+ "options": [{"name": name} for name in option_names.split(" ")],
}
)
return Response(ret)
@@ -53,7 +53,7 @@ class TaskclusterMetadataViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = th_serializers.TaskclusterMetadataSerializer
def get_queryset(self):
- job_ids = self.request.query_params.get('job_ids', '').split(',')
+ job_ids = self.request.query_params.get("job_ids", "").split(",")
return models.TaskclusterMetadata.objects.filter(job_id__in=job_ids)
diff --git a/treeherder/webapp/api/serializers.py b/treeherder/webapp/api/serializers.py
index a1882707c9a..47228f60702 100644
--- a/treeherder/webapp/api/serializers.py
+++ b/treeherder/webapp/api/serializers.py
@@ -32,7 +32,7 @@ class Meta:
class RepositoryGroupSerializer(serializers.ModelSerializer):
class Meta:
model = models.RepositoryGroup
- fields = ('name', 'description')
+ fields = ("name", "description")
class RepositorySerializer(serializers.ModelSerializer):
@@ -40,103 +40,103 @@ class RepositorySerializer(serializers.ModelSerializer):
class Meta:
model = models.Repository
- fields = '__all__'
+ fields = "__all__"
class TaskclusterMetadataSerializer(serializers.ModelSerializer):
class Meta:
model = models.TaskclusterMetadata
- fields = '__all__'
+ fields = "__all__"
class JobProjectSerializer(serializers.ModelSerializer):
def to_representation(self, job):
return {
- 'build_architecture': job.build_platform.architecture,
- 'build_os': job.build_platform.os_name,
- 'build_platform': job.build_platform.platform,
- 'build_platform_id': job.build_platform_id,
- 'build_system_type': job.signature.build_system_type,
- 'end_timestamp': to_timestamp(job.end_time),
- 'failure_classification_id': job.failure_classification_id,
- 'id': job.id,
- 'job_group_description': job.job_group.description,
- 'job_group_id': job.job_group_id,
- 'job_group_name': job.job_group.name,
- 'job_group_symbol': job.job_group.symbol,
- 'job_guid': job.guid,
- 'job_type_description': job.job_type.description,
- 'job_type_id': job.job_type_id,
- 'job_type_name': job.job_type.name,
- 'job_type_symbol': job.job_type.symbol,
- 'last_modified': job.last_modified,
- 'machine_name': job.machine.name,
- 'machine_platform_architecture': job.machine_platform.architecture,
- 'machine_platform_os': job.machine_platform.os_name,
- 'option_collection_hash': job.option_collection_hash,
- 'platform': job.machine_platform.platform,
- 'push_id': job.push_id,
- 'reason': job.reason,
- 'ref_data_name': job.signature.name,
- 'result': job.result,
- 'result_set_id': job.push_id,
- 'signature': job.signature.signature,
- 'start_timestamp': to_timestamp(job.start_time),
- 'state': job.state,
- 'submit_timestamp': to_timestamp(job.submit_time),
- 'tier': job.tier,
- 'who': job.who,
+ "build_architecture": job.build_platform.architecture,
+ "build_os": job.build_platform.os_name,
+ "build_platform": job.build_platform.platform,
+ "build_platform_id": job.build_platform_id,
+ "build_system_type": job.signature.build_system_type,
+ "end_timestamp": to_timestamp(job.end_time),
+ "failure_classification_id": job.failure_classification_id,
+ "id": job.id,
+ "job_group_description": job.job_group.description,
+ "job_group_id": job.job_group_id,
+ "job_group_name": job.job_group.name,
+ "job_group_symbol": job.job_group.symbol,
+ "job_guid": job.guid,
+ "job_type_description": job.job_type.description,
+ "job_type_id": job.job_type_id,
+ "job_type_name": job.job_type.name,
+ "job_type_symbol": job.job_type.symbol,
+ "last_modified": job.last_modified,
+ "machine_name": job.machine.name,
+ "machine_platform_architecture": job.machine_platform.architecture,
+ "machine_platform_os": job.machine_platform.os_name,
+ "option_collection_hash": job.option_collection_hash,
+ "platform": job.machine_platform.platform,
+ "push_id": job.push_id,
+ "reason": job.reason,
+ "ref_data_name": job.signature.name,
+ "result": job.result,
+ "result_set_id": job.push_id,
+ "signature": job.signature.signature,
+ "start_timestamp": to_timestamp(job.start_time),
+ "state": job.state,
+ "submit_timestamp": to_timestamp(job.submit_time),
+ "tier": job.tier,
+ "who": job.who,
}
class Meta:
model = models.Job
- fields = '__all__'
+ fields = "__all__"
class JobSerializer(serializers.ModelSerializer):
def to_representation(self, job):
- option_collection_map = self.context['option_collection_map']
- submit = job.pop('submit_time')
- start = job.pop('start_time')
- end = job.pop('end_time')
- option_collection_hash = job.pop('option_collection_hash')
+ option_collection_map = self.context["option_collection_map"]
+ submit = job.pop("submit_time")
+ start = job.pop("start_time")
+ end = job.pop("end_time")
+ option_collection_hash = job.pop("option_collection_hash")
ret_val = list(job.values())
ret_val.extend(
[
models.Job.get_duration(submit, start, end), # duration
- option_collection_map.get(option_collection_hash, ''), # platform option
+ option_collection_map.get(option_collection_hash, ""), # platform option
]
)
return ret_val
class Meta:
model = models.Job
- fields = '__all__'
+ fields = "__all__"
class FailureClassificationSerializer(serializers.ModelSerializer):
class Meta:
model = models.FailureClassification
- fields = '__all__'
+ fields = "__all__"
class BugscacheSerializer(serializers.ModelSerializer):
class Meta:
model = models.Bugscache
- fields = '__all__'
+ fields = "__all__"
class FilesBugzillaMapSerializer(serializers.ModelSerializer):
def to_representation(self, file_bugzilla_component):
return {
- 'product': file_bugzilla_component['bugzilla_component__product'],
- 'component': file_bugzilla_component['bugzilla_component__component'],
+ "product": file_bugzilla_component["bugzilla_component__product"],
+ "component": file_bugzilla_component["bugzilla_component__component"],
}
class Meta:
model = models.BugzillaComponent
- fields = '__all__'
+ fields = "__all__"
class ClassifiedFailureSerializer(serializers.ModelSerializer):
@@ -144,13 +144,13 @@ class ClassifiedFailureSerializer(serializers.ModelSerializer):
class Meta:
model = models.ClassifiedFailure
- exclude = ['created', 'modified', 'text_log_errors']
+ exclude = ["created", "modified", "text_log_errors"]
class TextLogErrorMatchSerializer(serializers.ModelSerializer):
class Meta:
model = models.TextLogErrorMatch
- exclude = ['text_log_error']
+ exclude = ["text_log_error"]
class FailureLineNoStackSerializer(serializers.ModelSerializer):
@@ -158,7 +158,7 @@ class FailureLineNoStackSerializer(serializers.ModelSerializer):
class Meta:
model = models.FailureLine
- exclude = ['stack', 'stackwalk_stdout', 'stackwalk_stderr']
+ exclude = ["stack", "stackwalk_stdout", "stackwalk_stderr"]
def to_representation(self, failure_line):
"""
@@ -177,15 +177,15 @@ def to_representation(self, failure_line):
cf_serializer = ClassifiedFailureSerializer(classified_failures, many=True)
response = super().to_representation(failure_line)
- response['matches'] = tle_serializer.data
- response['classified_failures'] = cf_serializer.data
+ response["matches"] = tle_serializer.data
+ response["classified_failures"] = cf_serializer.data
return response
class TextLogErrorSerializer(serializers.ModelSerializer):
class Meta:
model = models.TextLogError
- exclude = ['step']
+ exclude = ["step"]
class TextLogStepSerializer(serializers.ModelSerializer):
@@ -197,7 +197,7 @@ def get_result(self, obj):
class Meta:
model = models.TextLogStep
- exclude = ['job']
+ exclude = ["job"]
class BugJobMapSerializer(serializers.ModelSerializer):
@@ -205,7 +205,7 @@ class BugJobMapSerializer(serializers.ModelSerializer):
class Meta:
model = models.BugJobMap
- fields = ['job_id', 'bug_id', 'created', 'who']
+ fields = ["job_id", "bug_id", "created", "who"]
class JobNoteSerializer(serializers.ModelSerializer):
@@ -218,7 +218,7 @@ class JobNoteSerializer(serializers.ModelSerializer):
class Meta:
model = models.JobNote
- fields = ['id', 'job_id', 'failure_classification_id', 'created', 'who', 'text']
+ fields = ["id", "job_id", "failure_classification_id", "created", "who", "text"]
class JobNoteJobSerializer(serializers.ModelSerializer):
@@ -229,15 +229,15 @@ def to_representation(self, job):
duration = models.Job.get_duration(submit, start, end)
return {
- 'task_id': job.taskcluster_metadata.task_id,
- 'job_type_name': job.job_type.name,
- 'result': job.result,
- 'duration': duration,
+ "task_id": job.taskcluster_metadata.task_id,
+ "job_type_name": job.job_type.name,
+ "result": job.result,
+ "duration": duration,
}
class Meta:
model = models.Job
- fields = ['duration', 'label', 'result', 'task_id']
+ fields = ["duration", "label", "result", "task_id"]
class JobNoteDetailSerializer(serializers.ModelSerializer):
@@ -249,12 +249,12 @@ class JobNoteDetailSerializer(serializers.ModelSerializer):
class Meta:
model = models.JobNote
fields = [
- 'id',
- 'job',
- 'failure_classification_name',
- 'created',
- 'who',
- 'text',
+ "id",
+ "job",
+ "failure_classification_name",
+ "created",
+ "who",
+ "text",
]
@@ -266,12 +266,12 @@ class CommitSerializer(serializers.ModelSerializer):
class Meta:
model = models.Commit
- fields = ['result_set_id', 'repository_id', 'revision', 'author', 'comments']
+ fields = ["result_set_id", "repository_id", "revision", "author", "comments"]
class PushSerializer(serializers.ModelSerializer):
def get_revisions(self, push):
- serializer = CommitSerializer(instance=push.commits.all().order_by('-id')[:20], many=True)
+ serializer = CommitSerializer(instance=push.commits.all().order_by("-id")[:20], many=True)
return serializer.data
def get_revision_count(self, push):
@@ -288,13 +288,13 @@ def get_push_timestamp(self, push):
class Meta:
model = models.Push
fields = [
- 'id',
- 'revision',
- 'author',
- 'revisions',
- 'revision_count',
- 'push_timestamp',
- 'repository_id',
+ "id",
+ "revision",
+ "author",
+ "revisions",
+ "revision_count",
+ "push_timestamp",
+ "repository_id",
]
@@ -303,7 +303,7 @@ class FailuresSerializer(serializers.ModelSerializer):
class Meta:
model = models.BugJobMap
- fields = ('bug_id', 'bug_count')
+ fields = ("bug_id", "bug_count")
class JobTypeNameField(serializers.Field):
@@ -313,7 +313,7 @@ def to_representation(self, value):
parts = value.split("-")
try:
_ = int(parts[-1])
- return '-'.join(parts[:-1])
+ return "-".join(parts[:-1])
except ValueError:
return value
@@ -328,11 +328,11 @@ class GroupNameSerializer(serializers.ModelSerializer):
class Meta:
model = models.JobLog
fields = (
- 'group_name',
- 'job_type_name',
- 'group_status',
- 'failure_classification',
- 'job_count',
+ "group_name",
+ "job_type_name",
+ "group_status",
+ "failure_classification",
+ "job_count",
)
@@ -340,16 +340,16 @@ class TestSuiteField(serializers.Field):
"""Removes all characters from test_suite that's also found in platform"""
def to_representation(self, value):
- build_type = value['build_type']
- platform = value['job__machine_platform__platform']
- test_suite = value['job__signature__job_type_name']
- new_string = test_suite.replace('test-{}'.format(platform), '')
- new_test_suite = new_string.replace(build_type, '')
- return re.sub(r'^.(/|-)|(/|-)$', '', new_test_suite)
+ build_type = value["build_type"]
+ platform = value["job__machine_platform__platform"]
+ test_suite = value["job__signature__job_type_name"]
+ new_string = test_suite.replace("test-{}".format(platform), "")
+ new_test_suite = new_string.replace(build_type, "")
+ return re.sub(r"^.(/|-)|(/|-)$", "", new_test_suite)
class FailuresByBugSerializer(serializers.ModelSerializer):
- test_suite = TestSuiteField(source='*')
+ test_suite = TestSuiteField(source="*")
platform = serializers.CharField(source="job__machine_platform__platform")
revision = serializers.CharField(source="job__push__revision")
tree = serializers.CharField(source="job__repository__name")
@@ -361,16 +361,16 @@ class FailuresByBugSerializer(serializers.ModelSerializer):
class Meta:
model = models.BugJobMap
fields = (
- 'push_time',
- 'platform',
- 'revision',
- 'test_suite',
- 'tree',
- 'build_type',
- 'job_id',
- 'bug_id',
- 'machine_name',
- 'lines',
+ "push_time",
+ "platform",
+ "revision",
+ "test_suite",
+ "tree",
+ "build_type",
+ "job_id",
+ "bug_id",
+ "machine_name",
+ "lines",
)
@@ -381,28 +381,28 @@ class FailureCountSerializer(serializers.ModelSerializer):
class Meta:
model = models.Push
- fields = ('date', 'test_runs', 'failure_count')
+ fields = ("date", "test_runs", "failure_count")
class FailuresQueryParamsSerializer(serializers.Serializer):
- startday = serializers.DateTimeField(format='%Y-%m-%d', input_formats=['%Y-%m-%d'])
- endday = serializers.DateTimeField(format='%Y-%m-%d', input_formats=['%Y-%m-%d'])
+ startday = serializers.DateTimeField(format="%Y-%m-%d", input_formats=["%Y-%m-%d"])
+ endday = serializers.DateTimeField(format="%Y-%m-%d", input_formats=["%Y-%m-%d"])
tree = serializers.CharField()
bug = serializers.IntegerField(required=False, allow_null=True, default=None)
def validate_bug(self, bug):
- if bug is None and self.context == 'requireBug':
- raise serializers.ValidationError('This field is required.')
+ if bug is None and self.context == "requireBug":
+ raise serializers.ValidationError("This field is required.")
return bug
def validate_tree(self, tree):
- if tree != 'all' and tree not in REPO_GROUPS:
+ if tree != "all" and tree not in REPO_GROUPS:
try:
models.Repository.objects.get(name=tree)
except ObjectDoesNotExist:
- raise serializers.ValidationError('{} does not exist.'.format(tree))
+ raise serializers.ValidationError("{} does not exist.".format(tree))
return tree
@@ -410,7 +410,7 @@ def validate_tree(self, tree):
class MachinePlatformSerializer(serializers.ModelSerializer):
class Meta:
model = models.MachinePlatform
- fields = ('id', 'platform')
+ fields = ("id", "platform")
class ChangelogSerializer(serializers.ModelSerializer):
@@ -419,25 +419,25 @@ class ChangelogSerializer(serializers.ModelSerializer):
class Meta:
model = Changelog
fields = (
- 'id',
- 'remote_id',
- 'date',
- 'author',
- 'message',
- 'description',
- 'owner',
- 'project',
- 'project_url',
- 'type',
- 'url',
- 'files',
+ "id",
+ "remote_id",
+ "date",
+ "author",
+ "message",
+ "description",
+ "owner",
+ "project",
+ "project_url",
+ "type",
+ "url",
+ "files",
)
class InvestigatedTestsSerializers(serializers.ModelSerializer):
- jobName = serializers.CharField(source='job_type.name')
- jobSymbol = serializers.CharField(source='job_type.symbol')
+ jobName = serializers.CharField(source="job_type.name")
+ jobSymbol = serializers.CharField(source="job_type.symbol")
class Meta:
model = models.InvestigatedTests
- fields = ('id', 'test', 'jobName', 'jobSymbol')
+ fields = ("id", "test", "jobName", "jobSymbol")
diff --git a/treeherder/webapp/api/urls.py b/treeherder/webapp/api/urls.py
index c3d5e15cc47..f9e596c61b1 100644
--- a/treeherder/webapp/api/urls.py
+++ b/treeherder/webapp/api/urls.py
@@ -32,147 +32,147 @@
# DEPRECATED (in process): The UI is transitioning to the /jobs/ endpoint
# from the default_router.
project_bound_router.register(
- r'jobs',
+ r"jobs",
jobs.JobsProjectViewSet,
- basename='jobs',
+ basename="jobs",
)
project_bound_router.register(
- r'push',
+ r"push",
push.PushViewSet,
- basename='push',
+ basename="push",
)
project_bound_router.register(
- r'investigated-tests',
+ r"investigated-tests",
investigated_test.InvestigatedViewSet,
- basename='investigated-tests',
+ basename="investigated-tests",
)
project_bound_router.register(
- r'note',
+ r"note",
note.NoteViewSet,
- basename='note',
+ basename="note",
)
project_bound_router.register(
- r'classification',
+ r"classification",
classification.ClassificationViewSet,
- basename='classification',
+ basename="classification",
)
project_bound_router.register(
- r'bug-job-map',
+ r"bug-job-map",
bug.BugJobMapViewSet,
- basename='bug-job-map',
+ basename="bug-job-map",
)
project_bound_router.register(
- r'job-log-url',
+ r"job-log-url",
job_log_url.JobLogUrlViewSet,
- basename='job-log-url',
+ basename="job-log-url",
)
project_bound_router.register(
- r'performance/data', performance_data.PerformanceDatumViewSet, basename='performance-data'
+ r"performance/data", performance_data.PerformanceDatumViewSet, basename="performance-data"
)
project_bound_router.register(
- r'performance/signatures',
+ r"performance/signatures",
performance_data.PerformanceSignatureViewSet,
- basename='performance-signatures',
+ basename="performance-signatures",
)
project_bound_router.register(
- r'performance/platforms',
+ r"performance/platforms",
performance_data.PerformancePlatformViewSet,
- basename='performance-signatures-platforms',
+ basename="performance-signatures-platforms",
)
# refdata endpoints:
default_router = routers.DefaultRouter()
-default_router.register(r'jobs', jobs.JobsViewSet, basename='jobs')
-default_router.register(r'repository', refdata.RepositoryViewSet)
+default_router.register(r"jobs", jobs.JobsViewSet, basename="jobs")
+default_router.register(r"repository", refdata.RepositoryViewSet)
default_router.register(
- r'taskclustermetadata', refdata.TaskclusterMetadataViewSet, basename='taskclustermetadata'
+ r"taskclustermetadata", refdata.TaskclusterMetadataViewSet, basename="taskclustermetadata"
)
default_router.register(
- r'optioncollectionhash', refdata.OptionCollectionHashViewSet, basename='optioncollectionhash'
+ r"optioncollectionhash", refdata.OptionCollectionHashViewSet, basename="optioncollectionhash"
)
-default_router.register(r'failureclassification', refdata.FailureClassificationViewSet)
+default_router.register(r"failureclassification", refdata.FailureClassificationViewSet)
default_router.register(
- r'bugzilla-component',
+ r"bugzilla-component",
bug_creation.FilesBugzillaMapViewSet,
- basename='bugzilla-component',
+ basename="bugzilla-component",
)
-default_router.register(r'user', refdata.UserViewSet, basename='user')
+default_router.register(r"user", refdata.UserViewSet, basename="user")
default_router.register(
- r'machineplatforms', machine_platforms.MachinePlatformsViewSet, basename='machineplatforms'
+ r"machineplatforms", machine_platforms.MachinePlatformsViewSet, basename="machineplatforms"
)
default_router.register(
- r'performance/tag', performance_data.PerformanceTagViewSet, basename='performance-tags'
+ r"performance/tag", performance_data.PerformanceTagViewSet, basename="performance-tags"
)
default_router.register(
- r'performance/alertsummary',
+ r"performance/alertsummary",
performance_data.PerformanceAlertSummaryViewSet,
- basename='performance-alert-summaries',
+ basename="performance-alert-summaries",
)
default_router.register(
- r'performance/alert', performance_data.PerformanceAlertViewSet, basename='performance-alerts'
+ r"performance/alert", performance_data.PerformanceAlertViewSet, basename="performance-alerts"
)
default_router.register(
- r'performance/framework',
+ r"performance/framework",
performance_data.PerformanceFrameworkViewSet,
- basename='performance-frameworks',
+ basename="performance-frameworks",
)
default_router.register(
- r'performance/bug-template',
+ r"performance/bug-template",
performance_data.PerformanceBugTemplateViewSet,
- basename='performance-bug-template',
+ basename="performance-bug-template",
)
default_router.register(
- r'performance/issue-tracker',
+ r"performance/issue-tracker",
performance_data.PerformanceIssueTrackerViewSet,
- basename='performance-issue-tracker',
+ basename="performance-issue-tracker",
)
default_router.register(
- r'performance/validity-dashboard',
+ r"performance/validity-dashboard",
performance_data.TestSuiteHealthViewSet,
- basename='validity-dashboard',
+ basename="validity-dashboard",
)
-default_router.register(r'bugzilla', bugzilla.BugzillaViewSet, basename='bugzilla')
-default_router.register(r'auth', auth.AuthViewSet, basename='auth')
-default_router.register(r'changelog', changelog.ChangelogViewSet, basename='changelog')
+default_router.register(r"bugzilla", bugzilla.BugzillaViewSet, basename="bugzilla")
+default_router.register(r"auth", auth.AuthViewSet, basename="auth")
+default_router.register(r"changelog", changelog.ChangelogViewSet, basename="changelog")
urlpatterns = [
- re_path(r'^groupsummary/$', groups.SummaryByGroupName.as_view(), name='groupsummary'),
- re_path(r'^project/(?P<project>[\w-]{0,50})/', include(project_bound_router.urls)),
- re_path(r'^', include(default_router.urls)),
- re_path(r'^failures/$', intermittents_view.Failures.as_view(), name='failures'),
+ re_path(r"^groupsummary/$", groups.SummaryByGroupName.as_view(), name="groupsummary"),
+ re_path(r"^project/(?P[\w-]{0,50})/", include(project_bound_router.urls)),
+ re_path(r"^", include(default_router.urls)),
+ re_path(r"^failures/$", intermittents_view.Failures.as_view(), name="failures"),
re_path(
- r'^failuresbybug/$',
+ r"^failuresbybug/$",
intermittents_view.FailuresByBug.as_view(),
- name='failures-by-bug',
+ name="failures-by-bug",
),
- re_path(r'^failurecount/$', intermittents_view.FailureCount.as_view(), name='failure-count'),
- re_path(r'^infracompare/$', infra_compare.InfraCompareView.as_view(), name='infra-compare'),
+ re_path(r"^failurecount/$", intermittents_view.FailureCount.as_view(), name="failure-count"),
+ re_path(r"^infracompare/$", infra_compare.InfraCompareView.as_view(), name="infra-compare"),
re_path(
- r'^performance/summary/$',
+ r"^performance/summary/$",
performance_data.PerformanceSummary.as_view(),
- name='performance-summary',
+ name="performance-summary",
),
re_path(
- r'^performance/alertsummary-tasks/$',
+ r"^performance/alertsummary-tasks/$",
performance_data.PerformanceAlertSummaryTasks.as_view(),
- name='performance-alertsummary-tasks',
+ name="performance-alertsummary-tasks",
),
re_path(
- r'^perfcompare/results/$',
+ r"^perfcompare/results/$",
performance_data.PerfCompareResults.as_view(),
- name='perfcompare-results',
+ name="perfcompare-results",
),
- re_path(r'^csp-report/$', csp_report.csp_report_collector, name='csp-report'),
- re_path(r'^schema/', get_schema_view(title='Treeherder Rest API'), name='openapi-schema'),
+ re_path(r"^csp-report/$", csp_report.csp_report_collector, name="csp-report"),
+ re_path(r"^schema/", get_schema_view(title="Treeherder Rest API"), name="openapi-schema"),
]
diff --git a/treeherder/webapp/api/utils.py b/treeherder/webapp/api/utils.py
index c71cef9c533..e0a615b9427 100644
--- a/treeherder/webapp/api/utils.py
+++ b/treeherder/webapp/api/utils.py
@@ -13,22 +13,22 @@
# firefox-releases: mozilla-beta, mozilla-release
# comm-releases: comm-beta, comm-release
REPO_GROUPS = {
- 'trunk': [1, 2, 77],
- 'firefox-releases': [6, 7],
- 'comm-releases': [38, 135],
+ "trunk": [1, 2, 77],
+ "firefox-releases": [6, 7],
+ "comm-releases": [38, 135],
}
FIVE_DAYS = 432000
class GroupConcat(Aggregate):
- function = 'GROUP_CONCAT'
- template = '%(function)s(%(distinct)s%(expressions)s)'
+ function = "GROUP_CONCAT"
+ template = "%(function)s(%(distinct)s%(expressions)s)"
allow_distinct = True
def __init__(self, expression, distinct=False, **extra):
super().__init__(
- expression, distinct='DISTINCT ' if distinct else '', output_field=CharField(), **extra
+ expression, distinct="DISTINCT " if distinct else "", output_field=CharField(), **extra
)
@@ -58,7 +58,7 @@ def get_end_of_day(date):
def get_artifact_list(root_url, task_id):
- artifacts_url = taskcluster_urls.api(root_url, 'queue', 'v1', f"task/{task_id}/artifacts")
+ artifacts_url = taskcluster_urls.api(root_url, "queue", "v1", f"task/{task_id}/artifacts")
artifacts = {"artifacts": []}
try:
artifacts = fetch_json(artifacts_url)
@@ -71,12 +71,12 @@ def get_artifact_list(root_url, task_id):
def get_profile_artifact_url(alert, task_metadata):
tc_root_url = cache.get("tc_root_url", "")
# Return a string to tell that task_id wasn't found
- if not task_metadata.get('task_id') or not tc_root_url:
+ if not task_metadata.get("task_id") or not tc_root_url:
return "task_id not found"
# If the url was already cached, don't calculate again, just return it
- if cache.get(task_metadata.get('task_id')):
- return cache.get(task_metadata.get('task_id'))
- artifacts_json = get_artifact_list(tc_root_url, task_metadata.get('task_id'))
+ if cache.get(task_metadata.get("task_id")):
+ return cache.get(task_metadata.get("task_id"))
+ artifacts_json = get_artifact_list(tc_root_url, task_metadata.get("task_id"))
profile_artifact = [
artifact
for artifact in artifacts_json
@@ -92,6 +92,6 @@ def get_profile_artifact_url(alert, task_metadata):
artifact_url = (
f"{task_url}/runs/{str(task_metadata['retry_id'])}/artifacts/{profile_artifact[0]['name']}"
)
- cache.set(task_metadata.get('task_id'), artifact_url, FIVE_DAYS)
+ cache.set(task_metadata.get("task_id"), artifact_url, FIVE_DAYS)
return artifact_url
diff --git a/treeherder/workers/stats.py b/treeherder/workers/stats.py
index 5742f108838..4217876de2f 100644
--- a/treeherder/workers/stats.py
+++ b/treeherder/workers/stats.py
@@ -19,13 +19,13 @@ def get_stats_client():
)
-@shared_task(name='publish-stats')
+@shared_task(name="publish-stats")
def publish_stats():
"""
Publish runtime stats on statsd
"""
stats_client = get_stats_client()
- logger.info('Publishing runtime statistics to statsd')
+ logger.info("Publishing runtime statistics to statsd")
end_date = timezone.now()
# Round the date to the current date range
# This should not overlapse as the beat is set as a relative cron based delay in minutes
@@ -36,41 +36,41 @@ def publish_stats():
)
start_date = end_date - timedelta(minutes=settings.CELERY_STATS_PUBLICATION_DELAY)
- logger.debug(f'Reading data ingested from {start_date} to {end_date}')
+ logger.debug(f"Reading data ingested from {start_date} to {end_date}")
# Nb of pushes
pushes_count = Push.objects.filter(time__lte=end_date, time__gt=start_date).count()
- logger.info(f'Ingested {pushes_count} pushes')
+ logger.info(f"Ingested {pushes_count} pushes")
if pushes_count:
- stats_client.incr('push', pushes_count)
+ stats_client.incr("push", pushes_count)
# Compute stats for jobs in a single request
jobs_stats = (
Job.objects.filter(end_time__lte=end_date, end_time__gt=start_date)
- .values('push__repository__name', 'state')
- .annotate(count=Count('id'))
- .values_list('push__repository__name', 'state', 'count')
+ .values("push__repository__name", "state")
+ .annotate(count=Count("id"))
+ .values_list("push__repository__name", "state", "count")
)
# nb of job total
jobs_total = sum(ct for _, _, ct in jobs_stats)
- logger.info(f'Ingested {jobs_total} jobs in total')
+ logger.info(f"Ingested {jobs_total} jobs in total")
if jobs_total:
- stats_client.incr('jobs', jobs_total)
+ stats_client.incr("jobs", jobs_total)
# nb of job per repo
jobs_per_repo = {
key: sum(ct for k, ct in vals)
for key, vals in groupby(sorted((repo, ct) for repo, _, ct in jobs_stats), lambda x: x[0])
}
- logger.debug(f'Jobs per repo: {jobs_per_repo}')
+ logger.debug(f"Jobs per repo: {jobs_per_repo}")
for key, value in jobs_per_repo.items():
- stats_client.incr(f'jobs_repo.{key}', value)
+ stats_client.incr(f"jobs_repo.{key}", value)
# nb of job per state
jobs_per_state = {
key: sum(ct for k, ct in vals)
for key, vals in groupby(sorted((state, ct) for _, state, ct in jobs_stats), lambda x: x[0])
}
- logger.debug(f'Jobs per state : {jobs_per_state}')
+ logger.debug(f"Jobs per state : {jobs_per_state}")
for key, value in jobs_per_state.items():
- stats_client.incr(f'jobs_state.{key}', value)
+ stats_client.incr(f"jobs_state.{key}", value)
From 99ad7718e6e1a4bfe25d58c744360222641c02d2 Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Thu, 25 Jan 2024 11:33:57 +0100
Subject: [PATCH 005/128] Minimal changes introduced by Ruff formatter after
double quotes enforcement
---
.pre-commit-config.yaml | 6 +-----
pyproject.toml | 10 ++++------
.../test_report_backfill_outcome.py | 2 +-
.../perf/management/commands/reassign_perf_data.py | 4 +---
treeherder/perf/models.py | 6 ++----
treeherder/webapp/api/performance_data.py | 4 +---
6 files changed, 10 insertions(+), 22 deletions(-)
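The hunks below are mechanical follow-ups: with Black dropped from pre-commit in favour of ruff-format, the formatter re-folds a few constructs that Black had wrapped across lines once the literals use double quotes. A minimal before/after sketch of the pattern, with an invented variable rather than code from this repository:

    use_cases = ["rename_tp6_subtests"]  # hypothetical value, for illustration only

    # Black-era layout: the .format() call was wrapped over several lines.
    help_text = "Available use cases: {}".format(
        ",".join(use_cases)
    )

    # ruff format folds the call back onto one line while staying under line-length = 100.
    help_text = "Available use cases: {}".format(",".join(use_cases))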
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f45e6642c6a..1f5317503da 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,9 +17,5 @@ repos:
hooks:
- id: ruff
args: [--fix]
- - repo: https://github.com/psf/black
- rev: 23.3.0
- hooks:
- - id: black
- language_version: python3.9
+ - id: ruff-format
exclude: ^treeherder/.*/migrations
diff --git a/pyproject.toml b/pyproject.toml
index cfc336d8848..8efc5091277 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,11 +21,6 @@ mkdocs = { version = "==1.4.2", optional = true }
mkdocs-material = { version = "==8.5.11", optional = true }
mdx_truly_sane_lists = { version = "1.3", optional = true }
-[tool.black]
-line-length = 100
-target-version = ['py39']
-include = '\.pyi?$'
-
[tool.ruff]
# Same as Black.
line-length = 100
@@ -46,9 +41,12 @@ select = [
ignore = [
# E501: line too long
- "E501"
+ "E501",
]
+# Also lint/format pyi files
+extend-include = ["*.pyi"]
+
[tool.ruff.per-file-ignores]
# Ignore `module-import-not-at-top-of-file` rule of `pycodestyle`
"treeherder/model/models.py" = ["E402"]
diff --git a/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py b/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py
index 7416e1f7ac1..b8eeec56807 100644
--- a/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py
+++ b/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py
@@ -43,7 +43,7 @@ def test_email_is_still_sent_if_context_is_too_corrupt_to_be_actionable(
record_ready_for_processing,
sherlock_settings,
broken_context_str,
- tc_notify_mock
+ tc_notify_mock,
# Note: parametrizes the test
):
record_ready_for_processing.context = broken_context_str
diff --git a/treeherder/perf/management/commands/reassign_perf_data.py b/treeherder/perf/management/commands/reassign_perf_data.py
index 8b824fb313b..f6056b01545 100644
--- a/treeherder/perf/management/commands/reassign_perf_data.py
+++ b/treeherder/perf/management/commands/reassign_perf_data.py
@@ -41,9 +41,7 @@ def add_arguments(self, parser):
metavar="USE CASE",
help="""Rename "old" Raptor tp6 subtests, by pointing perf alerts & datum to new signatures.
Cannot be used in conjunction with --from/--to arguments.
- Available use cases: {}""".format(
- ",".join(USE_CASES)
- ),
+ Available use cases: {}""".format(",".join(USE_CASES)),
)
parser.add_argument(
"--keep-leftovers",
diff --git a/treeherder/perf/models.py b/treeherder/perf/models.py
index 2751cfabcc4..16a2176ce10 100644
--- a/treeherder/perf/models.py
+++ b/treeherder/perf/models.py
@@ -535,8 +535,7 @@ def save(self, *args, **kwargs):
# or absence of a related summary
if self.related_summary and self.status not in self.RELATIONAL_STATUS_IDS:
raise ValidationError(
- "Related summary set but status not in "
- "'{}'!".format(
+ "Related summary set but status not in " "'{}'!".format(
", ".join(
[
STATUS[1]
@@ -548,8 +547,7 @@ def save(self, *args, **kwargs):
)
if not self.related_summary and self.status not in self.UNRELATIONAL_STATUS_IDS:
raise ValidationError(
- "Related summary not set but status not in "
- "'{}'!".format(
+ "Related summary not set but status not in " "'{}'!".format(
", ".join(
[
STATUS[1]
diff --git a/treeherder/webapp/api/performance_data.py b/treeherder/webapp/api/performance_data.py
index e26d72791f8..58a646bd2ed 100644
--- a/treeherder/webapp/api/performance_data.py
+++ b/treeherder/webapp/api/performance_data.py
@@ -751,9 +751,7 @@ def list(self, request):
"push_timestamp",
"push__revision",
"performancedatumreplicate__value",
- ).order_by(
- "push_timestamp", "push_id", "job_id"
- ):
+ ).order_by("push_timestamp", "push_id", "job_id"):
if replicate_value is not None:
item["data"].append(
{
From 7cd8ee581795c213af0585bbc739a2087c531b71 Mon Sep 17 00:00:00 2001
From: Yoann Schneider <114239491+yschneider-sinneria@users.noreply.github.com>
Date: Thu, 1 Feb 2024 15:01:27 +0100
Subject: [PATCH 006/128] Bug 1823654 - Ignore Python Double quotes commit in
git blame (#7902)
* Ignore Double quotes commit in git blame
* Update .git-blame-ignore-revs
---------
Co-authored-by: Sebastian Hengst
---
.git-blame-ignore-revs | 2 ++
1 file changed, 2 insertions(+)
create mode 100644 .git-blame-ignore-revs
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 00000000000..b35d99602c5
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# Switch to double quotes everywhere in Python
+cfb19a5ef8eb49c4b74d2356eeefaa242ccc51f0
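The file only takes effect where a blame consumer knows about it: GitHub's blame view honours a root-level .git-blame-ignore-revs automatically, while local checkouts need either git blame --ignore-revs-file .git-blame-ignore-revs per invocation or a one-time git config blame.ignoreRevsFile .git-blame-ignore-revs.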
From e95d7033be04a3d8358734ecbaa28924b7c95a98 Mon Sep 17 00:00:00 2001
From: Yoann Schneider <114239491+yschneider-sinneria@users.noreply.github.com>
Date: Fri, 2 Feb 2024 21:02:10 +0100
Subject: [PATCH 007/128] Bug 1823654 - Introduce pyupgrade (#7904)
* Ruff Auto fix
* Ruff unsafe fixes auto fix
* Use builtin list instead of typing.List
---------
Co-authored-by: Sebastian Hengst
---
misc/compare_pushes.py | 2 +-
pyproject.toml | 2 +
tests/autoclassify/utils.py | 4 +-
tests/conftest.py | 10 ++--
tests/e2e/test_job_ingestion.py | 2 +-
tests/etl/test_perf_data_load.py | 3 +-
tests/etl/test_pushlog.py | 2 +-
tests/etl/test_text.py | 1 -
.../intermittents_commenter/test_commenter.py | 2 +-
.../test_log_view_artifact_builder.py | 4 +-
tests/log_parser/test_performance_parser.py | 2 +-
.../test_backfill_report_maintainer.py | 3 +-
.../test_common_behaviour.py | 8 +--
.../test_criteria_tracker.py | 2 +-
.../test_engineer_traction.py | 3 +-
tests/push_health/test_usage.py | 2 +-
tests/sampledata.py | 56 ++++++-------------
tests/services/test_taskcluster.py | 8 +--
tests/test_dockerflow.py | 2 +-
tests/test_utils.py | 2 +-
tests/webapp/api/test_bug_job_map_api.py | 4 +-
tests/webapp/api/test_bugzilla.py | 2 -
tests/webapp/api/test_jobs_api.py | 16 ++----
.../webapp/api/test_performance_alerts_api.py | 2 +-
.../api/test_performance_alertsummary_api.py | 2 +-
.../api/test_performance_bug_template_api.py | 14 ++---
tests/webapp/api/test_performance_data_api.py | 28 ++++------
tests/webapp/api/test_version.py | 2 +-
treeherder/changelog/models.py | 2 +-
treeherder/client/setup.py | 3 +-
treeherder/client/thclient/client.py | 8 +--
treeherder/etl/management/commands/ingest.py | 18 +++---
.../management/commands/publish_to_pulse.py | 4 +-
.../etl/management/commands/pulse_listener.py | 2 +-
.../commands/pulse_listener_tasks.py | 2 +-
.../pulse_listener_tasks_classification.py | 2 +-
treeherder/etl/perf.py | 8 +--
treeherder/etl/push_loader.py | 4 +-
treeherder/etl/pushlog.py | 4 +-
treeherder/etl/taskcluster_pulse/handler.py | 8 +--
treeherder/etl/text.py | 3 +-
.../intermittents_commenter/commenter.py | 10 ++--
treeherder/log_parser/failureline.py | 2 +-
.../management/commands/test_parse_log.py | 2 +-
treeherder/log_parser/parsers.py | 32 +++++------
treeherder/log_parser/utils.py | 2 +-
treeherder/model/data_cycling/cyclers.py | 17 +++---
.../model/data_cycling/removal_strategies.py | 5 +-
.../model/data_cycling/signature_remover.py | 5 +-
treeherder/model/error_summary.py | 2 +-
.../commands/cache_failure_history.py | 2 +-
...hed_0022_modify_bugscache_and_bugjobmap.py | 1 -
.../0002_add_bugjobmap_model_manager.py | 1 -
.../0003_add_matcher_name_fields.py | 1 -
.../0004_populate_matcher_name_fields.py | 1 -
..._use_matcher_name_for_unique_constraint.py | 1 -
.../model/migrations/0006_drop_matcher_fks.py | 1 -
...n_classified_failures_and_failure_match.py | 1 -
.../migrations/0008_remove_failure_match.py | 1 -
.../0009_add_manager_to_push_and_job.py | 1 -
.../migrations/0010_remove_runnable_job.py | 1 -
.../migrations/0011_remove_matcher_table.py | 1 -
.../model/migrations/0012_branch_maxlen.py | 1 -
.../0013_add_index_to_push_revision.py | 1 -
.../0015_add_repository_tc_root_url.py | 1 -
treeherder/model/models.py | 43 +++++++-------
.../auto_perf_sheriffing/backfill_reports.py | 38 ++++++-------
.../perf/auto_perf_sheriffing/secretary.py | 3 +-
.../perf/auto_perf_sheriffing/sherlock.py | 21 ++++---
treeherder/perf/email.py | 18 +++---
.../commands/compute_criteria_formulas.py | 3 +-
.../management/commands/import_perf_data.py | 26 ++++-----
.../perf/management/commands/perf_sheriff.py | 3 +-
.../0001_squashed_0005_permit_github_links.py | 1 -
.../0006_add_alert_summary_notes.py | 1 -
.../migrations/0007_star_performancealert.py | 1 -
.../migrations/0008_add_confirming_state.py | 1 -
.../0009_non_nullable_issue_tracker.py | 1 -
.../0010_fix_signature_uniqueness.py | 1 -
.../0011_inc_extra_options_length.py | 1 -
.../0012_rename_summary_last_updated.py | 1 -
treeherder/perf/models.py | 38 ++++++-------
.../sheriffing_criteria/bugzilla_formulas.py | 25 ++++-----
.../sheriffing_criteria/criteria_tracking.py | 16 +++---
treeherder/perfalert/perfalert/__init__.py | 2 +-
treeherder/push_health/tests.py | 8 +--
treeherder/push_health/usage.py | 2 +-
treeherder/push_health/utils.py | 2 +-
treeherder/services/pulse/consumers.py | 10 ++--
treeherder/services/taskcluster.py | 5 +-
treeherder/utils/github.py | 16 +++---
treeherder/utils/http.py | 2 +-
treeherder/utils/taskcluster.py | 6 +-
treeherder/webapp/api/bugzilla.py | 2 -
treeherder/webapp/api/infra_serializers.py | 2 +-
treeherder/webapp/api/investigated_test.py | 20 ++-----
treeherder/webapp/api/jobs.py | 18 +++---
treeherder/webapp/api/note.py | 6 +-
treeherder/webapp/api/perfcompare_utils.py | 6 +-
treeherder/webapp/api/performance_data.py | 9 ++-
.../webapp/api/performance_serializers.py | 4 +-
treeherder/webapp/api/push.py | 32 ++++-------
treeherder/webapp/api/serializers.py | 4 +-
103 files changed, 320 insertions(+), 429 deletions(-)
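Most of the churn in the file list above comes from Ruff's pyupgrade (UP) rules enabled in pyproject.toml below, together with the removal of now-redundant coding cookies and the switch from the external mock package to unittest.mock. The recurring autofixes reduce to a handful of patterns; a condensed, hypothetical sketch (names invented for illustration, not code from this repository):

    # PEP 585 builtin generics replace typing.List / typing.Tuple imports
    def load_fixtures(paths: list[str]) -> list[dict]:
        fixtures = []
        for i, path in enumerate(paths):
            # str.format() calls become f-strings
            print(f"loading fixture {i} from {path}")
            # the redundant "r" mode argument is dropped from open()
            with open(path) as handle:
                fixtures.append({"name": path, "content": handle.read()})
        return fixtures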
diff --git a/misc/compare_pushes.py b/misc/compare_pushes.py
index 47853ee9543..c23844d660d 100755
--- a/misc/compare_pushes.py
+++ b/misc/compare_pushes.py
@@ -27,7 +27,7 @@ def main(args):
# Support comma separated projects
projects = args.projects.split(",")
for _project in projects:
- logger.info("Comparing {} against production.".format(_project))
+ logger.info(f"Comparing {_project} against production.")
# Remove properties that are irrelevant for the comparison
pushes = compare_to_client.get_pushes(_project, count=50)
for _push in sorted(pushes, key=lambda push: push["revision"]):
diff --git a/pyproject.toml b/pyproject.toml
index 8efc5091277..0feb7161416 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -37,6 +37,8 @@ select = [
"W",
# pyflakes
"F",
+ # pyupgrade
+ "UP",
]
ignore = [
diff --git a/tests/autoclassify/utils.py b/tests/autoclassify/utils.py
index 90c4d669ffd..3fc5241cef7 100644
--- a/tests/autoclassify/utils.py
+++ b/tests/autoclassify/utils.py
@@ -42,10 +42,10 @@ def create_failure_lines(job, failure_line_list, start_line=0):
job_log = JobLog.objects.create(
job=job,
name="{}{}".format(base_data.get("test"), job.id),
- url="bar{}".format(i),
+ url=f"bar{i}",
status=1,
)
- print("create jobLog for job id: {}".format(job.id))
+ print(f"create jobLog for job id: {job.id}")
failure_line.job_log = job_log
failure_line.save()
failure_lines.append(failure_line)
diff --git a/tests/conftest.py b/tests/conftest.py
index 7e7b4527df7..0b485d6e9a9 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -427,7 +427,7 @@ def eleven_job_blobs(sample_data, sample_push, test_repository, mock_log_parser)
del blob["sources"]
blob["revision"] = sample_push[push_index]["revision"]
- blob["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcXL{}".format(task_id_index)
+ blob["taskcluster_task_id"] = f"V3SVuxO8TFy37En_6HcXL{task_id_index}"
blob["taskcluster_retry_id"] = "0"
blobs.append(blob)
@@ -463,7 +463,7 @@ def eleven_job_blobs_new_date(sample_data, sample_push, test_repository, mock_lo
del blob["sources"]
blob["revision"] = sample_push[push_index]["revision"]
- blob["taskcluster_task_id"] = "V3SVuxO8TFy37En_6HcX{:0>2}".format(task_id_index)
+ blob["taskcluster_task_id"] = f"V3SVuxO8TFy37En_6HcX{task_id_index:0>2}"
blob["taskcluster_retry_id"] = "0"
blob["job"]["revision"] = sample_push[push_index]["revision"]
blob["job"]["submit_timestamp"] = sample_push[push_index]["push_timestamp"]
@@ -843,7 +843,7 @@ def _fetch_data(self, project):
% project
)
files_bugzilla_data = None
- file_name = "files_bugzilla_map_%s_%s.json" % (project, self.run_id)
+ file_name = f"files_bugzilla_map_{project}_{self.run_id}.json"
exception = None
try:
tests_folder = os.path.dirname(__file__)
@@ -1117,7 +1117,7 @@ def bug_data(eleven_jobs_stored, test_repository, test_push, bugs):
bug_id = bugs[0].id
job_id = jobs[0].id
th_models.BugJobMap.create(job_id=job_id, bug_id=bug_id)
- query_string = "?startday=2012-05-09&endday=2018-05-10&tree={}".format(test_repository.name)
+ query_string = f"?startday=2012-05-09&endday=2018-05-10&tree={test_repository.name}"
return {
"tree": test_repository.name,
@@ -1270,7 +1270,7 @@ def __init__(self, *prior_dirs):
def __call__(self, fixture_filename):
fixture_path = join(*self._prior_dirs, fixture_filename)
- with open(fixture_path, "r") as f:
+ with open(fixture_path) as f:
return json.load(f)
diff --git a/tests/e2e/test_job_ingestion.py b/tests/e2e/test_job_ingestion.py
index 7dff6573c11..3c8264231a7 100644
--- a/tests/e2e/test_job_ingestion.py
+++ b/tests/e2e/test_job_ingestion.py
@@ -1,4 +1,4 @@
-from mock import MagicMock
+from unittest.mock import MagicMock
from tests.test_utils import add_log_response
from treeherder.etl.jobs import store_job_data
diff --git a/tests/etl/test_perf_data_load.py b/tests/etl/test_perf_data_load.py
index 518af66550b..3aa459f725f 100644
--- a/tests/etl/test_perf_data_load.py
+++ b/tests/etl/test_perf_data_load.py
@@ -5,7 +5,6 @@
import time
import pytest
-from typing import List
from django.core.management import call_command
from django.db import IntegrityError
@@ -87,7 +86,7 @@ def sample_perf_artifact() -> dict:
@pytest.fixture
-def sibling_perf_artifacts(sample_perf_artifact: dict) -> List[dict]:
+def sibling_perf_artifacts(sample_perf_artifact: dict) -> list[dict]:
"""intended to belong to the same job"""
artifacts = [copy.deepcopy(sample_perf_artifact) for _ in range(3)]
diff --git a/tests/etl/test_pushlog.py b/tests/etl/test_pushlog.py
index 8da2f658d8f..2cee7d84945 100644
--- a/tests/etl/test_pushlog.py
+++ b/tests/etl/test_pushlog.py
@@ -104,7 +104,7 @@ def test_ingest_hg_pushlog_cache_last_push(test_repository, test_base_dir, activ
pushes = pushlog_dict["pushes"]
max_push_id = max(int(k) for k in pushes.keys())
- cache_key = "{}:last_push_id".format(test_repository.name)
+ cache_key = f"{test_repository.name}:last_push_id"
assert cache.get(cache_key) == max_push_id
diff --git a/tests/etl/test_text.py b/tests/etl/test_text.py
index 9046ae12c6d..62950c3df1b 100644
--- a/tests/etl/test_text.py
+++ b/tests/etl/test_text.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
from treeherder.etl.text import astral_filter, filter_re
diff --git a/tests/intermittents_commenter/test_commenter.py b/tests/intermittents_commenter/test_commenter.py
index c32a0e2d60c..965521bf6b2 100644
--- a/tests/intermittents_commenter/test_commenter.py
+++ b/tests/intermittents_commenter/test_commenter.py
@@ -37,7 +37,7 @@ def test_intermittents_commenter(bug_data):
comment_params = process.generate_bug_changes(startday, endday, alt_startday, alt_endday)
- with open("tests/intermittents_commenter/expected_comment.text", "r") as comment:
+ with open("tests/intermittents_commenter/expected_comment.text") as comment:
expected_comment = comment.read()
print(len(expected_comment))
print(len(comment_params[0]["changes"]["comment"]["body"]))
diff --git a/tests/log_parser/test_log_view_artifact_builder.py b/tests/log_parser/test_log_view_artifact_builder.py
index 74b7160ce52..d1e345490fd 100644
--- a/tests/log_parser/test_log_view_artifact_builder.py
+++ b/tests/log_parser/test_log_view_artifact_builder.py
@@ -18,7 +18,7 @@ def do_test(log):
result file with the same prefix.
"""
- url = add_log_response("{}.txt.gz".format(log))
+ url = add_log_response(f"{log}.txt.gz")
builder = LogViewerArtifactBuilder(url)
lpc = ArtifactBuilderCollection(url, builders=builder)
@@ -31,7 +31,7 @@ def do_test(log):
# with open(SampleData().get_log_path("{0}.logview.json".format(log)), "w") as f:
# f.write(json.dumps(act, indent=2))
- exp = test_utils.load_exp("{0}.logview.json".format(log))
+ exp = test_utils.load_exp(f"{log}.logview.json")
assert act == exp
diff --git a/tests/log_parser/test_performance_parser.py b/tests/log_parser/test_performance_parser.py
index 34944a36789..1c36e142a96 100644
--- a/tests/log_parser/test_performance_parser.py
+++ b/tests/log_parser/test_performance_parser.py
@@ -27,6 +27,6 @@ def test_performance_log_parsing_malformed_perfherder_data():
}
],
}
- parser.parse_line("PERFHERDER_DATA: {}".format(json.dumps(valid_perfherder_data)), 3)
+ parser.parse_line(f"PERFHERDER_DATA: {json.dumps(valid_perfherder_data)}", 3)
assert parser.get_artifact() == [valid_perfherder_data]
diff --git a/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_backfill_report_maintainer.py b/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_backfill_report_maintainer.py
index 9729c58f394..682e23d07a4 100644
--- a/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_backfill_report_maintainer.py
+++ b/tests/perf/auto_perf_sheriffing/test_backfill_reports/test_backfill_report_maintainer.py
@@ -1,6 +1,5 @@
import random
import datetime
-from typing import Tuple
from treeherder.perf.auto_perf_sheriffing.backfill_reports import (
BackfillReportMaintainer,
@@ -141,7 +140,7 @@ def test_reports_are_updated_after_alert_summaries_change(
assert initial_records_timestamps != records_timestamps
-def __fetch_report_timestamps(test_perf_alert_summary) -> Tuple:
+def __fetch_report_timestamps(test_perf_alert_summary) -> tuple:
report = BackfillReport.objects.get(summary=test_perf_alert_summary)
report_timestamps = report.created, report.last_updated
records_timestamps = [record.created for record in report.records.all()]
diff --git a/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py b/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
index 994466fd9e4..d1c04998054 100644
--- a/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
+++ b/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
@@ -2,7 +2,7 @@
import pytest
from django.conf import settings
-from typing import List, Type, Callable
+from typing import Callable
from tests.perf.auto_sheriffing_criteria.conftest import CASSETTES_RECORDING_DATE
from treeherder.config.settings import BZ_DATETIME_FORMAT
@@ -18,15 +18,15 @@
pytestmark = [pytest.mark.freeze_time(CASSETTES_RECORDING_DATE, tick=True)]
-def bugzilla_formula_instances() -> List[BugzillaFormula]:
+def bugzilla_formula_instances() -> list[BugzillaFormula]:
return [EngineerTractionFormula(), FixRatioFormula()]
-def formula_instances() -> List[Callable]:
+def formula_instances() -> list[Callable]:
return bugzilla_formula_instances() + [TotalAlertsFormula()]
-def concrete_formula_classes() -> List[Type[BugzillaFormula]]:
+def concrete_formula_classes() -> list[type[BugzillaFormula]]:
return [EngineerTractionFormula, FixRatioFormula]
diff --git a/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py b/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
index 33d972fab38..6b49ff0d4ff 100644
--- a/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
+++ b/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
@@ -151,7 +151,7 @@ def should_take_more_than(seconds: float):
@pytest.fixture
def updatable_criteria_csv(tmp_path):
updatable_csv = tmp_path / "updatable-criteria.csv"
- with open(RECORD_TEST_PATH, "r") as file_:
+ with open(RECORD_TEST_PATH) as file_:
updatable_csv.write_text(file_.read())
return updatable_csv
diff --git a/tests/perf/auto_sheriffing_criteria/test_engineer_traction.py b/tests/perf/auto_sheriffing_criteria/test_engineer_traction.py
index 530d693e4d9..dde39207d04 100644
--- a/tests/perf/auto_sheriffing_criteria/test_engineer_traction.py
+++ b/tests/perf/auto_sheriffing_criteria/test_engineer_traction.py
@@ -1,7 +1,6 @@
import pytest
from datetime import datetime, timedelta
-from typing import List
from tests.perf.auto_sheriffing_criteria.conftest import CASSETTES_RECORDING_DATE
from treeherder.config.settings import BZ_DATETIME_FORMAT
@@ -44,7 +43,7 @@ def quantified_bugs(betamax_recorder) -> list:
@pytest.fixture
-def cooled_down_bugs(nonblock_session, quantified_bugs) -> List[dict]:
+def cooled_down_bugs(nonblock_session, quantified_bugs) -> list[dict]:
bugs = []
for bug in quantified_bugs:
created_at = datetime.strptime(bug["creation_time"], BZ_DATETIME_FORMAT)
diff --git a/tests/push_health/test_usage.py b/tests/push_health/test_usage.py
index 04fd6bbaf04..09da19fbf2b 100644
--- a/tests/push_health/test_usage.py
+++ b/tests/push_health/test_usage.py
@@ -34,7 +34,7 @@ def test_get_usage(push_usage, test_repository):
nrql = "SELECT%20max(needInvestigation)%20FROM%20push_health_need_investigation%20FACET%20revision%20SINCE%201%20DAY%20AGO%20TIMESERIES%20where%20repo%3D'{}'%20AND%20appName%3D'{}'".format(
"try", "treeherder-prod"
)
- new_relic_url = "{}?nrql={}".format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)
+ new_relic_url = f"{settings.NEW_RELIC_INSIGHTS_API_URL}?nrql={nrql}"
responses.add(
responses.GET,
diff --git a/tests/sampledata.py b/tests/sampledata.py
index 2296b67c331..99dda0f6d8d 100644
--- a/tests/sampledata.py
+++ b/tests/sampledata.py
@@ -5,89 +5,67 @@
class SampleData:
@classmethod
def get_perf_data(cls, filename):
- with open(
- "{0}/sample_data/artifacts/performance/{1}".format(os.path.dirname(__file__), filename)
- ) as f:
+ with open(f"{os.path.dirname(__file__)}/sample_data/artifacts/performance/{filename}") as f:
return json.load(f)
def __init__(self):
- self.job_data_file = "{0}/sample_data/job_data.txt".format(os.path.dirname(__file__))
+ self.job_data_file = f"{os.path.dirname(__file__)}/sample_data/job_data.txt"
- self.push_data_file = "{0}/sample_data/push_data.json".format(os.path.dirname(__file__))
+ self.push_data_file = f"{os.path.dirname(__file__)}/sample_data/push_data.json"
- self.logs_dir = "{0}/sample_data/logs".format(os.path.dirname(__file__))
+ self.logs_dir = f"{os.path.dirname(__file__)}/sample_data/logs"
- with open(
- "{0}/sample_data/artifacts/text_log_summary.json".format(os.path.dirname(__file__))
- ) as f:
+ with open(f"{os.path.dirname(__file__)}/sample_data/artifacts/text_log_summary.json") as f:
self.text_log_summary = json.load(f)
with open(
- "{0}/sample_data/pulse_consumer/taskcluster_pulse_messages.json".format(
+ "{}/sample_data/pulse_consumer/taskcluster_pulse_messages.json".format(
os.path.dirname(__file__)
)
) as f:
self.taskcluster_pulse_messages = json.load(f)
with open(
- "{0}/sample_data/pulse_consumer/taskcluster_tasks.json".format(
- os.path.dirname(__file__)
- )
+ f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/taskcluster_tasks.json"
) as f:
self.taskcluster_tasks = json.load(f)
with open(
- "{0}/sample_data/pulse_consumer/taskcluster_transformed_jobs.json".format(
+ "{}/sample_data/pulse_consumer/taskcluster_transformed_jobs.json".format(
os.path.dirname(__file__)
)
) as f:
self.taskcluster_transformed_jobs = json.load(f)
- with open(
- "{0}/sample_data/pulse_consumer/job_data.json".format(os.path.dirname(__file__))
- ) as f:
+ with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/job_data.json") as f:
self.pulse_jobs = json.load(f)
with open(
- "{0}/sample_data/pulse_consumer/transformed_job_data.json".format(
- os.path.dirname(__file__)
- )
+ f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_job_data.json"
) as f:
self.transformed_pulse_jobs = json.load(f)
- with open(
- "{0}/sample_data/pulse_consumer/github_push.json".format(os.path.dirname(__file__))
- ) as f:
+ with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/github_push.json") as f:
self.github_push = json.load(f)
with open(
- "{0}/sample_data/pulse_consumer/transformed_gh_push.json".format(
- os.path.dirname(__file__)
- )
+ f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_gh_push.json"
) as f:
self.transformed_github_push = json.load(f)
- with open(
- "{0}/sample_data/pulse_consumer/github_pr.json".format(os.path.dirname(__file__))
- ) as f:
+ with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/github_pr.json") as f:
self.github_pr = json.load(f)
with open(
- "{0}/sample_data/pulse_consumer/transformed_gh_pr.json".format(
- os.path.dirname(__file__)
- )
+ f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_gh_pr.json"
) as f:
self.transformed_github_pr = json.load(f)
- with open(
- "{0}/sample_data/pulse_consumer/hg_push.json".format(os.path.dirname(__file__))
- ) as f:
+ with open(f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/hg_push.json") as f:
self.hg_push = json.load(f)
with open(
- "{0}/sample_data/pulse_consumer/transformed_hg_push.json".format(
- os.path.dirname(__file__)
- )
+ f"{os.path.dirname(__file__)}/sample_data/pulse_consumer/transformed_hg_push.json"
) as f:
self.transformed_hg_push = json.load(f)
@@ -106,4 +84,4 @@ def initialize_data(self):
def get_log_path(self, name):
"""Returns the full path to a log file"""
- return "{0}/{1}".format(self.logs_dir, name)
+ return f"{self.logs_dir}/{name}"
diff --git a/tests/services/test_taskcluster.py b/tests/services/test_taskcluster.py
index d0244cb067b..3b55fe455ce 100644
--- a/tests/services/test_taskcluster.py
+++ b/tests/services/test_taskcluster.py
@@ -54,15 +54,15 @@ def test_filter_relevant_actions(self, actions_json, original_task, expected_act
def test_task_in_context(self):
# match
- tag_set_list, task_tags = [
+ tag_set_list, task_tags = (
load_json_fixture(f) for f in ("matchingTagSetList.json", "matchingTaskTags.json")
- ]
+ )
assert TaskclusterModelImpl._task_in_context(tag_set_list, task_tags) is True
# mismatch
- tag_set_list, task_tags = [
+ tag_set_list, task_tags = (
load_json_fixture(f) for f in ("mismatchingTagSetList.json", "mismatchingTaskTags.json")
- ]
+ )
assert TaskclusterModelImpl._task_in_context(tag_set_list, task_tags) is False
def test_get_action(self, actions_json, expected_backfill_task):
diff --git a/tests/test_dockerflow.py b/tests/test_dockerflow.py
index f8362c54e91..497fe3a9a45 100644
--- a/tests/test_dockerflow.py
+++ b/tests/test_dockerflow.py
@@ -9,7 +9,7 @@ def test_get_version(client):
response = client.get("/__version__")
assert response.status_code == 200
- with open(f"{settings.BASE_DIR}/version.json", "r") as version_file:
+ with open(f"{settings.BASE_DIR}/version.json") as version_file:
assert response.json() == json.loads(version_file.read())
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 81042a789f1..69eedc54d42 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -213,7 +213,7 @@ def add_log_response(filename):
Set up responses for a local gzipped log and return the url for it.
"""
log_path = SampleData().get_log_path(filename)
- log_url = "http://my-log.mozilla.org/{}".format(filename)
+ log_url = f"http://my-log.mozilla.org/{filename}"
with open(log_path, "rb") as log_file:
content = log_file.read()
diff --git a/tests/webapp/api/test_bug_job_map_api.py b/tests/webapp/api/test_bug_job_map_api.py
index a937b8b25f6..b7d8fadd748 100644
--- a/tests/webapp/api/test_bug_job_map_api.py
+++ b/tests/webapp/api/test_bug_job_map_api.py
@@ -95,7 +95,7 @@ def test_bug_job_map_detail(client, eleven_jobs_stored, test_repository, test_us
user=test_user,
)
- pk = "{0}-{1}".format(job.id, bug.id)
+ pk = f"{job.id}-{bug.id}"
resp = client.get(
reverse("bug-job-map-detail", kwargs={"project": test_repository.name, "pk": pk})
@@ -130,7 +130,7 @@ def test_bug_job_map_delete(
if not test_no_auth:
client.force_authenticate(user=test_user)
- pk = "{0}-{1}".format(job.id, bug.id)
+ pk = f"{job.id}-{bug.id}"
resp = client.delete(
reverse("bug-job-map-detail", kwargs={"project": test_repository.name, "pk": pk})
diff --git a/tests/webapp/api/test_bugzilla.py b/tests/webapp/api/test_bugzilla.py
index c02ca9fcf88..7f6d09f5508 100644
--- a/tests/webapp/api/test_bugzilla.py
+++ b/tests/webapp/api/test_bugzilla.py
@@ -1,5 +1,3 @@
-# coding: utf-8
-
import json
import responses
diff --git a/tests/webapp/api/test_jobs_api.py b/tests/webapp/api/test_jobs_api.py
index 4a2e453a7e7..7b1fb6e3b35 100644
--- a/tests/webapp/api/test_jobs_api.py
+++ b/tests/webapp/api/test_jobs_api.py
@@ -18,11 +18,9 @@ def test_job_list(client, eleven_jobs_stored, test_repository, offset, count, ex
endpoint.
"""
url = reverse("jobs-list", kwargs={"project": test_repository.name})
- params = "&".join(
- ["{}={}".format(k, v) for k, v in [("offset", offset), ("count", count)] if v]
- )
+ params = "&".join([f"{k}={v}" for k, v in [("offset", offset), ("count", count)] if v])
if params:
- url += "?{}".format(params)
+ url += f"?{params}"
resp = client.get(url)
assert resp.status_code == 200
response_dict = resp.json()
@@ -143,7 +141,7 @@ def test_job_list_filter_fields(client, eleven_jobs_stored, test_repository, fie
to make this test easy.
"""
url = reverse("jobs-list", kwargs={"project": test_repository.name})
- final_url = url + "?{}={}".format(fieldname, expected)
+ final_url = url + f"?{fieldname}={expected}"
resp = client.get(final_url)
assert resp.status_code == 200
first = resp.json()["results"][0]
@@ -245,11 +243,9 @@ def test_list_similar_jobs(client, eleven_jobs_stored, offset, count, expected_n
job = Job.objects.get(id=1)
url = reverse("jobs-similar-jobs", kwargs={"project": job.repository.name, "pk": job.id})
- params = "&".join(
- ["{}={}".format(k, v) for k, v in [("offset", offset), ("count", count)] if v]
- )
+ params = "&".join([f"{k}={v}" for k, v in [("offset", offset), ("count", count)] if v])
if params:
- url += "?{}".format(params)
+ url += f"?{params}"
resp = client.get(url)
assert resp.status_code == 200
@@ -288,7 +284,7 @@ def test_last_modified(
pass
url = reverse("jobs-list", kwargs={"project": test_repository.name})
- final_url = url + ("?{}={}".format(lm_key, lm_value))
+ final_url = url + (f"?{lm_key}={lm_value}")
resp = client.get(final_url)
assert resp.status_code == exp_status
diff --git a/tests/webapp/api/test_performance_alerts_api.py b/tests/webapp/api/test_performance_alerts_api.py
index aeb87ce93d0..6c92f63a3f8 100644
--- a/tests/webapp/api/test_performance_alerts_api.py
+++ b/tests/webapp/api/test_performance_alerts_api.py
@@ -673,4 +673,4 @@ def dump(an_alert):
for alert in alerts:
dump(alert)
for perf_datum in perf_data:
- pprint("PerfData(id={0.push_id}, push_timestamp={0.push_timestamp})".format(perf_datum))
+ pprint(f"PerfData(id={perf_datum.push_id}, push_timestamp={perf_datum.push_timestamp})")
diff --git a/tests/webapp/api/test_performance_alertsummary_api.py b/tests/webapp/api/test_performance_alertsummary_api.py
index 32b31b314e3..46c6e00fd24 100644
--- a/tests/webapp/api/test_performance_alertsummary_api.py
+++ b/tests/webapp/api/test_performance_alertsummary_api.py
@@ -31,7 +31,7 @@ def test_perf_alert_summary_onhold(test_repository_onhold, test_perf_framework):
for i in range(2):
Push.objects.create(
repository=test_repository_onhold,
- revision="1234abcd{}".format(i),
+ revision=f"1234abcd{i}",
author="foo@bar.com",
time=datetime.now(),
)
diff --git a/tests/webapp/api/test_performance_bug_template_api.py b/tests/webapp/api/test_performance_bug_template_api.py
index 089853dfbc3..08aced1258b 100644
--- a/tests/webapp/api/test_performance_bug_template_api.py
+++ b/tests/webapp/api/test_performance_bug_template_api.py
@@ -9,12 +9,12 @@ def test_perf_bug_template_api(client, test_perf_framework):
template_dicts = []
for framework, i in zip((test_perf_framework, framework2), range(2)):
dict = {
- "keywords": "keyword{}".format(i),
- "status_whiteboard": "sw{}".format(i),
- "default_component": "dfcom{}".format(i),
- "default_product": "dfprod{}".format(i),
- "cc_list": "foo{}@bar.com".format(i),
- "text": "my great text {}".format(i),
+ "keywords": f"keyword{i}",
+ "status_whiteboard": f"sw{i}",
+ "default_component": f"dfcom{i}",
+ "default_product": f"dfprod{i}",
+ "cc_list": f"foo{i}@bar.com",
+ "text": f"my great text {i}",
}
PerformanceBugTemplate.objects.create(framework=framework, **dict)
dict["framework"] = framework.id
@@ -27,7 +27,7 @@ def test_perf_bug_template_api(client, test_perf_framework):
# test that we can get just one (the usual case, probably)
resp = client.get(
- reverse("performance-bug-template-list") + "?framework={}".format(test_perf_framework.id)
+ reverse("performance-bug-template-list") + f"?framework={test_perf_framework.id}"
)
assert resp.status_code == 200
assert resp.json() == [template_dicts[0]]
diff --git a/tests/webapp/api/test_performance_data_api.py b/tests/webapp/api/test_performance_data_api.py
index 126a04d7698..8bc29282e4f 100644
--- a/tests/webapp/api/test_performance_data_api.py
+++ b/tests/webapp/api/test_performance_data_api.py
@@ -102,7 +102,7 @@ def test_performance_platforms_expired_test(client, test_perf_signature):
"performance-signatures-platforms-list",
kwargs={"project": test_perf_signature.repository.name},
)
- + "?interval={}".format(86400)
+ + "?interval=86400"
)
assert resp.status_code == 200
assert resp.json() == []
@@ -140,7 +140,7 @@ def test_performance_platforms_framework_filtering(client, test_perf_signature):
"performance-signatures-platforms-list",
kwargs={"project": test_perf_signature.repository.name},
)
- + "?framework={}".format(framework2.id)
+ + f"?framework={framework2.id}"
)
assert resp.status_code == 200
assert resp.json() == ["win7-a"]
@@ -259,7 +259,7 @@ def test_filter_data_by_no_retriggers(
resp = client.get(
reverse("performance-data-list", kwargs={"project": test_repository.name})
- + "?signatures={}&no_retriggers=true".format(test_perf_signature.signature_hash)
+ + f"?signatures={test_perf_signature.signature_hash}&no_retriggers=true"
)
assert resp.status_code == 200
datums = resp.data[test_perf_signature.signature_hash]
@@ -316,9 +316,7 @@ def test_filter_data_by_framework(
# Filtering by second framework
resp = client.get(
reverse("performance-data-list", kwargs={"project": test_repository.name})
- + "?signatures={}&framework={}".format(
- test_perf_signature.signature_hash, signature2.framework.id
- )
+ + f"?signatures={test_perf_signature.signature_hash}&framework={signature2.framework.id}"
)
assert resp.status_code == 200
datums = resp.data[test_perf_signature.signature_hash]
@@ -332,7 +330,7 @@ def test_filter_signatures_by_interval(client, test_perf_signature):
reverse(
"performance-signatures-list", kwargs={"project": test_perf_signature.repository.name}
)
- + "?interval={}".format(86400)
+ + "?interval=86400"
)
assert resp.status_code == 200
assert len(resp.json().keys()) == 1
@@ -354,7 +352,7 @@ def test_filter_signatures_by_range(
reverse(
"performance-signatures-list", kwargs={"project": test_perf_signature.repository.name}
)
- + "?start_date={}&end_date={}".format(start_date, end_date)
+ + f"?start_date={start_date}&end_date={end_date}"
)
assert resp.status_code == 200
assert len(resp.json().keys()) == exp_count
@@ -387,7 +385,7 @@ def test_filter_data_by_interval(
# going back interval of 1 day, should find 1 item
resp = client.get(
reverse("performance-data-list", kwargs={"project": test_repository.name})
- + "?signature_id={}&interval={}".format(test_perf_signature.id, interval)
+ + f"?signature_id={test_perf_signature.id}&interval={interval}"
)
assert resp.status_code == 200
@@ -424,9 +422,7 @@ def test_filter_data_by_range(
resp = client.get(
reverse("performance-data-list", kwargs={"project": test_repository.name})
- + "?signature_id={}&start_date={}&end_date={}".format(
- test_perf_signature.id, start_date, end_date
- )
+ + f"?signature_id={test_perf_signature.id}&start_date={start_date}&end_date={end_date}"
)
assert resp.status_code == 200
@@ -472,7 +468,7 @@ def test_filter_data_by_signature(
]:
resp = client.get(
reverse("performance-data-list", kwargs={"project": test_repository.name})
- + "?{}={}".format(param, value)
+ + f"?{param}={value}"
)
assert resp.status_code == 200
assert len(resp.data.keys()) == 1
@@ -719,7 +715,7 @@ def test_alert_summary_tasks_get(client, test_perf_alert_summary, test_perf_data
status=PerformanceAlert.REASSIGNED,
)
resp = client.get(
- reverse("performance-alertsummary-tasks") + "?id={}".format(test_perf_alert_summary.id)
+ reverse("performance-alertsummary-tasks") + f"?id={test_perf_alert_summary.id}"
)
assert resp.status_code == 200
assert resp.json() == {
@@ -737,9 +733,7 @@ def test_alert_summary_tasks_get_failure(client, test_perf_alert_summary):
# verify that we fail if PerformanceAlertSummary does not exist
not_exist_summary_id = test_perf_alert_summary.id
test_perf_alert_summary.delete()
- resp = client.get(
- reverse("performance-alertsummary-tasks") + "?id={}".format(not_exist_summary_id)
- )
+ resp = client.get(reverse("performance-alertsummary-tasks") + f"?id={not_exist_summary_id}")
assert resp.status_code == 400
assert resp.json() == {"message": ["PerformanceAlertSummary does not exist."]}
diff --git a/tests/webapp/api/test_version.py b/tests/webapp/api/test_version.py
index 62d38c8d1a9..e093ba75c94 100644
--- a/tests/webapp/api/test_version.py
+++ b/tests/webapp/api/test_version.py
@@ -26,7 +26,7 @@ def test_unsupported_version():
def test_correct_version():
view = RequestVersionView.as_view()
version = settings.REST_FRAMEWORK["ALLOWED_VERSIONS"][0]
- request = factory.get("/endpoint/", HTTP_ACCEPT="application/json; version={0}".format(version))
+ request = factory.get("/endpoint/", HTTP_ACCEPT=f"application/json; version={version}")
response = view(request)
assert response.data == {"version": version}
diff --git a/treeherder/changelog/models.py b/treeherder/changelog/models.py
index 51626ac3b77..62b7d925b44 100644
--- a/treeherder/changelog/models.py
+++ b/treeherder/changelog/models.py
@@ -19,7 +19,7 @@ class Meta:
unique_together = ("id", "remote_id", "type")
def __str__(self):
- return "[%s] %s by %s" % (self.id, self.message, self.author)
+ return f"[{self.id}] {self.message} by {self.author}"
class ChangelogFile(models.Model):
diff --git a/treeherder/client/setup.py b/treeherder/client/setup.py
index 71cedb709c4..302cd38a5ea 100644
--- a/treeherder/client/setup.py
+++ b/treeherder/client/setup.py
@@ -1,4 +1,3 @@
-import io
import os
import re
@@ -7,7 +6,7 @@
def read(*names, **kwargs):
# Taken from https://packaging.python.org/en/latest/single_source_version.html
- with io.open(
+ with open(
os.path.join(os.path.dirname(__file__), *names), encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
diff --git a/treeherder/client/thclient/client.py b/treeherder/client/thclient/client.py
index 890ef1214cd..69a798acb72 100644
--- a/treeherder/client/thclient/client.py
+++ b/treeherder/client/thclient/client.py
@@ -17,8 +17,8 @@ class TreeherderClient:
API_VERSION = "1.1"
REQUEST_HEADERS = {
- "Accept": "application/json; version={}".format(API_VERSION),
- "User-Agent": "treeherder-pyclient/{}".format(__version__),
+ "Accept": f"application/json; version={API_VERSION}",
+ "User-Agent": f"treeherder-pyclient/{__version__}",
}
PUSH_ENDPOINT = "push"
@@ -43,9 +43,9 @@ def __init__(self, server_url="https://treeherder.mozilla.org", timeout=30):
def _get_endpoint_url(self, endpoint, project=None):
if project:
- return "{}/api/project/{}/{}/".format(self.server_url, project, endpoint)
+ return f"{self.server_url}/api/project/{project}/{endpoint}/"
- return "{}/api/{}/".format(self.server_url, endpoint)
+ return f"{self.server_url}/api/{endpoint}/"
def _get_json_list(self, endpoint, project=None, **params):
if "count" in params and (params["count"] is None or params["count"] > self.MAX_COUNT):
diff --git a/treeherder/etl/management/commands/ingest.py b/treeherder/etl/management/commands/ingest.py
index f86c75b7c14..366d0042cfc 100644
--- a/treeherder/etl/management/commands/ingest.py
+++ b/treeherder/etl/management/commands/ingest.py
@@ -38,7 +38,7 @@
conn_sem = BoundedSemaphore(50)
-class Connection(object):
+class Connection:
def __enter__(self):
conn_sem.acquire()
@@ -51,15 +51,15 @@ def ingest_pr(pr_url, root_url):
_, _, _, org, repo, _, pull_number, _ = pr_url.split("/", 7)
pulse = {
"exchange": "exchange/taskcluster-github/v1/pull-request",
- "routingKey": "primary.{}.{}.synchronize".format(org, repo),
+ "routingKey": f"primary.{org}.{repo}.synchronize",
"payload": {
"repository": repo,
"organization": org,
"action": "synchronize",
"details": {
"event.pullNumber": pull_number,
- "event.base.repo.url": "https://github.com/{}/{}.git".format(org, repo),
- "event.head.repo.url": "https://github.com/{}/{}.git".format(org, repo),
+ "event.base.repo.url": f"https://github.com/{org}/{repo}.git",
+ "event.head.repo.url": f"https://github.com/{org}/{repo}.git",
},
},
}
@@ -233,10 +233,10 @@ def process_job_with_threads(pulse_job, root_url):
def find_task_id(index_path, root_url):
- index_url = liburls.api(root_url, "index", "v1", "task/{}".format(index_path))
+ index_url = liburls.api(root_url, "index", "v1", f"task/{index_path}")
response = requests.get(index_url)
if response.status_code == 404:
- raise Exception("Index URL {} not found".format(index_url))
+ raise Exception(f"Index URL {index_url} not found")
return response.json()["taskId"]
@@ -248,7 +248,7 @@ def get_decision_task_id(project, revision, root_url):
def repo_meta(project):
_repo = Repository.objects.filter(name=project)[0]
- assert _repo, "The project {} you specified is incorrect".format(project)
+ assert _repo, f"The project {project} you specified is incorrect"
splitUrl = _repo.url.split("/")
return {
"url": _repo.url,
@@ -388,9 +388,7 @@ def ingest_git_pushes(project, dry_run=False):
oldest_parent_revision = info["parents"][0]["sha"]
push_to_date[oldest_parent_revision] = info["commit"]["committer"]["date"]
logger.info(
- "Push: {} - Date: {}".format(
- oldest_parent_revision, push_to_date[oldest_parent_revision]
- )
+ f"Push: {oldest_parent_revision} - Date: {push_to_date[oldest_parent_revision]}"
)
push_revision.append(_commit["sha"])
diff --git a/treeherder/etl/management/commands/publish_to_pulse.py b/treeherder/etl/management/commands/publish_to_pulse.py
index 33e3c54a32c..58ddb8254e4 100644
--- a/treeherder/etl/management/commands/publish_to_pulse.py
+++ b/treeherder/etl/management/commands/publish_to_pulse.py
@@ -33,13 +33,13 @@ def handle(self, *args, **options):
userid = urlparse(connection_url).username
payload_file = options["payload_file"]
- exchange_name = "exchange/{}/jobs".format(userid)
+ exchange_name = f"exchange/{userid}/jobs"
connection = Connection(connection_url)
exchange = Exchange(exchange_name, type="topic")
producer = Producer(connection, exchange, routing_key=routing_key, auto_declare=True)
- self.stdout.write("Published to exchange: {}".format(exchange_name))
+ self.stdout.write(f"Published to exchange: {exchange_name}")
with open(payload_file) as f:
body = f.read()
diff --git a/treeherder/etl/management/commands/pulse_listener.py b/treeherder/etl/management/commands/pulse_listener.py
index 372027a3e53..34eafd9af6e 100644
--- a/treeherder/etl/management/commands/pulse_listener.py
+++ b/treeherder/etl/management/commands/pulse_listener.py
@@ -41,7 +41,7 @@ def handle(self, *args, **options):
],
)
- listener_params = (JointConsumer, pulse_sources, [lambda key: "#.{}".format(key), None])
+ listener_params = (JointConsumer, pulse_sources, [lambda key: f"#.{key}", None])
consumer = prepare_joint_consumers(listener_params)
try:
diff --git a/treeherder/etl/management/commands/pulse_listener_tasks.py b/treeherder/etl/management/commands/pulse_listener_tasks.py
index 000321189a2..68f30d5a797 100644
--- a/treeherder/etl/management/commands/pulse_listener_tasks.py
+++ b/treeherder/etl/management/commands/pulse_listener_tasks.py
@@ -36,7 +36,7 @@ def handle(self, *args, **options):
consumers = prepare_consumers(
TaskConsumer,
task_sources,
- lambda key: "#.{}".format(key),
+ lambda key: f"#.{key}",
)
try:
diff --git a/treeherder/etl/management/commands/pulse_listener_tasks_classification.py b/treeherder/etl/management/commands/pulse_listener_tasks_classification.py
index e0768515264..a61ccee1829 100644
--- a/treeherder/etl/management/commands/pulse_listener_tasks_classification.py
+++ b/treeherder/etl/management/commands/pulse_listener_tasks_classification.py
@@ -38,7 +38,7 @@ def handle(self, *args, **options):
consumers = prepare_consumers(
MozciClassificationConsumer,
classification_sources,
- lambda key: "#.{}".format(key),
+ lambda key: f"#.{key}",
)
try:
diff --git a/treeherder/etl/perf.py b/treeherder/etl/perf.py
index 182f943a6c8..fef40e2a309 100644
--- a/treeherder/etl/perf.py
+++ b/treeherder/etl/perf.py
@@ -2,7 +2,7 @@
import logging
from datetime import datetime
from hashlib import sha1
-from typing import List, Optional, Tuple
+from typing import Optional
import simplejson as json
@@ -51,7 +51,7 @@ def _get_signature_hash(signature_properties):
return sha.hexdigest()
-def _order_and_concat(words: List) -> str:
+def _order_and_concat(words: list) -> str:
return " ".join(sorted(words))
@@ -76,7 +76,7 @@ def _create_or_update_signature(repository, signature_hash, framework, applicati
return signature
-def _deduce_push_timestamp(perf_datum: dict, job_push_time: datetime) -> Tuple[datetime, bool]:
+def _deduce_push_timestamp(perf_datum: dict, job_push_time: datetime) -> tuple[datetime, bool]:
is_multi_commit = False
if not settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION:
# the old way of ingestion
@@ -119,7 +119,7 @@ def _test_should_alert_based_on(
def _test_should_gather_replicates_based_on(
- repository: Repository, suite_name: str, replicates: Optional[List] = None
+ repository: Repository, suite_name: str, replicates: Optional[list] = None
) -> bool:
"""
Determine if we should gather/ingest replicates. Currently, it's
diff --git a/treeherder/etl/push_loader.py b/treeherder/etl/push_loader.py
index 4d64b419f15..e41687c19b9 100644
--- a/treeherder/etl/push_loader.py
+++ b/treeherder/etl/push_loader.py
@@ -58,7 +58,7 @@ def get_transformer_class(self, exchange):
return GithubPullRequestTransformer
elif "/hgpushes/" in exchange:
return HgPushTransformer
- raise PulsePushError("Unsupported push exchange: {}".format(exchange))
+ raise PulsePushError(f"Unsupported push exchange: {exchange}")
class GithubTransformer:
@@ -156,7 +156,7 @@ def get_branch(self):
if self.message_body["details"].get("event.head.tag"):
return "tag"
- return super(GithubPushTransformer, self).get_branch()
+ return super().get_branch()
def transform(self, repository):
push_data = compare_shas(
diff --git a/treeherder/etl/pushlog.py b/treeherder/etl/pushlog.py
index 49e703bca6f..2ff234bb805 100644
--- a/treeherder/etl/pushlog.py
+++ b/treeherder/etl/pushlog.py
@@ -53,14 +53,14 @@ def transform_push(self, push):
}
def run(self, source_url, repository_name, changeset=None, last_push_id=None):
- cache_key = "{}:last_push_id".format(repository_name)
+ cache_key = f"{repository_name}:last_push_id"
if not last_push_id:
# get the last object seen from cache. this will
# reduce the number of pushes processed every time
last_push_id = cache.get(cache_key)
if not changeset and last_push_id:
- startid_url = "{}&startID={}".format(source_url, last_push_id)
+ startid_url = f"{source_url}&startID={last_push_id}"
logger.debug(
"Extracted last push for '%s', '%s', from cache, "
"attempting to get changes only from that point at: %s",
diff --git a/treeherder/etl/taskcluster_pulse/handler.py b/treeherder/etl/taskcluster_pulse/handler.py
index 57f9944f862..a2cfc15692c 100644
--- a/treeherder/etl/taskcluster_pulse/handler.py
+++ b/treeherder/etl/taskcluster_pulse/handler.py
@@ -77,7 +77,7 @@ def parseRouteInfo(prefix, taskId, routes, task):
raise PulseHandlerError(
"Could not determine Treeherder route. Either there is no route, "
+ "or more than one matching route exists."
- + "Task ID: {taskId} Routes: {routes}".format(taskId=taskId, routes=routes)
+ + f"Task ID: {taskId} Routes: {routes}"
)
parsedRoute = parseRoute(matchingRoutes[0])
@@ -156,7 +156,7 @@ def ignore_task(task, taskId, rootUrl, project):
break
if ignore:
- logger.debug("Task to be ignored ({})".format(taskId))
+ logger.debug(f"Task to be ignored ({taskId})")
return ignore
@@ -225,7 +225,7 @@ def buildMessage(pushInfo, task, runId, payload):
job = {
"buildSystem": "taskcluster",
"owner": task["metadata"]["owner"],
- "taskId": "{taskId}/{runId}".format(taskId=slugid.decode(taskId), runId=runId),
+ "taskId": f"{slugid.decode(taskId)}/{runId}",
"retryId": runId,
"isRetried": False,
"display": {
@@ -397,7 +397,7 @@ async def addArtifactUploadedLinks(root_url, taskId, runId, job, session):
seen[name] = [artifact["name"]]
else:
seen[name].append(artifact["name"])
- name = "{name} ({length})".format(name=name, length=len(seen[name]) - 1)
+ name = f"{name} ({len(seen[name]) - 1})"
links.append(
{
diff --git a/treeherder/etl/text.py b/treeherder/etl/text.py
index e327600f372..10dd2a0292d 100644
--- a/treeherder/etl/text.py
+++ b/treeherder/etl/text.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
import re
# Regexp that matches all non-BMP unicode characters.
@@ -19,7 +18,7 @@ def convert_unicode_character_to_ascii_repr(match_obj):
hex_value = hex_code_point.zfill(6).upper()
- return "".format(hex_value)
+ return f""
def astral_filter(text):
diff --git a/treeherder/intermittents_commenter/commenter.py b/treeherder/intermittents_commenter/commenter.py
index c7c597c6a83..5a1e9aacb65 100644
--- a/treeherder/intermittents_commenter/commenter.py
+++ b/treeherder/intermittents_commenter/commenter.py
@@ -175,7 +175,7 @@ def print_or_submit_changes(self, all_bug_changes):
)
def open_file(self, filename, load):
- with open("treeherder/intermittents_commenter/{}".format(filename), "r") as myfile:
+ with open(f"treeherder/intermittents_commenter/{filename}") as myfile:
if load:
return json.load(myfile)
else:
@@ -212,7 +212,7 @@ def new_request(self):
# Use a custom HTTP adapter, so we can set a non-zero max_retries value.
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=3))
session.headers = {
- "User-Agent": "treeherder/{}".format(settings.SITE_HOSTNAME),
+ "User-Agent": f"treeherder/{settings.SITE_HOSTNAME}",
"x-bugzilla-api-key": settings.COMMENTER_API_KEY,
"Accept": "application/json",
}
@@ -233,7 +233,7 @@ def fetch_bug_details(self, bug_ids):
)
response.raise_for_status()
except RequestException as e:
- logger.warning("error fetching bugzilla metadata for bugs due to {}".format(e))
+ logger.warning(f"error fetching bugzilla metadata for bugs due to {e}")
return None
if response.headers["Content-Type"] == "text/html; charset=UTF-8":
@@ -246,12 +246,12 @@ def fetch_bug_details(self, bug_ids):
return data["bugs"]
def submit_bug_changes(self, changes, bug_id):
- url = "{}/rest/bug/{}".format(settings.BZ_API_URL, str(bug_id))
+ url = f"{settings.BZ_API_URL}/rest/bug/{str(bug_id)}"
try:
response = self.session.put(url, headers=self.session.headers, json=changes, timeout=30)
response.raise_for_status()
except RequestException as e:
- logger.error("error posting comment to bugzilla for bug {} due to {}".format(bug_id, e))
+ logger.error(f"error posting comment to bugzilla for bug {bug_id} due to {e}")
def get_test_runs(self, startday, endday):
"""Returns an aggregate of pushes for specified date range and
diff --git a/treeherder/log_parser/failureline.py b/treeherder/log_parser/failureline.py
index e72d0a660c9..4171620765f 100644
--- a/treeherder/log_parser/failureline.py
+++ b/treeherder/log_parser/failureline.py
@@ -53,7 +53,7 @@ def write_failure_lines(job_log, log_iter):
try:
failure_lines = create(job_log, log_list)
except DataError as e:
- logger.warning("Got DataError inserting failure_line: {}".format(e.args))
+ logger.warning(f"Got DataError inserting failure_line: {e.args}")
except OperationalError as e:
logger.warning("Got OperationalError inserting failure_line")
# Retry iff this error is the "incorrect String Value" error
diff --git a/treeherder/log_parser/management/commands/test_parse_log.py b/treeherder/log_parser/management/commands/test_parse_log.py
index 534f84fba90..6623682a5ce 100644
--- a/treeherder/log_parser/management/commands/test_parse_log.py
+++ b/treeherder/log_parser/management/commands/test_parse_log.py
@@ -40,7 +40,7 @@ def handle(self, *args, **options):
if not options["profile"]:
for name, artifact in artifact_bc.artifacts.items():
- print("%s, %s" % (name, json.dumps(artifact, indent=2)))
+ print(f"{name}, {json.dumps(artifact, indent=2)}")
if options["profile"]:
print("Timings: %s" % times)
diff --git a/treeherder/log_parser/parsers.py b/treeherder/log_parser/parsers.py
index 2c9e5c18509..08406da5387 100644
--- a/treeherder/log_parser/parsers.py
+++ b/treeherder/log_parser/parsers.py
@@ -64,27 +64,23 @@ class ErrorParser(ParserBase):
)
RE_ERR_MATCH = re.compile(
- (
- r"^g?make(?:\[\d+\])?: \*\*\*"
- r"|^[A-Za-z.]+Error: "
- r"|^[A-Za-z.]*Exception: "
- r"|^\[ FAILED \] "
- r"|^remoteFailed:"
- r"|^rm: cannot "
- r"|^abort:"
- r"|^\[taskcluster\] Error:"
- r"|^\[[\w._-]+:(?:error|exception)\]"
- )
+ r"^g?make(?:\[\d+\])?: \*\*\*"
+ r"|^[A-Za-z.]+Error: "
+ r"|^[A-Za-z.]*Exception: "
+ r"|^\[ FAILED \] "
+ r"|^remoteFailed:"
+ r"|^rm: cannot "
+ r"|^abort:"
+ r"|^\[taskcluster\] Error:"
+ r"|^\[[\w._-]+:(?:error|exception)\]"
)
RE_ERR_SEARCH = re.compile(
- (
- r" error\(\d*\):"
- r"|:\d+: error:"
- r"| error R?C\d*:"
- r"|ERROR [45]\d\d:"
- r"|mozmake\.(?:exe|EXE)(?:\[\d+\])?: \*\*\*"
- )
+ r" error\(\d*\):"
+ r"|:\d+: error:"
+ r"| error R?C\d*:"
+ r"|ERROR [45]\d\d:"
+ r"|mozmake\.(?:exe|EXE)(?:\[\d+\])?: \*\*\*"
)
RE_EXCLUDE_1_SEARCH = re.compile(r"TEST-(?:INFO|PASS) ")
diff --git a/treeherder/log_parser/utils.py b/treeherder/log_parser/utils.py
index b81fe765edd..833287a9279 100644
--- a/treeherder/log_parser/utils.py
+++ b/treeherder/log_parser/utils.py
@@ -21,7 +21,7 @@ def validate_perf_data(performance_data: dict):
for suite in performance_data["suites"]:
# allow only one extraOption longer than 45
if len(_long_options(_extra_options(suite), *expected_range)) > 1:
- raise ValidationError("Too many extra options longer than {}".format(SECOND_MAX_LENGTH))
+ raise ValidationError(f"Too many extra options longer than {SECOND_MAX_LENGTH}")
def _long_options(all_extra_options: list, second_max: int, first_max: int):
diff --git a/treeherder/model/data_cycling/cyclers.py b/treeherder/model/data_cycling/cyclers.py
index a4ef1599177..04346df86f0 100644
--- a/treeherder/model/data_cycling/cyclers.py
+++ b/treeherder/model/data_cycling/cyclers.py
@@ -1,7 +1,6 @@
import logging
from abc import ABC, abstractmethod
from datetime import timedelta, datetime
-from typing import List
from django.db import OperationalError, connection
from django.db.backends.utils import CursorWrapper
@@ -69,9 +68,9 @@ def cycle(self):
rs_deleted = Job.objects.cycle_data(
self.cycle_interval, self.chunk_size, self.sleep_time
)
- logger.warning("Deleted {} jobs".format(rs_deleted))
+ logger.warning(f"Deleted {rs_deleted} jobs")
except OperationalError as e:
- logger.error("Error running cycle_data: {}".format(e))
+ logger.error(f"Error running cycle_data: {e}")
self._remove_leftovers()
@@ -79,17 +78,17 @@ def _remove_leftovers(self):
logger.warning("Pruning ancillary data: job types, groups and machines")
def prune(reference_model, id_name, model):
- logger.warning("Pruning {}s".format(model.__name__))
+ logger.warning(f"Pruning {model.__name__}s")
used_ids = (
reference_model.objects.only(id_name).values_list(id_name, flat=True).distinct()
)
unused_ids = model.objects.exclude(id__in=used_ids).values_list("id", flat=True)
- logger.warning("Removing {} records from {}".format(len(unused_ids), model.__name__))
+ logger.warning(f"Removing {len(unused_ids)} records from {model.__name__}")
while len(unused_ids):
delete_ids = unused_ids[: self.chunk_size]
- logger.warning("deleting {} of {}".format(len(delete_ids), len(unused_ids)))
+ logger.warning(f"deleting {len(delete_ids)} of {len(unused_ids)}")
model.objects.filter(id__in=delete_ids).delete()
unused_ids = unused_ids[self.chunk_size :]
@@ -111,7 +110,7 @@ def __init__(
sleep_time: int,
is_debug: bool = None,
days: int = None,
- strategies: List[RemovalStrategy] = None,
+ strategies: list[RemovalStrategy] = None,
**kwargs,
):
super().__init__(chunk_size, sleep_time, is_debug)
@@ -223,9 +222,7 @@ def _delete_in_chunks(self, strategy: RemovalStrategy):
break # either finished removing all expired data or failed
else:
any_successful_attempt = True
- logger.debug(
- "Successfully deleted {} performance datum rows".format(deleted_rows)
- )
+ logger.debug(f"Successfully deleted {deleted_rows} performance datum rows")
def __handle_chunk_removal_exception(
self, exception, cursor: CursorWrapper, any_successful_attempt: bool
diff --git a/treeherder/model/data_cycling/removal_strategies.py b/treeherder/model/data_cycling/removal_strategies.py
index 2f9fc6469cb..4470bb1c537 100644
--- a/treeherder/model/data_cycling/removal_strategies.py
+++ b/treeherder/model/data_cycling/removal_strategies.py
@@ -4,7 +4,6 @@
from abc import ABC, abstractmethod
from datetime import timedelta, datetime
from itertools import cycle
-from typing import List
from django.conf import settings
from django.db.backends.utils import CursorWrapper
@@ -48,7 +47,7 @@ def name(self) -> str:
pass
@staticmethod
- def fabricate_all_strategies(*args, **kwargs) -> List[RemovalStrategy]:
+ def fabricate_all_strategies(*args, **kwargs) -> list[RemovalStrategy]:
return [
MainRemovalStrategy(*args, **kwargs),
TryDataRemoval(*args, **kwargs),
@@ -364,7 +363,7 @@ def target_signature(self) -> PerformanceSignature:
return self._target_signature
@property
- def removable_signatures(self) -> List[PerformanceSignature]:
+ def removable_signatures(self) -> list[PerformanceSignature]:
if self._removable_signatures is None:
self._removable_signatures = list(
PerformanceSignature.objects.filter(last_updated__lte=self._max_timestamp).order_by(
diff --git a/treeherder/model/data_cycling/signature_remover.py b/treeherder/model/data_cycling/signature_remover.py
index 605f764d8be..46ca24e3e41 100644
--- a/treeherder/model/data_cycling/signature_remover.py
+++ b/treeherder/model/data_cycling/signature_remover.py
@@ -1,5 +1,4 @@
import logging
-from typing import List
import taskcluster
from django.conf import settings
@@ -85,7 +84,7 @@ def _delete(chunk_of_signatures):
def _send_email(self):
self._notify.email(self._email_writer.email)
- def __delete_and_notify(self, signatures: List[PerformanceSignature]) -> bool:
+ def __delete_and_notify(self, signatures: list[PerformanceSignature]) -> bool:
"""
Atomically deletes perf signatures & notifies about this.
@return: whether atomic operation was successful or not
@@ -104,5 +103,5 @@ def __delete_and_notify(self, signatures: List[PerformanceSignature]) -> bool:
return True
- def _prepare_notification(self, signatures: List[PerformanceSignature]):
+ def _prepare_notification(self, signatures: list[PerformanceSignature]):
self._email_writer.prepare_new_email(signatures)
diff --git a/treeherder/model/error_summary.py b/treeherder/model/error_summary.py
index 00b42671515..5c6278a05de 100644
--- a/treeherder/model/error_summary.py
+++ b/treeherder/model/error_summary.py
@@ -32,7 +32,7 @@ def get_error_summary(job, queryset=None):
Caches the results if there are any.
"""
- cache_key = "error-summary-{}".format(job.id)
+ cache_key = f"error-summary-{job.id}"
cached_error_summary = cache.get(cache_key)
if cached_error_summary is not None:
return cached_error_summary
diff --git a/treeherder/model/management/commands/cache_failure_history.py b/treeherder/model/management/commands/cache_failure_history.py
index 7925da70990..429c2f3f0e8 100644
--- a/treeherder/model/management/commands/cache_failure_history.py
+++ b/treeherder/model/management/commands/cache_failure_history.py
@@ -37,7 +37,7 @@ def handle(self, *args, **options):
self.is_debug = options["debug"]
days = options["days"]
- self.debug("Fetching {} sets of history...".format(days))
+ self.debug(f"Fetching {days} sets of history...")
option_map = OptionCollection.objects.get_option_collection_map()
repository_ids = REPO_GROUPS["trunk"]
diff --git a/treeherder/model/migrations/0001_squashed_0022_modify_bugscache_and_bugjobmap.py b/treeherder/model/migrations/0001_squashed_0022_modify_bugscache_and_bugjobmap.py
index 919060e0d8d..703156e6d78 100644
--- a/treeherder/model/migrations/0001_squashed_0022_modify_bugscache_and_bugjobmap.py
+++ b/treeherder/model/migrations/0001_squashed_0022_modify_bugscache_and_bugjobmap.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-08 11:41
import django.core.validators
import django.db.models.deletion
diff --git a/treeherder/model/migrations/0002_add_bugjobmap_model_manager.py b/treeherder/model/migrations/0002_add_bugjobmap_model_manager.py
index 6d6ed0465f3..66da6c03dad 100644
--- a/treeherder/model/migrations/0002_add_bugjobmap_model_manager.py
+++ b/treeherder/model/migrations/0002_add_bugjobmap_model_manager.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-30 16:50
from django.db import migrations
import django.db.models.manager
diff --git a/treeherder/model/migrations/0003_add_matcher_name_fields.py b/treeherder/model/migrations/0003_add_matcher_name_fields.py
index 50c7a63b070..650da8dc06a 100644
--- a/treeherder/model/migrations/0003_add_matcher_name_fields.py
+++ b/treeherder/model/migrations/0003_add_matcher_name_fields.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-18 08:11
from django.db import migrations, models
diff --git a/treeherder/model/migrations/0004_populate_matcher_name_fields.py b/treeherder/model/migrations/0004_populate_matcher_name_fields.py
index 793039c1111..07491ddad77 100644
--- a/treeherder/model/migrations/0004_populate_matcher_name_fields.py
+++ b/treeherder/model/migrations/0004_populate_matcher_name_fields.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-18 08:11
from django.db import migrations
diff --git a/treeherder/model/migrations/0005_use_matcher_name_for_unique_constraint.py b/treeherder/model/migrations/0005_use_matcher_name_for_unique_constraint.py
index 9ff2d120510..75edc1183a5 100644
--- a/treeherder/model/migrations/0005_use_matcher_name_for_unique_constraint.py
+++ b/treeherder/model/migrations/0005_use_matcher_name_for_unique_constraint.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-18 08:23
from django.db import migrations
diff --git a/treeherder/model/migrations/0006_drop_matcher_fks.py b/treeherder/model/migrations/0006_drop_matcher_fks.py
index e9362a6a8fc..b84a93e39fe 100644
--- a/treeherder/model/migrations/0006_drop_matcher_fks.py
+++ b/treeherder/model/migrations/0006_drop_matcher_fks.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-18 08:30
from django.db import migrations
diff --git a/treeherder/model/migrations/0007_remove_m2m_between_classified_failures_and_failure_match.py b/treeherder/model/migrations/0007_remove_m2m_between_classified_failures_and_failure_match.py
index e48926ad9a5..f9da764698e 100644
--- a/treeherder/model/migrations/0007_remove_m2m_between_classified_failures_and_failure_match.py
+++ b/treeherder/model/migrations/0007_remove_m2m_between_classified_failures_and_failure_match.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-05 09:29
from django.db import migrations
diff --git a/treeherder/model/migrations/0008_remove_failure_match.py b/treeherder/model/migrations/0008_remove_failure_match.py
index 8d1f456a7a3..98a05119bc2 100644
--- a/treeherder/model/migrations/0008_remove_failure_match.py
+++ b/treeherder/model/migrations/0008_remove_failure_match.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-05 09:40
from django.db import migrations
diff --git a/treeherder/model/migrations/0009_add_manager_to_push_and_job.py b/treeherder/model/migrations/0009_add_manager_to_push_and_job.py
index 2eab91e4568..06fff69d146 100644
--- a/treeherder/model/migrations/0009_add_manager_to_push_and_job.py
+++ b/treeherder/model/migrations/0009_add_manager_to_push_and_job.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-18 18:21
from django.db import migrations
import django.db.models.manager
diff --git a/treeherder/model/migrations/0010_remove_runnable_job.py b/treeherder/model/migrations/0010_remove_runnable_job.py
index 6e1531cfe15..71141c13094 100644
--- a/treeherder/model/migrations/0010_remove_runnable_job.py
+++ b/treeherder/model/migrations/0010_remove_runnable_job.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-26 21:21
from django.db import migrations
diff --git a/treeherder/model/migrations/0011_remove_matcher_table.py b/treeherder/model/migrations/0011_remove_matcher_table.py
index a3bb74630d5..315c8d4a46a 100644
--- a/treeherder/model/migrations/0011_remove_matcher_table.py
+++ b/treeherder/model/migrations/0011_remove_matcher_table.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-06-06 09:25
from django.db import migrations
diff --git a/treeherder/model/migrations/0012_branch_maxlen.py b/treeherder/model/migrations/0012_branch_maxlen.py
index b68eb90920a..06052e1c25d 100644
--- a/treeherder/model/migrations/0012_branch_maxlen.py
+++ b/treeherder/model/migrations/0012_branch_maxlen.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-13 20:29
from django.db import migrations, models
diff --git a/treeherder/model/migrations/0013_add_index_to_push_revision.py b/treeherder/model/migrations/0013_add_index_to_push_revision.py
index d6aaf1e2609..ef5f3e16a19 100644
--- a/treeherder/model/migrations/0013_add_index_to_push_revision.py
+++ b/treeherder/model/migrations/0013_add_index_to_push_revision.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.17 on 2019-01-02 23:34
from django.db import migrations, models
diff --git a/treeherder/model/migrations/0015_add_repository_tc_root_url.py b/treeherder/model/migrations/0015_add_repository_tc_root_url.py
index 3adc6eb2a11..e9f10afb0fc 100644
--- a/treeherder/model/migrations/0015_add_repository_tc_root_url.py
+++ b/treeherder/model/migrations/0015_add_repository_tc_root_url.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 2.2.4 on 2019-08-28 17:39
from django.db import migrations, models
diff --git a/treeherder/model/models.py b/treeherder/model/models.py
index 4adfe7baab9..0aa283097d4 100644
--- a/treeherder/model/models.py
+++ b/treeherder/model/models.py
@@ -4,7 +4,6 @@
import re
import time
from hashlib import sha1
-from typing import List
import warnings
@@ -80,7 +79,7 @@ class Meta:
unique_together = ("os_name", "platform", "architecture")
def __str__(self):
- return "{0} {1} {2}".format(self.os_name, self.platform, self.architecture)
+ return f"{self.os_name} {self.platform} {self.architecture}"
class Option(NamedModel):
@@ -117,11 +116,11 @@ class Meta:
verbose_name_plural = "repositories"
@classmethod
- def fetch_all_names(cls) -> List[str]:
+ def fetch_all_names(cls) -> list[str]:
return cls.objects.values_list("name", flat=True)
def __str__(self):
- return "{0} {1}".format(self.name, self.repository_group)
+ return f"{self.name} {self.repository_group}"
class Push(models.Model):
@@ -145,7 +144,7 @@ class Meta:
unique_together = ("repository", "revision")
def __str__(self):
- return "{0} {1}".format(self.repository.name, self.revision)
+ return f"{self.repository.name} {self.revision}"
def total_jobs(self, job_type, result):
return self.jobs.filter(job_type=job_type, result=result).count()
@@ -194,7 +193,7 @@ class Meta:
unique_together = ("push", "revision")
def __str__(self):
- return "{0} {1}".format(self.push.repository.name, self.revision)
+ return f"{self.push.repository.name} {self.revision}"
class MachinePlatform(models.Model):
@@ -208,7 +207,7 @@ class Meta:
unique_together = ("os_name", "platform", "architecture")
def __str__(self):
- return "{0} {1} {2}".format(self.os_name, self.platform, self.architecture)
+ return f"{self.os_name} {self.platform} {self.architecture}"
class Bugscache(models.Model):
@@ -232,7 +231,7 @@ class Meta:
]
def __str__(self):
- return "{0}".format(self.id)
+ return f"{self.id}"
@classmethod
def sanitized_search_term(self, search_term):
@@ -322,7 +321,7 @@ class Meta:
unique_together = ("product", "component")
def __str__(self):
- return "{0} :: {1}".format(self.product, self.component)
+ return f"{self.product} :: {self.component}"
class FilesBugzillaMap(models.Model):
@@ -335,7 +334,7 @@ class Meta:
verbose_name_plural = "files_bugzilla_components"
def __str__(self):
- return "{0}".format(self.path)
+ return f"{self.path}"
class BugzillaSecurityGroup(models.Model):
@@ -363,7 +362,7 @@ class Meta:
unique_together = ("name", "symbol")
def __str__(self):
- return "{0} ({1})".format(self.name, self.symbol)
+ return f"{self.name} ({self.symbol})"
class OptionCollectionManager(models.Manager):
@@ -413,7 +412,7 @@ class Meta:
unique_together = ("option_collection_hash", "option")
def __str__(self):
- return "{0}".format(self.option)
+ return f"{self.option}"
class JobType(models.Model):
@@ -427,7 +426,7 @@ class Meta:
unique_together = (("name", "symbol"),)
def __str__(self):
- return "{0} ({1})".format(self.name, self.symbol)
+ return f"{self.name} ({self.symbol})"
class FailureClassification(NamedModel):
@@ -602,7 +601,7 @@ def tier_is_sheriffable(self) -> bool:
return self.tier < 3
def __str__(self):
- return "{0} {1} {2}".format(self.id, self.repository, self.guid)
+ return f"{self.id} {self.repository} {self.guid}"
def get_platform_option(self, option_collection_map=None):
if not hasattr(self, "platform_option"):
@@ -723,7 +722,7 @@ class Meta:
unique_together = ("job", "name", "url")
def __str__(self):
- return "{0} {1} {2} {3}".format(self.id, self.job.guid, self.name, self.status)
+ return f"{self.id} {self.job.guid} {self.name} {self.status}"
def update_status(self, status):
self.status = status
@@ -793,7 +792,7 @@ def create(cls, job_id, bug_id, user=None):
return bug_map
def __str__(self):
- return "{0} {1} {2} {3}".format(self.id, self.job.guid, self.bug_id, self.user)
+ return f"{self.id} {self.job.guid} {self.bug_id} {self.user}"
class JobNote(models.Model):
@@ -899,9 +898,7 @@ def delete(self, *args, **kwargs):
self._ensure_classification()
def __str__(self):
- return "{0} {1} {2} {3}".format(
- self.id, self.job.guid, self.failure_classification, self.who
- )
+ return f"{self.id} {self.job.guid} {self.failure_classification} {self.who}"
class FailureLine(models.Model):
@@ -959,7 +956,7 @@ class Meta:
unique_together = ("job_log", "line")
def __str__(self):
- return "{0} {1}".format(self.id, Job.objects.get(guid=self.job_guid).id)
+ return f"{self.id} {Job.objects.get(guid=self.job_guid).id}"
@property
def error(self):
@@ -1116,7 +1113,7 @@ class ClassifiedFailure(models.Model):
modified = models.DateTimeField(auto_now=True)
def __str__(self):
- return "{0} {1}".format(self.id, self.bug_number)
+ return f"{self.id} {self.bug_number}"
def bug(self):
# Putting this here forces one query per object; there should be a way
@@ -1256,7 +1253,7 @@ class Meta:
unique_together = (("step", "line_number"), ("job", "line_number"))
def __str__(self):
- return "{0} {1}".format(self.id, self.job.id)
+ return f"{self.id} {self.job.id}"
@property
def metadata(self):
@@ -1387,7 +1384,7 @@ class Meta:
unique_together = ("text_log_error", "classified_failure", "matcher_name")
def __str__(self):
- return "{0} {1}".format(self.text_log_error.id, self.classified_failure.id)
+ return f"{self.text_log_error.id} {self.classified_failure.id}"
class InvestigatedTests(models.Model):
diff --git a/treeherder/perf/auto_perf_sheriffing/backfill_reports.py b/treeherder/perf/auto_perf_sheriffing/backfill_reports.py
index f80c79c6308..5bf0a061c01 100644
--- a/treeherder/perf/auto_perf_sheriffing/backfill_reports.py
+++ b/treeherder/perf/auto_perf_sheriffing/backfill_reports.py
@@ -1,7 +1,7 @@
import logging
from datetime import timedelta, datetime
from itertools import zip_longest, groupby
-from typing import Tuple, List, Optional
+from typing import Optional
import simplejson as json
from django.db.models import QuerySet, Q, F
@@ -24,7 +24,7 @@ class AlertsPicker:
"""
def __init__(
- self, max_alerts: int, max_improvements: int, platforms_of_interest: Tuple[str, ...]
+ self, max_alerts: int, max_improvements: int, platforms_of_interest: tuple[str, ...]
):
"""
:param max_alerts: the maximum number of selected alerts
@@ -49,7 +49,7 @@ def __init__(
self.max_improvements = max_improvements
self.ordered_platforms_of_interest = platforms_of_interest
- def extract_important_alerts(self, alerts: Tuple[PerformanceAlert, ...]):
+ def extract_important_alerts(self, alerts: tuple[PerformanceAlert, ...]):
if any(not isinstance(alert, PerformanceAlert) for alert in alerts):
raise ValueError("Provided parameter does not contain only PerformanceAlert objects.")
relevant_alerts = self._extract_by_relevant_platforms(alerts)
@@ -57,7 +57,7 @@ def extract_important_alerts(self, alerts: Tuple[PerformanceAlert, ...]):
sorted_alerts = self._multi_criterion_sort(alerts_with_distinct_jobs)
return self._ensure_alerts_variety(sorted_alerts)
- def _ensure_alerts_variety(self, sorted_alerts: List[PerformanceAlert]):
+ def _ensure_alerts_variety(self, sorted_alerts: list[PerformanceAlert]):
"""
The alerts container must be sorted before being passed to this function.
The returned list must contain regressions and (if present) improvements.
@@ -81,12 +81,12 @@ def _ensure_alerts_variety(self, sorted_alerts: List[PerformanceAlert]):
: self.max_improvements if improvements_only else self.max_alerts
]
- def _ensure_distinct_jobs(self, alerts: List[PerformanceAlert]) -> List[PerformanceAlert]:
+ def _ensure_distinct_jobs(self, alerts: list[PerformanceAlert]) -> list[PerformanceAlert]:
def initial_culprit_job(alert):
return alert.initial_culprit_job
def parent_or_sibling_from(
- alert_group: List[PerformanceAlert],
+ alert_group: list[PerformanceAlert],
) -> Optional[PerformanceAlert]:
if len(alert_group) == 0:
return None
@@ -105,8 +105,8 @@ def parent_or_sibling_from(
return list(filter(None, alerts))
def _ensure_platform_variety(
- self, sorted_all_alerts: List[PerformanceAlert]
- ) -> List[PerformanceAlert]:
+ self, sorted_all_alerts: list[PerformanceAlert]
+ ) -> list[PerformanceAlert]:
"""
Note: Ensure that the sorted_all_alerts container has only
platforms of interest (example: 'windows10', 'windows7', 'linux', 'osx', 'android').
@@ -191,7 +191,7 @@ def __init__(self, max_data_points: int, time_interval: timedelta, logger=None):
self._time_interval = time_interval
self.log = logger or logging.getLogger(self.__class__.__name__)
- def __call__(self, alert: PerformanceAlert) -> List[dict]:
+ def __call__(self, alert: PerformanceAlert) -> list[dict]:
"""
Main method
"""
@@ -238,7 +238,7 @@ def _fetch_suspect_data_points(self, alert: PerformanceAlert) -> QuerySet:
)
return annotated_data_points
- def _one_data_point_per_push(self, annotated_data_points: QuerySet) -> List[dict]:
+ def _one_data_point_per_push(self, annotated_data_points: QuerySet) -> list[dict]:
seen_push_ids = set()
seen_add = seen_push_ids.add
return [
@@ -247,7 +247,7 @@ def _one_data_point_per_push(self, annotated_data_points: QuerySet) -> List[dict
if not (data_point["push_id"] in seen_push_ids or seen_add(data_point["push_id"]))
]
- def _find_push_id_index(self, push_id: int, flattened_data_points: List[dict]) -> int:
+ def _find_push_id_index(self, push_id: int, flattened_data_points: list[dict]) -> int:
for index, data_point in enumerate(flattened_data_points):
if data_point["push_id"] == push_id:
return index
@@ -261,7 +261,7 @@ def __compute_window_slices(self, center_index: int) -> slice:
return slice(left_margin, right_margin)
- def _glance_over_retrigger_range(self, data_points_to_retrigger: List[dict]):
+ def _glance_over_retrigger_range(self, data_points_to_retrigger: list[dict]):
retrigger_range = len(data_points_to_retrigger)
if retrigger_range < self._range_width:
self.log.warning(
@@ -286,12 +286,12 @@ def __init__(
self.log = logger or logging.getLogger(self.__class__.__name__)
def provide_updated_reports(
- self, since: datetime, frameworks: List[str], repositories: List[str]
- ) -> List[BackfillReport]:
+ self, since: datetime, frameworks: list[str], repositories: list[str]
+ ) -> list[BackfillReport]:
alert_summaries = self.__fetch_summaries_to_retrigger(since, frameworks, repositories)
return self.compile_reports_for(alert_summaries)
- def compile_reports_for(self, summaries_to_retrigger: QuerySet) -> List[BackfillReport]:
+ def compile_reports_for(self, summaries_to_retrigger: QuerySet) -> list[BackfillReport]:
reports = []
for summary in summaries_to_retrigger:
@@ -317,12 +317,12 @@ def compile_reports_for(self, summaries_to_retrigger: QuerySet) -> List[Backfill
def _pick_important_alerts(
self, from_summary: PerformanceAlertSummary
- ) -> List[PerformanceAlert]:
+ ) -> list[PerformanceAlert]:
return self.alerts_picker.extract_important_alerts(
from_summary.alerts.filter(status=PerformanceAlert.UNTRIAGED)
)
- def _provide_records(self, backfill_report: BackfillReport, alert_context_map: List[Tuple]):
+ def _provide_records(self, backfill_report: BackfillReport, alert_context_map: list[tuple]):
for alert, retrigger_context in alert_context_map:
BackfillRecord.objects.create(
alert=alert,
@@ -331,7 +331,7 @@ def _provide_records(self, backfill_report: BackfillReport, alert_context_map: L
)
def __fetch_summaries_to_retrigger(
- self, since: datetime, frameworks: List[str], repositories: List[str]
+ self, since: datetime, frameworks: list[str], repositories: list[str]
) -> QuerySet:
no_reports_yet = Q(last_updated__gte=since, backfill_report__isnull=True)
with_outdated_reports = Q(last_updated__gt=F("backfill_report__last_updated"))
@@ -348,7 +348,7 @@ def __fetch_summaries_to_retrigger(
.filter(filters)
)
- def _associate_retrigger_context(self, important_alerts: List[PerformanceAlert]) -> List[Tuple]:
+ def _associate_retrigger_context(self, important_alerts: list[PerformanceAlert]) -> list[tuple]:
retrigger_map = []
incomplete_mapping = False
diff --git a/treeherder/perf/auto_perf_sheriffing/secretary.py b/treeherder/perf/auto_perf_sheriffing/secretary.py
index 8c4558049b6..f8c5b5b493d 100644
--- a/treeherder/perf/auto_perf_sheriffing/secretary.py
+++ b/treeherder/perf/auto_perf_sheriffing/secretary.py
@@ -1,6 +1,5 @@
import logging
from datetime import datetime, timedelta
-from typing import List
import simplejson as json
from django.conf import settings as django_settings
@@ -22,7 +21,7 @@ class Secretary:
"""
def __init__(
- self, outcome_checker: OutcomeChecker = None, supported_platforms: List[str] = None
+ self, outcome_checker: OutcomeChecker = None, supported_platforms: list[str] = None
):
self.outcome_checker = outcome_checker or OutcomeChecker()
self.supported_platforms = supported_platforms or django_settings.SUPPORTED_PLATFORMS
diff --git a/treeherder/perf/auto_perf_sheriffing/sherlock.py b/treeherder/perf/auto_perf_sheriffing/sherlock.py
index dcaa5cb6efd..77e4e387f62 100644
--- a/treeherder/perf/auto_perf_sheriffing/sherlock.py
+++ b/treeherder/perf/auto_perf_sheriffing/sherlock.py
@@ -2,7 +2,6 @@
from datetime import datetime, timedelta
from json import JSONDecodeError
from logging import INFO, WARNING
-from typing import List, Tuple
from django.conf import settings
from django.db.models import QuerySet
@@ -35,7 +34,7 @@ def __init__(
backfill_tool: BackfillTool,
secretary: Secretary,
max_runtime: timedelta = None,
- supported_platforms: List[str] = None,
+ supported_platforms: list[str] = None,
):
self.report_maintainer = report_maintainer
self.backfill_tool = backfill_tool
@@ -45,7 +44,7 @@ def __init__(
self.supported_platforms = supported_platforms or settings.SUPPORTED_PLATFORMS
self._wake_up_time = datetime.now()
- def sheriff(self, since: datetime, frameworks: List[str], repositories: List[str]):
+ def sheriff(self, since: datetime, frameworks: list[str], repositories: list[str]):
logger.info("Sherlock: Validating settings...")
self.secretary.validate_settings()
@@ -76,15 +75,15 @@ def assert_can_run(self):
raise MaxRuntimeExceeded("Sherlock: Max runtime exceeded.")
def _report(
- self, since: datetime, frameworks: List[str], repositories: List[str]
- ) -> List[BackfillReport]:
+ self, since: datetime, frameworks: list[str], repositories: list[str]
+ ) -> list[BackfillReport]:
return self.report_maintainer.provide_updated_reports(since, frameworks, repositories)
- def _backfill(self, frameworks: List[str], repositories: List[str]):
+ def _backfill(self, frameworks: list[str], repositories: list[str]):
for platform in self.supported_platforms:
self.__backfill_on(platform, frameworks, repositories)
- def __backfill_on(self, platform: str, frameworks: List[str], repositories: List[str]):
+ def __backfill_on(self, platform: str, frameworks: list[str], repositories: list[str]):
left = self.secretary.backfills_left(on_platform=platform)
total_consumed = 0
@@ -110,7 +109,7 @@ def __backfill_on(self, platform: str, frameworks: List[str], repositories: List
@staticmethod
def __fetch_records_requiring_backfills_on(
- platform: str, frameworks: List[str], repositories: List[str]
+ platform: str, frameworks: list[str], repositories: list[str]
) -> QuerySet:
records_to_backfill = BackfillRecord.objects.select_related(
"alert",
@@ -126,7 +125,7 @@ def __fetch_records_requiring_backfills_on(
)
return records_to_backfill
- def _backfill_record(self, record: BackfillRecord, left: int) -> Tuple[int, int]:
+ def _backfill_record(self, record: BackfillRecord, left: int) -> tuple[int, int]:
consumed = 0
try:
@@ -160,7 +159,7 @@ def _backfill_record(self, record: BackfillRecord, left: int) -> Tuple[int, int]
@staticmethod
def _note_backfill_outcome(
record: BackfillRecord, to_backfill: int, actually_backfilled: int
- ) -> Tuple[bool, str]:
+ ) -> tuple[bool, str]:
success = False
record.total_actions_triggered = actually_backfilled
@@ -200,7 +199,7 @@ def _is_queue_overloaded(provisioner_id: str, worker_type: str, acceptable_limit
return pending_tasks_count > acceptable_limit
@staticmethod
- def __get_data_points_to_backfill(context: List[dict]) -> List[dict]:
+ def __get_data_points_to_backfill(context: list[dict]) -> list[dict]:
context_len = len(context)
start = None
diff --git a/treeherder/perf/email.py b/treeherder/perf/email.py
index f959e7a14d5..cd41f53bc3e 100644
--- a/treeherder/perf/email.py
+++ b/treeherder/perf/email.py
@@ -11,7 +11,7 @@
from abc import ABC, abstractmethod
import urllib.parse
-from typing import List, Union, Optional
+from typing import Union, Optional
from django.conf import settings
from treeherder.perf.models import (
@@ -40,7 +40,7 @@ class EmailWriter(ABC):
def __init__(self):
self._email = Email()
- def prepare_new_email(self, must_mention: Union[List[object], object]) -> dict:
+ def prepare_new_email(self, must_mention: Union[list[object], object]) -> dict:
"""
Template method
"""
@@ -64,12 +64,12 @@ def _write_subject(self):
pass # pragma: no cover
@abstractmethod
- def _write_content(self, must_mention: List[object]):
+ def _write_content(self, must_mention: list[object]):
pass # pragma: no cover
@staticmethod
- def __ensure_its_list(must_mention) -> List[object]:
- if not isinstance(must_mention, List):
+ def __ensure_its_list(must_mention) -> list[object]:
+ if not isinstance(must_mention, list):
must_mention = [must_mention]
return must_mention
@@ -90,7 +90,7 @@ class BackfillReportContent:
def __init__(self):
self._raw_content = None
- def include_records(self, records: List[BackfillRecord]):
+ def include_records(self, records: list[BackfillRecord]):
self._initialize_report_intro()
for record in records:
@@ -216,7 +216,7 @@ def _write_address(self):
def _write_subject(self):
self._email.subject = "Automatic Backfilling Report"
- def _write_content(self, must_mention: List[BackfillRecord]):
+ def _write_content(self, must_mention: list[BackfillRecord]):
content = BackfillReportContent()
content.include_records(must_mention)
@@ -238,7 +238,7 @@ class DeletionReportContent:
def __init__(self):
self._raw_content = None
- def include_signatures(self, signatures: List[PerformanceSignature]):
+ def include_signatures(self, signatures: list[PerformanceSignature]):
self._initialize_report_intro()
for signature in signatures:
@@ -287,7 +287,7 @@ def _write_address(self):
def _write_subject(self):
self._email.subject = "Summary of deleted Performance Signatures"
- def _write_content(self, must_mention: List[PerformanceSignature]):
+ def _write_content(self, must_mention: list[PerformanceSignature]):
content = DeletionReportContent()
content.include_signatures(must_mention)
diff --git a/treeherder/perf/management/commands/compute_criteria_formulas.py b/treeherder/perf/management/commands/compute_criteria_formulas.py
index aaf22ba7bb3..5abdce99437 100644
--- a/treeherder/perf/management/commands/compute_criteria_formulas.py
+++ b/treeherder/perf/management/commands/compute_criteria_formulas.py
@@ -1,7 +1,6 @@
import time
from datetime import timedelta
-from typing import List
from treeherder.config import settings
from treeherder.perf.sheriffing_criteria import (
@@ -15,7 +14,7 @@
from django.core.management.base import BaseCommand
-def pretty_enumerated(formulas: List[str]) -> str:
+def pretty_enumerated(formulas: list[str]) -> str:
comma = ", "
return " & ".join(comma.join(formulas).rsplit(comma, maxsplit=1))
diff --git a/treeherder/perf/management/commands/import_perf_data.py b/treeherder/perf/management/commands/import_perf_data.py
index 607c671f7ae..4b0a5e88f53 100644
--- a/treeherder/perf/management/commands/import_perf_data.py
+++ b/treeherder/perf/management/commands/import_perf_data.py
@@ -50,14 +50,14 @@ def progress_notifier(
tabs_no=0,
):
total_items = len(iterable)
- print("{0}Fetching {1} {2} item(s)...".format("\t" * tabs_no, total_items, item_name))
+ print("{}Fetching {} {} item(s)...".format("\t" * tabs_no, total_items, item_name))
prev_percentage = None
for idx, item in enumerate(iterable):
item_processor(item)
percentage = int((idx + 1) * 100 / total_items)
if percentage % 10 == 0 and percentage != prev_percentage:
- print("{0}Fetched {1}% of {2} item(s)".format("\t" * tabs_no, percentage, item_name))
+ print("{}Fetched {}% of {} item(s)".format("\t" * tabs_no, percentage, item_name))
prev_percentage = percentage
@@ -86,14 +86,14 @@ def fillup_target(self, **filters):
def show_progress(self, queryset, map, table_name):
total_rows = int(queryset.count())
- print("Fetching {0} {1}(s)...".format(total_rows, table_name))
+ print(f"Fetching {total_rows} {table_name}(s)...")
prev_percentage = None
for idx, obj in enumerate(list(queryset)):
map(obj)
percentage = int((idx + 1) * 100 / total_rows)
if percentage % 10 == 0 and percentage != prev_percentage:
- print("Fetched {0}% of alert summaries".format(percentage))
+ print(f"Fetched {percentage}% of alert summaries")
prev_percentage = percentage
@@ -112,19 +112,19 @@ class DecentSizedData(Data):
def delete_local_data(self):
for model in self.DECENT_SIZED_TABLES:
- print("Removing elements from {0} table... ".format(model._meta.db_table))
+ print(f"Removing elements from {model._meta.db_table} table... ")
model.objects.using(self.target).all().delete()
def save_local_data(self):
for model in self.DECENT_SIZED_TABLES:
- print("Fetching from {0} table...".format(model._meta.db_table))
+ print(f"Fetching from {model._meta.db_table} table...")
model.objects.using(self.target).bulk_create(model.objects.using(self.source).all())
def fillup_target(self, **filters):
print("Fetching all affordable data...\n")
# TODO: JSON dump the list
print(
- "From tables {0}".format(
+ "From tables {}".format(
", ".join([model._meta.db_table for model in self.DECENT_SIZED_TABLES])
)
)
@@ -224,7 +224,7 @@ def __init__(
def delete_local_data(self):
for model in self.BIG_SIZED_TABLES:
- print("Removing elements from {0} table... ".format(model._meta.db_table))
+ print(f"Removing elements from {model._meta.db_table} table... ")
model.objects.using(self.target).all().delete()
def save_local_data(self):
@@ -233,7 +233,7 @@ def save_local_data(self):
)
for table_name, properties in priority_dict.items():
- print("Saving {0} data...".format(table_name))
+ print(f"Saving {table_name} data...")
model_values = (
properties["model"]
.objects.using(self.source)
@@ -257,7 +257,7 @@ def fillup_target(self, **filters):
# fetch all alert summaries & alerts
# with only a subset of the datum & jobs
oldest_day = datetime.datetime.now() - self.time_window
- print("\nFetching data subset no older than {0}...".format(str(oldest_day)))
+ print(f"\nFetching data subset no older than {str(oldest_day)}...")
self.delete_local_data()
alert_summaries = list(self.query_set)
@@ -293,7 +293,7 @@ def fillup_target(self, **filters):
self.save_local_data()
def db_worker(self, process_no, alert_summaries):
- print("Process no {0} up and running...".format(process_no))
+ print(f"Process no {process_no} up and running...")
self.progress_notifier(self.bring_in_alert_summary, alert_summaries, "alert summary", 1)
def bring_in_alert_summary(self, alert_summary):
@@ -314,7 +314,7 @@ def bring_in_alert(self, alert):
if alert.id in self.models_instances["performance_alert"]:
return
- print("{0}Fetching alert #{1}...".format("\t" * 2, alert.id))
+ print("{}Fetching alert #{}...".format("\t" * 2, alert.id))
if alert.related_summary:
if alert.related_summary not in self.models_instances["performance_alert_summary"]:
# if the alert summary identified isn't registered yet
@@ -365,7 +365,7 @@ def bring_in_job(self, job):
if job.id in self.models_instances["job"]:
return
- occasional_log("{0}Fetching job #{1}".format("\t" * 4, job.id))
+ occasional_log("{}Fetching job #{}".format("\t" * 4, job.id))
self.update_list("reference_data_signature", job.signature)
self.update_list("build_platform", job.build_platform)
diff --git a/treeherder/perf/management/commands/perf_sheriff.py b/treeherder/perf/management/commands/perf_sheriff.py
index eff2eba1df9..2bb80788770 100644
--- a/treeherder/perf/management/commands/perf_sheriff.py
+++ b/treeherder/perf/management/commands/perf_sheriff.py
@@ -1,6 +1,5 @@
import logging
from datetime import datetime, timedelta
-from typing import List, Tuple
from django.core.management.base import BaseCommand
@@ -65,7 +64,7 @@ def handle(self, *args, **options):
logging.info("Sherlock: Going back to sleep.")
- def _parse_args(self, **options) -> Tuple[List, List, datetime, timedelta]:
+ def _parse_args(self, **options) -> tuple[list, list, datetime, timedelta]:
return (
options["frameworks"],
options["repositories"],
diff --git a/treeherder/perf/migrations/0001_squashed_0005_permit_github_links.py b/treeherder/perf/migrations/0001_squashed_0005_permit_github_links.py
index 2c335120074..ed21986b48e 100644
--- a/treeherder/perf/migrations/0001_squashed_0005_permit_github_links.py
+++ b/treeherder/perf/migrations/0001_squashed_0005_permit_github_links.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-08 13:19
import django.core.validators
import django.db.models.deletion
diff --git a/treeherder/perf/migrations/0006_add_alert_summary_notes.py b/treeherder/perf/migrations/0006_add_alert_summary_notes.py
index 96044127567..2066690b6c2 100644
--- a/treeherder/perf/migrations/0006_add_alert_summary_notes.py
+++ b/treeherder/perf/migrations/0006_add_alert_summary_notes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-08 14:53
from django.db import migrations, models
diff --git a/treeherder/perf/migrations/0007_star_performancealert.py b/treeherder/perf/migrations/0007_star_performancealert.py
index cb19f0e5b25..bcc725e2bd0 100644
--- a/treeherder/perf/migrations/0007_star_performancealert.py
+++ b/treeherder/perf/migrations/0007_star_performancealert.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-04-19 09:25
from django.db import migrations, models
diff --git a/treeherder/perf/migrations/0008_add_confirming_state.py b/treeherder/perf/migrations/0008_add_confirming_state.py
index f15b4de23e9..af529a4ac6c 100644
--- a/treeherder/perf/migrations/0008_add_confirming_state.py
+++ b/treeherder/perf/migrations/0008_add_confirming_state.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-14 11:40
from django.db import migrations, models
diff --git a/treeherder/perf/migrations/0009_non_nullable_issue_tracker.py b/treeherder/perf/migrations/0009_non_nullable_issue_tracker.py
index bf5aa84c5e3..f87344b66e8 100644
--- a/treeherder/perf/migrations/0009_non_nullable_issue_tracker.py
+++ b/treeherder/perf/migrations/0009_non_nullable_issue_tracker.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-05-23 08:07
from django.db import migrations, models
import django.db.models.deletion
diff --git a/treeherder/perf/migrations/0010_fix_signature_uniqueness.py b/treeherder/perf/migrations/0010_fix_signature_uniqueness.py
index 135906db1bf..1f08d9810fc 100644
--- a/treeherder/perf/migrations/0010_fix_signature_uniqueness.py
+++ b/treeherder/perf/migrations/0010_fix_signature_uniqueness.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-28 11:41
from django.db import migrations
diff --git a/treeherder/perf/migrations/0011_inc_extra_options_length.py b/treeherder/perf/migrations/0011_inc_extra_options_length.py
index 34b39843d0d..5549922fd78 100644
--- a/treeherder/perf/migrations/0011_inc_extra_options_length.py
+++ b/treeherder/perf/migrations/0011_inc_extra_options_length.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-06 08:20
from django.db import migrations, models
diff --git a/treeherder/perf/migrations/0012_rename_summary_last_updated.py b/treeherder/perf/migrations/0012_rename_summary_last_updated.py
index 369c7b79b61..ca3ff0302fc 100644
--- a/treeherder/perf/migrations/0012_rename_summary_last_updated.py
+++ b/treeherder/perf/migrations/0012_rename_summary_last_updated.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-20 15:02
from django.db import migrations
diff --git a/treeherder/perf/models.py b/treeherder/perf/models.py
index 16a2176ce10..e146b2e0088 100644
--- a/treeherder/perf/models.py
+++ b/treeherder/perf/models.py
@@ -1,7 +1,7 @@
import logging
from datetime import datetime
import json
-from typing import List, Tuple, Optional
+from typing import Optional
from functools import reduce
from django.contrib.auth.models import User
@@ -35,7 +35,7 @@ class Meta:
db_table = "performance_framework"
@classmethod
- def fetch_all_names(cls) -> List[str]:
+ def fetch_all_names(cls) -> list[str]:
return cls.objects.values_list("name", flat=True)
def __str__(self):
@@ -183,11 +183,11 @@ class Meta:
def __str__(self):
name = self.suite
if self.test:
- name += " {}".format(self.test)
+ name += f" {self.test}"
else:
name += " summary"
- return "{} {} {} {}".format(self.signature_hash, name, self.platform, self.last_updated)
+ return f"{self.signature_hash} {name} {self.platform} {self.last_updated}"
class PerformanceDatum(models.Model):
@@ -224,7 +224,7 @@ def save(self, *args, **kwargs):
self.signature.save()
def __str__(self):
- return "{} {}".format(self.value, self.push_timestamp)
+ return f"{self.value} {self.push_timestamp}"
class PerformanceDatumReplicate(models.Model):
@@ -254,7 +254,7 @@ class Meta:
db_table = "issue_tracker"
def __str__(self):
- return "{} (tasks via {})".format(self.name, self.task_base_url)
+ return f"{self.name} (tasks via {self.task_base_url})"
class PerformanceAlertSummary(models.Model):
@@ -317,7 +317,7 @@ class PerformanceAlertSummary(models.Model):
issue_tracker = models.ForeignKey(IssueTracker, on_delete=models.PROTECT, default=1) # Bugzilla
def __init__(self, *args, **kwargs):
- super(PerformanceAlertSummary, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
# allows updating timestamps only on new values
self.__prev_bug_number = self.bug_number
@@ -333,7 +333,7 @@ def save(self, *args, **kwargs):
self.triage_due_date = triage_due
if self.bug_due_date != bug_due:
self.bug_due_date = bug_due
- super(PerformanceAlertSummary, self).save(*args, **kwargs)
+ super().save(*args, **kwargs)
self.__prev_bug_number = self.bug_number
def update_status(self, using=None):
@@ -418,9 +418,7 @@ class Meta:
unique_together = ("repository", "framework", "prev_push", "push")
def __str__(self):
- return "{} {} {}-{}".format(
- self.framework, self.repository, self.prev_push.revision, self.push.revision
- )
+ return f"{self.framework} {self.repository} {self.prev_push.revision}-{self.push.revision}"
class PerformanceAlert(models.Model):
@@ -582,7 +580,7 @@ class Meta:
unique_together = ("summary", "series_signature")
def __str__(self):
- return "{} {} {}%".format(self.summary, self.series_signature, self.amount_pct)
+ return f"{self.summary} {self.series_signature} {self.amount_pct}%"
class PerformanceTag(models.Model):
@@ -615,7 +613,7 @@ class Meta:
db_table = "performance_bug_template"
def __str__(self):
- return "{} bug template".format(self.framework.name)
+ return f"{self.framework.name} bug template"
# TODO: we actually need this name for the Sherlock's hourly report
@@ -649,9 +647,7 @@ class Meta:
db_table = "backfill_report"
def __str__(self):
- return "BackfillReport(summary #{}, last update {})".format(
- self.summary.id, self.last_updated
- )
+ return f"BackfillReport(summary #{self.summary.id}, last update {self.last_updated})"
class BackfillRecord(models.Model):
@@ -750,7 +746,7 @@ def __remember_job_properties(self, job: Job):
self.job_platform_option = job.get_platform_option()
self.save()
- def get_context_border_info(self, context_property: str) -> Tuple[str, str]:
+ def get_context_border_info(self, context_property: str) -> tuple[str, str]:
"""
        Provides border (first and last) information from context based on the property
"""
@@ -760,7 +756,7 @@ def get_context_border_info(self, context_property: str) -> Tuple[str, str]:
return from_info, to_info
- def get_pushes_in_context_range(self) -> List[Push]:
+ def get_pushes_in_context_range(self) -> list[Push]:
from_time, to_time = self.get_context_border_info("push_timestamp")
return Push.objects.filter(
@@ -779,10 +775,10 @@ def get_job_search_str(self) -> str:
return ",".join(search_terms)
- def get_context(self) -> List[dict]:
+ def get_context(self) -> list[dict]:
return json.loads(self.context)
- def set_context(self, value: List[dict]):
+ def set_context(self, value: list[dict]):
self.context = json.dumps(value, default=str)
def set_log_details(self, value: dict):
@@ -801,7 +797,7 @@ class Meta:
db_table = "backfill_record"
def __str__(self):
- return "BackfillRecord(alert #{}, from {})".format(self.alert.id, self.report)
+ return f"BackfillRecord(alert #{self.alert.id}, from {self.report})"
class BackfillNotificationRecord(models.Model):
diff --git a/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py b/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py
index bb3f4cca95b..0c529c6e88f 100644
--- a/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py
+++ b/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py
@@ -1,7 +1,6 @@
from abc import ABC, abstractmethod
from copy import deepcopy
from datetime import timedelta, datetime
-from typing import Tuple, List
import requests
from django.conf import settings
@@ -32,7 +31,7 @@ def __init__(self, referer=None):
# IP when making many queries with this
self.headers = {
"Referer": f"{referer}",
- "User-Agent": "treeherder/{}".format(settings.SITE_HOSTNAME),
+ "User-Agent": f"treeherder/{settings.SITE_HOSTNAME}",
"Accept": "application/json",
}
@@ -91,7 +90,7 @@ def __call__(self, framework: str, suite: str, test: str = None) -> float:
return result
- def breakdown(self) -> Tuple[list, list]:
+ def breakdown(self) -> tuple[list, list]:
breakdown_items = (self._denominator_bugs, self._numerator_bugs)
if None in breakdown_items:
raise RuntimeError("Cannot breakdown results without running calculus first")
@@ -107,11 +106,11 @@ def has_cooled_down(self, bug: dict) -> bool:
return creation_time <= datetime.now() - self._bug_cooldown
@abstractmethod
- def _filter_numerator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
+ def _filter_numerator_bugs(self, all_filed_bugs: list[dict]) -> list[dict]:
pass
@abstractmethod
- def _filter_denominator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
+ def _filter_denominator_bugs(self, all_filed_bugs: list[dict]) -> list[dict]:
pass
def _create_default_session(self) -> NonBlockableSession:
@@ -120,12 +119,12 @@ def _create_default_session(self) -> NonBlockableSession:
"""
return NonBlockableSession()
- def __fetch_cooled_down_bugs(self, framework: str, suite: str, test: str = None) -> List[dict]:
+ def __fetch_cooled_down_bugs(self, framework: str, suite: str, test: str = None) -> list[dict]:
quantified_bugs = self.__fetch_quantified_bugs(framework, suite, test)
cooled_bugs = self.__filter_cooled_down_bugs(quantified_bugs)
return cooled_bugs
- def __fetch_quantified_bugs(self, framework: str, suite: str, test: str = None) -> List[dict]:
+ def __fetch_quantified_bugs(self, framework: str, suite: str, test: str = None) -> list[dict]:
test_moniker = " ".join(filter(None, (suite, test)))
test_id_fragments = filter(None, [framework, test_moniker])
creation_time = datetime.strftime(self.oldest_timestamp, BZ_DATETIME_FORMAT)
@@ -153,7 +152,7 @@ def __fetch_quantified_bugs(self, framework: str, suite: str, test: str = None)
else:
return bugs_resp.json()["bugs"]
- def __filter_cooled_down_bugs(self, bugs: List[dict]) -> List[dict]:
+ def __filter_cooled_down_bugs(self, bugs: list[dict]) -> list[dict]:
return [bug for bug in bugs if self.has_cooled_down(bug)]
def __reset_breakdown(self):
@@ -165,7 +164,7 @@ def __get_datetime(self, datetime_: str) -> datetime:
class EngineerTractionFormula(BugzillaFormula):
- def _filter_numerator_bugs(self, cooled_bugs: List[dict]) -> List[dict]:
+ def _filter_numerator_bugs(self, cooled_bugs: list[dict]) -> list[dict]:
tracted_bugs = []
for bug in cooled_bugs:
bug_history = self._fetch_history(bug["id"])
@@ -177,7 +176,7 @@ def _filter_numerator_bugs(self, cooled_bugs: List[dict]) -> List[dict]:
return tracted_bugs
- def _filter_denominator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
+ def _filter_denominator_bugs(self, all_filed_bugs: list[dict]) -> list[dict]:
return all_filed_bugs
def _fetch_history(self, bug_id: int) -> list:
@@ -193,7 +192,7 @@ def _fetch_history(self, bug_id: int) -> list:
body = history_resp.json()
return body["bugs"][0]["history"]
- def _notice_any_status_change_in(self, bug_history: List[dict], up_to: datetime) -> bool:
+ def _notice_any_status_change_in(self, bug_history: list[dict], up_to: datetime) -> bool:
def during_interval(change: dict) -> bool:
when = datetime.strptime(change["when"], BZ_DATETIME_FORMAT)
return when <= up_to
@@ -213,7 +212,7 @@ def _create_default_session(self) -> NonBlockableSession:
class FixRatioFormula(BugzillaFormula):
- def _filter_numerator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
+ def _filter_numerator_bugs(self, all_filed_bugs: list[dict]) -> list[dict]:
# select only RESOLVED - FIXED bugs
return [
bug
@@ -221,7 +220,7 @@ def _filter_numerator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
if bug.get("status") == "RESOLVED" and bug.get("resolution") == "FIXED"
]
- def _filter_denominator_bugs(self, all_filed_bugs: List[dict]) -> List[dict]:
+ def _filter_denominator_bugs(self, all_filed_bugs: list[dict]) -> list[dict]:
# select RESOLVED bugs, no matter what resolution they have
return [bug for bug in all_filed_bugs if bug.get("status") == "RESOLVED"]
diff --git a/treeherder/perf/sheriffing_criteria/criteria_tracking.py b/treeherder/perf/sheriffing_criteria/criteria_tracking.py
index ddd449f4ab6..73019967a52 100644
--- a/treeherder/perf/sheriffing_criteria/criteria_tracking.py
+++ b/treeherder/perf/sheriffing_criteria/criteria_tracking.py
@@ -4,7 +4,7 @@
from multiprocessing import cpu_count
from multiprocessing.pool import Pool, ThreadPool, AsyncResult
import time
-from typing import Tuple, Dict, Union, List
+from typing import Union
from datetime import datetime, timedelta
@@ -49,7 +49,7 @@ def __post_init__(self):
class RecordComputer:
def __init__(
self,
- formula_map: Dict[str, BugzillaFormula],
+ formula_map: dict[str, BugzillaFormula],
time_until_expires: timedelta,
webservice_rest_time: timedelta,
logger=None,
@@ -162,7 +162,7 @@ def __init__(self, check_interval, timeout_after: timedelta, logger=None):
self.__last_change = 0
self.__since_last_change = timedelta(seconds=0)
- def wait_for_results(self, results: List[AsyncResult]):
+ def wait_for_results(self, results: list[AsyncResult]):
self.__reset_change_track()
while True:
@@ -180,7 +180,7 @@ def wait_for_results(self, results: List[AsyncResult]):
f"Haven't computed updates for all records yet (only {len(ready)} out of {len(results)}). Still waiting..."
)
- def __updates_stagnated(self, results: List[AsyncResult], last_check_on: float) -> bool:
+ def __updates_stagnated(self, results: list[AsyncResult], last_check_on: float) -> bool:
ready_amount = len([r for r in results if r.ready()])
total_results = len(results)
new_change = total_results - ready_amount
@@ -213,7 +213,7 @@ class CriteriaTracker:
def __init__(
self,
- formula_map: Dict[str, BugzillaFormula] = None,
+ formula_map: dict[str, BugzillaFormula] = None,
record_path: str = None,
webservice_rest_time: timedelta = None,
multiprocessed: bool = False,
@@ -236,7 +236,7 @@ def __init__(
if not callable(formula):
raise TypeError("Must provide callable as sheriffing criteria formula")
- def get_test_moniker(self, record: CriteriaRecord) -> Tuple[str, str, str]:
+ def get_test_moniker(self, record: CriteriaRecord) -> tuple[str, str, str]:
return record.Framework, record.Suite, record.Test
def __iter__(self):
@@ -247,7 +247,7 @@ def load_records(self):
self.log.info(f"Loading records from {self._record_path}...")
self._records_map = {} # reset them
- with open(self._record_path, "r") as csv_file:
+ with open(self._record_path) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
test_moniker = row.get("Framework"), row.get("Suite"), row.get("Test")
@@ -283,7 +283,7 @@ def compute_record_update(self, record: CriteriaRecord) -> CriteriaRecord:
record = self._computer.apply_formulas(record)
return record
- def create_formula_map(self) -> Dict[str, BugzillaFormula]:
+ def create_formula_map(self) -> dict[str, BugzillaFormula]:
return {
self.ENGINEER_TRACTION: EngineerTractionFormula(),
self.FIX_RATIO: FixRatioFormula(),
diff --git a/treeherder/perfalert/perfalert/__init__.py b/treeherder/perfalert/perfalert/__init__.py
index a6d1dbe75f8..22f2fe7d3ec 100644
--- a/treeherder/perfalert/perfalert/__init__.py
+++ b/treeherder/perfalert/perfalert/__init__.py
@@ -107,7 +107,7 @@ def __lt__(self, o):
def __repr__(self):
values_str = "[ %s ]" % ", ".join(["%.3f" % value for value in self.values])
- return "<%s: %s, %s, %.3f, %s>" % (
+ return "<{}: {}, {}, {:.3f}, {}>".format(
self.push_timestamp,
self.push_id,
values_str,
diff --git a/treeherder/push_health/tests.py b/treeherder/push_health/tests.py
index 6ad411e9893..d7b11a4a597 100644
--- a/treeherder/push_health/tests.py
+++ b/treeherder/push_health/tests.py
@@ -103,12 +103,10 @@ def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
job_symbol = job.job_type.symbol
job_group = job.job_group.name
job_group_symbol = job.job_group.symbol
- job.job_key = "{}{}{}{}".format(config, platform, job_name, job_group)
+ job.job_key = f"{config}{platform}{job_name}{job_group}"
all_failed_jobs[job.id] = job
# The 't' ensures the key starts with a character, as required for a query selector
- test_key = re.sub(
- r"\W+", "", "t{}{}{}{}{}".format(test_name, config, platform, job_name, job_group)
- )
+ test_key = re.sub(r"\W+", "", f"t{test_name}{config}{platform}{job_name}{job_group}")
isClassifiedIntermittent = any(
job["failure_classification_id"] == 4 for job in jobs[job_name]
)
@@ -215,7 +213,7 @@ def get_test_failures(
jobs,
result_status=set(),
):
- logger.debug("Getting test failures for push: {}".format(push.id))
+ logger.debug(f"Getting test failures for push: {push.id}")
# query for jobs for the last two weeks excluding today
# find tests that have failed in the last 14 days
# this is very cache-able for reuse on other pushes.
diff --git a/treeherder/push_health/usage.py b/treeherder/push_health/usage.py
index 8fe14445b7b..c1167f82237 100644
--- a/treeherder/push_health/usage.py
+++ b/treeherder/push_health/usage.py
@@ -37,7 +37,7 @@ def get_usage():
nrql = "SELECT%20max(needInvestigation)%20FROM%20push_health_need_investigation%20FACET%20revision%20SINCE%201%20DAY%20AGO%20TIMESERIES%20where%20repo%3D'{}'%20AND%20appName%3D'{}'".format(
"try", "treeherder-prod"
)
- new_relic_url = "{}?nrql={}".format(settings.NEW_RELIC_INSIGHTS_API_URL, nrql)
+ new_relic_url = f"{settings.NEW_RELIC_INSIGHTS_API_URL}?nrql={nrql}"
headers = {
"Accept": "application/json",
"Content-Type": "application/json",
diff --git a/treeherder/push_health/utils.py b/treeherder/push_health/utils.py
index 132af648867..0eac39f1964 100644
--- a/treeherder/push_health/utils.py
+++ b/treeherder/push_health/utils.py
@@ -31,7 +31,7 @@ def clean_test(test, signature, message):
elif clean_name.startswith("http://10.0"):
left = "/tests/".join(left.split("/tests/")[1:])
right = "/tests/".join(right.split("/tests/")[1:])
- clean_name = "%s%s%s" % (left, splitter, right)
+ clean_name = f"{left}{splitter}{right}"
if "test_end for" in clean_name:
clean_name = clean_name.split()[2]
diff --git a/treeherder/services/pulse/consumers.py b/treeherder/services/pulse/consumers.py
index 8d176d0bce5..ff3b2a02d1b 100644
--- a/treeherder/services/pulse/consumers.py
+++ b/treeherder/services/pulse/consumers.py
@@ -59,7 +59,7 @@ def __init__(self, source, build_routing_key):
self.connection = Connection(source["pulse_url"], virtual_host=source.get("vhost", "/"))
self.consumers = []
self.queue = None
- self.queue_name = "queue/{}/{}".format(self.connection.userid, self.queue_suffix)
+ self.queue_name = f"queue/{self.connection.userid}/{self.queue_suffix}"
self.root_url = source["root_url"]
self.source = source
self.build_routing_key = build_routing_key
@@ -110,7 +110,7 @@ def bind_to(self, exchange, routing_key):
# get the binding key for this consumer
binding = self.get_binding_str(exchange.name, routing_key)
- logger.info("Pulse queue {} bound to: {}".format(self.queue_name, binding))
+ logger.info(f"Pulse queue {self.queue_name} bound to: {binding}")
return binding
@@ -146,11 +146,11 @@ def prune_bindings(self, new_bindings):
def get_binding_str(self, exchange, routing_key):
"""Use consistent string format for binding comparisons"""
- return "{} {}".format(exchange, routing_key)
+ return f"{exchange} {routing_key}"
def get_bindings(self, queue_name):
"""Get list of bindings from the pulse API"""
- return fetch_json("{}queue/{}/bindings".format(PULSE_GUARDIAN_URL, queue_name))
+ return fetch_json(f"{PULSE_GUARDIAN_URL}queue/{queue_name}/bindings")
class TaskConsumer(PulseConsumer):
@@ -227,7 +227,7 @@ class JointConsumer(PulseConsumer):
thread, so we use multiple threads, one per consumer.
"""
- queue_suffix = env("PULSE_QUEUE_NAME", default="queue_{}".format(socket.gethostname()))
+ queue_suffix = env("PULSE_QUEUE_NAME", default=f"queue_{socket.gethostname()}")
def bindings(self):
rv = []
diff --git a/treeherder/services/taskcluster.py b/treeherder/services/taskcluster.py
index 98e3865ff24..c24736b1ee4 100644
--- a/treeherder/services/taskcluster.py
+++ b/treeherder/services/taskcluster.py
@@ -1,7 +1,6 @@
import logging
import uuid
from abc import ABC, abstractmethod
-from typing import List, Tuple
import requests
import jsone
@@ -169,7 +168,7 @@ def _get_action(action_array: list, action_name: str) -> str:
)
@classmethod
- def _task_in_context(cls, context: List[dict], task_tags: dict) -> bool:
+ def _task_in_context(cls, context: list[dict], task_tags: dict) -> bool:
"""
A task (as defined by its tags) is said to match a tag-set if its
tags are a super-set of the tag-set. A tag-set is a set of key-value pairs.
@@ -254,7 +253,7 @@ def notify_client_factory(
return NotifyNullObject()
-def autofind_unprovided(access_token, client_id) -> Tuple[str, str]:
+def autofind_unprovided(access_token, client_id) -> tuple[str, str]:
client_id = client_id or settings.NOTIFY_CLIENT_ID
access_token = access_token or settings.NOTIFY_ACCESS_TOKEN
return client_id, access_token
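
Aside (not part of the patch): the `_task_in_context` docstring above describes tag-set matching — a task matches a tag-set when its tags are a superset of that tag-set, and the task is considered in context when at least one tag-set in the list matches. A minimal sketch of that check, using a hypothetical helper name, might look like:

```python
# Illustration only; not taken from the patch. Hypothetical helper mirroring the
# behaviour described in the _task_in_context docstring.
def task_matches_any_tag_set(context: list[dict], task_tags: dict) -> bool:
    # A tag-set matches when every one of its key/value pairs is present in the
    # task's tags (i.e. the tags are a superset of the tag-set). The task is
    # "in context" if at least one tag-set matches; an empty context matches
    # nothing in this sketch.
    return any(
        all(task_tags.get(key) == value for key, value in tag_set.items())
        for tag_set in context
    )


assert task_matches_any_tag_set([{"kind": "test"}], {"kind": "test", "os": "linux"})
assert not task_matches_any_tag_set([{"kind": "build"}], {"kind": "test"})
```
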
diff --git a/treeherder/utils/github.py b/treeherder/utils/github.py
index e57a839957a..8207eee0d24 100644
--- a/treeherder/utils/github.py
+++ b/treeherder/utils/github.py
@@ -4,31 +4,31 @@
def fetch_api(path, params=None):
if GITHUB_TOKEN:
- headers = {"Authorization": "token {}".format(GITHUB_TOKEN)}
+ headers = {"Authorization": f"token {GITHUB_TOKEN}"}
else:
headers = {}
- return fetch_json("https://api.github.com/{}".format(path), params, headers)
+ return fetch_json(f"https://api.github.com/{path}", params, headers)
def get_releases(owner, repo, params=None):
- return fetch_api("repos/{}/{}/releases".format(owner, repo), params)
+ return fetch_api(f"repos/{owner}/{repo}/releases", params)
def get_repo(owner, repo, params=None):
- return fetch_api("repos/{}/{}".format(owner, repo), params)
+ return fetch_api(f"repos/{owner}/{repo}", params)
def compare_shas(owner, repo, base, head):
- return fetch_api("repos/{}/{}/compare/{}...{}".format(owner, repo, base, head))
+ return fetch_api(f"repos/{owner}/{repo}/compare/{base}...{head}")
def get_all_commits(owner, repo, params=None):
- return fetch_api("repos/{}/{}/commits".format(owner, repo), params)
+ return fetch_api(f"repos/{owner}/{repo}/commits", params)
def get_commit(owner, repo, sha, params=None):
- return fetch_api("repos/{}/{}/commits/{}".format(owner, repo, sha), params)
+ return fetch_api(f"repos/{owner}/{repo}/commits/{sha}", params)
def get_pull_request(owner, repo, sha, params=None):
- return fetch_api("repos/{}/{}/pulls/{}/commits".format(owner, repo, sha), params)
+ return fetch_api(f"repos/{owner}/{repo}/pulls/{sha}/commits", params)
diff --git a/treeherder/utils/http.py b/treeherder/utils/http.py
index 455bb59daec..f7326451694 100644
--- a/treeherder/utils/http.py
+++ b/treeherder/utils/http.py
@@ -6,7 +6,7 @@
def make_request(url, method="GET", headers=None, timeout=30, **kwargs):
"""A wrapper around requests to set defaults & call raise_for_status()."""
headers = headers or {}
- headers["User-Agent"] = "treeherder/{}".format(settings.SITE_HOSTNAME)
+ headers["User-Agent"] = f"treeherder/{settings.SITE_HOSTNAME}"
response = requests.request(method, url, headers=headers, timeout=timeout, **kwargs)
if response.history:
params = {
diff --git a/treeherder/utils/taskcluster.py b/treeherder/utils/taskcluster.py
index 4f15423170a..97ad7695c9d 100644
--- a/treeherder/utils/taskcluster.py
+++ b/treeherder/utils/taskcluster.py
@@ -5,7 +5,7 @@
def get_task_definition(root_url, task_id):
- task_url = taskcluster_urls.api(root_url, "queue", "v1", "task/{}".format(task_id))
+ task_url = taskcluster_urls.api(root_url, "queue", "v1", f"task/{task_id}")
return fetch_json(task_url)
@@ -16,9 +16,7 @@ def download_artifact(root_url, task_id, path):
Returns either the parsed json, the parsed yaml or the plain response.
"""
- artifact_url = taskcluster_urls.api(
- root_url, "queue", "v1", "task/{}/artifacts/{}".format(task_id, path)
- )
+ artifact_url = taskcluster_urls.api(root_url, "queue", "v1", f"task/{task_id}/artifacts/{path}")
if path.endswith(".json"):
return fetch_json(artifact_url)
diff --git a/treeherder/webapp/api/bugzilla.py b/treeherder/webapp/api/bugzilla.py
index 62fe545b4cd..6ce39aad259 100644
--- a/treeherder/webapp/api/bugzilla.py
+++ b/treeherder/webapp/api/bugzilla.py
@@ -1,5 +1,3 @@
-# coding: utf-8
-
import requests
from django.conf import settings
from rest_framework import viewsets
diff --git a/treeherder/webapp/api/infra_serializers.py b/treeherder/webapp/api/infra_serializers.py
index f2fc9cf5cc2..af80d785020 100644
--- a/treeherder/webapp/api/infra_serializers.py
+++ b/treeherder/webapp/api/infra_serializers.py
@@ -37,6 +37,6 @@ def validate_repository(self, project):
Repository.objects.get(name=project)
except ObjectDoesNotExist:
- raise serializers.ValidationError("{} does not exist.".format(project))
+ raise serializers.ValidationError(f"{project} does not exist.")
return project
diff --git a/treeherder/webapp/api/investigated_test.py b/treeherder/webapp/api/investigated_test.py
index 1857810681a..580750428e3 100644
--- a/treeherder/webapp/api/investigated_test.py
+++ b/treeherder/webapp/api/investigated_test.py
@@ -26,14 +26,10 @@ def get_queryset(self):
return queryset
except Push.DoesNotExist:
- return Response(
- "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
- )
+ return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)
except InvestigatedTests.DoesNotExist:
- return Response(
- "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
- )
+ return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)
def create(self, request, *args, **kwargs):
project = kwargs["project"]
@@ -52,19 +48,13 @@ def create(self, request, *args, **kwargs):
return Response(serializer.data, status=status.HTTP_201_CREATED)
except IntegrityError:
- return Response(
- "{0} already marked investigated".format(test), status=HTTP_400_BAD_REQUEST
- )
+ return Response(f"{test} already marked investigated", status=HTTP_400_BAD_REQUEST)
except Push.DoesNotExist:
- return Response(
- "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
- )
+ return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)
except JobType.DoesNotExist:
- return Response(
- "No JobType with job name: {0}".format(jobName), status=HTTP_404_NOT_FOUND
- )
+ return Response(f"No JobType with job name: {jobName}", status=HTTP_404_NOT_FOUND)
def destroy(self, request, project, pk=None):
try:
diff --git a/treeherder/webapp/api/jobs.py b/treeherder/webapp/api/jobs.py
index f17da7d86ca..6eac51608ab 100644
--- a/treeherder/webapp/api/jobs.py
+++ b/treeherder/webapp/api/jobs.py
@@ -279,7 +279,7 @@ def retrieve(self, request, project, pk=None):
repository__name=project, id=pk
)
except Job.DoesNotExist:
- return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
+ return Response(f"No job with id: {pk}", status=HTTP_404_NOT_FOUND)
resp = serializers.JobProjectSerializer(job, read_only=True).data
@@ -333,7 +333,7 @@ def list(self, request, project):
parser.parse(param_value)
except ValueError:
return Response(
- "Invalid date value for `last_modified`: {}".format(param_value),
+ f"Invalid date value for `last_modified`: {param_value}",
status=HTTP_400_BAD_REQUEST,
)
filter_params[param_key] = param_value
@@ -349,14 +349,14 @@ def list(self, request, project):
return_type = filter_params.get("return_type", "dict").lower()
if count > MAX_JOBS_COUNT:
- msg = "Specified count exceeds API MAX_JOBS_COUNT value: {}".format(MAX_JOBS_COUNT)
+ msg = f"Specified count exceeds API MAX_JOBS_COUNT value: {MAX_JOBS_COUNT}"
return Response({"detail": msg}, status=HTTP_400_BAD_REQUEST)
try:
repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
return Response(
- {"detail": "No project with name {}".format(project)}, status=HTTP_404_NOT_FOUND
+ {"detail": f"No project with name {project}"}, status=HTTP_404_NOT_FOUND
)
jobs = JobFilter(
{k: v for (k, v) in filter_params.items()},
@@ -379,7 +379,7 @@ def text_log_steps(self, request, project, pk=None):
try:
job = Job.objects.get(repository__name=project, id=pk)
except ObjectDoesNotExist:
- return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
+ return Response(f"No job with id: {pk}", status=HTTP_404_NOT_FOUND)
textlog_steps = (
TextLogStep.objects.filter(job=job)
@@ -398,7 +398,7 @@ def text_log_errors(self, request, project, pk=None):
try:
job = Job.objects.get(repository__name=project, id=pk)
except Job.DoesNotExist:
- return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
+ return Response(f"No job with id: {pk}", status=HTTP_404_NOT_FOUND)
textlog_errors = (
TextLogError.objects.filter(job=job)
.select_related("_metadata", "_metadata__failure_line")
@@ -417,7 +417,7 @@ def bug_suggestions(self, request, project, pk=None):
try:
job = Job.objects.get(repository__name=project, id=pk)
except ObjectDoesNotExist:
- return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
+ return Response(f"No job with id: {pk}", status=HTTP_404_NOT_FOUND)
return Response(get_error_summary(job))
@@ -430,13 +430,13 @@ def similar_jobs(self, request, project, pk=None):
repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
return Response(
- {"detail": "No project with name {}".format(project)}, status=HTTP_404_NOT_FOUND
+ {"detail": f"No project with name {project}"}, status=HTTP_404_NOT_FOUND
)
try:
job = Job.objects.get(repository=repository, id=pk)
except ObjectDoesNotExist:
- return Response("No job with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
+ return Response(f"No job with id: {pk}", status=HTTP_404_NOT_FOUND)
filter_params = request.query_params.copy()
diff --git a/treeherder/webapp/api/note.py b/treeherder/webapp/api/note.py
index 94c6d40a04e..2ba421fb258 100644
--- a/treeherder/webapp/api/note.py
+++ b/treeherder/webapp/api/note.py
@@ -34,7 +34,7 @@ def retrieve(self, request, project, pk=None):
serializer = JobNoteSerializer(JobNote.objects.get(id=pk))
return Response(serializer.data)
except JobNote.DoesNotExist:
- return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
+ return Response(f"No note with id: {pk}", status=HTTP_404_NOT_FOUND)
def list(self, request, project):
"""
@@ -116,7 +116,7 @@ def create(self, request, project):
exc_info=True,
)
- return Response({"message": "note stored for job {0}".format(request.data["job_id"])})
+ return Response({"message": "note stored for job {}".format(request.data["job_id"])})
def destroy(self, request, project, pk=None):
"""
@@ -127,4 +127,4 @@ def destroy(self, request, project, pk=None):
note.delete()
return Response({"message": "Note deleted"})
except JobNote.DoesNotExist:
- return Response("No note with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
+ return Response(f"No note with id: {pk}", status=HTTP_404_NOT_FOUND)
diff --git a/treeherder/webapp/api/perfcompare_utils.py b/treeherder/webapp/api/perfcompare_utils.py
index 89d3d3ad80f..697faf867c4 100644
--- a/treeherder/webapp/api/perfcompare_utils.py
+++ b/treeherder/webapp/api/perfcompare_utils.py
@@ -30,16 +30,16 @@
def get_test_suite(suite, test):
- return suite if test == "" or test == suite else "{} {}".format(suite, test)
+ return suite if test == "" or test == suite else f"{suite} {test}"
def get_header_name(extra_options, option_name, test_suite):
- name = "{} {} {}".format(test_suite, option_name, extra_options)
+ name = f"{test_suite} {option_name} {extra_options}"
return name
def get_sig_identifier(header, platform):
- return "{} {}".format(header, platform)
+ return f"{header} {platform}"
def get_option_collection_map():
diff --git a/treeherder/webapp/api/performance_data.py b/treeherder/webapp/api/performance_data.py
index 58a646bd2ed..df3aa06af26 100644
--- a/treeherder/webapp/api/performance_data.py
+++ b/treeherder/webapp/api/performance_data.py
@@ -1,7 +1,6 @@
import datetime
import time
from collections import defaultdict
-from typing import List
from urllib.parse import urlencode
import django_filters
@@ -819,7 +818,7 @@ def list(self, request):
return Response(data=serialized_data)
@staticmethod
- def _filter_out_retriggers(serialized_data: List[dict]) -> List[dict]:
+ def _filter_out_retriggers(serialized_data):
"""
Removes data points resulted from retriggers
"""
@@ -889,7 +888,7 @@ def list(self, request):
new_push = models.Push.objects.get(revision=new_rev, repository__name=new_repo_name)
except models.Push.DoesNotExist:
return Response(
- "No new push with revision {} from repo {}.".format(new_rev, new_repo_name),
+ f"No new push with revision {new_rev} from repo {new_repo_name}.",
status=HTTP_400_BAD_REQUEST,
)
@@ -910,7 +909,7 @@ def list(self, request):
end_day = new_push.time
except models.Push.DoesNotExist:
return Response(
- "No base push with revision {} from repo {}.".format(base_rev, base_repo_name),
+ f"No base push with revision {base_rev} from repo {base_repo_name}.",
status=HTTP_400_BAD_REQUEST,
)
@@ -1179,7 +1178,7 @@ def _get_filtered_signatures_by_interval(signatures, interval):
)
@staticmethod
- def _get_signatures_values(signatures: List[PerformanceSignature]):
+ def _get_signatures_values(signatures):
return signatures.values(
"framework_id",
"id",
diff --git a/treeherder/webapp/api/performance_serializers.py b/treeherder/webapp/api/performance_serializers.py
index cbf422c9ed3..5e56e1601eb 100644
--- a/treeherder/webapp/api/performance_serializers.py
+++ b/treeherder/webapp/api/performance_serializers.py
@@ -387,7 +387,7 @@ def validate_repository(self, repository):
Repository.objects.get(name=repository)
except ObjectDoesNotExist:
- raise serializers.ValidationError("{} does not exist.".format(repository))
+ raise serializers.ValidationError(f"{repository} does not exist.")
return repository
@@ -445,7 +445,7 @@ class Meta:
def get_name(self, value):
test = value["test"]
suite = value["suite"]
- test_suite = suite if test == "" or test == suite else "{} {}".format(suite, test)
+ test_suite = suite if test == "" or test == suite else f"{suite} {test}"
return "{} {} {}".format(test_suite, value["option_name"], value["extra_options"])
diff --git a/treeherder/webapp/api/push.py b/treeherder/webapp/api/push.py
index 6b1dbb8d87e..212e14f006c 100644
--- a/treeherder/webapp/api/push.py
+++ b/treeherder/webapp/api/push.py
@@ -66,7 +66,7 @@ def list(self, request, project):
repository = Repository.objects.get(name=project)
except Repository.DoesNotExist:
return Response(
- {"detail": "No project with name {}".format(project)}, status=HTTP_404_NOT_FOUND
+ {"detail": f"No project with name {project}"}, status=HTTP_404_NOT_FOUND
)
pushes = pushes.filter(repository=repository)
@@ -125,7 +125,7 @@ def list(self, request, project):
value = datetime.datetime.fromtimestamp(float(filter_params.get(param)))
except ValueError:
return Response(
- {"detail": "Invalid timestamp specified for {}".format(param)},
+ {"detail": f"Invalid timestamp specified for {param}"},
status=HTTP_400_BAD_REQUEST,
)
pushes = pushes.filter(**{param.replace("push_timestamp", "time"): value})
@@ -135,7 +135,7 @@ def list(self, request, project):
value = int(filter_params.get(param, 0))
except ValueError:
return Response(
- {"detail": "Invalid timestamp specified for {}".format(param)},
+ {"detail": f"Invalid timestamp specified for {param}"},
status=HTTP_400_BAD_REQUEST,
)
if value:
@@ -168,7 +168,7 @@ def list(self, request, project):
return Response({"detail": "Valid count value required"}, status=HTTP_400_BAD_REQUEST)
if count > MAX_PUSH_COUNT:
- msg = "Specified count exceeds api limit: {}".format(MAX_PUSH_COUNT)
+ msg = f"Specified count exceeds api limit: {MAX_PUSH_COUNT}"
return Response({"detail": msg}, status=HTTP_400_BAD_REQUEST)
# we used to have a "full" parameter for this endpoint so you could
@@ -196,7 +196,7 @@ def retrieve(self, request, project, pk=None):
serializer = PushSerializer(push)
return Response(serializer.data)
except Push.DoesNotExist:
- return Response("No push with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
+ return Response(f"No push with id: {pk}", status=HTTP_404_NOT_FOUND)
@action(detail=True)
def status(self, request, project, pk=None):
@@ -207,7 +207,7 @@ def status(self, request, project, pk=None):
try:
push = Push.objects.get(id=pk)
except Push.DoesNotExist:
- return Response("No push with id: {0}".format(pk), status=HTTP_404_NOT_FOUND)
+ return Response(f"No push with id: {pk}", status=HTTP_404_NOT_FOUND)
return Response(push.get_status())
@action(detail=False)
@@ -228,9 +228,7 @@ def health_summary(self, request, project):
revision__in=revision.split(","), repository__name=project
)
except Push.DoesNotExist:
- return Response(
- "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
- )
+ return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)
else:
try:
pushes = (
@@ -246,9 +244,7 @@ def health_summary(self, request, project):
pushes = pushes[: int(count)]
except Push.DoesNotExist:
- return Response(
- "No pushes found for author: {0}".format(author), status=HTTP_404_NOT_FOUND
- )
+ return Response(f"No pushes found for author: {author}", status=HTTP_404_NOT_FOUND)
data = []
commit_history = None
@@ -337,9 +333,7 @@ def health(self, request, project):
repository = Repository.objects.get(name=project)
push = Push.objects.get(revision=revision, repository=repository)
except Push.DoesNotExist:
- return Response(
- "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
- )
+ return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)
commit_history_details = None
result_status, jobs = get_test_failure_jobs(push)
@@ -448,10 +442,10 @@ def decisiontask(self, request, project):
for job in decision_jobs
}
)
- logger.error("/decisiontask/ found no decision jobs for {}".format(push_ids))
+ logger.error(f"/decisiontask/ found no decision jobs for {push_ids}")
self.get_decision_jobs.invalidate(push_ids)
return Response(
- "No decision tasks found for pushes: {}".format(push_ids), status=HTTP_404_NOT_FOUND
+ f"No decision tasks found for pushes: {push_ids}", status=HTTP_404_NOT_FOUND
)
# TODO: Remove when we no longer support short revisions: Bug 1306707
@@ -473,9 +467,7 @@ def group_results(self, request, project):
repository = Repository.objects.get(name=project)
push = Push.objects.get(revision=revision, repository=repository)
except Push.DoesNotExist:
- return Response(
- "No push with revision: {0}".format(revision), status=HTTP_404_NOT_FOUND
- )
+ return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)
groups = get_group_results(push)
return Response(groups)
diff --git a/treeherder/webapp/api/serializers.py b/treeherder/webapp/api/serializers.py
index 47228f60702..cc1e43fd371 100644
--- a/treeherder/webapp/api/serializers.py
+++ b/treeherder/webapp/api/serializers.py
@@ -343,7 +343,7 @@ def to_representation(self, value):
build_type = value["build_type"]
platform = value["job__machine_platform__platform"]
test_suite = value["job__signature__job_type_name"]
- new_string = test_suite.replace("test-{}".format(platform), "")
+ new_string = test_suite.replace(f"test-{platform}", "")
new_test_suite = new_string.replace(build_type, "")
return re.sub(r"^.(/|-)|(/|-)$", "", new_test_suite)
@@ -402,7 +402,7 @@ def validate_tree(self, tree):
models.Repository.objects.get(name=tree)
except ObjectDoesNotExist:
- raise serializers.ValidationError("{} does not exist.".format(tree))
+ raise serializers.ValidationError(f"{tree} does not exist.")
return tree
From fceb87bb880f14517ca62346b229e1939bd638e9 Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Fri, 2 Feb 2024 15:32:34 +0100
Subject: [PATCH 008/128] Remove flake8 from requirements and update pip-deps
---
docs/backend_tasks.md | 6 +-
requirements/common.txt | 338 +++++++++----------
requirements/dev.in | 1 -
requirements/dev.txt | 697 ++++++++++++++++++++--------------------
4 files changed, 537 insertions(+), 505 deletions(-)
diff --git a/docs/backend_tasks.md b/docs/backend_tasks.md
index 05c35a9018e..303bd8a0688 100644
--- a/docs/backend_tasks.md
+++ b/docs/backend_tasks.md
@@ -2,7 +2,7 @@
## Running the tests
-You can run flake8 and the pytest suite inside Docker, using:
+You can run the linter and the pytest suite inside Docker, using:
```bash
docker-compose run backend ./runtests.sh
@@ -37,10 +37,10 @@ Then run the individual tools within that shell, like so:
For more options, see `pytest --help` or .
-- [flake8](https://flake8.readthedocs.io/):
+- [Ruff](https://docs.astral.sh/ruff/):
```bash
- flake8
+ ruff check .
```
## Hide Jobs with Tiers
diff --git a/requirements/common.txt b/requirements/common.txt
index d3e18440e2d..c7c3d73c515 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -4,83 +4,83 @@
#
# pip-compile --generate-hashes --output-file=requirements/common.txt requirements/common.in
#
-aiohttp==3.9.1 \
- --hash=sha256:02ab6006ec3c3463b528374c4cdce86434e7b89ad355e7bf29e2f16b46c7dd6f \
- --hash=sha256:04fa38875e53eb7e354ece1607b1d2fdee2d175ea4e4d745f6ec9f751fe20c7c \
- --hash=sha256:0b0a6a36ed7e164c6df1e18ee47afbd1990ce47cb428739d6c99aaabfaf1b3af \
- --hash=sha256:0d406b01a9f5a7e232d1b0d161b40c05275ffbcbd772dc18c1d5a570961a1ca4 \
- --hash=sha256:0e49b08eafa4f5707ecfb321ab9592717a319e37938e301d462f79b4e860c32a \
- --hash=sha256:0e7ba7ff228c0d9a2cd66194e90f2bca6e0abca810b786901a569c0de082f489 \
- --hash=sha256:11cb254e397a82efb1805d12561e80124928e04e9c4483587ce7390b3866d213 \
- --hash=sha256:11ff168d752cb41e8492817e10fb4f85828f6a0142b9726a30c27c35a1835f01 \
- --hash=sha256:176df045597e674fa950bf5ae536be85699e04cea68fa3a616cf75e413737eb5 \
- --hash=sha256:219a16763dc0294842188ac8a12262b5671817042b35d45e44fd0a697d8c8361 \
- --hash=sha256:22698f01ff5653fe66d16ffb7658f582a0ac084d7da1323e39fd9eab326a1f26 \
- --hash=sha256:237533179d9747080bcaad4d02083ce295c0d2eab3e9e8ce103411a4312991a0 \
- --hash=sha256:289ba9ae8e88d0ba16062ecf02dd730b34186ea3b1e7489046fc338bdc3361c4 \
- --hash=sha256:2c59e0076ea31c08553e868cec02d22191c086f00b44610f8ab7363a11a5d9d8 \
- --hash=sha256:2c9376e2b09895c8ca8b95362283365eb5c03bdc8428ade80a864160605715f1 \
- --hash=sha256:3135713c5562731ee18f58d3ad1bf41e1d8883eb68b363f2ffde5b2ea4b84cc7 \
- --hash=sha256:3b9c7426923bb7bd66d409da46c41e3fb40f5caf679da624439b9eba92043fa6 \
- --hash=sha256:3c0266cd6f005e99f3f51e583012de2778e65af6b73860038b968a0a8888487a \
- --hash=sha256:41473de252e1797c2d2293804e389a6d6986ef37cbb4a25208de537ae32141dd \
- --hash=sha256:4831df72b053b1eed31eb00a2e1aff6896fb4485301d4ccb208cac264b648db4 \
- --hash=sha256:49f0c1b3c2842556e5de35f122fc0f0b721334ceb6e78c3719693364d4af8499 \
- --hash=sha256:4b4c452d0190c5a820d3f5c0f3cd8a28ace48c54053e24da9d6041bf81113183 \
- --hash=sha256:4ee8caa925aebc1e64e98432d78ea8de67b2272252b0a931d2ac3bd876ad5544 \
- --hash=sha256:500f1c59906cd142d452074f3811614be04819a38ae2b3239a48b82649c08821 \
- --hash=sha256:5216b6082c624b55cfe79af5d538e499cd5f5b976820eac31951fb4325974501 \
- --hash=sha256:54311eb54f3a0c45efb9ed0d0a8f43d1bc6060d773f6973efd90037a51cd0a3f \
- --hash=sha256:54631fb69a6e44b2ba522f7c22a6fb2667a02fd97d636048478db2fd8c4e98fe \
- --hash=sha256:565760d6812b8d78d416c3c7cfdf5362fbe0d0d25b82fed75d0d29e18d7fc30f \
- --hash=sha256:598db66eaf2e04aa0c8900a63b0101fdc5e6b8a7ddd805c56d86efb54eb66672 \
- --hash=sha256:5c4fa235d534b3547184831c624c0b7c1e262cd1de847d95085ec94c16fddcd5 \
- --hash=sha256:69985d50a2b6f709412d944ffb2e97d0be154ea90600b7a921f95a87d6f108a2 \
- --hash=sha256:69da0f3ed3496808e8cbc5123a866c41c12c15baaaead96d256477edf168eb57 \
- --hash=sha256:6c93b7c2e52061f0925c3382d5cb8980e40f91c989563d3d32ca280069fd6a87 \
- --hash=sha256:70907533db712f7aa791effb38efa96f044ce3d4e850e2d7691abd759f4f0ae0 \
- --hash=sha256:81b77f868814346662c96ab36b875d7814ebf82340d3284a31681085c051320f \
- --hash=sha256:82eefaf1a996060602f3cc1112d93ba8b201dbf5d8fd9611227de2003dddb3b7 \
- --hash=sha256:85c3e3c9cb1d480e0b9a64c658cd66b3cfb8e721636ab8b0e746e2d79a7a9eed \
- --hash=sha256:8a22a34bc594d9d24621091d1b91511001a7eea91d6652ea495ce06e27381f70 \
- --hash=sha256:8cef8710fb849d97c533f259103f09bac167a008d7131d7b2b0e3a33269185c0 \
- --hash=sha256:8d44e7bf06b0c0a70a20f9100af9fcfd7f6d9d3913e37754c12d424179b4e48f \
- --hash=sha256:8d7f98fde213f74561be1d6d3fa353656197f75d4edfbb3d94c9eb9b0fc47f5d \
- --hash=sha256:8d8e4450e7fe24d86e86b23cc209e0023177b6d59502e33807b732d2deb6975f \
- --hash=sha256:8fc49a87ac269d4529da45871e2ffb6874e87779c3d0e2ccd813c0899221239d \
- --hash=sha256:90ec72d231169b4b8d6085be13023ece8fa9b1bb495e4398d847e25218e0f431 \
- --hash=sha256:91c742ca59045dce7ba76cab6e223e41d2c70d79e82c284a96411f8645e2afff \
- --hash=sha256:9b05d33ff8e6b269e30a7957bd3244ffbce2a7a35a81b81c382629b80af1a8bf \
- --hash=sha256:9b05d5cbe9dafcdc733262c3a99ccf63d2f7ce02543620d2bd8db4d4f7a22f83 \
- --hash=sha256:9c5857612c9813796960c00767645cb5da815af16dafb32d70c72a8390bbf690 \
- --hash=sha256:a34086c5cc285be878622e0a6ab897a986a6e8bf5b67ecb377015f06ed316587 \
- --hash=sha256:ab221850108a4a063c5b8a70f00dd7a1975e5a1713f87f4ab26a46e5feac5a0e \
- --hash=sha256:b796b44111f0cab6bbf66214186e44734b5baab949cb5fb56154142a92989aeb \
- --hash=sha256:b8c3a67eb87394386847d188996920f33b01b32155f0a94f36ca0e0c635bf3e3 \
- --hash=sha256:bcb6532b9814ea7c5a6a3299747c49de30e84472fa72821b07f5a9818bce0f66 \
- --hash=sha256:bcc0ea8d5b74a41b621ad4a13d96c36079c81628ccc0b30cfb1603e3dfa3a014 \
- --hash=sha256:bea94403a21eb94c93386d559bce297381609153e418a3ffc7d6bf772f59cc35 \
- --hash=sha256:bff7e2811814fa2271be95ab6e84c9436d027a0e59665de60edf44e529a42c1f \
- --hash=sha256:c72444d17777865734aa1a4d167794c34b63e5883abb90356a0364a28904e6c0 \
- --hash=sha256:c7b5d5d64e2a14e35a9240b33b89389e0035e6de8dbb7ffa50d10d8b65c57449 \
- --hash=sha256:c7e939f1ae428a86e4abbb9a7c4732bf4706048818dfd979e5e2839ce0159f23 \
- --hash=sha256:c88a15f272a0ad3d7773cf3a37cc7b7d077cbfc8e331675cf1346e849d97a4e5 \
- --hash=sha256:c9110c06eaaac7e1f5562caf481f18ccf8f6fdf4c3323feab28a93d34cc646bd \
- --hash=sha256:ca7ca5abfbfe8d39e653870fbe8d7710be7a857f8a8386fc9de1aae2e02ce7e4 \
- --hash=sha256:cae4c0c2ca800c793cae07ef3d40794625471040a87e1ba392039639ad61ab5b \
- --hash=sha256:cdefe289681507187e375a5064c7599f52c40343a8701761c802c1853a504558 \
- --hash=sha256:cf2a0ac0615842b849f40c4d7f304986a242f1e68286dbf3bd7a835e4f83acfd \
- --hash=sha256:cfeadf42840c1e870dc2042a232a8748e75a36b52d78968cda6736de55582766 \
- --hash=sha256:d737e69d193dac7296365a6dcb73bbbf53bb760ab25a3727716bbd42022e8d7a \
- --hash=sha256:d7481f581251bb5558ba9f635db70908819caa221fc79ee52a7f58392778c636 \
- --hash=sha256:df9cf74b9bc03d586fc53ba470828d7b77ce51b0582d1d0b5b2fb673c0baa32d \
- --hash=sha256:e1f80197f8b0b846a8d5cf7b7ec6084493950d0882cc5537fb7b96a69e3c8590 \
- --hash=sha256:ecca113f19d5e74048c001934045a2b9368d77b0b17691d905af18bd1c21275e \
- --hash=sha256:ee2527134f95e106cc1653e9ac78846f3a2ec1004cf20ef4e02038035a74544d \
- --hash=sha256:f27fdaadce22f2ef950fc10dcdf8048407c3b42b73779e48a4e76b3c35bca26c \
- --hash=sha256:f694dc8a6a3112059258a725a4ebe9acac5fe62f11c77ac4dcf896edfa78ca28 \
- --hash=sha256:f800164276eec54e0af5c99feb9494c295118fc10a11b997bbb1348ba1a52065 \
- --hash=sha256:ffcd828e37dc219a72c9012ec44ad2e7e3066bec6ff3aaa19e7d435dbf4032ca
+aiohttp==3.9.3 \
+ --hash=sha256:017a21b0df49039c8f46ca0971b3a7fdc1f56741ab1240cb90ca408049766168 \
+ --hash=sha256:039df344b45ae0b34ac885ab5b53940b174530d4dd8a14ed8b0e2155b9dddccb \
+ --hash=sha256:055ce4f74b82551678291473f66dc9fb9048a50d8324278751926ff0ae7715e5 \
+ --hash=sha256:06a9b2c8837d9a94fae16c6223acc14b4dfdff216ab9b7202e07a9a09541168f \
+ --hash=sha256:07b837ef0d2f252f96009e9b8435ec1fef68ef8b1461933253d318748ec1acdc \
+ --hash=sha256:0ed621426d961df79aa3b963ac7af0d40392956ffa9be022024cd16297b30c8c \
+ --hash=sha256:0fa43c32d1643f518491d9d3a730f85f5bbaedcbd7fbcae27435bb8b7a061b29 \
+ --hash=sha256:1f5a71d25cd8106eab05f8704cd9167b6e5187bcdf8f090a66c6d88b634802b4 \
+ --hash=sha256:1f5cd333fcf7590a18334c90f8c9147c837a6ec8a178e88d90a9b96ea03194cc \
+ --hash=sha256:27468897f628c627230dba07ec65dc8d0db566923c48f29e084ce382119802bc \
+ --hash=sha256:298abd678033b8571995650ccee753d9458dfa0377be4dba91e4491da3f2be63 \
+ --hash=sha256:2c895a656dd7e061b2fd6bb77d971cc38f2afc277229ce7dd3552de8313a483e \
+ --hash=sha256:361a1026c9dd4aba0109e4040e2aecf9884f5cfe1b1b1bd3d09419c205e2e53d \
+ --hash=sha256:363afe77cfcbe3a36353d8ea133e904b108feea505aa4792dad6585a8192c55a \
+ --hash=sha256:38a19bc3b686ad55804ae931012f78f7a534cce165d089a2059f658f6c91fa60 \
+ --hash=sha256:38f307b41e0bea3294a9a2a87833191e4bcf89bb0365e83a8be3a58b31fb7f38 \
+ --hash=sha256:3e59c23c52765951b69ec45ddbbc9403a8761ee6f57253250c6e1536cacc758b \
+ --hash=sha256:4b4af9f25b49a7be47c0972139e59ec0e8285c371049df1a63b6ca81fdd216a2 \
+ --hash=sha256:504b6981675ace64c28bf4a05a508af5cde526e36492c98916127f5a02354d53 \
+ --hash=sha256:50fca156d718f8ced687a373f9e140c1bb765ca16e3d6f4fe116e3df7c05b2c5 \
+ --hash=sha256:522a11c934ea660ff8953eda090dcd2154d367dec1ae3c540aff9f8a5c109ab4 \
+ --hash=sha256:52df73f14ed99cee84865b95a3d9e044f226320a87af208f068ecc33e0c35b96 \
+ --hash=sha256:595f105710293e76b9dc09f52e0dd896bd064a79346234b521f6b968ffdd8e58 \
+ --hash=sha256:59c26c95975f26e662ca78fdf543d4eeaef70e533a672b4113dd888bd2423caa \
+ --hash=sha256:5bce0dc147ca85caa5d33debc4f4d65e8e8b5c97c7f9f660f215fa74fc49a321 \
+ --hash=sha256:5eafe2c065df5401ba06821b9a054d9cb2848867f3c59801b5d07a0be3a380ae \
+ --hash=sha256:5ed3e046ea7b14938112ccd53d91c1539af3e6679b222f9469981e3dac7ba1ce \
+ --hash=sha256:5fe9ce6c09668063b8447f85d43b8d1c4e5d3d7e92c63173e6180b2ac5d46dd8 \
+ --hash=sha256:648056db9a9fa565d3fa851880f99f45e3f9a771dd3ff3bb0c048ea83fb28194 \
+ --hash=sha256:69361bfdca5468c0488d7017b9b1e5ce769d40b46a9f4a2eed26b78619e9396c \
+ --hash=sha256:6b0e029353361f1746bac2e4cc19b32f972ec03f0f943b390c4ab3371840aabf \
+ --hash=sha256:6b88f9386ff1ad91ace19d2a1c0225896e28815ee09fc6a8932fded8cda97c3d \
+ --hash=sha256:770d015888c2a598b377bd2f663adfd947d78c0124cfe7b959e1ef39f5b13869 \
+ --hash=sha256:7943c414d3a8d9235f5f15c22ace69787c140c80b718dcd57caaade95f7cd93b \
+ --hash=sha256:7cf5c9458e1e90e3c390c2639f1017a0379a99a94fdfad3a1fd966a2874bba52 \
+ --hash=sha256:7f46acd6a194287b7e41e87957bfe2ad1ad88318d447caf5b090012f2c5bb528 \
+ --hash=sha256:82e6aa28dd46374f72093eda8bcd142f7771ee1eb9d1e223ff0fa7177a96b4a5 \
+ --hash=sha256:835a55b7ca49468aaaac0b217092dfdff370e6c215c9224c52f30daaa735c1c1 \
+ --hash=sha256:84871a243359bb42c12728f04d181a389718710129b36b6aad0fc4655a7647d4 \
+ --hash=sha256:8aacb477dc26797ee089721536a292a664846489c49d3ef9725f992449eda5a8 \
+ --hash=sha256:8e2c45c208c62e955e8256949eb225bd8b66a4c9b6865729a786f2aa79b72e9d \
+ --hash=sha256:90842933e5d1ff760fae6caca4b2b3edba53ba8f4b71e95dacf2818a2aca06f7 \
+ --hash=sha256:938a9653e1e0c592053f815f7028e41a3062e902095e5a7dc84617c87267ebd5 \
+ --hash=sha256:939677b61f9d72a4fa2a042a5eee2a99a24001a67c13da113b2e30396567db54 \
+ --hash=sha256:9d3c9b50f19704552f23b4eaea1fc082fdd82c63429a6506446cbd8737823da3 \
+ --hash=sha256:a6fe5571784af92b6bc2fda8d1925cccdf24642d49546d3144948a6a1ed58ca5 \
+ --hash=sha256:a78ed8a53a1221393d9637c01870248a6f4ea5b214a59a92a36f18151739452c \
+ --hash=sha256:ab40e6251c3873d86ea9b30a1ac6d7478c09277b32e14745d0d3c6e76e3c7e29 \
+ --hash=sha256:abf151955990d23f84205286938796c55ff11bbfb4ccfada8c9c83ae6b3c89a3 \
+ --hash=sha256:acef0899fea7492145d2bbaaaec7b345c87753168589cc7faf0afec9afe9b747 \
+ --hash=sha256:b40670ec7e2156d8e57f70aec34a7216407848dfe6c693ef131ddf6e76feb672 \
+ --hash=sha256:b791a3143681a520c0a17e26ae7465f1b6f99461a28019d1a2f425236e6eedb5 \
+ --hash=sha256:b955ed993491f1a5da7f92e98d5dad3c1e14dc175f74517c4e610b1f2456fb11 \
+ --hash=sha256:ba39e9c8627edc56544c8628cc180d88605df3892beeb2b94c9bc857774848ca \
+ --hash=sha256:bca77a198bb6e69795ef2f09a5f4c12758487f83f33d63acde5f0d4919815768 \
+ --hash=sha256:c3452ea726c76e92f3b9fae4b34a151981a9ec0a4847a627c43d71a15ac32aa6 \
+ --hash=sha256:c46956ed82961e31557b6857a5ca153c67e5476972e5f7190015018760938da2 \
+ --hash=sha256:c7c8b816c2b5af5c8a436df44ca08258fc1a13b449393a91484225fcb7545533 \
+ --hash=sha256:cd73265a9e5ea618014802ab01babf1940cecb90c9762d8b9e7d2cc1e1969ec6 \
+ --hash=sha256:dad46e6f620574b3b4801c68255492e0159d1712271cc99d8bdf35f2043ec266 \
+ --hash=sha256:dc9b311743a78043b26ffaeeb9715dc360335e5517832f5a8e339f8a43581e4d \
+ --hash=sha256:df822ee7feaaeffb99c1a9e5e608800bd8eda6e5f18f5cfb0dc7eeb2eaa6bbec \
+ --hash=sha256:e083c285857b78ee21a96ba1eb1b5339733c3563f72980728ca2b08b53826ca5 \
+ --hash=sha256:e5e46b578c0e9db71d04c4b506a2121c0cb371dd89af17a0586ff6769d4c58c1 \
+ --hash=sha256:e99abf0bba688259a496f966211c49a514e65afa9b3073a1fcee08856e04425b \
+ --hash=sha256:ee43080e75fc92bf36219926c8e6de497f9b247301bbf88c5c7593d931426679 \
+ --hash=sha256:f033d80bc6283092613882dfe40419c6a6a1527e04fc69350e87a9df02bbc283 \
+ --hash=sha256:f1088fa100bf46e7b398ffd9904f4808a0612e1d966b4aa43baa535d1b6341eb \
+ --hash=sha256:f56455b0c2c7cc3b0c584815264461d07b177f903a04481dfc33e08a89f0c26b \
+ --hash=sha256:f59dfe57bb1ec82ac0698ebfcdb7bcd0e99c255bd637ff613760d5f33e7c81b3 \
+ --hash=sha256:f7217af2e14da0856e082e96ff637f14ae45c10a5714b63c77f26d8884cf1051 \
+ --hash=sha256:f734e38fd8666f53da904c52a23ce517f1b07722118d750405af7e4123933511 \
+ --hash=sha256:f95511dd5d0e05fd9728bac4096319f80615aaef4acbecb35a990afebe953b0e \
+ --hash=sha256:fdd215b7b7fd4a53994f238d0f46b7ba4ac4c0adb12452beee724ddd0743ae5d \
+ --hash=sha256:feeb18a801aacb098220e2c3eea59a512362eb408d4afd0c242044c33ad6d542 \
+ --hash=sha256:ff30218887e62209942f91ac1be902cc80cddb86bf00fbc6783b7a43b2bea26f
# via taskcluster
aiosignal==1.3.1 \
--hash=sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc \
@@ -123,13 +123,13 @@ blessed==1.20.0 \
--hash=sha256:0c542922586a265e699188e52d5f5ac5ec0dd517e5a1041d90d2bbf23f906058 \
--hash=sha256:2cdd67f8746e048f00df47a2880f4d6acbcdb399031b604e34ba8f71d5787680
# via mozlog
-boto3==1.34.26 \
- --hash=sha256:0491a65e55de999d07f42bb28ff6a38bad493934154b6304fcdfb4699a612d6c \
- --hash=sha256:881b07d0d55e5d85b62e6c965efcb2820bdfbd8f23a73a7bc9dac3a4997a1343
+boto3==1.34.33 \
+ --hash=sha256:5a5db6defe73238c25c0c4f9e5522401d2563d75fb10e1cf925bf4ea16514280 \
+ --hash=sha256:5bbd73711f7664c6e8b80981ff247ba8dd2a8c5aa0bf619c5466cb9c24b9f279
# via mozci
-botocore==1.34.26 \
- --hash=sha256:4f3df0f6ed722e944d6f0eed964bc00b6489e50c6e8d5fdbbb68eb0c6c16c7c9 \
- --hash=sha256:63543102467b3b5ba73903f11a14c3157ee442a360f3cb2f5316a8d6bc3e10e7
+botocore==1.34.33 \
+ --hash=sha256:5d154d0af41d5978d58f198837450953ae7168e292071f013ef7b739f40fb18f \
+ --hash=sha256:a50fb5e0c1ddf17d28dc8d0d2c33242b78009fb7f28e390cadcdc310908492b0
# via
# boto3
# s3transfer
@@ -704,81 +704,97 @@ mozterm==1.0.0 \
--hash=sha256:b1e91acec188de07c704dbb7b0100a7be5c1e06567b3beb67f6ea11d00a483a4 \
--hash=sha256:f5eafa25c23d391e2a2bb1dd45ee928fc9e3c811977a3856b5a5a0778011053c
# via mozlog
-multidict==6.0.4 \
- --hash=sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9 \
- --hash=sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8 \
- --hash=sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03 \
- --hash=sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710 \
- --hash=sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161 \
- --hash=sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664 \
- --hash=sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569 \
- --hash=sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067 \
- --hash=sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313 \
- --hash=sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706 \
- --hash=sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2 \
- --hash=sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636 \
- --hash=sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49 \
- --hash=sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93 \
- --hash=sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603 \
- --hash=sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0 \
- --hash=sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60 \
- --hash=sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4 \
- --hash=sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e \
- --hash=sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1 \
- --hash=sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60 \
- --hash=sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951 \
- --hash=sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc \
- --hash=sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe \
- --hash=sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95 \
- --hash=sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d \
- --hash=sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8 \
- --hash=sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed \
- --hash=sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2 \
- --hash=sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775 \
- --hash=sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87 \
- --hash=sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c \
- --hash=sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2 \
- --hash=sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98 \
- --hash=sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3 \
- --hash=sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe \
- --hash=sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78 \
- --hash=sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660 \
- --hash=sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176 \
- --hash=sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e \
- --hash=sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988 \
- --hash=sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c \
- --hash=sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c \
- --hash=sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0 \
- --hash=sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449 \
- --hash=sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f \
- --hash=sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde \
- --hash=sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5 \
- --hash=sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d \
- --hash=sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac \
- --hash=sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a \
- --hash=sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9 \
- --hash=sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca \
- --hash=sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11 \
- --hash=sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35 \
- --hash=sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063 \
- --hash=sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b \
- --hash=sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982 \
- --hash=sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258 \
- --hash=sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1 \
- --hash=sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52 \
- --hash=sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480 \
- --hash=sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7 \
- --hash=sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461 \
- --hash=sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d \
- --hash=sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc \
- --hash=sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779 \
- --hash=sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a \
- --hash=sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547 \
- --hash=sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0 \
- --hash=sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171 \
- --hash=sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf \
- --hash=sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d \
- --hash=sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba
+multidict==6.0.5 \
+ --hash=sha256:01265f5e40f5a17f8241d52656ed27192be03bfa8764d88e8220141d1e4b3556 \
+ --hash=sha256:0275e35209c27a3f7951e1ce7aaf93ce0d163b28948444bec61dd7badc6d3f8c \
+ --hash=sha256:04bde7a7b3de05732a4eb39c94574db1ec99abb56162d6c520ad26f83267de29 \
+ --hash=sha256:04da1bb8c8dbadf2a18a452639771951c662c5ad03aefe4884775454be322c9b \
+ --hash=sha256:09a892e4a9fb47331da06948690ae38eaa2426de97b4ccbfafbdcbe5c8f37ff8 \
+ --hash=sha256:0d63c74e3d7ab26de115c49bffc92cc77ed23395303d496eae515d4204a625e7 \
+ --hash=sha256:107c0cdefe028703fb5dafe640a409cb146d44a6ae201e55b35a4af8e95457dd \
+ --hash=sha256:141b43360bfd3bdd75f15ed811850763555a251e38b2405967f8e25fb43f7d40 \
+ --hash=sha256:14c2976aa9038c2629efa2c148022ed5eb4cb939e15ec7aace7ca932f48f9ba6 \
+ --hash=sha256:19fe01cea168585ba0f678cad6f58133db2aa14eccaf22f88e4a6dccadfad8b3 \
+ --hash=sha256:1d147090048129ce3c453f0292e7697d333db95e52616b3793922945804a433c \
+ --hash=sha256:1d9ea7a7e779d7a3561aade7d596649fbecfa5c08a7674b11b423783217933f9 \
+ --hash=sha256:215ed703caf15f578dca76ee6f6b21b7603791ae090fbf1ef9d865571039ade5 \
+ --hash=sha256:21fd81c4ebdb4f214161be351eb5bcf385426bf023041da2fd9e60681f3cebae \
+ --hash=sha256:220dd781e3f7af2c2c1053da9fa96d9cf3072ca58f057f4c5adaaa1cab8fc442 \
+ --hash=sha256:228b644ae063c10e7f324ab1ab6b548bdf6f8b47f3ec234fef1093bc2735e5f9 \
+ --hash=sha256:29bfeb0dff5cb5fdab2023a7a9947b3b4af63e9c47cae2a10ad58394b517fddc \
+ --hash=sha256:2f4848aa3baa109e6ab81fe2006c77ed4d3cd1e0ac2c1fbddb7b1277c168788c \
+ --hash=sha256:2faa5ae9376faba05f630d7e5e6be05be22913782b927b19d12b8145968a85ea \
+ --hash=sha256:2ffc42c922dbfddb4a4c3b438eb056828719f07608af27d163191cb3e3aa6cc5 \
+ --hash=sha256:37b15024f864916b4951adb95d3a80c9431299080341ab9544ed148091b53f50 \
+ --hash=sha256:3cc2ad10255f903656017363cd59436f2111443a76f996584d1077e43ee51182 \
+ --hash=sha256:3d25f19500588cbc47dc19081d78131c32637c25804df8414463ec908631e453 \
+ --hash=sha256:403c0911cd5d5791605808b942c88a8155c2592e05332d2bf78f18697a5fa15e \
+ --hash=sha256:411bf8515f3be9813d06004cac41ccf7d1cd46dfe233705933dd163b60e37600 \
+ --hash=sha256:425bf820055005bfc8aa9a0b99ccb52cc2f4070153e34b701acc98d201693733 \
+ --hash=sha256:435a0984199d81ca178b9ae2c26ec3d49692d20ee29bc4c11a2a8d4514c67eda \
+ --hash=sha256:4a6a4f196f08c58c59e0b8ef8ec441d12aee4125a7d4f4fef000ccb22f8d7241 \
+ --hash=sha256:4cc0ef8b962ac7a5e62b9e826bd0cd5040e7d401bc45a6835910ed699037a461 \
+ --hash=sha256:51d035609b86722963404f711db441cf7134f1889107fb171a970c9701f92e1e \
+ --hash=sha256:53689bb4e102200a4fafa9de9c7c3c212ab40a7ab2c8e474491914d2305f187e \
+ --hash=sha256:55205d03e8a598cfc688c71ca8ea5f66447164efff8869517f175ea632c7cb7b \
+ --hash=sha256:5c0631926c4f58e9a5ccce555ad7747d9a9f8b10619621f22f9635f069f6233e \
+ --hash=sha256:5cb241881eefd96b46f89b1a056187ea8e9ba14ab88ba632e68d7a2ecb7aadf7 \
+ --hash=sha256:60d698e8179a42ec85172d12f50b1668254628425a6bd611aba022257cac1386 \
+ --hash=sha256:612d1156111ae11d14afaf3a0669ebf6c170dbb735e510a7438ffe2369a847fd \
+ --hash=sha256:6214c5a5571802c33f80e6c84713b2c79e024995b9c5897f794b43e714daeec9 \
+ --hash=sha256:6939c95381e003f54cd4c5516740faba40cf5ad3eeff460c3ad1d3e0ea2549bf \
+ --hash=sha256:69db76c09796b313331bb7048229e3bee7928eb62bab5e071e9f7fcc4879caee \
+ --hash=sha256:6bf7a982604375a8d49b6cc1b781c1747f243d91b81035a9b43a2126c04766f5 \
+ --hash=sha256:766c8f7511df26d9f11cd3a8be623e59cca73d44643abab3f8c8c07620524e4a \
+ --hash=sha256:76c0de87358b192de7ea9649beb392f107dcad9ad27276324c24c91774ca5271 \
+ --hash=sha256:76f067f5121dcecf0d63a67f29080b26c43c71a98b10c701b0677e4a065fbd54 \
+ --hash=sha256:7901c05ead4b3fb75113fb1dd33eb1253c6d3ee37ce93305acd9d38e0b5f21a4 \
+ --hash=sha256:79660376075cfd4b2c80f295528aa6beb2058fd289f4c9252f986751a4cd0496 \
+ --hash=sha256:79a6d2ba910adb2cbafc95dad936f8b9386e77c84c35bc0add315b856d7c3abb \
+ --hash=sha256:7afcdd1fc07befad18ec4523a782cde4e93e0a2bf71239894b8d61ee578c1319 \
+ --hash=sha256:7be7047bd08accdb7487737631d25735c9a04327911de89ff1b26b81745bd4e3 \
+ --hash=sha256:7c6390cf87ff6234643428991b7359b5f59cc15155695deb4eda5c777d2b880f \
+ --hash=sha256:7df704ca8cf4a073334e0427ae2345323613e4df18cc224f647f251e5e75a527 \
+ --hash=sha256:85f67aed7bb647f93e7520633d8f51d3cbc6ab96957c71272b286b2f30dc70ed \
+ --hash=sha256:896ebdcf62683551312c30e20614305f53125750803b614e9e6ce74a96232604 \
+ --hash=sha256:92d16a3e275e38293623ebf639c471d3e03bb20b8ebb845237e0d3664914caef \
+ --hash=sha256:99f60d34c048c5c2fabc766108c103612344c46e35d4ed9ae0673d33c8fb26e8 \
+ --hash=sha256:9fe7b0653ba3d9d65cbe7698cca585bf0f8c83dbbcc710db9c90f478e175f2d5 \
+ --hash=sha256:a3145cb08d8625b2d3fee1b2d596a8766352979c9bffe5d7833e0503d0f0b5e5 \
+ --hash=sha256:aeaf541ddbad8311a87dd695ed9642401131ea39ad7bc8cf3ef3967fd093b626 \
+ --hash=sha256:b55358304d7a73d7bdf5de62494aaf70bd33015831ffd98bc498b433dfe5b10c \
+ --hash=sha256:b82cc8ace10ab5bd93235dfaab2021c70637005e1ac787031f4d1da63d493c1d \
+ --hash=sha256:c0868d64af83169e4d4152ec612637a543f7a336e4a307b119e98042e852ad9c \
+ --hash=sha256:c1c1496e73051918fcd4f58ff2e0f2f3066d1c76a0c6aeffd9b45d53243702cc \
+ --hash=sha256:c9bf56195c6bbd293340ea82eafd0071cb3d450c703d2c93afb89f93b8386ccc \
+ --hash=sha256:cbebcd5bcaf1eaf302617c114aa67569dd3f090dd0ce8ba9e35e9985b41ac35b \
+ --hash=sha256:cd6c8fca38178e12c00418de737aef1261576bd1b6e8c6134d3e729a4e858b38 \
+ --hash=sha256:ceb3b7e6a0135e092de86110c5a74e46bda4bd4fbfeeb3a3bcec79c0f861e450 \
+ --hash=sha256:cf590b134eb70629e350691ecca88eac3e3b8b3c86992042fb82e3cb1830d5e1 \
+ --hash=sha256:d3eb1ceec286eba8220c26f3b0096cf189aea7057b6e7b7a2e60ed36b373b77f \
+ --hash=sha256:d65f25da8e248202bd47445cec78e0025c0fe7582b23ec69c3b27a640dd7a8e3 \
+ --hash=sha256:d6f6d4f185481c9669b9447bf9d9cf3b95a0e9df9d169bbc17e363b7d5487755 \
+ --hash=sha256:d84a5c3a5f7ce6db1f999fb9438f686bc2e09d38143f2d93d8406ed2dd6b9226 \
+ --hash=sha256:d946b0a9eb8aaa590df1fe082cee553ceab173e6cb5b03239716338629c50c7a \
+ --hash=sha256:dce1c6912ab9ff5f179eaf6efe7365c1f425ed690b03341911bf4939ef2f3046 \
+ --hash=sha256:de170c7b4fe6859beb8926e84f7d7d6c693dfe8e27372ce3b76f01c46e489fcf \
+ --hash=sha256:e02021f87a5b6932fa6ce916ca004c4d441509d33bbdbeca70d05dff5e9d2479 \
+ --hash=sha256:e030047e85cbcedbfc073f71836d62dd5dadfbe7531cae27789ff66bc551bd5e \
+ --hash=sha256:e0e79d91e71b9867c73323a3444724d496c037e578a0e1755ae159ba14f4f3d1 \
+ --hash=sha256:e4428b29611e989719874670fd152b6625500ad6c686d464e99f5aaeeaca175a \
+ --hash=sha256:e4972624066095e52b569e02b5ca97dbd7a7ddd4294bf4e7247d52635630dd83 \
+ --hash=sha256:e7be68734bd8c9a513f2b0cfd508802d6609da068f40dc57d4e3494cefc92929 \
+ --hash=sha256:e8e94e6912639a02ce173341ff62cc1201232ab86b8a8fcc05572741a5dc7d93 \
+ --hash=sha256:ea1456df2a27c73ce51120fa2f519f1bea2f4a03a917f4a43c8707cf4cbbae1a \
+ --hash=sha256:ebd8d160f91a764652d3e51ce0d2956b38efe37c9231cd82cfc0bed2e40b581c \
+ --hash=sha256:eca2e9d0cc5a889850e9bbd68e98314ada174ff6ccd1129500103df7a94a7a44 \
+ --hash=sha256:edd08e6f2f1a390bf137080507e44ccc086353c8e98c657e666c017718561b89 \
+ --hash=sha256:f285e862d2f153a70586579c15c44656f888806ed0e5b56b64489afe4a2dbfba \
+ --hash=sha256:f2a1dee728b52b33eebff5072817176c172050d44d67befd681609b4746e1c2e \
+ --hash=sha256:f7e301075edaf50500f0b341543c41194d8df3ae5caf4702f2095f3ca73dd8da \
+ --hash=sha256:fb616be3538599e797a2017cccca78e354c767165e8858ab5116813146041a24 \
+ --hash=sha256:fce28b3c8a81b6b36dfac9feb1de115bab619b3c13905b419ec71d03a3fc1423 \
+ --hash=sha256:fe5d7785250541f7f5019ab9cba2c71169dc7d74d0f45253f8313f436458a4ef
# via
# aiohttp
# yarl
@@ -1018,9 +1034,9 @@ python-jose[pycryptodome]==3.3.0 \
python3-memcached==1.51 \
--hash=sha256:7cbe5951d68eef69d948b7a7ed7decfbd101e15e7f5be007dcd1219ccc584859
# via mozci
-pytz==2023.3.post1 \
- --hash=sha256:7b4fddbeb94a1eba4b557da24f19fdf9db575192544270a9101d8509f9f43d7b \
- --hash=sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7
+pytz==2024.1 \
+ --hash=sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812 \
+ --hash=sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319
# via djangorestframework
pyyaml==6.0 \
--hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \
diff --git a/requirements/dev.in b/requirements/dev.in
index df01d8e6dca..3f262285038 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -14,7 +14,6 @@ pytest-testmon==2.0.9
pytest-watch==4.2.0
# Required by django-extension's runserver_plus command.
-flake8==6.0.0
pytest-django==4.5.2
pytest==7.3.2
black==23.3.0
diff --git a/requirements/dev.txt b/requirements/dev.txt
index b44b76300e0..5865cc877d6 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -8,9 +8,9 @@ asgiref==3.7.2 \
--hash=sha256:89b2ef2247e3b562a16eef663bc0e2e703ec6468e2fa8a5cd61cd449786d4f6e \
--hash=sha256:9e0ce3aa93a819ba5b45120216b23878cf6e8525eb3848653452b4192b92afed
# via django
-attrs==23.1.0 \
- --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \
- --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015
+attrs==23.2.0 \
+ --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \
+ --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1
# via
# outcome
# trio
@@ -51,100 +51,115 @@ black==23.3.0 \
--hash=sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4 \
--hash=sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3
# via -r requirements/dev.in
-build==0.10.0 \
- --hash=sha256:af266720050a66c893a6096a2f410989eeac74ff9a68ba194b3f6473e8e26171 \
- --hash=sha256:d5b71264afdb5951d6704482aac78de887c80691c52b88a9ad195983ca2c9269
+build==1.0.3 \
+ --hash=sha256:538aab1b64f9828977f84bc63ae570b060a8ed1be419e7870b8b4fc5e6ea553b \
+ --hash=sha256:589bf99a67df7c9cf07ec0ac0e5e2ea5d4b37ac63301c4986d1acb126aa83f8f
# via pip-tools
-certifi==2023.5.7 \
- --hash=sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7 \
- --hash=sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716
+certifi==2024.2.2 \
+ --hash=sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f \
+ --hash=sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1
# via
# requests
# selenium
-cfgv==3.3.1 \
- --hash=sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426 \
- --hash=sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736
+cfgv==3.4.0 \
+ --hash=sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9 \
+ --hash=sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560
# via pre-commit
-charset-normalizer==3.2.0 \
- --hash=sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96 \
- --hash=sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c \
- --hash=sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710 \
- --hash=sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706 \
- --hash=sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020 \
- --hash=sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252 \
- --hash=sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad \
- --hash=sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329 \
- --hash=sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a \
- --hash=sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f \
- --hash=sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6 \
- --hash=sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4 \
- --hash=sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a \
- --hash=sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46 \
- --hash=sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2 \
- --hash=sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23 \
- --hash=sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace \
- --hash=sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd \
- --hash=sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982 \
- --hash=sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10 \
- --hash=sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2 \
- --hash=sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea \
- --hash=sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09 \
- --hash=sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5 \
- --hash=sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149 \
- --hash=sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489 \
- --hash=sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9 \
- --hash=sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80 \
- --hash=sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592 \
- --hash=sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3 \
- --hash=sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6 \
- --hash=sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed \
- --hash=sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c \
- --hash=sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200 \
- --hash=sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a \
- --hash=sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e \
- --hash=sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d \
- --hash=sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6 \
- --hash=sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623 \
- --hash=sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669 \
- --hash=sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3 \
- --hash=sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa \
- --hash=sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9 \
- --hash=sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2 \
- --hash=sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f \
- --hash=sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1 \
- --hash=sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4 \
- --hash=sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a \
- --hash=sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8 \
- --hash=sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3 \
- --hash=sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029 \
- --hash=sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f \
- --hash=sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959 \
- --hash=sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22 \
- --hash=sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7 \
- --hash=sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952 \
- --hash=sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346 \
- --hash=sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e \
- --hash=sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d \
- --hash=sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299 \
- --hash=sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd \
- --hash=sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a \
- --hash=sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3 \
- --hash=sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037 \
- --hash=sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94 \
- --hash=sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c \
- --hash=sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858 \
- --hash=sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a \
- --hash=sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449 \
- --hash=sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c \
- --hash=sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918 \
- --hash=sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1 \
- --hash=sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c \
- --hash=sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac \
- --hash=sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa
+charset-normalizer==3.3.2 \
+ --hash=sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027 \
+ --hash=sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087 \
+ --hash=sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786 \
+ --hash=sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8 \
+ --hash=sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09 \
+ --hash=sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185 \
+ --hash=sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574 \
+ --hash=sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e \
+ --hash=sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519 \
+ --hash=sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898 \
+ --hash=sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269 \
+ --hash=sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3 \
+ --hash=sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f \
+ --hash=sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6 \
+ --hash=sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8 \
+ --hash=sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a \
+ --hash=sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73 \
+ --hash=sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc \
+ --hash=sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714 \
+ --hash=sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2 \
+ --hash=sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc \
+ --hash=sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce \
+ --hash=sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d \
+ --hash=sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e \
+ --hash=sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6 \
+ --hash=sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269 \
+ --hash=sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96 \
+ --hash=sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d \
+ --hash=sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a \
+ --hash=sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4 \
+ --hash=sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77 \
+ --hash=sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d \
+ --hash=sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0 \
+ --hash=sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed \
+ --hash=sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068 \
+ --hash=sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac \
+ --hash=sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25 \
+ --hash=sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8 \
+ --hash=sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab \
+ --hash=sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26 \
+ --hash=sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2 \
+ --hash=sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db \
+ --hash=sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f \
+ --hash=sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5 \
+ --hash=sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99 \
+ --hash=sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c \
+ --hash=sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d \
+ --hash=sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811 \
+ --hash=sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa \
+ --hash=sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a \
+ --hash=sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03 \
+ --hash=sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b \
+ --hash=sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04 \
+ --hash=sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c \
+ --hash=sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001 \
+ --hash=sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458 \
+ --hash=sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389 \
+ --hash=sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99 \
+ --hash=sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985 \
+ --hash=sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537 \
+ --hash=sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238 \
+ --hash=sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f \
+ --hash=sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d \
+ --hash=sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796 \
+ --hash=sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a \
+ --hash=sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143 \
+ --hash=sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8 \
+ --hash=sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c \
+ --hash=sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5 \
+ --hash=sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5 \
+ --hash=sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711 \
+ --hash=sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4 \
+ --hash=sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6 \
+ --hash=sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c \
+ --hash=sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7 \
+ --hash=sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4 \
+ --hash=sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b \
+ --hash=sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae \
+ --hash=sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12 \
+ --hash=sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c \
+ --hash=sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae \
+ --hash=sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8 \
+ --hash=sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887 \
+ --hash=sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b \
+ --hash=sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4 \
+ --hash=sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f \
+ --hash=sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5 \
+ --hash=sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33 \
+ --hash=sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519 \
+ --hash=sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561
# via requests
-click==8.1.4 \
- --hash=sha256:2739815aaa5d2c986a88f1e9230c55e17f0caad3d958a5e13ad0797c166db9e3 \
- --hash=sha256:b97d0c74955da062a7d4ef92fadb583806a585b2ea81958a81bd72726cbb8e37
+click==8.1.7 \
+ --hash=sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28 \
+ --hash=sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de
# via
# black
# pip-tools
@@ -152,77 +167,69 @@ colorama==0.4.6 \
--hash=sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44 \
--hash=sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6
# via pytest-watch
-coverage[toml]==7.2.7 \
- --hash=sha256:06a9a2be0b5b576c3f18f1a241f0473575c4a26021b52b2a85263a00f034d51f \
- --hash=sha256:06fb182e69f33f6cd1d39a6c597294cff3143554b64b9825d1dc69d18cc2fff2 \
- --hash=sha256:0a5f9e1dbd7fbe30196578ca36f3fba75376fb99888c395c5880b355e2875f8a \
- --hash=sha256:0e1f928eaf5469c11e886fe0885ad2bf1ec606434e79842a879277895a50942a \
- --hash=sha256:171717c7cb6b453aebac9a2ef603699da237f341b38eebfee9be75d27dc38e01 \
- --hash=sha256:1e9d683426464e4a252bf70c3498756055016f99ddaec3774bf368e76bbe02b6 \
- --hash=sha256:201e7389591af40950a6480bd9edfa8ed04346ff80002cec1a66cac4549c1ad7 \
- --hash=sha256:245167dd26180ab4c91d5e1496a30be4cd721a5cf2abf52974f965f10f11419f \
- --hash=sha256:2aee274c46590717f38ae5e4650988d1af340fe06167546cc32fe2f58ed05b02 \
- --hash=sha256:2e07b54284e381531c87f785f613b833569c14ecacdcb85d56b25c4622c16c3c \
- --hash=sha256:31563e97dae5598556600466ad9beea39fb04e0229e61c12eaa206e0aa202063 \
- --hash=sha256:33d6d3ea29d5b3a1a632b3c4e4f4ecae24ef170b0b9ee493883f2df10039959a \
- --hash=sha256:3d376df58cc111dc8e21e3b6e24606b5bb5dee6024f46a5abca99124b2229ef5 \
- --hash=sha256:419bfd2caae268623dd469eff96d510a920c90928b60f2073d79f8fe2bbc5959 \
- --hash=sha256:48c19d2159d433ccc99e729ceae7d5293fbffa0bdb94952d3579983d1c8c9d97 \
- --hash=sha256:49969a9f7ffa086d973d91cec8d2e31080436ef0fb4a359cae927e742abfaaa6 \
- --hash=sha256:52edc1a60c0d34afa421c9c37078817b2e67a392cab17d97283b64c5833f427f \
- --hash=sha256:537891ae8ce59ef63d0123f7ac9e2ae0fc8b72c7ccbe5296fec45fd68967b6c9 \
- --hash=sha256:54b896376ab563bd38453cecb813c295cf347cf5906e8b41d340b0321a5433e5 \
- --hash=sha256:58c2ccc2f00ecb51253cbe5d8d7122a34590fac9646a960d1430d5b15321d95f \
- --hash=sha256:5b7540161790b2f28143191f5f8ec02fb132660ff175b7747b95dcb77ac26562 \
- --hash=sha256:5baa06420f837184130752b7c5ea0808762083bf3487b5038d68b012e5937dbe \
- --hash=sha256:5e330fc79bd7207e46c7d7fd2bb4af2963f5f635703925543a70b99574b0fea9 \
- --hash=sha256:61b9a528fb348373c433e8966535074b802c7a5d7f23c4f421e6c6e2f1697a6f \
- --hash=sha256:63426706118b7f5cf6bb6c895dc215d8a418d5952544042c8a2d9fe87fcf09cb \
- --hash=sha256:6d040ef7c9859bb11dfeb056ff5b3872436e3b5e401817d87a31e1750b9ae2fb \
- --hash=sha256:6f48351d66575f535669306aa7d6d6f71bc43372473b54a832222803eb956fd1 \
- --hash=sha256:7ee7d9d4822c8acc74a5e26c50604dff824710bc8de424904c0982e25c39c6cb \
- --hash=sha256:81c13a1fc7468c40f13420732805a4c38a105d89848b7c10af65a90beff25250 \
- --hash=sha256:8d13c64ee2d33eccf7437961b6ea7ad8673e2be040b4f7fd4fd4d4d28d9ccb1e \
- --hash=sha256:8de8bb0e5ad103888d65abef8bca41ab93721647590a3f740100cd65c3b00511 \
- --hash=sha256:8fa03bce9bfbeeef9f3b160a8bed39a221d82308b4152b27d82d8daa7041fee5 \
- --hash=sha256:924d94291ca674905fe9481f12294eb11f2d3d3fd1adb20314ba89e94f44ed59 \
- --hash=sha256:975d70ab7e3c80a3fe86001d8751f6778905ec723f5b110aed1e450da9d4b7f2 \
- --hash=sha256:976b9c42fb2a43ebf304fa7d4a310e5f16cc99992f33eced91ef6f908bd8f33d \
- --hash=sha256:9e31cb64d7de6b6f09702bb27c02d1904b3aebfca610c12772452c4e6c21a0d3 \
- --hash=sha256:a342242fe22407f3c17f4b499276a02b01e80f861f1682ad1d95b04018e0c0d4 \
- --hash=sha256:a3d33a6b3eae87ceaefa91ffdc130b5e8536182cd6dfdbfc1aa56b46ff8c86de \
- --hash=sha256:a895fcc7b15c3fc72beb43cdcbdf0ddb7d2ebc959edac9cef390b0d14f39f8a9 \
- --hash=sha256:afb17f84d56068a7c29f5fa37bfd38d5aba69e3304af08ee94da8ed5b0865833 \
- --hash=sha256:b1c546aca0ca4d028901d825015dc8e4d56aac4b541877690eb76490f1dc8ed0 \
- --hash=sha256:b29019c76039dc3c0fd815c41392a044ce555d9bcdd38b0fb60fb4cd8e475ba9 \
- --hash=sha256:b46517c02ccd08092f4fa99f24c3b83d8f92f739b4657b0f146246a0ca6a831d \
- --hash=sha256:b7aa5f8a41217360e600da646004f878250a0d6738bcdc11a0a39928d7dc2050 \
- --hash=sha256:b7b4c971f05e6ae490fef852c218b0e79d4e52f79ef0c8475566584a8fb3e01d \
- --hash=sha256:ba90a9563ba44a72fda2e85302c3abc71c5589cea608ca16c22b9804262aaeb6 \
- --hash=sha256:cb017fd1b2603ef59e374ba2063f593abe0fc45f2ad9abdde5b4d83bd922a353 \
- --hash=sha256:d22656368f0e6189e24722214ed8d66b8022db19d182927b9a248a2a8a2f67eb \
- --hash=sha256:d2c2db7fd82e9b72937969bceac4d6ca89660db0a0967614ce2481e81a0b771e \
- --hash=sha256:d39b5b4f2a66ccae8b7263ac3c8170994b65266797fb96cbbfd3fb5b23921db8 \
- --hash=sha256:d62a5c7dad11015c66fbb9d881bc4caa5b12f16292f857842d9d1871595f4495 \
- --hash=sha256:e7d9405291c6928619403db1d10bd07888888ec1abcbd9748fdaa971d7d661b2 \
- --hash=sha256:e84606b74eb7de6ff581a7915e2dab7a28a0517fbe1c9239eb227e1354064dcd \
- --hash=sha256:eb393e5ebc85245347950143969b241d08b52b88a3dc39479822e073a1a8eb27 \
- --hash=sha256:ebba1cd308ef115925421d3e6a586e655ca5a77b5bf41e02eb0e4562a111f2d1 \
- --hash=sha256:ee57190f24fba796e36bb6d3aa8a8783c643d8fa9760c89f7a98ab5455fbf818 \
- --hash=sha256:f2f67fe12b22cd130d34d0ef79206061bfb5eda52feb6ce0dba0644e20a03cf4 \
- --hash=sha256:f6951407391b639504e3b3be51b7ba5f3528adbf1a8ac3302b687ecababf929e \
- --hash=sha256:f75f7168ab25dd93110c8a8117a22450c19976afbc44234cbf71481094c1b850 \
- --hash=sha256:fdec9e8cbf13a5bf63290fc6013d216a4c7232efb51548594ca3631a7f13c3a3
+coverage[toml]==7.4.1 \
+ --hash=sha256:0193657651f5399d433c92f8ae264aff31fc1d066deee4b831549526433f3f61 \
+ --hash=sha256:02f2edb575d62172aa28fe00efe821ae31f25dc3d589055b3fb64d51e52e4ab1 \
+ --hash=sha256:0491275c3b9971cdbd28a4595c2cb5838f08036bca31765bad5e17edf900b2c7 \
+ --hash=sha256:077d366e724f24fc02dbfe9d946534357fda71af9764ff99d73c3c596001bbd7 \
+ --hash=sha256:10e88e7f41e6197ea0429ae18f21ff521d4f4490aa33048f6c6f94c6045a6a75 \
+ --hash=sha256:18e961aa13b6d47f758cc5879383d27b5b3f3dcd9ce8cdbfdc2571fe86feb4dd \
+ --hash=sha256:1a78b656a4d12b0490ca72651fe4d9f5e07e3c6461063a9b6265ee45eb2bdd35 \
+ --hash=sha256:1ed4b95480952b1a26d863e546fa5094564aa0065e1e5f0d4d0041f293251d04 \
+ --hash=sha256:23b27b8a698e749b61809fb637eb98ebf0e505710ec46a8aa6f1be7dc0dc43a6 \
+ --hash=sha256:23f5881362dcb0e1a92b84b3c2809bdc90db892332daab81ad8f642d8ed55042 \
+ --hash=sha256:32a8d985462e37cfdab611a6f95b09d7c091d07668fdc26e47a725ee575fe166 \
+ --hash=sha256:3468cc8720402af37b6c6e7e2a9cdb9f6c16c728638a2ebc768ba1ef6f26c3a1 \
+ --hash=sha256:379d4c7abad5afbe9d88cc31ea8ca262296480a86af945b08214eb1a556a3e4d \
+ --hash=sha256:3cacfaefe6089d477264001f90f55b7881ba615953414999c46cc9713ff93c8c \
+ --hash=sha256:3e3424c554391dc9ef4a92ad28665756566a28fecf47308f91841f6c49288e66 \
+ --hash=sha256:46342fed0fff72efcda77040b14728049200cbba1279e0bf1188f1f2078c1d70 \
+ --hash=sha256:536d609c6963c50055bab766d9951b6c394759190d03311f3e9fcf194ca909e1 \
+ --hash=sha256:5d6850e6e36e332d5511a48a251790ddc545e16e8beaf046c03985c69ccb2676 \
+ --hash=sha256:6008adeca04a445ea6ef31b2cbaf1d01d02986047606f7da266629afee982630 \
+ --hash=sha256:64e723ca82a84053dd7bfcc986bdb34af8d9da83c521c19d6b472bc6880e191a \
+ --hash=sha256:6b00e21f86598b6330f0019b40fb397e705135040dbedc2ca9a93c7441178e74 \
+ --hash=sha256:6d224f0c4c9c98290a6990259073f496fcec1b5cc613eecbd22786d398ded3ad \
+ --hash=sha256:6dceb61d40cbfcf45f51e59933c784a50846dc03211054bd76b421a713dcdf19 \
+ --hash=sha256:7ac8f8eb153724f84885a1374999b7e45734bf93a87d8df1e7ce2146860edef6 \
+ --hash=sha256:85ccc5fa54c2ed64bd91ed3b4a627b9cce04646a659512a051fa82a92c04a448 \
+ --hash=sha256:869b5046d41abfea3e381dd143407b0d29b8282a904a19cb908fa24d090cc018 \
+ --hash=sha256:8bdb0285a0202888d19ec6b6d23d5990410decb932b709f2b0dfe216d031d218 \
+ --hash=sha256:8dfc5e195bbef80aabd81596ef52a1277ee7143fe419efc3c4d8ba2754671756 \
+ --hash=sha256:8e738a492b6221f8dcf281b67129510835461132b03024830ac0e554311a5c54 \
+ --hash=sha256:918440dea04521f499721c039863ef95433314b1db00ff826a02580c1f503e45 \
+ --hash=sha256:9641e21670c68c7e57d2053ddf6c443e4f0a6e18e547e86af3fad0795414a628 \
+ --hash=sha256:9d2f9d4cc2a53b38cabc2d6d80f7f9b7e3da26b2f53d48f05876fef7956b6968 \
+ --hash=sha256:a07f61fc452c43cd5328b392e52555f7d1952400a1ad09086c4a8addccbd138d \
+ --hash=sha256:a3277f5fa7483c927fe3a7b017b39351610265308f5267ac6d4c2b64cc1d8d25 \
+ --hash=sha256:a4a3907011d39dbc3e37bdc5df0a8c93853c369039b59efa33a7b6669de04c60 \
+ --hash=sha256:aeb2c2688ed93b027eb0d26aa188ada34acb22dceea256d76390eea135083950 \
+ --hash=sha256:b094116f0b6155e36a304ff912f89bbb5067157aff5f94060ff20bbabdc8da06 \
+ --hash=sha256:b8ffb498a83d7e0305968289441914154fb0ef5d8b3157df02a90c6695978295 \
+ --hash=sha256:b9bb62fac84d5f2ff523304e59e5c439955fb3b7f44e3d7b2085184db74d733b \
+ --hash=sha256:c61f66d93d712f6e03369b6a7769233bfda880b12f417eefdd4f16d1deb2fc4c \
+ --hash=sha256:ca6e61dc52f601d1d224526360cdeab0d0712ec104a2ce6cc5ccef6ed9a233bc \
+ --hash=sha256:ca7b26a5e456a843b9b6683eada193fc1f65c761b3a473941efe5a291f604c74 \
+ --hash=sha256:d12c923757de24e4e2110cf8832d83a886a4cf215c6e61ed506006872b43a6d1 \
+ --hash=sha256:d17bbc946f52ca67adf72a5ee783cd7cd3477f8f8796f59b4974a9b59cacc9ee \
+ --hash=sha256:dfd1e1b9f0898817babf840b77ce9fe655ecbe8b1b327983df485b30df8cc011 \
+ --hash=sha256:e0860a348bf7004c812c8368d1fc7f77fe8e4c095d661a579196a9533778e156 \
+ --hash=sha256:f2f5968608b1fe2a1d00d01ad1017ee27efd99b3437e08b83ded9b7af3f6f766 \
+ --hash=sha256:f3771b23bb3675a06f5d885c3630b1d01ea6cac9e84a01aaf5508706dba546c5 \
+ --hash=sha256:f68ef3660677e6624c8cace943e4765545f8191313a07288a53d3da188bd8581 \
+ --hash=sha256:f86f368e1c7ce897bf2457b9eb61169a44e2ef797099fb5728482b8d69f3f016 \
+ --hash=sha256:f90515974b39f4dea2f27c0959688621b46d96d5a626cf9c53dbc653a895c05c \
+ --hash=sha256:fe558371c1bdf3b8fa03e097c523fb9645b8730399c14fe7721ee9c9e2a545d3
# via
# pytest-cov
# pytest-testmon
-distlib==0.3.6 \
- --hash=sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46 \
- --hash=sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e
+distlib==0.3.8 \
+ --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \
+ --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64
# via virtualenv
-django==4.2.3 \
- --hash=sha256:45a747e1c5b3d6df1b141b1481e193b033fd1fdbda3ff52677dc81afdaacbaed \
- --hash=sha256:f7c7852a5ac5a3da5a8d5b35cc6168f31b605971441798dac845f17ca8028039
+django==4.2.9 \
+ --hash=sha256:12498cc3cb8bc8038539fef9e90e95f507502436c1f0c3a673411324fa675d14 \
+ --hash=sha256:2cc2fc7d1708ada170ddd6c99f35cc25db664f165d3794bc7723f46b2f8c8984
# via
# django-debug-toolbar
# django-extensions
@@ -237,47 +244,43 @@ django-extensions==3.2.3 \
docopt==0.6.2 \
--hash=sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491
# via pytest-watch
-exceptiongroup==1.1.2 \
- --hash=sha256:12c3e887d6485d16943a309616de20ae5582633e0a2eda17f4e10fd61c1e8af5 \
- --hash=sha256:e346e69d186172ca7cf029c8c1d16235aa0e04035e5750b4b95039e65204328f
+exceptiongroup==1.2.0 \
+ --hash=sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14 \
+ --hash=sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68
# via
# pytest
# trio
# trio-websocket
-filelock==3.12.2 \
- --hash=sha256:002740518d8aa59a26b0c76e10fb8c6e15eae825d34b6fdf670333fd7b938d81 \
- --hash=sha256:cbb791cdea2a72f23da6ac5b5269ab0a0d161e9ef0100e653b69049a7706d1ec
+filelock==3.13.1 \
+ --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \
+ --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c
# via virtualenv
-flake8==6.0.0 \
- --hash=sha256:3833794e27ff64ea4e9cf5d410082a8b97ff1a06c16aa3d2027339cd0f1195c7 \
- --hash=sha256:c61007e76655af75e6785a931f452915b371dc48f56efd765247c8fe68f2b181
- # via -r requirements/dev.in
-freezegun==1.2.2 \
- --hash=sha256:cd22d1ba06941384410cd967d8a99d5ae2442f57dfafeff2fda5de8dc5c05446 \
- --hash=sha256:ea1b963b993cb9ea195adbd893a48d573fda951b0da64f60883d7e988b606c9f
+freezegun==1.4.0 \
+ --hash=sha256:10939b0ba0ff5adaecf3b06a5c2f73071d9678e507c5eaedb23c761d56ac774b \
+ --hash=sha256:55e0fc3c84ebf0a96a5aa23ff8b53d70246479e9a68863f1fcac5a3e52f19dd6
# via pytest-freezegun
h11==0.14.0 \
--hash=sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d \
--hash=sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761
# via wsproto
-identify==2.5.24 \
- --hash=sha256:0aac67d5b4812498056d28a9a512a483f5085cc28640b02b258a59dac34301d4 \
- --hash=sha256:986dbfb38b1140e763e413e6feb44cd731faf72d1909543178aa79b0e258265d
+identify==2.5.33 \
+ --hash=sha256:161558f9fe4559e1557e1bff323e8631f6a0e4837f7497767c1782832f16b62d \
+ --hash=sha256:d40ce5fcd762817627670da8a7d8d8e65f24342d14539c59488dc603bf662e34
# via pre-commit
-idna==3.4 \
- --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \
- --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2
+idna==3.6 \
+ --hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \
+ --hash=sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f
# via
# requests
# trio
+importlib-metadata==7.0.1 \
+ --hash=sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e \
+ --hash=sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc
+ # via build
iniconfig==2.0.0 \
--hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \
--hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374
# via pytest
-mccabe==0.7.0 \
- --hash=sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325 \
- --hash=sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e
- # via flake8
mock==5.0.2 \
--hash=sha256:06f18d7d65b44428202b145a9a36e99c2ee00d1eb992df0caf881d4664377891 \
--hash=sha256:0e0bc5ba78b8db3667ad636d964eb963dc97a59f04c6f6214c5f0e4a8f726c56
@@ -290,34 +293,34 @@ nodeenv==1.8.0 \
--hash=sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2 \
--hash=sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec
# via pre-commit
-outcome==1.2.0 \
- --hash=sha256:6f82bd3de45da303cf1f771ecafa1633750a358436a8bb60e06a1ceb745d2672 \
- --hash=sha256:c4ab89a56575d6d38a05aa16daeaa333109c1f96167aba8901ab18b6b5e0f7f5
+outcome==1.3.0.post0 \
+ --hash=sha256:9dcf02e65f2971b80047b377468e72a268e15c0af3cf1238e6ff14f7f91143b8 \
+ --hash=sha256:e771c5ce06d1415e356078d3bdd68523f284b4ce5419828922b6871e65eda82b
# via trio
-packaging==23.1 \
- --hash=sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61 \
- --hash=sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f
+packaging==23.2 \
+ --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \
+ --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7
# via
# black
# build
# pytest
-pathspec==0.11.1 \
- --hash=sha256:2798de800fa92780e33acca925945e9a19a133b715067cf165b8866c15a31687 \
- --hash=sha256:d8af70af76652554bd134c22b3e8a1cc46ed7d91edcdd721ef1a0c51a84a5293
+pathspec==0.12.1 \
+ --hash=sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08 \
+ --hash=sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712
# via black
pip-tools==6.13.0 \
--hash=sha256:50943f151d87e752abddec8158622c34ad7f292e193836e90e30d87da60b19d9 \
--hash=sha256:61d46bd2eb8016ed4a924e196e6e5b0a268cd3babd79e593048720db23522bb1
# via -r requirements/dev.in
-platformdirs==3.8.1 \
- --hash=sha256:cec7b889196b9144d088e4c57d9ceef7374f6c39694ad1577a0aab50d27ea28c \
- --hash=sha256:f87ca4fcff7d2b0f81c6a748a77973d7af0f4d526f98f308477c3c436c74d528
+platformdirs==4.2.0 \
+ --hash=sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068 \
+ --hash=sha256:ef0cc731df711022c174543cb70a9b5bd22e5a9337c8624ef2c2ceb8ddad8768
# via
# black
# virtualenv
-pluggy==1.2.0 \
- --hash=sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849 \
- --hash=sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3
+pluggy==1.4.0 \
+ --hash=sha256:7db9f7b503d67d1c5b95f59773ebb58a8c1c288129a88665838012cfb07b8981 \
+ --hash=sha256:8c85c2876142a764e5b7548e7d9a0e0ddb46f5185161049a79b7e974454223be
# via
# pypom
# pytest
@@ -325,14 +328,6 @@ pre-commit==3.3.3 \
--hash=sha256:10badb65d6a38caff29703362271d7dca483d01da88f9d7e05d0b97171c136cb \
--hash=sha256:a2256f489cd913d575c145132ae196fe335da32d91a8294b7afe6622335dd023
# via -r requirements/dev.in
-pycodestyle==2.10.0 \
- --hash=sha256:347187bdb476329d98f695c213d7295a846d1152ff4fe9bacb8a9590b8ee7053 \
- --hash=sha256:8a4eaf0d0495c7395bdab3589ac2db602797d76207242c17d470186815706610
- # via flake8
-pyflakes==3.0.1 \
- --hash=sha256:ec55bf7fe21fff7f1ad2f7da62363d749e2a470500eab1b555334b67aa1ef8cf \
- --hash=sha256:ec8b276a6b60bd80defed25add7e439881c19e64850afd9b346283d4165fd0fd
- # via flake8
pypom==2.2.4 \
--hash=sha256:5da52cf447e62f43a0cfa47dfe52eb822eff07b2fdad759f930d1d227c15220b \
--hash=sha256:8b4dc6d1a24580298bf5ad8ad6c586f33b73c326c10a4419f83aee1abb20077d
@@ -383,47 +378,58 @@ python-dateutil==2.8.2 \
--hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \
--hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9
# via freezegun
-pyyaml==6.0 \
- --hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \
- --hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \
- --hash=sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b \
- --hash=sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57 \
- --hash=sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b \
- --hash=sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4 \
- --hash=sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07 \
- --hash=sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba \
- --hash=sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9 \
- --hash=sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287 \
- --hash=sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513 \
- --hash=sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0 \
- --hash=sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782 \
- --hash=sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0 \
- --hash=sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92 \
- --hash=sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f \
- --hash=sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2 \
- --hash=sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc \
- --hash=sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1 \
- --hash=sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c \
- --hash=sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86 \
- --hash=sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4 \
- --hash=sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c \
- --hash=sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34 \
- --hash=sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b \
- --hash=sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d \
- --hash=sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c \
- --hash=sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb \
- --hash=sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7 \
- --hash=sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737 \
- --hash=sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3 \
- --hash=sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d \
- --hash=sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358 \
- --hash=sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53 \
- --hash=sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78 \
- --hash=sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803 \
- --hash=sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a \
- --hash=sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f \
- --hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \
- --hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5
+pyyaml==6.0.1 \
+ --hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \
+ --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \
+ --hash=sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df \
+ --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \
+ --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \
+ --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \
+ --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \
+ --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \
+ --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \
+ --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \
+ --hash=sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290 \
+ --hash=sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9 \
+ --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \
+ --hash=sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6 \
+ --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \
+ --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \
+ --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \
+ --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \
+ --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \
+ --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \
+ --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \
+ --hash=sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0 \
+ --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \
+ --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \
+ --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \
+ --hash=sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28 \
+ --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \
+ --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \
+ --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \
+ --hash=sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef \
+ --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \
+ --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \
+ --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \
+ --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \
+ --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \
+ --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \
+ --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \
+ --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \
+ --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \
+ --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \
+ --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \
+ --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \
+ --hash=sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54 \
+ --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \
+ --hash=sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b \
+ --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \
+ --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \
+ --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \
+ --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \
+ --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
+ --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
# via
# pre-commit
# responses
@@ -438,9 +444,9 @@ responses==0.23.1 \
--hash=sha256:8a3a5915713483bf353b6f4079ba8b2a29029d1d1090a503c70b0dc5d9d0c7bd \
--hash=sha256:c4d9aa9fc888188f0c673eff79a8dadbe2e75b7fe879dc80a221a06e0a68138f
# via -r requirements/dev.in
-selenium==4.10.0 \
- --hash=sha256:40241b9d872f58959e9b34e258488bf11844cd86142fd68182bd41db9991fc5c \
- --hash=sha256:871bf800c4934f745b909c8dfc7d15c65cf45bd2e943abd54451c810ada395e3
+selenium==4.17.2 \
+ --hash=sha256:5aee79026c07985dc1b0c909f34084aa996dfe5b307602de9016d7a621a473f2 \
+ --hash=sha256:d43d6972e516855fb242ef9ce4ce759057b115070e702e7b1c1032fe7b38d87b
# via pypom
shellcheck-py==0.9.0.5 \
--hash=sha256:50b2057fac7227fd83614a9bf9d123042e53e03d92f2c7f1778448a8937f07a4 \
@@ -475,26 +481,27 @@ tomli==2.0.1 \
# coverage
# pyproject-hooks
# pytest
-trio==0.22.2 \
- --hash=sha256:3887cf18c8bcc894433420305468388dac76932e9668afa1c49aa3806b6accb3 \
- --hash=sha256:f43da357620e5872b3d940a2e3589aa251fd3f881b65a608d742e00809b1ec38
+trio==0.24.0 \
+ --hash=sha256:c3bd3a4e3e3025cd9a2241eae75637c43fe0b9e88b4c97b9161a55b9e54cd72c \
+ --hash=sha256:ffa09a74a6bf81b84f8613909fb0beaee84757450183a7a2e0b47b455c0cac5d
# via
# selenium
# trio-websocket
-trio-websocket==0.10.3 \
- --hash=sha256:1a748604ad906a7dcab9a43c6eb5681e37de4793ba0847ef0bc9486933ed027b \
- --hash=sha256:a9937d48e8132ebf833019efde2a52ca82d223a30a7ea3e8d60a7d28f75a4e3a
+trio-websocket==0.11.1 \
+ --hash=sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f \
+ --hash=sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638
# via selenium
-types-pyyaml==6.0.12.10 \
- --hash=sha256:662fa444963eff9b68120d70cda1af5a5f2aa57900003c2006d7626450eaae5f \
- --hash=sha256:ebab3d0700b946553724ae6ca636ea932c1b0868701d4af121630e78d695fc97
+types-pyyaml==6.0.12.12 \
+ --hash=sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062 \
+ --hash=sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24
# via responses
-typing-extensions==4.7.1 \
- --hash=sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36 \
- --hash=sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2
+typing-extensions==4.9.0 \
+ --hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \
+ --hash=sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd
# via
# asgiref
# black
+ # selenium
urllib3[socks]==2.0.3 \
--hash=sha256:48e7fafa40319d358848e1bc6809b208340fafe2096f1725d05d67443d0483d1 \
--hash=sha256:bee28b5e56addb8226c96f7f13ac28cb4c301dd5ea8a6ca179c0b9835e032825
@@ -503,9 +510,9 @@ urllib3[socks]==2.0.3 \
# requests
# responses
# selenium
-virtualenv==20.23.1 \
- --hash=sha256:34da10f14fea9be20e0fd7f04aba9732f84e593dac291b757ce42e3368a39419 \
- --hash=sha256:8ff19a38c1021c742148edc4f81cb43d7f8c6816d2ede2ab72af5b84c749ade1
+virtualenv==20.25.0 \
+ --hash=sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3 \
+ --hash=sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b
# via pre-commit
watchdog==3.0.0 \
--hash=sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a \
@@ -536,14 +543,18 @@ watchdog==3.0.0 \
--hash=sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44 \
--hash=sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33
# via pytest-watch
-wheel==0.40.0 \
- --hash=sha256:cd1196f3faee2b31968d626e1731c94f99cbdb67cf5a46e4f5656cbee7738873 \
- --hash=sha256:d236b20e7cb522daf2390fa84c55eea81c5c30190f90f29ae2ca1ad8355bf247
+wheel==0.42.0 \
+ --hash=sha256:177f9c9b0d45c47873b619f5b650346d632cdc35fb5e4d25058e09c9e581433d \
+ --hash=sha256:c45be39f7882c9d34243236f2d63cbd58039e360f85d0913425fbd7ceea617a8
# via pip-tools
wsproto==1.2.0 \
--hash=sha256:ad565f26ecb92588a3e43bc3d96164de84cd9902482b130d0ddbaa9664a85065 \
--hash=sha256:b9acddd652b585d75b20477888c56642fdade28bdfd3579aa24a4d2c037dd736
# via trio-websocket
+zipp==3.17.0 \
+ --hash=sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31 \
+ --hash=sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0
+ # via importlib-metadata
zope-component==6.0 \
--hash=sha256:96d0a04db39643caf2dfaec152340f3e914df1dc3fa32fbb913782620dc6c3c6 \
--hash=sha256:9a0a0472ad201b94b4fe6741ce9ac2c30b8bb22c516077bf03692dec4dfb6906
@@ -552,87 +563,93 @@ zope-event==5.0 \
--hash=sha256:2832e95014f4db26c47a13fdaef84cef2f4df37e66b59d8f1f4a8f319a632c26 \
--hash=sha256:bac440d8d9891b4068e2b5a2c5e2c9765a9df762944bda6955f96bb9b91e67cd
# via zope-component
-zope-hookable==5.4 \
- --hash=sha256:0054539ed839751b7f511193912cba393f0b8b5f7dfe9f3601c65b2d3b74e731 \
- --hash=sha256:049ef54de127236e555d0864ad3b950b2b6e5048cdf1098cf340c6fb108104c7 \
- --hash=sha256:06570ed57b22624c7673ff203801bbdece14d2d42dc5d9879c24ef5612c53456 \
- --hash=sha256:0e9e5adc24954e157e084bee97362346470a06d0305cb095118367a8a776dce4 \
- --hash=sha256:2e8fd79437c2007020d3faac41e13c49bcbaa6a0738e4142b996c656dcb5bb69 \
- --hash=sha256:4313b3d282c1c26fcb69569b7988bc2de0b6dc59238ae7189b6b7b29503d47cb \
- --hash=sha256:448ca90d78bd3aef75fe5d55d19f5d05a217193738b7a8d5fd9e93ecf2c02c84 \
- --hash=sha256:4b2fd781571336b0b7655826d9a052379a06b62af138085409b2e3fef1e6fb3d \
- --hash=sha256:5215355203b9583b7f2a8f06fa7df272562cc12bf5be1a960a45ea49c3294426 \
- --hash=sha256:5cb0e4a23588435c6911bde300158d31e47c73c469fbf59d927e801e1cb457ef \
- --hash=sha256:71bff8f7c2e223f92a218b0909ccc6f612c075cc3b5ed164cf152f1537cae2ca \
- --hash=sha256:7241ab28df7288d9a8bf49339a0aabfbf035b93d6a2a843af13d13dfa735c46a \
- --hash=sha256:7269a0fbcd7c5901e255679f8dac835b628eab58d5490c38cf2b15508f181e64 \
- --hash=sha256:7401bd6138e58231aef751c63718726259a7aa6875d746d8a87bba70271b9cff \
- --hash=sha256:761c9bf1b8df6e2b2d5ae87cda27b8e82c33e2f328750e039de4f6f7f35b73cd \
- --hash=sha256:78c51f04aabd3b77ba8d3b2c2abaff8b7598376fea7bd1af9929e90549f6dd4c \
- --hash=sha256:93cfda0663d4d3db6b1818619fbc14e3df2e703454983c841b3b95894d559f86 \
- --hash=sha256:9af06ca83ff1ef9f94a98d08095dd8960fc5b71ffc7ed7db05988dc493e148a1 \
- --hash=sha256:9cffa01d8ef1172492fd6df0113ff5432006129b9bd6e8265e1e4985362b973d \
- --hash=sha256:9d398b1de407a5908c8e5f55fb7a26fa177916b1203e697ef0b4c3389dd28e14 \
- --hash=sha256:9f447ecaf7741257333f4b1cc215de633daaf147dbc87133638142ed88492617 \
- --hash=sha256:9f5d425eb57dee785e4d32703e45c5d6cf2b9fa7ad37c10214593b5f62daa60b \
- --hash=sha256:9f7dd1b45cd13976f49ad21f48a8253628c74ad5eefe3f6e14d50f38cc45f613 \
- --hash=sha256:9fd11381ec66a8569f999dbe11c94870ddf8aecd591300f203a927f18e938a24 \
- --hash=sha256:acec917178af910959205f98f48bcd0a165bdcd6b4d8b3f4baf06fa393ac5ff5 \
- --hash=sha256:b65e86a5cb8244d83eabd021f70968d4a80fac01edc99f6e35d29e5458a128bb \
- --hash=sha256:bad033b8adfe71f650fef2d4fc33452b3310a0e53139a530dbffbcf9fe08c8c8 \
- --hash=sha256:c39ffe1b1ef7543e8efafdc6472d7b9ece8ed1ebe20be261522346463aa2c8c0 \
- --hash=sha256:c79da9673a7d704f6ea2a4bbef6e5e161adbba9d8371476de28a0e3416510cc1 \
- --hash=sha256:d06da931ac88ebb4c02ac89d0b6fdb2e4fff130901edf9c6e7ea0338a2edf6bd \
- --hash=sha256:d44229a0aa8d3587491f359d7326c55b5db6379f68656785dece792afbcfcbae \
- --hash=sha256:d5e50bfbcde1afe32f9cf7fa5e8ea42e218090ecb989c31164d708d0491134b7 \
- --hash=sha256:d822b7ec71ebb5c96df000e2180127e94ba49258335ae796dc4b6201259b2502 \
- --hash=sha256:eeb4042f9b1771a1dd8377cb1cb307c4a4f5821d1491becbdc69bc9de66d3918 \
- --hash=sha256:fb601f00ac87e5aa582a81315ed96768ce3513280729d3f51f79312e2b8b94ac \
- --hash=sha256:fd49da3340339b8aeef31153ce898e93867ee5a7ffcf685e903ceae6717f0cc2
+zope-hookable==6.0 \
+ --hash=sha256:070776c9f36b99fb0df5af2a762a4d4f77e568df36637797e2e8a41c9d8d290d \
+ --hash=sha256:12959a3d70c35a6b835e69d9f70008d3a31e324d2f2d13536c8533f648fa8a96 \
+ --hash=sha256:1668993d40a7cfdc867843dd5725929e7f83a5b0c195c709af1daef8274f43cb \
+ --hash=sha256:1a97f4a46d87ee726d48f3058062e2a1570f136ba9aa788e9c0bcdd5e511609d \
+ --hash=sha256:20936873c8b17f903bc8b63ca13ec6c492665b48067988e4359ddd5d1c5b6f2f \
+ --hash=sha256:2968b37457079678a9d1bd9ef09ff1a224d4234e02120792a9e4e00117193df3 \
+ --hash=sha256:2d7bfcb11356e4dbb3e24943f0055819ff264dada4dc0b68ca012e5a1ff5b474 \
+ --hash=sha256:2d7c782bbfed7aa4846af2a67269718563daa904b33077d97665e5644b08ce2b \
+ --hash=sha256:351cc91c0bc4c9a6d537c033179be22b613e3a60be42ba08f863490c32f736cf \
+ --hash=sha256:3875bfb6d113ecb41c07dee67be16f5a0bbbae13199b9979e2bbeec97d97ec4b \
+ --hash=sha256:4d3200d955c4182223f04593fef4add9771d4156d4ba6f034e65396f3b132139 \
+ --hash=sha256:55a0a9d62ea907861fd79ae39e86f1d9e755064543e46c5430aa586c1b5a4854 \
+ --hash=sha256:5efffb4417604561ff0bae5c80ad3aa2ecd2145c5a8c3a4b0a4a1f55017e95a2 \
+ --hash=sha256:6cd064359ba8c356b1bdb6c84df028ce2f6402f3703a930c4e1bab25d3ff7fff \
+ --hash=sha256:6d5f83e626caa7ed2814609b446dcc6a3abb19db729bc67671c3eef2265006fd \
+ --hash=sha256:6f4d8b99c1d52e3da1b122e42e7c07eb02f6468cd315f0b6811f426586b7aa8c \
+ --hash=sha256:6ff30e7b24859974f2ff3f00b4564c4c8692730690c4c46f0019ef9b42b1f795 \
+ --hash=sha256:7761c5fdf97a274ce8576002a2444ff45645327179ee1bafde5d5d743d0d3556 \
+ --hash=sha256:78e4953334592c42aefa3e74f74d4c5b168a70d2c2d8cd945eb1a2f442eebee5 \
+ --hash=sha256:7c5a8204992fe677bffa0e5e190cb031aef74994c658a0402a338eed7b58fe8d \
+ --hash=sha256:7ca296b1fb0c5f69e8c0e5a90a5a953e456931016fd1f8c513b3baa3751b0640 \
+ --hash=sha256:86bc17b6b3d1d9274168318cf171d509cbe6c8a8bdd8be0282291dac4a768de0 \
+ --hash=sha256:968f196347fa1bd9ffc15e1d1c9d250f46137d36b75bdd2a482c51c5fc314402 \
+ --hash=sha256:aaac43ac9bf9359db5170627f645c6442e9cf74414f8299ee217e4cfb259bc5c \
+ --hash=sha256:ad48a4db8d12701759b93f3cec55aff9f53626dff12ec415144c2d0ee719b965 \
+ --hash=sha256:b99ddae52522dce614a0323812df944b1835d97f254f81c46b33c3bcf82dadf5 \
+ --hash=sha256:ba0e86642d5b33b5edf39d28da26ed34545780a2720aa79e6dda94402c3fc457 \
+ --hash=sha256:c0db442d2e78d5ea1afa5f1c2537bf7201155ec8963abc8d0f3b9257b52caffb \
+ --hash=sha256:c2cf62d40f689d4bfbe733e3ed41ed2b557d011d9050185abb2bc3e96130677d \
+ --hash=sha256:cd6fb03c174a4e20f4faec9ff22bace922bb59adb44078aebec862605bbcee92 \
+ --hash=sha256:e21dc34ba2453d798cf3cab92efb4994e55659c19a1f77d4cf0c2a0067e78583 \
+ --hash=sha256:ee1e32f54db69abfb6c7e227e65452d2b92e1cefae93a51106419ec623a845ff \
+ --hash=sha256:ee7ff109b2b4793137b6bd82ddc3516cbd643e67813e11e31e0bf613b387d2ec \
+ --hash=sha256:f2eeba6e2fd69e9e72a003edcceed7ce4614ad1c1e866bf168c92540c3658343 \
+ --hash=sha256:f58a129a63289c44ba84ae951019f8a60d34c4d948350be7fa2abda5106f8498 \
+ --hash=sha256:ff5ee2df0dc3ccc524772e00d5a1991c3b8d942cc12fd503a7bf9dc35a040779
# via zope-component
-zope-interface==6.0 \
- --hash=sha256:042f2381118b093714081fd82c98e3b189b68db38ee7d35b63c327c470ef8373 \
- --hash=sha256:0ec9653825f837fbddc4e4b603d90269b501486c11800d7c761eee7ce46d1bbb \
- --hash=sha256:12175ca6b4db7621aedd7c30aa7cfa0a2d65ea3a0105393e05482d7a2d367446 \
- --hash=sha256:1592f68ae11e557b9ff2bc96ac8fc30b187e77c45a3c9cd876e3368c53dc5ba8 \
- --hash=sha256:23ac41d52fd15dd8be77e3257bc51bbb82469cf7f5e9a30b75e903e21439d16c \
- --hash=sha256:424d23b97fa1542d7be882eae0c0fc3d6827784105264a8169a26ce16db260d8 \
- --hash=sha256:4407b1435572e3e1610797c9203ad2753666c62883b921318c5403fb7139dec2 \
- --hash=sha256:48f4d38cf4b462e75fac78b6f11ad47b06b1c568eb59896db5b6ec1094eb467f \
- --hash=sha256:4c3d7dfd897a588ec27e391edbe3dd320a03684457470415870254e714126b1f \
- --hash=sha256:5171eb073474a5038321409a630904fd61f12dd1856dd7e9d19cd6fe092cbbc5 \
- --hash=sha256:5a158846d0fca0a908c1afb281ddba88744d403f2550dc34405c3691769cdd85 \
- --hash=sha256:6ee934f023f875ec2cfd2b05a937bd817efcc6c4c3f55c5778cbf78e58362ddc \
- --hash=sha256:790c1d9d8f9c92819c31ea660cd43c3d5451df1df61e2e814a6f99cebb292788 \
- --hash=sha256:809fe3bf1a91393abc7e92d607976bbb8586512913a79f2bf7d7ec15bd8ea518 \
- --hash=sha256:87b690bbee9876163210fd3f500ee59f5803e4a6607d1b1238833b8885ebd410 \
- --hash=sha256:89086c9d3490a0f265a3c4b794037a84541ff5ffa28bb9c24cc9f66566968464 \
- --hash=sha256:99856d6c98a326abbcc2363827e16bd6044f70f2ef42f453c0bd5440c4ce24e5 \
- --hash=sha256:aab584725afd10c710b8f1e6e208dbee2d0ad009f57d674cb9d1b3964037275d \
- --hash=sha256:af169ba897692e9cd984a81cb0f02e46dacdc07d6cf9fd5c91e81f8efaf93d52 \
- --hash=sha256:b39b8711578dcfd45fc0140993403b8a81e879ec25d53189f3faa1f006087dca \
- --hash=sha256:b3f543ae9d3408549a9900720f18c0194ac0fe810cecda2a584fd4dca2eb3bb8 \
- --hash=sha256:d0583b75f2e70ec93f100931660328965bb9ff65ae54695fb3fa0a1255daa6f2 \
- --hash=sha256:dfbbbf0809a3606046a41f8561c3eada9db811be94138f42d9135a5c47e75f6f \
- --hash=sha256:e538f2d4a6ffb6edfb303ce70ae7e88629ac6e5581870e66c306d9ad7b564a58 \
- --hash=sha256:eba51599370c87088d8882ab74f637de0c4f04a6d08a312dce49368ba9ed5c2a \
- --hash=sha256:ee4b43f35f5dc15e1fec55ccb53c130adb1d11e8ad8263d68b1284b66a04190d \
- --hash=sha256:f2363e5fd81afb650085c6686f2ee3706975c54f331b426800b53531191fdf28 \
- --hash=sha256:f299c020c6679cb389814a3b81200fe55d428012c5e76da7e722491f5d205990 \
- --hash=sha256:f72f23bab1848edb7472309e9898603141644faec9fd57a823ea6b4d1c4c8995 \
- --hash=sha256:fa90bac61c9dc3e1a563e5babb3fd2c0c1c80567e815442ddbe561eadc803b30
+zope-interface==6.1 \
+ --hash=sha256:0c8cf55261e15590065039696607f6c9c1aeda700ceee40c70478552d323b3ff \
+ --hash=sha256:13b7d0f2a67eb83c385880489dbb80145e9d344427b4262c49fbf2581677c11c \
+ --hash=sha256:1f294a15f7723fc0d3b40701ca9b446133ec713eafc1cc6afa7b3d98666ee1ac \
+ --hash=sha256:239a4a08525c080ff833560171d23b249f7f4d17fcbf9316ef4159f44997616f \
+ --hash=sha256:2f8d89721834524a813f37fa174bac074ec3d179858e4ad1b7efd4401f8ac45d \
+ --hash=sha256:2fdc7ccbd6eb6b7df5353012fbed6c3c5d04ceaca0038f75e601060e95345309 \
+ --hash=sha256:34c15ca9248f2e095ef2e93af2d633358c5f048c49fbfddf5fdfc47d5e263736 \
+ --hash=sha256:387545206c56b0315fbadb0431d5129c797f92dc59e276b3ce82db07ac1c6179 \
+ --hash=sha256:43b576c34ef0c1f5a4981163b551a8781896f2a37f71b8655fd20b5af0386abb \
+ --hash=sha256:57d0a8ce40ce440f96a2c77824ee94bf0d0925e6089df7366c2272ccefcb7941 \
+ --hash=sha256:5a804abc126b33824a44a7aa94f06cd211a18bbf31898ba04bd0924fbe9d282d \
+ --hash=sha256:67be3ca75012c6e9b109860820a8b6c9a84bfb036fbd1076246b98e56951ca92 \
+ --hash=sha256:6af47f10cfc54c2ba2d825220f180cc1e2d4914d783d6fc0cd93d43d7bc1c78b \
+ --hash=sha256:6dc998f6de015723196a904045e5a2217f3590b62ea31990672e31fbc5370b41 \
+ --hash=sha256:70d2cef1bf529bff41559be2de9d44d47b002f65e17f43c73ddefc92f32bf00f \
+ --hash=sha256:7ebc4d34e7620c4f0da7bf162c81978fce0ea820e4fa1e8fc40ee763839805f3 \
+ --hash=sha256:964a7af27379ff4357dad1256d9f215047e70e93009e532d36dcb8909036033d \
+ --hash=sha256:97806e9ca3651588c1baaebb8d0c5ee3db95430b612db354c199b57378312ee8 \
+ --hash=sha256:9b9bc671626281f6045ad61d93a60f52fd5e8209b1610972cf0ef1bbe6d808e3 \
+ --hash=sha256:9ffdaa5290422ac0f1688cb8adb1b94ca56cee3ad11f29f2ae301df8aecba7d1 \
+ --hash=sha256:a0da79117952a9a41253696ed3e8b560a425197d4e41634a23b1507efe3273f1 \
+ --hash=sha256:a41f87bb93b8048fe866fa9e3d0c51e27fe55149035dcf5f43da4b56732c0a40 \
+ --hash=sha256:aa6fd016e9644406d0a61313e50348c706e911dca29736a3266fc9e28ec4ca6d \
+ --hash=sha256:ad54ed57bdfa3254d23ae04a4b1ce405954969c1b0550cc2d1d2990e8b439de1 \
+ --hash=sha256:b012d023b4fb59183909b45d7f97fb493ef7a46d2838a5e716e3155081894605 \
+ --hash=sha256:b51b64432eed4c0744241e9ce5c70dcfecac866dff720e746d0a9c82f371dfa7 \
+ --hash=sha256:bbe81def9cf3e46f16ce01d9bfd8bea595e06505e51b7baf45115c77352675fd \
+ --hash=sha256:c9559138690e1bd4ea6cd0954d22d1e9251e8025ce9ede5d0af0ceae4a401e43 \
+ --hash=sha256:e30506bcb03de8983f78884807e4fd95d8db6e65b69257eea05d13d519b83ac0 \
+ --hash=sha256:e33e86fd65f369f10608b08729c8f1c92ec7e0e485964670b4d2633a4812d36b \
+ --hash=sha256:e441e8b7d587af0414d25e8d05e27040d78581388eed4c54c30c0c91aad3a379 \
+ --hash=sha256:e8bb9c990ca9027b4214fa543fd4025818dc95f8b7abce79d61dc8a2112b561a \
+ --hash=sha256:ef43ee91c193f827e49599e824385ec7c7f3cd152d74cb1dfe02cb135f264d83 \
+ --hash=sha256:ef467d86d3cfde8b39ea1b35090208b0447caaabd38405420830f7fd85fbdd56 \
+ --hash=sha256:f89b28772fc2562ed9ad871c865f5320ef761a7fcc188a935e21fe8b31a38ca9 \
+ --hash=sha256:fddbab55a2473f1d3b8833ec6b7ac31e8211b0aa608df5ab09ce07f3727326de
# via
# pypom
# zope-component
# The following packages are considered to be unsafe in a requirements file:
-pip==23.1.2 \
- --hash=sha256:0e7c86f486935893c708287b30bd050a36ac827ec7fe5e43fe7cb198dd835fba \
- --hash=sha256:3ef6ac33239e4027d9a5598a381b9d30880a1477e50039db2eac6e8a8f6d1b18
+pip==23.3.2 \
+ --hash=sha256:5052d7889c1f9d05224cd41741acb7c5d6fa735ab34e339624a614eaaa7e7d76 \
+ --hash=sha256:7fd9972f96db22c8077a1ee2691b172c8089b17a5652a44494a9ecb0d78f9149
# via pip-tools
-setuptools==68.0.0 \
- --hash=sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f \
- --hash=sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235
+setuptools==69.0.3 \
+ --hash=sha256:385eb4edd9c9d5c17540511303e39a147ce2fc04bc55289c322b9e5904fe2c05 \
+ --hash=sha256:be1af57fc409f93647f2e8e4573a142ed38724b8cdd389706a867bb4efcf1e78
# via
# nodeenv
# pip-tools
From 485c9ac7d17b17da236cf36691593a78f2e85afe Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Thu, 1 Feb 2024 14:00:31 +0000
Subject: [PATCH 009/128] Update dependency puppeteer to v21
---
package.json | 2 +-
yarn.lock | 422 ++++++++++++++++++++++++++++++++++++---------------
2 files changed, 304 insertions(+), 120 deletions(-)
diff --git a/package.json b/package.json
index 5874434a911..994b09486a6 100644
--- a/package.json
+++ b/package.json
@@ -109,7 +109,7 @@
"mini-css-extract-plugin": "2.6.1",
"path": "0.12.7",
"prettier": "2.0.5",
- "puppeteer": "19.3.0",
+ "puppeteer": "21.10.0",
"setup-polly-jest": "0.9.1",
"style-loader": "3.3.3",
"webpack": "5.88.2",
diff --git a/yarn.lock b/yarn.lock
index 6d1b9f94331..1ee5d4efbb0 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1983,6 +1983,19 @@
qs "^6.10.1"
url-parse "^1.5.3"
+"@puppeteer/browsers@1.9.1":
+ version "1.9.1"
+ resolved "https://registry.yarnpkg.com/@puppeteer/browsers/-/browsers-1.9.1.tgz#384ee8b09786f0e8f62b1925e4c492424cb549ee"
+ integrity sha512-PuvK6xZzGhKPvlx3fpfdM2kYY3P/hB1URtK8wA7XUJ6prn6pp22zvJHu48th0SGcHL9SutbPHrFuQgfXTFobWA==
+ dependencies:
+ debug "4.3.4"
+ extract-zip "2.0.1"
+ progress "2.0.3"
+ proxy-agent "6.3.1"
+ tar-fs "3.0.4"
+ unbzip2-stream "1.4.3"
+ yargs "17.7.2"
+
"@redocly/ajv@^8.11.0":
version "8.11.0"
resolved "https://registry.yarnpkg.com/@redocly/ajv/-/ajv-8.11.0.tgz#2fad322888dc0113af026e08fceb3e71aae495ae"
@@ -2110,6 +2123,11 @@
resolved "https://registry.npmjs.org/@tootallnate/once/-/once-2.0.0.tgz"
integrity sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==
+"@tootallnate/quickjs-emscripten@^0.23.0":
+ version "0.23.0"
+ resolved "https://registry.yarnpkg.com/@tootallnate/quickjs-emscripten/-/quickjs-emscripten-0.23.0.tgz#db4ecfd499a9765ab24002c3b696d02e6d32a12c"
+ integrity sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==
+
"@types/aria-query@^4.2.0":
version "4.2.2"
resolved "https://registry.npmjs.org/@types/aria-query/-/aria-query-4.2.2.tgz"
@@ -2377,11 +2395,6 @@
resolved "https://registry.yarnpkg.com/@types/node/-/node-14.18.58.tgz#547e64027defb95f34824794574dabf5417bc615"
integrity sha512-Y8ETZc8afYf6lQ/mVp096phIVsgD/GmDxtm3YaPcc+71jmi/J6zdwbwaUU4JvS56mq6aSfbpkcKhQ5WugrWFPw==
-"@types/parse-json@^4.0.0":
- version "4.0.0"
- resolved "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz"
- integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA==
-
"@types/parse5@^6.0.3":
version "6.0.3"
resolved "https://registry.npmjs.org/@types/parse5/-/parse5-6.0.3.tgz"
@@ -2780,6 +2793,13 @@ agent-base@6:
dependencies:
debug "4"
+agent-base@^7.0.2, agent-base@^7.1.0:
+ version "7.1.0"
+ resolved "https://registry.yarnpkg.com/agent-base/-/agent-base-7.1.0.tgz#536802b76bc0b34aa50195eb2442276d613e3434"
+ integrity sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==
+ dependencies:
+ debug "^4.3.4"
+
aggregate-error@^3.0.0:
version "3.1.0"
resolved "https://registry.yarnpkg.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a"
@@ -3038,6 +3058,13 @@ ast-types-flow@0.0.7, ast-types-flow@^0.0.7:
resolved "https://registry.npmjs.org/ast-types-flow/-/ast-types-flow-0.0.7.tgz"
integrity sha1-9wtzXGvKGlycItmCw+Oef+ujva0=
+ast-types@^0.13.4:
+ version "0.13.4"
+ resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.13.4.tgz#ee0d77b343263965ecc3fb62da16e7222b2b6782"
+ integrity sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==
+ dependencies:
+ tslib "^2.0.1"
+
asynckit@^0.4.0:
version "0.4.0"
resolved "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz"
@@ -3075,6 +3102,11 @@ axobject-query@^2.0.2:
resolved "https://registry.npmjs.org/axobject-query/-/axobject-query-2.2.0.tgz"
integrity sha512-Td525n+iPOOyUQIeBfcASuG6uJsDOITl7Mds5gFyerkWiX7qhUTdYUBlSgNMyVqtSJqwpt1kXGLdUt6SykLMRA==
+b4a@^1.6.4:
+ version "1.6.4"
+ resolved "https://registry.yarnpkg.com/b4a/-/b4a-1.6.4.tgz#ef1c1422cae5ce6535ec191baeed7567443f36c9"
+ integrity sha512-fpWrvyVHEKyeEvbKZTVOeZF3VSKKWtJxFIxX/jaVPf+cLbGUSitjb49pHLqPV2BUNNZ0LcoeEGfE/YCpyDYHIw==
+
babel-jest@29.3.1:
version "29.3.1"
resolved "https://registry.npmjs.org/babel-jest/-/babel-jest-29.3.1.tgz"
@@ -3241,6 +3273,11 @@ basic-auth@~2.0.1:
dependencies:
safe-buffer "5.1.2"
+basic-ftp@^5.0.2:
+ version "5.0.4"
+ resolved "https://registry.yarnpkg.com/basic-ftp/-/basic-ftp-5.0.4.tgz#28aeab7bfbbde5f5d0159cd8bb3b8e633bbb091d"
+ integrity sha512-8PzkB0arJFV4jJWSGOYR+OEic6aeKMu/osRhBULN6RY0ykby6LKhbmuQ5ublvaas5BOwboah5D87nrHyuh8PPA==
+
batch@0.6.1:
version "0.6.1"
resolved "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz"
@@ -3256,15 +3293,6 @@ binary-extensions@^2.0.0:
resolved "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz"
integrity sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==
-bl@^4.0.3:
- version "4.1.0"
- resolved "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz"
- integrity sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==
- dependencies:
- buffer "^5.5.0"
- inherits "^2.0.4"
- readable-stream "^3.4.0"
-
blueimp-md5@^2.10.0:
version "2.19.0"
resolved "https://registry.npmjs.org/blueimp-md5/-/blueimp-md5-2.19.0.tgz"
@@ -3450,7 +3478,7 @@ buffer@6.0.3:
base64-js "^1.3.1"
ieee754 "^1.2.1"
-buffer@^5.2.1, buffer@^5.5.0:
+buffer@^5.2.1:
version "5.7.1"
resolved "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz"
integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==
@@ -3577,11 +3605,6 @@ chokidar@^3.5.3:
optionalDependencies:
fsevents "~2.3.2"
-chownr@^1.1.1:
- version "1.1.4"
- resolved "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz"
- integrity sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==
-
chownr@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/chownr/-/chownr-2.0.0.tgz#15bfbe53d2eab4cf70f18a8cd68ebe5b3cb1dece"
@@ -3592,6 +3615,14 @@ chrome-trace-event@^1.0.2:
resolved "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz"
integrity sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==
+chromium-bidi@0.5.6:
+ version "0.5.6"
+ resolved "https://registry.yarnpkg.com/chromium-bidi/-/chromium-bidi-0.5.6.tgz#a19e0aacda098878b49428939a39c477db0b4c45"
+ integrity sha512-ber8smgoAs4EqSUHRb0I8fpx371ZmvsdQav8HRM9oO4fk5Ox16vQiNYXlsZkRj4FfvVL2dCef+zBFQixp+79CA==
+ dependencies:
+ mitt "3.0.1"
+ urlpattern-polyfill "10.0.0"
+
ci-info@^3.2.0:
version "3.3.0"
resolved "https://registry.npmjs.org/ci-info/-/ci-info-3.3.0.tgz"
@@ -3903,16 +3934,15 @@ cors@^2.8.5:
object-assign "^4"
vary "^1"
-cosmiconfig@7.0.1:
- version "7.0.1"
- resolved "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.0.1.tgz"
- integrity sha512-a1YWNUV2HwGimB7dU2s1wUMurNKjpx60HxBB6xUM8Re+2s1g1IIfJvFR0/iCF+XHdE0GMTKTuLR32UQff4TEyQ==
+cosmiconfig@9.0.0:
+ version "9.0.0"
+ resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-9.0.0.tgz#34c3fc58287b915f3ae905ab6dc3de258b55ad9d"
+ integrity sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==
dependencies:
- "@types/parse-json" "^4.0.0"
- import-fresh "^3.2.1"
- parse-json "^5.0.0"
- path-type "^4.0.0"
- yaml "^1.10.0"
+ env-paths "^2.2.1"
+ import-fresh "^3.3.0"
+ js-yaml "^4.1.0"
+ parse-json "^5.2.0"
cosmiconfig@^8.3.6:
version "8.3.6"
@@ -3955,12 +3985,12 @@ create-hmac@^1.1.0, create-hmac@^1.1.4, create-hmac@^1.1.7:
safe-buffer "^5.0.1"
sha.js "^2.4.8"
-cross-fetch@3.1.5:
- version "3.1.5"
- resolved "https://registry.npmjs.org/cross-fetch/-/cross-fetch-3.1.5.tgz"
- integrity sha512-lvb1SBsI0Z7GDwmuid+mU3kWVBwTVUbe7S0H52yaaAdQOXq2YktTCZdlAcNKFzE6QtRz0snpw9bNiPeOIkkQvw==
+cross-fetch@4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/cross-fetch/-/cross-fetch-4.0.0.tgz#f037aef1580bb3a1a35164ea2a848ba81b445983"
+ integrity sha512-e4a5N8lVvuLgAWgnCrLr2PP0YyDOTHa9H/Rj54dirp61qXnNq46m82bRhNqIA5VccJtWBvPTFRV3TtvHUKPB1g==
dependencies:
- node-fetch "2.6.7"
+ node-fetch "^2.6.12"
cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3:
version "7.0.3"
@@ -4158,6 +4188,11 @@ damerau-levenshtein@^1.0.4:
resolved "https://registry.npmjs.org/damerau-levenshtein/-/damerau-levenshtein-1.0.7.tgz"
integrity sha512-VvdQIPGdWP0SqFXghj79Wf/5LArmreyMsGLa6FG6iC4t3j7j5s71TrwWmT/4akbDQIqjfACkLZmjXhA7g2oUZw==
+data-uri-to-buffer@^6.0.0:
+ version "6.0.1"
+ resolved "https://registry.yarnpkg.com/data-uri-to-buffer/-/data-uri-to-buffer-6.0.1.tgz#540bd4c8753a25ee129035aebdedf63b078703c7"
+ integrity sha512-MZd3VlchQkp8rdend6vrx7MmVDJzSNTBvghvKjirLkD+WTChA3KUf0jkE68Q4UyctNqI11zZO9/x2Yx+ub5Cvg==
+
data-urls@^3.0.1:
version "3.0.2"
resolved "https://registry.npmjs.org/data-urls/-/data-urls-3.0.2.tgz"
@@ -4272,6 +4307,15 @@ define-properties@^1.1.4:
has-property-descriptors "^1.0.0"
object-keys "^1.1.1"
+degenerator@^5.0.0:
+ version "5.0.1"
+ resolved "https://registry.yarnpkg.com/degenerator/-/degenerator-5.0.1.tgz#9403bf297c6dad9a1ece409b37db27954f91f2f5"
+ integrity sha512-TllpMR/t0M5sqCXfj85i4XaAzxmS5tVA16dqvdkMwGmzI+dXLXnw3J+3Vdv7VKw+ThlTMboK6i9rnZ6Nntj5CQ==
+ dependencies:
+ ast-types "^0.13.4"
+ escodegen "^2.1.0"
+ esprima "^4.0.1"
+
del@^4.1.1:
version "4.1.1"
resolved "https://registry.npmjs.org/del/-/del-4.1.1.tgz"
@@ -4335,10 +4379,10 @@ detect-node@^2.0.4:
resolved "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz"
integrity sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==
-devtools-protocol@0.0.1056733:
- version "0.0.1056733"
- resolved "https://registry.npmjs.org/devtools-protocol/-/devtools-protocol-0.0.1056733.tgz"
- integrity sha512-CmTu6SQx2g3TbZzDCAV58+LTxVdKplS7xip0g5oDXpZ+isr0rv5dDP8ToyVRywzPHkCCPKgKgScEcwz4uPWDIA==
+devtools-protocol@0.0.1232444:
+ version "0.0.1232444"
+ resolved "https://registry.yarnpkg.com/devtools-protocol/-/devtools-protocol-0.0.1232444.tgz#406345a90a871ba852c530d73482275234936eed"
+ integrity sha512-pM27vqEfxSxRkTMnF+XCmxSEb6duO5R+t8A9DEEJgy4Wz2RVanje2mmj99B6A3zv2r/qGfYlOvYznUhuokizmg==
dezalgo@^1.0.4:
version "1.0.4"
@@ -4552,7 +4596,7 @@ encodeurl@~1.0.2:
resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59"
integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==
-end-of-stream@^1.1.0, end-of-stream@^1.4.1:
+end-of-stream@^1.1.0:
version "1.4.4"
resolved "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz"
integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==
@@ -4582,6 +4626,11 @@ entities@~3.0.1:
resolved "https://registry.npmjs.org/entities/-/entities-3.0.1.tgz"
integrity sha512-WiyBqoomrwMdFG1e0kqvASYfnlb0lp8M5o5Fw2OFq1hNZxxcNk8Ik0Xm7LxzBhuidnZB/UtBqVCgUz3kBOP51Q==
+env-paths@^2.2.1:
+ version "2.2.1"
+ resolved "https://registry.yarnpkg.com/env-paths/-/env-paths-2.2.1.tgz#420399d416ce1fbe9bc0a07c62fa68d67fd0f8f2"
+ integrity sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==
+
envinfo@^7.7.3:
version "7.8.1"
resolved "https://registry.npmjs.org/envinfo/-/envinfo-7.8.1.tgz"
@@ -4730,6 +4779,17 @@ escodegen@^2.0.0:
optionalDependencies:
source-map "~0.6.1"
+escodegen@^2.1.0:
+ version "2.1.0"
+ resolved "https://registry.yarnpkg.com/escodegen/-/escodegen-2.1.0.tgz#ba93bbb7a43986d29d6041f99f5262da773e2e17"
+ integrity sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==
+ dependencies:
+ esprima "^4.0.1"
+ estraverse "^5.2.0"
+ esutils "^2.0.2"
+ optionalDependencies:
+ source-map "~0.6.1"
+
eslint-config-airbnb-base@^15.0.0:
version "15.0.0"
resolved "https://registry.npmjs.org/eslint-config-airbnb-base/-/eslint-config-airbnb-base-15.0.0.tgz"
@@ -5086,6 +5146,11 @@ fast-diff@^1.1.2:
resolved "https://registry.npmjs.org/fast-diff/-/fast-diff-1.2.0.tgz"
integrity sha512-xJuoT5+L99XlZ8twedaRf6Ax2TgQVxvgZOYoPKqZufmJib0tL2tegPBOZb1pVNgIhlqDlA0eO0c3wBvQcmzx4w==
+fast-fifo@^1.1.0, fast-fifo@^1.2.0:
+ version "1.3.2"
+ resolved "https://registry.yarnpkg.com/fast-fifo/-/fast-fifo-1.3.2.tgz#286e31de96eb96d38a97899815740ba2a4f3640c"
+ integrity sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==
+
fast-glob@^3.2.11, fast-glob@^3.2.9:
version "3.2.11"
resolved "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.11.tgz"
@@ -5344,11 +5409,6 @@ fresh@0.5.2:
resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7"
integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==
-fs-constants@^1.0.0:
- version "1.0.0"
- resolved "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz"
- integrity sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==
-
fs-exists-sync@^0.1.0:
version "0.1.0"
resolved "https://registry.npmjs.org/fs-exists-sync/-/fs-exists-sync-0.1.0.tgz"
@@ -5363,6 +5423,15 @@ fs-extra@^10.0.0:
jsonfile "^6.0.1"
universalify "^2.0.0"
+fs-extra@^8.1.0:
+ version "8.1.0"
+ resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-8.1.0.tgz#49d43c45a88cd9677668cb7be1b46efdb8d2e1c0"
+ integrity sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==
+ dependencies:
+ graceful-fs "^4.2.0"
+ jsonfile "^4.0.0"
+ universalify "^0.1.0"
+
fs-minipass@^2.0.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/fs-minipass/-/fs-minipass-2.1.0.tgz#7f5036fdbf12c63c169190cbe4199c852271f9fb"
@@ -5480,6 +5549,16 @@ get-symbol-description@^1.0.0:
call-bind "^1.0.2"
get-intrinsic "^1.1.1"
+get-uri@^6.0.1:
+ version "6.0.2"
+ resolved "https://registry.yarnpkg.com/get-uri/-/get-uri-6.0.2.tgz#e019521646f4a8ff6d291fbaea2c46da204bb75b"
+ integrity sha512-5KLucCJobh8vBY1K07EFV4+cPZH3mrV9YeAruUseCQKHB58SGjjT2l9/eA9LD082IiuMjSlFJEcdJ27TXvbZNw==
+ dependencies:
+ basic-ftp "^5.0.2"
+ data-uri-to-buffer "^6.0.0"
+ debug "^4.3.4"
+ fs-extra "^8.1.0"
+
glob-parent@^5.1.2, glob-parent@~5.1.2:
version "5.1.2"
resolved "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz"
@@ -5895,6 +5974,14 @@ http-proxy-agent@^5.0.0:
agent-base "6"
debug "4"
+http-proxy-agent@^7.0.0:
+ version "7.0.0"
+ resolved "https://registry.yarnpkg.com/http-proxy-agent/-/http-proxy-agent-7.0.0.tgz#e9096c5afd071a3fce56e6252bb321583c124673"
+ integrity sha512-+ZT+iBxVUQ1asugqnD6oWoRiS25AkjNfG085dKJGtGxkdwLQrMKU5wJr2bOOFAXzKcTuqq+7fZlTMgG3SRfIYQ==
+ dependencies:
+ agent-base "^7.1.0"
+ debug "^4.3.4"
+
http-proxy-middleware@^2.0.3:
version "2.0.6"
resolved "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz"
@@ -5920,14 +6007,6 @@ http2-client@^1.2.5:
resolved "https://registry.yarnpkg.com/http2-client/-/http2-client-1.3.5.tgz#20c9dc909e3cc98284dd20af2432c524086df181"
integrity sha512-EC2utToWl4RKfs5zd36Mxq7nzHHBuomZboI0yYL6Y0RmBgT7Sgkq4rQ0ezFTYoIsSs7Tm9SJe+o2FcAg6GBhGA==
-https-proxy-agent@5.0.1:
- version "5.0.1"
- resolved "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz"
- integrity sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==
- dependencies:
- agent-base "6"
- debug "4"
-
https-proxy-agent@^5.0.0:
version "5.0.0"
resolved "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-5.0.0.tgz"
@@ -5936,6 +6015,14 @@ https-proxy-agent@^5.0.0:
agent-base "6"
debug "4"
+https-proxy-agent@^7.0.2:
+ version "7.0.2"
+ resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-7.0.2.tgz#e2645b846b90e96c6e6f347fb5b2e41f1590b09b"
+ integrity sha512-NmLNjm6ucYwtcUmL7JQC1ZQ57LmHP4lT15FQ8D61nak1rO6DH+fz5qNK2Ap5UN4ZapYICE3/0KodcLYSPsPbaA==
+ dependencies:
+ agent-base "^7.0.2"
+ debug "4"
+
human-signals@^2.1.0:
version "2.1.0"
resolved "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz"
@@ -6060,6 +6147,16 @@ interpret@^3.1.1:
resolved "https://registry.yarnpkg.com/interpret/-/interpret-3.1.1.tgz#5be0ceed67ca79c6c4bc5cf0d7ee843dcea110c4"
integrity sha512-6xwYfHbajpoF0xLW+iwLkhwgvLoZDfjYfoFNu8ftMoXINzwuymNLd9u/KmwtdT2GbR+/Cz66otEGEVVUHX9QLQ==
+ip@^1.1.8:
+ version "1.1.8"
+ resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.8.tgz#ae05948f6b075435ed3307acce04629da8cdbf48"
+ integrity sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==
+
+ip@^2.0.0:
+ version "2.0.0"
+ resolved "https://registry.yarnpkg.com/ip/-/ip-2.0.0.tgz#4cf4ab182fee2314c75ede1276f8c80b479936da"
+ integrity sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==
+
ipaddr.js@1.9.1:
version "1.9.1"
resolved "https://registry.yarnpkg.com/ipaddr.js/-/ipaddr.js-1.9.1.tgz#bff38543eeb8984825079ff3a2a8e6cbd46781b3"
@@ -7085,6 +7182,13 @@ jsonc-parser@~3.1.0:
resolved "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.1.0.tgz"
integrity sha512-DRf0QjnNeCUds3xTjKlQQ3DpJD51GvDjJfnxUVWg6PZTo2otSm+slzNAxU/35hF8/oJIKoG9slq30JYOsF2azg==
+jsonfile@^4.0.0:
+ version "4.0.0"
+ resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb"
+ integrity sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==
+ optionalDependencies:
+ graceful-fs "^4.1.6"
+
jsonfile@^6.0.1:
version "6.1.0"
resolved "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz"
@@ -7293,6 +7397,11 @@ lru-cache@^6.0.0:
dependencies:
yallist "^4.0.0"
+lru-cache@^7.14.1:
+ version "7.18.3"
+ resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-7.18.3.tgz#f793896e0fd0e954a59dfdd82f0773808df6aa89"
+ integrity sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==
+
"lru-cache@^9.1.1 || ^10.0.0":
version "10.0.0"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.0.0.tgz#b9e2a6a72a129d81ab317202d93c7691df727e61"
@@ -7586,6 +7695,11 @@ minizlib@^2.1.1:
minipass "^3.0.0"
yallist "^4.0.0"
+mitt@3.0.1:
+ version "3.0.1"
+ resolved "https://registry.yarnpkg.com/mitt/-/mitt-3.0.1.tgz#ea36cf0cc30403601ae074c8f77b7092cdab36d1"
+ integrity sha512-vKivATfr97l2/QBCYAkXYDbrIWPM2IIKEl7YPhjCvKlG3kE2gm+uBo6nEXK3M5/Ffh/FLpKExzOQ3JJoJGFKBw==
+
mitt@^1.1.2:
version "1.2.0"
resolved "https://registry.npmjs.org/mitt/-/mitt-1.2.0.tgz"
@@ -7677,6 +7791,11 @@ neo-async@^2.6.2:
resolved "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz"
integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==
+netmask@^2.0.2:
+ version "2.0.2"
+ resolved "https://registry.yarnpkg.com/netmask/-/netmask-2.0.2.tgz#8b01a07644065d536383835823bc52004ebac5e7"
+ integrity sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==
+
no-case@^3.0.4:
version "3.0.4"
resolved "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz"
@@ -7707,14 +7826,7 @@ node-fetch-h2@^2.3.0:
dependencies:
http2-client "^1.2.5"
-node-fetch@2.6.7:
- version "2.6.7"
- resolved "https://registry.npmjs.org/node-fetch/-/node-fetch-2.6.7.tgz"
- integrity sha512-ZjMPFEfVx5j+y2yF35Kzx5sF7kDzxuDj6ziH4FFbOp87zKDZNx8yExJIb05OGF4Nlt9IHFIMBkRl41VdvcNdbQ==
- dependencies:
- whatwg-url "^5.0.0"
-
-node-fetch@^2.6.1:
+node-fetch@^2.6.1, node-fetch@^2.6.12:
version "2.7.0"
resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d"
integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==
@@ -8053,6 +8165,29 @@ p-try@^2.0.0:
resolved "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz"
integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==
+pac-proxy-agent@^7.0.1:
+ version "7.0.1"
+ resolved "https://registry.yarnpkg.com/pac-proxy-agent/-/pac-proxy-agent-7.0.1.tgz#6b9ddc002ec3ff0ba5fdf4a8a21d363bcc612d75"
+ integrity sha512-ASV8yU4LLKBAjqIPMbrgtaKIvxQri/yh2OpI+S6hVa9JRkUI3Y3NPFbfngDtY7oFtSMD3w31Xns89mDa3Feo5A==
+ dependencies:
+ "@tootallnate/quickjs-emscripten" "^0.23.0"
+ agent-base "^7.0.2"
+ debug "^4.3.4"
+ get-uri "^6.0.1"
+ http-proxy-agent "^7.0.0"
+ https-proxy-agent "^7.0.2"
+ pac-resolver "^7.0.0"
+ socks-proxy-agent "^8.0.2"
+
+pac-resolver@^7.0.0:
+ version "7.0.0"
+ resolved "https://registry.yarnpkg.com/pac-resolver/-/pac-resolver-7.0.0.tgz#79376f1ca26baf245b96b34c339d79bff25e900c"
+ integrity sha512-Fd9lT9vJbHYRACT8OhCbZBbxr6KRSawSovFpy8nDGshaK99S/EBhVIHp9+crhxrsZOuvLpgL1n23iyPg6Rl2hg==
+ dependencies:
+ degenerator "^5.0.0"
+ ip "^1.1.8"
+ netmask "^2.0.2"
+
pako@2.0.4:
version "2.0.4"
resolved "https://registry.npmjs.org/pako/-/pako-2.0.4.tgz"
@@ -8084,7 +8219,7 @@ parse-asn1@^5.0.0, parse-asn1@^5.1.6:
pbkdf2 "^3.0.3"
safe-buffer "^5.1.1"
-parse-json@^5.0.0, parse-json@^5.2.0:
+parse-json@^5.2.0:
version "5.2.0"
resolved "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz"
integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==
@@ -8478,9 +8613,23 @@ proxy-addr@~2.0.7:
forwarded "0.2.0"
ipaddr.js "1.9.1"
-proxy-from-env@1.1.0:
+proxy-agent@6.3.1:
+ version "6.3.1"
+ resolved "https://registry.yarnpkg.com/proxy-agent/-/proxy-agent-6.3.1.tgz#40e7b230552cf44fd23ffaf7c59024b692612687"
+ integrity sha512-Rb5RVBy1iyqOtNl15Cw/llpeLH8bsb37gM1FUfKQ+Wck6xHlbAhWGUFiTRHtkjqGTA5pSHz6+0hrPW/oECihPQ==
+ dependencies:
+ agent-base "^7.0.2"
+ debug "^4.3.4"
+ http-proxy-agent "^7.0.0"
+ https-proxy-agent "^7.0.2"
+ lru-cache "^7.14.1"
+ pac-proxy-agent "^7.0.1"
+ proxy-from-env "^1.1.0"
+ socks-proxy-agent "^8.0.2"
+
+proxy-from-env@^1.1.0:
version "1.1.0"
- resolved "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz"
+ resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2"
integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==
psl@^1.1.33:
@@ -8518,33 +8667,26 @@ punycode@^2.1.0, punycode@^2.1.1:
resolved "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz"
integrity sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==
-puppeteer-core@19.3.0:
- version "19.3.0"
- resolved "https://registry.npmjs.org/puppeteer-core/-/puppeteer-core-19.3.0.tgz"
- integrity sha512-P8VAAOBnBJo/7DKJnj1b0K9kZBF2D8lkdL94CjJ+DZKCp182LQqYemPI9omUSZkh4bgykzXjZhaVR1qtddTTQg==
+puppeteer-core@21.10.0:
+ version "21.10.0"
+ resolved "https://registry.yarnpkg.com/puppeteer-core/-/puppeteer-core-21.10.0.tgz#d1b61c44e258e51e0fa74f1110c540be096a3e28"
+ integrity sha512-NVaqO3K462qwMuLO4Gurs/Mau1Wss+08QgNYzF0dIqZWMvpskrt/TbxbmHU+7zMTUOvPEq/lR4BLJmjMBgBGfQ==
dependencies:
- cross-fetch "3.1.5"
+ "@puppeteer/browsers" "1.9.1"
+ chromium-bidi "0.5.6"
+ cross-fetch "4.0.0"
debug "4.3.4"
- devtools-protocol "0.0.1056733"
- extract-zip "2.0.1"
- https-proxy-agent "5.0.1"
- proxy-from-env "1.1.0"
- rimraf "3.0.2"
- tar-fs "2.1.1"
- unbzip2-stream "1.4.3"
- ws "8.10.0"
+ devtools-protocol "0.0.1232444"
+ ws "8.16.0"
-puppeteer@19.3.0:
- version "19.3.0"
- resolved "https://registry.npmjs.org/puppeteer/-/puppeteer-19.3.0.tgz"
- integrity sha512-WJbi/ULaeuFOz7cfMgJlJCBAZiyqIFeQ6os4h5ex3PVTt2qosXgwI9eruFZqFAwJRv8x5pOuMhWR0aSRgyDqEg==
+puppeteer@21.10.0:
+ version "21.10.0"
+ resolved "https://registry.yarnpkg.com/puppeteer/-/puppeteer-21.10.0.tgz#0cfa8f57ca8d4d53a5f843715a270d36acd36b86"
+ integrity sha512-Y1yQjcLE00hHTDAmv3M3A6hhW0Ytjdp6xr6nyjl7FZ7E7hzp/6Rsw80FbaTJzJHFCplBNi082wrgynbmD7RlYw==
dependencies:
- cosmiconfig "7.0.1"
- devtools-protocol "0.0.1056733"
- https-proxy-agent "5.0.1"
- progress "2.0.3"
- proxy-from-env "1.1.0"
- puppeteer-core "19.3.0"
+ "@puppeteer/browsers" "1.9.1"
+ cosmiconfig "9.0.0"
+ puppeteer-core "21.10.0"
qs@6.11.0, qs@^6.10.1, qs@^6.7.0:
version "6.11.0"
@@ -8595,6 +8737,11 @@ queue-microtask@^1.2.2:
resolved "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz"
integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==
+queue-tick@^1.0.1:
+ version "1.0.1"
+ resolved "https://registry.yarnpkg.com/queue-tick/-/queue-tick-1.0.1.tgz#f6f07ac82c1fd60f82e098b417a80e52f1f4c142"
+ integrity sha512-kJt5qhMxoszgU/62PLP1CJytzd2NKetjSRnyuj31fDd3Rlcz3fzlFdFLD1SItunPwyqEOkca6GbV612BWfaBag==
+
raf@^3.4.1:
version "3.4.1"
resolved "https://registry.npmjs.org/raf/-/raf-3.4.1.tgz"
@@ -8980,7 +9127,7 @@ readable-stream@^2.0.1:
string_decoder "~1.1.1"
util-deprecate "~1.0.1"
-readable-stream@^3.0.6, readable-stream@^3.1.1, readable-stream@^3.4.0, readable-stream@^3.5.0, readable-stream@^3.6.0:
+readable-stream@^3.0.6, readable-stream@^3.5.0, readable-stream@^3.6.0:
version "3.6.0"
resolved "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz"
integrity sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==
@@ -9269,13 +9416,6 @@ reusify@^1.0.4:
resolved "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz"
integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==
-rimraf@3.0.2, rimraf@^3.0.0, rimraf@^3.0.2:
- version "3.0.2"
- resolved "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz"
- integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==
- dependencies:
- glob "^7.1.3"
-
rimraf@^2.6.3:
version "2.7.1"
resolved "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz"
@@ -9283,6 +9423,13 @@ rimraf@^2.6.3:
dependencies:
glob "^7.1.3"
+rimraf@^3.0.0, rimraf@^3.0.2:
+ version "3.0.2"
+ resolved "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz"
+ integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==
+ dependencies:
+ glob "^7.1.3"
+
ripemd160@^2.0.0, ripemd160@^2.0.1:
version "2.0.2"
resolved "https://registry.npmjs.org/ripemd160/-/ripemd160-2.0.2.tgz"
@@ -9608,6 +9755,11 @@ slugify@~1.4.7:
resolved "https://registry.yarnpkg.com/slugify/-/slugify-1.4.7.tgz#e42359d505afd84a44513280868e31202a79a628"
integrity sha512-tf+h5W1IrjNm/9rKKj0JU2MDMruiopx0jjVA5zCdBtcGjfp0+c5rHw/zADLC3IeKlGHtVbHtpfzvYA0OYT+HKg==
+smart-buffer@^4.2.0:
+ version "4.2.0"
+ resolved "https://registry.yarnpkg.com/smart-buffer/-/smart-buffer-4.2.0.tgz#6e1d71fa4f18c05f7d0ff216dd16a481d0e8d9ae"
+ integrity sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==
+
sockjs@^0.3.24:
version "0.3.24"
resolved "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz"
@@ -9617,6 +9769,23 @@ sockjs@^0.3.24:
uuid "^8.3.2"
websocket-driver "^0.7.4"
+socks-proxy-agent@^8.0.2:
+ version "8.0.2"
+ resolved "https://registry.yarnpkg.com/socks-proxy-agent/-/socks-proxy-agent-8.0.2.tgz#5acbd7be7baf18c46a3f293a840109a430a640ad"
+ integrity sha512-8zuqoLv1aP/66PHF5TqwJ7Czm3Yv32urJQHrVyhD7mmA6d61Zv8cIXQYPTWwmg6qlupnPvs/QKDmfa4P/qct2g==
+ dependencies:
+ agent-base "^7.0.2"
+ debug "^4.3.4"
+ socks "^2.7.1"
+
+socks@^2.7.1:
+ version "2.7.1"
+ resolved "https://registry.yarnpkg.com/socks/-/socks-2.7.1.tgz#d8e651247178fde79c0663043e07240196857d55"
+ integrity sha512-7maUZy1N7uo6+WVEX6psASxtNlKaNVMlGQKkG/63nEDdLOWNbiUMoLK7X4uYoLhQstau72mLgfEWcXcwsaHbYQ==
+ dependencies:
+ ip "^2.0.0"
+ smart-buffer "^4.2.0"
+
source-map-js@^1.0.2:
version "1.0.2"
resolved "https://registry.npmjs.org/source-map-js/-/source-map-js-1.0.2.tgz"
@@ -9731,6 +9900,14 @@ stream-browserify@3.0.0:
inherits "~2.0.4"
readable-stream "^3.5.0"
+streamx@^2.15.0:
+ version "2.15.6"
+ resolved "https://registry.yarnpkg.com/streamx/-/streamx-2.15.6.tgz#28bf36997ebc7bf6c08f9eba958735231b833887"
+ integrity sha512-q+vQL4AAz+FdfT137VF69Cc/APqUbxy+MDOImRrMvchJpigHj9GksgDU2LYbO9rx7RX6osWgxJB2WxhYv4SZAw==
+ dependencies:
+ fast-fifo "^1.1.0"
+ queue-tick "^1.0.1"
+
strict-uri-encode@^2.0.0:
version "2.0.0"
resolved "https://registry.npmjs.org/strict-uri-encode/-/strict-uri-encode-2.0.0.tgz"
@@ -9971,26 +10148,23 @@ tapable@^2.0.0, tapable@^2.1.1, tapable@^2.2.0:
resolved "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz"
integrity sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==
-tar-fs@2.1.1:
- version "2.1.1"
- resolved "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz"
- integrity sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==
+tar-fs@3.0.4:
+ version "3.0.4"
+ resolved "https://registry.yarnpkg.com/tar-fs/-/tar-fs-3.0.4.tgz#a21dc60a2d5d9f55e0089ccd78124f1d3771dbbf"
+ integrity sha512-5AFQU8b9qLfZCX9zp2duONhPmZv0hGYiBPJsyUdqMjzq/mqVpy/rEUSeHk1+YitmxugaptgBh5oDGU3VsAJq4w==
dependencies:
- chownr "^1.1.1"
mkdirp-classic "^0.5.2"
pump "^3.0.0"
- tar-stream "^2.1.4"
+ tar-stream "^3.1.5"
-tar-stream@^2.1.4:
- version "2.2.0"
- resolved "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz"
- integrity sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==
+tar-stream@^3.1.5:
+ version "3.1.7"
+ resolved "https://registry.yarnpkg.com/tar-stream/-/tar-stream-3.1.7.tgz#24b3fb5eabada19fe7338ed6d26e5f7c482e792b"
+ integrity sha512-qJj60CXt7IU1Ffyc3NJMjh6EkuCFej46zUqJ4J7pqYlThyd9bO0XBTmcOIhSzZJVWfsLks0+nle/j538YAW9RQ==
dependencies:
- bl "^4.0.3"
- end-of-stream "^1.4.1"
- fs-constants "^1.0.0"
- inherits "^2.0.3"
- readable-stream "^3.1.1"
+ b4a "^1.6.4"
+ fast-fifo "^1.2.0"
+ streamx "^2.15.0"
tar@^6.1.11:
version "6.1.13"
@@ -10204,16 +10378,16 @@ tslib@^1.8.1:
resolved "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz"
integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==
+tslib@^2.0.1, tslib@^2.1.0:
+ version "2.6.2"
+ resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae"
+ integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==
+
tslib@^2.0.3, tslib@^2.3.1:
version "2.4.0"
resolved "https://registry.npmjs.org/tslib/-/tslib-2.4.0.tgz"
integrity sha512-d6xOpEDfsi2CZVlPQzGeux8XMwLT9hssAsaPYExaQMuYskwb+x1x7J371tWlbBdWHroy99KnVB6qIkUbs5X3UQ==
-tslib@^2.1.0:
- version "2.6.2"
- resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae"
- integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==
-
tsutils@^3.21.0:
version "3.21.0"
resolved "https://registry.npmjs.org/tsutils/-/tsutils-3.21.0.tgz"
@@ -10338,6 +10512,11 @@ unique-slug@^4.0.0:
dependencies:
imurmurhash "^0.1.4"
+universalify@^0.1.0:
+ version "0.1.2"
+ resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66"
+ integrity sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==
+
universalify@^0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.2.0.tgz#6451760566fa857534745ab1dde952d1b1761be0"
@@ -10386,6 +10565,11 @@ url@0.11.3:
punycode "^1.4.1"
qs "^6.11.2"
+urlpattern-polyfill@10.0.0:
+ version "10.0.0"
+ resolved "https://registry.yarnpkg.com/urlpattern-polyfill/-/urlpattern-polyfill-10.0.0.tgz#f0a03a97bfb03cdf33553e5e79a2aadd22cac8ec"
+ integrity sha512-H/A06tKD7sS1O1X2SshBVeA5FLycRpjqiBeqGKmBwBDBy28EnRjORxTNe269KSSr5un5qyWi1iL61wLxpd+ZOg==
+
use-sync-external-store@^1.0.0:
version "1.2.0"
resolved "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz"
@@ -11123,10 +11307,10 @@ write-file-atomic@^4.0.1:
imurmurhash "^0.1.4"
signal-exit "^3.0.7"
-ws@8.10.0:
- version "8.10.0"
- resolved "https://registry.npmjs.org/ws/-/ws-8.10.0.tgz"
- integrity sha512-+s49uSmZpvtAsd2h37vIPy1RBusaLawVe8of+GyEPsaJTCMpj/2v8NpeK1SHXjBlQ95lQTmQofOJnFiLoaN3yw==
+ws@8.16.0:
+ version "8.16.0"
+ resolved "https://registry.yarnpkg.com/ws/-/ws-8.16.0.tgz#d1cd774f36fbc07165066a60e40323eab6446fd4"
+ integrity sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==
ws@^8.2.3:
version "8.8.1"
@@ -11178,7 +11362,7 @@ yargs-parser@^21.1.1:
resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35"
integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==
-yargs@^17.0.1:
+yargs@17.7.2, yargs@^17.0.1:
version "17.7.2"
resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269"
integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==
From a747b2656441194a168391c126c562c7a3997db0 Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Fri, 2 Feb 2024 21:21:04 +0100
Subject: [PATCH 010/128] Bug 1823654 - Ignore standardized use of f-strings in
git blame
---
.git-blame-ignore-revs | 3 +++
1 file changed, 3 insertions(+)
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index b35d99602c5..0888963e9f5 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -1,2 +1,5 @@
# Switch to double quotes everywhere in Python
cfb19a5ef8eb49c4b74d2356eeefaa242ccc51f0
+
+# Standardize on modern Python features like f-strings
+8028121253101328c3c8576c5186cfeafcb8a691
\ No newline at end of file
From d9a1ac1e960d1f1df934ece3f60e22a4ac80d4ad Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Fri, 2 Feb 2024 21:06:25 +0100
Subject: [PATCH 011/128] Bug 1823654 - show fixes applied by ruff
---
.pre-commit-config.yaml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 1f5317503da..b8db3b8b2f9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -16,6 +16,6 @@ repos:
rev: v0.1.14
hooks:
- id: ruff
- args: [--fix]
+ args: [--fix, --show-fixes]
- id: ruff-format
exclude: ^treeherder/.*/migrations
From d46c4b4b647bb80ab971a1331a774c124b9d2dca Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 5 Feb 2024 01:34:05 +0000
Subject: [PATCH 012/128] Update Yarn to v1.22.21
---
package.json | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/package.json b/package.json
index 994b09486a6..8fa0b1bbb8d 100644
--- a/package.json
+++ b/package.json
@@ -8,7 +8,7 @@
"license": "MPL-2.0",
"engines": {
"node": "21.1.0",
- "yarn": "1.22.19"
+ "yarn": "1.22.21"
},
"dependencies": {
"@fortawesome/fontawesome-svg-core": "6.2.1",
From 73e59986afbb6c6de6e522a4ec5758c6cd26b5cd Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Mon, 5 Feb 2024 17:08:48 +0100
Subject: [PATCH 013/128] Update yarn in Dockerfile to 1.22.21
---
docker/Dockerfile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 903753098b5..82b39804f30 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -9,7 +9,7 @@ COPY package.json babel.config.json webpack.config.js yarn.lock /app/
# ensure we have python-venv available for glean
RUN apt-get update && apt-get install python3-venv -y
-RUN npm install -g --force yarn@1.22.19
+RUN npm install -g --force yarn@1.22.21
RUN yarn install
RUN yarn build
From aff22be36d44ba0cca38ef8ae1143cfadd3a7e58 Mon Sep 17 00:00:00 2001
From: Geoff Brown
Date: Mon, 5 Feb 2024 14:53:58 -0700
Subject: [PATCH 014/128] Bug 1878769 - Add pretty names for new Android
platforms
---
ui/helpers/constants.js | 2 ++
1 file changed, 2 insertions(+)
diff --git a/ui/helpers/constants.js b/ui/helpers/constants.js
index 539e221ec7d..5caf7437275 100644
--- a/ui/helpers/constants.js
+++ b/ui/helpers/constants.js
@@ -140,9 +140,11 @@ export const thPlatformMap = {
'android-5-0-x86_64': 'Android 5.0 x86-64',
'android-5-0-x86_64-shippable': 'Android 5.0 x86-64 Shippable',
'android-5-0-x86_64-shippable-lite': 'Android 5.0 x86-64 Lite Shippable',
+ 'android-5-0-geckoview-fat-aar': 'Android 5.0 GeckoView multi-arch fat AAR',
'android-5-0-geckoview-fat-aar-shippable':
'Android 5.0 GeckoView multi-arch fat AAR Shippable',
'android-em-7-0-x86': 'Android 7.0 x86',
+ 'android-em-7-0-x86-qr': 'Android 7.0 x86 WebRender',
'android-em-7-0-x86_64-qr': 'Android 7.0 x86-64 WebRender',
'android-em-7-0-x86_64-lite-qr': 'Android 7.0 x86-64 Lite WebRender',
'android-em-7-0-x86_64-shippable-lite-qr':
From edf68bdb710caf0036dd0c1e9479ea480482c906 Mon Sep 17 00:00:00 2001
From: Tooru Fujisawa
Date: Sat, 18 Nov 2023 16:08:30 +0900
Subject: [PATCH 015/128] Bug 1864125 - Dynamically update intermittent icon
when another successful job appears.
---
ui/job-view/pushes/JobButton.jsx | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/ui/job-view/pushes/JobButton.jsx b/ui/job-view/pushes/JobButton.jsx
index a5a5c61300a..261448a352f 100644
--- a/ui/job-view/pushes/JobButton.jsx
+++ b/ui/job-view/pushes/JobButton.jsx
@@ -39,13 +39,19 @@ export default class JobButtonComponent extends React.Component {
* shallow compare would allow.
*/
shouldComponentUpdate(nextProps, nextState) {
- const { visible, resultStatus, failureClassificationId } = this.props;
+ const {
+ visible,
+ resultStatus,
+ failureClassificationId,
+ intermittent,
+ } = this.props;
const { isSelected, isRunnableSelected } = this.state;
return (
visible !== nextProps.visible ||
resultStatus !== nextProps.resultStatus ||
failureClassificationId !== nextProps.failureClassificationId ||
+ intermittent !== nextProps.intermittent ||
isSelected !== nextState.isSelected ||
isRunnableSelected !== nextState.isRunnableSelected
);
From d8b46596538a1cf26b2c90fd3f8798f92ebfce68 Mon Sep 17 00:00:00 2001
From: Tooru Fujisawa
Date: Sun, 19 Nov 2023 00:04:20 +0900
Subject: [PATCH 016/128] Bug 1766520 - Use the same font size for job button
and group button.
---
ui/css/treeherder-job-buttons.css | 1 +
1 file changed, 1 insertion(+)
diff --git a/ui/css/treeherder-job-buttons.css b/ui/css/treeherder-job-buttons.css
index 7cb5a718572..a3ab3735b48 100644
--- a/ui/css/treeherder-job-buttons.css
+++ b/ui/css/treeherder-job-buttons.css
@@ -23,6 +23,7 @@
vertical-align: 0;
line-height: 1.32;
cursor: pointer;
+ font-size: 12px;
}
.group-btn::before {
From a2da69a319e89bc19c795ea085e74444521e1639 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 12 Feb 2024 01:21:37 +0000
Subject: [PATCH 017/128] Update dependency @testing-library/jest-dom to v6.1.6
---
package.json | 2 +-
yarn.lock | 18 +++++++++---------
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/package.json b/package.json
index 8fa0b1bbb8d..e25fd899af8 100644
--- a/package.json
+++ b/package.json
@@ -85,7 +85,7 @@
"@pollyjs/adapter-puppeteer": "5.1.1",
"@pollyjs/core": "5.1.1",
"@pollyjs/persister-fs": "6.0.6",
- "@testing-library/jest-dom": "6.1.4",
+ "@testing-library/jest-dom": "6.1.6",
"@testing-library/react": "12.0.0",
"babel-loader": "9.1.3",
"clean-webpack-plugin": "4.0.0",
diff --git a/yarn.lock b/yarn.lock
index 1ee5d4efbb0..d670eb95e8f 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -2,10 +2,10 @@
# yarn lockfile v1
-"@adobe/css-tools@^4.3.1":
- version "4.3.1"
- resolved "https://registry.yarnpkg.com/@adobe/css-tools/-/css-tools-4.3.1.tgz#abfccb8ca78075a2b6187345c26243c1a0842f28"
- integrity sha512-/62yikz7NLScCGAAST5SHdnjaDJQBDq0M2muyRTpf2VQhw6StBg2ALiu73zSJQ4fMVLA+0uBhBHAle7Wg+2kSg==
+"@adobe/css-tools@^4.3.2":
+ version "4.3.3"
+ resolved "https://registry.yarnpkg.com/@adobe/css-tools/-/css-tools-4.3.3.tgz#90749bde8b89cd41764224f5aac29cd4138f75ff"
+ integrity sha512-rE0Pygv0sEZ4vBWHlAgJLGDU7Pm8xoO6p3wsEceb7GYAjScrOHpEo8KK/eVkAcnSM+slAEtXjA2JpdjLp4fJQQ==
"@ampproject/remapping@^2.1.0":
version "2.2.0"
@@ -2096,12 +2096,12 @@
lz-string "^1.4.4"
pretty-format "^27.0.2"
-"@testing-library/jest-dom@6.1.4":
- version "6.1.4"
- resolved "https://registry.yarnpkg.com/@testing-library/jest-dom/-/jest-dom-6.1.4.tgz#cf0835c33bc5ef00befb9e672b1e3e6a710e30e3"
- integrity sha512-wpoYrCYwSZ5/AxcrjLxJmCU6I5QAJXslEeSiMQqaWmP2Kzpd1LvF/qxmAIW2qposULGWq2gw30GgVNFLSc2Jnw==
+"@testing-library/jest-dom@6.1.6":
+ version "6.1.6"
+ resolved "https://registry.yarnpkg.com/@testing-library/jest-dom/-/jest-dom-6.1.6.tgz#d9a3ce61cd74ea792622d3da78a830f6786e8d93"
+ integrity sha512-YwuiOdYEcxhfC2u5iNKlvg2Q5MgbutovP6drq7J1HrCbvR+G58BbtoCoq+L/kNlrNFsu2Kt3jaFAviLVxYHJZg==
dependencies:
- "@adobe/css-tools" "^4.3.1"
+ "@adobe/css-tools" "^4.3.2"
"@babel/runtime" "^7.9.2"
aria-query "^5.0.0"
chalk "^3.0.0"
From a867b8b0de2d9af7feb5f3b522c568af35568951 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 12 Feb 2024 03:09:24 +0000
Subject: [PATCH 018/128] Update dependency cacache to v18.0.2
---
package.json | 2 +-
yarn.lock | 20 ++++++++++----------
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/package.json b/package.json
index e25fd899af8..485f6693695 100644
--- a/package.json
+++ b/package.json
@@ -137,6 +137,6 @@
"test:watch": "node ./node_modules/jest/bin/jest --watch"
},
"resolutions": {
- "cacache": "18.0.0"
+ "cacache": "18.0.2"
}
}
diff --git a/yarn.lock b/yarn.lock
index d670eb95e8f..ba1765664f6 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -3496,17 +3496,17 @@ bytes@3.1.2:
resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5"
integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==
-cacache@18.0.0:
- version "18.0.0"
- resolved "https://registry.yarnpkg.com/cacache/-/cacache-18.0.0.tgz#17a9ecd6e1be2564ebe6cdca5f7cfed2bfeb6ddc"
- integrity sha512-I7mVOPl3PUCeRub1U8YoGz2Lqv9WOBpobZ8RyWFXmReuILz+3OAyTa5oH3QPdtKZD7N0Yk00aLfzn0qvp8dZ1w==
+cacache@18.0.2:
+ version "18.0.2"
+ resolved "https://registry.yarnpkg.com/cacache/-/cacache-18.0.2.tgz#fd527ea0f03a603be5c0da5805635f8eef00c60c"
+ integrity sha512-r3NU8h/P+4lVUHfeRw1dtgQYar3DZMm4/cm2bZgOvrFC/su7budSOeqh52VJIC4U4iG1WWwV6vRW0znqBvxNuw==
dependencies:
"@npmcli/fs" "^3.1.0"
fs-minipass "^3.0.0"
glob "^10.2.2"
lru-cache "^10.0.1"
minipass "^7.0.3"
- minipass-collect "^1.0.2"
+ minipass-collect "^2.0.1"
minipass-flush "^1.0.5"
minipass-pipeline "^1.2.4"
p-map "^4.0.0"
@@ -7642,12 +7642,12 @@ minimist@^1.2.7:
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
-minipass-collect@^1.0.2:
- version "1.0.2"
- resolved "https://registry.yarnpkg.com/minipass-collect/-/minipass-collect-1.0.2.tgz#22b813bf745dc6edba2576b940022ad6edc8c617"
- integrity sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==
+minipass-collect@^2.0.1:
+ version "2.0.1"
+ resolved "https://registry.yarnpkg.com/minipass-collect/-/minipass-collect-2.0.1.tgz#1621bc77e12258a12c60d34e2276ec5c20680863"
+ integrity sha512-D7V8PO9oaz7PWGLbCACuI1qEOsq7UKfLotx/C0Aet43fCUB/wfQ7DYeq2oR/svFJGYDHPr38SHATeaj/ZoKHKw==
dependencies:
- minipass "^3.0.0"
+ minipass "^7.0.3"
minipass-flush@^1.0.5:
version "1.0.5"
From 67d97a04c18b9388c05a77f646a2a07603f3d186 Mon Sep 17 00:00:00 2001
From: Gregory Mierzwinski
Date: Mon, 12 Feb 2024 13:49:20 -0500
Subject: [PATCH 019/128] Bug 1879316 - Use the absolute value of the delta in
the alert threshold check. (#7909)
This patch fixes an issue where, for an improvement, the delta compared against the absolute alert threshold was negative. Because a negative delta is always below a positive threshold, improvements never generated an alert. With this change, the magnitude of the delta is checked, so improvement alerts are now produced for tests that use the absolute threshold.
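For illustration only (not part of the patch): a minimal Python sketch of the check described above, using hypothetical threshold and delta values, showing why a negative delta never tripped the absolute threshold before the magnitude was taken.

# Illustrative sketch only; values and names are hypothetical, not Treeherder code.
alert_threshold = 5.0      # absolute-change threshold configured on the signature

regression_delta = 8.0     # value went up  -> positive delta
improvement_delta = -8.0   # value went down -> negative delta

# Old check: a negative delta is always below a positive threshold,
# so improvements were skipped and never produced an alert.
old_skips_improvement = improvement_delta < alert_threshold        # True -> skipped

# New check: compare the magnitude of the change instead.
new_skips_improvement = abs(improvement_delta) < alert_threshold   # False -> alert generated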
---
treeherder/perf/alerts.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/treeherder/perf/alerts.py b/treeherder/perf/alerts.py
index 7b39b68982f..7c01b5d5704 100644
--- a/treeherder/perf/alerts.py
+++ b/treeherder/perf/alerts.py
@@ -129,7 +129,7 @@ def generate_new_alerts_in_series(signature):
and alert_properties.pct_change < alert_threshold
) or (
signature.alert_change_type == PerformanceSignature.ALERT_ABS
- and alert_properties.delta < alert_threshold
+ and abs(alert_properties.delta) < alert_threshold
):
continue
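For context on the behavioral change above: the `continue` skips alert generation when the change is below the threshold. The following is a minimal sketch, not Treeherder's actual module layout; `should_skip_alert`, `delta`, `alert_threshold` and `use_abs` are hypothetical stand-ins for `alert_properties.delta`, `signature.alert_threshold` and the ALERT_ABS branch, used only to show why improvements were silently dropped before taking the absolute value.
    # Hypothetical illustration of the absolute-threshold check before/after the patch.
    def should_skip_alert(delta: float, alert_threshold: float, use_abs: bool) -> bool:
        """Return True when the change is considered too small to alert on."""
        value = abs(delta) if use_abs else delta
        return value < alert_threshold
    # A 10-unit improvement (delta = -10) against a threshold of 5:
    assert should_skip_alert(-10, 5, use_abs=False) is True   # old check: improvement always skipped
    assert should_skip_alert(-10, 5, use_abs=True) is False   # patched check: an alert is generated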
From 106db496a7dee8a234173a224cb06694d37e788e Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 19 Feb 2024 00:35:42 +0000
Subject: [PATCH 020/128] Update dependency html-webpack-plugin to v5.5.4
---
package.json | 2 +-
yarn.lock | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/package.json b/package.json
index 485f6693695..418728ae729 100644
--- a/package.json
+++ b/package.json
@@ -101,7 +101,7 @@
"eslint-plugin-react": "7.16.0",
"fetch-mock": "9.4.0",
"html-loader": "4.2.0",
- "html-webpack-plugin": "5.5.3",
+ "html-webpack-plugin": "5.5.4",
"jest": "28.1.3",
"jest-environment-puppeteer": "9.0.1",
"jest-puppeteer": "9.0.1",
diff --git a/yarn.lock b/yarn.lock
index ba1765664f6..b907144bdd5 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -5906,10 +5906,10 @@ html-minifier-terser@^7.0.0:
relateurl "^0.2.7"
terser "^5.14.2"
-html-webpack-plugin@5.5.3:
- version "5.5.3"
- resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-5.5.3.tgz#72270f4a78e222b5825b296e5e3e1328ad525a3e"
- integrity sha512-6YrDKTuqaP/TquFH7h4srYWsZx+x6k6+FbsTm0ziCwGHDP78Unr1r9F/H4+sGmMbX08GQcJ+K64x55b+7VM/jg==
+html-webpack-plugin@5.5.4:
+ version "5.5.4"
+ resolved "https://registry.yarnpkg.com/html-webpack-plugin/-/html-webpack-plugin-5.5.4.tgz#517a48e6f046ff1ae1a172c983cd993eb79d2f6a"
+ integrity sha512-3wNSaVVxdxcu0jd4FpQFoICdqgxs4zIQQvj+2yQKFfBOnLETQ6X5CDWdeasuGlSsooFlMkEioWDTqBv1wvw5Iw==
dependencies:
"@types/html-minifier-terser" "^6.0.0"
html-minifier-terser "^6.0.2"
From b91e3743fe3aa615ad545184d07dc5eabd10dfa0 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 26 Feb 2024 01:47:03 +0000
Subject: [PATCH 021/128] Update dependency jest-environment-puppeteer to
v9.0.2
---
package.json | 2 +-
yarn.lock | 87 ++++++++++++++++++++++++++++++++++++++++++++++++----
2 files changed, 82 insertions(+), 7 deletions(-)
diff --git a/package.json b/package.json
index 418728ae729..340ded2ec29 100644
--- a/package.json
+++ b/package.json
@@ -103,7 +103,7 @@
"html-loader": "4.2.0",
"html-webpack-plugin": "5.5.4",
"jest": "28.1.3",
- "jest-environment-puppeteer": "9.0.1",
+ "jest-environment-puppeteer": "9.0.2",
"jest-puppeteer": "9.0.1",
"markdownlint-cli": "0.32.2",
"mini-css-extract-plugin": "2.6.1",
diff --git a/yarn.lock b/yarn.lock
index b907144bdd5..5d12a25720d 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1390,7 +1390,7 @@
dependencies:
"@hapi/boom" "9.x.x"
-"@hapi/hoek@9.x.x":
+"@hapi/hoek@9.x.x", "@hapi/hoek@^9.3.0":
version "9.3.0"
resolved "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz"
integrity sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ==
@@ -1400,9 +1400,9 @@
resolved "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.2.1.tgz"
integrity sha512-gfta+H8aziZsm8pZa0vj04KO6biEiisppNgA1kbJvFrrWu9Vm7eaUEy76DIxsuTaWvti5fkJVhllWc6ZTE+Mdw==
-"@hapi/topo@^5.0.0":
+"@hapi/topo@^5.0.0", "@hapi/topo@^5.1.0":
version "5.1.0"
- resolved "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz"
+ resolved "https://registry.yarnpkg.com/@hapi/topo/-/topo-5.1.0.tgz#dc448e332c6c6e37a4dc02fd84ba8d44b9afb012"
integrity sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==
dependencies:
"@hapi/hoek" "^9.0.0"
@@ -2029,6 +2029,13 @@
dependencies:
"@hapi/hoek" "^9.0.0"
+"@sideway/address@^4.1.5":
+ version "4.1.5"
+ resolved "https://registry.yarnpkg.com/@sideway/address/-/address-4.1.5.tgz#4bc149a0076623ced99ca8208ba780d65a99b9d5"
+ integrity sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q==
+ dependencies:
+ "@hapi/hoek" "^9.0.0"
+
"@sideway/formula@^3.0.1":
version "3.0.1"
resolved "https://registry.yarnpkg.com/@sideway/formula/-/formula-3.0.1.tgz#80fcbcbaf7ce031e0ef2dd29b1bfc7c3f583611f"
@@ -3097,6 +3104,15 @@ axios@^0.27.2:
follow-redirects "^1.14.9"
form-data "^4.0.0"
+axios@^1.6.1:
+ version "1.6.7"
+ resolved "https://registry.yarnpkg.com/axios/-/axios-1.6.7.tgz#7b48c2e27c96f9c68a2f8f31e2ab19f59b06b0a7"
+ integrity sha512-/hDJGff6/c7u0hDkvkGxR/oy6CbCs8ziCsC7SqmhjfozqiJGc8Z11wrv9z9lYfY4K8l+H9TpjcMDX0xOZmx+RA==
+ dependencies:
+ follow-redirects "^1.15.4"
+ form-data "^4.0.0"
+ proxy-from-env "^1.1.0"
+
axobject-query@^2.0.2:
version "2.2.0"
resolved "https://registry.npmjs.org/axobject-query/-/axobject-query-2.2.0.tgz"
@@ -5360,6 +5376,11 @@ follow-redirects@^1.14.9:
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.3.tgz#fe2f3ef2690afce7e82ed0b44db08165b207123a"
integrity sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==
+follow-redirects@^1.15.4:
+ version "1.15.5"
+ resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.5.tgz#54d4d6d062c0fa7d9d17feb008461550e3ba8020"
+ integrity sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==
+
for-each@^0.3.3:
version "0.3.3"
resolved "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz"
@@ -6600,6 +6621,19 @@ jest-dev-server@^9.0.1:
tree-kill "^1.2.2"
wait-on "^7.0.1"
+jest-dev-server@^9.0.2:
+ version "9.0.2"
+ resolved "https://registry.yarnpkg.com/jest-dev-server/-/jest-dev-server-9.0.2.tgz#9a1ab8a8eefe76e5115c9266944b7390cd1495b3"
+ integrity sha512-Zc/JB0IlNNrpXkhBw+h86cGrde/Mey52KvF+FER2eyrtYJTHObOwW7Iarxm3rPyTKby5+3Y2QZtl8pRz/5GCxg==
+ dependencies:
+ chalk "^4.1.2"
+ cwd "^0.10.0"
+ find-process "^1.4.7"
+ prompts "^2.4.2"
+ spawnd "^9.0.2"
+ tree-kill "^1.2.2"
+ wait-on "^7.2.0"
+
jest-diff@^28.1.3:
version "28.1.3"
resolved "https://registry.npmjs.org/jest-diff/-/jest-diff-28.1.3.tgz"
@@ -6666,7 +6700,18 @@ jest-environment-node@^29.7.0:
jest-mock "^29.7.0"
jest-util "^29.7.0"
-jest-environment-puppeteer@9.0.1, jest-environment-puppeteer@^9.0.1:
+jest-environment-puppeteer@9.0.2:
+ version "9.0.2"
+ resolved "https://registry.yarnpkg.com/jest-environment-puppeteer/-/jest-environment-puppeteer-9.0.2.tgz#c0a0382c8147e2e7f6fff085c3d8916ae210cbf5"
+ integrity sha512-t7+W4LUiPoOz+xpKREgnu6IElMuRthOWTkrThDZqVKPmLhwbK3yx7OCiX8xT1Pw/Cv5WnSoNhwtN7czdCC3fQg==
+ dependencies:
+ chalk "^4.1.2"
+ cosmiconfig "^8.3.6"
+ deepmerge "^4.3.1"
+ jest-dev-server "^9.0.2"
+ jest-environment-node "^29.7.0"
+
+jest-environment-puppeteer@^9.0.1:
version "9.0.1"
resolved "https://registry.yarnpkg.com/jest-environment-puppeteer/-/jest-environment-puppeteer-9.0.1.tgz#1b87f07410652c5a5782ed6693bb04bed593d086"
integrity sha512-5WC3w2+gUNMNVNdeRwyc5xpd9lbTGTVEanESaW3bCW7SIKJKIPMDLgfXhYswW2V6VeHIisuxIDx+hx5qczt4Rw==
@@ -7015,6 +7060,17 @@ jest@28.1.3:
import-local "^3.0.2"
jest-cli "^28.1.3"
+joi@^17.11.0:
+ version "17.12.2"
+ resolved "https://registry.yarnpkg.com/joi/-/joi-17.12.2.tgz#283a664dabb80c7e52943c557aab82faea09f521"
+ integrity sha512-RonXAIzCiHLc8ss3Ibuz45u28GOsWE1UpfDXLbN/9NKbL4tCJf8TWYVKsoYuuh+sAUt7fsSNpA+r2+TBA6Wjmw==
+ dependencies:
+ "@hapi/hoek" "^9.3.0"
+ "@hapi/topo" "^5.1.0"
+ "@sideway/address" "^4.1.5"
+ "@sideway/formula" "^3.0.1"
+ "@sideway/pinpoint" "^2.0.0"
+
joi@^17.7.0:
version "17.11.0"
resolved "https://registry.yarnpkg.com/joi/-/joi-17.11.0.tgz#aa9da753578ec7720e6f0ca2c7046996ed04fc1a"
@@ -7637,7 +7693,7 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6:
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.7.tgz#daa1c4d91f507390437c6a8bc01078e7000c4d18"
integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==
-minimist@^1.2.7:
+minimist@^1.2.7, minimist@^1.2.8:
version "1.2.8"
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
@@ -9460,7 +9516,7 @@ run-parallel@^1.1.9:
dependencies:
queue-microtask "^1.2.2"
-rxjs@^7.8.0:
+rxjs@^7.8.0, rxjs@^7.8.1:
version "7.8.1"
resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-7.8.1.tgz#6f6f3d99ea8044291efd92e7c7fcf562c4057543"
integrity sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==
@@ -9830,6 +9886,14 @@ spawnd@^9.0.1:
signal-exit "^4.1.0"
tree-kill "^1.2.2"
+spawnd@^9.0.2:
+ version "9.0.2"
+ resolved "https://registry.yarnpkg.com/spawnd/-/spawnd-9.0.2.tgz#7799635d183b27552e90ca639876dac10d45f7f7"
+ integrity sha512-nl8DVHEDQ57IcKakzpjanspVChkMpGLuVwMR/eOn9cXE55Qr6luD2Kn06sA0ootRMdgrU4tInN6lA6ohTNvysw==
+ dependencies:
+ signal-exit "^4.1.0"
+ tree-kill "^1.2.2"
+
spdy-transport@^3.0.0:
version "3.0.0"
resolved "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz"
@@ -10999,6 +11063,17 @@ wait-on@^7.0.1:
minimist "^1.2.7"
rxjs "^7.8.0"
+wait-on@^7.2.0:
+ version "7.2.0"
+ resolved "https://registry.yarnpkg.com/wait-on/-/wait-on-7.2.0.tgz#d76b20ed3fc1e2bebc051fae5c1ff93be7892928"
+ integrity sha512-wCQcHkRazgjG5XoAq9jbTMLpNIjoSlZslrJ2+N9MxDsGEv1HnFoVjOCexL0ESva7Y9cu350j+DWADdk54s4AFQ==
+ dependencies:
+ axios "^1.6.1"
+ joi "^17.11.0"
+ lodash "^4.17.21"
+ minimist "^1.2.8"
+ rxjs "^7.8.1"
+
walker@^1.0.8:
version "1.0.8"
resolved "https://registry.npmjs.org/walker/-/walker-1.0.8.tgz"
From 8f55e03f71d158d0b004b578f22b21d01016aff2 Mon Sep 17 00:00:00 2001
From: florinbilt <160469273+florinbilt@users.noreply.github.com>
Date: Tue, 27 Feb 2024 15:02:12 +0200
Subject: [PATCH 022/128] 3305 - Make the length of the short revision hash
constant across Treeherder (#7914)
* 3305 - Make the length of the short revision hash constant with bugzilla
---
tests/ui/perfherder/graphs-view/graphs_view_test.jsx | 4 ++--
ui/perfherder/graphs/GraphTooltip.jsx | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/tests/ui/perfherder/graphs-view/graphs_view_test.jsx b/tests/ui/perfherder/graphs-view/graphs_view_test.jsx
index 5da39913ad4..521600f83ac 100644
--- a/tests/ui/perfherder/graphs-view/graphs_view_test.jsx
+++ b/tests/ui/perfherder/graphs-view/graphs_view_test.jsx
@@ -206,7 +206,7 @@ test('Using select query param displays tooltip for correct datapoint', async ()
const graphTooltip = await waitFor(() => getByTestId('graphTooltip'));
const expectedRevision = '3afb892abb74c6d281f3e66431408cbb2e16b8c4';
const revision = await waitFor(() =>
- getByText(expectedRevision.slice(0, 13)),
+ getByText(expectedRevision.slice(0, 12)),
);
const repoName = await waitFor(() => getByTestId('repoName'));
const platform = await waitFor(() => getByTestId('platform'));
@@ -226,7 +226,7 @@ test('Using select query param displays tooltip for correct datapoint with repli
const graphTooltip = await waitFor(() => getByTestId('graphTooltip'));
const expectedRevision = '3afb892abb74c6d281f3e66431408cbb2e16b8c4';
const revision = await waitFor(() =>
- getByText(expectedRevision.slice(0, 13)),
+ getByText(expectedRevision.slice(0, 12)),
);
const repoName = await waitFor(() => getByTestId('repoName'));
const platform = await waitFor(() => getByTestId('platform'));
diff --git a/ui/perfherder/graphs/GraphTooltip.jsx b/ui/perfherder/graphs/GraphTooltip.jsx
index 3aa4d07472e..e394848fde4 100644
--- a/ui/perfherder/graphs/GraphTooltip.jsx
+++ b/ui/perfherder/graphs/GraphTooltip.jsx
@@ -212,7 +212,7 @@ const GraphTooltip = ({
- {dataPointDetails.revision.slice(0, 13)}
+ {dataPointDetails.revision.slice(0, 12)}
{' '}
{(dataPointDetails.jobId || prevRevision) && '('}
{dataPointDetails.jobId && (
From 90ed2aeb00dff41030cc614ca3aef6536088a028 Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Fri, 9 Feb 2024 14:04:18 +0100
Subject: [PATCH 023/128] Bug 1879527 - drop ESR 102 repositories
Development work on these stopped more than 4 months ago, which is longer than Treeherder's
data retention period.
---
treeherder/etl/jobs.py | 1 -
treeherder/model/fixtures/repository.json | 4 ++--
ui/intermittent-failures/constants.js | 2 --
3 files changed, 2 insertions(+), 5 deletions(-)
diff --git a/treeherder/etl/jobs.py b/treeherder/etl/jobs.py
index c98ca196d41..9ca1732c8f1 100644
--- a/treeherder/etl/jobs.py
+++ b/treeherder/etl/jobs.py
@@ -320,7 +320,6 @@ def _schedule_log_parsing(job, job_logs, result, repository):
"mozilla-central",
"mozilla-beta",
"mozilla-release",
- "mozilla-esr102",
"mozilla-esr115",
"firefox-android",
"reference-browser",
diff --git a/treeherder/model/fixtures/repository.json b/treeherder/model/fixtures/repository.json
index 6d48c5a4b04..47739e6c603 100644
--- a/treeherder/model/fixtures/repository.json
+++ b/treeherder/model/fixtures/repository.json
@@ -1734,7 +1734,7 @@
"dvcs_type": "hg",
"name": "mozilla-esr102",
"url": "https://hg.mozilla.org/releases/mozilla-esr102",
- "active_status": "active",
+ "active_status": "onhold",
"codebase": "gecko",
"repository_group": 2,
"life_cycle_order": 9998,
@@ -1749,7 +1749,7 @@
"dvcs_type": "hg",
"name": "comm-esr102",
"url": "https://hg.mozilla.org/releases/comm-esr102",
- "active_status": "active",
+ "active_status": "onhold",
"codebase": "comm",
"repository_group": 8,
"description": "",
diff --git a/ui/intermittent-failures/constants.js b/ui/intermittent-failures/constants.js
index bc9a66c9461..4bca264f780 100644
--- a/ui/intermittent-failures/constants.js
+++ b/ui/intermittent-failures/constants.js
@@ -3,12 +3,10 @@ export const treeOptions = [
'all',
'trunk',
'mozilla-central',
- 'mozilla-esr102',
'mozilla-esr115',
'autoland',
'firefox-releases',
'comm-central',
- 'comm-esr102',
'comm-esr115',
'comm-releases',
'fenix',
From fdefeb8d9890ac55f1c6e5984886f035fd1d8c59 Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Mon, 4 Mar 2024 16:58:14 +0100
Subject: [PATCH 024/128] Remote docker and ubuntu upgrades (#7922)
* Switch to docker24
* Switch to default Ubuntu image in CircleCI
* Switch tox config from whitelist_externals to allowlist_externals
---
.circleci/config.yml | 12 ++++--------
tox.ini | 6 +++---
2 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 716468c4a7d..5fd7ddf2602 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -51,11 +51,9 @@ jobs:
python-tests-mysql:
machine:
- image: ubuntu-2004:202010-01
+ image: default
steps:
- checkout
- - docker/install-docker:
- version: 19.03.13
- docker/install-docker-compose:
version: 1.29.2
- run:
@@ -71,11 +69,9 @@ jobs:
python-tests-postgres:
machine:
- image: ubuntu-2004:202010-01
+ image: default
steps:
- checkout
- - docker/install-docker:
- version: 19.03.13
- docker/install-docker-compose:
version: 1.29.2
- run:
@@ -94,7 +90,7 @@ jobs:
- image: docker:19.03.15
steps:
- setup_remote_docker:
- version: 20.10.23
+ version: docker24
- checkout
- run:
name: Create a version.json
@@ -115,7 +111,7 @@ jobs:
- image: docker:19.03.15
steps:
- setup_remote_docker:
- version: 20.10.23
+ version: docker24
- checkout
- run:
name: Create a version.json
diff --git a/tox.ini b/tox.ini
index 94fda567703..b285c98fc10 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,7 +6,7 @@ skipsdist=True
toxworkdir={toxinidir}/.tox
[testenv]
-whitelist_externals =
+allowlist_externals =
sh
docker-compose
commands_pre =
@@ -43,14 +43,14 @@ commands_post =
[testenv:docker]
commands_pre =
-whitelist_externals=
+allowlist_externals=
docker-compose
commands =
docker-compose run -e TREEHERDER_DEBUG=False backend bash -c "pytest --cov --cov-report=xml tests/ --runslow -p no:unraisableexception"
[testenv:docker-postgres]
commands_pre =
-whitelist_externals=
+allowlist_externals=
docker-compose
commands =
docker-compose run -e TREEHERDER_DEBUG=False -e DATABASE_URL=psql://postgres:mozilla1234@postgres:5432/treeherder backend bash -c "pytest --cov --cov-report=xml tests/ --runslow -p no:unraisableexception"
From f2f9a0dc4468a09ab3c3a5b8da38ab6aaa4bcf71 Mon Sep 17 00:00:00 2001
From: Marco Castelluccio
Date: Thu, 22 Feb 2024 16:09:33 +0100
Subject: [PATCH 025/128] Bug 1881540 - Enable Dependabot for pyproject.toml
too
---
.github/dependabot.yml | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index c4c0d448b8c..277d495f36b 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,5 +1,13 @@
version: 2
updates:
+- package-ecosystem: pip
+ directory: "/"
+ schedule:
+ interval: daily
+ open-pull-requests-limit: 99
+ labels:
+ - dependencies
+ - python
- package-ecosystem: pip
directory: "/requirements"
schedule:
From d1160eac12ef4bef34a526416a45440ea146b046 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 16:21:30 +0000
Subject: [PATCH 026/128] Update dependency jest-puppeteer to v9.0.2
---
package.json | 2 +-
yarn.lock | 102 +++++++--------------------------------------------
2 files changed, 15 insertions(+), 89 deletions(-)
diff --git a/package.json b/package.json
index 340ded2ec29..782472aabe1 100644
--- a/package.json
+++ b/package.json
@@ -104,7 +104,7 @@
"html-webpack-plugin": "5.5.4",
"jest": "28.1.3",
"jest-environment-puppeteer": "9.0.2",
- "jest-puppeteer": "9.0.1",
+ "jest-puppeteer": "9.0.2",
"markdownlint-cli": "0.32.2",
"mini-css-extract-plugin": "2.6.1",
"path": "0.12.7",
diff --git a/yarn.lock b/yarn.lock
index 5d12a25720d..eca9daeea8f 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1400,7 +1400,7 @@
resolved "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.2.1.tgz"
integrity sha512-gfta+H8aziZsm8pZa0vj04KO6biEiisppNgA1kbJvFrrWu9Vm7eaUEy76DIxsuTaWvti5fkJVhllWc6ZTE+Mdw==
-"@hapi/topo@^5.0.0", "@hapi/topo@^5.1.0":
+"@hapi/topo@^5.1.0":
version "5.1.0"
resolved "https://registry.yarnpkg.com/@hapi/topo/-/topo-5.1.0.tgz#dc448e332c6c6e37a4dc02fd84ba8d44b9afb012"
integrity sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==
@@ -2022,13 +2022,6 @@
pluralize "^8.0.0"
yaml-ast-parser "0.0.43"
-"@sideway/address@^4.1.3":
- version "4.1.3"
- resolved "https://registry.npmjs.org/@sideway/address/-/address-4.1.3.tgz"
- integrity sha512-8ncEUtmnTsMmL7z1YPB47kPUq7LpKWJNFPsRzHiIajGC5uXlWGn+AmkYPcHNl8S4tcEGx+cnORnNYaw2wvL+LQ==
- dependencies:
- "@hapi/hoek" "^9.0.0"
-
"@sideway/address@^4.1.5":
version "4.1.5"
resolved "https://registry.yarnpkg.com/@sideway/address/-/address-4.1.5.tgz#4bc149a0076623ced99ca8208ba780d65a99b9d5"
@@ -3096,14 +3089,6 @@ available-typed-arrays@^1.0.5:
resolved "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.5.tgz"
integrity sha512-DMD0KiN46eipeziST1LPP/STfDU0sufISXmjSgvVsoU2tqxctQeASejWcfNtxYKqETM1UxQ8sp2OrSBWpHY6sw==
-axios@^0.27.2:
- version "0.27.2"
- resolved "https://registry.yarnpkg.com/axios/-/axios-0.27.2.tgz#207658cc8621606e586c85db4b41a750e756d972"
- integrity sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==
- dependencies:
- follow-redirects "^1.14.9"
- form-data "^4.0.0"
-
axios@^1.6.1:
version "1.6.7"
resolved "https://registry.yarnpkg.com/axios/-/axios-1.6.7.tgz#7b48c2e27c96f9c68a2f8f31e2ab19f59b06b0a7"
@@ -5088,10 +5073,10 @@ expand-tilde@^1.2.2:
dependencies:
os-homedir "^1.0.1"
-expect-puppeteer@^9.0.1:
- version "9.0.1"
- resolved "https://registry.yarnpkg.com/expect-puppeteer/-/expect-puppeteer-9.0.1.tgz#2c2efa55984939f0d2bd8dd1443a2d3c3c26d5d0"
- integrity sha512-LqGzoyW4XgZbfJadjllSMCwZflX9gVBqjFUg8qde+etXr/SEFWLBgn98nRAmO3hUjMRNyh5gIFcaSi4DzHAWLw==
+expect-puppeteer@^9.0.2:
+ version "9.0.2"
+ resolved "https://registry.yarnpkg.com/expect-puppeteer/-/expect-puppeteer-9.0.2.tgz#ffbfe163962a607afae942bd70f7f192100000cf"
+ integrity sha512-nv3RD8MOStXOf4bLpr1wiqxPMLL7MwXvtMeZBtGvg5bubAHiHcYBcvDTJwkUjdOWz3scjOnOOl5z6KZakMobCw==
expect@^28.1.3:
version "28.1.3"
@@ -5371,11 +5356,6 @@ follow-redirects@^1.0.0:
resolved "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz"
integrity sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==
-follow-redirects@^1.14.9:
- version "1.15.3"
- resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.3.tgz#fe2f3ef2690afce7e82ed0b44db08165b207123a"
- integrity sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==
-
follow-redirects@^1.15.4:
version "1.15.5"
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.5.tgz#54d4d6d062c0fa7d9d17feb008461550e3ba8020"
@@ -6608,19 +6588,6 @@ jest-config@^28.1.3:
slash "^3.0.0"
strip-json-comments "^3.1.1"
-jest-dev-server@^9.0.1:
- version "9.0.1"
- resolved "https://registry.yarnpkg.com/jest-dev-server/-/jest-dev-server-9.0.1.tgz#75d50b946c94e278401158bcc9a29da23d21204d"
- integrity sha512-eqpJKSvVl4M0ojHZUPNbka8yEzLNbIMiINXDsuMF3lYfIdRO2iPqy+ASR4wBQ6nUyR3OT24oKPWhpsfLhgAVyg==
- dependencies:
- chalk "^4.1.2"
- cwd "^0.10.0"
- find-process "^1.4.7"
- prompts "^2.4.2"
- spawnd "^9.0.1"
- tree-kill "^1.2.2"
- wait-on "^7.0.1"
-
jest-dev-server@^9.0.2:
version "9.0.2"
resolved "https://registry.yarnpkg.com/jest-dev-server/-/jest-dev-server-9.0.2.tgz#9a1ab8a8eefe76e5115c9266944b7390cd1495b3"
@@ -6700,7 +6667,7 @@ jest-environment-node@^29.7.0:
jest-mock "^29.7.0"
jest-util "^29.7.0"
-jest-environment-puppeteer@9.0.2:
+jest-environment-puppeteer@9.0.2, jest-environment-puppeteer@^9.0.2:
version "9.0.2"
resolved "https://registry.yarnpkg.com/jest-environment-puppeteer/-/jest-environment-puppeteer-9.0.2.tgz#c0a0382c8147e2e7f6fff085c3d8916ae210cbf5"
integrity sha512-t7+W4LUiPoOz+xpKREgnu6IElMuRthOWTkrThDZqVKPmLhwbK3yx7OCiX8xT1Pw/Cv5WnSoNhwtN7czdCC3fQg==
@@ -6711,17 +6678,6 @@ jest-environment-puppeteer@9.0.2:
jest-dev-server "^9.0.2"
jest-environment-node "^29.7.0"
-jest-environment-puppeteer@^9.0.1:
- version "9.0.1"
- resolved "https://registry.yarnpkg.com/jest-environment-puppeteer/-/jest-environment-puppeteer-9.0.1.tgz#1b87f07410652c5a5782ed6693bb04bed593d086"
- integrity sha512-5WC3w2+gUNMNVNdeRwyc5xpd9lbTGTVEanESaW3bCW7SIKJKIPMDLgfXhYswW2V6VeHIisuxIDx+hx5qczt4Rw==
- dependencies:
- chalk "^4.1.2"
- cosmiconfig "^8.3.6"
- deepmerge "^4.3.1"
- jest-dev-server "^9.0.1"
- jest-environment-node "^29.7.0"
-
jest-get-type@^28.0.2:
version "28.0.2"
resolved "https://registry.npmjs.org/jest-get-type/-/jest-get-type-28.0.2.tgz"
@@ -6835,13 +6791,13 @@ jest-pnp-resolver@^1.2.2:
resolved "https://registry.npmjs.org/jest-pnp-resolver/-/jest-pnp-resolver-1.2.2.tgz"
integrity sha512-olV41bKSMm8BdnuMsewT4jqlZ8+3TCARAXjZGT9jcoSnrfUnRCqnMoF9XEeoWjbzObpqF9dRhHQj0Xb9QdF6/w==
-jest-puppeteer@9.0.1:
- version "9.0.1"
- resolved "https://registry.yarnpkg.com/jest-puppeteer/-/jest-puppeteer-9.0.1.tgz#a267f0f0abb67806fdede5e20ad8c968743c3781"
- integrity sha512-lNWoUCn1zKO6vlD0uvHLBJHvgBmZ7+DKy+Kd6TkQJO4mJ5aDRqeG4XOuy43yYlS2EYVuzqEru2BgbXSpA8V8Vw==
+jest-puppeteer@9.0.2:
+ version "9.0.2"
+ resolved "https://registry.yarnpkg.com/jest-puppeteer/-/jest-puppeteer-9.0.2.tgz#c5e7d29ad4b2094a27bc20c6dffa201376111664"
+ integrity sha512-ZB0K/tH+0e7foRRn+VpKIufvkW1by8l7ifh62VOdOh5ijEf7yt8W2/PcBNNwP0RLm46AytiBkrIEenvWhxcBRQ==
dependencies:
- expect-puppeteer "^9.0.1"
- jest-environment-puppeteer "^9.0.1"
+ expect-puppeteer "^9.0.2"
+ jest-environment-puppeteer "^9.0.2"
jest-regex-util@^28.0.2:
version "28.0.2"
@@ -7071,17 +7027,6 @@ joi@^17.11.0:
"@sideway/formula" "^3.0.1"
"@sideway/pinpoint" "^2.0.0"
-joi@^17.7.0:
- version "17.11.0"
- resolved "https://registry.yarnpkg.com/joi/-/joi-17.11.0.tgz#aa9da753578ec7720e6f0ca2c7046996ed04fc1a"
- integrity sha512-NgB+lZLNoqISVy1rZocE9PZI36bL/77ie924Ri43yEvi9GUUMPeyVIr8KdFTMUlby1p0PBYMk9spIxEUQYqrJQ==
- dependencies:
- "@hapi/hoek" "^9.0.0"
- "@hapi/topo" "^5.0.0"
- "@sideway/address" "^4.1.3"
- "@sideway/formula" "^3.0.1"
- "@sideway/pinpoint" "^2.0.0"
-
jose@^4.0.4:
version "4.11.1"
resolved "https://registry.npmjs.org/jose/-/jose-4.11.1.tgz"
@@ -7693,7 +7638,7 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6:
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.7.tgz#daa1c4d91f507390437c6a8bc01078e7000c4d18"
integrity sha512-bzfL1YUZsP41gmu/qjrEk0Q6i2ix/cVeAhbCbqH9u3zYutS1cLg00qhrD0M2MVdCcx4Sc0UpP2eBWo9rotpq6g==
-minimist@^1.2.7, minimist@^1.2.8:
+minimist@^1.2.8:
version "1.2.8"
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
@@ -9516,7 +9461,7 @@ run-parallel@^1.1.9:
dependencies:
queue-microtask "^1.2.2"
-rxjs@^7.8.0, rxjs@^7.8.1:
+rxjs@^7.8.1:
version "7.8.1"
resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-7.8.1.tgz#6f6f3d99ea8044291efd92e7c7fcf562c4057543"
integrity sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==
@@ -9878,14 +9823,6 @@ source-map@^0.7.3:
resolved "https://registry.npmjs.org/source-map/-/source-map-0.7.3.tgz"
integrity sha512-CkCj6giN3S+n9qrYiBTX5gystlENnRW5jZeNLHpe6aue+SrHcG5VYwujhW9s4dY31mEGsxBDrHR6oI69fTXsaQ==
-spawnd@^9.0.1:
- version "9.0.1"
- resolved "https://registry.yarnpkg.com/spawnd/-/spawnd-9.0.1.tgz#43b39b4bf5bdf6b5e38fbb7fabb2fbd0385b23b9"
- integrity sha512-vaMk8E9CpbjTYToBxLXowDeArGf1+yI7A6PU6Nr57b2g8BVY8nRi5vTBj3bMF8UkCrMdTMyf/Lh+lrcrW2z7pw==
- dependencies:
- signal-exit "^4.1.0"
- tree-kill "^1.2.2"
-
spawnd@^9.0.2:
version "9.0.2"
resolved "https://registry.yarnpkg.com/spawnd/-/spawnd-9.0.2.tgz#7799635d183b27552e90ca639876dac10d45f7f7"
@@ -11052,17 +10989,6 @@ w3c-xmlserializer@^3.0.0:
dependencies:
xml-name-validator "^4.0.0"
-wait-on@^7.0.1:
- version "7.0.1"
- resolved "https://registry.yarnpkg.com/wait-on/-/wait-on-7.0.1.tgz#5cff9f8427e94f4deacbc2762e6b0a489b19eae9"
- integrity sha512-9AnJE9qTjRQOlTZIldAaf/da2eW0eSRSgcqq85mXQja/DW3MriHxkpODDSUEg+Gri/rKEcXUZHe+cevvYItaog==
- dependencies:
- axios "^0.27.2"
- joi "^17.7.0"
- lodash "^4.17.21"
- minimist "^1.2.7"
- rxjs "^7.8.0"
-
wait-on@^7.2.0:
version "7.2.0"
resolved "https://registry.yarnpkg.com/wait-on/-/wait-on-7.2.0.tgz#d76b20ed3fc1e2bebc051fae5c1ff93be7892928"
From 44df3dc2ec9a27be327c6e36f176dacda66b504b Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 18:18:14 +0000
Subject: [PATCH 027/128] Update dependency style-loader to v3.3.4
---
package.json | 2 +-
yarn.lock | 7 ++++++-
2 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/package.json b/package.json
index 782472aabe1..98adb2d1348 100644
--- a/package.json
+++ b/package.json
@@ -111,7 +111,7 @@
"prettier": "2.0.5",
"puppeteer": "21.10.0",
"setup-polly-jest": "0.9.1",
- "style-loader": "3.3.3",
+ "style-loader": "3.3.4",
"webpack": "5.88.2",
"webpack-cli": "5.1.4",
"webpack-dev-server": "4.9.3",
diff --git a/yarn.lock b/yarn.lock
index eca9daeea8f..df968a22adb 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -10045,7 +10045,12 @@ strip-json-comments@^3.1.0, strip-json-comments@^3.1.1, strip-json-comments@~3.1
resolved "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz"
integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==
-style-loader@3.3.3, style-loader@^3.3.1:
+style-loader@3.3.4:
+ version "3.3.4"
+ resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-3.3.4.tgz#f30f786c36db03a45cbd55b6a70d930c479090e7"
+ integrity sha512-0WqXzrsMTyb8yjZJHDqwmnwRJvhALK9LfRtRc6B4UTWe8AijYLZYZ9thuJTZc2VfQWINADW/j+LiJnfy2RoC1w==
+
+style-loader@^3.3.1:
version "3.3.3"
resolved "https://registry.yarnpkg.com/style-loader/-/style-loader-3.3.3.tgz#bba8daac19930169c0c9c96706749a597ae3acff"
integrity sha512-53BiGLXAcll9maCYtZi2RCQZKa8NQQai5C4horqKyRmHj9H7QmcUyucrH+4KW/gBQbXM2AsB0axoEcFZPlfPcw==
From 35ae2e23c70d4751b3cb8699220c18be89f764a9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 21:22:45 +0000
Subject: [PATCH 028/128] Bump ip from 1.1.8 to 1.1.9
Bumps [ip](https://github.com/indutny/node-ip) from 1.1.8 to 1.1.9.
- [Commits](https://github.com/indutny/node-ip/compare/v1.1.8...v1.1.9)
---
updated-dependencies:
- dependency-name: ip
dependency-type: indirect
...
Signed-off-by: dependabot[bot]
---
yarn.lock | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/yarn.lock b/yarn.lock
index df968a22adb..3952532ec92 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -6149,14 +6149,14 @@ interpret@^3.1.1:
integrity sha512-6xwYfHbajpoF0xLW+iwLkhwgvLoZDfjYfoFNu8ftMoXINzwuymNLd9u/KmwtdT2GbR+/Cz66otEGEVVUHX9QLQ==
ip@^1.1.8:
- version "1.1.8"
- resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.8.tgz#ae05948f6b075435ed3307acce04629da8cdbf48"
- integrity sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==
+ version "1.1.9"
+ resolved "https://registry.yarnpkg.com/ip/-/ip-1.1.9.tgz#8dfbcc99a754d07f425310b86a99546b1151e396"
+ integrity sha512-cyRxvOEpNHNtchU3Ln9KC/auJgup87llfQpQ+t5ghoC/UhL16SWzbueiCsdTnWmqAWl7LadfuwhlqmtOaqMHdQ==
ip@^2.0.0:
- version "2.0.0"
- resolved "https://registry.yarnpkg.com/ip/-/ip-2.0.0.tgz#4cf4ab182fee2314c75ede1276f8c80b479936da"
- integrity sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==
+ version "2.0.1"
+ resolved "https://registry.yarnpkg.com/ip/-/ip-2.0.1.tgz#e8f3595d33a3ea66490204234b77636965307105"
+ integrity sha512-lJUL9imLTNi1ZfXT+DU6rBBdbiKGBuay9B6xGSPVjUeQwaH1RIGqef8RZkUtHioLmSNpPR5M4HVKJGm1j8FWVQ==
ipaddr.js@1.9.1:
version "1.9.1"
From 6662238516d6fbe0dfb758bb67ad32f82e9c6a5f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 17:02:43 +0000
Subject: [PATCH 029/128] Bump betamax from 0.8.1 to 0.9.0
Bumps [betamax](https://github.com/sigmavirus24/betamax) from 0.8.1 to 0.9.0.
- [Release notes](https://github.com/sigmavirus24/betamax/releases)
- [Changelog](https://github.com/betamaxpy/betamax/blob/main/HISTORY.rst)
- [Commits](https://github.com/sigmavirus24/betamax/compare/0.8.1...0.9.0)
---
updated-dependencies:
- dependency-name: betamax
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 7 ++++---
2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 3f262285038..beaa49dc529 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -30,7 +30,7 @@ https://github.com/hugovk/pytest-freezegun/archive/03d7107a877e8f07617f931a379f5
# To test code that's doing advanced communication
# with web services via `requests` library
-betamax==0.8.1
+betamax==0.9.0
betamax-serializers==0.2.1
# pip-compile for pinning versions
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 5865cc877d6..4e9496f3583 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -14,9 +14,9 @@ attrs==23.2.0 \
# via
# outcome
# trio
-betamax==0.8.1 \
- --hash=sha256:5bf004ceffccae881213fb722f34517166b84a34919b92ffc14d1dbd050b71c2 \
- --hash=sha256:aa5ad34cc8d018b35814fb0557d15c78ced9ac56fdc43ccacdb882aa7a5217c1
+betamax==0.9.0 \
+ --hash=sha256:82316e1679bc6879e3c83318d016b54b7c9225ff08c4462de4813e22038d5f94 \
+ --hash=sha256:880d6da87eaf7e61c42bdc4240f0ac04ab5365bd7f2798784c18d37d8cf747bc
# via
# -r requirements/dev.in
# betamax-serializers
@@ -221,6 +221,7 @@ coverage[toml]==7.4.1 \
--hash=sha256:f90515974b39f4dea2f27c0959688621b46d96d5a626cf9c53dbc653a895c05c \
--hash=sha256:fe558371c1bdf3b8fa03e097c523fb9645b8730399c14fe7721ee9c9e2a545d3
# via
+ # coverage
# pytest-cov
# pytest-testmon
distlib==0.3.8 \
From 6413a236627bbe2e24f4e0f6168cd68705fa9c5b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 17:01:30 +0000
Subject: [PATCH 030/128] Bump pyyaml from 6.0 to 6.0.1
Bumps [pyyaml](https://github.com/yaml/pyyaml) from 6.0 to 6.0.1.
- [Changelog](https://github.com/yaml/pyyaml/blob/main/CHANGES)
- [Commits](https://github.com/yaml/pyyaml/compare/6.0...6.0.1)
---
updated-dependencies:
- dependency-name: pyyaml
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 108 +++++++++++++++++++++++-----------------
2 files changed, 64 insertions(+), 46 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index cff15e432b4..d929cd42a98 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -17,7 +17,7 @@ django-cors-headers==4.1.0 # Listed as 3rd party app on settings.py
mozlog==8.0.0
# Used directly and also by Django's YAML serializer.
-PyYAML==6.0 # Imported as yaml
+PyYAML==6.0.1 # Imported as yaml
django-environ==0.10.0 # Imported as environ
uritemplate==4.1.1 # For OpenAPI schema
diff --git a/requirements/common.txt b/requirements/common.txt
index c7c3d73c515..8b05eb124b0 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -692,7 +692,9 @@ moz-measure-noise==2.60.1 \
mozci[cache]==2.3.2 \
--hash=sha256:7f9256f400792c46254bd5422c214f6715e824015696c1ab7ffce5457628c646 \
--hash=sha256:c7126a7bd044e9275cf0f4801ff18561d2420eca436e62bdd920601c1d3b4085
- # via -r requirements/common.in
+ # via
+ # -r requirements/common.in
+ # mozci
mozfile==3.0.0 \
--hash=sha256:3b0afcda2fa8b802ef657df80a56f21619008f61fcc14b756124028d7b7adf5c \
--hash=sha256:92ca1a786abbdf5e6a7aada62d3a4e28f441ef069c7623223add45268e53c789
@@ -1030,7 +1032,9 @@ python-dateutil==2.8.2 \
python-jose[pycryptodome]==3.3.0 \
--hash=sha256:55779b5e6ad599c6336191246e95eb2293a9ddebd555f796a65f838f07e5d78a \
--hash=sha256:9b1376b023f8b298536eedd47ae1089bcdb848f1535ab30555cd92002d78923a
- # via -r requirements/common.in
+ # via
+ # -r requirements/common.in
+ # python-jose
python3-memcached==1.51 \
--hash=sha256:7cbe5951d68eef69d948b7a7ed7decfbd101e15e7f5be007dcd1219ccc584859
# via mozci
@@ -1038,47 +1042,58 @@ pytz==2024.1 \
--hash=sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812 \
--hash=sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319
# via djangorestframework
-pyyaml==6.0 \
- --hash=sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf \
- --hash=sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293 \
- --hash=sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b \
- --hash=sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57 \
- --hash=sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b \
- --hash=sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4 \
- --hash=sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07 \
- --hash=sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba \
- --hash=sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9 \
- --hash=sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287 \
- --hash=sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513 \
- --hash=sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0 \
- --hash=sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782 \
- --hash=sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0 \
- --hash=sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92 \
- --hash=sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f \
- --hash=sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2 \
- --hash=sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc \
- --hash=sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1 \
- --hash=sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c \
- --hash=sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86 \
- --hash=sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4 \
- --hash=sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c \
- --hash=sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34 \
- --hash=sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b \
- --hash=sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d \
- --hash=sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c \
- --hash=sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb \
- --hash=sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7 \
- --hash=sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737 \
- --hash=sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3 \
- --hash=sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d \
- --hash=sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358 \
- --hash=sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53 \
- --hash=sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78 \
- --hash=sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803 \
- --hash=sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a \
- --hash=sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f \
- --hash=sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174 \
- --hash=sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5
+pyyaml==6.0.1 \
+ --hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \
+ --hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \
+ --hash=sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df \
+ --hash=sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741 \
+ --hash=sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206 \
+ --hash=sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27 \
+ --hash=sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595 \
+ --hash=sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62 \
+ --hash=sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98 \
+ --hash=sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696 \
+ --hash=sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290 \
+ --hash=sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9 \
+ --hash=sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d \
+ --hash=sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6 \
+ --hash=sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867 \
+ --hash=sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47 \
+ --hash=sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486 \
+ --hash=sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6 \
+ --hash=sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3 \
+ --hash=sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007 \
+ --hash=sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938 \
+ --hash=sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0 \
+ --hash=sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c \
+ --hash=sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735 \
+ --hash=sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d \
+ --hash=sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28 \
+ --hash=sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4 \
+ --hash=sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba \
+ --hash=sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8 \
+ --hash=sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef \
+ --hash=sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5 \
+ --hash=sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd \
+ --hash=sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3 \
+ --hash=sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0 \
+ --hash=sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515 \
+ --hash=sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c \
+ --hash=sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c \
+ --hash=sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924 \
+ --hash=sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34 \
+ --hash=sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43 \
+ --hash=sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859 \
+ --hash=sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673 \
+ --hash=sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54 \
+ --hash=sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a \
+ --hash=sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b \
+ --hash=sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab \
+ --hash=sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa \
+ --hash=sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c \
+ --hash=sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585 \
+ --hash=sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d \
+ --hash=sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f
# via
# -r requirements/common.in
# mozci
@@ -1436,7 +1451,9 @@ wcwidth==0.2.13 \
whitenoise[brotli]==6.5.0 \
--hash=sha256:15fe60546ac975b58e357ccaeb165a4ca2d0ab697e48450b8f0307ca368195a8 \
--hash=sha256:16468e9ad2189f09f4a8c635a9031cc9bb2cdbc8e5e53365407acf99f7ade9ec
- # via -r requirements/common.in
+ # via
+ # -r requirements/common.in
+ # whitenoise
yarl==1.9.4 \
--hash=sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51 \
--hash=sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce \
@@ -1579,5 +1596,6 @@ zstandard==0.22.0 \
# via mozci
# WARNING: The following packages were not pinned, but pip requires them to be
-# pinned when the requirements file includes hashes. Consider using the --allow-unsafe flag.
+# pinned when the requirements file includes hashes and the requirement is not
+# satisfied by a package already installed. Consider using the --allow-unsafe flag.
# setuptools
From 9183f8d7b821920d1456c7f96157a95f3fb7de9a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 16:29:58 +0000
Subject: [PATCH 031/128] Bump celery from 5.3.1 to 5.3.6
Bumps [celery](https://github.com/celery/celery) from 5.3.1 to 5.3.6.
- [Release notes](https://github.com/celery/celery/releases)
- [Changelog](https://github.com/celery/celery/blob/main/Changelog.rst)
- [Commits](https://github.com/celery/celery/compare/v5.3.1...v5.3.6)
---
updated-dependencies:
- dependency-name: celery
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index d929cd42a98..f139b2065b7 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -2,7 +2,7 @@
gunicorn==20.1.0
whitenoise[brotli]==6.5.0 # Used by Whitenoise to provide Brotli-compressed versions of static files.
Django==4.1.13
-celery==5.3.1 # celery needed for data ingestion
+celery==5.3.6 # celery needed for data ingestion
cached-property==1.5.2 # needed for kombu with --require-hashes
simplejson==3.19.1 # import simplejson
newrelic==8.8.0
diff --git a/requirements/common.txt b/requirements/common.txt
index 8b05eb124b0..f2b24004781 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -226,9 +226,9 @@ cachy==0.3.0 \
--hash=sha256:186581f4ceb42a0bbe040c407da73c14092379b1e4c0e327fdb72ae4a9b269b1 \
--hash=sha256:338ca09c8860e76b275aff52374330efedc4d5a5e45dc1c5b539c1ead0786fe7
# via mozci
-celery==5.3.1 \
- --hash=sha256:27f8f3f3b58de6e0ab4f174791383bbd7445aff0471a43e99cfd77727940753f \
- --hash=sha256:f84d1c21a1520c116c2b7d26593926581191435a03aa74b77c941b93ca1c6210
+celery==5.3.6 \
+ --hash=sha256:870cc71d737c0200c397290d730344cc991d13a057534353d124c9380267aab9 \
+ --hash=sha256:9da4ea0118d232ce97dff5ed4974587fb1c0ff5c10042eb15278487cdd27d1af
# via -r requirements/common.in
certifi==2023.5.7 \
--hash=sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7 \
From 97a4c8ec0d98981d471d18443d54c59afa92d7c4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 16:24:03 +0000
Subject: [PATCH 032/128] Bump shellcheck-py from 0.9.0.5 to 0.9.0.6
Bumps [shellcheck-py](https://github.com/ryanrhee/shellcheck-py) from 0.9.0.5 to 0.9.0.6.
- [Commits](https://github.com/ryanrhee/shellcheck-py/compare/v0.9.0.5...v0.9.0.6)
---
updated-dependencies:
- dependency-name: shellcheck-py
dependency-type: direct:development
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 10 +++++-----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index beaa49dc529..06e2612e143 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -17,7 +17,7 @@ pytest-watch==4.2.0
pytest-django==4.5.2
pytest==7.3.2
black==23.3.0
-shellcheck-py==0.9.0.5
+shellcheck-py==0.9.0.6
# To test async code
pytest-asyncio==0.21.0 # required to pass test_new_job_transformation
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 4e9496f3583..0446d6c7381 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -449,11 +449,11 @@ selenium==4.17.2 \
--hash=sha256:5aee79026c07985dc1b0c909f34084aa996dfe5b307602de9016d7a621a473f2 \
--hash=sha256:d43d6972e516855fb242ef9ce4ce759057b115070e702e7b1c1032fe7b38d87b
# via pypom
-shellcheck-py==0.9.0.5 \
- --hash=sha256:50b2057fac7227fd83614a9bf9d123042e53e03d92f2c7f1778448a8937f07a4 \
- --hash=sha256:65ddc19a1ae4249802a663682834ed452f9e75615d58c3ce6b3f1b0d2a484f32 \
- --hash=sha256:98d9668f72afeb65c7a8e60f02202b00d64f2de9e9b103dfb5d0067ded391ef3 \
- --hash=sha256:9f50a7354f355753f365668e79aa3d410cb6f4d9358e4c5d8464018cf2b4863a
+shellcheck-py==0.9.0.6 \
+ --hash=sha256:38d48a4e2279f5deac374574e7625cd53b7f615301f36b1b1fffd22105dc066d \
+ --hash=sha256:730235c4f92657884f8b343d5426e4dc28e9a6ba9ad54d469cd038e340ea5be0 \
+ --hash=sha256:d1d0c285e2c094813659e0920559a2892da598c1176da59cb4eb9e2f505e5ee8 \
+ --hash=sha256:f83a0ee1e9762f787ab52e8a906e553b9583586c44e3f9730b6e635f296a69e8
# via -r requirements/dev.in
six==1.16.0 \
--hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
From df09b4b97fae0c0cc505761b2f50e8da53fe582f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 16:27:57 +0000
Subject: [PATCH 033/128] Bump jinja2 from 3.1.2 to 3.1.3
Bumps [jinja2](https://github.com/pallets/jinja) from 3.1.2 to 3.1.3.
- [Release notes](https://github.com/pallets/jinja/releases)
- [Changelog](https://github.com/pallets/jinja/blob/main/CHANGES.rst)
- [Commits](https://github.com/pallets/jinja/compare/3.1.2...3.1.3)
---
updated-dependencies:
- dependency-name: jinja2
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index f139b2065b7..d1f079f77bd 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -43,7 +43,7 @@ dockerflow==2024.1.0
moz-measure-noise==2.60.1
# Used in the intermittents commenter
-jinja2==3.1.2
+jinja2==3.1.3
# Client to publish runtime statistics to statsd
statsd==4.0.1
diff --git a/requirements/common.txt b/requirements/common.txt
index f2b24004781..84cdfcd36c4 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -500,9 +500,9 @@ idna==3.6 \
# via
# requests
# yarl
-jinja2==3.1.2 \
- --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \
- --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61
+jinja2==3.1.3 \
+ --hash=sha256:7d6d50dd97d52cbc355597bd845fabfbac3f551e1f99619e39a35ce8c370b5fa \
+ --hash=sha256:ac8bd6544d4bb2c9792bf3a159e80bba8fda7f07e81bc3aed565432d5925ba90
# via -r requirements/common.in
jmespath==1.0.1 \
--hash=sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980 \
From 4c0db54e437fc4b57da4083f268d80c212e6d3c2 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 16:37:53 +0000
Subject: [PATCH 034/128] Bump json-e from 4.5.2 to 4.7.0
Bumps [json-e](https://github.com/json-e/json-e) from 4.5.2 to 4.7.0.
- [Changelog](https://github.com/json-e/json-e/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/json-e/json-e/compare/v4.5.2...v4.7.0)
---
updated-dependencies:
- dependency-name: json-e
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index d1f079f77bd..d0d909998bb 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -30,7 +30,7 @@ python-jose[pycryptodome]==3.3.0 # from jose import jwt
furl==2.1.3 # Imported as furl
first==2.0.2 # Imported as first
-json-e==4.5.2 # import jsone
+json-e==4.7.0 # import jsone
django-cache-memoize==0.1.10 # Imported as cache_memoize
# Required for Push Health
diff --git a/requirements/common.txt b/requirements/common.txt
index 84cdfcd36c4..c423ab2a53e 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -510,9 +510,9 @@ jmespath==1.0.1 \
# via
# boto3
# botocore
-json-e==4.5.2 \
- --hash=sha256:0d1203645a5753dec2da1ceab279f82169023948eff858b87968117e8a592e10 \
- --hash=sha256:b1c82e79ec232b8a86393488b39aa086f8c098cf67fa190ac03517daf0e51aed
+json-e==4.7.0 \
+ --hash=sha256:c12b00552111ab2c43e1a87111a7113a73aee903709df96d7a778f45dc0a7ea8 \
+ --hash=sha256:e5df7be84bf80d4e9bb8217580b50602f59fa7df6af0ba5c5473a7388afb85ae
# via
# -r requirements/common.in
# mozci
From 1bd10ebc2f1b233c44da422f80415646077cdeeb Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 16:39:53 +0000
Subject: [PATCH 035/128] Bump jsonschema from 4.17.3 to 4.21.1
Bumps [jsonschema](https://github.com/python-jsonschema/jsonschema) from 4.17.3 to 4.21.1.
- [Release notes](https://github.com/python-jsonschema/jsonschema/releases)
- [Changelog](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/python-jsonschema/jsonschema/compare/v4.17.3...v4.21.1)
---
updated-dependencies:
- dependency-name: jsonschema
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 154 ++++++++++++++++++++++++++++++----------
2 files changed, 118 insertions(+), 38 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index d0d909998bb..75e31634001 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -11,7 +11,7 @@ certifi==2023.5.7
mysqlclient==2.1.1 # Required by Django
psycopg2-binary==2.9.6
-jsonschema==4.17.3 # import jsonschema
+jsonschema==4.21.1 # import jsonschema
djangorestframework==3.14.0 # Imported as rest_framework
django-cors-headers==4.1.0 # Listed as 3rd party app on settings.py
mozlog==8.0.0
diff --git a/requirements/common.txt b/requirements/common.txt
index c423ab2a53e..d07ddfd1b5b 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -115,6 +115,7 @@ attrs==23.2.0 \
# via
# aiohttp
# jsonschema
+ # referencing
billiard==4.2.0 \
--hash=sha256:07aa978b308f334ff8282bd4a746e681b3513db5c9a514cbdd810cbbdc19714d \
--hash=sha256:9a3c3184cb275aa17a732f93f65b20c525d3d9f253722d26a82194803ade5a2c
@@ -516,10 +517,14 @@ json-e==4.7.0 \
# via
# -r requirements/common.in
# mozci
-jsonschema==4.17.3 \
- --hash=sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d \
- --hash=sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6
+jsonschema==4.21.1 \
+ --hash=sha256:7996507afae316306f9e2290407761157c6f78002dcf7419acb99822143d1c6f \
+ --hash=sha256:85727c00279f5fa6bedbe6238d2aa6403bedd8b4864ab11207d07df3cc1b2ee5
# via -r requirements/common.in
+jsonschema-specifications==2023.12.1 \
+ --hash=sha256:48a76787b3e70f5ed53f1160d2b81f586e4ca6d1548c5de7085d1682674764cc \
+ --hash=sha256:87e4fdf3a94858b8a2ba2778d9ba57d8a9cafca7c7489c46ba0d30a8bc6a9c3c
+ # via jsonschema
kombu==5.3.5 \
--hash=sha256:0eac1bbb464afe6fb0924b21bf79460416d25d8abc52546d4f16cad94f789488 \
--hash=sha256:30e470f1a6b49c70dc6f6d13c3e4cc4e178aa6c469ceb6bcd55645385fc84b93
@@ -986,40 +991,6 @@ pyflakes==2.4.0 \
--hash=sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c \
--hash=sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e
# via flake8
-pyrsistent==0.20.0 \
- --hash=sha256:0724c506cd8b63c69c7f883cc233aac948c1ea946ea95996ad8b1380c25e1d3f \
- --hash=sha256:09848306523a3aba463c4b49493a760e7a6ca52e4826aa100ee99d8d39b7ad1e \
- --hash=sha256:0f3b1bcaa1f0629c978b355a7c37acd58907390149b7311b5db1b37648eb6958 \
- --hash=sha256:21cc459636983764e692b9eba7144cdd54fdec23ccdb1e8ba392a63666c60c34 \
- --hash=sha256:2e14c95c16211d166f59c6611533d0dacce2e25de0f76e4c140fde250997b3ca \
- --hash=sha256:2e2c116cc804d9b09ce9814d17df5edf1df0c624aba3b43bc1ad90411487036d \
- --hash=sha256:4021a7f963d88ccd15b523787d18ed5e5269ce57aa4037146a2377ff607ae87d \
- --hash=sha256:4c48f78f62ab596c679086084d0dd13254ae4f3d6c72a83ffdf5ebdef8f265a4 \
- --hash=sha256:4f5c2d012671b7391803263419e31b5c7c21e7c95c8760d7fc35602353dee714 \
- --hash=sha256:58b8f6366e152092194ae68fefe18b9f0b4f89227dfd86a07770c3d86097aebf \
- --hash=sha256:59a89bccd615551391f3237e00006a26bcf98a4d18623a19909a2c48b8e986ee \
- --hash=sha256:5cdd7ef1ea7a491ae70d826b6cc64868de09a1d5ff9ef8d574250d0940e275b8 \
- --hash=sha256:6288b3fa6622ad8a91e6eb759cfc48ff3089e7c17fb1d4c59a919769314af224 \
- --hash=sha256:6d270ec9dd33cdb13f4d62c95c1a5a50e6b7cdd86302b494217137f760495b9d \
- --hash=sha256:79ed12ba79935adaac1664fd7e0e585a22caa539dfc9b7c7c6d5ebf91fb89054 \
- --hash=sha256:7d29c23bdf6e5438c755b941cef867ec2a4a172ceb9f50553b6ed70d50dfd656 \
- --hash=sha256:8441cf9616d642c475684d6cf2520dd24812e996ba9af15e606df5f6fd9d04a7 \
- --hash=sha256:881bbea27bbd32d37eb24dd320a5e745a2a5b092a17f6debc1349252fac85423 \
- --hash=sha256:8c3aba3e01235221e5b229a6c05f585f344734bd1ad42a8ac51493d74722bbce \
- --hash=sha256:a14798c3005ec892bbada26485c2eea3b54109cb2533713e355c806891f63c5e \
- --hash=sha256:b14decb628fac50db5e02ee5a35a9c0772d20277824cfe845c8a8b717c15daa3 \
- --hash=sha256:b318ca24db0f0518630e8b6f3831e9cba78f099ed5c1d65ffe3e023003043ba0 \
- --hash=sha256:c1beb78af5423b879edaf23c5591ff292cf7c33979734c99aa66d5914ead880f \
- --hash=sha256:c55acc4733aad6560a7f5f818466631f07efc001fd023f34a6c203f8b6df0f0b \
- --hash=sha256:ca52d1ceae015859d16aded12584c59eb3825f7b50c6cfd621d4231a6cc624ce \
- --hash=sha256:cae40a9e3ce178415040a0383f00e8d68b569e97f31928a3a8ad37e3fde6df6a \
- --hash=sha256:e78d0c7c1e99a4a45c99143900ea0546025e41bb59ebc10182e947cf1ece9174 \
- --hash=sha256:ef3992833fbd686ee783590639f4b8343a57f1f75de8633749d984dc0eb16c86 \
- --hash=sha256:f058a615031eea4ef94ead6456f5ec2026c19fb5bd6bfe86e9665c4158cf802f \
- --hash=sha256:f5ac696f02b3fc01a710427585c855f65cd9c640e14f52abe52020722bb4906b \
- --hash=sha256:f920385a11207dc372a028b3f1e1038bb244b3ec38d448e6d8e43c6b3ba20e98 \
- --hash=sha256:fed2c3216a605dc9a6ea50c7e84c82906e3684c4e80d2908208f662a6cbf9022
- # via jsonschema
python-dateutil==2.8.2 \
--hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \
--hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9
@@ -1195,12 +1166,121 @@ redis==4.6.0 \
# via
# django-redis
# mozci
+referencing==0.33.0 \
+ --hash=sha256:39240f2ecc770258f28b642dd47fd74bc8b02484de54e1882b74b35ebd779bd5 \
+ --hash=sha256:c775fedf74bc0f9189c2a3be1c12fd03e8c23f4d371dce795df44e06c5b412f7
+ # via
+ # jsonschema
+ # jsonschema-specifications
requests==2.31.0 \
--hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \
--hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1
# via
# mozci
# taskcluster
+rpds-py==0.18.0 \
+ --hash=sha256:01e36a39af54a30f28b73096dd39b6802eddd04c90dbe161c1b8dbe22353189f \
+ --hash=sha256:044a3e61a7c2dafacae99d1e722cc2d4c05280790ec5a05031b3876809d89a5c \
+ --hash=sha256:08231ac30a842bd04daabc4d71fddd7e6d26189406d5a69535638e4dcb88fe76 \
+ --hash=sha256:08f9ad53c3f31dfb4baa00da22f1e862900f45908383c062c27628754af2e88e \
+ --hash=sha256:0ab39c1ba9023914297dd88ec3b3b3c3f33671baeb6acf82ad7ce883f6e8e157 \
+ --hash=sha256:0af039631b6de0397ab2ba16eaf2872e9f8fca391b44d3d8cac317860a700a3f \
+ --hash=sha256:0b8612cd233543a3781bc659c731b9d607de65890085098986dfd573fc2befe5 \
+ --hash=sha256:11a8c85ef4a07a7638180bf04fe189d12757c696eb41f310d2426895356dcf05 \
+ --hash=sha256:1374f4129f9bcca53a1bba0bb86bf78325a0374577cf7e9e4cd046b1e6f20e24 \
+ --hash=sha256:1d4acf42190d449d5e89654d5c1ed3a4f17925eec71f05e2a41414689cda02d1 \
+ --hash=sha256:1d9a5be316c15ffb2b3c405c4ff14448c36b4435be062a7f578ccd8b01f0c4d8 \
+ --hash=sha256:1df3659d26f539ac74fb3b0c481cdf9d725386e3552c6fa2974f4d33d78e544b \
+ --hash=sha256:22806714311a69fd0af9b35b7be97c18a0fc2826e6827dbb3a8c94eac6cf7eeb \
+ --hash=sha256:2644e47de560eb7bd55c20fc59f6daa04682655c58d08185a9b95c1970fa1e07 \
+ --hash=sha256:2e6d75ab12b0bbab7215e5d40f1e5b738aa539598db27ef83b2ec46747df90e1 \
+ --hash=sha256:30f43887bbae0d49113cbaab729a112251a940e9b274536613097ab8b4899cf6 \
+ --hash=sha256:34b18ba135c687f4dac449aa5157d36e2cbb7c03cbea4ddbd88604e076aa836e \
+ --hash=sha256:36b3ee798c58ace201289024b52788161e1ea133e4ac93fba7d49da5fec0ef9e \
+ --hash=sha256:39514da80f971362f9267c600b6d459bfbbc549cffc2cef8e47474fddc9b45b1 \
+ --hash=sha256:39f5441553f1c2aed4de4377178ad8ff8f9d733723d6c66d983d75341de265ab \
+ --hash=sha256:3a96e0c6a41dcdba3a0a581bbf6c44bb863f27c541547fb4b9711fd8cf0ffad4 \
+ --hash=sha256:3f26b5bd1079acdb0c7a5645e350fe54d16b17bfc5e71f371c449383d3342e17 \
+ --hash=sha256:41ef53e7c58aa4ef281da975f62c258950f54b76ec8e45941e93a3d1d8580594 \
+ --hash=sha256:42821446ee7a76f5d9f71f9e33a4fb2ffd724bb3e7f93386150b61a43115788d \
+ --hash=sha256:43fbac5f22e25bee1d482c97474f930a353542855f05c1161fd804c9dc74a09d \
+ --hash=sha256:4457a94da0d5c53dc4b3e4de1158bdab077db23c53232f37a3cb7afdb053a4e3 \
+ --hash=sha256:465a3eb5659338cf2a9243e50ad9b2296fa15061736d6e26240e713522b6235c \
+ --hash=sha256:482103aed1dfe2f3b71a58eff35ba105289b8d862551ea576bd15479aba01f66 \
+ --hash=sha256:4832d7d380477521a8c1644bbab6588dfedea5e30a7d967b5fb75977c45fd77f \
+ --hash=sha256:4901165d170a5fde6f589acb90a6b33629ad1ec976d4529e769c6f3d885e3e80 \
+ --hash=sha256:5307def11a35f5ae4581a0b658b0af8178c65c530e94893345bebf41cc139d33 \
+ --hash=sha256:5417558f6887e9b6b65b4527232553c139b57ec42c64570569b155262ac0754f \
+ --hash=sha256:56a737287efecafc16f6d067c2ea0117abadcd078d58721f967952db329a3e5c \
+ --hash=sha256:586f8204935b9ec884500498ccc91aa869fc652c40c093bd9e1471fbcc25c022 \
+ --hash=sha256:5b4e7d8d6c9b2e8ee2d55c90b59c707ca59bc30058269b3db7b1f8df5763557e \
+ --hash=sha256:5ddcba87675b6d509139d1b521e0c8250e967e63b5909a7e8f8944d0f90ff36f \
+ --hash=sha256:618a3d6cae6ef8ec88bb76dd80b83cfe415ad4f1d942ca2a903bf6b6ff97a2da \
+ --hash=sha256:635dc434ff724b178cb192c70016cc0ad25a275228f749ee0daf0eddbc8183b1 \
+ --hash=sha256:661d25cbffaf8cc42e971dd570d87cb29a665f49f4abe1f9e76be9a5182c4688 \
+ --hash=sha256:66e6a3af5a75363d2c9a48b07cb27c4ea542938b1a2e93b15a503cdfa8490795 \
+ --hash=sha256:67071a6171e92b6da534b8ae326505f7c18022c6f19072a81dcf40db2638767c \
+ --hash=sha256:685537e07897f173abcf67258bee3c05c374fa6fff89d4c7e42fb391b0605e98 \
+ --hash=sha256:69e64831e22a6b377772e7fb337533c365085b31619005802a79242fee620bc1 \
+ --hash=sha256:6b0817e34942b2ca527b0e9298373e7cc75f429e8da2055607f4931fded23e20 \
+ --hash=sha256:6c81e5f372cd0dc5dc4809553d34f832f60a46034a5f187756d9b90586c2c307 \
+ --hash=sha256:6d7faa6f14017c0b1e69f5e2c357b998731ea75a442ab3841c0dbbbfe902d2c4 \
+ --hash=sha256:6ef0befbb5d79cf32d0266f5cff01545602344eda89480e1dd88aca964260b18 \
+ --hash=sha256:6ef687afab047554a2d366e112dd187b62d261d49eb79b77e386f94644363294 \
+ --hash=sha256:7223a2a5fe0d217e60a60cdae28d6949140dde9c3bcc714063c5b463065e3d66 \
+ --hash=sha256:77f195baa60a54ef9d2de16fbbfd3ff8b04edc0c0140a761b56c267ac11aa467 \
+ --hash=sha256:793968759cd0d96cac1e367afd70c235867831983f876a53389ad869b043c948 \
+ --hash=sha256:7bd339195d84439cbe5771546fe8a4e8a7a045417d8f9de9a368c434e42a721e \
+ --hash=sha256:7cd863afe7336c62ec78d7d1349a2f34c007a3cc6c2369d667c65aeec412a5b1 \
+ --hash=sha256:7f2facbd386dd60cbbf1a794181e6aa0bd429bd78bfdf775436020172e2a23f0 \
+ --hash=sha256:84ffab12db93b5f6bad84c712c92060a2d321b35c3c9960b43d08d0f639d60d7 \
+ --hash=sha256:8c8370641f1a7f0e0669ddccca22f1da893cef7628396431eb445d46d893e5cd \
+ --hash=sha256:8db715ebe3bb7d86d77ac1826f7d67ec11a70dbd2376b7cc214199360517b641 \
+ --hash=sha256:8e8916ae4c720529e18afa0b879473049e95949bf97042e938530e072fde061d \
+ --hash=sha256:8f03bccbd8586e9dd37219bce4d4e0d3ab492e6b3b533e973fa08a112cb2ffc9 \
+ --hash=sha256:8f2fc11e8fe034ee3c34d316d0ad8808f45bc3b9ce5857ff29d513f3ff2923a1 \
+ --hash=sha256:923d39efa3cfb7279a0327e337a7958bff00cc447fd07a25cddb0a1cc9a6d2da \
+ --hash=sha256:93df1de2f7f7239dc9cc5a4a12408ee1598725036bd2dedadc14d94525192fc3 \
+ --hash=sha256:998e33ad22dc7ec7e030b3df701c43630b5bc0d8fbc2267653577e3fec279afa \
+ --hash=sha256:99f70b740dc04d09e6b2699b675874367885217a2e9f782bdf5395632ac663b7 \
+ --hash=sha256:9a00312dea9310d4cb7dbd7787e722d2e86a95c2db92fbd7d0155f97127bcb40 \
+ --hash=sha256:9d54553c1136b50fd12cc17e5b11ad07374c316df307e4cfd6441bea5fb68496 \
+ --hash=sha256:9dbbeb27f4e70bfd9eec1be5477517365afe05a9b2c441a0b21929ee61048124 \
+ --hash=sha256:a1ce3ba137ed54f83e56fb983a5859a27d43a40188ba798993812fed73c70836 \
+ --hash=sha256:a34d557a42aa28bd5c48a023c570219ba2593bcbbb8dc1b98d8cf5d529ab1434 \
+ --hash=sha256:a5f446dd5055667aabaee78487f2b5ab72e244f9bc0b2ffebfeec79051679984 \
+ --hash=sha256:ad36cfb355e24f1bd37cac88c112cd7730873f20fb0bdaf8ba59eedf8216079f \
+ --hash=sha256:aec493917dd45e3c69d00a8874e7cbed844efd935595ef78a0f25f14312e33c6 \
+ --hash=sha256:b316144e85316da2723f9d8dc75bada12fa58489a527091fa1d5a612643d1a0e \
+ --hash=sha256:b34ae4636dfc4e76a438ab826a0d1eed2589ca7d9a1b2d5bb546978ac6485461 \
+ --hash=sha256:b34b7aa8b261c1dbf7720b5d6f01f38243e9b9daf7e6b8bc1fd4657000062f2c \
+ --hash=sha256:bc362ee4e314870a70f4ae88772d72d877246537d9f8cb8f7eacf10884862432 \
+ --hash=sha256:bed88b9a458e354014d662d47e7a5baafd7ff81c780fd91584a10d6ec842cb73 \
+ --hash=sha256:c0013fe6b46aa496a6749c77e00a3eb07952832ad6166bd481c74bda0dcb6d58 \
+ --hash=sha256:c0b5dcf9193625afd8ecc92312d6ed78781c46ecbf39af9ad4681fc9f464af88 \
+ --hash=sha256:c4325ff0442a12113a6379af66978c3fe562f846763287ef66bdc1d57925d337 \
+ --hash=sha256:c463ed05f9dfb9baebef68048aed8dcdc94411e4bf3d33a39ba97e271624f8f7 \
+ --hash=sha256:c8362467a0fdeccd47935f22c256bec5e6abe543bf0d66e3d3d57a8fb5731863 \
+ --hash=sha256:cd5bf1af8efe569654bbef5a3e0a56eca45f87cfcffab31dd8dde70da5982475 \
+ --hash=sha256:cf1ea2e34868f6fbf070e1af291c8180480310173de0b0c43fc38a02929fc0e3 \
+ --hash=sha256:d62dec4976954a23d7f91f2f4530852b0c7608116c257833922a896101336c51 \
+ --hash=sha256:d68c93e381010662ab873fea609bf6c0f428b6d0bb00f2c6939782e0818d37bf \
+ --hash=sha256:d7c36232a90d4755b720fbd76739d8891732b18cf240a9c645d75f00639a9024 \
+ --hash=sha256:dd18772815d5f008fa03d2b9a681ae38d5ae9f0e599f7dda233c439fcaa00d40 \
+ --hash=sha256:ddc2f4dfd396c7bfa18e6ce371cba60e4cf9d2e5cdb71376aa2da264605b60b9 \
+ --hash=sha256:e003b002ec72c8d5a3e3da2989c7d6065b47d9eaa70cd8808b5384fbb970f4ec \
+ --hash=sha256:e32a92116d4f2a80b629778280103d2a510a5b3f6314ceccd6e38006b5e92dcb \
+ --hash=sha256:e4461d0f003a0aa9be2bdd1b798a041f177189c1a0f7619fe8c95ad08d9a45d7 \
+ --hash=sha256:e541ec6f2ec456934fd279a3120f856cd0aedd209fc3852eca563f81738f6861 \
+ --hash=sha256:e546e768d08ad55b20b11dbb78a745151acbd938f8f00d0cfbabe8b0199b9880 \
+ --hash=sha256:ea7d4a99f3b38c37eac212dbd6ec42b7a5ec51e2c74b5d3223e43c811609e65f \
+ --hash=sha256:ed4eb745efbff0a8e9587d22a84be94a5eb7d2d99c02dacf7bd0911713ed14dd \
+ --hash=sha256:f8a2f084546cc59ea99fda8e070be2fd140c3092dc11524a71aa8f0f3d5a55ca \
+ --hash=sha256:fcb25daa9219b4cf3a0ab24b0eb9a5cc8949ed4dc72acb8fa16b7e1681aa3c58 \
+ --hash=sha256:fdea4952db2793c4ad0bdccd27c1d8fdd1423a92f04598bc39425bcc2b8ee46e
+ # via
+ # jsonschema
+ # referencing
rsa==4.9 \
--hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \
--hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21
From 5eeee89504cc62e60d25720b56319f9e4ecf82ce Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 21:47:30 +0000
Subject: [PATCH 036/128] Bump pytest-testmon from 2.0.9 to 2.1.1
Bumps [pytest-testmon]() from 2.0.9 to 2.1.1.
---
updated-dependencies:
- dependency-name: pytest-testmon
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 06e2612e143..d148fb1dfb8 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -10,7 +10,7 @@ PyPOM==2.2.4
pre-commit==3.3.3
# for test driven development
-pytest-testmon==2.0.9
+pytest-testmon==2.1.1
pytest-watch==4.2.0
# Required by django-extension's runserver_plus command.
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 0446d6c7381..c04a4162e1d 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -368,9 +368,9 @@ pytest-django==4.5.2 \
pytest-freezegun @ https://github.com/hugovk/pytest-freezegun/archive/03d7107a877e8f07617f931a379f567d89060085.zip \
--hash=sha256:60cf7c6592c612d3fbcb12c77c96b97f011bd313a238f07c31505b9d50f855a0
# via -r requirements/dev.in
-pytest-testmon==2.0.9 \
- --hash=sha256:5011e093c5d897a48e4f5769678bf75db57576827bf1112a511315b24a8e4e4d \
- --hash=sha256:8ee537a279cdd02ab999a395010be353e30cd59c167b85f46a901aa50168648f
+pytest-testmon==2.1.1 \
+ --hash=sha256:8271ca47bc8c80760c4fc7fd7895ea786b111bbb31f13eeea879a6fd11fe2226 \
+ --hash=sha256:8ebe2c3de42d99306ee54cd4536fed0fc48346a954420da904b18e8d59b5da98
# via -r requirements/dev.in
pytest-watch==4.2.0 \
--hash=sha256:06136f03d5b361718b8d0d234042f7b2f203910d8568f63df2f866b547b3d4b9
From 54b25da5e9bbe7961031757968f1ec95e75e62bc Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 21:52:19 +0000
Subject: [PATCH 037/128] Bump mozci[cache] from 2.3.2 to 2.4.0
Bumps [mozci[cache]]() from 2.3.2 to 2.4.0.
---
updated-dependencies:
- dependency-name: mozci[cache]
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 75e31634001..ccb7e828731 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -34,7 +34,7 @@ json-e==4.7.0 # import jsone
django-cache-memoize==0.1.10 # Imported as cache_memoize
# Required for Push Health
-mozci[cache]==2.3.2
+mozci[cache]==2.4.0
# Dockerflow/CloudOps APIs
dockerflow==2024.1.0
diff --git a/requirements/common.txt b/requirements/common.txt
index d07ddfd1b5b..4ffaa1863b2 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -694,9 +694,9 @@ moz-measure-noise==2.60.1 \
--hash=sha256:d147b9af6a1ccbe94f951152596fbef3959dddf4284fb47c9a4ca3211f1da06a \
--hash=sha256:f8811a904fab113ba195c5eed84a448283a95c46751409a70fc168634c7d9613
# via -r requirements/common.in
-mozci[cache]==2.3.2 \
- --hash=sha256:7f9256f400792c46254bd5422c214f6715e824015696c1ab7ffce5457628c646 \
- --hash=sha256:c7126a7bd044e9275cf0f4801ff18561d2420eca436e62bdd920601c1d3b4085
+mozci[cache]==2.4.0 \
+ --hash=sha256:1302ce8b08f53e608b654e54313b1f36f978dafad9a913a58a3331139b2d9225 \
+ --hash=sha256:b1ee163b31e1696bee7f2b203f508fcd4a3869c1158969615f9bdab2e1a57a9b
# via
# -r requirements/common.in
# mozci
From 313a51d878f9df751a0a8b3ff1bdcef7cbf2d87b Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 21:52:38 +0000
Subject: [PATCH 038/128] Bump gunicorn from 20.1.0 to 21.2.0
Bumps [gunicorn](https://github.com/benoitc/gunicorn) from 20.1.0 to 21.2.0.
- [Release notes](https://github.com/benoitc/gunicorn/releases)
- [Commits](https://github.com/benoitc/gunicorn/compare/20.1.0...21.2.0)
---
updated-dependencies:
- dependency-name: gunicorn
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 15 +++++++--------
2 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index ccb7e828731..d4afb7a7423 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -1,5 +1,5 @@
# Packages that are shared between deployment and dev environments.
-gunicorn==20.1.0
+gunicorn==21.2.0
whitenoise[brotli]==6.5.0 # Used by Whitenoise to provide Brotli-compressed versions of static files.
Django==4.1.13
celery==5.3.6 # celery needed for data ingestion
diff --git a/requirements/common.txt b/requirements/common.txt
index 4ffaa1863b2..7aef84cd1fd 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -491,9 +491,9 @@ furl==2.1.3 \
--hash=sha256:5a6188fe2666c484a12159c18be97a1977a71d632ef5bb867ef15f54af39cc4e \
--hash=sha256:9ab425062c4217f9802508e45feb4a83e54324273ac4b202f1850363309666c0
# via -r requirements/common.in
-gunicorn==20.1.0 \
- --hash=sha256:9dcc4547dbb1cb284accfb15ab5667a0e5d1881cc443e0677b4882a4067a807e \
- --hash=sha256:e0a968b5ba15f8a328fdfd7ab1fcb5af4470c28aaf7e55df02a99bc13138e6e8
+gunicorn==21.2.0 \
+ --hash=sha256:3213aa5e8c24949e792bcacfc176fef362e7aac80b76c56f6b5122bf350722f0 \
+ --hash=sha256:88ec8bff1d634f98e61b9f65bc4bf3cd918a90806c6f5c48bc5603849ec81033
# via -r requirements/common.in
idna==3.6 \
--hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \
@@ -875,6 +875,10 @@ orderedmultidict==1.0.1 \
--hash=sha256:04070bbb5e87291cc9bfa51df413677faf2141c73c61d2a5f7b26bea3cd882ad \
--hash=sha256:43c839a17ee3cdd62234c47deca1a8508a3f2ca1d0678a3bf791c87cf84adbf3
# via furl
+packaging==23.2 \
+ --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \
+ --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7
+ # via gunicorn
prompt-toolkit==3.0.43 \
--hash=sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d \
--hash=sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6
@@ -1674,8 +1678,3 @@ zstandard==0.22.0 \
--hash=sha256:f9b2cde1cd1b2a10246dbc143ba49d942d14fb3d2b4bccf4618d475c65464912 \
--hash=sha256:fe3390c538f12437b859d815040763abc728955a52ca6ff9c5d4ac707c4ad98e
# via mozci
-
-# WARNING: The following packages were not pinned, but pip requires them to be
-# pinned when the requirements file includes hashes and the requirement is not
-# satisfied by a package already installed. Consider using the --allow-unsafe flag.
-# setuptools
From 89c0aac511a76b1e37329edf05031df015af7d8e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 22:05:37 +0000
Subject: [PATCH 039/128] Bump pytest-asyncio from 0.21.0 to 0.23.5
Bumps [pytest-asyncio](https://github.com/pytest-dev/pytest-asyncio) from 0.21.0 to 0.23.5.
- [Release notes](https://github.com/pytest-dev/pytest-asyncio/releases)
- [Commits](https://github.com/pytest-dev/pytest-asyncio/compare/v0.21.0...v0.23.5)
---
updated-dependencies:
- dependency-name: pytest-asyncio
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index d148fb1dfb8..b2d9b0681fa 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -20,7 +20,7 @@ black==23.3.0
shellcheck-py==0.9.0.6
# To test async code
-pytest-asyncio==0.21.0 # required to pass test_new_job_transformation
+pytest-asyncio==0.23.5 # required to pass test_new_job_transformation
# To test code that's making system time calls
# pytest-freezegun is not compatible with recent Django versions
diff --git a/requirements/dev.txt b/requirements/dev.txt
index c04a4162e1d..0e3584d4a1c 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -353,9 +353,9 @@ pytest==7.3.2 \
# pytest-freezegun
# pytest-testmon
# pytest-watch
-pytest-asyncio==0.21.0 \
- --hash=sha256:2b38a496aef56f56b0e87557ec313e11e1ab9276fc3863f6a7be0f1d0e415e1b \
- --hash=sha256:f2b3366b7cd501a4056858bd39349d5af19742aed2d81660b7998b6341c7eb9c
+pytest-asyncio==0.23.5 \
+ --hash=sha256:3a048872a9c4ba14c3e90cc1aa20cbc2def7d01c7c8db3777ec281ba9c057675 \
+ --hash=sha256:4e7093259ba018d58ede7d5315131d21923a60f8a6e9ee266ce1589685c89eac
# via -r requirements/dev.in
pytest-cov==4.1.0 \
--hash=sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6 \
From 7c895019ff5a48314301f9f4c4f3a710f579e18c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 21:59:54 +0000
Subject: [PATCH 040/128] Bump taskcluster from 53.2.1 to 60.4.2
Bumps [taskcluster](https://github.com/taskcluster/taskcluster) from 53.2.1 to 60.4.2.
- [Release notes](https://github.com/taskcluster/taskcluster/releases)
- [Changelog](https://github.com/taskcluster/taskcluster/blob/main/CHANGELOG.md)
- [Commits](https://github.com/taskcluster/taskcluster/compare/v53.2.1...v60.4.2)
---
updated-dependencies:
- dependency-name: taskcluster
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index d4afb7a7423..609ce64ffd2 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -25,7 +25,7 @@ python-dateutil==2.8.2
django-filter==23.2 # Listed in DEFAULT_FILTER_BACKENDS on settings.py
django-redis==5.3.0 # Listed in CACHES on settings.py
-taskcluster==53.2.1 # import taskcluster
+taskcluster==60.4.2 # import taskcluster
python-jose[pycryptodome]==3.3.0 # from jose import jwt
furl==2.1.3 # Imported as furl
diff --git a/requirements/common.txt b/requirements/common.txt
index 7aef84cd1fd..591f19b8e41 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -1432,9 +1432,9 @@ tabulate==0.9.0 \
--hash=sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c \
--hash=sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f
# via mozci
-taskcluster==53.2.1 \
- --hash=sha256:3a3f8559ca805e5a53674d501f0a96934cbd169491fad2dbca5175ec988f94e6 \
- --hash=sha256:538ff339e885dc11b6cf28d43792cdccea3313626cb20d1d9a8ee3d260d315f6
+taskcluster==60.4.2 \
+ --hash=sha256:5217073dd3c6642d976ab4a3f5861308b18bba533cd246d378a095d85e39597c \
+ --hash=sha256:9cacf06e790e81535a019c0623e5cdf284a7ecefce7f02de8f2802cdad161077
# via
# -r requirements/common.in
# mozci
From 8e9e097418901cde6b9219ed8fb217534d21437a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 16:55:58 +0000
Subject: [PATCH 041/128] Bump django-debug-toolbar from 4.1.0 to 4.3.0
Bumps [django-debug-toolbar](https://github.com/jazzband/django-debug-toolbar) from 4.1.0 to 4.3.0.
- [Release notes](https://github.com/jazzband/django-debug-toolbar/releases)
- [Changelog](https://github.com/jazzband/django-debug-toolbar/blob/main/docs/changes.rst)
- [Commits](https://github.com/jazzband/django-debug-toolbar/compare/4.1...4.3)
---
updated-dependencies:
- dependency-name: django-debug-toolbar
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index b2d9b0681fa..13611d61cc6 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -1,6 +1,6 @@
# Dependencies needed only for development/testing.
pytest-cov==4.1.0
-django-debug-toolbar==4.1.0
+django-debug-toolbar==4.3.0
mock==5.0.2
responses==0.23.1
django-extensions==3.2.3
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 0e3584d4a1c..602a3c85202 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -234,9 +234,9 @@ django==4.2.9 \
# via
# django-debug-toolbar
# django-extensions
-django-debug-toolbar==4.1.0 \
- --hash=sha256:a0b532ef5d52544fd745d1dcfc0557fa75f6f0d1962a8298bd568427ef2fa436 \
- --hash=sha256:f57882e335593cb8e74c2bda9f1116bbb9ca8fc0d81b50a75ace0f83de5173c7
+django-debug-toolbar==4.3.0 \
+ --hash=sha256:0b0dddee5ea29b9cb678593bc0d7a6d76b21d7799cb68e091a2148341a80f3c4 \
+ --hash=sha256:e09b7dcb8417b743234dfc57c95a7c1d1d87a88844abd13b4c5387f807b31bf6
# via -r requirements/dev.in
django-extensions==3.2.3 \
--hash=sha256:44d27919d04e23b3f40231c4ab7af4e61ce832ef46d610cc650d53e68328410a \
From 4004c435db63ebf921b8f8392568fabebcdf6fcd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 22:32:51 +0000
Subject: [PATCH 042/128] Bump mock from 5.0.2 to 5.1.0
Bumps [mock](https://github.com/testing-cabal/mock) from 5.0.2 to 5.1.0.
- [Changelog](https://github.com/testing-cabal/mock/blob/master/CHANGELOG.rst)
- [Commits](https://github.com/testing-cabal/mock/compare/5.0.2...5.1.0)
---
updated-dependencies:
- dependency-name: mock
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 13611d61cc6..752e1eadb1a 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -1,7 +1,7 @@
# Dependencies needed only for development/testing.
pytest-cov==4.1.0
django-debug-toolbar==4.3.0
-mock==5.0.2
+mock==5.1.0
responses==0.23.1
django-extensions==3.2.3
PyPOM==2.2.4
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 602a3c85202..b9728915f4d 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -282,9 +282,9 @@ iniconfig==2.0.0 \
--hash=sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3 \
--hash=sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374
# via pytest
-mock==5.0.2 \
- --hash=sha256:06f18d7d65b44428202b145a9a36e99c2ee00d1eb992df0caf881d4664377891 \
- --hash=sha256:0e0bc5ba78b8db3667ad636d964eb963dc97a59f04c6f6214c5f0e4a8f726c56
+mock==5.1.0 \
+ --hash=sha256:18c694e5ae8a208cdb3d2c20a993ca1a7b0efa258c247a1e565150f477f83744 \
+ --hash=sha256:5e96aad5ccda4718e0a229ed94b2024df75cc2d55575ba5762d31f5767b8767d
# via -r requirements/dev.in
mypy-extensions==1.0.0 \
--hash=sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d \
From ad57fe93895da86db2d059b035c5c3b97970e9c0 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 22:23:31 +0000
Subject: [PATCH 043/128] Bump pytest-django from 4.5.2 to 4.8.0
Bumps [pytest-django](https://github.com/pytest-dev/pytest-django) from 4.5.2 to 4.8.0.
- [Release notes](https://github.com/pytest-dev/pytest-django/releases)
- [Changelog](https://github.com/pytest-dev/pytest-django/blob/master/docs/changelog.rst)
- [Commits](https://github.com/pytest-dev/pytest-django/compare/v4.5.2...v4.8.0)
---
updated-dependencies:
- dependency-name: pytest-django
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 752e1eadb1a..0d58a2b1e2b 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -14,7 +14,7 @@ pytest-testmon==2.1.1
pytest-watch==4.2.0
# Required by django-extension's runserver_plus command.
-pytest-django==4.5.2
+pytest-django==4.8.0
pytest==7.3.2
black==23.3.0
shellcheck-py==0.9.0.6
diff --git a/requirements/dev.txt b/requirements/dev.txt
index b9728915f4d..5ecd8136044 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -361,9 +361,9 @@ pytest-cov==4.1.0 \
--hash=sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6 \
--hash=sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a
# via -r requirements/dev.in
-pytest-django==4.5.2 \
- --hash=sha256:c60834861933773109334fe5a53e83d1ef4828f2203a1d6a0fa9972f4f75ab3e \
- --hash=sha256:d9076f759bb7c36939dbdd5ae6633c18edfc2902d1a69fdbefd2426b970ce6c2
+pytest-django==4.8.0 \
+ --hash=sha256:5d054fe011c56f3b10f978f41a8efb2e5adfc7e680ef36fb571ada1f24779d90 \
+ --hash=sha256:ca1ddd1e0e4c227cf9e3e40a6afc6d106b3e70868fd2ac5798a22501271cd0c7
# via -r requirements/dev.in
pytest-freezegun @ https://github.com/hugovk/pytest-freezegun/archive/03d7107a877e8f07617f931a379f567d89060085.zip \
--hash=sha256:60cf7c6592c612d3fbcb12c77c96b97f011bd313a238f07c31505b9d50f855a0
From 45ed860042c42525ebe5c8db954a6781ba9a7ec6 Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Mon, 26 Feb 2024 18:36:50 +0100
Subject: [PATCH 044/128] N806: variables in functions should be lowercase
---
misc/compare_tasks.py | 8 +-
pyproject.toml | 2 +
tests/etl/test_bugzilla.py | 4 +-
tests/etl/test_job_loader.py | 22 +--
tests/log_parser/test_utils.py | 4 +-
tests/model/test_bugscache.py | 10 +-
tests/model/test_files_bugzilla_map.py | 16 +--
.../test_common_behaviour.py | 38 ++---
tests/perfalert/test_alerts.py | 52 +++----
tests/perfalert/test_analyze.py | 16 +--
tests/webapp/api/test_bug_creation.py | 98 ++++++-------
treeherder/etl/bugzilla.py | 4 +-
treeherder/etl/management/commands/ingest.py | 66 ++++-----
treeherder/etl/taskcluster_pulse/handler.py | 134 +++++++++---------
.../etl/taskcluster_pulse/parse_route.py | 32 ++---
treeherder/middleware.py | 4 +-
treeherder/model/error_summary.py | 4 +-
.../commands/create_test_perf_data.py | 6 +-
treeherder/push_health/compare.py | 4 +-
treeherder/push_health/tests.py | 32 ++---
treeherder/webapp/api/bug_creation.py | 8 +-
treeherder/webapp/api/investigated_test.py | 8 +-
treeherder/webapp/api/jobs.py | 6 +-
treeherder/webapp/api/push.py | 6 +-
24 files changed, 293 insertions(+), 291 deletions(-)
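N806 is the pep8-naming rule (implemented by ruff under the N prefix) for variables assigned inside functions: anything that is not lowercase, whether UPPER_CASE pseudo-constants or camelCase names such as taskId, gets flagged, while module-level constants are left alone. A minimal sketch of the kind of rename this drives, using a made-up helper rather than Treeherder code:

def summarize_durations(durations):
    # Before: TOTAL_TIME = sum(durations)  # flagged by N806
    # After: a plain lowercase local satisfies the rule
    total_time = sum(durations)
    return total_time / len(durations)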
diff --git a/misc/compare_tasks.py b/misc/compare_tasks.py
index 73d5cdf4ba4..53f53e32c57 100755
--- a/misc/compare_tasks.py
+++ b/misc/compare_tasks.py
@@ -53,8 +53,8 @@ def print_url_to_taskcluster(job_guid):
job_guid = job["job_guid"]
(decoded_task_id, _) = job_guid.split("/")
# As of slugid v2, slugid.encode() returns a string not bytestring under Python 3.
- taskId = slugid.encode(uuid.UUID(decoded_task_id))
- logger.info("https://firefox-ci-tc.services.mozilla.com/tasks/%s", taskId)
+ task_id = slugid.encode(uuid.UUID(decoded_task_id))
+ logger.info("https://firefox-ci-tc.services.mozilla.com/tasks/%s", task_id)
if __name__ == "__main__":
@@ -95,13 +95,13 @@ def print_url_to_taskcluster(job_guid):
th_instance_not_found.append(job)
else:
# You can use this value in a url with &selectedJob=
- jobId = job["id"]
+ job_id = job["id"]
remove_some_attributes(job, production_job)
differences = DeepDiff(job, production_dict[job["job_guid"]])
if differences:
pprint.pprint(differences)
- logger.info(jobId)
+ logger.info(job_id)
else:
# Delete jobs that don't have any differences
del production_dict[job["job_guid"]]
diff --git a/pyproject.toml b/pyproject.toml
index 0feb7161416..a03486f4355 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -39,6 +39,8 @@ select = [
"F",
# pyupgrade
"UP",
+    # pep8-naming
+ "N806",
]
ignore = [
diff --git a/tests/etl/test_bugzilla.py b/tests/etl/test_bugzilla.py
index 3ef3f5ec7e2..ebecec56e9c 100644
--- a/tests/etl/test_bugzilla.py
+++ b/tests/etl/test_bugzilla.py
@@ -48,7 +48,7 @@ def test_bz_reopen_bugs(request, mock_bugzilla_reopen_request, client, test_job,
import json
- EXPECTED_REOPEN_ATTEMPTS = {
+ expected_reopen_attempts = {
"https://thisisnotbugzilla.org/rest/bug/202": json.dumps(
{
"status": "REOPENED",
@@ -68,4 +68,4 @@ def test_bz_reopen_bugs(request, mock_bugzilla_reopen_request, client, test_job,
}
),
}
- assert reopened_bugs == EXPECTED_REOPEN_ATTEMPTS
+ assert reopened_bugs == expected_reopen_attempts
diff --git a/tests/etl/test_job_loader.py b/tests/etl/test_job_loader.py
index 468c719a247..7b94028a371 100644
--- a/tests/etl/test_job_loader.py
+++ b/tests/etl/test_job_loader.py
@@ -38,12 +38,12 @@ def transformed_pulse_jobs(sample_data, test_repository):
def mock_artifact(taskId, runId, artifactName):
# Mock artifact with empty body
- baseUrl = (
+ base_url = (
"https://taskcluster.net/api/queue/v1/task/{taskId}/runs/{runId}/artifacts/{artifactName}"
)
responses.add(
responses.GET,
- baseUrl.format(taskId=taskId, runId=runId, artifactName=artifactName),
+ base_url.format(taskId=taskId, runId=runId, artifactName=artifactName),
body="",
content_type="text/plain",
status=200,
@@ -53,20 +53,20 @@ def mock_artifact(taskId, runId, artifactName):
@pytest.fixture
async def new_pulse_jobs(sample_data, test_repository, push_stored):
revision = push_stored[0]["revisions"][0]["revision"]
- pulseMessages = copy.deepcopy(sample_data.taskcluster_pulse_messages)
+ pulse_messages = copy.deepcopy(sample_data.taskcluster_pulse_messages)
tasks = copy.deepcopy(sample_data.taskcluster_tasks)
jobs = []
# Over here we transform the Pulse messages into the intermediary taskcluster-treeherder
# generated messages
- for message in list(pulseMessages.values()):
- taskId = message["payload"]["status"]["taskId"]
- task = tasks[taskId]
+ for message in list(pulse_messages.values()):
+ task_id = message["payload"]["status"]["taskId"]
+ task = tasks[task_id]
# If we pass task to handleMessage we won't hit the network
- taskRuns = await handleMessage(message, task)
+ task_runs = await handleMessage(message, task)
# handleMessage returns [] when it is a task that is not meant for Treeherder
- for run in reversed(taskRuns):
- mock_artifact(taskId, run["retryId"], "public/logs/live_backing.log")
+ for run in reversed(task_runs):
+ mock_artifact(task_id, run["retryId"], "public/logs/live_backing.log")
run["origin"]["project"] = test_repository.name
run["origin"]["revision"] = revision
jobs.append(run)
@@ -99,11 +99,11 @@ def test_new_job_transformation(new_pulse_jobs, new_transformed_jobs, failure_cl
job_guid = message["taskId"]
(decoded_task_id, _) = job_guid.split("/")
# As of slugid v2, slugid.encode() returns a string not bytestring under Python 3.
- taskId = slugid.encode(uuid.UUID(decoded_task_id))
+ task_id = slugid.encode(uuid.UUID(decoded_task_id))
transformed_job = jl.process_job(message, "https://firefox-ci-tc.services.mozilla.com")
# Not all messages from Taskcluster will be processed
if transformed_job:
- assert new_transformed_jobs[taskId] == transformed_job
+ assert new_transformed_jobs[task_id] == transformed_job
def test_ingest_pulse_jobs(
diff --git a/tests/log_parser/test_utils.py b/tests/log_parser/test_utils.py
index dffd2a7a8c3..b0b3f1a926c 100644
--- a/tests/log_parser/test_utils.py
+++ b/tests/log_parser/test_utils.py
@@ -78,8 +78,8 @@ def test_smaller_than_bigger():
def test_extra_option_max_length():
with open(os.path.join("schemas", "performance-artifact.json")) as f:
- PERFHERDER_SCHEMA = json.load(f)
- assert 100 == _lookup_extra_options_max(PERFHERDER_SCHEMA)
+ perfherder_schema = json.load(f)
+ assert 100 == _lookup_extra_options_max(perfherder_schema)
def test_validate_perf_schema_no_exception():
diff --git a/tests/model/test_bugscache.py b/tests/model/test_bugscache.py
index a184b73365f..90fba8619d4 100644
--- a/tests/model/test_bugscache.py
+++ b/tests/model/test_bugscache.py
@@ -190,20 +190,20 @@ def test_import(mock_bugscache_bugzilla_request):
assert bug.dupe_of == 1662628
# key: open bug, values: duplicates
- EXPECTED_BUG_DUPE_OF_DATA = {
+ expected_bug_dupe_of_data = {
1392106: [1442991, 1443801],
1411358: [1204281],
1662628: [1652208, 1660324, 1660719, 1660765, 1663081, 1663118, 1702255],
1736534: [],
}
- for open_bug, duplicates in EXPECTED_BUG_DUPE_OF_DATA.items():
+ for open_bug, duplicates in expected_bug_dupe_of_data.items():
assert Bugscache.objects.get(id=open_bug).dupe_of is None
assert set(Bugscache.objects.filter(dupe_of=open_bug).values_list("id", flat=True)) == set(
duplicates
)
- EXPECTED_BUG_COUNT = sum(
- [1 + len(duplicates) for duplicates in EXPECTED_BUG_DUPE_OF_DATA.values()]
+ expected_bug_count = sum(
+ [1 + len(duplicates) for duplicates in expected_bug_dupe_of_data.values()]
)
- assert len(Bugscache.objects.all()) == EXPECTED_BUG_COUNT
+ assert len(Bugscache.objects.all()) == expected_bug_count
diff --git a/tests/model/test_files_bugzilla_map.py b/tests/model/test_files_bugzilla_map.py
index 1c54ca511af..732133e5323 100644
--- a/tests/model/test_files_bugzilla_map.py
+++ b/tests/model/test_files_bugzilla_map.py
@@ -40,7 +40,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
import_process.run()
assert FilesBugzillaMap.objects.count() == 7
- EXPECTED_FILES_BUGZILLA_DATA_IMPORT_1 = [
+ expected_files_bugzilla_data_import_1 = [
("AUTHORS", "AUTHORS", "mozilla.org", "Licensing"),
("browser/components/BrowserGlue.jsm", "BrowserGlue.jsm", "Firefox", "General"),
(
@@ -74,7 +74,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
"File first seen on mozilla-beta",
),
]
- assert EXPECTED_FILES_BUGZILLA_DATA_IMPORT_1 == list(
+ assert expected_files_bugzilla_data_import_1 == list(
FilesBugzillaMap.objects.all()
.values_list(
"path", "file_name", "bugzilla_component__product", "bugzilla_component__component"
@@ -82,7 +82,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
.order_by("path")
)
- EXPECTED_BUGZILLA_COMPONENTS_IMPORT_1 = [
+ expected_bugzilla_components_import_1 = [
("Core", "Storage: IndexedDB"),
("Firefox", "General"),
("Mock", "File first seen on mozilla-beta"),
@@ -91,7 +91,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
("mozilla.org", "Different path, same product, different component"),
("mozilla.org", "Licensing"),
]
- assert EXPECTED_BUGZILLA_COMPONENTS_IMPORT_1 == sorted(
+ assert expected_bugzilla_components_import_1 == sorted(
list(
BugzillaComponent.objects.all()
.values_list("product", "component")
@@ -103,7 +103,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
import_process.run()
assert FilesBugzillaMap.objects.count() == 6
- EXPECTED_FILES_BUGZILLA_DATA_IMPORT_2 = [
+ expected_files_bugzilla_data_import_2 = [
("AUTHORS", "AUTHORS", "mozilla.org", "Import 2: same product, different component"),
("browser/components/BrowserGlue.jsm", "BrowserGlue.jsm", "Firefox", "General"),
(
@@ -131,7 +131,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
"File first seen on mozilla-beta",
),
]
- assert EXPECTED_FILES_BUGZILLA_DATA_IMPORT_2 == sorted(
+ assert expected_files_bugzilla_data_import_2 == sorted(
list(
FilesBugzillaMap.objects.all()
.values_list(
@@ -141,7 +141,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
)
)
- EXPECTED_BUGZILLA_COMPONENTS_IMPORT_2 = [
+ expected_bugzilla_components_import_2 = [
("Core", "Storage: IndexedDB"),
("Core", "Storage: IndexedDB2"),
("Firefox", "General"),
@@ -149,7 +149,7 @@ def test_data_ingestion(setup_repository_data, mock_file_bugzilla_map_request):
("Testing", "web-platform-tests"),
("mozilla.org", "Import 2: same product, different component"),
]
- assert EXPECTED_BUGZILLA_COMPONENTS_IMPORT_2 == sorted(
+ assert expected_bugzilla_components_import_2 == sorted(
list(
BugzillaComponent.objects.all()
.values_list("product", "component")
diff --git a/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py b/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
index d1c04998054..e4cc9098c64 100644
--- a/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
+++ b/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
@@ -132,42 +132,42 @@ def test_formula_demands_at_least_framework_and_suite(FormulaClass, betamax_reco
def test_breakdown_updates_between_calculations(FormulaClass, betamax_recorder):
formula = FormulaClass(betamax_recorder.session)
- test_moniker_A = ("build_metrics", "build times")
- test_moniker_B = ("talos", "tp5n", "nonmain_startup_fileio")
+ test_moniker_a = ("build_metrics", "build times")
+ test_moniker_b = ("talos", "tp5n", "nonmain_startup_fileio")
- cassette_preffix_A = "-".join(filter(None, test_moniker_A))
- cassette_preffix_B = "-".join(filter(None, test_moniker_B))
+ cassette_preffix_a = "-".join(filter(None, test_moniker_a))
+ cassette_preffix_b = "-".join(filter(None, test_moniker_b))
- with betamax_recorder.use_cassette(f"{cassette_preffix_A}", serialize_with="prettyjson"):
- formula(*test_moniker_A) # let it perform calculus & cache breakdown
- breakdown_A = formula.breakdown()
+ with betamax_recorder.use_cassette(f"{cassette_preffix_a}", serialize_with="prettyjson"):
+ formula(*test_moniker_a) # let it perform calculus & cache breakdown
+ breakdown_a = formula.breakdown()
- with betamax_recorder.use_cassette(f"{cassette_preffix_B}", serialize_with="prettyjson"):
- formula(*test_moniker_B) # let it perform calculus & cache breakdown
- breakdown_B = formula.breakdown()
+ with betamax_recorder.use_cassette(f"{cassette_preffix_b}", serialize_with="prettyjson"):
+ formula(*test_moniker_b) # let it perform calculus & cache breakdown
+ breakdown_b = formula.breakdown()
- assert breakdown_A != breakdown_B
+ assert breakdown_a != breakdown_b
@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
def test_breakdown_resets_to_null_when_calculus_errors_out(FormulaClass, betamax_recorder):
formula = FormulaClass(betamax_recorder.session)
- test_moniker_A = ("build_metrics", "build times")
- test_moniker_B = ("nonexistent_framework", "nonexistent_suite")
+ test_moniker_a = ("build_metrics", "build times")
+ test_moniker_b = ("nonexistent_framework", "nonexistent_suite")
- cassette_preffix_A = "-".join(filter(None, test_moniker_A))
- cassette_preffix_B = "-".join(filter(None, test_moniker_B))
+ cassette_preffix_a = "-".join(filter(None, test_moniker_a))
+ cassette_preffix_b = "-".join(filter(None, test_moniker_b))
# run happy path calculus
- with betamax_recorder.use_cassette(f"{cassette_preffix_A}", serialize_with="prettyjson"):
- formula(*test_moniker_A) # let it perform calculus & cache breakdown
+ with betamax_recorder.use_cassette(f"{cassette_preffix_a}", serialize_with="prettyjson"):
+ formula(*test_moniker_a) # let it perform calculus & cache breakdown
_ = formula.breakdown()
# now run alternated path calculus
- with betamax_recorder.use_cassette(f"{cassette_preffix_B}", serialize_with="prettyjson"):
+ with betamax_recorder.use_cassette(f"{cassette_preffix_b}", serialize_with="prettyjson"):
with pytest.raises(NoFiledBugs):
- formula(*test_moniker_B) # intentionally blows up while doing calculus
+ formula(*test_moniker_b) # intentionally blows up while doing calculus
# cached breakdown got invalidated & can no longer be obtained
with pytest.raises(RuntimeError):
diff --git a/tests/perfalert/test_alerts.py b/tests/perfalert/test_alerts.py
index c02f80c9abd..1618de35d7e 100644
--- a/tests/perfalert/test_alerts.py
+++ b/tests/perfalert/test_alerts.py
@@ -83,22 +83,22 @@ def test_detect_alerts_in_series(
mock_deviance,
):
base_time = time.time() # generate it based off current time
- INTERVAL = 30
+ interval = 30
_generate_performance_data(
test_repository,
test_perf_signature,
base_time,
1,
0.5,
- int(INTERVAL / 2),
+ int(interval / 2),
)
_generate_performance_data(
test_repository,
test_perf_signature,
base_time,
- int(INTERVAL / 2) + 1,
+ int(interval / 2) + 1,
1.0,
- int(INTERVAL / 2),
+ int(interval / 2),
)
generate_new_alerts_in_series(test_perf_signature)
@@ -107,8 +107,8 @@ def test_detect_alerts_in_series(
assert PerformanceAlertSummary.objects.count() == 1
_verify_alert(
1,
- (INTERVAL / 2) + 1,
- (INTERVAL / 2),
+ (interval / 2) + 1,
+ (interval / 2),
test_perf_signature,
0.5,
1.0,
@@ -125,8 +125,8 @@ def test_detect_alerts_in_series(
assert PerformanceAlertSummary.objects.count() == 1
_verify_alert(
1,
- (INTERVAL / 2) + 1,
- (INTERVAL / 2),
+ (interval / 2) + 1,
+ (interval / 2),
test_perf_signature,
0.5,
1.0,
@@ -142,9 +142,9 @@ def test_detect_alerts_in_series(
test_repository,
test_perf_signature,
base_time,
- (INTERVAL + 1),
+ (interval + 1),
2.0,
- INTERVAL,
+ interval,
)
generate_new_alerts_in_series(test_perf_signature)
@@ -152,8 +152,8 @@ def test_detect_alerts_in_series(
assert PerformanceAlertSummary.objects.count() == 2
_verify_alert(
2,
- INTERVAL + 1,
- INTERVAL,
+ interval + 1,
+ interval,
test_perf_signature,
1.0,
2.0,
@@ -232,22 +232,22 @@ def test_no_alerts_with_old_data(
test_perf_signature,
):
base_time = 0 # 1970, too old!
- INTERVAL = 30
+ interval = 30
_generate_performance_data(
test_repository,
test_perf_signature,
base_time,
1,
0.5,
- int(INTERVAL / 2),
+ int(interval / 2),
)
_generate_performance_data(
test_repository,
test_perf_signature,
base_time,
- int(INTERVAL / 2) + 1,
+ int(interval / 2) + 1,
1.0,
- int(INTERVAL / 2),
+ int(interval / 2),
)
generate_new_alerts_in_series(test_perf_signature)
@@ -269,7 +269,7 @@ def test_custom_alert_threshold(
# under default settings, this set of data would generate
# 2 alerts, but we'll set an artificially high threshold
# of 200% that should only generate 1
- INTERVAL = 60
+ interval = 60
base_time = time.time()
_generate_performance_data(
test_repository,
@@ -277,23 +277,23 @@ def test_custom_alert_threshold(
base_time,
1,
0.5,
- int(INTERVAL / 3),
+ int(interval / 3),
)
_generate_performance_data(
test_repository,
test_perf_signature,
base_time,
- int(INTERVAL / 3) + 1,
+ int(interval / 3) + 1,
0.6,
- int(INTERVAL / 3),
+ int(interval / 3),
)
_generate_performance_data(
test_repository,
test_perf_signature,
base_time,
- 2 * int(INTERVAL / 3) + 1,
+ 2 * int(interval / 3) + 1,
2.0,
- int(INTERVAL / 3),
+ int(interval / 3),
)
generate_new_alerts_in_series(test_perf_signature)
@@ -319,22 +319,22 @@ def test_alert_change_type_absolute(
test_perf_signature.save()
base_time = time.time() # generate it based off current time
- INTERVAL = 30
+ interval = 30
_generate_performance_data(
test_repository,
test_perf_signature,
base_time,
1,
0.5,
- int(INTERVAL / 2),
+ int(interval / 2),
)
_generate_performance_data(
test_repository,
test_perf_signature,
base_time,
- int(INTERVAL / 2) + 1,
+ int(interval / 2) + 1,
new_value,
- int(INTERVAL / 2),
+ int(interval / 2),
)
generate_new_alerts_in_series(test_perf_signature)
diff --git a/tests/perfalert/test_analyze.py b/tests/perfalert/test_analyze.py
index 81a428d58f7..f1c38d1f249 100644
--- a/tests/perfalert/test_analyze.py
+++ b/tests/perfalert/test_analyze.py
@@ -123,10 +123,10 @@ def test_detect_changes_few_revisions_many_values():
def test_detect_changes_historical_data(filename, expected_timestamps):
"""Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
# Configuration for Analyzer
- FORE_WINDOW = 12
- MIN_BACK_WINDOW = 12
- MAX_BACK_WINDOW = 24
- THRESHOLD = 7
+ fore_window = 12
+ min_back_window = 12
+ max_back_window = 24
+ threshold = 7
payload = SampleData.get_perf_data(os.path.join("graphs", filename))
runs = payload["test_runs"]
@@ -134,10 +134,10 @@ def test_detect_changes_historical_data(filename, expected_timestamps):
results = detect_changes(
data,
- min_back_window=MIN_BACK_WINDOW,
- max_back_window=MAX_BACK_WINDOW,
- fore_window=FORE_WINDOW,
- t_threshold=THRESHOLD,
+ min_back_window=min_back_window,
+ max_back_window=max_back_window,
+ fore_window=fore_window,
+ t_threshold=threshold,
)
regression_timestamps = [d.push_timestamp for d in results if d.change_detected]
assert regression_timestamps == expected_timestamps
diff --git a/tests/webapp/api/test_bug_creation.py b/tests/webapp/api/test_bug_creation.py
index 7206d6f504c..72e9dee63b6 100644
--- a/tests/webapp/api/test_bug_creation.py
+++ b/tests/webapp/api/test_bug_creation.py
@@ -18,37 +18,37 @@ def test_bugzilla_components_for_path(client, test_job):
bugzilla_component=BugzillaComponent.objects.last(),
)
- URL_BASE = reverse("bugzilla-component-list")
+ url_base = reverse("bugzilla-component-list")
- EXPECTED_MOCK1 = [{"product": "Mock Product 1", "component": "Mock Component 1"}]
+ expected_mock1 = [{"product": "Mock Product 1", "component": "Mock Component 1"}]
- resp = client.get(URL_BASE + "?path=file_1.extension")
+ resp = client.get(url_base + "?path=file_1.extension")
assert resp.status_code == 200
- assert resp.json() == EXPECTED_MOCK1
+ assert resp.json() == expected_mock1
- resp = client.get(URL_BASE + "?path=file_2.extension")
+ resp = client.get(url_base + "?path=file_2.extension")
assert resp.json() == []
- resp = client.get(URL_BASE + "?path=ile_2.extension")
+ resp = client.get(url_base + "?path=ile_2.extension")
assert resp.json() == []
- resp = client.get(URL_BASE + "?path=file_1")
- assert resp.json() == EXPECTED_MOCK1
+ resp = client.get(url_base + "?path=file_1")
+ assert resp.json() == expected_mock1
- resp = client.get(URL_BASE + "?path=mock/folder/file_1.extension")
- assert resp.json() == EXPECTED_MOCK1
+ resp = client.get(url_base + "?path=mock/folder/file_1.extension")
+ assert resp.json() == expected_mock1
- resp = client.get(URL_BASE + "?path=other_mock/other_folder/file_1.extension")
+ resp = client.get(url_base + "?path=other_mock/other_folder/file_1.extension")
# Should also pass because search falls back to file name if no match for path.
- assert resp.json() == EXPECTED_MOCK1
+ assert resp.json() == expected_mock1
- resp = client.get(URL_BASE + "?path=folder/file_1.extension")
- assert resp.json() == EXPECTED_MOCK1
+ resp = client.get(url_base + "?path=folder/file_1.extension")
+ assert resp.json() == expected_mock1
- resp = client.get(URL_BASE + "?path=folder/file_1.other_extension")
- assert resp.json() == EXPECTED_MOCK1
+ resp = client.get(url_base + "?path=folder/file_1.other_extension")
+ assert resp.json() == expected_mock1
- resp = client.get(URL_BASE + "?path=completely.unrelated")
+ resp = client.get(url_base + "?path=completely.unrelated")
assert resp.json() == []
BugzillaComponent.objects.create(product="Mock Product 1", component="Mock Component 2")
@@ -59,25 +59,25 @@ def test_bugzilla_components_for_path(client, test_job):
bugzilla_component=BugzillaComponent.objects.last(),
)
- EXPECTED_MOCK2 = [{"product": "Mock Product 1", "component": "Mock Component 2"}]
+ expected_mock2 = [{"product": "Mock Product 1", "component": "Mock Component 2"}]
- EXPECTED_MOCK1_MOCK2 = [
+ expected_mock1_mock2 = [
{"product": "Mock Product 1", "component": "Mock Component 1"},
{"product": "Mock Product 1", "component": "Mock Component 2"},
]
- resp = client.get(URL_BASE + "?path=file_1.extension")
- assert resp.json() == EXPECTED_MOCK1_MOCK2
+ resp = client.get(url_base + "?path=file_1.extension")
+ assert resp.json() == expected_mock1_mock2
- resp = client.get(URL_BASE + "?path=mock/folder/file_1.extension")
- assert resp.json() == EXPECTED_MOCK1
+ resp = client.get(url_base + "?path=mock/folder/file_1.extension")
+ assert resp.json() == expected_mock1
- resp = client.get(URL_BASE + "?path=mock/folder_2/file_1.extension")
- assert resp.json() == EXPECTED_MOCK2
+ resp = client.get(url_base + "?path=mock/folder_2/file_1.extension")
+ assert resp.json() == expected_mock2
- resp = client.get(URL_BASE + "?path=other_mock/other_folder/file_1.extension")
+ resp = client.get(url_base + "?path=other_mock/other_folder/file_1.extension")
# Should also pass because search falls back to file name if no match for path.
- assert resp.json() == EXPECTED_MOCK1_MOCK2
+ assert resp.json() == expected_mock1_mock2
BugzillaComponent.objects.create(product="Mock Product 3", component="Mock Component 3")
@@ -87,16 +87,16 @@ def test_bugzilla_components_for_path(client, test_job):
bugzilla_component=BugzillaComponent.objects.last(),
)
- EXPECTED_MOCK3 = [{"product": "Mock Product 3", "component": "Mock Component 3"}]
+ expected_mock3 = [{"product": "Mock Product 3", "component": "Mock Component 3"}]
- resp = client.get(URL_BASE + "?path=other.file.js")
- assert resp.json() == EXPECTED_MOCK3
+ resp = client.get(url_base + "?path=other.file.js")
+ assert resp.json() == expected_mock3
- resp = client.get(URL_BASE + "?path=other.file")
- assert resp.json() == EXPECTED_MOCK3
+ resp = client.get(url_base + "?path=other.file")
+ assert resp.json() == expected_mock3
- resp = client.get(URL_BASE + "?path=other")
- assert resp.json() == EXPECTED_MOCK3
+ resp = client.get(url_base + "?path=other")
+ assert resp.json() == expected_mock3
BugzillaComponent.objects.create(product="Mock Product 4", component="Mock Component 4")
@@ -106,23 +106,23 @@ def test_bugzilla_components_for_path(client, test_job):
bugzilla_component=BugzillaComponent.objects.last(),
)
- EXPECTED_MOCK4 = [{"product": "Mock Product 4", "component": "Mock Component 4"}]
+ expected_mock4 = [{"product": "Mock Product 4", "component": "Mock Component 4"}]
- EXPECTED_MOCK3_MOCK4 = [
+ expected_mock3_mock4 = [
{"product": "Mock Product 3", "component": "Mock Component 3"},
{"product": "Mock Product 4", "component": "Mock Component 4"},
]
- resp = client.get(URL_BASE + "?path=other.file.js")
- assert resp.json() == EXPECTED_MOCK3
+ resp = client.get(url_base + "?path=other.file.js")
+ assert resp.json() == expected_mock3
- resp = client.get(URL_BASE + "?path=other.extension")
- assert resp.json() == EXPECTED_MOCK4
+ resp = client.get(url_base + "?path=other.extension")
+ assert resp.json() == expected_mock4
- resp = client.get(URL_BASE + "?path=other")
- assert resp.json() == EXPECTED_MOCK3_MOCK4
+ resp = client.get(url_base + "?path=other")
+ assert resp.json() == expected_mock3_mock4
- resp = client.get(URL_BASE + "?path=another")
+ resp = client.get(url_base + "?path=another")
assert resp.json() == []
BugzillaComponent.objects.create(
@@ -166,19 +166,19 @@ def test_bugzilla_components_for_path(client, test_job):
bugzilla_component=BugzillaComponent.objects.last(),
)
- EXPECTED_MOCK_ORG_MOZILLA = [
+ expected_mock_org_mozilla = [
{
"product": "Mock Product org.mozilla.*.",
"component": "Mock Component File Match",
}
]
- resp = client.get(URL_BASE + "?path=org.mozilla.geckoview.test.MockTestName#Subtest")
- assert resp.json() == EXPECTED_MOCK_ORG_MOZILLA
+ resp = client.get(url_base + "?path=org.mozilla.geckoview.test.MockTestName#Subtest")
+ assert resp.json() == expected_mock_org_mozilla
# Only take test name into account.
- resp = client.get(URL_BASE + "?path=org.mozilla.otherproduct.otherfolder.MockTestName")
- assert resp.json() == EXPECTED_MOCK_ORG_MOZILLA
+ resp = client.get(url_base + "?path=org.mozilla.otherproduct.otherfolder.MockTestName")
+ assert resp.json() == expected_mock_org_mozilla
BugzillaComponent.objects.create(product="Testing", component="Mochitest")
@@ -189,5 +189,5 @@ def test_bugzilla_components_for_path(client, test_job):
)
# Respect the ignore list of product and component combinations.
- resp = client.get(URL_BASE + "?path=mock/mochitest/mochitest.test")
+ resp = client.get(url_base + "?path=mock/mochitest/mochitest.test")
assert resp.json() == []
diff --git a/treeherder/etl/bugzilla.py b/treeherder/etl/bugzilla.py
index 535cf34b564..a8da3d198fc 100644
--- a/treeherder/etl/bugzilla.py
+++ b/treeherder/etl/bugzilla.py
@@ -27,9 +27,9 @@ def reopen_intermittent_bugs():
)
# Intermittent bugs get closed after 3 weeks of inactivity if other conditions don't apply:
    # https://github.com/mozilla/relman-auto-nag/blob/c7439e247677333c1cd8c435234b3ef3adc49680/auto_nag/scripts/close_intermittents.py#L17
- RECENT_DAYS = 7
+ recent_days = 7
recently_used_bugs = set(
- BugJobMap.objects.filter(created__gt=datetime.now() - timedelta(RECENT_DAYS)).values_list(
+ BugJobMap.objects.filter(created__gt=datetime.now() - timedelta(recent_days)).values_list(
"bug_id", flat=True
)
)
diff --git a/treeherder/etl/management/commands/ingest.py b/treeherder/etl/management/commands/ingest.py
index 366d0042cfc..78a77f474c4 100644
--- a/treeherder/etl/management/commands/ingest.py
+++ b/treeherder/etl/management/commands/ingest.py
@@ -118,8 +118,8 @@ async def ingest_task(taskId, root_url):
# Remove default timeout limit of 5 minutes
timeout = aiohttp.ClientTimeout(total=0)
async with taskcluster.aio.createSession(connector=conn, timeout=timeout) as session:
- asyncQueue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
- results = await asyncio.gather(asyncQueue.status(taskId), asyncQueue.task(taskId))
+ async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
+ results = await asyncio.gather(async_queue.status(taskId), async_queue.task(taskId))
await handleTask(
{
"status": results[0]["status"],
@@ -130,7 +130,7 @@ async def ingest_task(taskId, root_url):
async def handleTask(task, root_url):
- taskId = task["status"]["taskId"]
+ task_id = task["status"]["taskId"]
runs = task["status"]["runs"]
# If we iterate in order of the runs, we will not be able to mark older runs as
# "retry" instead of exception
@@ -139,7 +139,7 @@ async def handleTask(task, root_url):
"exchange": stateToExchange[run["state"]],
"payload": {
"status": {
- "taskId": taskId,
+ "taskId": task_id,
"runs": runs,
},
"runId": run["runId"],
@@ -148,35 +148,35 @@ async def handleTask(task, root_url):
}
try:
- taskRuns = await handleMessage(message, task["task"])
+ task_runs = await handleMessage(message, task["task"])
except Exception as e:
logger.exception(e)
- if taskRuns:
+ if task_runs:
# Schedule and run jobs inside the thread pool executor
- jobFutures = [
- routine_to_future(process_job_with_threads, run, root_url) for run in taskRuns
+ job_futures = [
+ routine_to_future(process_job_with_threads, run, root_url) for run in task_runs
]
- await await_futures(jobFutures)
+ await await_futures(job_futures)
async def fetchGroupTasks(taskGroupId, root_url):
tasks = []
query = {}
- continuationToken = ""
+ continuation_token = ""
# Limiting the connection pool just in case we have too many
conn = aiohttp.TCPConnector(limit=10)
# Remove default timeout limit of 5 minutes
timeout = aiohttp.ClientTimeout(total=0)
async with taskcluster.aio.createSession(connector=conn, timeout=timeout) as session:
- asyncQueue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
+ async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
while True:
- if continuationToken:
- query = {"continuationToken": continuationToken}
- response = await asyncQueue.listTaskGroup(taskGroupId, query=query)
+ if continuation_token:
+ query = {"continuationToken": continuation_token}
+ response = await async_queue.listTaskGroup(taskGroupId, query=query)
tasks.extend(response["tasks"])
- continuationToken = response.get("continuationToken")
- if continuationToken is None:
+ continuation_token = response.get("continuationToken")
+ if continuation_token is None:
break
logger.info("Requesting more tasks. %s tasks so far...", len(tasks))
return tasks
@@ -193,8 +193,8 @@ async def processTasks(taskGroupId, root_url):
return
# Schedule and run tasks inside the thread pool executor
- taskFutures = [routine_to_future(handleTask, task, root_url) for task in tasks]
- await await_futures(taskFutures)
+ task_futures = [routine_to_future(handleTask, task, root_url) for task in tasks]
+ await await_futures(task_futures)
async def routine_to_future(func, *args):
@@ -249,12 +249,12 @@ def get_decision_task_id(project, revision, root_url):
def repo_meta(project):
_repo = Repository.objects.filter(name=project)[0]
assert _repo, f"The project {project} you specified is incorrect"
- splitUrl = _repo.url.split("/")
+ split_url = _repo.url.split("/")
return {
"url": _repo.url,
"branch": _repo.branch,
- "owner": splitUrl[3],
- "repo": splitUrl[4],
+ "owner": split_url[3],
+ "repo": split_url[4],
"tc_root_url": _repo.tc_root_url,
}
@@ -270,16 +270,16 @@ def query_data(repo_meta, commit):
event_base_sha = repo_meta["branch"]
# First we try with `master` being the base sha
# e.g. https://api.github.com/repos/servo/servo/compare/master...1418c0555ff77e5a3d6cf0c6020ba92ece36be2e
- compareResponse = github.compare_shas(
+ compare_response = github.compare_shas(
repo_meta["owner"], repo_meta["repo"], repo_meta["branch"], commit
)
- merge_base_commit = compareResponse.get("merge_base_commit")
+ merge_base_commit = compare_response.get("merge_base_commit")
if merge_base_commit:
commiter_date = merge_base_commit["commit"]["committer"]["date"]
# Since we don't use PushEvents that contain the "before" or "event.base.sha" fields [1]
# we need to discover the right parent which existed in the base branch.
# [1] https://github.com/taskcluster/taskcluster/blob/3dda0adf85619d18c5dcf255259f3e274d2be346/services/github/src/api.js#L55
- parents = compareResponse["merge_base_commit"]["parents"]
+ parents = compare_response["merge_base_commit"]["parents"]
if len(parents) == 1:
parent = parents[0]
commit_info = fetch_json(parent["url"])
@@ -301,12 +301,12 @@ def query_data(repo_meta, commit):
assert event_base_sha != repo_meta["branch"]
logger.info("We have a new base: %s", event_base_sha)
# When using the correct event_base_sha the "commits" field will be correct
- compareResponse = github.compare_shas(
+ compare_response = github.compare_shas(
repo_meta["owner"], repo_meta["repo"], event_base_sha, commit
)
commits = []
- for _commit in compareResponse["commits"]:
+ for _commit in compare_response["commits"]:
commits.append(
{
"message": _commit["commit"]["message"],
@@ -453,7 +453,7 @@ def add_arguments(self, parser):
def handle(self, *args, **options):
loop = asyncio.get_event_loop()
- typeOfIngestion = options["ingestion_type"][0]
+ type_of_ingestion = options["ingestion_type"][0]
root_url = options["root_url"]
if not options["enable_eager_celery"]:
@@ -462,22 +462,22 @@ def handle(self, *args, **options):
# Make sure all tasks are run synchronously / immediately
settings.CELERY_TASK_ALWAYS_EAGER = True
- if typeOfIngestion == "task":
+ if type_of_ingestion == "task":
assert options["taskId"]
loop.run_until_complete(ingest_task(options["taskId"], root_url))
- elif typeOfIngestion == "prUrl":
+ elif type_of_ingestion == "prUrl":
assert options["prUrl"]
ingest_pr(options["prUrl"], root_url)
- elif typeOfIngestion.find("git") > -1:
+ elif type_of_ingestion.find("git") > -1:
if not os.environ.get("GITHUB_TOKEN"):
logger.warning(
"If you don't set up GITHUB_TOKEN you might hit Github's rate limiting. See docs for info."
)
- if typeOfIngestion == "git-push":
+ if type_of_ingestion == "git-push":
ingest_push(options["project"], options["commit"])
- elif typeOfIngestion == "git-pushes":
+ elif type_of_ingestion == "git-pushes":
ingest_git_pushes(options["project"], options["dryRun"])
- elif typeOfIngestion == "push":
+ elif type_of_ingestion == "push":
ingest_hg_push(options)
else:
raise Exception("Please check the code for valid ingestion types.")
diff --git a/treeherder/etl/taskcluster_pulse/handler.py b/treeherder/etl/taskcluster_pulse/handler.py
index a2cfc15692c..0b931a7304d 100644
--- a/treeherder/etl/taskcluster_pulse/handler.py
+++ b/treeherder/etl/taskcluster_pulse/handler.py
@@ -38,17 +38,17 @@ def stateFromRun(jobRun):
def resultFromRun(jobRun):
- RUN_TO_RESULT = {
+ run_to_result = {
"completed": "success",
"failed": "fail",
}
state = jobRun["state"]
- if state in list(RUN_TO_RESULT.keys()):
- return RUN_TO_RESULT[state]
+ if state in list(run_to_result.keys()):
+ return run_to_result[state]
elif state == "exception":
- reasonResolved = jobRun.get("reasonResolved")
- if reasonResolved in ["canceled", "superseded"]:
- return reasonResolved
+ reason_resolved = jobRun.get("reasonResolved")
+ if reason_resolved in ["canceled", "superseded"]:
+ return reason_resolved
return "exception"
else:
return "unknown"
@@ -57,12 +57,12 @@ def resultFromRun(jobRun):
# Creates a log entry for Treeherder to retrieve and parse. This log is
# displayed on the Treeherder Log Viewer once parsed.
def createLogReference(root_url, taskId, runId):
- logUrl = taskcluster_urls.api(
+ log_url = taskcluster_urls.api(
root_url, "queue", "v1", "task/{taskId}/runs/{runId}/artifacts/public/logs/live_backing.log"
).format(taskId=taskId, runId=runId)
return {
"name": "live_backing_log",
- "url": logUrl,
+ "url": log_url,
}
@@ -71,27 +71,27 @@ def createLogReference(root_url, taskId, runId):
# Treeherder job message.
# TODO: Refactor https://bugzilla.mozilla.org/show_bug.cgi?id=1560596
def parseRouteInfo(prefix, taskId, routes, task):
- matchingRoutes = list(filter(lambda route: route.split(".")[0] == "tc-treeherder", routes))
+ matching_routes = list(filter(lambda route: route.split(".")[0] == "tc-treeherder", routes))
- if len(matchingRoutes) != 1:
+ if len(matching_routes) != 1:
raise PulseHandlerError(
"Could not determine Treeherder route. Either there is no route, "
+ "or more than one matching route exists."
+ f"Task ID: {taskId} Routes: {routes}"
)
- parsedRoute = parseRoute(matchingRoutes[0])
+ parsed_route = parseRoute(matching_routes[0])
- return parsedRoute
+ return parsed_route
def validateTask(task):
- treeherderMetadata = task.get("extra", {}).get("treeherder")
- if not treeherderMetadata:
+ treeherder_metadata = task.get("extra", {}).get("treeherder")
+ if not treeherder_metadata:
logger.debug("Task metadata is missing Treeherder job configuration.")
return False
try:
- jsonschema.validate(treeherderMetadata, get_json_schema("task-treeherder-config.yml"))
+ jsonschema.validate(treeherder_metadata, get_json_schema("task-treeherder-config.yml"))
except (jsonschema.ValidationError, jsonschema.SchemaError) as e:
logger.error("JSON Schema validation error during Taskcluser message ingestion: %s", e)
return False
@@ -169,26 +169,26 @@ def ignore_task(task, taskId, rootUrl, project):
async def handleMessage(message, taskDefinition=None):
async with taskcluster.aio.createSession() as session:
jobs = []
- taskId = message["payload"]["status"]["taskId"]
- asyncQueue = taskcluster.aio.Queue({"rootUrl": message["root_url"]}, session=session)
- task = (await asyncQueue.task(taskId)) if not taskDefinition else taskDefinition
+ task_id = message["payload"]["status"]["taskId"]
+ async_queue = taskcluster.aio.Queue({"rootUrl": message["root_url"]}, session=session)
+ task = (await async_queue.task(task_id)) if not taskDefinition else taskDefinition
try:
- parsedRoute = parseRouteInfo("tc-treeherder", taskId, task["routes"], task)
+ parsed_route = parseRouteInfo("tc-treeherder", task_id, task["routes"], task)
except PulseHandlerError as e:
logger.debug("%s", str(e))
return jobs
- if ignore_task(task, taskId, message["root_url"], parsedRoute["project"]):
+ if ignore_task(task, task_id, message["root_url"], parsed_route["project"]):
return jobs
- logger.debug("Message received for task %s", taskId)
+ logger.debug("Message received for task %s", task_id)
# Validation failures are common and logged, so do nothing more.
if not validateTask(task):
return jobs
- taskType = EXCHANGE_EVENT_MAP.get(message["exchange"])
+ task_type = EXCHANGE_EVENT_MAP.get(message["exchange"])
# Originally this code was only within the "pending" case, however, in order to support
# ingesting all tasks at once which might not have "pending" case
@@ -196,18 +196,18 @@ async def handleMessage(message, taskDefinition=None):
# This will only work if the previous run has not yet been processed by Treeherder
# since _remove_existing_jobs() will prevent it
if message["payload"]["runId"] > 0:
- jobs.append(await handleTaskRerun(parsedRoute, task, message, session))
+ jobs.append(await handleTaskRerun(parsed_route, task, message, session))
- if not taskType:
+ if not task_type:
raise Exception("Unknown exchange: {exchange}".format(exchange=message["exchange"]))
- elif taskType == "pending":
- jobs.append(handleTaskPending(parsedRoute, task, message))
- elif taskType == "running":
- jobs.append(handleTaskRunning(parsedRoute, task, message))
- elif taskType in ("completed", "failed"):
- jobs.append(await handleTaskCompleted(parsedRoute, task, message, session))
- elif taskType == "exception":
- jobs.append(await handleTaskException(parsedRoute, task, message, session))
+ elif task_type == "pending":
+ jobs.append(handleTaskPending(parsed_route, task, message))
+ elif task_type == "running":
+ jobs.append(handleTaskRunning(parsed_route, task, message))
+ elif task_type in ("completed", "failed"):
+ jobs.append(await handleTaskCompleted(parsed_route, task, message, session))
+ elif task_type == "exception":
+ jobs.append(await handleTaskException(parsed_route, task, message, session))
return jobs
@@ -218,30 +218,30 @@ async def handleMessage(message, taskDefinition=None):
# Specific handlers for each message type will add/remove information necessary
# for the type of task event..
def buildMessage(pushInfo, task, runId, payload):
- taskId = payload["status"]["taskId"]
- jobRun = payload["status"]["runs"][runId]
- treeherderConfig = task["extra"]["treeherder"]
+ task_id = payload["status"]["taskId"]
+ job_run = payload["status"]["runs"][runId]
+ treeherder_config = task["extra"]["treeherder"]
job = {
"buildSystem": "taskcluster",
"owner": task["metadata"]["owner"],
- "taskId": f"{slugid.decode(taskId)}/{runId}",
+ "taskId": f"{slugid.decode(task_id)}/{runId}",
"retryId": runId,
"isRetried": False,
"display": {
# jobSymbols could be an integer (i.e. Chunk ID) but need to be strings
# for treeherder
- "jobSymbol": str(treeherderConfig["symbol"]),
- "groupSymbol": treeherderConfig.get("groupSymbol", "?"),
+ "jobSymbol": str(treeherder_config["symbol"]),
+ "groupSymbol": treeherder_config.get("groupSymbol", "?"),
# Maximum job name length is 140 chars...
"jobName": task["metadata"]["name"][0:139],
},
- "state": stateFromRun(jobRun),
- "result": resultFromRun(jobRun),
- "tier": treeherderConfig.get("tier", 1),
+ "state": stateFromRun(job_run),
+ "result": resultFromRun(job_run),
+ "tier": treeherder_config.get("tier", 1),
"timeScheduled": task["created"],
- "jobKind": treeherderConfig.get("jobKind", "other"),
- "reason": treeherderConfig.get("reason", "scheduled"),
+ "jobKind": treeherder_config.get("jobKind", "other"),
+ "reason": treeherder_config.get("reason", "scheduled"),
"jobInfo": {
"links": [],
"summary": task["metadata"]["description"],
@@ -263,28 +263,28 @@ def buildMessage(pushInfo, task, runId, payload):
# Transform "collection" into an array of labels if task doesn't
# define "labels".
- labels = treeherderConfig.get("labels", [])
+ labels = treeherder_config.get("labels", [])
if not labels:
- if not treeherderConfig.get("collection"):
+ if not treeherder_config.get("collection"):
labels = ["opt"]
else:
- labels = list(treeherderConfig["collection"].keys())
+ labels = list(treeherder_config["collection"].keys())
job["labels"] = labels
- machine = treeherderConfig.get("machine", {})
+ machine = treeherder_config.get("machine", {})
job["buildMachine"] = {
- "name": jobRun.get("workerId", "unknown"),
+ "name": job_run.get("workerId", "unknown"),
"platform": machine.get("platform", task["workerType"]),
"os": machine.get("os", "-"),
"architecture": machine.get("architecture", "-"),
}
- if treeherderConfig.get("productName"):
- job["productName"] = treeherderConfig["productName"]
+ if treeherder_config.get("productName"):
+ job["productName"] = treeherder_config["productName"]
- if treeherderConfig.get("groupName"):
- job["display"]["groupName"] = treeherderConfig["groupName"]
+ if treeherder_config.get("groupName"):
+ job["display"]["groupName"] = treeherder_config["groupName"]
return job
@@ -318,13 +318,13 @@ def handleTaskRunning(pushInfo, task, message):
async def handleTaskCompleted(pushInfo, task, message, session):
payload = message["payload"]
- jobRun = payload["status"]["runs"][payload["runId"]]
+ job_run = payload["status"]["runs"][payload["runId"]]
job = buildMessage(pushInfo, task, payload["runId"], payload)
- job["timeStarted"] = jobRun["started"]
- job["timeCompleted"] = jobRun["resolved"]
+ job["timeStarted"] = job_run["started"]
+ job["timeCompleted"] = job_run["resolved"]
job["logs"] = [
- createLogReference(message["root_url"], payload["status"]["taskId"], jobRun["runId"]),
+ createLogReference(message["root_url"], payload["status"]["taskId"], job_run["runId"]),
]
job = await addArtifactUploadedLinks(
message["root_url"], payload["status"]["taskId"], payload["runId"], job, session
@@ -334,17 +334,17 @@ async def handleTaskCompleted(pushInfo, task, message, session):
async def handleTaskException(pushInfo, task, message, session):
payload = message["payload"]
- jobRun = payload["status"]["runs"][payload["runId"]]
+ job_run = payload["status"]["runs"][payload["runId"]]
# Do not report runs that were created as an exception. Such cases
# are deadline-exceeded
- if jobRun["reasonCreated"] == "exception":
+ if job_run["reasonCreated"] == "exception":
return
job = buildMessage(pushInfo, task, payload["runId"], payload)
# Jobs that get cancelled before running don't have a started time
- if jobRun.get("started"):
- job["timeStarted"] = jobRun["started"]
- job["timeCompleted"] = jobRun["resolved"]
+ if job_run.get("started"):
+ job["timeStarted"] = job_run["started"]
+ job["timeCompleted"] = job_run["resolved"]
# exceptions generally have no logs, so in the interest of not linking to a 404'ing artifact,
# don't include a link
job["logs"] = []
@@ -355,21 +355,21 @@ async def handleTaskException(pushInfo, task, message, session):
async def fetchArtifacts(root_url, taskId, runId, session):
- asyncQueue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
- res = await asyncQueue.listArtifacts(taskId, runId)
+ async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
+ res = await async_queue.listArtifacts(taskId, runId)
artifacts = res["artifacts"]
- continuationToken = res.get("continuationToken")
- while continuationToken is not None:
+ continuation_token = res.get("continuationToken")
+ while continuation_token is not None:
continuation = {"continuationToken": res["continuationToken"]}
try:
- res = await asyncQueue.listArtifacts(taskId, runId, continuation)
+ res = await async_queue.listArtifacts(taskId, runId, continuation)
except Exception:
break
artifacts = artifacts.concat(res["artifacts"])
- continuationToken = res.get("continuationToken")
+ continuation_token = res.get("continuationToken")
return artifacts
diff --git a/treeherder/etl/taskcluster_pulse/parse_route.py b/treeherder/etl/taskcluster_pulse/parse_route.py
index 49a95f2977e..b4c1a15da7d 100644
--- a/treeherder/etl/taskcluster_pulse/parse_route.py
+++ b/treeherder/etl/taskcluster_pulse/parse_route.py
@@ -14,28 +14,28 @@
def parseRoute(route):
id = None
owner = None
- parsedProject = None
- parsedRoute = route.split(".")
- project = parsedRoute[2]
+ parsed_project = None
+ parsed_route = route.split(".")
+ project = parsed_route[2]
if len(project.split("/")) == 2:
- [owner, parsedProject] = project.split("/")
+ [owner, parsed_project] = project.split("/")
else:
- parsedProject = project
+ parsed_project = project
- if len(parsedRoute) == 5:
- id = parsedRoute[4]
+ if len(parsed_route) == 5:
+ id = parsed_route[4]
- pushInfo = {
- "destination": parsedRoute[0],
+ push_info = {
+ "destination": parsed_route[0],
"id": int(id) if id else 0,
- "project": parsedProject,
- "revision": parsedRoute[3],
+ "project": parsed_project,
+ "revision": parsed_route[3],
}
- if owner and parsedProject:
- pushInfo["owner"] = owner
- pushInfo["origin"] = "github.com"
+ if owner and parsed_project:
+ push_info["owner"] = owner
+ push_info["origin"] = "github.com"
else:
- pushInfo["origin"] = "hg.mozilla.org"
+ push_info["origin"] = "hg.mozilla.org"
- return pushInfo
+ return push_info
diff --git a/treeherder/middleware.py b/treeherder/middleware.py
index 9dd983b7a35..320228fdf95 100644
--- a/treeherder/middleware.py
+++ b/treeherder/middleware.py
@@ -37,8 +37,8 @@ def add_headers_function(headers, path, url):
if report_uri not in CSP_DIRECTIVES:
CSP_DIRECTIVES.append(report_uri)
- CSP_HEADER = "; ".join(CSP_DIRECTIVES)
- headers["Content-Security-Policy"] = CSP_HEADER
+ csp_header = "; ".join(CSP_DIRECTIVES)
+ headers["Content-Security-Policy"] = csp_header
class CustomWhiteNoise(WhiteNoiseMiddleware):
diff --git a/treeherder/model/error_summary.py b/treeherder/model/error_summary.py
index 5c6278a05de..d174fda4531 100644
--- a/treeherder/model/error_summary.py
+++ b/treeherder/model/error_summary.py
@@ -48,8 +48,8 @@ def get_error_summary(job, queryset=None):
dates = list(line_cache.keys())
dates.sort()
for d in dates:
- dTime = datetime.datetime.strptime(d, "%Y-%m-%d")
- if dTime <= (job.submit_time - datetime.timedelta(days=LINE_CACHE_TIMEOUT_DAYS)):
+ date_time = datetime.datetime.strptime(d, "%Y-%m-%d")
+ if date_time <= (job.submit_time - datetime.timedelta(days=LINE_CACHE_TIMEOUT_DAYS)):
del line_cache[d]
else:
break
diff --git a/treeherder/perf/management/commands/create_test_perf_data.py b/treeherder/perf/management/commands/create_test_perf_data.py
index 79c0d2514d4..d4afbb7be68 100644
--- a/treeherder/perf/management/commands/create_test_perf_data.py
+++ b/treeherder/perf/management/commands/create_test_perf_data.py
@@ -28,7 +28,7 @@ def handle(self, *args, **options):
# verbose, so let's do that programmatically
s = PerformanceSignature.objects.get(id=1)
PerformanceDatum.objects.filter(signature=s).delete()
- INTERVAL = 30
+ interval = 30
now = time.time()
# create a push first as need a push_id
@@ -40,8 +40,8 @@ def handle(self, *args, **options):
)
for t, v in zip(
- [i for i in range(INTERVAL)],
- ([0.5 for i in range(int(INTERVAL / 2))] + [1.0 for i in range(int(INTERVAL / 2))]),
+ [i for i in range(interval)],
+ ([0.5 for i in range(int(interval / 2))] + [1.0 for i in range(int(interval / 2))]),
):
PerformanceDatum.objects.create(
repository=s.repository,
diff --git a/treeherder/push_health/compare.py b/treeherder/push_health/compare.py
index 31da22e8708..140b16f393a 100644
--- a/treeherder/push_health/compare.py
+++ b/treeherder/push_health/compare.py
@@ -10,14 +10,14 @@ def get_commit_history(repository, revision, push):
from mozci.push import Push as MozciPush
from mozci.errors import ParentPushNotFound
- mozciPush = MozciPush([revision], repository.name)
+ mozci_push = MozciPush([revision], repository.name)
parent = None
parent_sha = None
parent_repo = None
parent_push = None
try:
- parent = mozciPush.parent
+ parent = mozci_push.parent
except ParentPushNotFound:
pass
diff --git a/treeherder/push_health/tests.py b/treeherder/push_health/tests.py
index d7b11a4a597..a2b92ac04d6 100644
--- a/treeherder/push_health/tests.py
+++ b/treeherder/push_health/tests.py
@@ -73,7 +73,7 @@ def get_history(
# For each failure item in ``tests``, we group all jobs of the exact same type into
# a field called `jobs`. So it has passed and failed jobs in there.
#
-def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
+def get_current_test_failures(push, option_map, jobs, investigated_tests=None):
# Using .distinct() here would help by removing duplicate FailureLines
# for the same job (with different sub-tests), but it's only supported by
# postgres. Just using .distinct() has no effect.
@@ -107,19 +107,19 @@ def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
all_failed_jobs[job.id] = job
# The 't' ensures the key starts with a character, as required for a query selector
test_key = re.sub(r"\W+", "", f"t{test_name}{config}{platform}{job_name}{job_group}")
- isClassifiedIntermittent = any(
+ is_classified_intermittent = any(
job["failure_classification_id"] == 4 for job in jobs[job_name]
)
- isInvestigated = False
- investigatedTestId = None
- for investigatedTest in investigatedTests:
+ is_investigated = False
+ investigated_test_id = None
+ for investigated_test in investigated_tests:
if (
- investigatedTest.test == test_name
- and job.job_type.id == investigatedTest.job_type.id
+ investigated_test.test == test_name
+ and job.job_type.id == investigated_test.job_type.id
):
- isInvestigated = True
- investigatedTestId = investigatedTest.id
+ is_investigated = True
+ investigated_test_id = investigated_test.id
break
if test_key not in tests:
@@ -140,16 +140,16 @@ def get_current_test_failures(push, option_map, jobs, investigatedTests=None):
"totalFailures": 0,
"totalJobs": 0,
"failedInParent": False,
- "isClassifiedIntermittent": isClassifiedIntermittent,
- "isInvestigated": isInvestigated,
- "investigatedTestId": investigatedTestId,
+ "isClassifiedIntermittent": is_classified_intermittent,
+ "isInvestigated": is_investigated,
+ "investigatedTestId": investigated_test_id,
}
tests[test_key] = line
- countJobs = len(
+ count_jobs = len(
list(filter(lambda x: x["result"] in ["success", "testfailed"], jobs[job_name]))
)
tests[test_key]["totalFailures"] += 1
- tests[test_key]["totalJobs"] = countJobs
+ tests[test_key]["totalJobs"] = count_jobs
# Each line of the sorted list that is returned here represents one test file per platform/
# config. Each line will have at least one failing job, but may have several
@@ -232,13 +232,13 @@ def get_test_failures(
fixed_by_commit_history = get_history(
2, push_date, fixed_by_commit_history_days, option_map, repository_ids
)
- investigatedTests = InvestigatedTests.objects.filter(push=push)
+ investigated_tests = InvestigatedTests.objects.filter(push=push)
# ``push_failures`` are tests that have FailureLine records created by our Log Parser.
# These are tests we are able to show to examine to see if we can determine they are
# intermittent. If they are not, we tell the user they need investigation.
# These are failures ONLY for the current push, not relative to history.
- push_failures = get_current_test_failures(push, option_map, jobs, investigatedTests)
+ push_failures = get_current_test_failures(push, option_map, jobs, investigated_tests)
filtered_push_failures = [failure for failure in push_failures if filter_failure(failure)]
# Based on the intermittent and FixedByCommit history, set the appropriate classification
diff --git a/treeherder/webapp/api/bug_creation.py b/treeherder/webapp/api/bug_creation.py
index 55b9b12e63d..0fc78c07186 100644
--- a/treeherder/webapp/api/bug_creation.py
+++ b/treeherder/webapp/api/bug_creation.py
@@ -19,11 +19,11 @@ def filter_product_component(self, queryset):
# combinations can be in the failure line, it might not be a test and
# the real issue gets logged earlier but not detected as failure line.
# Require user input for the product and component to use.
- IGNORE_LIST_PRODUCT_COMPONENT = [
+ ignore_list_product_component = [
{product: "Testing", component: "Mochitest"},
]
for product_component in queryset:
- if product_component not in IGNORE_LIST_PRODUCT_COMPONENT:
+ if product_component not in ignore_list_product_component:
filtered_queryset.append(product_component)
return filtered_queryset[:5]
@@ -40,8 +40,8 @@ def get_queryset(self):
# Drop parameters
path = (path.split("?"))[0]
file = (path.split("/"))[-1]
- fileNameParts = file.split(".")
- file_without_extension = fileNameParts[0] + ("." if len(fileNameParts) > 1 else "")
+ file_name_parts = file.split(".")
+ file_without_extension = file_name_parts[0] + ("." if len(file_name_parts) > 1 else "")
queryset = (
FilesBugzillaMap.objects.select_related("bugzilla_component")
.filter(path__endswith=path)
diff --git a/treeherder/webapp/api/investigated_test.py b/treeherder/webapp/api/investigated_test.py
index 580750428e3..4f5d8e4ecaa 100644
--- a/treeherder/webapp/api/investigated_test.py
+++ b/treeherder/webapp/api/investigated_test.py
@@ -35,13 +35,13 @@ def create(self, request, *args, **kwargs):
project = kwargs["project"]
revision = request.query_params.get("revision")
test = request.data["test"]
- jobName = request.data["jobName"]
- jobSymbol = request.data["jobSymbol"]
+ job_name = request.data["jobName"]
+ job_symbol = request.data["jobSymbol"]
try:
repository = Repository.objects.get(name=project)
push = Push.objects.get(revision=revision, repository=repository)
- job_type = JobType.objects.get(name=jobName, symbol=jobSymbol)
+ job_type = JobType.objects.get(name=job_name, symbol=job_symbol)
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(push=push, job_type=job_type, test=test)
@@ -54,7 +54,7 @@ def create(self, request, *args, **kwargs):
return Response(f"No push with revision: {revision}", status=HTTP_404_NOT_FOUND)
except JobType.DoesNotExist:
- return Response(f"No JobType with job name: {jobName}", status=HTTP_404_NOT_FOUND)
+ return Response(f"No JobType with job name: {job_name}", status=HTTP_404_NOT_FOUND)
def destroy(self, request, project, pk=None):
try:
diff --git a/treeherder/webapp/api/jobs.py b/treeherder/webapp/api/jobs.py
index 6eac51608ab..ca6a7bdfb95 100644
--- a/treeherder/webapp/api/jobs.py
+++ b/treeherder/webapp/api/jobs.py
@@ -313,7 +313,7 @@ def list(self, request, project):
- count (10)
- return_type (dict)
"""
- MAX_JOBS_COUNT = 2000
+ max_jobs_count = 2000
filter_params = {}
@@ -348,8 +348,8 @@ def list(self, request, project):
return Response("Invalid value for offset or count", status=HTTP_400_BAD_REQUEST)
return_type = filter_params.get("return_type", "dict").lower()
- if count > MAX_JOBS_COUNT:
- msg = f"Specified count exceeds API MAX_JOBS_COUNT value: {MAX_JOBS_COUNT}"
+ if count > max_jobs_count:
+ msg = f"Specified count exceeds API MAX_JOBS_COUNT value: {max_jobs_count}"
return Response({"detail": msg}, status=HTTP_400_BAD_REQUEST)
try:
diff --git a/treeherder/webapp/api/push.py b/treeherder/webapp/api/push.py
index 212e14f006c..f5759e2af22 100644
--- a/treeherder/webapp/api/push.py
+++ b/treeherder/webapp/api/push.py
@@ -35,7 +35,7 @@ def list(self, request, project):
GET method for list of ``push`` records with revisions
"""
# What is the upper limit on the number of pushes returned by the api
- MAX_PUSH_COUNT = 1000
+ max_push_count = 1000
# make a mutable copy of these params
filter_params = request.query_params.copy()
@@ -167,8 +167,8 @@ def list(self, request, project):
except ValueError:
return Response({"detail": "Valid count value required"}, status=HTTP_400_BAD_REQUEST)
- if count > MAX_PUSH_COUNT:
- msg = f"Specified count exceeds api limit: {MAX_PUSH_COUNT}"
+ if count > max_push_count:
+ msg = f"Specified count exceeds api limit: {max_push_count}"
return Response({"detail": msg}, status=HTTP_400_BAD_REQUEST)
# we used to have a "full" parameter for this endpoint so you could
From fc15092541dec50b61ca1e4baff969ec604d34e3 Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Mon, 26 Feb 2024 18:37:48 +0100
Subject: [PATCH 045/128] N803: argument name should be lowercase
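A minimal sketch of what N803 flags, using invented function and argument
names purely for illustration (they are not from this codebase):
    # Flagged by N803: camelCase argument name.
    def schedule_job_flagged(taskId):
        return {"task_id": taskId}
    # Compliant: snake_case argument name, identical behaviour.
    def schedule_job(task_id):
        return {"task_id": task_id}
The diff below applies this rename to existing signatures; callers passing
these arguments by keyword have to be updated together with the rename, while
positional call sites are unaffected.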
---
pyproject.toml | 2 +-
tests/etl/test_job_loader.py | 4 +-
tests/etl/test_perf_data_load.py | 24 ++---
.../test_common_behaviour.py | 50 +++++------
treeherder/etl/jobs.py | 4 +-
treeherder/etl/management/commands/ingest.py | 12 +--
treeherder/etl/taskcluster_pulse/handler.py | 90 +++++++++----------
.../intermittents_commenter/commenter.py | 4 +-
treeherder/services/pulse/consumers.py | 4 +-
9 files changed, 97 insertions(+), 97 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index a03486f4355..a19edd67a6a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ select = [
# pyupgrade
"UP",
# pep-naming
- "N806",
+ "N806", "N803",
]
ignore = [
diff --git a/tests/etl/test_job_loader.py b/tests/etl/test_job_loader.py
index 7b94028a371..ac8a2526ff9 100644
--- a/tests/etl/test_job_loader.py
+++ b/tests/etl/test_job_loader.py
@@ -36,14 +36,14 @@ def transformed_pulse_jobs(sample_data, test_repository):
return jobs
-def mock_artifact(taskId, runId, artifactName):
+def mock_artifact(task_id, run_id, artifact_name):
# Mock artifact with empty body
base_url = (
"https://taskcluster.net/api/queue/v1/task/{taskId}/runs/{runId}/artifacts/{artifactName}"
)
responses.add(
responses.GET,
- base_url.format(taskId=taskId, runId=runId, artifactName=artifactName),
+ base_url.format(taskId=task_id, runId=run_id, artifactName=artifact_name),
body="",
content_type="text/plain",
status=200,
diff --git a/tests/etl/test_perf_data_load.py b/tests/etl/test_perf_data_load.py
index 3aa459f725f..512ebbd5153 100644
--- a/tests/etl/test_perf_data_load.py
+++ b/tests/etl/test_perf_data_load.py
@@ -311,15 +311,15 @@ def test_changing_extra_options_decouples_perf_signatures(
# Multi perf data (for the same job) ingestion workflow
-@pytest.mark.parametrize("PERFHERDER_ENABLE_MULTIDATA_INGESTION", [True, False])
+@pytest.mark.parametrize("perfherder_enable_multidata_ingestion", [True, False])
def test_multi_data_can_be_ingested_for_same_job_and_push(
- PERFHERDER_ENABLE_MULTIDATA_INGESTION,
+ perfherder_enable_multidata_ingestion,
test_repository,
perf_job,
sibling_perf_artifacts,
settings,
):
- settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION = PERFHERDER_ENABLE_MULTIDATA_INGESTION
+ settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION = perfherder_enable_multidata_ingestion
try:
for artifact in sibling_perf_artifacts:
@@ -330,11 +330,11 @@ def test_multi_data_can_be_ingested_for_same_job_and_push(
@pytest.mark.parametrize(
- "PERFHERDER_ENABLE_MULTIDATA_INGESTION, based_on_multidata_toggle",
+ "perfherder_enable_multidata_ingestion, based_on_multidata_toggle",
[(True, operator.truth), (False, operator.not_)],
)
def test_multi_data_ingest_workflow(
- PERFHERDER_ENABLE_MULTIDATA_INGESTION,
+ perfherder_enable_multidata_ingestion,
based_on_multidata_toggle,
test_repository,
perf_push,
@@ -347,7 +347,7 @@ def test_multi_data_ingest_workflow(
"""
Assumes the job has multiple PERFHERDER_DATA record in the same log
"""
- settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION = PERFHERDER_ENABLE_MULTIDATA_INGESTION
+ settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION = perfherder_enable_multidata_ingestion
def performance_datum_exists(**with_these_properties) -> bool:
return based_on_multidata_toggle(
@@ -398,15 +398,15 @@ def performance_datum_exists(**with_these_properties) -> bool:
)
-@pytest.mark.parametrize("PERFHERDER_ENABLE_MULTIDATA_INGESTION", [True, False])
+@pytest.mark.parametrize("perfherder_enable_multidata_ingestion", [True, False])
def test_hash_remains_unchanged_for_multi_data_ingestion_workflow(
- PERFHERDER_ENABLE_MULTIDATA_INGESTION,
+ perfherder_enable_multidata_ingestion,
test_repository,
perf_job,
sibling_perf_artifacts,
settings,
):
- settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION = PERFHERDER_ENABLE_MULTIDATA_INGESTION
+ settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION = perfherder_enable_multidata_ingestion
for artifact in sibling_perf_artifacts:
_, submit_datum = _prepare_test_data(artifact)
@@ -416,10 +416,10 @@ def test_hash_remains_unchanged_for_multi_data_ingestion_workflow(
@pytest.mark.parametrize(
- "PERFHERDER_ENABLE_MULTIDATA_INGESTION, operator_", [(True, operator.eq), (False, operator.ne)]
+ "perfherder_enable_multidata_ingestion, operator_", [(True, operator.eq), (False, operator.ne)]
)
def test_timestamp_can_be_updated_for_multi_data_ingestion_workflow(
- PERFHERDER_ENABLE_MULTIDATA_INGESTION,
+ perfherder_enable_multidata_ingestion,
operator_,
test_repository,
perf_job,
@@ -428,7 +428,7 @@ def test_timestamp_can_be_updated_for_multi_data_ingestion_workflow(
sibling_perf_artifacts,
settings,
):
- settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION = PERFHERDER_ENABLE_MULTIDATA_INGESTION
+ settings.PERFHERDER_ENABLE_MULTIDATA_INGESTION = perfherder_enable_multidata_ingestion
for artifact in sibling_perf_artifacts:
_, submit_datum = _prepare_test_data(artifact)
diff --git a/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py b/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
index e4cc9098c64..d38911ecc21 100644
--- a/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
+++ b/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
@@ -83,23 +83,23 @@ def test_formula_throws_adequate_error_for_bug(bad_structured_bug, formula, nonb
formula.has_cooled_down(bad_structured_bug)
-@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
-def test_formula_initializes_with_non_blockable_sessions(FormulaClass, nonblock_session):
+@pytest.mark.parametrize("formula_class", concrete_formula_classes())
+def test_formula_initializes_with_non_blockable_sessions(formula_class, nonblock_session):
try:
- _ = FormulaClass(nonblock_session)
+ _ = formula_class(nonblock_session)
except TypeError:
pytest.fail()
try:
- _ = FormulaClass()
+ _ = formula_class()
except TypeError:
pytest.fail()
-@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
-def test_formula_cannot_be_initialized_with_a_regular_session(FormulaClass, unrecommended_session):
+@pytest.mark.parametrize("formula_class", concrete_formula_classes())
+def test_formula_cannot_be_initialized_with_a_regular_session(formula_class, unrecommended_session):
with pytest.raises(TypeError):
- _ = FormulaClass(unrecommended_session)
+ _ = formula_class(unrecommended_session)
@pytest.mark.parametrize("formula", bugzilla_formula_instances())
@@ -111,9 +111,9 @@ def test_accessing_breakdown_without_prior_calculus_errors_out(formula, nonblock
# Leveraging HTTP VCR
-@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
-def test_formula_demands_at_least_framework_and_suite(FormulaClass, betamax_recorder):
- formula = FormulaClass(betamax_recorder.session)
+@pytest.mark.parametrize("formula_class", concrete_formula_classes())
+def test_formula_demands_at_least_framework_and_suite(formula_class, betamax_recorder):
+ formula = formula_class(betamax_recorder.session)
with pytest.raises(TypeError):
formula("some_framework")
@@ -128,9 +128,9 @@ def test_formula_demands_at_least_framework_and_suite(FormulaClass, betamax_reco
pytest.fail()
-@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
-def test_breakdown_updates_between_calculations(FormulaClass, betamax_recorder):
- formula = FormulaClass(betamax_recorder.session)
+@pytest.mark.parametrize("formula_class", concrete_formula_classes())
+def test_breakdown_updates_between_calculations(formula_class, betamax_recorder):
+ formula = formula_class(betamax_recorder.session)
test_moniker_a = ("build_metrics", "build times")
test_moniker_b = ("talos", "tp5n", "nonmain_startup_fileio")
@@ -149,9 +149,9 @@ def test_breakdown_updates_between_calculations(FormulaClass, betamax_recorder):
assert breakdown_a != breakdown_b
-@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
-def test_breakdown_resets_to_null_when_calculus_errors_out(FormulaClass, betamax_recorder):
- formula = FormulaClass(betamax_recorder.session)
+@pytest.mark.parametrize("formula_class", concrete_formula_classes())
+def test_breakdown_resets_to_null_when_calculus_errors_out(formula_class, betamax_recorder):
+ formula = formula_class(betamax_recorder.session)
test_moniker_a = ("build_metrics", "build times")
test_moniker_b = ("nonexistent_framework", "nonexistent_suite")
@@ -174,7 +174,7 @@ def test_breakdown_resets_to_null_when_calculus_errors_out(FormulaClass, betamax
_ = formula.breakdown()
-@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
+@pytest.mark.parametrize("formula_class", concrete_formula_classes())
@pytest.mark.parametrize(
"framework, suite, test",
[
@@ -185,9 +185,9 @@ def test_breakdown_resets_to_null_when_calculus_errors_out(FormulaClass, betamax
],
)
def test_formula_fetches_bugs_from_quantifying_period(
- framework, suite, test, FormulaClass, betamax_recorder
+ framework, suite, test, formula_class, betamax_recorder
):
- formula = FormulaClass(betamax_recorder.session)
+ formula = formula_class(betamax_recorder.session)
cassette = "-".join(filter(None, [framework, suite, test]))
with betamax_recorder.use_cassette(f"{cassette}", serialize_with="prettyjson"):
@@ -201,7 +201,7 @@ def test_formula_fetches_bugs_from_quantifying_period(
assert creation_time >= formula.oldest_timestamp
-@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
+@pytest.mark.parametrize("formula_class", concrete_formula_classes())
@pytest.mark.parametrize(
"framework, suite, test",
[
@@ -212,9 +212,9 @@ def test_formula_fetches_bugs_from_quantifying_period(
],
)
def test_formula_filters_out_bugs_that_didnt_cool_down_yet(
- framework, suite, test, FormulaClass, betamax_recorder
+ framework, suite, test, formula_class, betamax_recorder
):
- formula = FormulaClass(betamax_recorder.session)
+ formula = formula_class(betamax_recorder.session)
cassette = "-".join(filter(None, [framework, suite, test]))
with betamax_recorder.use_cassette(f"{cassette}", serialize_with="prettyjson"):
@@ -226,9 +226,9 @@ def test_formula_filters_out_bugs_that_didnt_cool_down_yet(
assert formula.has_cooled_down(bug)
-@pytest.mark.parametrize("FormulaClass", concrete_formula_classes())
-def test_formula_errors_up_when_no_bugs_were_filed(FormulaClass, betamax_recorder):
- formula = FormulaClass(betamax_recorder.session)
+@pytest.mark.parametrize("formula_class", concrete_formula_classes())
+def test_formula_errors_up_when_no_bugs_were_filed(formula_class, betamax_recorder):
+ formula = formula_class(betamax_recorder.session)
nonexistent_framework = "nonexistent_framework"
nonexistent_suite = "nonexistent_suite"
diff --git a/treeherder/etl/jobs.py b/treeherder/etl/jobs.py
index 9ca1732c8f1..68268e1b4a2 100644
--- a/treeherder/etl/jobs.py
+++ b/treeherder/etl/jobs.py
@@ -362,7 +362,7 @@ def _schedule_log_parsing(job, job_logs, result, repository):
parse_logs.apply_async(queue=queue, args=[job.id, [job_log.id], priority])
-def store_job_data(repository, originalData):
+def store_job_data(repository, original_data):
"""
Store job data instances into jobs db
@@ -412,7 +412,7 @@ def store_job_data(repository, originalData):
]
"""
- data = copy.deepcopy(originalData)
+ data = copy.deepcopy(original_data)
# Ensure that we have job data to process
if not data:
return
diff --git a/treeherder/etl/management/commands/ingest.py b/treeherder/etl/management/commands/ingest.py
index 78a77f474c4..9bbb7cda143 100644
--- a/treeherder/etl/management/commands/ingest.py
+++ b/treeherder/etl/management/commands/ingest.py
@@ -112,14 +112,14 @@ def _ingest_hg_push(project, revision, fetch_push_id=None):
process.run(pushlog_url, project, changeset=revision, last_push_id=fetch_push_id)
-async def ingest_task(taskId, root_url):
+async def ingest_task(task_id, root_url):
# Limiting the connection pool just in case we have too many
conn = aiohttp.TCPConnector(limit=10)
# Remove default timeout limit of 5 minutes
timeout = aiohttp.ClientTimeout(total=0)
async with taskcluster.aio.createSession(connector=conn, timeout=timeout) as session:
async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
- results = await asyncio.gather(async_queue.status(taskId), async_queue.task(taskId))
+ results = await asyncio.gather(async_queue.status(task_id), async_queue.task(task_id))
await handleTask(
{
"status": results[0]["status"],
@@ -160,7 +160,7 @@ async def handleTask(task, root_url):
await await_futures(job_futures)
-async def fetchGroupTasks(taskGroupId, root_url):
+async def fetchGroupTasks(task_group_id, root_url):
tasks = []
query = {}
continuation_token = ""
@@ -173,7 +173,7 @@ async def fetchGroupTasks(taskGroupId, root_url):
while True:
if continuation_token:
query = {"continuationToken": continuation_token}
- response = await async_queue.listTaskGroup(taskGroupId, query=query)
+ response = await async_queue.listTaskGroup(task_group_id, query=query)
tasks.extend(response["tasks"])
continuation_token = response.get("continuationToken")
if continuation_token is None:
@@ -182,9 +182,9 @@ async def fetchGroupTasks(taskGroupId, root_url):
return tasks
-async def processTasks(taskGroupId, root_url):
+async def processTasks(task_group_id, root_url):
try:
- tasks = await fetchGroupTasks(taskGroupId, root_url)
+ tasks = await fetchGroupTasks(task_group_id, root_url)
logger.info("We have %s tasks to process", len(tasks))
except Exception as e:
logger.exception(e)
diff --git a/treeherder/etl/taskcluster_pulse/handler.py b/treeherder/etl/taskcluster_pulse/handler.py
index 0b931a7304d..ae88fb998c0 100644
--- a/treeherder/etl/taskcluster_pulse/handler.py
+++ b/treeherder/etl/taskcluster_pulse/handler.py
@@ -33,20 +33,20 @@ class PulseHandlerError(Exception):
pass
-def stateFromRun(jobRun):
- return "completed" if jobRun["state"] in ("exception", "failed") else jobRun["state"]
+def stateFromRun(job_run):
+ return "completed" if job_run["state"] in ("exception", "failed") else job_run["state"]
-def resultFromRun(jobRun):
+def resultFromRun(job_run):
run_to_result = {
"completed": "success",
"failed": "fail",
}
- state = jobRun["state"]
+ state = job_run["state"]
if state in list(run_to_result.keys()):
return run_to_result[state]
elif state == "exception":
- reason_resolved = jobRun.get("reasonResolved")
+ reason_resolved = job_run.get("reasonResolved")
if reason_resolved in ["canceled", "superseded"]:
return reason_resolved
return "exception"
@@ -56,10 +56,10 @@ def resultFromRun(jobRun):
# Creates a log entry for Treeherder to retrieve and parse. This log is
# displayed on the Treeherder Log Viewer once parsed.
-def createLogReference(root_url, taskId, runId):
+def createLogReference(root_url, task_id, run_id):
log_url = taskcluster_urls.api(
root_url, "queue", "v1", "task/{taskId}/runs/{runId}/artifacts/public/logs/live_backing.log"
- ).format(taskId=taskId, runId=runId)
+ ).format(taskId=task_id, runId=run_id)
return {
"name": "live_backing_log",
"url": log_url,
@@ -70,14 +70,14 @@ def createLogReference(root_url, taskId, runId):
# the route is parsed into distinct parts used for constructing the
# Treeherder job message.
# TODO: Refactor https://bugzilla.mozilla.org/show_bug.cgi?id=1560596
-def parseRouteInfo(prefix, taskId, routes, task):
+def parseRouteInfo(prefix, task_id, routes, task):
matching_routes = list(filter(lambda route: route.split(".")[0] == "tc-treeherder", routes))
if len(matching_routes) != 1:
raise PulseHandlerError(
"Could not determine Treeherder route. Either there is no route, "
+ "or more than one matching route exists."
- + f"Task ID: {taskId} Routes: {routes}"
+ + f"Task ID: {task_id} Routes: {routes}"
)
parsed_route = parseRoute(matching_routes[0])
@@ -98,12 +98,12 @@ def validateTask(task):
return True
-def ignore_task(task, taskId, rootUrl, project):
+def ignore_task(task, task_id, root_url, project):
ignore = False
    # This logic is useful to reduce the number of tasks we ingest, requiring
    # fewer dynos and fewer database writes. You can adjust PROJECTS_TO_INGEST on the app to meet your needs
if projectsToIngest and project not in projectsToIngest.split(","):
- logger.debug("Ignoring tasks not matching PROJECTS_TO_INGEST (Task id: %s)", taskId)
+ logger.debug("Ignoring tasks not matching PROJECTS_TO_INGEST (Task id: %s)", task_id)
return True
mobile_repos = (
@@ -122,7 +122,7 @@ def ignore_task(task, taskId, rootUrl, project):
# Ignore tasks that are associated to a pull request
if envs["MOBILE_BASE_REPOSITORY"] != envs["MOBILE_HEAD_REPOSITORY"]:
logger.debug(
- "Task: %s belong to a pull request OR branch which we ignore.", taskId
+ "Task: %s belong to a pull request OR branch which we ignore.", task_id
)
ignore = True
# Bug 1587542 - Temporary change to ignore Github tasks not associated to 'master'
@@ -132,13 +132,13 @@ def ignore_task(task, taskId, rootUrl, project):
"refs/heads/main",
"main",
):
- logger.info("Task: %s is not for the `master` branch.", taskId)
+ logger.info("Task: %s is not for the `master` branch.", task_id)
ignore = True
except KeyError:
pass
else:
# The decision task is the ultimate source for determining this information
- queue = taskcluster.Queue({"rootUrl": rootUrl})
+ queue = taskcluster.Queue({"rootUrl": root_url})
decision_task = queue.task(task["taskGroupId"])
scopes = decision_task["metadata"].get("source")
ignore = True
@@ -156,7 +156,7 @@ def ignore_task(task, taskId, rootUrl, project):
break
if ignore:
- logger.debug(f"Task to be ignored ({taskId})")
+ logger.debug(f"Task to be ignored ({task_id})")
return ignore
@@ -166,12 +166,12 @@ def ignore_task(task, taskId, rootUrl, project):
# Only messages that contain the properly formatted routing key and contains
# treeherder job information in task.extra.treeherder are accepted
# This will generate a list of messages that need to be ingested by Treeherder
-async def handleMessage(message, taskDefinition=None):
+async def handleMessage(message, task_definition=None):
async with taskcluster.aio.createSession() as session:
jobs = []
task_id = message["payload"]["status"]["taskId"]
async_queue = taskcluster.aio.Queue({"rootUrl": message["root_url"]}, session=session)
- task = (await async_queue.task(task_id)) if not taskDefinition else taskDefinition
+ task = (await async_queue.task(task_id)) if not task_definition else task_definition
try:
parsed_route = parseRouteInfo("tc-treeherder", task_id, task["routes"], task)
@@ -217,16 +217,16 @@ async def handleMessage(message, taskDefinition=None):
#
# Specific handlers for each message type will add/remove information necessary
# for the type of task event..
-def buildMessage(pushInfo, task, runId, payload):
+def buildMessage(push_info, task, run_id, payload):
task_id = payload["status"]["taskId"]
- job_run = payload["status"]["runs"][runId]
+ job_run = payload["status"]["runs"][run_id]
treeherder_config = task["extra"]["treeherder"]
job = {
"buildSystem": "taskcluster",
"owner": task["metadata"]["owner"],
- "taskId": f"{slugid.decode(task_id)}/{runId}",
- "retryId": runId,
+ "taskId": f"{slugid.decode(task_id)}/{run_id}",
+ "retryId": run_id,
"isRetried": False,
"display": {
# jobSymbols could be an integer (i.e. Chunk ID) but need to be strings
@@ -250,16 +250,16 @@ def buildMessage(pushInfo, task, runId, payload):
}
job["origin"] = {
- "kind": pushInfo["origin"],
- "project": pushInfo["project"],
- "revision": pushInfo["revision"],
+ "kind": push_info["origin"],
+ "project": push_info["project"],
+ "revision": push_info["revision"],
}
- if pushInfo["origin"] == "hg.mozilla.org":
- job["origin"]["pushLogID"] = pushInfo["id"]
+ if push_info["origin"] == "hg.mozilla.org":
+ job["origin"]["pushLogID"] = push_info["id"]
else:
- job["origin"]["pullRequestID"] = pushInfo["id"]
- job["origin"]["owner"] = pushInfo["owner"]
+ job["origin"]["pullRequestID"] = push_info["id"]
+ job["origin"]["owner"] = push_info["owner"]
# Transform "collection" into an array of labels if task doesn't
# define "labels".
@@ -289,14 +289,14 @@ def buildMessage(pushInfo, task, runId, payload):
return job
-def handleTaskPending(pushInfo, task, message):
+def handleTaskPending(push_info, task, message):
payload = message["payload"]
- return buildMessage(pushInfo, task, payload["runId"], payload)
+ return buildMessage(push_info, task, payload["runId"], payload)
-async def handleTaskRerun(pushInfo, task, message, session):
+async def handleTaskRerun(push_info, task, message, session):
payload = message["payload"]
- job = buildMessage(pushInfo, task, payload["runId"] - 1, payload)
+ job = buildMessage(push_info, task, payload["runId"] - 1, payload)
job["state"] = "completed"
job["result"] = "fail"
job["isRetried"] = True
@@ -309,17 +309,17 @@ async def handleTaskRerun(pushInfo, task, message, session):
return job
-def handleTaskRunning(pushInfo, task, message):
+def handleTaskRunning(push_info, task, message):
payload = message["payload"]
- job = buildMessage(pushInfo, task, payload["runId"], payload)
+ job = buildMessage(push_info, task, payload["runId"], payload)
job["timeStarted"] = payload["status"]["runs"][payload["runId"]]["started"]
return job
-async def handleTaskCompleted(pushInfo, task, message, session):
+async def handleTaskCompleted(push_info, task, message, session):
payload = message["payload"]
job_run = payload["status"]["runs"][payload["runId"]]
- job = buildMessage(pushInfo, task, payload["runId"], payload)
+ job = buildMessage(push_info, task, payload["runId"], payload)
job["timeStarted"] = job_run["started"]
job["timeCompleted"] = job_run["resolved"]
@@ -332,7 +332,7 @@ async def handleTaskCompleted(pushInfo, task, message, session):
return job
-async def handleTaskException(pushInfo, task, message, session):
+async def handleTaskException(push_info, task, message, session):
payload = message["payload"]
job_run = payload["status"]["runs"][payload["runId"]]
# Do not report runs that were created as an exception. Such cases
@@ -340,7 +340,7 @@ async def handleTaskException(pushInfo, task, message, session):
if job_run["reasonCreated"] == "exception":
return
- job = buildMessage(pushInfo, task, payload["runId"], payload)
+ job = buildMessage(push_info, task, payload["runId"], payload)
# Jobs that get cancelled before running don't have a started time
if job_run.get("started"):
job["timeStarted"] = job_run["started"]
@@ -354,9 +354,9 @@ async def handleTaskException(pushInfo, task, message, session):
return job
-async def fetchArtifacts(root_url, taskId, runId, session):
+async def fetchArtifacts(root_url, task_id, run_id, session):
async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
- res = await async_queue.listArtifacts(taskId, runId)
+ res = await async_queue.listArtifacts(task_id, run_id)
artifacts = res["artifacts"]
continuation_token = res.get("continuationToken")
@@ -364,7 +364,7 @@ async def fetchArtifacts(root_url, taskId, runId, session):
continuation = {"continuationToken": res["continuationToken"]}
try:
- res = await async_queue.listArtifacts(taskId, runId, continuation)
+ res = await async_queue.listArtifacts(task_id, run_id, continuation)
except Exception:
break
@@ -378,12 +378,12 @@ async def fetchArtifacts(root_url, taskId, runId, session):
# fetch them in order to determine if there is an error_summary log;
# TODO refactor this when there is a way to only retrieve the error_summary
# artifact: https://bugzilla.mozilla.org/show_bug.cgi?id=1629716
-async def addArtifactUploadedLinks(root_url, taskId, runId, job, session):
+async def addArtifactUploadedLinks(root_url, task_id, run_id, job, session):
artifacts = []
try:
- artifacts = await fetchArtifacts(root_url, taskId, runId, session)
+ artifacts = await fetchArtifacts(root_url, task_id, run_id, session)
except Exception:
- logger.debug("Artifacts could not be found for task: %s run: %s", taskId, runId)
+ logger.debug("Artifacts could not be found for task: %s run: %s", task_id, run_id)
return job
seen = {}
@@ -408,7 +408,7 @@ async def addArtifactUploadedLinks(root_url, taskId, runId, job, session):
"queue",
"v1",
"task/{taskId}/runs/{runId}/artifacts/{artifact_name}".format(
- taskId=taskId, runId=runId, artifact_name=artifact["name"]
+ taskId=task_id, runId=run_id, artifact_name=artifact["name"]
),
),
}
diff --git a/treeherder/intermittents_commenter/commenter.py b/treeherder/intermittents_commenter/commenter.py
index 5a1e9aacb65..fc26d47c913 100644
--- a/treeherder/intermittents_commenter/commenter.py
+++ b/treeherder/intermittents_commenter/commenter.py
@@ -181,14 +181,14 @@ def open_file(self, filename, load):
else:
return myfile.read()
- def calculate_date_strings(self, mode, numDays):
+ def calculate_date_strings(self, mode, num_days):
"""Returns a tuple of start (in YYYY-MM-DD format) and end date
strings (in YYYY-MM-DD HH:MM:SS format for an inclusive day)."""
yesterday = date.today() - timedelta(days=1)
endday = datetime(yesterday.year, yesterday.month, yesterday.day, 23, 59, 59, 999)
if mode:
- startday = yesterday - timedelta(days=numDays)
+ startday = yesterday - timedelta(days=num_days)
else:
# daily mode
startday = yesterday
diff --git a/treeherder/services/pulse/consumers.py b/treeherder/services/pulse/consumers.py
index ff3b2a02d1b..04037314380 100644
--- a/treeherder/services/pulse/consumers.py
+++ b/treeherder/services/pulse/consumers.py
@@ -64,8 +64,8 @@ def __init__(self, source, build_routing_key):
self.source = source
self.build_routing_key = build_routing_key
- def get_consumers(self, Consumer, channel):
- return [Consumer(**c) for c in self.consumers]
+ def get_consumers(self, consumer, channel):
+ return [consumer(**c) for c in self.consumers]
def bindings(self):
"""Get the bindings for this consumer, each of the form `.`,
From 50b2a303127cd1ab3f20661527336b961b409ab4 Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Tue, 27 Feb 2024 11:59:34 +0100
Subject: [PATCH 046/128] N801: Class name should use CapWords convention
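A minimal sketch of what N801 flags, with class names invented for
illustration only:
    # Flagged by N801: class name does not use CapWords.
    class retry_policy:
        pass
    # Compliant: CapWords class name.
    class RetryPolicy:
        pass
Where renaming a class would ripple too far, the rule can instead be
suppressed on a single definition, as the diff below does for retryable_task
with "# noqa: N801".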
---
pyproject.toml | 2 +-
tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py | 4 ++--
treeherder/workers/task.py | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index a19edd67a6a..773fb71a3ee 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ select = [
# pyupgrade
"UP",
# pep-naming
- "N806", "N803",
+ "N806", "N803", "N801",
]
ignore = [
diff --git a/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py b/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
index 6b49ff0d4ff..943aaaa581a 100644
--- a/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
+++ b/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
@@ -112,7 +112,7 @@
res.ready = MagicMock(return_value=True)
-class eventually_ready:
+class EventuallyReady:
def __init__(self, start_time: float, ready_after: float):
print(f"start_time: {start_time}")
self.start_time = start_time
@@ -128,7 +128,7 @@ def __call__(self):
with freeze_time(CASSETTES_RECORDING_DATE) as frozentime:
for res in EVENTUALLY_READY_RESULTS:
res.ready = MagicMock(
- side_effect=eventually_ready(time.time(), 4 * 60 + 59)
+ side_effect=EventuallyReady(time.time(), 4 * 60 + 59)
) # ready just before timeout
diff --git a/treeherder/workers/task.py b/treeherder/workers/task.py
index 2573dfa320d..77454e1e7b8 100644
--- a/treeherder/workers/task.py
+++ b/treeherder/workers/task.py
@@ -10,7 +10,7 @@
from treeherder.etl.exceptions import MissingPushException
-class retryable_task:
+class retryable_task: # noqa: N801
"""Wrapper around a celery task to add conditional task retrying."""
NON_RETRYABLE_EXCEPTIONS = (
From c002a310b51817991e5ab661abb92c5020625cac Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Tue, 27 Feb 2024 18:58:25 +0100
Subject: [PATCH 047/128] N815: Variable in class scope should not be mixedCase
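A minimal sketch of what N815 flags; the class and attribute names are
invented for illustration:
    # Flagged by N815: mixedCase attribute defined in class scope.
    class JobDisplayFlagged:
        jobName = "build"
    # Compliant: snake_case attribute in class scope.
    class JobDisplay:
        job_name = "build"
Note that when the mixedCase name is a DRF serializer field, as in the
serializers.py change below, the rename also changes the JSON keys the API
exposes (jobName/jobSymbol become job_name/job_symbol), so API consumers have
to follow suit.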
---
pyproject.toml | 2 +-
treeherder/webapp/api/investigated_test.py | 4 ++--
treeherder/webapp/api/performance_serializers.py | 4 ++--
treeherder/webapp/api/serializers.py | 6 +++---
4 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 773fb71a3ee..e8b1090d847 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ select = [
# pyupgrade
"UP",
# pep-naming
- "N806", "N803", "N801",
+ "N806", "N803", "N801", "N815"
]
ignore = [
diff --git a/treeherder/webapp/api/investigated_test.py b/treeherder/webapp/api/investigated_test.py
index 4f5d8e4ecaa..d1cd6f78139 100644
--- a/treeherder/webapp/api/investigated_test.py
+++ b/treeherder/webapp/api/investigated_test.py
@@ -35,8 +35,8 @@ def create(self, request, *args, **kwargs):
project = kwargs["project"]
revision = request.query_params.get("revision")
test = request.data["test"]
- job_name = request.data["jobName"]
- job_symbol = request.data["jobSymbol"]
+ job_name = request.data["job_name"]
+ job_symbol = request.data["job_symbol"]
try:
repository = Repository.objects.get(name=project)
diff --git a/treeherder/webapp/api/performance_serializers.py b/treeherder/webapp/api/performance_serializers.py
index 5e56e1601eb..d8aa902d801 100644
--- a/treeherder/webapp/api/performance_serializers.py
+++ b/treeherder/webapp/api/performance_serializers.py
@@ -349,11 +349,11 @@ class Meta:
class IssueTrackerSerializer(serializers.ModelSerializer):
text = serializers.CharField(read_only=True, source="name")
- issueTrackerUrl = serializers.URLField(read_only=True, source="task_base_url")
+ issue_tracker_url = serializers.URLField(read_only=True, source="task_base_url")
class Meta:
model = IssueTracker
- fields = ["id", "text", "issueTrackerUrl"]
+ fields = ["id", "text", "issue_tracker_url"]
class PerformanceQueryParamsSerializer(serializers.Serializer):
diff --git a/treeherder/webapp/api/serializers.py b/treeherder/webapp/api/serializers.py
index cc1e43fd371..9ce3c4142e9 100644
--- a/treeherder/webapp/api/serializers.py
+++ b/treeherder/webapp/api/serializers.py
@@ -435,9 +435,9 @@ class Meta:
class InvestigatedTestsSerializers(serializers.ModelSerializer):
- jobName = serializers.CharField(source="job_type.name")
- jobSymbol = serializers.CharField(source="job_type.symbol")
+ job_name = serializers.CharField(source="job_type.name")
+ job_symbol = serializers.CharField(source="job_type.symbol")
class Meta:
model = models.InvestigatedTests
- fields = ("id", "test", "jobName", "jobSymbol")
+ fields = ("id", "test", "job_name", "job_symbol")
From 2b2d57cdb019562ee618718b44e54f974fe41571 Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Tue, 27 Feb 2024 19:01:18 +0100
Subject: [PATCH 048/128] N811: Constant imported as non-constant
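In short, N811 flags an UPPER_CASE constant imported under a non-constant (lowercase)
alias. The import fixed in the test below illustrates it:
    from django.contrib.auth import SESSION_KEY as auth_session_key  # N811: constant imported as non-constant
    from django.contrib.auth import SESSION_KEY                      # compliant: keep the constant's name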
---
pyproject.toml | 2 +-
tests/webapp/api/test_auth.py | 12 ++++++------
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index e8b1090d847..984a1c9c7c4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ select = [
# pyupgrade
"UP",
# pep-naming
- "N806", "N803", "N801", "N815"
+ "N806", "N803", "N801", "N815", "N811",
]
ignore = [
diff --git a/tests/webapp/api/test_auth.py b/tests/webapp/api/test_auth.py
index f598e6d330a..8b9383de6da 100644
--- a/tests/webapp/api/test_auth.py
+++ b/tests/webapp/api/test_auth.py
@@ -1,7 +1,7 @@
import time
import pytest
-from django.contrib.auth import SESSION_KEY as auth_session_key
+from django.contrib.auth import SESSION_KEY
from django.urls import reverse
from rest_framework import status
from rest_framework.decorators import APIView
@@ -73,7 +73,7 @@ def userinfo_mock(*args, **kwargs):
monkeypatch.setattr(AuthBackend, "_get_user_info", userinfo_mock)
- assert auth_session_key not in client.session
+ assert SESSION_KEY not in client.session
assert User.objects.count() == 0
# The first time someone logs in a new user should be created,
@@ -92,12 +92,12 @@ def userinfo_mock(*args, **kwargs):
"is_staff": False,
"is_superuser": False,
}
- assert auth_session_key in client.session
+ assert SESSION_KEY in client.session
# Uses a tolerance of up to 5 seconds to account for rounding/the time the test takes to run.
assert client.session.get_expiry_age() == pytest.approx(one_hour_in_seconds, abs=5)
assert User.objects.count() == 1
- session_user_id = int(client.session[auth_session_key])
+ session_user_id = int(client.session[SESSION_KEY])
user = User.objects.get(id=session_user_id)
assert user.username == expected_username
assert user.email == id_token_email
@@ -106,7 +106,7 @@ def userinfo_mock(*args, **kwargs):
resp = client.get(reverse("auth-logout"))
assert resp.status_code == 200
- assert auth_session_key not in client.session
+ assert SESSION_KEY not in client.session
# Logging in again should associate the existing user with the Django session.
@@ -118,7 +118,7 @@ def userinfo_mock(*args, **kwargs):
)
assert resp.status_code == 200
assert resp.json()["username"] == expected_username
- assert auth_session_key in client.session
+ assert SESSION_KEY in client.session
assert client.session.get_expiry_age() == pytest.approx(one_hour_in_seconds, abs=5)
assert User.objects.count() == 1
From 703397102ee10fb96987a0ee555ccc9739274a80 Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Tue, 27 Feb 2024 19:10:06 +0100
Subject: [PATCH 049/128] N818: Exception name should be named with an Error
suffix
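In short, N818 asks that custom exception classes end with an Error suffix. A minimal
sketch using one of the names renamed below:
    class MissingPushException(Exception):  # N818: missing the Error suffix
        pass
    class MissingPushError(Exception):      # compliant
        pass
The rename is mechanical: definitions, imports, raise sites and except clauses are
updated to match.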
---
pyproject.toml | 2 +-
tests/log_parser/test_artifact_builder_collection.py | 4 ++--
tests/log_parser/test_performance_parser.py | 4 ++--
tests/model/cycle_data/test_perfherder_cycling.py | 6 +++---
tests/perf/auto_perf_sheriffing/test_backfill_tool.py | 6 +++---
.../test_report_backfill_outcome.py | 4 ++--
tests/perf/auto_perf_sheriffing/test_sherlock.py | 8 ++++----
.../auto_sheriffing_criteria/test_common_behaviour.py | 6 +++---
.../auto_sheriffing_criteria/test_criteria_tracker.py | 4 ++--
tests/test_worker/test_pulse_tasks.py | 4 ++--
treeherder/etl/exceptions.py | 4 ++--
treeherder/etl/job_loader.py | 4 ++--
treeherder/etl/management/commands/ingest.py | 4 ++--
treeherder/etl/pushlog.py | 4 ++--
treeherder/log_parser/artifactbuildercollection.py | 8 ++++----
treeherder/log_parser/parsers.py | 4 ++--
treeherder/log_parser/tasks.py | 4 ++--
treeherder/model/data_cycling/cyclers.py | 8 ++++----
treeherder/model/data_cycling/max_runtime.py | 4 ++--
.../perf/auto_perf_sheriffing/backfill_reports.py | 6 +++---
treeherder/perf/auto_perf_sheriffing/backfill_tool.py | 4 ++--
treeherder/perf/auto_perf_sheriffing/sherlock.py | 6 +++---
treeherder/perf/exceptions.py | 10 +++++-----
treeherder/perf/management/commands/perf_sheriff.py | 4 ++--
treeherder/perf/management/commands/remove_vcs_data.py | 4 ++--
.../perf/sheriffing_criteria/bugzilla_formulas.py | 4 ++--
.../perf/sheriffing_criteria/criteria_tracking.py | 6 +++---
treeherder/workers/task.py | 4 ++--
28 files changed, 70 insertions(+), 70 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 984a1c9c7c4..1a93a7ed65a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ select = [
# pyupgrade
"UP",
# pep-naming
- "N806", "N803", "N801", "N815", "N811",
+ "N806", "N803", "N801", "N815", "N811", "N818",
]
ignore = [
diff --git a/tests/log_parser/test_artifact_builder_collection.py b/tests/log_parser/test_artifact_builder_collection.py
index b57fdea2d06..2e268b14d6d 100644
--- a/tests/log_parser/test_artifact_builder_collection.py
+++ b/tests/log_parser/test_artifact_builder_collection.py
@@ -5,7 +5,7 @@
from treeherder.log_parser.artifactbuildercollection import (
MAX_DOWNLOAD_SIZE_IN_BYTES,
ArtifactBuilderCollection,
- LogSizeException,
+ LogSizeError,
)
from treeherder.log_parser.artifactbuilders import LogViewerArtifactBuilder
@@ -69,5 +69,5 @@ def test_log_download_size_limit():
)
lpc = ArtifactBuilderCollection(url)
- with pytest.raises(LogSizeException):
+ with pytest.raises(LogSizeError):
lpc.parse()
diff --git a/tests/log_parser/test_performance_parser.py b/tests/log_parser/test_performance_parser.py
index 1c36e142a96..a121b1e3767 100644
--- a/tests/log_parser/test_performance_parser.py
+++ b/tests/log_parser/test_performance_parser.py
@@ -1,6 +1,6 @@
import json
-from treeherder.log_parser.parsers import EmptyPerformanceData, PerformanceParser
+from treeherder.log_parser.parsers import EmptyPerformanceDataError, PerformanceParser
def test_performance_log_parsing_malformed_perfherder_data():
@@ -15,7 +15,7 @@ def test_performance_log_parsing_malformed_perfherder_data():
try:
# Empty performance data
parser.parse_line("PERFHERDER_DATA: {}", 2)
- except EmptyPerformanceData:
+ except EmptyPerformanceDataError:
pass
valid_perfherder_data = {
diff --git a/tests/model/cycle_data/test_perfherder_cycling.py b/tests/model/cycle_data/test_perfherder_cycling.py
index 23f5fbfb25c..68546ea267f 100644
--- a/tests/model/cycle_data/test_perfherder_cycling.py
+++ b/tests/model/cycle_data/test_perfherder_cycling.py
@@ -18,7 +18,7 @@
StalledDataRemoval,
)
from treeherder.model.models import Push
-from treeherder.perf.exceptions import MaxRuntimeExceeded
+from treeherder.perf.exceptions import MaxRuntimeExceededError
from treeherder.perf.models import (
PerformanceDatum,
PerformanceDatumReplicate,
@@ -401,7 +401,7 @@ def test_performance_cycler_quit_indicator(taskcluster_notify_mock):
two_seconds_ago = datetime.now() - timedelta(seconds=2)
five_minutes = timedelta(minutes=5)
- with pytest.raises(MaxRuntimeExceeded):
+ with pytest.raises(MaxRuntimeExceededError):
PerfherderCycler(chunk_size=100, sleep_time=0)
max_runtime = MaxRuntime(max_runtime=one_second)
@@ -413,7 +413,7 @@ def test_performance_cycler_quit_indicator(taskcluster_notify_mock):
max_runtime = MaxRuntime(max_runtime=five_minutes)
max_runtime.started_at = two_seconds_ago
max_runtime.quit_on_timeout()
- except MaxRuntimeExceeded:
+ except MaxRuntimeExceededError:
pytest.fail("Performance cycling shouldn't have timed out")
diff --git a/tests/perf/auto_perf_sheriffing/test_backfill_tool.py b/tests/perf/auto_perf_sheriffing/test_backfill_tool.py
index 89d60c8c4ee..364196f2f3c 100644
--- a/tests/perf/auto_perf_sheriffing/test_backfill_tool.py
+++ b/tests/perf/auto_perf_sheriffing/test_backfill_tool.py
@@ -1,7 +1,7 @@
import pytest
from treeherder.perf.auto_perf_sheriffing.backfill_tool import BackfillTool
-from treeherder.perf.exceptions import CannotBackfill
+from treeherder.perf.exceptions import CannotBackfillError
from treeherder.services.taskcluster import TaskclusterModelNullObject
@@ -18,11 +18,11 @@ def test_backfilling_missing_job_errors_out(self, db):
def test_backfilling_job_from_try_repo_by_id_raises_exception(self, job_from_try):
backfill_tool = BackfillTool(TaskclusterModelNullObject(*self.FAKE_OPTIONS))
- with pytest.raises(CannotBackfill):
+ with pytest.raises(CannotBackfillError):
backfill_tool.backfill_job(job_from_try.id)
def test_backfilling_job_from_try_repo_raises_exception(self, job_from_try):
backfill_tool = BackfillTool(TaskclusterModelNullObject(*self.FAKE_OPTIONS))
- with pytest.raises(CannotBackfill):
+ with pytest.raises(CannotBackfillError):
backfill_tool.backfill_job(job_from_try)
diff --git a/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py b/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py
index b8eeec56807..b6de40cc3c8 100644
--- a/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py
+++ b/tests/perf/auto_perf_sheriffing/test_report_backfill_outcome.py
@@ -6,7 +6,7 @@
from treeherder.perf.auto_perf_sheriffing.sherlock import Sherlock
from treeherder.perf.models import BackfillNotificationRecord
-from treeherder.perf.exceptions import MaxRuntimeExceeded
+from treeherder.perf.exceptions import MaxRuntimeExceededError
EPOCH = datetime.utcfromtimestamp(0)
@@ -78,7 +78,7 @@ def test_no_email_is_sent_if_runtime_exceeded(
sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary, no_time_left)
try:
sherlock.sheriff(since=EPOCH, frameworks=["raptor", "talos"], repositories=["autoland"])
- except MaxRuntimeExceeded:
+ except MaxRuntimeExceededError:
pass
assert BackfillNotificationRecord.objects.count() == 0
diff --git a/tests/perf/auto_perf_sheriffing/test_sherlock.py b/tests/perf/auto_perf_sheriffing/test_sherlock.py
index d23de352e85..eba0233f686 100644
--- a/tests/perf/auto_perf_sheriffing/test_sherlock.py
+++ b/tests/perf/auto_perf_sheriffing/test_sherlock.py
@@ -9,7 +9,7 @@
from tests.perf.auto_perf_sheriffing.conftest import prepare_record_with_search_str
from treeherder.model.models import Job, Push
from treeherder.perf.auto_perf_sheriffing.sherlock import Sherlock
-from treeherder.perf.exceptions import MaxRuntimeExceeded
+from treeherder.perf.exceptions import MaxRuntimeExceededError
from treeherder.perf.models import BackfillRecord, BackfillReport
EPOCH = datetime.utcfromtimestamp(0)
@@ -95,7 +95,7 @@ def test_assert_can_run_throws_exception_when_runtime_exceeded(
no_time_left = timedelta(seconds=0)
sherlock_bot = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary, no_time_left)
- with pytest.raises(MaxRuntimeExceeded):
+ with pytest.raises(MaxRuntimeExceededError):
sherlock_bot.assert_can_run()
@@ -111,7 +111,7 @@ def test_assert_can_run_doesnt_throw_exception_when_enough_time_left(
try:
sherlock.assert_can_run()
- except MaxRuntimeExceeded:
+ except MaxRuntimeExceededError:
pytest.fail()
@@ -153,7 +153,7 @@ def test_records_and_db_limits_remain_unchanged_if_runtime_exceeded(
sherlock = Sherlock(report_maintainer_mock, backfill_tool_mock, secretary, no_time_left)
try:
sherlock.sheriff(since=EPOCH, frameworks=["raptor", "talos"], repositories=["autoland"])
- except MaxRuntimeExceeded:
+ except MaxRuntimeExceededError:
pass
assert not has_changed(record_ready_for_processing)
diff --git a/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py b/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
index d38911ecc21..455374c4d68 100644
--- a/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
+++ b/tests/perf/auto_sheriffing_criteria/test_common_behaviour.py
@@ -6,7 +6,7 @@
from tests.perf.auto_sheriffing_criteria.conftest import CASSETTES_RECORDING_DATE
from treeherder.config.settings import BZ_DATETIME_FORMAT
-from treeherder.perf.exceptions import NoFiledBugs
+from treeherder.perf.exceptions import NoFiledBugsError
from treeherder.perf.sheriffing_criteria import (
EngineerTractionFormula,
FixRatioFormula,
@@ -166,7 +166,7 @@ def test_breakdown_resets_to_null_when_calculus_errors_out(formula_class, betama
# now run alternated path calculus
with betamax_recorder.use_cassette(f"{cassette_preffix_b}", serialize_with="prettyjson"):
- with pytest.raises(NoFiledBugs):
+ with pytest.raises(NoFiledBugsError):
formula(*test_moniker_b) # intentionally blows up while doing calculus
# cached breakdown got invalidated & can no longer be obtained
@@ -235,5 +235,5 @@ def test_formula_errors_up_when_no_bugs_were_filed(formula_class, betamax_record
with betamax_recorder.use_cassette(
f"{nonexistent_framework}-{nonexistent_suite}", serialize_with="prettyjson"
):
- with pytest.raises(NoFiledBugs):
+ with pytest.raises(NoFiledBugsError):
formula(nonexistent_framework, nonexistent_suite)
diff --git a/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py b/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
index 943aaaa581a..e9cf5d27dcb 100644
--- a/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
+++ b/tests/perf/auto_sheriffing_criteria/test_criteria_tracker.py
@@ -12,7 +12,7 @@
from freezegun import freeze_time
from tests.perf.auto_sheriffing_criteria.conftest import CASSETTES_RECORDING_DATE
-from treeherder.perf.exceptions import NoFiledBugs
+from treeherder.perf.exceptions import NoFiledBugsError
from treeherder.perf.sheriffing_criteria import (
CriteriaTracker,
EngineerTractionFormula,
@@ -223,7 +223,7 @@ def test_record_computer_can_tell_unallowed_data(criteria_record):
@pytest.mark.freeze_time(CASSETTES_RECORDING_DATE) # disable tick
-@pytest.mark.parametrize("exception", [NoFiledBugs(), Exception()])
+@pytest.mark.parametrize("exception", [NoFiledBugsError(), Exception()])
def test_record_computer_still_updates_if_one_of_the_formulas_fails(exception, db):
formula_map = {
"EngineerTraction": MagicMock(spec=EngineerTractionFormula, return_value=EXPECTED_VALUE),
diff --git a/tests/test_worker/test_pulse_tasks.py b/tests/test_worker/test_pulse_tasks.py
index 09a615e821b..19a426074f8 100644
--- a/tests/test_worker/test_pulse_tasks.py
+++ b/tests/test_worker/test_pulse_tasks.py
@@ -2,7 +2,7 @@
import pytest
-from treeherder.etl.exceptions import MissingPushException
+from treeherder.etl.exceptions import MissingPushError
from treeherder.etl.push import store_push_data
from treeherder.etl.tasks.pulse_tasks import store_pulse_tasks
from treeherder.model.models import Job
@@ -26,7 +26,7 @@ def test_retry_missing_revision_succeeds(
orig_retry = store_pulse_tasks.retry
def retry_mock(exc=None, countdown=None):
- assert isinstance(exc, MissingPushException)
+ assert isinstance(exc, MissingPushError)
thread_data.retries += 1
store_push_data(test_repository, [rs])
return orig_retry(exc=exc, countdown=countdown)
diff --git a/treeherder/etl/exceptions.py b/treeherder/etl/exceptions.py
index f4a309c023c..89ee5fc14bc 100644
--- a/treeherder/etl/exceptions.py
+++ b/treeherder/etl/exceptions.py
@@ -1,4 +1,4 @@
-class CollectionNotStoredException(Exception):
+class CollectionNotStoredError(Exception):
def __init__(self, error_list, *args, **kwargs):
"""
error_list contains dictionaries, each containing
@@ -16,5 +16,5 @@ def __str__(self):
)
-class MissingPushException(Exception):
+class MissingPushError(Exception):
pass
diff --git a/treeherder/etl/job_loader.py b/treeherder/etl/job_loader.py
index 69a3aff9768..b3111a929da 100644
--- a/treeherder/etl/job_loader.py
+++ b/treeherder/etl/job_loader.py
@@ -7,7 +7,7 @@
from treeherder.etl.taskcluster_pulse.handler import ignore_task
from treeherder.etl.common import to_timestamp
-from treeherder.etl.exceptions import MissingPushException
+from treeherder.etl.exceptions import MissingPushError
from treeherder.etl.jobs import store_job_data
from treeherder.etl.schema import get_json_schema
from treeherder.model.models import Push, Repository
@@ -106,7 +106,7 @@ def validate_revision(self, repository, pulse_job):
task = get_task_definition(repository.tc_root_url, real_task_id)
# We do this to prevent raising an exception for a task that will never be ingested
if not ignore_task(task, real_task_id, repository.tc_root_url, project):
- raise MissingPushException(
+ raise MissingPushError(
"No push found in {} for revision {} for task {}".format(
pulse_job["origin"]["project"], revision, real_task_id
)
diff --git a/treeherder/etl/management/commands/ingest.py b/treeherder/etl/management/commands/ingest.py
index 9bbb7cda143..cff426c3cc4 100644
--- a/treeherder/etl/management/commands/ingest.py
+++ b/treeherder/etl/management/commands/ingest.py
@@ -16,7 +16,7 @@
from treeherder.client.thclient import TreeherderClient
from treeherder.config.settings import GITHUB_TOKEN
-from treeherder.etl.job_loader import JobLoader, MissingPushException
+from treeherder.etl.job_loader import JobLoader, MissingPushError
from treeherder.etl.push_loader import PushLoader
from treeherder.etl.pushlog import HgPushlogProcess, last_push_id_from_server
from treeherder.etl.taskcluster_pulse.handler import EXCHANGE_EVENT_MAP, handleMessage
@@ -226,7 +226,7 @@ def process_job_with_threads(pulse_job, root_url):
with Connection():
try:
JobLoader().process_job(pulse_job, root_url)
- except MissingPushException:
+ except MissingPushError:
logger.warning("The push was not in the DB. We are going to try that first")
ingest_push(pulse_job["origin"]["project"], pulse_job["origin"]["revision"])
JobLoader().process_job(pulse_job, root_url)
diff --git a/treeherder/etl/pushlog.py b/treeherder/etl/pushlog.py
index 2ff234bb805..17d0a7bc280 100644
--- a/treeherder/etl/pushlog.py
+++ b/treeherder/etl/pushlog.py
@@ -5,7 +5,7 @@
import requests
from django.core.cache import cache
-from treeherder.etl.exceptions import CollectionNotStoredException
+from treeherder.etl.exceptions import CollectionNotStoredError
from treeherder.etl.push import store_push
from treeherder.model.models import Repository
from treeherder.utils.github import fetch_json
@@ -136,7 +136,7 @@ def run(self, source_url, repository_name, changeset=None, last_push_id=None):
)
if errors:
- raise CollectionNotStoredException(errors)
+ raise CollectionNotStoredError(errors)
if not changeset:
# only cache the last push if we're not fetching a specific changeset
diff --git a/treeherder/log_parser/artifactbuildercollection.py b/treeherder/log_parser/artifactbuildercollection.py
index c178ffce932..bb70306c4dd 100644
--- a/treeherder/log_parser/artifactbuildercollection.py
+++ b/treeherder/log_parser/artifactbuildercollection.py
@@ -5,7 +5,7 @@
from treeherder.utils.http import make_request
from .artifactbuilders import LogViewerArtifactBuilder, PerformanceDataArtifactBuilder
-from .parsers import EmptyPerformanceData
+from .parsers import EmptyPerformanceDataError
logger = logging.getLogger(__name__)
# Max log size in bytes we will download (prior to decompression).
@@ -92,7 +92,7 @@ def parse(self):
)
if download_size_in_bytes > MAX_DOWNLOAD_SIZE_IN_BYTES:
- raise LogSizeException(
+ raise LogSizeError(
"Download size of %i bytes exceeds limit" % download_size_in_bytes
)
@@ -106,7 +106,7 @@ def parse(self):
# Using `replace` to prevent malformed unicode (which might possibly exist
# in test message output) from breaking parsing of the rest of the log.
builder.parse_line(line.decode("utf-8", "replace"))
- except EmptyPerformanceData:
+ except EmptyPerformanceDataError:
logger.warning("We have parsed an empty PERFHERDER_DATA for %s", self.url)
# gather the artifacts from all builders
@@ -121,5 +121,5 @@ def parse(self):
self.artifacts[name] = artifact
-class LogSizeException(Exception):
+class LogSizeError(Exception):
pass
diff --git a/treeherder/log_parser/parsers.py b/treeherder/log_parser/parsers.py
index 08406da5387..27e29d96794 100644
--- a/treeherder/log_parser/parsers.py
+++ b/treeherder/log_parser/parsers.py
@@ -197,7 +197,7 @@ def parse_line(self, line, lineno):
try:
data = json.loads(match.group(1))
if not bool(data):
- raise EmptyPerformanceData("The perf data is empty.")
+ raise EmptyPerformanceDataError("The perf data is empty.")
validate_perf_data(data)
self.artifact.append(data)
except ValueError:
@@ -210,5 +210,5 @@ def parse_line(self, line, lineno):
# Don't mark the parser as complete, in case there are multiple performance artifacts.
-class EmptyPerformanceData(Exception):
+class EmptyPerformanceDataError(Exception):
pass
diff --git a/treeherder/log_parser/tasks.py b/treeherder/log_parser/tasks.py
index 9efad3784cf..388db82b7b4 100644
--- a/treeherder/log_parser/tasks.py
+++ b/treeherder/log_parser/tasks.py
@@ -8,7 +8,7 @@
from treeherder.etl.artifact import serialize_artifact_json_blobs, store_job_artifacts
from treeherder.log_parser.artifactbuildercollection import (
ArtifactBuilderCollection,
- LogSizeException,
+ LogSizeError,
)
from treeherder.model.models import Job, JobLog
from treeherder.workers.task import retryable_task
@@ -89,7 +89,7 @@ def post_log_artifacts(job_log):
try:
artifact_list = extract_text_log_artifacts(job_log)
- except LogSizeException as e:
+ except LogSizeError as e:
job_log.update_status(JobLog.SKIPPED_SIZE)
logger.warning("Skipping parsing log for %s: %s", job_log.id, e)
return
diff --git a/treeherder/model/data_cycling/cyclers.py b/treeherder/model/data_cycling/cyclers.py
index 04346df86f0..24c66ffaee2 100644
--- a/treeherder/model/data_cycling/cyclers.py
+++ b/treeherder/model/data_cycling/cyclers.py
@@ -17,7 +17,7 @@
BuildPlatform,
MachinePlatform,
)
-from treeherder.perf.exceptions import NoDataCyclingAtAll, MaxRuntimeExceeded
+from treeherder.perf.exceptions import NoDataCyclingAtAllError, MaxRuntimeExceededError
from treeherder.perf.models import (
PerformanceSignature,
PerformanceAlertSummary,
@@ -140,11 +140,11 @@ def cycle(self):
try:
logger.warning(f"Cycling data using {strategy.name}...")
self._delete_in_chunks(strategy)
- except NoDataCyclingAtAll as ex:
+ except NoDataCyclingAtAllError as ex:
logger.warning(str(ex))
self._remove_leftovers()
- except MaxRuntimeExceeded as ex:
+ except MaxRuntimeExceededError as ex:
logger.warning(ex)
def _remove_leftovers(self):
@@ -236,4 +236,4 @@ def __handle_chunk_removal_exception(
logger.warning(f"{msg}: (Exception: {exception})")
else:
logger.warning(msg)
- raise NoDataCyclingAtAll() from exception
+ raise NoDataCyclingAtAllError() from exception
diff --git a/treeherder/model/data_cycling/max_runtime.py b/treeherder/model/data_cycling/max_runtime.py
index d4dad8172d9..4eb19f155f6 100644
--- a/treeherder/model/data_cycling/max_runtime.py
+++ b/treeherder/model/data_cycling/max_runtime.py
@@ -1,5 +1,5 @@
from datetime import datetime, timedelta
-from treeherder.perf.exceptions import MaxRuntimeExceeded
+from treeherder.perf.exceptions import MaxRuntimeExceededError
class MaxRuntime:
@@ -16,7 +16,7 @@ def quit_on_timeout(self):
elapsed_runtime = datetime.now() - self.started_at
if self.max_runtime < elapsed_runtime:
- raise MaxRuntimeExceeded("Max runtime for performance data cycling exceeded")
+ raise MaxRuntimeExceededError("Max runtime for performance data cycling exceeded")
def start_timer(self):
self.started_at = datetime.now()
diff --git a/treeherder/perf/auto_perf_sheriffing/backfill_reports.py b/treeherder/perf/auto_perf_sheriffing/backfill_reports.py
index 5bf0a061c01..f6e6751812f 100644
--- a/treeherder/perf/auto_perf_sheriffing/backfill_reports.py
+++ b/treeherder/perf/auto_perf_sheriffing/backfill_reports.py
@@ -6,7 +6,7 @@
import simplejson as json
from django.db.models import QuerySet, Q, F
-from treeherder.perf.exceptions import MissingRecords
+from treeherder.perf.exceptions import MissingRecordsError
from treeherder.perf.models import (
PerformanceAlert,
PerformanceDatum,
@@ -302,7 +302,7 @@ def compile_reports_for(self, summaries_to_retrigger: QuerySet) -> list[Backfill
try:
alert_context_map = self._associate_retrigger_context(important_alerts)
- except MissingRecords as ex:
+ except MissingRecordsError as ex:
self.log.warning(f"Failed to compute report for alert summary {summary}. {ex}")
continue
@@ -367,7 +367,7 @@ def _associate_retrigger_context(self, important_alerts: list[PerformanceAlert])
if incomplete_mapping:
expected = len(important_alerts)
missing = expected - len(retrigger_map)
- raise MissingRecords(f"{missing} out of {expected} records are missing!")
+ raise MissingRecordsError(f"{missing} out of {expected} records are missing!")
return retrigger_map
diff --git a/treeherder/perf/auto_perf_sheriffing/backfill_tool.py b/treeherder/perf/auto_perf_sheriffing/backfill_tool.py
index dcbc4b480b7..34f1d724721 100644
--- a/treeherder/perf/auto_perf_sheriffing/backfill_tool.py
+++ b/treeherder/perf/auto_perf_sheriffing/backfill_tool.py
@@ -4,7 +4,7 @@
from typing import Union
from treeherder.model.models import Job
-from treeherder.perf.exceptions import CannotBackfill
+from treeherder.perf.exceptions import CannotBackfillError
from treeherder.services.taskcluster import TaskclusterModel
logger = logging.getLogger(__name__)
@@ -51,7 +51,7 @@ def backfill_job(self, job: Union[Job, str]) -> str:
def assert_backfill_ability(self, over_job: Job):
if over_job.repository.is_try_repo:
- raise CannotBackfill("Try repository isn't suited for backfilling.")
+ raise CannotBackfillError("Try repository isn't suited for backfilling.")
@staticmethod
def _fetch_job_by_id(job_id: str) -> Job:
diff --git a/treeherder/perf/auto_perf_sheriffing/sherlock.py b/treeherder/perf/auto_perf_sheriffing/sherlock.py
index 77e4e387f62..2ea0bac8c2e 100644
--- a/treeherder/perf/auto_perf_sheriffing/sherlock.py
+++ b/treeherder/perf/auto_perf_sheriffing/sherlock.py
@@ -10,7 +10,7 @@
from treeherder.perf.auto_perf_sheriffing.backfill_reports import BackfillReportMaintainer
from treeherder.perf.auto_perf_sheriffing.backfill_tool import BackfillTool
from treeherder.perf.auto_perf_sheriffing.secretary import Secretary
-from treeherder.perf.exceptions import CannotBackfill, MaxRuntimeExceeded
+from treeherder.perf.exceptions import CannotBackfillError, MaxRuntimeExceededError
from treeherder.perf.models import BackfillRecord, BackfillReport, BackfillNotificationRecord
logger = logging.getLogger(__name__)
@@ -72,7 +72,7 @@ def runtime_exceeded(self) -> bool:
def assert_can_run(self):
if self.runtime_exceeded():
- raise MaxRuntimeExceeded("Sherlock: Max runtime exceeded.")
+ raise MaxRuntimeExceededError("Sherlock: Max runtime exceeded.")
def _report(
self, since: datetime, frameworks: list[str], repositories: list[str]
@@ -143,7 +143,7 @@ def _backfill_record(self, record: BackfillRecord, left: int) -> tuple[int, int]
using_job_id = data_point["job_id"]
self.backfill_tool.backfill_job(using_job_id)
left, consumed = left - 1, consumed + 1
- except (KeyError, CannotBackfill, Exception) as ex:
+ except (KeyError, CannotBackfillError, Exception) as ex:
logger.debug(f"Failed to backfill record {record.alert.id}: {ex}")
else:
record.try_remembering_job_properties(using_job_id)
diff --git a/treeherder/perf/exceptions.py b/treeherder/perf/exceptions.py
index 1cf50fd4de3..63cc67c0cef 100644
--- a/treeherder/perf/exceptions.py
+++ b/treeherder/perf/exceptions.py
@@ -1,4 +1,4 @@
-class NoDataCyclingAtAll(Exception):
+class NoDataCyclingAtAllError(Exception):
def __str__(self):
msg = "No data cycling could be performed."
if self.__cause__:
@@ -6,19 +6,19 @@ def __str__(self):
return msg
-class MaxRuntimeExceeded(Exception):
+class MaxRuntimeExceededError(Exception):
pass
-class MissingRecords(Exception):
+class MissingRecordsError(Exception):
pass
-class CannotBackfill(Exception):
+class CannotBackfillError(Exception):
pass
-class NoFiledBugs(Exception):
+class NoFiledBugsError(Exception):
pass
diff --git a/treeherder/perf/management/commands/perf_sheriff.py b/treeherder/perf/management/commands/perf_sheriff.py
index 2bb80788770..b254a9adbed 100644
--- a/treeherder/perf/management/commands/perf_sheriff.py
+++ b/treeherder/perf/management/commands/perf_sheriff.py
@@ -5,7 +5,7 @@
from treeherder.model.models import Repository
from treeherder.perf.auto_perf_sheriffing.factories import sherlock_factory
-from treeherder.perf.exceptions import MaxRuntimeExceeded
+from treeherder.perf.exceptions import MaxRuntimeExceededError
from treeherder.perf.models import PerformanceFramework
logger = logging.getLogger(__name__)
@@ -59,7 +59,7 @@ def handle(self, *args, **options):
sherlock = sherlock_factory(days_to_lookup)
try:
sherlock.sheriff(since, frameworks, repositories)
- except MaxRuntimeExceeded as ex:
+ except MaxRuntimeExceededError as ex:
logging.info(ex)
logging.info("Sherlock: Going back to sleep.")
diff --git a/treeherder/perf/management/commands/remove_vcs_data.py b/treeherder/perf/management/commands/remove_vcs_data.py
index 6bc60af3d6d..6cc92d18571 100644
--- a/treeherder/perf/management/commands/remove_vcs_data.py
+++ b/treeherder/perf/management/commands/remove_vcs_data.py
@@ -10,7 +10,7 @@
from django.core.management.base import BaseCommand
from treeherder.model.data_cycling import MaxRuntime
-from treeherder.perf.exceptions import MaxRuntimeExceeded
+from treeherder.perf.exceptions import MaxRuntimeExceededError
from treeherder.perf.models import PerformanceSignature
@@ -38,7 +38,7 @@ def _maybe_take_small_break(self):
def __enough_work(self) -> bool:
try:
self.__timer.quit_on_timeout() # check timer
- except MaxRuntimeExceeded:
+ except MaxRuntimeExceededError:
self.__timer.start_timer() # reset & restart it
return True
return False
diff --git a/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py b/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py
index 0c529c6e88f..d0473cc7e74 100644
--- a/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py
+++ b/treeherder/perf/sheriffing_criteria/bugzilla_formulas.py
@@ -7,7 +7,7 @@
from requests import Session
from treeherder.config.settings import BZ_DATETIME_FORMAT
-from treeherder.perf.exceptions import NoFiledBugs, BugzillaEndpointError
+from treeherder.perf.exceptions import NoFiledBugsError, BugzillaEndpointError
from treeherder.perf.models import PerformanceAlert
# Google Doc specification
@@ -77,7 +77,7 @@ def __call__(self, framework: str, suite: str, test: str = None) -> float:
all_filed_bugs = self.__fetch_cooled_down_bugs(framework, suite, test)
if len(all_filed_bugs) == 0:
- raise NoFiledBugs()
+ raise NoFiledBugsError()
denominator_bugs = self._filter_denominator_bugs(all_filed_bugs)
numerator_bugs = self._filter_numerator_bugs(all_filed_bugs)
diff --git a/treeherder/perf/sheriffing_criteria/criteria_tracking.py b/treeherder/perf/sheriffing_criteria/criteria_tracking.py
index 73019967a52..79ca745c94d 100644
--- a/treeherder/perf/sheriffing_criteria/criteria_tracking.py
+++ b/treeherder/perf/sheriffing_criteria/criteria_tracking.py
@@ -8,7 +8,7 @@
from datetime import datetime, timedelta
-from treeherder.perf.exceptions import NoFiledBugs
+from treeherder.perf.exceptions import NoFiledBugsError
from .bugzilla_formulas import BugzillaFormula, EngineerTractionFormula, FixRatioFormula
from treeherder.utils import PROJECT_ROOT
@@ -83,7 +83,7 @@ def apply_formulas(self, record: CriteriaRecord) -> CriteriaRecord:
for form_name, formula in self._formula_map.items():
try:
result = formula(record.Framework, record.Suite, record.Test)
- except (NoFiledBugs, Exception) as ex:
+ except (NoFiledBugsError, Exception) as ex:
result = "N/A"
self.__log_unexpected(ex, form_name, record)
@@ -95,7 +95,7 @@ def apply_formulas(self, record: CriteriaRecord) -> CriteriaRecord:
return record
def __log_unexpected(self, exception: Exception, formula_name: str, record: CriteriaRecord):
- if type(Exception) is NoFiledBugs:
+ if type(Exception) is NoFiledBugsError:
# maybe web service problem
self.log.info(exception)
elif type(exception) is Exception:
diff --git a/treeherder/workers/task.py b/treeherder/workers/task.py
index 77454e1e7b8..78a37c55203 100644
--- a/treeherder/workers/task.py
+++ b/treeherder/workers/task.py
@@ -7,7 +7,7 @@
from celery import shared_task
from django.db.utils import IntegrityError, ProgrammingError
-from treeherder.etl.exceptions import MissingPushException
+from treeherder.etl.exceptions import MissingPushError
class retryable_task: # noqa: N801
@@ -28,7 +28,7 @@ class retryable_task: # noqa: N801
# For these exceptions, we expect a certain amount of retries
# but to report each one is just noise. So don't raise to
# New Relic until the retries have been exceeded.
- HIDE_DURING_RETRIES = (MissingPushException,)
+ HIDE_DURING_RETRIES = (MissingPushError,)
def __init__(self, *args, **kwargs):
self.task_args = args
From 72061ba5ea78329a1dbe16f1f08ed38ecf50f2fe Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Tue, 27 Feb 2024 19:11:31 +0100
Subject: [PATCH 050/128] N804: First argument of a class method should be
named cls
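A minimal sketch of what N804 flags, with a hypothetical class (the real change below
touches @classmethod definitions on the model):
    class Example:
        @classmethod
        def flagged(self, term):  # N804: first argument of a classmethod should be named cls
            return term
        @classmethod
        def compliant(cls, term):
            return term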
---
pyproject.toml | 2 +-
treeherder/model/models.py | 8 ++++----
2 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 1a93a7ed65a..188a82f8d76 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ select = [
# pyupgrade
"UP",
# pep-naming
- "N806", "N803", "N801", "N815", "N811", "N818",
+ "N806", "N803", "N801", "N815", "N811", "N818", "N804",
]
ignore = [
diff --git a/treeherder/model/models.py b/treeherder/model/models.py
index 0aa283097d4..db8f21bd1b3 100644
--- a/treeherder/model/models.py
+++ b/treeherder/model/models.py
@@ -234,7 +234,7 @@ def __str__(self):
return f"{self.id}"
@classmethod
- def sanitized_search_term(self, search_term):
+ def sanitized_search_term(cls, search_term):
# MySQL Full Text Search operators, based on:
# https://dev.mysql.com/doc/refman/5.7/en/fulltext-boolean.html
# and other characters we want to remove
@@ -245,12 +245,12 @@ def sanitized_search_term(self, search_term):
return re.sub(mysql_fts_operators_re, " ", search_term)
@classmethod
- def search(self, search_term):
+ def search(cls, search_term):
max_size = 50
# Do not wrap a string in quotes to search as a phrase;
# see https://bugzilla.mozilla.org/show_bug.cgi?id=1704311
- search_term_fulltext = self.sanitized_search_term(search_term)
+ search_term_fulltext = cls.sanitized_search_term(search_term)
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
# Substitute escape and wildcard characters, so the search term is used
@@ -262,7 +262,7 @@ def search(self, search_term):
.replace('\\"', "")
)
- recent_qs = self.objects.raw(
+ recent_qs = cls.objects.raw(
"""
SELECT id, summary, crash_signature, keywords, resolution, status, dupe_of,
MATCH (`summary`) AGAINST (%s IN BOOLEAN MODE) AS relevance
From c39b22b34835775fcdba5e29e9b3c974f52d7f33 Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Tue, 27 Feb 2024 19:13:27 +0100
Subject: [PATCH 051/128] N813: Camelcase imported as lowercase
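In short, N813 flags a CamelCase name imported under a lowercase alias. The import
fixed below illustrates it:
    from multiprocessing import Manager as interproc  # N813: camelcase imported as lowercase
    from multiprocessing import Manager as Interproc  # compliant alias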
---
pyproject.toml | 2 +-
treeherder/perf/management/commands/import_perf_data.py | 4 ++--
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 188a82f8d76..be1039c3b93 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ select = [
# pyupgrade
"UP",
# pep-naming
- "N806", "N803", "N801", "N815", "N811", "N818", "N804",
+ "N806", "N803", "N801", "N815", "N811", "N818", "N804", "N813",
]
ignore = [
diff --git a/treeherder/perf/management/commands/import_perf_data.py b/treeherder/perf/management/commands/import_perf_data.py
index 4b0a5e88f53..43caa67fd9d 100644
--- a/treeherder/perf/management/commands/import_perf_data.py
+++ b/treeherder/perf/management/commands/import_perf_data.py
@@ -1,7 +1,7 @@
import collections
import datetime
import math
-from multiprocessing import Manager as interproc
+from multiprocessing import Manager as Interproc
from multiprocessing import Process
from django.core.management.base import BaseCommand
@@ -205,7 +205,7 @@ def __init__(
if repositories is not None
else list(Repository.objects.using(self.source).values_list("name", flat=True))
)
- interproc_instance = interproc()
+ interproc_instance = Interproc()
self.models_instances = {
"reference_data_signature": interproc_instance.list(),
"performance_alert": interproc_instance.list(),
From ff066ed8f8d96be84f90616920f328593874d622 Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Tue, 27 Feb 2024 19:16:04 +0100
Subject: [PATCH 052/128] N816: Variable in global scope should not be
mixedCase
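In short, N816 flags mixedCase names bound at module (global) scope. A sketch based on
the variables renamed below:
    projectsToIngest = None     # N816: mixedCase variable in global scope
    projects_to_ingest = None   # compliant snake_case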
---
pyproject.toml | 2 +-
treeherder/etl/management/commands/ingest.py | 6 +++---
treeherder/etl/taskcluster_pulse/handler.py | 4 ++--
3 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index be1039c3b93..8a0fe241f20 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ select = [
# pyupgrade
"UP",
# pep-naming
- "N806", "N803", "N801", "N815", "N811", "N818", "N804", "N813",
+ "N806", "N803", "N801", "N815", "N811", "N818", "N804", "N813", "N816"
]
ignore = [
diff --git a/treeherder/etl/management/commands/ingest.py b/treeherder/etl/management/commands/ingest.py
index cff426c3cc4..6d8d1e7f7cc 100644
--- a/treeherder/etl/management/commands/ingest.py
+++ b/treeherder/etl/management/commands/ingest.py
@@ -30,9 +30,9 @@
# Executor to run threads in parallel
executor = ThreadPoolExecutor()
-stateToExchange = {}
+state_to_exchange = {}
for key, value in EXCHANGE_EVENT_MAP.items():
- stateToExchange[value] = key
+ state_to_exchange[value] = key
# Semaphore to limit the number of threads opening DB connections when processing jobs
conn_sem = BoundedSemaphore(50)
@@ -136,7 +136,7 @@ async def handleTask(task, root_url):
# "retry" instead of exception
for run in reversed(runs):
message = {
- "exchange": stateToExchange[run["state"]],
+ "exchange": state_to_exchange[run["state"]],
"payload": {
"status": {
"taskId": task_id,
diff --git a/treeherder/etl/taskcluster_pulse/handler.py b/treeherder/etl/taskcluster_pulse/handler.py
index ae88fb998c0..e70a6821c58 100644
--- a/treeherder/etl/taskcluster_pulse/handler.py
+++ b/treeherder/etl/taskcluster_pulse/handler.py
@@ -14,7 +14,7 @@
env = environ.Env()
logger = logging.getLogger(__name__)
-projectsToIngest = env("PROJECTS_TO_INGEST", default=None)
+projects_to_ingest = env("PROJECTS_TO_INGEST", default=None)
# Build a mapping from exchange name to task status
@@ -102,7 +102,7 @@ def ignore_task(task, task_id, root_url, project):
ignore = False
# This logic is useful to reduce the number of tasks we ingest and requirying
# less dynos and less database writes. You can adjust PROJECTS_TO_INGEST on the app to meet your needs
- if projectsToIngest and project not in projectsToIngest.split(","):
+ if projects_to_ingest and project not in projects_to_ingest.split(","):
logger.debug("Ignoring tasks not matching PROJECTS_TO_INGEST (Task id: %s)", task_id)
return True
From 91d2b50694d49663436ccd850b0c336f04c79e80 Mon Sep 17 00:00:00 2001
From: Yoann Schneider
Date: Tue, 27 Feb 2024 19:43:01 +0100
Subject: [PATCH 053/128] N802: Function name should be lowercase
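In short, N802 flags function names that are not lowercase snake_case. A sketch using
one of the handlers renamed below:
    def handleMessage(message):   # N802: function name should be lowercase
        return message
    def handle_message(message):  # compliant
        return message
This patch also switches pyproject.toml from listing individual N-rules to enabling
the whole pep8-naming ("N") family.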
---
pyproject.toml | 4 +-
tests/etl/test_job_loader.py | 8 +--
tests/services/pulse/test_consumers.py | 8 +--
tests/utils/test_taskcluster_lib_scopes.py | 18 ++---
treeherder/etl/management/commands/ingest.py | 14 ++--
treeherder/etl/taskcluster_pulse/handler.py | 70 +++++++++----------
.../etl/taskcluster_pulse/parse_route.py | 2 +-
treeherder/etl/tasks/pulse_tasks.py | 6 +-
.../model/data_cycling/removal_strategies.py | 12 ++--
treeherder/services/taskcluster.py | 4 +-
treeherder/utils/taskcluster_lib_scopes.py | 14 ++--
11 files changed, 80 insertions(+), 80 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index 8a0fe241f20..123030cb321 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -39,8 +39,8 @@ select = [
"F",
# pyupgrade
"UP",
- # pep-naming
- "N806", "N803", "N801", "N815", "N811", "N818", "N804", "N813", "N816"
+ # pep8-naming
+ "N"
]
ignore = [
diff --git a/tests/etl/test_job_loader.py b/tests/etl/test_job_loader.py
index ac8a2526ff9..89d7b4c1398 100644
--- a/tests/etl/test_job_loader.py
+++ b/tests/etl/test_job_loader.py
@@ -6,7 +6,7 @@
import slugid
from treeherder.etl.job_loader import JobLoader
-from treeherder.etl.taskcluster_pulse.handler import handleMessage
+from treeherder.etl.taskcluster_pulse.handler import handle_message
from treeherder.model.models import Job, JobLog, TaskclusterMetadata
from django.core.exceptions import ObjectDoesNotExist
@@ -62,9 +62,9 @@ async def new_pulse_jobs(sample_data, test_repository, push_stored):
task_id = message["payload"]["status"]["taskId"]
task = tasks[task_id]
- # If we pass task to handleMessage we won't hit the network
- task_runs = await handleMessage(message, task)
- # handleMessage returns [] when it is a task that is not meant for Treeherder
+ # If we pass task to handle_message we won't hit the network
+ task_runs = await handle_message(message, task)
+ # handle_message returns [] when it is a task that is not meant for Treeherder
for run in reversed(task_runs):
mock_artifact(task_id, run["retryId"], "public/logs/live_backing.log")
run["origin"]["project"] = test_repository.name
diff --git a/tests/services/pulse/test_consumers.py b/tests/services/pulse/test_consumers.py
index 61a3e9235ba..cdef467bc74 100644
--- a/tests/services/pulse/test_consumers.py
+++ b/tests/services/pulse/test_consumers.py
@@ -9,7 +9,7 @@
from .utils import create_and_destroy_exchange
-def test_Consumers():
+def test_consumers():
class TestConsumer:
def prepare(self):
self.prepared = True
@@ -30,7 +30,7 @@ def run(self):
@pytest.mark.skipif(IS_WINDOWS, reason="celery does not work on windows")
-def test_PulseConsumer(pulse_connection):
+def test_pulse_consumer(pulse_connection):
class TestConsumer(PulseConsumer):
queue_suffix = "test"
@@ -51,7 +51,7 @@ def on_message(self, body, message):
cons.prepare()
-def test_JointConsumer_on_message_do_not_call_classification_ingestion(monkeypatch):
+def test_joint_consumer_on_message_do_not_call_classification_ingestion(monkeypatch):
mock_called = False
def mock_store_pulse_tasks_classification(args, queue):
@@ -87,7 +87,7 @@ def mock_store_pulse_tasks_classification(args, queue):
assert not mock_called
-def test_JointConsumer_on_message_call_classification_ingestion(monkeypatch):
+def test_joint_consumer_on_message_call_classification_ingestion(monkeypatch):
mock_called = False
def mock_store_pulse_tasks_classification(args, queue):
diff --git a/tests/utils/test_taskcluster_lib_scopes.py b/tests/utils/test_taskcluster_lib_scopes.py
index 0bcf5d288de..da80015c710 100644
--- a/tests/utils/test_taskcluster_lib_scopes.py
+++ b/tests/utils/test_taskcluster_lib_scopes.py
@@ -1,9 +1,9 @@
import pytest
-from treeherder.utils.taskcluster_lib_scopes import patternMatch, satisfiesExpression
+from treeherder.utils.taskcluster_lib_scopes import pattern_match, satisfies_expression
-# satisfiesExpression()
+# satisfies_expression()
@pytest.mark.parametrize(
"scopeset, expression",
[
@@ -36,7 +36,7 @@
],
)
def test_expression_is_satisfied(scopeset, expression):
- assert satisfiesExpression(scopeset, expression) is True
+ assert satisfies_expression(scopeset, expression) is True
@pytest.mark.parametrize(
@@ -58,7 +58,7 @@ def test_expression_is_satisfied(scopeset, expression):
],
)
def test_expression_is_not_satisfied(scopeset, expression):
- assert not satisfiesExpression(scopeset, expression)
+ assert not satisfies_expression(scopeset, expression)
@pytest.mark.parametrize(
@@ -72,19 +72,19 @@ def test_expression_is_not_satisfied(scopeset, expression):
)
def test_wrong_scopeset_type_raises_exception(scopeset):
with pytest.raises(TypeError):
- satisfiesExpression(scopeset, "in-tree:hook-action:{hook_group_id}/{hook_id}")
+ satisfies_expression(scopeset, "in-tree:hook-action:{hook_group_id}/{hook_id}")
-# patternMatch()
+# pattern_match()
def test_identical_scope_and_pattern_are_matching():
- assert patternMatch("mock:scope", "mock:scope") is True
+ assert pattern_match("mock:scope", "mock:scope") is True
@pytest.mark.parametrize(
"pattern, scope", [("matching*", "matching"), ("matching*", "matching/scope")]
)
def test_starred_patterns_are_matching(pattern, scope):
- assert patternMatch(pattern, scope) is True
+ assert pattern_match(pattern, scope) is True
@pytest.mark.parametrize(
@@ -92,4 +92,4 @@ def test_starred_patterns_are_matching(pattern, scope):
[("matching*", "mismatching"), ("match*ing", "matching"), ("*matching", "matching")],
)
def test_starred_patterns_dont_matching(pattern, scope):
- assert not patternMatch(pattern, scope)
+ assert not pattern_match(pattern, scope)
diff --git a/treeherder/etl/management/commands/ingest.py b/treeherder/etl/management/commands/ingest.py
index 6d8d1e7f7cc..1dfedc03a60 100644
--- a/treeherder/etl/management/commands/ingest.py
+++ b/treeherder/etl/management/commands/ingest.py
@@ -92,7 +92,7 @@ def ingest_hg_push(options):
gecko_decision_task = get_decision_task_id(project, commit, repo.tc_root_url)
logger.info("## START ##")
loop = asyncio.get_event_loop()
- loop.run_until_complete(processTasks(gecko_decision_task, repo.tc_root_url))
+ loop.run_until_complete(process_tasks(gecko_decision_task, repo.tc_root_url))
logger.info("## END ##")
else:
logger.info("You can ingest all tasks for a push with -a/--ingest-all-tasks.")
@@ -120,7 +120,7 @@ async def ingest_task(task_id, root_url):
async with taskcluster.aio.createSession(connector=conn, timeout=timeout) as session:
async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
results = await asyncio.gather(async_queue.status(task_id), async_queue.task(task_id))
- await handleTask(
+ await handle_task(
{
"status": results[0]["status"],
"task": results[1],
@@ -129,7 +129,7 @@ async def ingest_task(task_id, root_url):
)
-async def handleTask(task, root_url):
+async def handle_task(task, root_url):
task_id = task["status"]["taskId"]
runs = task["status"]["runs"]
# If we iterate in order of the runs, we will not be able to mark older runs as
@@ -160,7 +160,7 @@ async def handleTask(task, root_url):
await await_futures(job_futures)
-async def fetchGroupTasks(task_group_id, root_url):
+async def fetch_group_tasks(task_group_id, root_url):
tasks = []
query = {}
continuation_token = ""
@@ -182,9 +182,9 @@ async def fetchGroupTasks(task_group_id, root_url):
return tasks
-async def processTasks(task_group_id, root_url):
+async def process_tasks(task_group_id, root_url):
try:
- tasks = await fetchGroupTasks(task_group_id, root_url)
+ tasks = await fetch_group_tasks(task_group_id, root_url)
logger.info("We have %s tasks to process", len(tasks))
except Exception as e:
logger.exception(e)
@@ -193,7 +193,7 @@ async def processTasks(task_group_id, root_url):
return
# Schedule and run tasks inside the thread pool executor
- task_futures = [routine_to_future(handleTask, task, root_url) for task in tasks]
+ task_futures = [routine_to_future(handle_task, task, root_url) for task in tasks]
await await_futures(task_futures)
diff --git a/treeherder/etl/taskcluster_pulse/handler.py b/treeherder/etl/taskcluster_pulse/handler.py
index e70a6821c58..914e17878ac 100644
--- a/treeherder/etl/taskcluster_pulse/handler.py
+++ b/treeherder/etl/taskcluster_pulse/handler.py
@@ -10,7 +10,7 @@
import taskcluster_urls
from treeherder.etl.schema import get_json_schema
-from treeherder.etl.taskcluster_pulse.parse_route import parseRoute
+from treeherder.etl.taskcluster_pulse.parse_route import parse_route
env = environ.Env()
logger = logging.getLogger(__name__)
@@ -33,11 +33,11 @@ class PulseHandlerError(Exception):
pass
-def stateFromRun(job_run):
+def state_from_run(job_run):
return "completed" if job_run["state"] in ("exception", "failed") else job_run["state"]
-def resultFromRun(job_run):
+def result_from_run(job_run):
run_to_result = {
"completed": "success",
"failed": "fail",
@@ -56,7 +56,7 @@ def resultFromRun(job_run):
# Creates a log entry for Treeherder to retrieve and parse. This log is
# displayed on the Treeherder Log Viewer once parsed.
-def createLogReference(root_url, task_id, run_id):
+def create_log_reference(root_url, task_id, run_id):
log_url = taskcluster_urls.api(
root_url, "queue", "v1", "task/{taskId}/runs/{runId}/artifacts/public/logs/live_backing.log"
).format(taskId=task_id, runId=run_id)
@@ -70,7 +70,7 @@ def createLogReference(root_url, task_id, run_id):
# the route is parsed into distinct parts used for constructing the
# Treeherder job message.
# TODO: Refactor https://bugzilla.mozilla.org/show_bug.cgi?id=1560596
-def parseRouteInfo(prefix, task_id, routes, task):
+def parse_route_info(prefix, task_id, routes, task):
matching_routes = list(filter(lambda route: route.split(".")[0] == "tc-treeherder", routes))
if len(matching_routes) != 1:
@@ -80,12 +80,12 @@ def parseRouteInfo(prefix, task_id, routes, task):
+ f"Task ID: {task_id} Routes: {routes}"
)
- parsed_route = parseRoute(matching_routes[0])
+ parsed_route = parse_route(matching_routes[0])
return parsed_route
-def validateTask(task):
+def validate_task(task):
treeherder_metadata = task.get("extra", {}).get("treeherder")
if not treeherder_metadata:
logger.debug("Task metadata is missing Treeherder job configuration.")
@@ -166,7 +166,7 @@ def ignore_task(task, task_id, root_url, project):
# Only messages that contain the properly formatted routing key and contains
# treeherder job information in task.extra.treeherder are accepted
# This will generate a list of messages that need to be ingested by Treeherder
-async def handleMessage(message, task_definition=None):
+async def handle_message(message, task_definition=None):
async with taskcluster.aio.createSession() as session:
jobs = []
task_id = message["payload"]["status"]["taskId"]
@@ -174,7 +174,7 @@ async def handleMessage(message, task_definition=None):
task = (await async_queue.task(task_id)) if not task_definition else task_definition
try:
- parsed_route = parseRouteInfo("tc-treeherder", task_id, task["routes"], task)
+ parsed_route = parse_route_info("tc-treeherder", task_id, task["routes"], task)
except PulseHandlerError as e:
logger.debug("%s", str(e))
return jobs
@@ -185,7 +185,7 @@ async def handleMessage(message, task_definition=None):
logger.debug("Message received for task %s", task_id)
# Validation failures are common and logged, so do nothing more.
- if not validateTask(task):
+ if not validate_task(task):
return jobs
task_type = EXCHANGE_EVENT_MAP.get(message["exchange"])
@@ -196,18 +196,18 @@ async def handleMessage(message, task_definition=None):
# This will only work if the previous run has not yet been processed by Treeherder
# since _remove_existing_jobs() will prevent it
if message["payload"]["runId"] > 0:
- jobs.append(await handleTaskRerun(parsed_route, task, message, session))
+ jobs.append(await handle_task_rerun(parsed_route, task, message, session))
if not task_type:
raise Exception("Unknown exchange: {exchange}".format(exchange=message["exchange"]))
elif task_type == "pending":
- jobs.append(handleTaskPending(parsed_route, task, message))
+ jobs.append(handle_task_pending(parsed_route, task, message))
elif task_type == "running":
- jobs.append(handleTaskRunning(parsed_route, task, message))
+ jobs.append(handle_task_running(parsed_route, task, message))
elif task_type in ("completed", "failed"):
- jobs.append(await handleTaskCompleted(parsed_route, task, message, session))
+ jobs.append(await handle_task_completed(parsed_route, task, message, session))
elif task_type == "exception":
- jobs.append(await handleTaskException(parsed_route, task, message, session))
+ jobs.append(await handle_task_exception(parsed_route, task, message, session))
return jobs
@@ -217,7 +217,7 @@ async def handleMessage(message, task_definition=None):
#
# Specific handlers for each message type will add/remove information necessary
# for the type of task event..
-def buildMessage(push_info, task, run_id, payload):
+def build_message(push_info, task, run_id, payload):
task_id = payload["status"]["taskId"]
job_run = payload["status"]["runs"][run_id]
treeherder_config = task["extra"]["treeherder"]
@@ -236,8 +236,8 @@ def buildMessage(push_info, task, run_id, payload):
# Maximum job name length is 140 chars...
"jobName": task["metadata"]["name"][0:139],
},
- "state": stateFromRun(job_run),
- "result": resultFromRun(job_run),
+ "state": state_from_run(job_run),
+ "result": result_from_run(job_run),
"tier": treeherder_config.get("tier", 1),
"timeScheduled": task["created"],
"jobKind": treeherder_config.get("jobKind", "other"),
@@ -289,50 +289,50 @@ def buildMessage(push_info, task, run_id, payload):
return job
-def handleTaskPending(push_info, task, message):
+def handle_task_pending(push_info, task, message):
payload = message["payload"]
- return buildMessage(push_info, task, payload["runId"], payload)
+ return build_message(push_info, task, payload["runId"], payload)
-async def handleTaskRerun(push_info, task, message, session):
+async def handle_task_rerun(push_info, task, message, session):
payload = message["payload"]
- job = buildMessage(push_info, task, payload["runId"] - 1, payload)
+ job = build_message(push_info, task, payload["runId"] - 1, payload)
job["state"] = "completed"
job["result"] = "fail"
job["isRetried"] = True
# reruns often have no logs, so in the interest of not linking to a 404'ing artifact,
# don't include a link
job["logs"] = []
- job = await addArtifactUploadedLinks(
+ job = await add_artifact_uploaded_links(
message["root_url"], payload["status"]["taskId"], payload["runId"] - 1, job, session
)
return job
-def handleTaskRunning(push_info, task, message):
+def handle_task_running(push_info, task, message):
payload = message["payload"]
- job = buildMessage(push_info, task, payload["runId"], payload)
+ job = build_message(push_info, task, payload["runId"], payload)
job["timeStarted"] = payload["status"]["runs"][payload["runId"]]["started"]
return job
-async def handleTaskCompleted(push_info, task, message, session):
+async def handle_task_completed(push_info, task, message, session):
payload = message["payload"]
job_run = payload["status"]["runs"][payload["runId"]]
- job = buildMessage(push_info, task, payload["runId"], payload)
+ job = build_message(push_info, task, payload["runId"], payload)
job["timeStarted"] = job_run["started"]
job["timeCompleted"] = job_run["resolved"]
job["logs"] = [
- createLogReference(message["root_url"], payload["status"]["taskId"], job_run["runId"]),
+ create_log_reference(message["root_url"], payload["status"]["taskId"], job_run["runId"]),
]
- job = await addArtifactUploadedLinks(
+ job = await add_artifact_uploaded_links(
message["root_url"], payload["status"]["taskId"], payload["runId"], job, session
)
return job
-async def handleTaskException(push_info, task, message, session):
+async def handle_task_exception(push_info, task, message, session):
payload = message["payload"]
job_run = payload["status"]["runs"][payload["runId"]]
# Do not report runs that were created as an exception. Such cases
@@ -340,7 +340,7 @@ async def handleTaskException(push_info, task, message, session):
if job_run["reasonCreated"] == "exception":
return
- job = buildMessage(push_info, task, payload["runId"], payload)
+ job = build_message(push_info, task, payload["runId"], payload)
# Jobs that get cancelled before running don't have a started time
if job_run.get("started"):
job["timeStarted"] = job_run["started"]
@@ -348,13 +348,13 @@ async def handleTaskException(push_info, task, message, session):
# exceptions generally have no logs, so in the interest of not linking to a 404'ing artifact,
# don't include a link
job["logs"] = []
- job = await addArtifactUploadedLinks(
+ job = await add_artifact_uploaded_links(
message["root_url"], payload["status"]["taskId"], payload["runId"], job, session
)
return job
-async def fetchArtifacts(root_url, task_id, run_id, session):
+async def fetch_artifacts(root_url, task_id, run_id, session):
async_queue = taskcluster.aio.Queue({"rootUrl": root_url}, session=session)
res = await async_queue.listArtifacts(task_id, run_id)
artifacts = res["artifacts"]
@@ -378,10 +378,10 @@ async def fetchArtifacts(root_url, task_id, run_id, session):
# fetch them in order to determine if there is an error_summary log;
# TODO refactor this when there is a way to only retrieve the error_summary
# artifact: https://bugzilla.mozilla.org/show_bug.cgi?id=1629716
-async def addArtifactUploadedLinks(root_url, task_id, run_id, job, session):
+async def add_artifact_uploaded_links(root_url, task_id, run_id, job, session):
artifacts = []
try:
- artifacts = await fetchArtifacts(root_url, task_id, run_id, session)
+ artifacts = await fetch_artifacts(root_url, task_id, run_id, session)
except Exception:
logger.debug("Artifacts could not be found for task: %s run: %s", task_id, run_id)
return job
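
A brief standalone sketch (not part of the patch) of how the renamed `fetch_artifacts` helper could be exercised outside the pulse handler. The root URL, task id and run id are placeholders, and the assumption that each returned entry carries a `"name"` key follows the standard Taskcluster `listArtifacts` response.

```python
# Illustrative sketch, not part of the patch: calling the renamed fetch_artifacts
# helper directly. The root URL, task id and run id below are placeholders; in
# production the pulse handler supplies them from the incoming message.
import asyncio

import aiohttp

from treeherder.etl.taskcluster_pulse.handler import fetch_artifacts


async def list_artifact_names(root_url, task_id, run_id):
    async with aiohttp.ClientSession() as session:
        artifacts = await fetch_artifacts(root_url, task_id, run_id, session)
        # Entries in a Taskcluster listArtifacts response carry a "name" field.
        return [artifact["name"] for artifact in artifacts]


if __name__ == "__main__":
    names = asyncio.run(
        list_artifact_names(
            "https://firefox-ci-tc.services.mozilla.com",  # assumed root URL
            "abcDEFghijTaskId",  # placeholder task id
            0,
        )
    )
    print(names)
```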
diff --git a/treeherder/etl/taskcluster_pulse/parse_route.py b/treeherder/etl/taskcluster_pulse/parse_route.py
index b4c1a15da7d..a4f50b3331b 100644
--- a/treeherder/etl/taskcluster_pulse/parse_route.py
+++ b/treeherder/etl/taskcluster_pulse/parse_route.py
@@ -11,7 +11,7 @@
# Note: pushes on a branch on Github would not have a PR ID
# Function extracted from
# https://github.com/taskcluster/taskcluster/blob/32629c562f8d6f5a6b608a3141a8ee2e0984619f/services/treeherder/src/util/route_parser.js
-def parseRoute(route):
+def parse_route(route):
id = None
owner = None
parsed_project = None
diff --git a/treeherder/etl/tasks/pulse_tasks.py b/treeherder/etl/tasks/pulse_tasks.py
index 7b39b3cb9c7..cd558b2db19 100644
--- a/treeherder/etl/tasks/pulse_tasks.py
+++ b/treeherder/etl/tasks/pulse_tasks.py
@@ -8,7 +8,7 @@
from treeherder.etl.classification_loader import ClassificationLoader
from treeherder.etl.job_loader import JobLoader
from treeherder.etl.push_loader import PushLoader
-from treeherder.etl.taskcluster_pulse.handler import handleMessage
+from treeherder.etl.taskcluster_pulse.handler import handle_message
from treeherder.workers.task import retryable_task
# NOTE: default values for root_url parameters can be removed once all tasks that lack
@@ -25,9 +25,9 @@ def store_pulse_tasks(
loop = asyncio.get_event_loop()
newrelic.agent.add_custom_attribute("exchange", exchange)
newrelic.agent.add_custom_attribute("routing_key", routing_key)
- # handleMessage expects messages in this format
+ # handle_message expects messages in this format
runs = loop.run_until_complete(
- handleMessage(
+ handle_message(
{
"exchange": exchange,
"payload": pulse_job,
diff --git a/treeherder/model/data_cycling/removal_strategies.py b/treeherder/model/data_cycling/removal_strategies.py
index 4470bb1c537..4c0e5488e2e 100644
--- a/treeherder/model/data_cycling/removal_strategies.py
+++ b/treeherder/model/data_cycling/removal_strategies.py
@@ -18,7 +18,7 @@
class RemovalStrategy(ABC):
@property
@abstractmethod
- def CYCLE_INTERVAL(self) -> int:
+ def cycle_interval(self) -> int:
"""
expressed in days
"""
@@ -26,7 +26,7 @@ def CYCLE_INTERVAL(self) -> int:
@has_valid_explicit_days
def __init__(self, chunk_size: int, days: int = None):
- days = days or self.CYCLE_INTERVAL
+ days = days or self.cycle_interval
self._cycle_interval = timedelta(days=days)
self._chunk_size = chunk_size
@@ -65,7 +65,7 @@ class MainRemovalStrategy(RemovalStrategy):
"""
@property
- def CYCLE_INTERVAL(self) -> int:
+ def cycle_interval(self) -> int:
# WARNING!! Don't override this without proper approval!
return 365 # days #
########################################################
@@ -127,7 +127,7 @@ class TryDataRemoval(RemovalStrategy):
SIGNATURE_BULK_SIZE = 10
@property
- def CYCLE_INTERVAL(self) -> int:
+ def cycle_interval(self) -> int:
# WARNING!! Don't override this without proper approval!
return 42 # days #
########################################################
@@ -246,7 +246,7 @@ class IrrelevantDataRemoval(RemovalStrategy):
]
@property
- def CYCLE_INTERVAL(self) -> int:
+ def cycle_interval(self) -> int:
# WARNING!! Don't override this without proper approval!
return 180 # days #
########################################################
@@ -340,7 +340,7 @@ class StalledDataRemoval(RemovalStrategy):
"""
@property
- def CYCLE_INTERVAL(self) -> int:
+ def cycle_interval(self) -> int:
# WARNING!! Don't override this without proper approval!
return 120 # days #
########################################################
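
Because the base class's `__init__` now reads `self.cycle_interval` (see the hunk above), every concrete strategy has to expose the lower-cased property, which is what the remaining hunks in this file do. A hypothetical subclass illustrating that contract, not part of the patch:

```python
# Hypothetical strategy, shown only to illustrate the renamed hook; it is not
# part of this patch. Any other abstract members of RemovalStrategy would also
# need to be implemented before the class could actually be instantiated.
from treeherder.model.data_cycling.removal_strategies import RemovalStrategy


class ExampleDataRemoval(RemovalStrategy):
    @property
    def cycle_interval(self) -> int:
        # Expressed in days, per the abstract property's docstring; the base
        # __init__ falls back to this value when no explicit `days` is passed.
        return 30
```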
diff --git a/treeherder/services/taskcluster.py b/treeherder/services/taskcluster.py
index c24736b1ee4..2da66db3c66 100644
--- a/treeherder/services/taskcluster.py
+++ b/treeherder/services/taskcluster.py
@@ -7,7 +7,7 @@
import taskcluster
from django.conf import settings
-from treeherder.utils.taskcluster_lib_scopes import satisfiesExpression
+from treeherder.utils.taskcluster_lib_scopes import satisfies_expression
logger = logging.getLogger(__name__)
@@ -117,7 +117,7 @@ def _submit(
expansion = self.auth.expandScopes({"scopes": decision_task["scopes"]})
expression = f"in-tree:hook-action:{hook_group_id}/{hook_id}"
- if not satisfiesExpression(expansion["scopes"], expression):
+ if not satisfies_expression(expansion["scopes"], expression):
raise RuntimeError(
f"Action is misconfigured: decision task's scopes do not satisfy {expression}"
)
diff --git a/treeherder/utils/taskcluster_lib_scopes.py b/treeherder/utils/taskcluster_lib_scopes.py
index 0f8d9db9f24..80138de89d7 100644
--- a/treeherder/utils/taskcluster_lib_scopes.py
+++ b/treeherder/utils/taskcluster_lib_scopes.py
@@ -4,25 +4,25 @@
"""
-def satisfiesExpression(scopeset, expression):
+def satisfies_expression(scopeset, expression):
if not isinstance(scopeset, list):
raise TypeError("Scopeset must be an array.")
- def isSatisfied(expr):
+ def is_satisfied(expr):
if isinstance(expr, str):
- return any([patternMatch(s, expr) for s in scopeset])
+ return any([pattern_match(s, expr) for s in scopeset])
return (
"AllOf" in expr
- and all([isSatisfied(e) for e in expr["AllOf"]])
+ and all([is_satisfied(e) for e in expr["AllOf"]])
or "AnyOf" in expr
- and any([isSatisfied(e) for e in expr["AnyOf"]])
+ and any([is_satisfied(e) for e in expr["AnyOf"]])
)
- return isSatisfied(expression)
+ return is_satisfied(expression)
-def patternMatch(pattern: str, scope):
+def pattern_match(pattern: str, scope):
if scope == pattern:
return True
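
A usage sketch (not part of the patch): the renamed `satisfies_expression` and `pattern_match` keep the nested AllOf/AnyOf semantics relied on by `services/taskcluster.py` above. The scope strings are invented for illustration and match by exact equality here.

```python
# Usage sketch with invented scope strings; both required branches are present
# in the scopeset, so the expression is satisfied.
from treeherder.utils.taskcluster_lib_scopes import satisfies_expression

scopeset = [
    "hooks:trigger-hook:project-gecko/in-tree-action-1-generic",
    "queue:create-task:highest:gecko-level-1/*",
]

expression = {
    "AllOf": [
        "hooks:trigger-hook:project-gecko/in-tree-action-1-generic",
        {
            "AnyOf": [
                "queue:create-task:highest:gecko-level-1/*",
                "queue:create-task:highest:gecko-level-3/*",
            ]
        },
    ]
}

print(satisfies_expression(scopeset, expression))  # True
```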
From a78f4bab680768f66378247ebe161ad0596c80e8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 04:10:59 +0000
Subject: [PATCH 054/128] Bump black from 23.3.0 to 24.2.0
Bumps [black](https://github.com/psf/black) from 23.3.0 to 24.2.0.
- [Release notes](https://github.com/psf/black/releases)
- [Changelog](https://github.com/psf/black/blob/main/CHANGES.md)
- [Commits](https://github.com/psf/black/compare/23.3.0...24.2.0)
---
updated-dependencies:
- dependency-name: black
dependency-type: direct:development
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 49 +++++++++++++++++++++-----------------------
2 files changed, 24 insertions(+), 27 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 0d58a2b1e2b..2320f54eb8c 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -16,7 +16,7 @@ pytest-watch==4.2.0
# Required by django-extension's runserver_plus command.
pytest-django==4.8.0
pytest==7.3.2
-black==23.3.0
+black==24.2.0
shellcheck-py==0.9.0.6
# To test async code
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 5ecd8136044..16d2df82227 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -24,32 +24,29 @@ betamax-serializers==0.2.1 \
--hash=sha256:1b23c46429c40a8873682854c88d805c787c72d252f3fa0c858e9c300682ceac \
--hash=sha256:345c419b1b73171f2951c62ac3c701775ac4b76e13e86464ebf0ff2a978e4949
# via -r requirements/dev.in
-black==23.3.0 \
- --hash=sha256:064101748afa12ad2291c2b91c960be28b817c0c7eaa35bec09cc63aa56493c5 \
- --hash=sha256:0945e13506be58bf7db93ee5853243eb368ace1c08a24c65ce108986eac65915 \
- --hash=sha256:11c410f71b876f961d1de77b9699ad19f939094c3a677323f43d7a29855fe326 \
- --hash=sha256:1c7b8d606e728a41ea1ccbd7264677e494e87cf630e399262ced92d4a8dac940 \
- --hash=sha256:1d06691f1eb8de91cd1b322f21e3bfc9efe0c7ca1f0e1eb1db44ea367dff656b \
- --hash=sha256:3238f2aacf827d18d26db07524e44741233ae09a584273aa059066d644ca7b30 \
- --hash=sha256:32daa9783106c28815d05b724238e30718f34155653d4d6e125dc7daec8e260c \
- --hash=sha256:35d1381d7a22cc5b2be2f72c7dfdae4072a3336060635718cc7e1ede24221d6c \
- --hash=sha256:3a150542a204124ed00683f0db1f5cf1c2aaaa9cc3495b7a3b5976fb136090ab \
- --hash=sha256:48f9d345675bb7fbc3dd85821b12487e1b9a75242028adad0333ce36ed2a6d27 \
- --hash=sha256:50cb33cac881766a5cd9913e10ff75b1e8eb71babf4c7104f2e9c52da1fb7de2 \
- --hash=sha256:562bd3a70495facf56814293149e51aa1be9931567474993c7942ff7d3533961 \
- --hash=sha256:67de8d0c209eb5b330cce2469503de11bca4085880d62f1628bd9972cc3366b9 \
- --hash=sha256:6b39abdfb402002b8a7d030ccc85cf5afff64ee90fa4c5aebc531e3ad0175ddb \
- --hash=sha256:6f3c333ea1dd6771b2d3777482429864f8e258899f6ff05826c3a4fcc5ce3f70 \
- --hash=sha256:714290490c18fb0126baa0fca0a54ee795f7502b44177e1ce7624ba1c00f2331 \
- --hash=sha256:7c3eb7cea23904399866c55826b31c1f55bbcd3890ce22ff70466b907b6775c2 \
- --hash=sha256:92c543f6854c28a3c7f39f4d9b7694f9a6eb9d3c5e2ece488c327b6e7ea9b266 \
- --hash=sha256:a6f6886c9869d4daae2d1715ce34a19bbc4b95006d20ed785ca00fa03cba312d \
- --hash=sha256:a8a968125d0a6a404842fa1bf0b349a568634f856aa08ffaff40ae0dfa52e7c6 \
- --hash=sha256:c7ab5790333c448903c4b721b59c0d80b11fe5e9803d8703e84dcb8da56fec1b \
- --hash=sha256:e114420bf26b90d4b9daa597351337762b63039752bdf72bf361364c1aa05925 \
- --hash=sha256:e198cf27888ad6f4ff331ca1c48ffc038848ea9f031a3b40ba36aced7e22f2c8 \
- --hash=sha256:ec751418022185b0c1bb7d7736e6933d40bbb14c14a0abcf9123d1b159f98dd4 \
- --hash=sha256:f0bd2f4a58d6666500542b26354978218a9babcdc972722f4bf90779524515f3
+black==24.2.0 \
+ --hash=sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8 \
+ --hash=sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8 \
+ --hash=sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd \
+ --hash=sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9 \
+ --hash=sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31 \
+ --hash=sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92 \
+ --hash=sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f \
+ --hash=sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29 \
+ --hash=sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4 \
+ --hash=sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693 \
+ --hash=sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218 \
+ --hash=sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a \
+ --hash=sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23 \
+ --hash=sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0 \
+ --hash=sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982 \
+ --hash=sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894 \
+ --hash=sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540 \
+ --hash=sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430 \
+ --hash=sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b \
+ --hash=sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2 \
+ --hash=sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6 \
+ --hash=sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d
# via -r requirements/dev.in
build==1.0.3 \
--hash=sha256:538aab1b64f9828977f84bc63ae570b060a8ed1be419e7870b8b4fc5e6ea553b \
From 0051ae7ef566db126d907f2641b183ad99dd66fd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 04:04:36 +0000
Subject: [PATCH 055/128] Bump dockerflow from 2024.1.0 to 2024.3.0
Bumps [dockerflow](https://github.com/mozilla-services/python-dockerflow) from 2024.1.0 to 2024.3.0.
- [Release notes](https://github.com/mozilla-services/python-dockerflow/releases)
- [Changelog](https://github.com/mozilla-services/python-dockerflow/blob/main/docs/changelog.rst)
- [Commits](https://github.com/mozilla-services/python-dockerflow/compare/2024.1.0...2024.3.0)
---
updated-dependencies:
- dependency-name: dockerflow
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 609ce64ffd2..7606884f133 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -37,7 +37,7 @@ django-cache-memoize==0.1.10 # Imported as cache_memoize
mozci[cache]==2.4.0
# Dockerflow/CloudOps APIs
-dockerflow==2024.1.0
+dockerflow==2024.3.0
# Measuring noise of perf data
moz-measure-noise==2.60.1
diff --git a/requirements/common.txt b/requirements/common.txt
index 591f19b8e41..a96a6631ad8 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -390,9 +390,9 @@ djangorestframework==3.14.0 \
--hash=sha256:579a333e6256b09489cbe0a067e66abe55c6595d8926be6b99423786334350c8 \
--hash=sha256:eb63f58c9f218e1a7d064d17a70751f528ed4e1d35547fdade9aaf4cd103fd08
# via -r requirements/common.in
-dockerflow==2024.1.0 \
- --hash=sha256:38d6a60a01e87d33dcf802f1ef2f09ae2f375c829d3805923d88409387562d66 \
- --hash=sha256:df1597fb3d58d759993e5b5e7f254162804882c04c09d5b7df97aa47b0a9d15b
+dockerflow==2024.3.0 \
+ --hash=sha256:96678b00636dfd61fccf08f5f4102d0444e43bec3f8850175a060d8e83559e4c \
+ --hash=sha256:e8cea4df7f7342aa551c9bfa12b401adfd3e28f7f928fc545ae657fc5614ebda
# via -r requirements/common.in
ecdsa==0.18.0 \
--hash=sha256:190348041559e21b22a1d65cee485282ca11a6f81d503fddb84d5017e9ed1e49 \
From 544ee8c34c421eb2e12c307e85339b581249379d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 04:02:50 +0000
Subject: [PATCH 056/128] Bump python-dateutil from 2.8.2 to 2.9.0.post0
Bumps [python-dateutil](https://github.com/dateutil/dateutil) from 2.8.2 to 2.9.0.post0.
- [Release notes](https://github.com/dateutil/dateutil/releases)
- [Changelog](https://github.com/dateutil/dateutil/blob/master/NEWS)
- [Commits](https://github.com/dateutil/dateutil/compare/2.8.2...2.9.0.post0)
---
updated-dependencies:
- dependency-name: python-dateutil
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
requirements/dev.txt | 6 +++---
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 7606884f133..77d1e5ca831 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -21,7 +21,7 @@ PyYAML==6.0.1 # Imported as yaml
django-environ==0.10.0 # Imported as environ
uritemplate==4.1.1 # For OpenAPI schema
-python-dateutil==2.8.2
+python-dateutil==2.9.0.post0
django-filter==23.2 # Listed in DEFAULT_FILTER_BACKENDS on settings.py
django-redis==5.3.0 # Listed in CACHES on settings.py
diff --git a/requirements/common.txt b/requirements/common.txt
index a96a6631ad8..4836c99399e 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -995,9 +995,9 @@ pyflakes==2.4.0 \
--hash=sha256:05a85c2872edf37a4ed30b0cce2f6093e1d0581f8c19d7393122da7e25b2b24c \
--hash=sha256:3bb3a3f256f4b7968c9c788781e4ff07dce46bdf12339dcda61053375426ee2e
# via flake8
-python-dateutil==2.8.2 \
- --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \
- --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9
+python-dateutil==2.9.0.post0 \
+ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \
+ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427
# via
# -r requirements/common.in
# arrow
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 16d2df82227..5b2c6b42b60 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -372,9 +372,9 @@ pytest-testmon==2.1.1 \
pytest-watch==4.2.0 \
--hash=sha256:06136f03d5b361718b8d0d234042f7b2f203910d8568f63df2f866b547b3d4b9
# via -r requirements/dev.in
-python-dateutil==2.8.2 \
- --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \
- --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9
+python-dateutil==2.9.0.post0 \
+ --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \
+ --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427
# via freezegun
pyyaml==6.0.1 \
--hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \
From bc0af25f0296c82b3a8234209d37fb9c5deb0de9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 03:58:12 +0000
Subject: [PATCH 057/128] Bump responses from 0.23.1 to 0.25.0
Bumps [responses](https://github.com/getsentry/responses) from 0.23.1 to 0.25.0.
- [Release notes](https://github.com/getsentry/responses/releases)
- [Changelog](https://github.com/getsentry/responses/blob/master/CHANGES)
- [Commits](https://github.com/getsentry/responses/compare/0.23.1...0.25.0)
---
updated-dependencies:
- dependency-name: responses
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 10 +++-------
2 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 2320f54eb8c..acf835bac02 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -2,7 +2,7 @@
pytest-cov==4.1.0
django-debug-toolbar==4.3.0
mock==5.1.0
-responses==0.23.1
+responses==0.25.0
django-extensions==3.2.3
PyPOM==2.2.4
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 5b2c6b42b60..40960de04bd 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -438,9 +438,9 @@ requests==2.31.0 \
# -r requirements/dev.in
# betamax
# responses
-responses==0.23.1 \
- --hash=sha256:8a3a5915713483bf353b6f4079ba8b2a29029d1d1090a503c70b0dc5d9d0c7bd \
- --hash=sha256:c4d9aa9fc888188f0c673eff79a8dadbe2e75b7fe879dc80a221a06e0a68138f
+responses==0.25.0 \
+ --hash=sha256:01ae6a02b4f34e39bffceb0fc6786b67a25eae919c6368d05eabc8d9576c2a66 \
+ --hash=sha256:2f0b9c2b6437db4b528619a77e5d565e4ec2a9532162ac1a131a83529db7be1a
# via -r requirements/dev.in
selenium==4.17.2 \
--hash=sha256:5aee79026c07985dc1b0c909f34084aa996dfe5b307602de9016d7a621a473f2 \
@@ -489,10 +489,6 @@ trio-websocket==0.11.1 \
--hash=sha256:18c11793647703c158b1f6e62de638acada927344d534e3c7628eedcb746839f \
--hash=sha256:520d046b0d030cf970b8b2b2e00c4c2245b3807853ecd44214acd33d74581638
# via selenium
-types-pyyaml==6.0.12.12 \
- --hash=sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062 \
- --hash=sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24
- # via responses
typing-extensions==4.9.0 \
--hash=sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783 \
--hash=sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd
From c6ee755e5b2ef656de42de46cf92edf04758af4f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 03:19:55 +0000
Subject: [PATCH 058/128] Bump psycopg2-binary from 2.9.6 to 2.9.9
Bumps [psycopg2-binary](https://github.com/psycopg/psycopg2) from 2.9.6 to 2.9.9.
- [Changelog](https://github.com/psycopg/psycopg2/blob/master/NEWS)
- [Commits](https://github.com/psycopg/psycopg2/compare/2.9.6...2.9.9)
---
updated-dependencies:
- dependency-name: psycopg2-binary
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 136 +++++++++++++++++++++-------------------
2 files changed, 74 insertions(+), 64 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 77d1e5ca831..7a2634b6413 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -9,7 +9,7 @@ newrelic==8.8.0
certifi==2023.5.7
mysqlclient==2.1.1 # Required by Django
-psycopg2-binary==2.9.6
+psycopg2-binary==2.9.9
jsonschema==4.21.1 # import jsonschema
djangorestframework==3.14.0 # Imported as rest_framework
diff --git a/requirements/common.txt b/requirements/common.txt
index 4836c99399e..0de7291c19f 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -883,69 +883,79 @@ prompt-toolkit==3.0.43 \
--hash=sha256:3527b7af26106cbc65a040bcc84839a3566ec1b051bb0bfe953631e704b0ff7d \
--hash=sha256:a11a29cb3bf0a28a387fe5122cdb649816a957cd9261dcedf8c9f1fef33eacf6
# via click-repl
-psycopg2-binary==2.9.6 \
- --hash=sha256:02c0f3757a4300cf379eb49f543fb7ac527fb00144d39246ee40e1df684ab514 \
- --hash=sha256:02c6e3cf3439e213e4ee930308dc122d6fb4d4bea9aef4a12535fbd605d1a2fe \
- --hash=sha256:0645376d399bfd64da57148694d78e1f431b1e1ee1054872a5713125681cf1be \
- --hash=sha256:0892ef645c2fabb0c75ec32d79f4252542d0caec1d5d949630e7d242ca4681a3 \
- --hash=sha256:0d236c2825fa656a2d98bbb0e52370a2e852e5a0ec45fc4f402977313329174d \
- --hash=sha256:0e0f754d27fddcfd74006455b6e04e6705d6c31a612ec69ddc040a5468e44b4e \
- --hash=sha256:15e2ee79e7cf29582ef770de7dab3d286431b01c3bb598f8e05e09601b890081 \
- --hash=sha256:1876843d8e31c89c399e31b97d4b9725a3575bb9c2af92038464231ec40f9edb \
- --hash=sha256:1f64dcfb8f6e0c014c7f55e51c9759f024f70ea572fbdef123f85318c297947c \
- --hash=sha256:2ab652e729ff4ad76d400df2624d223d6e265ef81bb8aa17fbd63607878ecbee \
- --hash=sha256:30637a20623e2a2eacc420059be11527f4458ef54352d870b8181a4c3020ae6b \
- --hash=sha256:34b9ccdf210cbbb1303c7c4db2905fa0319391bd5904d32689e6dd5c963d2ea8 \
- --hash=sha256:38601cbbfe600362c43714482f43b7c110b20cb0f8172422c616b09b85a750c5 \
- --hash=sha256:441cc2f8869a4f0f4bb408475e5ae0ee1f3b55b33f350406150277f7f35384fc \
- --hash=sha256:498807b927ca2510baea1b05cc91d7da4718a0f53cb766c154c417a39f1820a0 \
- --hash=sha256:4ac30da8b4f57187dbf449294d23b808f8f53cad6b1fc3623fa8a6c11d176dd0 \
- --hash=sha256:4c727b597c6444a16e9119386b59388f8a424223302d0c06c676ec8b4bc1f963 \
- --hash=sha256:4d67fbdaf177da06374473ef6f7ed8cc0a9dc640b01abfe9e8a2ccb1b1402c1f \
- --hash=sha256:4dfb4be774c4436a4526d0c554af0cc2e02082c38303852a36f6456ece7b3503 \
- --hash=sha256:4ea29fc3ad9d91162c52b578f211ff1c931d8a38e1f58e684c45aa470adf19e2 \
- --hash=sha256:51537e3d299be0db9137b321dfb6a5022caaab275775680e0c3d281feefaca6b \
- --hash=sha256:61b047a0537bbc3afae10f134dc6393823882eb263088c271331602b672e52e9 \
- --hash=sha256:6460c7a99fc939b849431f1e73e013d54aa54293f30f1109019c56a0b2b2ec2f \
- --hash=sha256:65bee1e49fa6f9cf327ce0e01c4c10f39165ee76d35c846ade7cb0ec6683e303 \
- --hash=sha256:65c07febd1936d63bfde78948b76cd4c2a411572a44ac50719ead41947d0f26b \
- --hash=sha256:71f14375d6f73b62800530b581aed3ada394039877818b2d5f7fc77e3bb6894d \
- --hash=sha256:7a40c00dbe17c0af5bdd55aafd6ff6679f94a9be9513a4c7e071baf3d7d22a70 \
- --hash=sha256:7e13a5a2c01151f1208d5207e42f33ba86d561b7a89fca67c700b9486a06d0e2 \
- --hash=sha256:7f0438fa20fb6c7e202863e0d5ab02c246d35efb1d164e052f2f3bfe2b152bd0 \
- --hash=sha256:8122cfc7cae0da9a3077216528b8bb3629c43b25053284cc868744bfe71eb141 \
- --hash=sha256:8338a271cb71d8da40b023a35d9c1e919eba6cbd8fa20a54b748a332c355d896 \
- --hash=sha256:84d2222e61f313c4848ff05353653bf5f5cf6ce34df540e4274516880d9c3763 \
- --hash=sha256:8a6979cf527e2603d349a91060f428bcb135aea2be3201dff794813256c274f1 \
- --hash=sha256:8a76e027f87753f9bd1ab5f7c9cb8c7628d1077ef927f5e2446477153a602f2c \
- --hash=sha256:964b4dfb7c1c1965ac4c1978b0f755cc4bd698e8aa2b7667c575fb5f04ebe06b \
- --hash=sha256:9972aad21f965599ed0106f65334230ce826e5ae69fda7cbd688d24fa922415e \
- --hash=sha256:a8c28fd40a4226b4a84bdf2d2b5b37d2c7bd49486b5adcc200e8c7ec991dfa7e \
- --hash=sha256:ae102a98c547ee2288637af07393dd33f440c25e5cd79556b04e3fca13325e5f \
- --hash=sha256:af335bac6b666cc6aea16f11d486c3b794029d9df029967f9938a4bed59b6a19 \
- --hash=sha256:afe64e9b8ea66866a771996f6ff14447e8082ea26e675a295ad3bdbffdd72afb \
- --hash=sha256:b4b24f75d16a89cc6b4cdff0eb6a910a966ecd476d1e73f7ce5985ff1328e9a6 \
- --hash=sha256:b6c8288bb8a84b47e07013bb4850f50538aa913d487579e1921724631d02ea1b \
- --hash=sha256:b83456c2d4979e08ff56180a76429263ea254c3f6552cd14ada95cff1dec9bb8 \
- --hash=sha256:bfb13af3c5dd3a9588000910178de17010ebcccd37b4f9794b00595e3a8ddad3 \
- --hash=sha256:c3dba7dab16709a33a847e5cd756767271697041fbe3fe97c215b1fc1f5c9848 \
- --hash=sha256:c48d8f2db17f27d41fb0e2ecd703ea41984ee19362cbce52c097963b3a1b4365 \
- --hash=sha256:c7e62ab8b332147a7593a385d4f368874d5fe4ad4e341770d4983442d89603e3 \
- --hash=sha256:c83a74b68270028dc8ee74d38ecfaf9c90eed23c8959fca95bd703d25b82c88e \
- --hash=sha256:cacbdc5839bdff804dfebc058fe25684cae322987f7a38b0168bc1b2df703fb1 \
- --hash=sha256:cf4499e0a83b7b7edcb8dabecbd8501d0d3a5ef66457200f77bde3d210d5debb \
- --hash=sha256:cfec476887aa231b8548ece2e06d28edc87c1397ebd83922299af2e051cf2827 \
- --hash=sha256:d26e0342183c762de3276cca7a530d574d4e25121ca7d6e4a98e4f05cb8e4df7 \
- --hash=sha256:d4e6036decf4b72d6425d5b29bbd3e8f0ff1059cda7ac7b96d6ac5ed34ffbacd \
- --hash=sha256:d57c3fd55d9058645d26ae37d76e61156a27722097229d32a9e73ed54819982a \
- --hash=sha256:dfa74c903a3c1f0d9b1c7e7b53ed2d929a4910e272add6700c38f365a6002820 \
- --hash=sha256:e3ed340d2b858d6e6fb5083f87c09996506af483227735de6964a6100b4e6a54 \
- --hash=sha256:e78e6e2a00c223e164c417628572a90093c031ed724492c763721c2e0bc2a8df \
- --hash=sha256:e9182eb20f41417ea1dd8e8f7888c4d7c6e805f8a7c98c1081778a3da2bee3e4 \
- --hash=sha256:e99e34c82309dd78959ba3c1590975b5d3c862d6f279f843d47d26ff89d7d7e1 \
- --hash=sha256:f6a88f384335bb27812293fdb11ac6aee2ca3f51d3c7820fe03de0a304ab6249 \
- --hash=sha256:f81e65376e52f03422e1fb475c9514185669943798ed019ac50410fb4c4df232 \
- --hash=sha256:ffe9dc0a884a8848075e576c1de0290d85a533a9f6e9c4e564f19adf8f6e54a7
+psycopg2-binary==2.9.9 \
+ --hash=sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9 \
+ --hash=sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77 \
+ --hash=sha256:0c009475ee389757e6e34611d75f6e4f05f0cf5ebb76c6037508318e1a1e0d7e \
+ --hash=sha256:0ef4854e82c09e84cc63084a9e4ccd6d9b154f1dbdd283efb92ecd0b5e2b8c84 \
+ --hash=sha256:1236ed0952fbd919c100bc839eaa4a39ebc397ed1c08a97fc45fee2a595aa1b3 \
+ --hash=sha256:143072318f793f53819048fdfe30c321890af0c3ec7cb1dfc9cc87aa88241de2 \
+ --hash=sha256:15208be1c50b99203fe88d15695f22a5bed95ab3f84354c494bcb1d08557df67 \
+ --hash=sha256:1873aade94b74715be2246321c8650cabf5a0d098a95bab81145ffffa4c13876 \
+ --hash=sha256:18d0ef97766055fec15b5de2c06dd8e7654705ce3e5e5eed3b6651a1d2a9a152 \
+ --hash=sha256:1ea665f8ce695bcc37a90ee52de7a7980be5161375d42a0b6c6abedbf0d81f0f \
+ --hash=sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a \
+ --hash=sha256:246b123cc54bb5361588acc54218c8c9fb73068bf227a4a531d8ed56fa3ca7d6 \
+ --hash=sha256:275ff571376626195ab95a746e6a04c7df8ea34638b99fc11160de91f2fef503 \
+ --hash=sha256:281309265596e388ef483250db3640e5f414168c5a67e9c665cafce9492eda2f \
+ --hash=sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493 \
+ --hash=sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996 \
+ --hash=sha256:30dcc86377618a4c8f3b72418df92e77be4254d8f89f14b8e8f57d6d43603c0f \
+ --hash=sha256:31a34c508c003a4347d389a9e6fcc2307cc2150eb516462a7a17512130de109e \
+ --hash=sha256:323ba25b92454adb36fa425dc5cf6f8f19f78948cbad2e7bc6cdf7b0d7982e59 \
+ --hash=sha256:34eccd14566f8fe14b2b95bb13b11572f7c7d5c36da61caf414d23b91fcc5d94 \
+ --hash=sha256:3a58c98a7e9c021f357348867f537017057c2ed7f77337fd914d0bedb35dace7 \
+ --hash=sha256:3f78fd71c4f43a13d342be74ebbc0666fe1f555b8837eb113cb7416856c79682 \
+ --hash=sha256:4154ad09dac630a0f13f37b583eae260c6aa885d67dfbccb5b02c33f31a6d420 \
+ --hash=sha256:420f9bbf47a02616e8554e825208cb947969451978dceb77f95ad09c37791dae \
+ --hash=sha256:4686818798f9194d03c9129a4d9a702d9e113a89cb03bffe08c6cf799e053291 \
+ --hash=sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe \
+ --hash=sha256:60989127da422b74a04345096c10d416c2b41bd7bf2a380eb541059e4e999980 \
+ --hash=sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93 \
+ --hash=sha256:68fc1f1ba168724771e38bee37d940d2865cb0f562380a1fb1ffb428b75cb692 \
+ --hash=sha256:6e6f98446430fdf41bd36d4faa6cb409f5140c1c2cf58ce0bbdaf16af7d3f119 \
+ --hash=sha256:729177eaf0aefca0994ce4cffe96ad3c75e377c7b6f4efa59ebf003b6d398716 \
+ --hash=sha256:72dffbd8b4194858d0941062a9766f8297e8868e1dd07a7b36212aaa90f49472 \
+ --hash=sha256:75723c3c0fbbf34350b46a3199eb50638ab22a0228f93fb472ef4d9becc2382b \
+ --hash=sha256:77853062a2c45be16fd6b8d6de2a99278ee1d985a7bd8b103e97e41c034006d2 \
+ --hash=sha256:78151aa3ec21dccd5cdef6c74c3e73386dcdfaf19bced944169697d7ac7482fc \
+ --hash=sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c \
+ --hash=sha256:804d99b24ad523a1fe18cc707bf741670332f7c7412e9d49cb5eab67e886b9b5 \
+ --hash=sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab \
+ --hash=sha256:8359bf4791968c5a78c56103702000105501adb557f3cf772b2c207284273984 \
+ --hash=sha256:83791a65b51ad6ee6cf0845634859d69a038ea9b03d7b26e703f94c7e93dbcf9 \
+ --hash=sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf \
+ --hash=sha256:876801744b0dee379e4e3c38b76fc89f88834bb15bf92ee07d94acd06ec890a0 \
+ --hash=sha256:8dbf6d1bc73f1d04ec1734bae3b4fb0ee3cb2a493d35ede9badbeb901fb40f6f \
+ --hash=sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212 \
+ --hash=sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb \
+ --hash=sha256:977646e05232579d2e7b9c59e21dbe5261f403a88417f6a6512e70d3f8a046be \
+ --hash=sha256:9dba73be7305b399924709b91682299794887cbbd88e38226ed9f6712eabee90 \
+ --hash=sha256:a148c5d507bb9b4f2030a2025c545fccb0e1ef317393eaba42e7eabd28eb6041 \
+ --hash=sha256:a6cdcc3ede532f4a4b96000b6362099591ab4a3e913d70bcbac2b56c872446f7 \
+ --hash=sha256:ac05fb791acf5e1a3e39402641827780fe44d27e72567a000412c648a85ba860 \
+ --hash=sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d \
+ --hash=sha256:b58b4710c7f4161b5e9dcbe73bb7c62d65670a87df7bcce9e1faaad43e715245 \
+ --hash=sha256:b6356793b84728d9d50ead16ab43c187673831e9d4019013f1402c41b1db9b27 \
+ --hash=sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417 \
+ --hash=sha256:bc7bb56d04601d443f24094e9e31ae6deec9ccb23581f75343feebaf30423359 \
+ --hash=sha256:c2470da5418b76232f02a2fcd2229537bb2d5a7096674ce61859c3229f2eb202 \
+ --hash=sha256:c332c8d69fb64979ebf76613c66b985414927a40f8defa16cf1bc028b7b0a7b0 \
+ --hash=sha256:c6af2a6d4b7ee9615cbb162b0738f6e1fd1f5c3eda7e5da17861eacf4c717ea7 \
+ --hash=sha256:c77e3d1862452565875eb31bdb45ac62502feabbd53429fdc39a1cc341d681ba \
+ --hash=sha256:ca08decd2697fdea0aea364b370b1249d47336aec935f87b8bbfd7da5b2ee9c1 \
+ --hash=sha256:ca49a8119c6cbd77375ae303b0cfd8c11f011abbbd64601167ecca18a87e7cdd \
+ --hash=sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07 \
+ --hash=sha256:d2997c458c690ec2bc6b0b7ecbafd02b029b7b4283078d3b32a852a7ce3ddd98 \
+ --hash=sha256:d3f82c171b4ccd83bbaf35aa05e44e690113bd4f3b7b6cc54d2219b132f3ae55 \
+ --hash=sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d \
+ --hash=sha256:ead20f7913a9c1e894aebe47cccf9dc834e1618b7aa96155d2091a626e59c972 \
+ --hash=sha256:ebdc36bea43063116f0486869652cb2ed7032dbc59fbcb4445c4862b5c1ecf7f \
+ --hash=sha256:ed1184ab8f113e8d660ce49a56390ca181f2981066acc27cf637d5c1e10ce46e \
+ --hash=sha256:ee825e70b1a209475622f7f7b776785bd68f34af6e7a46e2e42f27b659b5bc26 \
+ --hash=sha256:f7ae5d65ccfbebdfa761585228eb4d0df3a8b15cfb53bd953e713e09fbb12957 \
+ --hash=sha256:f7fc5a5acafb7d6ccca13bfa8c90f8c51f13d8fb87d95656d3950f0158d3ce53 \
+ --hash=sha256:f9b5571d33660d5009a8b3c25dc1db560206e2d2f89d3df1cb32d72c0d117d52
# via -r requirements/common.in
pyasn1==0.5.1 \
--hash=sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58 \
From a5de2e5edfa04c07188f7ee98af74fc3f924fca9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 03:18:09 +0000
Subject: [PATCH 059/128] Bump certifi from 2023.5.7 to 2024.2.2
Bumps [certifi](https://github.com/certifi/python-certifi) from 2023.5.7 to 2024.2.2.
- [Commits](https://github.com/certifi/python-certifi/compare/2023.05.07...2024.02.02)
---
updated-dependencies:
- dependency-name: certifi
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 7a2634b6413..3898b9ceb2f 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -6,7 +6,7 @@ celery==5.3.6 # celery needed for data ingestion
cached-property==1.5.2 # needed for kombu with --require-hashes
simplejson==3.19.1 # import simplejson
newrelic==8.8.0
-certifi==2023.5.7
+certifi==2024.2.2
mysqlclient==2.1.1 # Required by Django
psycopg2-binary==2.9.9
diff --git a/requirements/common.txt b/requirements/common.txt
index 0de7291c19f..56f4b39db59 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -231,9 +231,9 @@ celery==5.3.6 \
--hash=sha256:870cc71d737c0200c397290d730344cc991d13a057534353d124c9380267aab9 \
--hash=sha256:9da4ea0118d232ce97dff5ed4974587fb1c0ff5c10042eb15278487cdd27d1af
# via -r requirements/common.in
-certifi==2023.5.7 \
- --hash=sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7 \
- --hash=sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716
+certifi==2024.2.2 \
+ --hash=sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f \
+ --hash=sha256:dc383c07b76109f368f6106eee2b593b04a011ea4d55f652c6ca24a754d1cdd1
# via
# -r requirements/common.in
# requests
From 69ab3bc9b23e99e557cbac2d573439652d7d7cbc Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 03:06:47 +0000
Subject: [PATCH 060/128] Bump whitenoise[brotli] from 6.5.0 to 6.6.0
Bumps [whitenoise[brotli]](https://github.com/evansd/whitenoise) from 6.5.0 to 6.6.0.
- [Changelog](https://github.com/evansd/whitenoise/blob/main/docs/changelog.rst)
- [Commits](https://github.com/evansd/whitenoise/compare/6.5.0...6.6.0)
---
updated-dependencies:
- dependency-name: whitenoise[brotli]
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 3898b9ceb2f..bce2743d3eb 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -1,6 +1,6 @@
# Packages that are shared between deployment and dev environments.
gunicorn==21.2.0
-whitenoise[brotli]==6.5.0 # Used by Whitenoise to provide Brotli-compressed versions of static files.
+whitenoise[brotli]==6.6.0 # Used by Whitenoise to provide Brotli-compressed versions of static files.
Django==4.1.13
celery==5.3.6 # celery needed for data ingestion
cached-property==1.5.2 # needed for kombu with --require-hashes
diff --git a/requirements/common.txt b/requirements/common.txt
index 56f4b39db59..00d38cae7f8 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -1542,9 +1542,9 @@ wcwidth==0.2.13 \
# via
# blessed
# prompt-toolkit
-whitenoise[brotli]==6.5.0 \
- --hash=sha256:15fe60546ac975b58e357ccaeb165a4ca2d0ab697e48450b8f0307ca368195a8 \
- --hash=sha256:16468e9ad2189f09f4a8c635a9031cc9bb2cdbc8e5e53365407acf99f7ade9ec
+whitenoise[brotli]==6.6.0 \
+ --hash=sha256:8998f7370973447fac1e8ef6e8ded2c5209a7b1f67c1012866dbcd09681c3251 \
+ --hash=sha256:b1f9db9bf67dc183484d760b99f4080185633136a273a03f6436034a41064146
# via
# -r requirements/common.in
# whitenoise
From 25c2d4905b2a6df7b970126a800c0caae6d0cb2f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 15:37:14 +0000
Subject: [PATCH 061/128] Bump django-filter from 23.2 to 23.5
Bumps [django-filter](https://github.com/carltongibson/django-filter) from 23.2 to 23.5.
- [Release notes](https://github.com/carltongibson/django-filter/releases)
- [Changelog](https://github.com/carltongibson/django-filter/blob/main/CHANGES.rst)
- [Commits](https://github.com/carltongibson/django-filter/compare/23.2...23.5)
---
updated-dependencies:
- dependency-name: django-filter
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index bce2743d3eb..d862eea3d13 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -22,7 +22,7 @@ django-environ==0.10.0 # Imported as environ
uritemplate==4.1.1 # For OpenAPI schema
python-dateutil==2.9.0.post0
-django-filter==23.2 # Listed in DEFAULT_FILTER_BACKENDS on settings.py
+django-filter==23.5 # Listed in DEFAULT_FILTER_BACKENDS on settings.py
django-redis==5.3.0 # Listed in CACHES on settings.py
taskcluster==60.4.2 # import taskcluster
diff --git a/requirements/common.txt b/requirements/common.txt
index 00d38cae7f8..8caac654384 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -378,9 +378,9 @@ django-environ==0.10.0 \
--hash=sha256:510f8c9c1d0a38b0815f91504270c29440a0cf44fab07f55942fa8d31bbb9be6 \
--hash=sha256:b3559a91439c9d774a9e0c1ced872364772c612cdf6dc919506a2b13f7a77225
# via -r requirements/common.in
-django-filter==23.2 \
- --hash=sha256:2fe15f78108475eda525692813205fa6f9e8c1caf1ae65daa5862d403c6dbf00 \
- --hash=sha256:d12d8e0fc6d3eb26641e553e5d53b191eb8cec611427d4bdce0becb1f7c172b5
+django-filter==23.5 \
+ --hash=sha256:67583aa43b91fe8c49f74a832d95f4d8442be628fd4c6d65e9f811f5153a4e5c \
+ --hash=sha256:99122a201d83860aef4fe77758b69dda913e874cc5e0eaa50a86b0b18d708400
# via -r requirements/common.in
django-redis==5.3.0 \
--hash=sha256:2d8660d39f586c41c9907d5395693c477434141690fd7eca9d32376af00b0aac \
From 5d2aa2047774192888c6da582f4becf3956e7f53 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 03:13:09 +0000
Subject: [PATCH 062/128] Bump mkdocs-material from 9.1.21 to 9.5.12
Bumps [mkdocs-material](https://github.com/squidfunk/mkdocs-material) from 9.1.21 to 9.5.12.
- [Release notes](https://github.com/squidfunk/mkdocs-material/releases)
- [Changelog](https://github.com/squidfunk/mkdocs-material/blob/master/CHANGELOG)
- [Commits](https://github.com/squidfunk/mkdocs-material/compare/9.1.21...9.5.12)
---
updated-dependencies:
- dependency-name: mkdocs-material
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 123030cb321..8adead7bbec 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ description = "Defaut package, used for development or readthedocs"
[project.optional-dependencies]
docs = [
"mkdocs==1.5.3",
- "mkdocs-material==9.1.21",
+ "mkdocs-material==9.5.12",
"mdx_truly_sane_lists==1.3",
]
From 758e528b7cf0cfe2b798bbf9f311f42847efffc8 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 15:40:49 +0000
Subject: [PATCH 063/128] Bump pytest from 7.3.2 to 8.0.2
Bumps [pytest](https://github.com/pytest-dev/pytest) from 7.3.2 to 8.0.2.
- [Release notes](https://github.com/pytest-dev/pytest/releases)
- [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/pytest-dev/pytest/compare/7.3.2...8.0.2)
---
updated-dependencies:
- dependency-name: pytest
dependency-type: direct:development
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index acf835bac02..361d647fce7 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -15,7 +15,7 @@ pytest-watch==4.2.0
# Required by django-extension's runserver_plus command.
pytest-django==4.8.0
-pytest==7.3.2
+pytest==8.0.2
black==24.2.0
shellcheck-py==0.9.0.6
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 40960de04bd..0ea770859e5 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -339,9 +339,9 @@ pysocks==1.7.1 \
--hash=sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5 \
--hash=sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0
# via urllib3
-pytest==7.3.2 \
- --hash=sha256:cdcbd012c9312258922f8cd3f1b62a6580fdced17db6014896053d47cddf9295 \
- --hash=sha256:ee990a3cc55ba808b80795a79944756f315c67c12b56abd3ac993a7b8c17030b
+pytest==8.0.2 \
+ --hash=sha256:d4051d623a2e0b7e51960ba963193b09ce6daeb9759a451844a21e4ddedfc1bd \
+ --hash=sha256:edfaaef32ce5172d5466b5127b42e0d6d35ebbe4453f0e3505d96afd93f6b096
# via
# -r requirements/dev.in
# pytest-asyncio
From 0f0c4da1732f7885b207de684f0b3a8e096841cf Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 15:44:24 +0000
Subject: [PATCH 064/128] Bump newrelic from 8.8.0 to 9.7.0
Bumps [newrelic](https://github.com/newrelic/newrelic-python-agent) from 8.8.0 to 9.7.0.
- [Release notes](https://github.com/newrelic/newrelic-python-agent/releases)
- [Commits](https://github.com/newrelic/newrelic-python-agent/compare/v8.8.0...v9.7.0)
---
updated-dependencies:
- dependency-name: newrelic
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 46 +++++++++++++++++++++++++++--------------
2 files changed, 31 insertions(+), 17 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index d862eea3d13..3149e00044c 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -5,7 +5,7 @@ Django==4.1.13
celery==5.3.6 # celery needed for data ingestion
cached-property==1.5.2 # needed for kombu with --require-hashes
simplejson==3.19.1 # import simplejson
-newrelic==8.8.0
+newrelic==9.7.0
certifi==2024.2.2
mysqlclient==2.1.1 # Required by Django
diff --git a/requirements/common.txt b/requirements/common.txt
index 8caac654384..4dd3152b1d3 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -814,22 +814,36 @@ mysqlclient==2.1.1 \
--hash=sha256:c812b67e90082a840efb82a8978369e6e69fc62ce1bda4ca8f3084a9d862308b \
--hash=sha256:dea88c8d3f5a5d9293dfe7f087c16dd350ceb175f2f6631c9cf4caf3e19b7a96
# via -r requirements/common.in
-newrelic==8.8.0 \
- --hash=sha256:1bc307d06e2033637e7b484af22f540ca041fb23a54b311bcd5968ca1a64e4ef \
- --hash=sha256:435ac9e3791f78e05c9da8107a6ef49c13e62ac302696858fa2411198fe201ff \
- --hash=sha256:6662ec79493f23f9d0995a015177c87508bea4c541f7c9f17a61b503b82e1367 \
- --hash=sha256:67902b3c53fa497dba887068166261d114ac2347c8a4908d735d7594cca163dc \
- --hash=sha256:6b4db0e7544232d4e6e835a02ee28637970576f8dce82ffcaa3d675246e822d5 \
- --hash=sha256:796ed5ff44b04b41e051dc0112e5016e53a37e39e95023c45ff7ecd34c254a7d \
- --hash=sha256:84d1f71284efa5f1cae696161e0c3cb65eaa2f53116fe5e7c5a62be7d15d9536 \
- --hash=sha256:9355f209ba8d82fd0f9d78d7cc1d9bef0ae4677b3cfed7b7aaec521adbe87559 \
- --hash=sha256:9c0d5153b7363d5cb5cac7f8d1a4e03669b074afee2dda201851a67c7bed1e32 \
- --hash=sha256:bcd3219e1e816a0fdb51ac993cac6744e6a835c13ee72e21d86bcbc2d16628ce \
- --hash=sha256:c4a0556c6ece49132ab1c32bfe398047a8311f9a8b6862b482495d132fcb0ad4 \
- --hash=sha256:caccdf201735df80b470ddf772f60a154f2c07c0c1b2b3f6e999d55e79ce601e \
- --hash=sha256:d21af16cee1e0caf4c73c4c1b2d7ba9f33fe6a870d93135dc8b23ac592f49b38 \
- --hash=sha256:da8f2dc31e182768fe314d8ceb6f42acd09956708846f8ae71f07f044a3aa05e \
- --hash=sha256:ef9c178329f8c04f0574908c1f04ff1f18b9eba55b869744583fee3eac48e571
+newrelic==9.7.0 \
+ --hash=sha256:0344e718ddc4ffe78a1441c6313a6af2f9aa3001e93a8a5197caac091f8bc9b3 \
+ --hash=sha256:0fdd25b9969a4c85a53a1dc2cade462164c6603e85ffe50da732ad4e69347659 \
+ --hash=sha256:172732a71d4ff053c1c724a8dfbb8b1efc24c398c25e78f7aaf7966551d3fb09 \
+ --hash=sha256:27e851365bf5e5f8e7ca21e63d01bd2ce9327afc18417e071a3d50590f2747a8 \
+ --hash=sha256:288ed42949fd4a5d535507cb15b8f602111244663eceab1716a0a77e529ee2b6 \
+ --hash=sha256:333ec033d13646f2221fdaf3822d3b8360d1935d1baea6879c1ae7f0d5020217 \
+ --hash=sha256:4966e4be00eab203903796a4b5aa864d866ba45d17bf823d71a932f99330ceee \
+ --hash=sha256:4adf292b529771536b417f46f84c497413f467f1ae7534009404580e259cb1b1 \
+ --hash=sha256:4bd32c76427782a3cf6994cab1217a1da79327d5b9cb2bad11917df5eb55dc0d \
+ --hash=sha256:4cefc2b264122e9f99db557ec9f1c5b287f4b95229957f7f78269cc462d47065 \
+ --hash=sha256:563342155edbed8276ddef9e2e15a61a31953ff9f42015a426f94660adf104cb \
+ --hash=sha256:59f2c94a2e256f00b344efc909eb1f058cd411e9a95a6ad1d7adf957223a747d \
+ --hash=sha256:78f604a2622a6795320a6ff54262816aeff86da79400429c34346fc5feecb235 \
+ --hash=sha256:8958e575f7ada2ed8937dafff297790aeb960499b08d209b76a8a3c72f0841fc \
+ --hash=sha256:91e2ad1da28c76d67344daca7ddd6166a5e190f7031f9a5bd683db17542f91ef \
+ --hash=sha256:9c41a571d0889409044bfb22194382731e18fd7962ba6a91ff640b274ca3fc1a \
+ --hash=sha256:a687a521950da96b7daa553d1ab6371aebc5bfd1f3cb4ceb5d6dc859b0956602 \
+ --hash=sha256:b180f099aabff875f83364b6314b9954e29dfca753ccc1d353a8135c1430f9a6 \
+ --hash=sha256:b7733168eae4c718f885f188bcfc265c299f51d43130350b32f86f3754bc809b \
+ --hash=sha256:bc5af6e7d7b6f30b03cec4f265b84fa8d370e006332854181214507e2deb421e \
+ --hash=sha256:be2a7697b8407cea2ebe962ec990935dff300d9c4f78d3d7335b9dc280d33c53 \
+ --hash=sha256:bf9485a5c9efaa30c645683eab427ce8b41164213bc003f7e7ad31772eb1f481 \
+ --hash=sha256:c005bfb53c7090652839e9b38a3ec2462fe4e125fe976e2b9fcd778efa1c4a12 \
+ --hash=sha256:d3656b546aced2c6a4443e5e76f89e17a1672d69dfe47940119c688ab4426a76 \
+ --hash=sha256:e229fb5406a3c0752723bc5444d75dc863456a0305621be4159356f2880488e9 \
+ --hash=sha256:e57d78ef1291710968e872412a8d7c765f077de0aaf225aaab216c552ee1775a \
+ --hash=sha256:e731ac5b66dbeda1e990ba41cecda8ea865c69f72b0267574d6b1727113f7de2 \
+ --hash=sha256:eb94aabd4b575f4fa2068343781614cc249630c8bcbc07f115affeb1311736cd \
+ --hash=sha256:fb3e40be0f1ba2b2d1ad070d7913952efb1ceee13e6548d63bb973dcdf2c9d32
# via -r requirements/common.in
numpy==1.26.3 \
--hash=sha256:02f98011ba4ab17f46f80f7f8f1c291ee7d855fcef0a5a98db80767a468c85cd \
From a6db6a02aee5645abe35af2ad786a0f41ac17aba Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 15:21:52 +0000
Subject: [PATCH 065/128] Bump django-cache-memoize from 0.1.10 to 0.2.0
Bumps [django-cache-memoize](https://github.com/peterbe/django-cache-memoize) from 0.1.10 to 0.2.0.
- [Changelog](https://github.com/peterbe/django-cache-memoize/blob/master/CHANGELOG.rst)
- [Commits](https://github.com/peterbe/django-cache-memoize/commits)
---
updated-dependencies:
- dependency-name: django-cache-memoize
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 3149e00044c..5ddc8ae2e41 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -31,7 +31,7 @@ python-jose[pycryptodome]==3.3.0 # from jose import jwt
furl==2.1.3 # Imported as furl
first==2.0.2 # Imported as first
json-e==4.7.0 # import jsone
-django-cache-memoize==0.1.10 # Imported as cache_memoize
+django-cache-memoize==0.2.0 # Imported as cache_memoize
# Required for Push Health
mozci[cache]==2.4.0
diff --git a/requirements/common.txt b/requirements/common.txt
index 4dd3152b1d3..c3bfd153d5b 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -366,9 +366,9 @@ django==4.1.13 \
# django-filter
# django-redis
# djangorestframework
-django-cache-memoize==0.1.10 \
- --hash=sha256:63e8faa245a41c0dbad843807e9f21a6e59eba8e6e50df310fdf6485a6749843 \
- --hash=sha256:676299313079cde9242ae84db0160e80b1d44e8dd6bc9b1f4f1247e11b30c9e0
+django-cache-memoize==0.2.0 \
+ --hash=sha256:79950a027ba40e4aff4efed587b76036bf5ba1f59329d7b158797b832be72ca6 \
+ --hash=sha256:a6bfd112da699d1fa85955a1e15b7c48ee25e58044398958e269678db10736f3
# via -r requirements/common.in
django-cors-headers==4.1.0 \
--hash=sha256:36a8d7a6dee6a85f872fe5916cc878a36d0812043866355438dfeda0b20b6b78 \
From e5f25b8baafd9c4e4828d0ae7b16dc7322e85396 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 03:01:14 +0000
Subject: [PATCH 066/128] Bump pip-tools from 6.13.0 to 7.4.0
Bumps [pip-tools](https://github.com/jazzband/pip-tools) from 6.13.0 to 7.4.0.
- [Release notes](https://github.com/jazzband/pip-tools/releases)
- [Changelog](https://github.com/jazzband/pip-tools/blob/main/CHANGELOG.md)
- [Commits](https://github.com/jazzband/pip-tools/compare/6.13.0...7.4.0)
---
updated-dependencies:
- dependency-name: pip-tools
dependency-type: direct:development
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 11 +++++++----
2 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 361d647fce7..036ea525fbe 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -34,7 +34,7 @@ betamax==0.9.0
betamax-serializers==0.2.1
# pip-compile for pinning versions
-pip-tools==6.13.0
+pip-tools==7.4.0
requests==2.31.0
urllib3==2.0.3
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 0ea770859e5..9fd4130d6d1 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -306,9 +306,9 @@ pathspec==0.12.1 \
--hash=sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08 \
--hash=sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712
# via black
-pip-tools==6.13.0 \
- --hash=sha256:50943f151d87e752abddec8158622c34ad7f292e193836e90e30d87da60b19d9 \
- --hash=sha256:61d46bd2eb8016ed4a924e196e6e5b0a268cd3babd79e593048720db23522bb1
+pip-tools==7.4.0 \
+ --hash=sha256:a92a6ddfa86ff389fe6ace381d463bc436e2c705bd71d52117c25af5ce867bb7 \
+ --hash=sha256:b67432fd0759ed834c5367f9e0ce8c95441acecfec9c8e24b41aca166757adf0
# via -r requirements/dev.in
platformdirs==4.2.0 \
--hash=sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068 \
@@ -333,7 +333,9 @@ pypom==2.2.4 \
pyproject-hooks==1.0.0 \
--hash=sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8 \
--hash=sha256:f271b298b97f5955d53fb12b72c1fb1948c22c1a6b70b315c54cedaca0264ef5
- # via build
+ # via
+ # build
+ # pip-tools
pysocks==1.7.1 \
--hash=sha256:08e69f092cc6dbe92a0fdd16eeb9b9ffbc13cadfe5ca4c7bd92ffb078b293299 \
--hash=sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5 \
@@ -477,6 +479,7 @@ tomli==2.0.1 \
# black
# build
# coverage
+ # pip-tools
# pyproject-hooks
# pytest
trio==0.24.0 \
From e7a064e1049217911971cbd404ab932e97d740df Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 16:48:01 +0000
Subject: [PATCH 067/128] Bump simplejson from 3.19.1 to 3.19.2
Bumps [simplejson](https://github.com/simplejson/simplejson) from 3.19.1 to 3.19.2.
- [Release notes](https://github.com/simplejson/simplejson/releases)
- [Changelog](https://github.com/simplejson/simplejson/blob/master/CHANGES.txt)
- [Commits](https://github.com/simplejson/simplejson/compare/v3.19.1...v3.19.2)
---
updated-dependencies:
- dependency-name: simplejson
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 185 +++++++++++++++++++++-------------------
2 files changed, 100 insertions(+), 87 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 5ddc8ae2e41..20539425a08 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -4,7 +4,7 @@ whitenoise[brotli]==6.6.0 # Used by Whitenoise to provide Brotli-compressed ver
Django==4.1.13
celery==5.3.6 # celery needed for data ingestion
cached-property==1.5.2 # needed for kombu with --require-hashes
-simplejson==3.19.1 # import simplejson
+simplejson==3.19.2 # import simplejson
newrelic==9.7.0
certifi==2024.2.2
diff --git a/requirements/common.txt b/requirements/common.txt
index c3bfd153d5b..2af45ef7ab2 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -1340,92 +1340,105 @@ scipy==1.10.0 \
--hash=sha256:c8b3cbc636a87a89b770c6afc999baa6bcbb01691b5ccbbc1b1791c7c0a07540 \
--hash=sha256:e096b062d2efdea57f972d232358cb068413dc54eec4f24158bcbb5cb8bddfd8
# via moz-measure-noise
-simplejson==3.19.1 \
- --hash=sha256:081ea6305b3b5e84ae7417e7f45956db5ea3872ec497a584ec86c3260cda049e \
- --hash=sha256:08be5a241fdf67a8e05ac7edbd49b07b638ebe4846b560673e196b2a25c94b92 \
- --hash=sha256:0c16ec6a67a5f66ab004190829eeede01c633936375edcad7cbf06d3241e5865 \
- --hash=sha256:0ccb2c1877bc9b25bc4f4687169caa925ffda605d7569c40e8e95186e9a5e58b \
- --hash=sha256:17a963e8dd4d81061cc05b627677c1f6a12e81345111fbdc5708c9f088d752c9 \
- --hash=sha256:199a0bcd792811c252d71e3eabb3d4a132b3e85e43ebd93bfd053d5b59a7e78b \
- --hash=sha256:1cb19eacb77adc5a9720244d8d0b5507421d117c7ed4f2f9461424a1829e0ceb \
- --hash=sha256:203412745fed916fc04566ecef3f2b6c872b52f1e7fb3a6a84451b800fb508c1 \
- --hash=sha256:2098811cd241429c08b7fc5c9e41fcc3f59f27c2e8d1da2ccdcf6c8e340ab507 \
- --hash=sha256:22b867205cd258050c2625325fdd9a65f917a5aff22a23387e245ecae4098e78 \
- --hash=sha256:23fbb7b46d44ed7cbcda689295862851105c7594ae5875dce2a70eeaa498ff86 \
- --hash=sha256:2541fdb7467ef9bfad1f55b6c52e8ea52b3ce4a0027d37aff094190a955daa9d \
- --hash=sha256:3231100edee292da78948fa0a77dee4e5a94a0a60bcba9ed7a9dc77f4d4bb11e \
- --hash=sha256:344a5093b71c1b370968d0fbd14d55c9413cb6f0355fdefeb4a322d602d21776 \
- --hash=sha256:37724c634f93e5caaca04458f267836eb9505d897ab3947b52f33b191bf344f3 \
- --hash=sha256:3844305bc33d52c4975da07f75b480e17af3558c0d13085eaa6cc2f32882ccf7 \
- --hash=sha256:390f4a8ca61d90bcf806c3ad644e05fa5890f5b9a72abdd4ca8430cdc1e386fa \
- --hash=sha256:3a4480e348000d89cf501b5606415f4d328484bbb431146c2971123d49fd8430 \
- --hash=sha256:3b652579c21af73879d99c8072c31476788c8c26b5565687fd9db154070d852a \
- --hash=sha256:3e0902c278243d6f7223ba3e6c5738614c971fd9a887fff8feaa8dcf7249c8d4 \
- --hash=sha256:412e58997a30c5deb8cab5858b8e2e5b40ca007079f7010ee74565cc13d19665 \
- --hash=sha256:44cdb4e544134f305b033ad79ae5c6b9a32e7c58b46d9f55a64e2a883fbbba01 \
- --hash=sha256:46133bc7dd45c9953e6ee4852e3de3d5a9a4a03b068bd238935a5c72f0a1ce34 \
- --hash=sha256:46e89f58e4bed107626edce1cf098da3664a336d01fc78fddcfb1f397f553d44 \
- --hash=sha256:4710806eb75e87919b858af0cba4ffedc01b463edc3982ded7b55143f39e41e1 \
- --hash=sha256:476c8033abed7b1fd8db62a7600bf18501ce701c1a71179e4ce04ac92c1c5c3c \
- --hash=sha256:48600a6e0032bed17c20319d91775f1797d39953ccfd68c27f83c8d7fc3b32cb \
- --hash=sha256:4d3025e7e9ddb48813aec2974e1a7e68e63eac911dd5e0a9568775de107ac79a \
- --hash=sha256:547ea86ca408a6735335c881a2e6208851027f5bfd678d8f2c92a0f02c7e7330 \
- --hash=sha256:54fca2b26bcd1c403146fd9461d1da76199442297160721b1d63def2a1b17799 \
- --hash=sha256:5673d27806085d2a413b3be5f85fad6fca4b7ffd31cfe510bbe65eea52fff571 \
- --hash=sha256:58ee5e24d6863b22194020eb62673cf8cc69945fcad6b283919490f6e359f7c5 \
- --hash=sha256:5ca922c61d87b4c38f37aa706520328ffe22d7ac1553ef1cadc73f053a673553 \
- --hash=sha256:5db86bb82034e055257c8e45228ca3dbce85e38d7bfa84fa7b2838e032a3219c \
- --hash=sha256:6277f60848a7d8319d27d2be767a7546bc965535b28070e310b3a9af90604a4c \
- --hash=sha256:6424d8229ba62e5dbbc377908cfee9b2edf25abd63b855c21f12ac596cd18e41 \
- --hash=sha256:65dafe413b15e8895ad42e49210b74a955c9ae65564952b0243a18fb35b986cc \
- --hash=sha256:66389b6b6ee46a94a493a933a26008a1bae0cfadeca176933e7ff6556c0ce998 \
- --hash=sha256:66d780047c31ff316ee305c3f7550f352d87257c756413632303fc59fef19eac \
- --hash=sha256:69a8b10a4f81548bc1e06ded0c4a6c9042c0be0d947c53c1ed89703f7e613950 \
- --hash=sha256:6a561320485017ddfc21bd2ed5de2d70184f754f1c9b1947c55f8e2b0163a268 \
- --hash=sha256:6aa7ca03f25b23b01629b1c7f78e1cd826a66bfb8809f8977a3635be2ec48f1a \
- --hash=sha256:6b79642a599740603ca86cf9df54f57a2013c47e1dd4dd2ae4769af0a6816900 \
- --hash=sha256:6e7c70f19405e5f99168077b785fe15fcb5f9b3c0b70b0b5c2757ce294922c8c \
- --hash=sha256:70128fb92932524c89f373e17221cf9535d7d0c63794955cc3cd5868e19f5d38 \
- --hash=sha256:73d0904c2471f317386d4ae5c665b16b5c50ab4f3ee7fd3d3b7651e564ad74b1 \
- --hash=sha256:74bf802debe68627227ddb665c067eb8c73aa68b2476369237adf55c1161b728 \
- --hash=sha256:79c748aa61fd8098d0472e776743de20fae2686edb80a24f0f6593a77f74fe86 \
- --hash=sha256:79d46e7e33c3a4ef853a1307b2032cfb7220e1a079d0c65488fbd7118f44935a \
- --hash=sha256:7e78d79b10aa92f40f54178ada2b635c960d24fc6141856b926d82f67e56d169 \
- --hash=sha256:8090e75653ea7db75bc21fa5f7bcf5f7bdf64ea258cbbac45c7065f6324f1b50 \
- --hash=sha256:87b190e6ceec286219bd6b6f13547ca433f977d4600b4e81739e9ac23b5b9ba9 \
- --hash=sha256:889328873c35cb0b2b4c83cbb83ec52efee5a05e75002e2c0c46c4e42790e83c \
- --hash=sha256:8f8d179393e6f0cf6c7c950576892ea6acbcea0a320838c61968ac7046f59228 \
- --hash=sha256:919bc5aa4d8094cf8f1371ea9119e5d952f741dc4162810ab714aec948a23fe5 \
- --hash=sha256:926957b278de22797bfc2f004b15297013843b595b3cd7ecd9e37ccb5fad0b72 \
- --hash=sha256:93f5ac30607157a0b2579af59a065bcfaa7fadeb4875bf927a8f8b6739c8d910 \
- --hash=sha256:96ade243fb6f3b57e7bd3b71e90c190cd0f93ec5dce6bf38734a73a2e5fa274f \
- --hash=sha256:9f14ecca970d825df0d29d5c6736ff27999ee7bdf5510e807f7ad8845f7760ce \
- --hash=sha256:a755f7bfc8adcb94887710dc70cc12a69a454120c6adcc6f251c3f7b46ee6aac \
- --hash=sha256:a79b439a6a77649bb8e2f2644e6c9cc0adb720fc55bed63546edea86e1d5c6c8 \
- --hash=sha256:aa9d614a612ad02492f704fbac636f666fa89295a5d22b4facf2d665fc3b5ea9 \
- --hash=sha256:ad071cd84a636195f35fa71de2186d717db775f94f985232775794d09f8d9061 \
- --hash=sha256:b0e9a5e66969f7a47dc500e3dba8edc3b45d4eb31efb855c8647700a3493dd8a \
- --hash=sha256:b438e5eaa474365f4faaeeef1ec3e8d5b4e7030706e3e3d6b5bee6049732e0e6 \
- --hash=sha256:b46aaf0332a8a9c965310058cf3487d705bf672641d2c43a835625b326689cf4 \
- --hash=sha256:c39fa911e4302eb79c804b221ddec775c3da08833c0a9120041dd322789824de \
- --hash=sha256:ca56a6c8c8236d6fe19abb67ef08d76f3c3f46712c49a3b6a5352b6e43e8855f \
- --hash=sha256:cb502cde018e93e75dc8fc7bb2d93477ce4f3ac10369f48866c61b5e031db1fd \
- --hash=sha256:cd4d50a27b065447c9c399f0bf0a993bd0e6308db8bbbfbc3ea03b41c145775a \
- --hash=sha256:d125e754d26c0298715bdc3f8a03a0658ecbe72330be247f4b328d229d8cf67f \
- --hash=sha256:d300773b93eed82f6da138fd1d081dc96fbe53d96000a85e41460fe07c8d8b33 \
- --hash=sha256:d396b610e77b0c438846607cd56418bfc194973b9886550a98fd6724e8c6cfec \
- --hash=sha256:d61482b5d18181e6bb4810b4a6a24c63a490c3a20e9fbd7876639653e2b30a1a \
- --hash=sha256:d9f2c27f18a0b94107d57294aab3d06d6046ea843ed4a45cae8bd45756749f3a \
- --hash=sha256:dc2b3f06430cbd4fac0dae5b2974d2bf14f71b415fb6de017f498950da8159b1 \
- --hash=sha256:dc935d8322ba9bc7b84f99f40f111809b0473df167bf5b93b89fb719d2c4892b \
- --hash=sha256:e333c5b62e93949f5ac27e6758ba53ef6ee4f93e36cc977fe2e3df85c02f6dc4 \
- --hash=sha256:e765b1f47293dedf77946f0427e03ee45def2862edacd8868c6cf9ab97c8afbd \
- --hash=sha256:ed18728b90758d171f0c66c475c24a443ede815cf3f1a91e907b0db0ebc6e508 \
- --hash=sha256:eff87c68058374e45225089e4538c26329a13499bc0104b52b77f8428eed36b2 \
- --hash=sha256:f05d05d99fce5537d8f7a0af6417a9afa9af3a6c4bb1ba7359c53b6257625fcb \
- --hash=sha256:f253edf694ce836631b350d758d00a8c4011243d58318fbfbe0dd54a6a839ab4 \
- --hash=sha256:f41915a4e1f059dfad614b187bc06021fefb5fc5255bfe63abf8247d2f7a646a \
- --hash=sha256:f96def94576f857abf58e031ce881b5a3fc25cbec64b2bc4824824a8a4367af9
+simplejson==3.19.2 \
+ --hash=sha256:0405984f3ec1d3f8777c4adc33eac7ab7a3e629f3b1c05fdded63acc7cf01137 \
+ --hash=sha256:0436a70d8eb42bea4fe1a1c32d371d9bb3b62c637969cb33970ad624d5a3336a \
+ --hash=sha256:061e81ea2d62671fa9dea2c2bfbc1eec2617ae7651e366c7b4a2baf0a8c72cae \
+ --hash=sha256:064300a4ea17d1cd9ea1706aa0590dcb3be81112aac30233823ee494f02cb78a \
+ --hash=sha256:08889f2f597ae965284d7b52a5c3928653a9406d88c93e3161180f0abc2433ba \
+ --hash=sha256:0a48679310e1dd5c9f03481799311a65d343748fe86850b7fb41df4e2c00c087 \
+ --hash=sha256:0b0a3eb6dd39cce23801a50c01a0976971498da49bc8a0590ce311492b82c44b \
+ --hash=sha256:0d2d5119b1d7a1ed286b8af37357116072fc96700bce3bec5bb81b2e7057ab41 \
+ --hash=sha256:0d551dc931638e2102b8549836a1632e6e7cf620af3d093a7456aa642bff601d \
+ --hash=sha256:1018bd0d70ce85f165185d2227c71e3b1e446186f9fa9f971b69eee223e1e3cd \
+ --hash=sha256:11c39fbc4280d7420684494373b7c5904fa72a2b48ef543a56c2d412999c9e5d \
+ --hash=sha256:11cc3afd8160d44582543838b7e4f9aa5e97865322844b75d51bf4e0e413bb3e \
+ --hash=sha256:1537b3dd62d8aae644f3518c407aa8469e3fd0f179cdf86c5992792713ed717a \
+ --hash=sha256:16ca9c90da4b1f50f089e14485db8c20cbfff2d55424062791a7392b5a9b3ff9 \
+ --hash=sha256:176a1b524a3bd3314ed47029a86d02d5a95cc0bee15bd3063a1e1ec62b947de6 \
+ --hash=sha256:18955c1da6fc39d957adfa346f75226246b6569e096ac9e40f67d102278c3bcb \
+ --hash=sha256:1bb5b50dc6dd671eb46a605a3e2eb98deb4a9af787a08fcdddabe5d824bb9664 \
+ --hash=sha256:1c768e7584c45094dca4b334af361e43b0aaa4844c04945ac7d43379eeda9bc2 \
+ --hash=sha256:1dd4f692304854352c3e396e9b5f0a9c9e666868dd0bdc784e2ac4c93092d87b \
+ --hash=sha256:25785d038281cd106c0d91a68b9930049b6464288cea59ba95b35ee37c2d23a5 \
+ --hash=sha256:287e39ba24e141b046812c880f4619d0ca9e617235d74abc27267194fc0c7835 \
+ --hash=sha256:2c1467d939932901a97ba4f979e8f2642415fcf02ea12f53a4e3206c9c03bc17 \
+ --hash=sha256:2c433a412e96afb9a3ce36fa96c8e61a757af53e9c9192c97392f72871e18e69 \
+ --hash=sha256:2d022b14d7758bfb98405672953fe5c202ea8a9ccf9f6713c5bd0718eba286fd \
+ --hash=sha256:2f98d918f7f3aaf4b91f2b08c0c92b1774aea113334f7cde4fe40e777114dbe6 \
+ --hash=sha256:2fc697be37585eded0c8581c4788fcfac0e3f84ca635b73a5bf360e28c8ea1a2 \
+ --hash=sha256:3194cd0d2c959062b94094c0a9f8780ffd38417a5322450a0db0ca1a23e7fbd2 \
+ --hash=sha256:332c848f02d71a649272b3f1feccacb7e4f7e6de4a2e6dc70a32645326f3d428 \
+ --hash=sha256:346820ae96aa90c7d52653539a57766f10f33dd4be609206c001432b59ddf89f \
+ --hash=sha256:3471e95110dcaf901db16063b2e40fb394f8a9e99b3fe9ee3acc6f6ef72183a2 \
+ --hash=sha256:3848427b65e31bea2c11f521b6fc7a3145d6e501a1038529da2391aff5970f2f \
+ --hash=sha256:39b6d79f5cbfa3eb63a869639cfacf7c41d753c64f7801efc72692c1b2637ac7 \
+ --hash=sha256:3e74355cb47e0cd399ead3477e29e2f50e1540952c22fb3504dda0184fc9819f \
+ --hash=sha256:3f39bb1f6e620f3e158c8b2eaf1b3e3e54408baca96a02fe891794705e788637 \
+ --hash=sha256:40847f617287a38623507d08cbcb75d51cf9d4f9551dd6321df40215128325a3 \
+ --hash=sha256:4280e460e51f86ad76dc456acdbfa9513bdf329556ffc8c49e0200878ca57816 \
+ --hash=sha256:445a96543948c011a3a47c8e0f9d61e9785df2544ea5be5ab3bc2be4bd8a2565 \
+ --hash=sha256:4969d974d9db826a2c07671273e6b27bc48e940738d768fa8f33b577f0978378 \
+ --hash=sha256:49aaf4546f6023c44d7e7136be84a03a4237f0b2b5fb2b17c3e3770a758fc1a0 \
+ --hash=sha256:49e0e3faf3070abdf71a5c80a97c1afc059b4f45a5aa62de0c2ca0444b51669b \
+ --hash=sha256:49f9da0d6cd17b600a178439d7d2d57c5ef01f816b1e0e875e8e8b3b42db2693 \
+ --hash=sha256:4a8c3cc4f9dfc33220246760358c8265dad6e1104f25f0077bbca692d616d358 \
+ --hash=sha256:4d36081c0b1c12ea0ed62c202046dca11438bee48dd5240b7c8de8da62c620e9 \
+ --hash=sha256:4edcd0bf70087b244ba77038db23cd98a1ace2f91b4a3ecef22036314d77ac23 \
+ --hash=sha256:554313db34d63eac3b3f42986aa9efddd1a481169c12b7be1e7512edebff8eaf \
+ --hash=sha256:5675e9d8eeef0aa06093c1ff898413ade042d73dc920a03e8cea2fb68f62445a \
+ --hash=sha256:60848ab779195b72382841fc3fa4f71698a98d9589b0a081a9399904487b5832 \
+ --hash=sha256:66e5dc13bfb17cd6ee764fc96ccafd6e405daa846a42baab81f4c60e15650414 \
+ --hash=sha256:6779105d2fcb7fcf794a6a2a233787f6bbd4731227333a072d8513b252ed374f \
+ --hash=sha256:6ad331349b0b9ca6da86064a3599c425c7a21cd41616e175ddba0866da32df48 \
+ --hash=sha256:6f0a0b41dd05eefab547576bed0cf066595f3b20b083956b1405a6f17d1be6ad \
+ --hash=sha256:73a8a4653f2e809049999d63530180d7b5a344b23a793502413ad1ecea9a0290 \
+ --hash=sha256:778331444917108fa8441f59af45886270d33ce8a23bfc4f9b192c0b2ecef1b3 \
+ --hash=sha256:7cb98be113911cb0ad09e5523d0e2a926c09a465c9abb0784c9269efe4f95917 \
+ --hash=sha256:7d74beca677623481810c7052926365d5f07393c72cbf62d6cce29991b676402 \
+ --hash=sha256:7f2398361508c560d0bf1773af19e9fe644e218f2a814a02210ac2c97ad70db0 \
+ --hash=sha256:8434dcdd347459f9fd9c526117c01fe7ca7b016b6008dddc3c13471098f4f0dc \
+ --hash=sha256:8a390e56a7963e3946ff2049ee1eb218380e87c8a0e7608f7f8790ba19390867 \
+ --hash=sha256:92c4a4a2b1f4846cd4364855cbac83efc48ff5a7d7c06ba014c792dd96483f6f \
+ --hash=sha256:9300aee2a8b5992d0f4293d88deb59c218989833e3396c824b69ba330d04a589 \
+ --hash=sha256:9453419ea2ab9b21d925d0fd7e3a132a178a191881fab4169b6f96e118cc25bb \
+ --hash=sha256:9652e59c022e62a5b58a6f9948b104e5bb96d3b06940c6482588176f40f4914b \
+ --hash=sha256:972a7833d4a1fcf7a711c939e315721a88b988553fc770a5b6a5a64bd6ebeba3 \
+ --hash=sha256:9c1a4393242e321e344213a90a1e3bf35d2f624aa8b8f6174d43e3c6b0e8f6eb \
+ --hash=sha256:9e038c615b3906df4c3be8db16b3e24821d26c55177638ea47b3f8f73615111c \
+ --hash=sha256:9e4c166f743bb42c5fcc60760fb1c3623e8fda94f6619534217b083e08644b46 \
+ --hash=sha256:9eb117db8d7ed733a7317c4215c35993b815bf6aeab67523f1f11e108c040672 \
+ --hash=sha256:9eb442a2442ce417801c912df68e1f6ccfcd41577ae7274953ab3ad24ef7d82c \
+ --hash=sha256:a3cd18e03b0ee54ea4319cdcce48357719ea487b53f92a469ba8ca8e39df285e \
+ --hash=sha256:a8617625369d2d03766413bff9e64310feafc9fc4f0ad2b902136f1a5cd8c6b0 \
+ --hash=sha256:a970a2e6d5281d56cacf3dc82081c95c1f4da5a559e52469287457811db6a79b \
+ --hash=sha256:aad7405c033d32c751d98d3a65801e2797ae77fac284a539f6c3a3e13005edc4 \
+ --hash=sha256:adcb3332979cbc941b8fff07181f06d2b608625edc0a4d8bc3ffc0be414ad0c4 \
+ --hash=sha256:af9c7e6669c4d0ad7362f79cb2ab6784d71147503e62b57e3d95c4a0f222c01c \
+ --hash=sha256:b01fda3e95d07a6148702a641e5e293b6da7863f8bc9b967f62db9461330562c \
+ --hash=sha256:b8d940fd28eb34a7084877747a60873956893e377f15a32ad445fe66c972c3b8 \
+ --hash=sha256:bccb3e88ec26ffa90f72229f983d3a5d1155e41a1171190fa723d4135523585b \
+ --hash=sha256:bcedf4cae0d47839fee7de344f96b5694ca53c786f28b5f773d4f0b265a159eb \
+ --hash=sha256:be893258d5b68dd3a8cba8deb35dc6411db844a9d35268a8d3793b9d9a256f80 \
+ --hash=sha256:c0521e0f07cb56415fdb3aae0bbd8701eb31a9dfef47bb57206075a0584ab2a2 \
+ --hash=sha256:c594642d6b13d225e10df5c16ee15b3398e21a35ecd6aee824f107a625690374 \
+ --hash=sha256:c87c22bd6a987aca976e3d3e23806d17f65426191db36d40da4ae16a6a494cbc \
+ --hash=sha256:c9ac1c2678abf9270e7228133e5b77c6c3c930ad33a3c1dfbdd76ff2c33b7b50 \
+ --hash=sha256:d0e5ffc763678d48ecc8da836f2ae2dd1b6eb2d27a48671066f91694e575173c \
+ --hash=sha256:d0f402e787e6e7ee7876c8b05e2fe6464820d9f35ba3f172e95b5f8b699f6c7f \
+ --hash=sha256:d222a9ed082cd9f38b58923775152003765016342a12f08f8c123bf893461f28 \
+ --hash=sha256:d94245caa3c61f760c4ce4953cfa76e7739b6f2cbfc94cc46fff6c050c2390c5 \
+ --hash=sha256:de9a2792612ec6def556d1dc621fd6b2073aff015d64fba9f3e53349ad292734 \
+ --hash=sha256:e2f5a398b5e77bb01b23d92872255e1bcb3c0c719a3be40b8df146570fe7781a \
+ --hash=sha256:e8dd53a8706b15bc0e34f00e6150fbefb35d2fd9235d095b4f83b3c5ed4fa11d \
+ --hash=sha256:e9eb3cff1b7d71aa50c89a0536f469cb8d6dcdd585d8f14fb8500d822f3bdee4 \
+ --hash=sha256:ed628c1431100b0b65387419551e822987396bee3c088a15d68446d92f554e0c \
+ --hash=sha256:ef7938a78447174e2616be223f496ddccdbf7854f7bf2ce716dbccd958cc7d13 \
+ --hash=sha256:f1c70249b15e4ce1a7d5340c97670a95f305ca79f376887759b43bb33288c973 \
+ --hash=sha256:f3c7363a8cb8c5238878ec96c5eb0fc5ca2cb11fc0c7d2379863d342c6ee367a \
+ --hash=sha256:fbbcc6b0639aa09b9649f36f1bcb347b19403fe44109948392fbb5ea69e48c3e \
+ --hash=sha256:febffa5b1eda6622d44b245b0685aff6fb555ce0ed734e2d7b1c3acd018a2cff \
+ --hash=sha256:ff836cd4041e16003549449cc0a5e372f6b6f871eb89007ab0ee18fb2800fded
# via -r requirements/common.in
six==1.16.0 \
--hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
From d1b7dec48d29a160dc21a1f8d6a7ef370a47e6c4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 17:01:33 +0000
Subject: [PATCH 068/128] Bump django-cors-headers from 4.1.0 to 4.3.1
Bumps [django-cors-headers](https://github.com/adamchainz/django-cors-headers) from 4.1.0 to 4.3.1.
- [Changelog](https://github.com/adamchainz/django-cors-headers/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/adamchainz/django-cors-headers/compare/4.1.0...4.3.1)
---
updated-dependencies:
- dependency-name: django-cors-headers
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 10 ++++++----
2 files changed, 7 insertions(+), 5 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 20539425a08..7c82ede612c 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -13,7 +13,7 @@ psycopg2-binary==2.9.9
jsonschema==4.21.1 # import jsonschema
djangorestframework==3.14.0 # Imported as rest_framework
-django-cors-headers==4.1.0 # Listed as 3rd party app on settings.py
+django-cors-headers==4.3.1 # Listed as 3rd party app on settings.py
mozlog==8.0.0
# Used directly and also by Django's YAML serializer.
diff --git a/requirements/common.txt b/requirements/common.txt
index 2af45ef7ab2..75949602f23 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -101,7 +101,9 @@ arrow==1.3.0 \
asgiref==3.7.2 \
--hash=sha256:89b2ef2247e3b562a16eef663bc0e2e703ec6468e2fa8a5cd61cd449786d4f6e \
--hash=sha256:9e0ce3aa93a819ba5b45120216b23878cf6e8525eb3848653452b4192b92afed
- # via django
+ # via
+ # django
+ # django-cors-headers
async-timeout==4.0.3 \
--hash=sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f \
--hash=sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028
@@ -370,9 +372,9 @@ django-cache-memoize==0.2.0 \
--hash=sha256:79950a027ba40e4aff4efed587b76036bf5ba1f59329d7b158797b832be72ca6 \
--hash=sha256:a6bfd112da699d1fa85955a1e15b7c48ee25e58044398958e269678db10736f3
# via -r requirements/common.in
-django-cors-headers==4.1.0 \
- --hash=sha256:36a8d7a6dee6a85f872fe5916cc878a36d0812043866355438dfeda0b20b6b78 \
- --hash=sha256:88a4bfae24b6404dd0e0640203cb27704a2a57fd546a429e5d821dfa53dd1acf
+django-cors-headers==4.3.1 \
+ --hash=sha256:0b1fd19297e37417fc9f835d39e45c8c642938ddba1acce0c1753d3edef04f36 \
+ --hash=sha256:0bf65ef45e606aff1994d35503e6b677c0b26cafff6506f8fd7187f3be840207
# via -r requirements/common.in
django-environ==0.10.0 \
--hash=sha256:510f8c9c1d0a38b0815f91504270c29440a0cf44fab07f55942fa8d31bbb9be6 \
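The requirements comment only notes that django-cors-headers is "Listed as 3rd party app on settings.py". A minimal sketch of the wiring that comment usually refers to, assuming the stock corsheaders app and middleware names; the allowed origin is an illustrative placeholder, not Treeherder's actual configuration:

```python
# Hedged sketch of typical django-cors-headers settings; the origin below is a
# placeholder, not the project's real value.
INSTALLED_APPS = [
    # ... project apps ...
    "corsheaders",
]

MIDDLEWARE = [
    # CorsMiddleware must run before CommonMiddleware so CORS headers get added.
    "corsheaders.middleware.CorsMiddleware",
    "django.middleware.common.CommonMiddleware",
    # ... remaining middleware ...
]

CORS_ALLOWED_ORIGINS = ["https://treeherder.mozilla.org"]
```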
From bef38bf4ca85339d53bdac68fca8970f95897f6c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 16:47:24 +0000
Subject: [PATCH 069/128] Bump mysqlclient from 2.1.1 to 2.2.4
Bumps [mysqlclient](https://github.com/PyMySQL/mysqlclient) from 2.1.1 to 2.2.4.
- [Release notes](https://github.com/PyMySQL/mysqlclient/releases)
- [Changelog](https://github.com/PyMySQL/mysqlclient/blob/main/HISTORY.rst)
- [Commits](https://github.com/PyMySQL/mysqlclient/compare/v2.1.1...v2.2.4)
---
updated-dependencies:
- dependency-name: mysqlclient
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 18 ++++++++++--------
2 files changed, 11 insertions(+), 9 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 7c82ede612c..f63a2da3984 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -8,7 +8,7 @@ simplejson==3.19.2 # import simplejson
newrelic==9.7.0
certifi==2024.2.2
-mysqlclient==2.1.1 # Required by Django
+mysqlclient==2.2.4 # Required by Django
psycopg2-binary==2.9.9
jsonschema==4.21.1 # import jsonschema
diff --git a/requirements/common.txt b/requirements/common.txt
index 75949602f23..8a20f741f33 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -807,14 +807,16 @@ multidict==6.0.5 \
# via
# aiohttp
# yarl
-mysqlclient==2.1.1 \
- --hash=sha256:0d1cd3a5a4d28c222fa199002810e8146cffd821410b67851af4cc80aeccd97c \
- --hash=sha256:828757e419fb11dd6c5ed2576ec92c3efaa93a0f7c39e263586d1ee779c3d782 \
- --hash=sha256:996924f3483fd36a34a5812210c69e71dea5a3d5978d01199b78b7f6d485c855 \
- --hash=sha256:b355c8b5a7d58f2e909acdbb050858390ee1b0e13672ae759e5e784110022994 \
- --hash=sha256:c1ed71bd6244993b526113cca3df66428609f90e4652f37eb51c33496d478b37 \
- --hash=sha256:c812b67e90082a840efb82a8978369e6e69fc62ce1bda4ca8f3084a9d862308b \
- --hash=sha256:dea88c8d3f5a5d9293dfe7f087c16dd350ceb175f2f6631c9cf4caf3e19b7a96
+mysqlclient==2.2.4 \
+ --hash=sha256:329e4eec086a2336fe3541f1ce095d87a6f169d1cc8ba7b04ac68bcb234c9711 \
+ --hash=sha256:33bc9fb3464e7d7c10b1eaf7336c5ff8f2a3d3b88bab432116ad2490beb3bf41 \
+ --hash=sha256:3c318755e06df599338dad7625f884b8a71fcf322a9939ef78c9b3db93e1de7a \
+ --hash=sha256:4e80dcad884dd6e14949ac6daf769123223a52a6805345608bf49cdaf7bc8b3a \
+ --hash=sha256:9d3310295cb682232cadc28abd172f406c718b9ada41d2371259098ae37779d3 \
+ --hash=sha256:9d4c015480c4a6b2b1602eccd9846103fc70606244788d04aa14b31c4bd1f0e2 \
+ --hash=sha256:ac44777eab0a66c14cb0d38965572f762e193ec2e5c0723bcd11319cc5b693c5 \
+ --hash=sha256:d43987bb9626096a302ca6ddcdd81feaeca65ced1d5fe892a6a66b808326aa54 \
+ --hash=sha256:e1ebe3f41d152d7cb7c265349fdb7f1eca86ccb0ca24a90036cde48e00ceb2ab
# via -r requirements/common.in
newrelic==9.7.0 \
--hash=sha256:0344e718ddc4ffe78a1441c6313a6af2f9aa3001e93a8a5197caac091f8bc9b3 \
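The pin comment describes mysqlclient as "Required by Django": it is the driver behind Django's MySQL backend. A minimal sketch of the DATABASES entry that driver serves, with placeholder credentials rather than the project's real settings:

```python
# Hypothetical DATABASES entry; mysqlclient backs the
# "django.db.backends.mysql" engine. All credentials here are placeholders.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.mysql",
        "NAME": "treeherder",
        "USER": "treeherder_user",
        "PASSWORD": "change-me",
        "HOST": "localhost",
        "PORT": "3306",
    }
}
```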
From e6a93db2716666166baed30aa0ef48b3fbbc317b Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Tue, 5 Mar 2024 17:54:53 +0100
Subject: [PATCH 070/128] add pkg-config, required by mysqlclient >=2.2
---
docker/Dockerfile | 1 +
docker/dev.Dockerfile | 1 +
2 files changed, 2 insertions(+)
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 82b39804f30..ea3c31a3968 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -19,6 +19,7 @@ FROM python:3.9.18-slim-bullseye
# libmysqlclient-dev is required for the mysqlclient Python package.
RUN apt-get update && apt-get install -y --no-install-recommends \
+ pkg-config \
default-libmysqlclient-dev \
&& rm -rf /var/lib/apt/lists/*
diff --git a/docker/dev.Dockerfile b/docker/dev.Dockerfile
index 11eada87b1b..03f5d3ac34e 100644
--- a/docker/dev.Dockerfile
+++ b/docker/dev.Dockerfile
@@ -6,6 +6,7 @@ ENV NEW_RELIC_CONFIG_FILE newrelic.ini
# libmysqlclient-dev and gcc are required for the mysqlclient Python package.
# netcat is used for the MySQL readiness check in entrypoint.sh.
RUN apt-get update && apt-get install -y --no-install-recommends \
+ pkg-config \
default-libmysqlclient-dev \
gcc \
netcat \
From 289637f3b709af2c379b85a09d1338d850906198 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 16:58:46 +0000
Subject: [PATCH 071/128] Bump django-redis from 5.3.0 to 5.4.0
Bumps [django-redis](https://github.com/jazzband/django-redis) from 5.3.0 to 5.4.0.
- [Release notes](https://github.com/jazzband/django-redis/releases)
- [Changelog](https://github.com/jazzband/django-redis/blob/master/CHANGELOG.rst)
- [Commits](https://github.com/jazzband/django-redis/compare/5.3.0...5.4.0)
---
updated-dependencies:
- dependency-name: django-redis
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index f63a2da3984..1fdc7f92457 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -23,7 +23,7 @@ uritemplate==4.1.1 # For OpenAPI schema
python-dateutil==2.9.0.post0
django-filter==23.5 # Listed in DEFAULT_FILTER_BACKENDS on settings.py
-django-redis==5.3.0 # Listed in CACHES on settings.py
+django-redis==5.4.0 # Listed in CACHES on settings.py
taskcluster==60.4.2 # import taskcluster
python-jose[pycryptodome]==3.3.0 # from jose import jwt
diff --git a/requirements/common.txt b/requirements/common.txt
index 8a20f741f33..1fa5438feaa 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -384,9 +384,9 @@ django-filter==23.5 \
--hash=sha256:67583aa43b91fe8c49f74a832d95f4d8442be628fd4c6d65e9f811f5153a4e5c \
--hash=sha256:99122a201d83860aef4fe77758b69dda913e874cc5e0eaa50a86b0b18d708400
# via -r requirements/common.in
-django-redis==5.3.0 \
- --hash=sha256:2d8660d39f586c41c9907d5395693c477434141690fd7eca9d32376af00b0aac \
- --hash=sha256:8bc5793ec06b28ea802aad85ec437e7646511d4e571e07ccad19cfed8b9ddd44
+django-redis==5.4.0 \
+ --hash=sha256:6a02abaa34b0fea8bf9b707d2c363ab6adc7409950b2db93602e6cb292818c42 \
+ --hash=sha256:ebc88df7da810732e2af9987f7f426c96204bf89319df4c6da6ca9a2942edd5b
# via -r requirements/common.in
djangorestframework==3.14.0 \
--hash=sha256:579a333e6256b09489cbe0a067e66abe55c6595d8926be6b99423786334350c8 \
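The pin comment says django-redis is "Listed in CACHES on settings.py". A minimal sketch of that kind of CACHES entry, using the documented django_redis backend and a placeholder Redis URL rather than the project's deployment value:

```python
# Hedged sketch of a django-redis cache configuration; the LOCATION URL is a
# placeholder.
CACHES = {
    "default": {
        "BACKEND": "django_redis.cache.RedisCache",
        "LOCATION": "redis://localhost:6379/1",
        "OPTIONS": {
            "CLIENT_CLASS": "django_redis.client.DefaultClient",
        },
    }
}
```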
From c544e75d00156b290aa9531503affb0193ae3ca5 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 5 Mar 2024 17:23:18 +0000
Subject: [PATCH 072/128] Bump django-environ from 0.10.0 to 0.11.2
Bumps [django-environ](https://github.com/joke2k/django-environ) from 0.10.0 to 0.11.2.
- [Release notes](https://github.com/joke2k/django-environ/releases)
- [Changelog](https://github.com/joke2k/django-environ/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/joke2k/django-environ/compare/v0.10.0...v0.11.2)
---
updated-dependencies:
- dependency-name: django-environ
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 1fdc7f92457..e43f089a1a1 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -18,7 +18,7 @@ mozlog==8.0.0
# Used directly and also by Django's YAML serializer.
PyYAML==6.0.1 # Imported as yaml
-django-environ==0.10.0 # Imported as environ
+django-environ==0.11.2 # Imported as environ
uritemplate==4.1.1 # For OpenAPI schema
python-dateutil==2.9.0.post0
diff --git a/requirements/common.txt b/requirements/common.txt
index 1fa5438feaa..384ca420657 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -376,9 +376,9 @@ django-cors-headers==4.3.1 \
--hash=sha256:0b1fd19297e37417fc9f835d39e45c8c642938ddba1acce0c1753d3edef04f36 \
--hash=sha256:0bf65ef45e606aff1994d35503e6b677c0b26cafff6506f8fd7187f3be840207
# via -r requirements/common.in
-django-environ==0.10.0 \
- --hash=sha256:510f8c9c1d0a38b0815f91504270c29440a0cf44fab07f55942fa8d31bbb9be6 \
- --hash=sha256:b3559a91439c9d774a9e0c1ced872364772c612cdf6dc919506a2b13f7a77225
+django-environ==0.11.2 \
+ --hash=sha256:0ff95ab4344bfeff693836aa978e6840abef2e2f1145adff7735892711590c05 \
+ --hash=sha256:f32a87aa0899894c27d4e1776fa6b477e8164ed7f6b3e410a62a6d72caaf64be
# via -r requirements/common.in
django-filter==23.5 \
--hash=sha256:67583aa43b91fe8c49f74a832d95f4d8442be628fd4c6d65e9f811f5153a4e5c \
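The pin comment notes django-environ is "Imported as environ", i.e. used to read settings from the environment. A minimal sketch of that usage; the variable names and default URL are illustrative, not the project's actual settings:

```python
# Hedged sketch of django-environ usage; names and defaults are placeholders.
import environ

env = environ.Env(DEBUG=(bool, False))
environ.Env.read_env()  # loads a .env file into the environment if present

DEBUG = env("DEBUG")
DATABASES = {
    "default": env.db("DATABASE_URL", default="mysql://user:pass@localhost/treeherder"),
}
```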
From 05465d05803c8235dc4f7a9bf20c72333b1a2975 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 4 Mar 2024 22:22:19 +0000
Subject: [PATCH 073/128] Bump pre-commit from 3.3.3 to 3.6.2
Bumps [pre-commit](https://github.com/pre-commit/pre-commit) from 3.3.3 to 3.6.2.
- [Release notes](https://github.com/pre-commit/pre-commit/releases)
- [Changelog](https://github.com/pre-commit/pre-commit/blob/main/CHANGELOG.md)
- [Commits](https://github.com/pre-commit/pre-commit/compare/v3.3.3...v3.6.2)
---
updated-dependencies:
- dependency-name: pre-commit
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 036ea525fbe..31f12202181 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -7,7 +7,7 @@ django-extensions==3.2.3
PyPOM==2.2.4
# for git commit hooks
-pre-commit==3.3.3
+pre-commit==3.6.2
# for test driven development
pytest-testmon==2.1.1
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 9fd4130d6d1..05cd8372957 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -322,9 +322,9 @@ pluggy==1.4.0 \
# via
# pypom
# pytest
-pre-commit==3.3.3 \
- --hash=sha256:10badb65d6a38caff29703362271d7dca483d01da88f9d7e05d0b97171c136cb \
- --hash=sha256:a2256f489cd913d575c145132ae196fe335da32d91a8294b7afe6622335dd023
+pre-commit==3.6.2 \
+ --hash=sha256:ba637c2d7a670c10daedc059f5c49b5bd0aadbccfcd7ec15592cf9665117532c \
+ --hash=sha256:c3ef34f463045c88658c5b99f38c1e297abdcc0ff13f98d3370055fbbfabc67e
# via -r requirements/dev.in
pypom==2.2.4 \
--hash=sha256:5da52cf447e62f43a0cfa47dfe52eb822eff07b2fdad759f930d1d227c15220b \
From df9814e2e23b898a6e24f634643a22ad34535237 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 11 Mar 2024 00:11:02 +0000
Subject: [PATCH 074/128] Update dependency docs/mkdocs-material to v9.5.13
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 8adead7bbec..43e4f7a48fa 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ description = "Defaut package, used for development or readthedocs"
[project.optional-dependencies]
docs = [
"mkdocs==1.5.3",
- "mkdocs-material==9.5.12",
+ "mkdocs-material==9.5.13",
"mdx_truly_sane_lists==1.3",
]
From b65bb780507da2307b5372164d9fbcc169f98574 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 11 Mar 2024 08:17:31 +0000
Subject: [PATCH 075/128] Update rabbitmq Docker tag to v3.11.28
---
docker-compose.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index 647da4cec7c..75f9e10cabf 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -111,7 +111,7 @@ services:
rabbitmq:
container_name: rabbitmq
# https://hub.docker.com/r/library/rabbitmq/
- image: rabbitmq:3.11.25-alpine
+ image: rabbitmq:3.11.28-alpine
environment:
# Hide INFO and WARNING log levels to reduce log spam.
- 'RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=-rabbit log [{console,[{level,error}]}]'
From 7e2ef8b7193b837a034a40a00eaf6d5413fdb31a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 11 Mar 2024 02:16:27 +0000
Subject: [PATCH 076/128] Bump pytest from 8.0.2 to 8.1.1
Bumps [pytest](https://github.com/pytest-dev/pytest) from 8.0.2 to 8.1.1.
- [Release notes](https://github.com/pytest-dev/pytest/releases)
- [Changelog](https://github.com/pytest-dev/pytest/blob/main/CHANGELOG.rst)
- [Commits](https://github.com/pytest-dev/pytest/compare/8.0.2...8.1.1)
---
updated-dependencies:
- dependency-name: pytest
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 31f12202181..7000378b2ac 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -15,7 +15,7 @@ pytest-watch==4.2.0
# Required by django-extension's runserver_plus command.
pytest-django==4.8.0
-pytest==8.0.2
+pytest==8.1.1
black==24.2.0
shellcheck-py==0.9.0.6
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 05cd8372957..e7fd92338ca 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -341,9 +341,9 @@ pysocks==1.7.1 \
--hash=sha256:2725bd0a9925919b9b51739eea5f9e2bae91e83288108a9ad338b2e3a4435ee5 \
--hash=sha256:3f8804571ebe159c380ac6de37643bb4685970655d3bba243530d6558b799aa0
# via urllib3
-pytest==8.0.2 \
- --hash=sha256:d4051d623a2e0b7e51960ba963193b09ce6daeb9759a451844a21e4ddedfc1bd \
- --hash=sha256:edfaaef32ce5172d5466b5127b42e0d6d35ebbe4453f0e3505d96afd93f6b096
+pytest==8.1.1 \
+ --hash=sha256:2a8386cfc11fa9d2c50ee7b2a57e7d898ef90470a7a34c4b949ff59662bb78b7 \
+ --hash=sha256:ac978141a75948948817d360297b7aae0fcb9d6ff6bc9ec6d514b85d5a65c044
# via
# -r requirements/dev.in
# pytest-asyncio
From 2333acb73421a1f19caaf521fed9d9d75d2163bd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 11 Mar 2024 02:19:06 +0000
Subject: [PATCH 077/128] Bump pytest-asyncio from 0.23.5 to 0.23.5.post1
Bumps [pytest-asyncio](https://github.com/pytest-dev/pytest-asyncio) from 0.23.5 to 0.23.5.post1.
- [Release notes](https://github.com/pytest-dev/pytest-asyncio/releases)
- [Commits](https://github.com/pytest-dev/pytest-asyncio/compare/v0.23.5...v0.23.5.post1)
---
updated-dependencies:
- dependency-name: pytest-asyncio
dependency-type: direct:development
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 7000378b2ac..2fd3ed262cc 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -20,7 +20,7 @@ black==24.2.0
shellcheck-py==0.9.0.6
# To test async code
-pytest-asyncio==0.23.5 # required to pass test_new_job_transformation
+pytest-asyncio==0.23.5.post1 # required to pass test_new_job_transformation
# To test code that's making system time calls
# pytest-freezegun is not compatible with recent Django versions
diff --git a/requirements/dev.txt b/requirements/dev.txt
index e7fd92338ca..7fa5803bc27 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -352,9 +352,9 @@ pytest==8.1.1 \
# pytest-freezegun
# pytest-testmon
# pytest-watch
-pytest-asyncio==0.23.5 \
- --hash=sha256:3a048872a9c4ba14c3e90cc1aa20cbc2def7d01c7c8db3777ec281ba9c057675 \
- --hash=sha256:4e7093259ba018d58ede7d5315131d21923a60f8a6e9ee266ce1589685c89eac
+pytest-asyncio==0.23.5.post1 \
+ --hash=sha256:30f54d27774e79ac409778889880242b0403d09cabd65b727ce90fe92dd5d80e \
+ --hash=sha256:b9a8806bea78c21276bc34321bbf234ba1b2ea5b30d9f0ce0f2dea45e4685813
# via -r requirements/dev.in
pytest-cov==4.1.0 \
--hash=sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6 \
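The dev pin above exists "To test async code" (the in-file comment points at test_new_job_transformation). A minimal sketch of how pytest-asyncio marks a coroutine test; the test body below is illustrative, not the real Treeherder test:

```python
# Illustrative pytest-asyncio test; the coroutine and assertion are placeholders.
import asyncio

import pytest


@pytest.mark.asyncio
async def test_async_example():
    # The marker lets pytest run this coroutine on an event loop.
    await asyncio.sleep(0)
    assert True
```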
From 5cf42c07d66cc820a3cd90da4c34fff29073bd79 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 7 Mar 2024 03:09:13 +0000
Subject: [PATCH 078/128] Bump pip-tools from 7.4.0 to 7.4.1
Bumps [pip-tools](https://github.com/jazzband/pip-tools) from 7.4.0 to 7.4.1.
- [Release notes](https://github.com/jazzband/pip-tools/releases)
- [Changelog](https://github.com/jazzband/pip-tools/blob/main/CHANGELOG.md)
- [Commits](https://github.com/jazzband/pip-tools/compare/7.4.0...7.4.1)
---
updated-dependencies:
- dependency-name: pip-tools
dependency-type: direct:development
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 2fd3ed262cc..c9300b4a2d5 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -34,7 +34,7 @@ betamax==0.9.0
betamax-serializers==0.2.1
# pip-compile for pinning versions
-pip-tools==7.4.0
+pip-tools==7.4.1
requests==2.31.0
urllib3==2.0.3
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 7fa5803bc27..09c5b546b2d 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -306,9 +306,9 @@ pathspec==0.12.1 \
--hash=sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08 \
--hash=sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712
# via black
-pip-tools==7.4.0 \
- --hash=sha256:a92a6ddfa86ff389fe6ace381d463bc436e2c705bd71d52117c25af5ce867bb7 \
- --hash=sha256:b67432fd0759ed834c5367f9e0ce8c95441acecfec9c8e24b41aca166757adf0
+pip-tools==7.4.1 \
+ --hash=sha256:4c690e5fbae2f21e87843e89c26191f0d9454f362d8acdbd695716493ec8b3a9 \
+ --hash=sha256:864826f5073864450e24dbeeb85ce3920cdfb09848a3d69ebf537b521f14bcc9
# via -r requirements/dev.in
platformdirs==4.2.0 \
--hash=sha256:0614df2a2f37e1a662acbd8e2b25b92ccf8632929bc6d43467e17fe89c75e068 \
From 8c501b15bf257c1d1826689a5d7ddf5c576c792c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 7 Mar 2024 17:57:47 +0000
Subject: [PATCH 079/128] Bump jose from 4.11.1 to 4.15.5
Bumps [jose](https://github.com/panva/jose) from 4.11.1 to 4.15.5.
- [Release notes](https://github.com/panva/jose/releases)
- [Changelog](https://github.com/panva/jose/blob/v4.15.5/CHANGELOG.md)
- [Commits](https://github.com/panva/jose/compare/v4.11.1...v4.15.5)
---
updated-dependencies:
- dependency-name: jose
dependency-type: indirect
...
Signed-off-by: dependabot[bot]
---
yarn.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/yarn.lock b/yarn.lock
index 3952532ec92..d422afe80e1 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -7028,9 +7028,9 @@ joi@^17.11.0:
"@sideway/pinpoint" "^2.0.0"
jose@^4.0.4:
- version "4.11.1"
- resolved "https://registry.npmjs.org/jose/-/jose-4.11.1.tgz"
- integrity sha512-YRv4Tk/Wlug8qicwqFNFVEZSdbROCHRAC6qu/i0dyNKr5JQdoa2pIGoS04lLO/jXQX7Z9omoNewYIVIxqZBd9Q==
+ version "4.15.5"
+ resolved "https://registry.yarnpkg.com/jose/-/jose-4.15.5.tgz#6475d0f467ecd3c630a1b5dadd2735a7288df706"
+ integrity sha512-jc7BFxgKPKi94uOvEmzlSWFFe2+vASyXaKUpdQKatWAESU2MWjDfFf0fdfc83CDKcA5QecabZeNLyfhe3yKNkg==
js-cookie@3.0.5:
version "3.0.5"
From e794d1d80ff0b771178efcdc4c2f7a6b2d83cbd9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 12 Mar 2024 02:40:51 +0000
Subject: [PATCH 080/128] Bump newrelic from 9.7.0 to 9.7.1
Bumps [newrelic](https://github.com/newrelic/newrelic-python-agent) from 9.7.0 to 9.7.1.
- [Release notes](https://github.com/newrelic/newrelic-python-agent/releases)
- [Commits](https://github.com/newrelic/newrelic-python-agent/compare/v9.7.0...v9.7.1)
---
updated-dependencies:
- dependency-name: newrelic
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 60 ++++++++++++++++++++---------------------
2 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index e43f089a1a1..12069e166e2 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -5,7 +5,7 @@ Django==4.1.13
celery==5.3.6 # celery needed for data ingestion
cached-property==1.5.2 # needed for kombu with --require-hashes
simplejson==3.19.2 # import simplejson
-newrelic==9.7.0
+newrelic==9.7.1
certifi==2024.2.2
mysqlclient==2.2.4 # Required by Django
diff --git a/requirements/common.txt b/requirements/common.txt
index 384ca420657..1c63948778e 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -818,36 +818,36 @@ mysqlclient==2.2.4 \
--hash=sha256:d43987bb9626096a302ca6ddcdd81feaeca65ced1d5fe892a6a66b808326aa54 \
--hash=sha256:e1ebe3f41d152d7cb7c265349fdb7f1eca86ccb0ca24a90036cde48e00ceb2ab
# via -r requirements/common.in
-newrelic==9.7.0 \
- --hash=sha256:0344e718ddc4ffe78a1441c6313a6af2f9aa3001e93a8a5197caac091f8bc9b3 \
- --hash=sha256:0fdd25b9969a4c85a53a1dc2cade462164c6603e85ffe50da732ad4e69347659 \
- --hash=sha256:172732a71d4ff053c1c724a8dfbb8b1efc24c398c25e78f7aaf7966551d3fb09 \
- --hash=sha256:27e851365bf5e5f8e7ca21e63d01bd2ce9327afc18417e071a3d50590f2747a8 \
- --hash=sha256:288ed42949fd4a5d535507cb15b8f602111244663eceab1716a0a77e529ee2b6 \
- --hash=sha256:333ec033d13646f2221fdaf3822d3b8360d1935d1baea6879c1ae7f0d5020217 \
- --hash=sha256:4966e4be00eab203903796a4b5aa864d866ba45d17bf823d71a932f99330ceee \
- --hash=sha256:4adf292b529771536b417f46f84c497413f467f1ae7534009404580e259cb1b1 \
- --hash=sha256:4bd32c76427782a3cf6994cab1217a1da79327d5b9cb2bad11917df5eb55dc0d \
- --hash=sha256:4cefc2b264122e9f99db557ec9f1c5b287f4b95229957f7f78269cc462d47065 \
- --hash=sha256:563342155edbed8276ddef9e2e15a61a31953ff9f42015a426f94660adf104cb \
- --hash=sha256:59f2c94a2e256f00b344efc909eb1f058cd411e9a95a6ad1d7adf957223a747d \
- --hash=sha256:78f604a2622a6795320a6ff54262816aeff86da79400429c34346fc5feecb235 \
- --hash=sha256:8958e575f7ada2ed8937dafff297790aeb960499b08d209b76a8a3c72f0841fc \
- --hash=sha256:91e2ad1da28c76d67344daca7ddd6166a5e190f7031f9a5bd683db17542f91ef \
- --hash=sha256:9c41a571d0889409044bfb22194382731e18fd7962ba6a91ff640b274ca3fc1a \
- --hash=sha256:a687a521950da96b7daa553d1ab6371aebc5bfd1f3cb4ceb5d6dc859b0956602 \
- --hash=sha256:b180f099aabff875f83364b6314b9954e29dfca753ccc1d353a8135c1430f9a6 \
- --hash=sha256:b7733168eae4c718f885f188bcfc265c299f51d43130350b32f86f3754bc809b \
- --hash=sha256:bc5af6e7d7b6f30b03cec4f265b84fa8d370e006332854181214507e2deb421e \
- --hash=sha256:be2a7697b8407cea2ebe962ec990935dff300d9c4f78d3d7335b9dc280d33c53 \
- --hash=sha256:bf9485a5c9efaa30c645683eab427ce8b41164213bc003f7e7ad31772eb1f481 \
- --hash=sha256:c005bfb53c7090652839e9b38a3ec2462fe4e125fe976e2b9fcd778efa1c4a12 \
- --hash=sha256:d3656b546aced2c6a4443e5e76f89e17a1672d69dfe47940119c688ab4426a76 \
- --hash=sha256:e229fb5406a3c0752723bc5444d75dc863456a0305621be4159356f2880488e9 \
- --hash=sha256:e57d78ef1291710968e872412a8d7c765f077de0aaf225aaab216c552ee1775a \
- --hash=sha256:e731ac5b66dbeda1e990ba41cecda8ea865c69f72b0267574d6b1727113f7de2 \
- --hash=sha256:eb94aabd4b575f4fa2068343781614cc249630c8bcbc07f115affeb1311736cd \
- --hash=sha256:fb3e40be0f1ba2b2d1ad070d7913952efb1ceee13e6548d63bb973dcdf2c9d32
+newrelic==9.7.1 \
+ --hash=sha256:0798e85b738a24843da9aa0e4175b42441d9b10af6b17ee8de137cf83d5bb222 \
+ --hash=sha256:08a062f6b0483de744b3085e70b88ccb7599ba4f242977bf1cbb602ed4385980 \
+ --hash=sha256:0d85b7d08e7fe130951de1f2225e69c321ece620da18bbc4385905c72e0aa51b \
+ --hash=sha256:102f6e8e65e6fa5044a0d433066a46ce5b382f96335576dfa16217c1855ebc2b \
+ --hash=sha256:166e365a6334d6b591a6af91e07dd191f043fb10893474ad1b60ed0b99a78f4e \
+ --hash=sha256:1c1d24be69d5316af7be99d6c87686d900e708bc421ca55977cb021fd29de0bd \
+ --hash=sha256:28343d596de29b7b0adcbcd2b872a1657d85c2467482792d8190814faec46c80 \
+ --hash=sha256:2a41340ce1d58bcc4dda39784d244e8a42c11b150665d8bec0527ea88bf02f53 \
+ --hash=sha256:365ec3b3795f43a70895652dff4ece28c11ecf0337aabf8da762b746cfda4c2e \
+ --hash=sha256:53273e8fbea3df48265b15ce3a5aee8e7950036a8463e973ef949d79072b5d74 \
+ --hash=sha256:56a6b322b228ee0b3afbb59d686fad0c58b6b284fc6bb3227e7746ca0d458858 \
+ --hash=sha256:5824e599b972b5931caa13f5f34eb60df4cf3c7048604d0efe34b9ad41923739 \
+ --hash=sha256:6361b3be951e3520ea2b138ca56783b03f8a6c85085885fcf597d1ee28c59153 \
+ --hash=sha256:647a5f4ff3514e7e2acbbc884319499b0ae90ec4ec93e83e7f41474cf8666e0e \
+ --hash=sha256:783c560439a08715eb00c79be64cd9471ce843f07b194592d15978909a8c85ad \
+ --hash=sha256:7c62934b8ae447bda4273a2dc4c3a65b0c7dc995af611b5003e75a34faa926f2 \
+ --hash=sha256:7d7f79bd828ab56446b0be941a6acb0af287ad97fe4ac5052c59ad0518f5456d \
+ --hash=sha256:82076bf4e84a1378ccd1c699e8890a8f469c3ebeec110ae5c4f03cfab25cd09b \
+ --hash=sha256:8d61c374e4d698ee36eab80e33c485054514bd6f57c25cd8e2c9f0a40f159ebc \
+ --hash=sha256:8dba147f13457bd22b4509bfa700ce12bfcb8294f8b9abd4c66d4e90f90cefc2 \
+ --hash=sha256:8faecb1fce2a25201f5496ad96181933e60b4b833f99dc143a84d5d2494a46f6 \
+ --hash=sha256:b4f741be5f34e17caa57c72924045776a865efd5f9deab6ebb3b7c4f1190273b \
+ --hash=sha256:bbbd24bd0054a978b5d6c1be7794289c760b20d44fea526e3fb1078d060a6fe7 \
+ --hash=sha256:d143adbc38df9a576a34220d18f564208ddf88f691a1aaaf7b78c7fc653f2428 \
+ --hash=sha256:dfd572a79e1848b67307be25e15f0804b9e9fc30a0d669a0fad668f3678a8869 \
+ --hash=sha256:e06c7991367e848c4f8e08e7197b0a71c145a8e32c5f92158ed64b4f6d5b4a22 \
+ --hash=sha256:ed776ced21ebf57379cb38885485460ffd7df29cca9666197876d2045849b927 \
+ --hash=sha256:feb83b708350947846fd898debb1538ab5e0458ff56627f01e2174e73c0fe079 \
+ --hash=sha256:fed29c65100966472bef1603c479b3b60be47078810a0d1c407e5ee133e606d7
# via -r requirements/common.in
numpy==1.26.3 \
--hash=sha256:02f98011ba4ab17f46f80f7f8f1c291ee7d855fcef0a5a98db80767a468c85cd \
From 9e7c4eb0c5fc78c7820fa98dee3f2c91ca438e66 Mon Sep 17 00:00:00 2001
From: florinbilt <160469273+florinbilt@users.noreply.github.com>
Date: Thu, 14 Mar 2024 09:26:08 +0200
Subject: [PATCH 081/128] 3376 - fix linked bugs from alert summaries not opening (#7970)
---
ui/perfherder/alerts/AlertHeader.jsx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/ui/perfherder/alerts/AlertHeader.jsx b/ui/perfherder/alerts/AlertHeader.jsx
index ade96d032d6..72428f79b9f 100644
--- a/ui/perfherder/alerts/AlertHeader.jsx
+++ b/ui/perfherder/alerts/AlertHeader.jsx
@@ -27,7 +27,7 @@ const AlertHeader = ({
updateAssignee,
}) => {
const getIssueTrackerUrl = () => {
- const { issueTrackerUrl } = issueTrackers.find(
+ const { issue_tracker_url: issueTrackerUrl } = issueTrackers.find(
(tracker) => tracker.id === alertSummary.issue_tracker,
);
return issueTrackerUrl + alertSummary.bug_number;
From 59dd66f578251a94bca9faa3b556c7662ee588e6 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 14 Mar 2024 02:12:18 +0000
Subject: [PATCH 082/128] Bump taskcluster from 60.4.2 to 61.0.0
Bumps [taskcluster](https://github.com/taskcluster/taskcluster) from 60.4.2 to 61.0.0.
- [Release notes](https://github.com/taskcluster/taskcluster/releases)
- [Changelog](https://github.com/taskcluster/taskcluster/blob/main/CHANGELOG.md)
- [Commits](https://github.com/taskcluster/taskcluster/compare/v60.4.2...v61.0.0)
---
updated-dependencies:
- dependency-name: taskcluster
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 18 ++++++------------
2 files changed, 7 insertions(+), 13 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 12069e166e2..7b8729ee52f 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -25,7 +25,7 @@ python-dateutil==2.9.0.post0
django-filter==23.5 # Listed in DEFAULT_FILTER_BACKENDS on settings.py
django-redis==5.4.0 # Listed in CACHES on settings.py
-taskcluster==60.4.2 # import taskcluster
+taskcluster==61.0.0 # import taskcluster
python-jose[pycryptodome]==3.3.0 # from jose import jwt
furl==2.1.3 # Imported as furl
diff --git a/requirements/common.txt b/requirements/common.txt
index 1c63948778e..af4a1c1a85c 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -699,9 +699,7 @@ moz-measure-noise==2.60.1 \
mozci[cache]==2.4.0 \
--hash=sha256:1302ce8b08f53e608b654e54313b1f36f978dafad9a913a58a3331139b2d9225 \
--hash=sha256:b1ee163b31e1696bee7f2b203f508fcd4a3869c1158969615f9bdab2e1a57a9b
- # via
- # -r requirements/common.in
- # mozci
+ # via -r requirements/common.in
mozfile==3.0.0 \
--hash=sha256:3b0afcda2fa8b802ef657df80a56f21619008f61fcc14b756124028d7b7adf5c \
--hash=sha256:92ca1a786abbdf5e6a7aada62d3a4e28f441ef069c7623223add45268e53c789
@@ -1035,9 +1033,7 @@ python-dateutil==2.9.0.post0 \
python-jose[pycryptodome]==3.3.0 \
--hash=sha256:55779b5e6ad599c6336191246e95eb2293a9ddebd555f796a65f838f07e5d78a \
--hash=sha256:9b1376b023f8b298536eedd47ae1089bcdb848f1535ab30555cd92002d78923a
- # via
- # -r requirements/common.in
- # python-jose
+ # via -r requirements/common.in
python3-memcached==1.51 \
--hash=sha256:7cbe5951d68eef69d948b7a7ed7decfbd101e15e7f5be007dcd1219ccc584859
# via mozci
@@ -1473,9 +1469,9 @@ tabulate==0.9.0 \
--hash=sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c \
--hash=sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f
# via mozci
-taskcluster==60.4.2 \
- --hash=sha256:5217073dd3c6642d976ab4a3f5861308b18bba533cd246d378a095d85e39597c \
- --hash=sha256:9cacf06e790e81535a019c0623e5cdf284a7ecefce7f02de8f2802cdad161077
+taskcluster==61.0.0 \
+ --hash=sha256:69022458e59bf3228394184c811dfe9983ac72bfd8c40133dee849c3a1f73a4d \
+ --hash=sha256:acf6b64a7cf5db7fca1f626c0d9526df298a5ea5450831695844836eebec009d
# via
# -r requirements/common.in
# mozci
@@ -1576,9 +1572,7 @@ wcwidth==0.2.13 \
whitenoise[brotli]==6.6.0 \
--hash=sha256:8998f7370973447fac1e8ef6e8ded2c5209a7b1f67c1012866dbcd09681c3251 \
--hash=sha256:b1f9db9bf67dc183484d760b99f4080185633136a273a03f6436034a41064146
- # via
- # -r requirements/common.in
- # whitenoise
+ # via -r requirements/common.in
yarl==1.9.4 \
--hash=sha256:008d3e808d03ef28542372d01057fd09168419cdc8f848efe2804f894ae03e51 \
--hash=sha256:03caa9507d3d3c83bca08650678e25364e1843b484f19986a527630ca376ecce \
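The pin comment is simply "import taskcluster". As a hedged sketch of the kind of client call that import enables, here is an index lookup against a Taskcluster deployment; the root URL and namespace are illustrative assumptions, not code from this repository:

```python
# Hedged sketch of the taskcluster Python client; rootUrl and the index
# namespace below are illustrative placeholders.
import taskcluster

index = taskcluster.Index({"rootUrl": "https://firefox-ci-tc.services.mozilla.com"})
task = index.findTask("project.example.latest.decision")
print(task["taskId"])
```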
From e62565ed75ebb651b1d47a44310f32afb7831e4c Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 15 Mar 2024 20:26:59 +0000
Subject: [PATCH 083/128] Bump follow-redirects from 1.15.1 to 1.15.6
Bumps [follow-redirects](https://github.com/follow-redirects/follow-redirects) from 1.15.1 to 1.15.6.
- [Release notes](https://github.com/follow-redirects/follow-redirects/releases)
- [Commits](https://github.com/follow-redirects/follow-redirects/compare/v1.15.1...v1.15.6)
---
updated-dependencies:
- dependency-name: follow-redirects
dependency-type: indirect
...
Signed-off-by: dependabot[bot]
---
yarn.lock | 13 ++++---------
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/yarn.lock b/yarn.lock
index d422afe80e1..c2904a4d60a 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -5351,15 +5351,10 @@ flux-standard-action@^0.6.1:
dependencies:
lodash.isplainobject "^3.2.0"
-follow-redirects@^1.0.0:
- version "1.15.1"
- resolved "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.1.tgz"
- integrity sha512-yLAMQs+k0b2m7cVxpS1VKJVvoz7SS9Td1zss3XRwXj+ZDH00RJgnuLx7E44wx02kQLrdM3aOOy+FpzS7+8OizA==
-
-follow-redirects@^1.15.4:
- version "1.15.5"
- resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.5.tgz#54d4d6d062c0fa7d9d17feb008461550e3ba8020"
- integrity sha512-vSFWUON1B+yAw1VN4xMfxgn5fTUiaOzAJCKBwIIgT/+7CuGy9+r+5gITvP62j3RmaD5Ph65UaERdOSRGUzZtgw==
+follow-redirects@^1.0.0, follow-redirects@^1.15.4:
+ version "1.15.6"
+ resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.6.tgz#7f815c0cda4249c74ff09e95ef97c23b5fd0399b"
+ integrity sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==
for-each@^0.3.3:
version "0.3.3"
From 74a11f2b9bdbc489b8fdc6cd2fc528184fd12c91 Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Fri, 15 Mar 2024 21:15:57 +0100
Subject: [PATCH 084/128] Bug 1885655 - use 'Fenix :: General' as the default
Bugzilla component for android-components and fenix tasks, and 'Focus :: General' for focus tasks
---
tests/ui/job-view/bugfiler_test.jsx | 3 +++
ui/shared/BugFiler.jsx | 25 ++++++++++++++++++-
.../tabs/failureSummary/FailureSummaryTab.jsx | 1 +
3 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/tests/ui/job-view/bugfiler_test.jsx b/tests/ui/job-view/bugfiler_test.jsx
index f3c85f172e2..318bf5057b2 100644
--- a/tests/ui/job-view/bugfiler_test.jsx
+++ b/tests/ui/job-view/bugfiler_test.jsx
@@ -21,6 +21,7 @@ describe('BugFiler', () => {
job_group_name: 'Mochitests executed by TaskCluster',
job_type_name: 'test-linux64/debug-mochitest-browser-chrome-10',
job_type_symbol: 'bc10',
+ platform: 'windows11-64',
};
const suggestions = [
{
@@ -120,6 +121,7 @@ describe('BugFiler', () => {
successCallback={successCallback}
jobGroupName={selectedJob.job_group_name}
jobTypeName={selectedJob.job_type_name}
+ platform={selectedJob.platform}
notify={() => {}}
/>
@@ -138,6 +140,7 @@ describe('BugFiler', () => {
successCallback={successCallback}
jobGroupName={selectedJob.job_group_name}
jobTypeName={selectedJob.job_type_name}
+ platform={selectedJob.platform}
notify={() => {}}
/>
diff --git a/ui/shared/BugFiler.jsx b/ui/shared/BugFiler.jsx
index 5e5decbeabd..52f0ca7a1a6 100644
--- a/ui/shared/BugFiler.jsx
+++ b/ui/shared/BugFiler.jsx
@@ -386,10 +386,31 @@ export class BugFilerClass extends React.Component {
* file path or its end.
*/
findProductByPath = async () => {
- const { suggestion } = this.props;
+ const { suggestion, platform } = this.props;
const { crashSignatures } = this.state;
const pathEnd = suggestion.path_end;
+ if (
+ !crashSignatures.length &&
+ (platform.startsWith('AC-') || platform.startsWith('fenix-'))
+ ) {
+ this.setState({
+ suggestedProducts: ['Fenix :: General'],
+ selectedProduct: 'Fenix :: General',
+ searching: false,
+ });
+ return;
+ }
+
+ if (!crashSignatures.length && platform.startsWith('focus-')) {
+ this.setState({
+ suggestedProducts: ['Focus :: General'],
+ selectedProduct: 'Focus :: General',
+ searching: false,
+ });
+ return;
+ }
+
if (!pathEnd) {
return;
}
@@ -1001,6 +1022,8 @@ BugFilerClass.propTypes = {
reftestUrl: PropTypes.string.isRequired,
successCallback: PropTypes.func.isRequired,
jobGroupName: PropTypes.string.isRequired,
+ jobTypeName: PropTypes.string.isRequired,
+ platform: PropTypes.string.isRequired,
notify: PropTypes.func.isRequired,
};
diff --git a/ui/shared/tabs/failureSummary/FailureSummaryTab.jsx b/ui/shared/tabs/failureSummary/FailureSummaryTab.jsx
index cc988cf000b..9cc62cd692a 100644
--- a/ui/shared/tabs/failureSummary/FailureSummaryTab.jsx
+++ b/ui/shared/tabs/failureSummary/FailureSummaryTab.jsx
@@ -306,6 +306,7 @@ class FailureSummaryTab extends React.Component {
successCallback={this.bugFilerCallback}
jobGroupName={selectedJob.job_group_name}
jobTypeName={selectedJob.job_type_name}
+ platform={selectedJob.platform}
/>
)}
From 586995f3f62ee6f05108b371c26ab7e802494c84 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 18 Mar 2024 01:20:08 +0000
Subject: [PATCH 085/128] Update dependency docs/mkdocs-material to v9.5.14
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 43e4f7a48fa..7e5ce02622c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ description = "Defaut package, used for development or readthedocs"
[project.optional-dependencies]
docs = [
"mkdocs==1.5.3",
- "mkdocs-material==9.5.13",
+ "mkdocs-material==9.5.14",
"mdx_truly_sane_lists==1.3",
]
From 0218563e8d0ac170fb9969f29c718b0701073e83 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 18 Mar 2024 02:58:42 +0000
Subject: [PATCH 086/128] Bump black from 24.2.0 to 24.3.0
Bumps [black](https://github.com/psf/black) from 24.2.0 to 24.3.0.
- [Release notes](https://github.com/psf/black/releases)
- [Changelog](https://github.com/psf/black/blob/main/CHANGES.md)
- [Commits](https://github.com/psf/black/compare/24.2.0...24.3.0)
---
updated-dependencies:
- dependency-name: black
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 47 ++++++++++++++++++++++----------------------
2 files changed, 24 insertions(+), 25 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index c9300b4a2d5..b07df4eb041 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -16,7 +16,7 @@ pytest-watch==4.2.0
# Required by django-extension's runserver_plus command.
pytest-django==4.8.0
pytest==8.1.1
-black==24.2.0
+black==24.3.0
shellcheck-py==0.9.0.6
# To test async code
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 09c5b546b2d..d7fcf650b7b 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -24,29 +24,29 @@ betamax-serializers==0.2.1 \
--hash=sha256:1b23c46429c40a8873682854c88d805c787c72d252f3fa0c858e9c300682ceac \
--hash=sha256:345c419b1b73171f2951c62ac3c701775ac4b76e13e86464ebf0ff2a978e4949
# via -r requirements/dev.in
-black==24.2.0 \
- --hash=sha256:057c3dc602eaa6fdc451069bd027a1b2635028b575a6c3acfd63193ced20d9c8 \
- --hash=sha256:08654d0797e65f2423f850fc8e16a0ce50925f9337fb4a4a176a7aa4026e63f8 \
- --hash=sha256:163baf4ef40e6897a2a9b83890e59141cc8c2a98f2dda5080dc15c00ee1e62cd \
- --hash=sha256:1e08fb9a15c914b81dd734ddd7fb10513016e5ce7e6704bdd5e1251ceee51ac9 \
- --hash=sha256:4dd76e9468d5536abd40ffbc7a247f83b2324f0c050556d9c371c2b9a9a95e31 \
- --hash=sha256:4f9de21bafcba9683853f6c96c2d515e364aee631b178eaa5145fc1c61a3cc92 \
- --hash=sha256:61a0391772490ddfb8a693c067df1ef5227257e72b0e4108482b8d41b5aee13f \
- --hash=sha256:6981eae48b3b33399c8757036c7f5d48a535b962a7c2310d19361edeef64ce29 \
- --hash=sha256:7e53a8c630f71db01b28cd9602a1ada68c937cbf2c333e6ed041390d6968faf4 \
- --hash=sha256:810d445ae6069ce64030c78ff6127cd9cd178a9ac3361435708b907d8a04c693 \
- --hash=sha256:93601c2deb321b4bad8f95df408e3fb3943d85012dddb6121336b8e24a0d1218 \
- --hash=sha256:992e451b04667116680cb88f63449267c13e1ad134f30087dec8527242e9862a \
- --hash=sha256:9db528bccb9e8e20c08e716b3b09c6bdd64da0dd129b11e160bf082d4642ac23 \
- --hash=sha256:a0057f800de6acc4407fe75bb147b0c2b5cbb7c3ed110d3e5999cd01184d53b0 \
- --hash=sha256:ba15742a13de85e9b8f3239c8f807723991fbfae24bad92d34a2b12e81904982 \
- --hash=sha256:bce4f25c27c3435e4dace4815bcb2008b87e167e3bf4ee47ccdc5ce906eb4894 \
- --hash=sha256:ca610d29415ee1a30a3f30fab7a8f4144e9d34c89a235d81292a1edb2b55f540 \
- --hash=sha256:d533d5e3259720fdbc1b37444491b024003e012c5173f7d06825a77508085430 \
- --hash=sha256:d84f29eb3ee44859052073b7636533ec995bd0f64e2fb43aeceefc70090e752b \
- --hash=sha256:e37c99f89929af50ffaf912454b3e3b47fd64109659026b678c091a4cd450fb2 \
- --hash=sha256:e8a6ae970537e67830776488bca52000eaa37fa63b9988e8c487458d9cd5ace6 \
- --hash=sha256:faf2ee02e6612577ba0181f4347bcbcf591eb122f7841ae5ba233d12c39dcb4d
+black==24.3.0 \
+ --hash=sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f \
+ --hash=sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93 \
+ --hash=sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11 \
+ --hash=sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0 \
+ --hash=sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9 \
+ --hash=sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5 \
+ --hash=sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213 \
+ --hash=sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d \
+ --hash=sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7 \
+ --hash=sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837 \
+ --hash=sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f \
+ --hash=sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395 \
+ --hash=sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995 \
+ --hash=sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f \
+ --hash=sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597 \
+ --hash=sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959 \
+ --hash=sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5 \
+ --hash=sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb \
+ --hash=sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4 \
+ --hash=sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7 \
+ --hash=sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd \
+ --hash=sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7
# via -r requirements/dev.in
build==1.0.3 \
--hash=sha256:538aab1b64f9828977f84bc63ae570b060a8ed1be419e7870b8b4fc5e6ea553b \
@@ -218,7 +218,6 @@ coverage[toml]==7.4.1 \
--hash=sha256:f90515974b39f4dea2f27c0959688621b46d96d5a626cf9c53dbc653a895c05c \
--hash=sha256:fe558371c1bdf3b8fa03e097c523fb9645b8730399c14fe7721ee9c9e2a545d3
# via
- # coverage
# pytest-cov
# pytest-testmon
distlib==0.3.8 \
From d914f6ed15232400288621563701ed52b8ffd7b1 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 18 Mar 2024 03:04:15 +0000
Subject: [PATCH 087/128] Bump djangorestframework from 3.14.0 to 3.15.0
Bumps [djangorestframework](https://github.com/encode/django-rest-framework) from 3.14.0 to 3.15.0.
- [Release notes](https://github.com/encode/django-rest-framework/releases)
- [Commits](https://github.com/encode/django-rest-framework/compare/3.14.0...3.15.0)
---
updated-dependencies:
- dependency-name: djangorestframework
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 10 +++-------
2 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 7b8729ee52f..c741c86e017 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -12,7 +12,7 @@ mysqlclient==2.2.4 # Required by Django
psycopg2-binary==2.9.9
jsonschema==4.21.1 # import jsonschema
-djangorestframework==3.14.0 # Imported as rest_framework
+djangorestframework==3.15.0 # Imported as rest_framework
django-cors-headers==4.3.1 # Listed as 3rd party app on settings.py
mozlog==8.0.0
diff --git a/requirements/common.txt b/requirements/common.txt
index af4a1c1a85c..f1b3059568b 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -388,9 +388,9 @@ django-redis==5.4.0 \
--hash=sha256:6a02abaa34b0fea8bf9b707d2c363ab6adc7409950b2db93602e6cb292818c42 \
--hash=sha256:ebc88df7da810732e2af9987f7f426c96204bf89319df4c6da6ca9a2942edd5b
# via -r requirements/common.in
-djangorestframework==3.14.0 \
- --hash=sha256:579a333e6256b09489cbe0a067e66abe55c6595d8926be6b99423786334350c8 \
- --hash=sha256:eb63f58c9f218e1a7d064d17a70751f528ed4e1d35547fdade9aaf4cd103fd08
+djangorestframework==3.15.0 \
+ --hash=sha256:3f4a263012e1b263bf49a4907eb4cfe14de840a09b1ba64596d01a9c54835919 \
+ --hash=sha256:5fa616048a7ec287fdaab3148aa5151efb73f7f8be1e23a9d18484e61e672695
# via -r requirements/common.in
dockerflow==2024.3.0 \
--hash=sha256:96678b00636dfd61fccf08f5f4102d0444e43bec3f8850175a060d8e83559e4c \
@@ -1037,10 +1037,6 @@ python-jose[pycryptodome]==3.3.0 \
python3-memcached==1.51 \
--hash=sha256:7cbe5951d68eef69d948b7a7ed7decfbd101e15e7f5be007dcd1219ccc584859
# via mozci
-pytz==2024.1 \
- --hash=sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812 \
- --hash=sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319
- # via djangorestframework
pyyaml==6.0.1 \
--hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \
--hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \
From 7adf0f071a4eb53d03b22987e89c1480115a4871 Mon Sep 17 00:00:00 2001
From: Gregory Mierzwinski
Date: Tue, 19 Mar 2024 11:31:37 -0400
Subject: [PATCH 088/128] Bug 1886208 - Add proper names for the win11 ref hw
platforms.
---
ui/helpers/constants.js | 2 ++
1 file changed, 2 insertions(+)
diff --git a/ui/helpers/constants.js b/ui/helpers/constants.js
index 5caf7437275..f27b0f21ebd 100644
--- a/ui/helpers/constants.js
+++ b/ui/helpers/constants.js
@@ -102,6 +102,8 @@ export const thPlatformMap = {
'windows11-64-2009-devedition-qr': 'Windows 11 x64 22H2 WebRender DevEdition',
'windows11-64-2009-ccov-qr': 'Windows 11 x64 22H2 CCov WebRender',
'windows11-64-2009-mingwclang-qr': 'Windows 11 x64 22H2 MinGW WebRender',
+ 'windows11-64-2009-hw-ref': 'Windows 11 x64 22H2 WebRender Ref HW',
+ 'windows11-64-2009-hw-ref-shippable': 'Windows 11 x64 22H2 WebRender Ref HW Shippable',
'windows2012-32': 'Windows 2012',
'windows2012-32-shippable': 'Windows 2012 Shippable',
'windows2012-32-add-on-devel': 'Windows 2012 addon',
From cafb92dd8d0bc203a764991a8af52aab74f498d7 Mon Sep 17 00:00:00 2001
From: Gregory Mierzwinski
Date: Tue, 19 Mar 2024 11:53:51 -0400
Subject: [PATCH 089/128] Fix linting failure.
---
ui/helpers/constants.js | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/ui/helpers/constants.js b/ui/helpers/constants.js
index f27b0f21ebd..21124220b32 100644
--- a/ui/helpers/constants.js
+++ b/ui/helpers/constants.js
@@ -103,7 +103,8 @@ export const thPlatformMap = {
'windows11-64-2009-ccov-qr': 'Windows 11 x64 22H2 CCov WebRender',
'windows11-64-2009-mingwclang-qr': 'Windows 11 x64 22H2 MinGW WebRender',
'windows11-64-2009-hw-ref': 'Windows 11 x64 22H2 WebRender Ref HW',
- 'windows11-64-2009-hw-ref-shippable': 'Windows 11 x64 22H2 WebRender Ref HW Shippable',
+ 'windows11-64-2009-hw-ref-shippable':
+ 'Windows 11 x64 22H2 WebRender Ref HW Shippable',
'windows2012-32': 'Windows 2012',
'windows2012-32-shippable': 'Windows 2012 Shippable',
'windows2012-32-add-on-devel': 'Windows 2012 addon',
From 6226d431a10b5fae5e85c55b093d53257275c90d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 20 Mar 2024 03:00:38 +0000
Subject: [PATCH 090/128] Bump pytest-asyncio from 0.23.5.post1 to 0.23.6
Bumps [pytest-asyncio](https://github.com/pytest-dev/pytest-asyncio) from 0.23.5.post1 to 0.23.6.
- [Release notes](https://github.com/pytest-dev/pytest-asyncio/releases)
- [Commits](https://github.com/pytest-dev/pytest-asyncio/compare/v0.23.5.post1...v0.23.6)
---
updated-dependencies:
- dependency-name: pytest-asyncio
dependency-type: direct:development
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index b07df4eb041..4ffe40521f7 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -20,7 +20,7 @@ black==24.3.0
shellcheck-py==0.9.0.6
# To test async code
-pytest-asyncio==0.23.5.post1 # required to pass test_new_job_transformation
+pytest-asyncio==0.23.6 # required to pass test_new_job_transformation
# To test code that's making system time calls
# pytest-freezegun is not compatible with recent Django versions
diff --git a/requirements/dev.txt b/requirements/dev.txt
index d7fcf650b7b..4143cbce4af 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -351,9 +351,9 @@ pytest==8.1.1 \
# pytest-freezegun
# pytest-testmon
# pytest-watch
-pytest-asyncio==0.23.5.post1 \
- --hash=sha256:30f54d27774e79ac409778889880242b0403d09cabd65b727ce90fe92dd5d80e \
- --hash=sha256:b9a8806bea78c21276bc34321bbf234ba1b2ea5b30d9f0ce0f2dea45e4685813
+pytest-asyncio==0.23.6 \
+ --hash=sha256:68516fdd1018ac57b846c9846b954f0393b26f094764a28c955eabb0536a4e8a \
+ --hash=sha256:ffe523a89c1c222598c76856e76852b787504ddb72dd5d9b6617ffa8aa2cde5f
# via -r requirements/dev.in
pytest-cov==4.1.0 \
--hash=sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6 \
From cd069a528587247dc417539a2174dd9285ad3d1d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 18 Mar 2024 15:48:05 +0000
Subject: [PATCH 091/128] Bump shellcheck-py from 0.9.0.6 to 0.10.0.1
Bumps [shellcheck-py](https://github.com/ryanrhee/shellcheck-py) from 0.9.0.6 to 0.10.0.1.
- [Commits](https://github.com/ryanrhee/shellcheck-py/compare/v0.9.0.6...v0.10.0.1)
---
updated-dependencies:
- dependency-name: shellcheck-py
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 11 ++++++-----
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 4ffe40521f7..c176a50b6f4 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -17,7 +17,7 @@ pytest-watch==4.2.0
pytest-django==4.8.0
pytest==8.1.1
black==24.3.0
-shellcheck-py==0.9.0.6
+shellcheck-py==0.10.0.1
# To test async code
pytest-asyncio==0.23.6 # required to pass test_new_job_transformation
diff --git a/requirements/dev.txt b/requirements/dev.txt
index 4143cbce4af..b38266a9fcd 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -447,11 +447,12 @@ selenium==4.17.2 \
--hash=sha256:5aee79026c07985dc1b0c909f34084aa996dfe5b307602de9016d7a621a473f2 \
--hash=sha256:d43d6972e516855fb242ef9ce4ce759057b115070e702e7b1c1032fe7b38d87b
# via pypom
-shellcheck-py==0.9.0.6 \
- --hash=sha256:38d48a4e2279f5deac374574e7625cd53b7f615301f36b1b1fffd22105dc066d \
- --hash=sha256:730235c4f92657884f8b343d5426e4dc28e9a6ba9ad54d469cd038e340ea5be0 \
- --hash=sha256:d1d0c285e2c094813659e0920559a2892da598c1176da59cb4eb9e2f505e5ee8 \
- --hash=sha256:f83a0ee1e9762f787ab52e8a906e553b9583586c44e3f9730b6e635f296a69e8
+shellcheck-py==0.10.0.1 \
+ --hash=sha256:390826b340b8c19173922b0da5ef7b66ef34d4d087dc48aad3e01f7e77e164d9 \
+ --hash=sha256:48f08965cafbb3363b265c4ef40628ffced19cb6fc7c4bb5ce72d32cbcfb4bb9 \
+ --hash=sha256:8f3bf12ee6d0845dd5ac1a7bac8c4b1fec0379e115950986883c9488af40ada7 \
+ --hash=sha256:be73a16931c05f79643ff74b6519d1e1203b394583ab8c68a48a8e7f257d1090 \
+ --hash=sha256:c1c266f7f54cd286057c592ead3095f93d123acdcabf048879a7d8900c3aac7b
# via -r requirements/dev.in
six==1.16.0 \
--hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \
From 630d6f303fcce885a009db0e946eb15cb8b76539 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 25 Mar 2024 02:31:00 +0000
Subject: [PATCH 092/128] Update dependency docs/mkdocs-material to v9.5.15
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 7e5ce02622c..b4c905c38aa 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ description = "Defaut package, used for development or readthedocs"
[project.optional-dependencies]
docs = [
"mkdocs==1.5.3",
- "mkdocs-material==9.5.14",
+ "mkdocs-material==9.5.15",
"mdx_truly_sane_lists==1.3",
]
From 61a741a488ea0338bddb307dc6bdadd10a490d89 Mon Sep 17 00:00:00 2001
From: beatrice-acasandrei
<69891317+beatrice-acasandrei@users.noreply.github.com>
Date: Mon, 25 Mar 2024 11:15:03 +0200
Subject: [PATCH 093/128] Removed call to update the status summary (#7985)
---
ui/perfherder/alerts/AlertActionPanel.jsx | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/ui/perfherder/alerts/AlertActionPanel.jsx b/ui/perfherder/alerts/AlertActionPanel.jsx
index ac88f157204..1a59bc0ddb9 100644
--- a/ui/perfherder/alerts/AlertActionPanel.jsx
+++ b/ui/perfherder/alerts/AlertActionPanel.jsx
@@ -11,7 +11,7 @@ import {
import SimpleTooltip from '../../shared/SimpleTooltip';
import { alertStatusMap } from '../perf-helpers/constants';
-import { modifyAlert, modifyAlertSummary } from '../perf-helpers/helpers';
+import { modifyAlert } from '../perf-helpers/helpers';
import { processErrors } from '../../helpers/http';
import AlertModal from './AlertModal';
@@ -121,7 +121,6 @@ export default class AlertActionPanel extends React.Component {
await this.modifySelectedAlerts(selectedAlerts, {
status: alertStatusMap[newStatus],
});
- modifyAlertSummary(alertSummary.id);
const untriagedAlerts = alertSummary.alerts.filter(
(alert) => alert.status === 0,
From 5f6a2cee2ea33f5887fcb052be052bcab5755cca Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 25 Mar 2024 02:37:23 +0000
Subject: [PATCH 094/128] Bump djangorestframework from 3.15.0 to 3.15.1
Bumps [djangorestframework](https://github.com/encode/django-rest-framework) from 3.15.0 to 3.15.1.
- [Release notes](https://github.com/encode/django-rest-framework/releases)
- [Commits](https://github.com/encode/django-rest-framework/compare/3.15.0...3.15.1)
---
updated-dependencies:
- dependency-name: djangorestframework
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index c741c86e017..718304dd31a 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -12,7 +12,7 @@ mysqlclient==2.2.4 # Required by Django
psycopg2-binary==2.9.9
jsonschema==4.21.1 # import jsonschema
-djangorestframework==3.15.0 # Imported as rest_framework
+djangorestframework==3.15.1 # Imported as rest_framework
django-cors-headers==4.3.1 # Listed as 3rd party app on settings.py
mozlog==8.0.0
diff --git a/requirements/common.txt b/requirements/common.txt
index f1b3059568b..2e0e1f70298 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -388,9 +388,9 @@ django-redis==5.4.0 \
--hash=sha256:6a02abaa34b0fea8bf9b707d2c363ab6adc7409950b2db93602e6cb292818c42 \
--hash=sha256:ebc88df7da810732e2af9987f7f426c96204bf89319df4c6da6ca9a2942edd5b
# via -r requirements/common.in
-djangorestframework==3.15.0 \
- --hash=sha256:3f4a263012e1b263bf49a4907eb4cfe14de840a09b1ba64596d01a9c54835919 \
- --hash=sha256:5fa616048a7ec287fdaab3148aa5151efb73f7f8be1e23a9d18484e61e672695
+djangorestframework==3.15.1 \
+ --hash=sha256:3ccc0475bce968608cf30d07fb17d8e52d1d7fc8bfe779c905463200750cbca6 \
+ --hash=sha256:f88fad74183dfc7144b2756d0d2ac716ea5b4c7c9840995ac3bfd8ec034333c1
# via -r requirements/common.in
dockerflow==2024.3.0 \
--hash=sha256:96678b00636dfd61fccf08f5f4102d0444e43bec3f8850175a060d8e83559e4c \
From b08ea1340c4b1868906cbebe3b2eb4b542620147 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 25 Mar 2024 02:31:43 +0000
Subject: [PATCH 095/128] Bump pre-commit from 3.6.2 to 3.7.0
Bumps [pre-commit](https://github.com/pre-commit/pre-commit) from 3.6.2 to 3.7.0.
- [Release notes](https://github.com/pre-commit/pre-commit/releases)
- [Changelog](https://github.com/pre-commit/pre-commit/blob/main/CHANGELOG.md)
- [Commits](https://github.com/pre-commit/pre-commit/compare/v3.6.2...v3.7.0)
---
updated-dependencies:
- dependency-name: pre-commit
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index c176a50b6f4..bd5a99e8ded 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -7,7 +7,7 @@ django-extensions==3.2.3
PyPOM==2.2.4
# for git commit hooks
-pre-commit==3.6.2
+pre-commit==3.7.0
# for test driven development
pytest-testmon==2.1.1
diff --git a/requirements/dev.txt b/requirements/dev.txt
index b38266a9fcd..e8a397dc64f 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -321,9 +321,9 @@ pluggy==1.4.0 \
# via
# pypom
# pytest
-pre-commit==3.6.2 \
- --hash=sha256:ba637c2d7a670c10daedc059f5c49b5bd0aadbccfcd7ec15592cf9665117532c \
- --hash=sha256:c3ef34f463045c88658c5b99f38c1e297abdcc0ff13f98d3370055fbbfabc67e
+pre-commit==3.7.0 \
+ --hash=sha256:5eae9e10c2b5ac51577c3452ec0a490455c45a0533f7960f993a0d01e59decab \
+ --hash=sha256:e209d61b8acdcf742404408531f0c37d49d2c734fd7cff2d6076083d191cb060
# via -r requirements/dev.in
pypom==2.2.4 \
--hash=sha256:5da52cf447e62f43a0cfa47dfe52eb822eff07b2fdad759f930d1d227c15220b \
From 8d97dc57fba42ad201b839eba77e59da94204b01 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sat, 23 Mar 2024 06:42:27 +0000
Subject: [PATCH 096/128] Bump webpack-dev-middleware from 5.3.3 to 5.3.4
Bumps [webpack-dev-middleware](https://github.com/webpack/webpack-dev-middleware) from 5.3.3 to 5.3.4.
- [Release notes](https://github.com/webpack/webpack-dev-middleware/releases)
- [Changelog](https://github.com/webpack/webpack-dev-middleware/blob/v5.3.4/CHANGELOG.md)
- [Commits](https://github.com/webpack/webpack-dev-middleware/compare/v5.3.3...v5.3.4)
---
updated-dependencies:
- dependency-name: webpack-dev-middleware
dependency-type: indirect
...
Signed-off-by: dependabot[bot]
---
yarn.lock | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/yarn.lock b/yarn.lock
index c2904a4d60a..fa52488313a 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -11064,9 +11064,9 @@ webpack-cli@5.1.4:
webpack-merge "^5.7.3"
webpack-dev-middleware@^5.3.1:
- version "5.3.3"
- resolved "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.3.tgz"
- integrity sha512-hj5CYrY0bZLB+eTO+x/j67Pkrquiy7kWepMHmUMoPsmcUaeEnQJqFzHJOyxgWlq746/wUuA64p9ta34Kyb01pA==
+ version "5.3.4"
+ resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz#eb7b39281cbce10e104eb2b8bf2b63fce49a3517"
+ integrity sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==
dependencies:
colorette "^2.0.10"
memfs "^3.4.3"
From e759dccfd96f8da8666d8f5f9453fcd6b34ff4f5 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 25 Mar 2024 02:41:53 +0000
Subject: [PATCH 097/128] Bump pytest-cov from 4.1.0 to 5.0.0
Bumps [pytest-cov](https://github.com/pytest-dev/pytest-cov) from 4.1.0 to 5.0.0.
- [Changelog](https://github.com/pytest-dev/pytest-cov/blob/master/CHANGELOG.rst)
- [Commits](https://github.com/pytest-dev/pytest-cov/compare/v4.1.0...v5.0.0)
---
updated-dependencies:
- dependency-name: pytest-cov
dependency-type: direct:development
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index bd5a99e8ded..63cbeccdc23 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -1,5 +1,5 @@
# Dependencies needed only for development/testing.
-pytest-cov==4.1.0
+pytest-cov==5.0.0
django-debug-toolbar==4.3.0
mock==5.1.0
responses==0.25.0
diff --git a/requirements/dev.txt b/requirements/dev.txt
index e8a397dc64f..e9dd5d7c518 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -355,9 +355,9 @@ pytest-asyncio==0.23.6 \
--hash=sha256:68516fdd1018ac57b846c9846b954f0393b26f094764a28c955eabb0536a4e8a \
--hash=sha256:ffe523a89c1c222598c76856e76852b787504ddb72dd5d9b6617ffa8aa2cde5f
# via -r requirements/dev.in
-pytest-cov==4.1.0 \
- --hash=sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6 \
- --hash=sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a
+pytest-cov==5.0.0 \
+ --hash=sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652 \
+ --hash=sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857
# via -r requirements/dev.in
pytest-django==4.8.0 \
--hash=sha256:5d054fe011c56f3b10f978f41a8efb2e5adfc7e680ef36fb571ada1f24779d90 \
From 0b15f7dbc3e69260c742092a8807388ad93785c7 Mon Sep 17 00:00:00 2001
From: florinbilt <160469273+florinbilt@users.noreply.github.com>
Date: Mon, 25 Mar 2024 17:47:59 +0200
Subject: [PATCH 098/128] Add tab name Alert# when viewing a specific alert (#7987)
---
ui/App.jsx | 11 ++++++++++-
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/ui/App.jsx b/ui/App.jsx
index 110544f8dd3..fe5065de8e4 100644
--- a/ui/App.jsx
+++ b/ui/App.jsx
@@ -99,7 +99,16 @@ const faviconPaths = {
};
const withFavicon = (element, route) => {
- const { title, favicon } = faviconPaths[route];
+ let { title } = faviconPaths[route];
+ const { favicon } = faviconPaths[route];
+
+ const searchParams = new URLSearchParams(history.location.search);
+ const id = searchParams.get('id');
+
+ if (history.location.pathname === '/perfherder/alerts' && id) {
+ title = `Alert #${id.toString()}`;
+ }
+
return (
From a662295672d56fac3ecd4b595f9e33871659cb66 Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Mon, 25 Mar 2024 16:34:00 +0100
Subject: [PATCH 099/128] update yarn from 1.22.21 to 1.22.22
---
docker/Dockerfile | 2 +-
package.json | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
index ea3c31a3968..36a01f66dee 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -9,7 +9,7 @@ COPY package.json babel.config.json webpack.config.js yarn.lock /app/
# ensure we have python-venv available for glean
RUN apt-get update && apt-get install python3-venv -y
-RUN npm install -g --force yarn@1.22.21
+RUN npm install -g --force yarn@1.22.22
RUN yarn install
RUN yarn build
diff --git a/package.json b/package.json
index 98adb2d1348..bb1eed9b511 100644
--- a/package.json
+++ b/package.json
@@ -8,7 +8,7 @@
"license": "MPL-2.0",
"engines": {
"node": "21.1.0",
- "yarn": "1.22.21"
+ "yarn": "1.22.22"
},
"dependencies": {
"@fortawesome/fontawesome-svg-core": "6.2.1",
From c5f5581abc76140ab7d134ebee4f452d6cd3f75e Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Mon, 25 Mar 2024 16:28:18 +0100
Subject: [PATCH 100/128] Sync mkdocs requirements for build-system with
project.optional-dependencies
---
pyproject.toml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/pyproject.toml b/pyproject.toml
index b4c905c38aa..45358536c7b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,8 +17,8 @@ packages = ["treeherder"]
requires = ["setuptools", "wheel"]
# A list of all of the optional dependencies, some of which are included in the
# below `extras`. They can be opted into by apps.
-mkdocs = { version = "==1.4.2", optional = true }
-mkdocs-material = { version = "==8.5.11", optional = true }
+mkdocs = { version = "==1.5.3", optional = true }
+mkdocs-material = { version = "==9.5.15", optional = true }
mdx_truly_sane_lists = { version = "1.3", optional = true }
[tool.ruff]
From 541cdf516055986ea6e986b1e31c8cc39a9e1888 Mon Sep 17 00:00:00 2001
From: Valentin Rigal
Date: Tue, 26 Mar 2024 16:59:06 +0100
Subject: [PATCH 101/128] Fix bug suggestions with PostgreSQL (#7988)
* Use trigram similarity instead of FTS
* Skip escaping special characters
* Order results by match ranking on Postgres
---------
Co-authored-by: Bastien Abadie
Co-authored-by: Sebastian Hengst
---
tests/log_parser/test_tasks.py | 88 ++++++++++++++++++-
tests/sample_data/bug_list.json | 2 +-
.../migrations/0031_trigram_extension.py | 13 +++
treeherder/model/models.py | 26 +++---
4 files changed, 115 insertions(+), 14 deletions(-)
create mode 100644 treeherder/model/migrations/0031_trigram_extension.py
diff --git a/tests/log_parser/test_tasks.py b/tests/log_parser/test_tasks.py
index ffb6a77b306..eefeffb11f2 100644
--- a/tests/log_parser/test_tasks.py
+++ b/tests/log_parser/test_tasks.py
@@ -1,10 +1,11 @@
import pytest
+from unittest.mock import patch
from tests.test_utils import add_log_response
from treeherder.etl.jobs import store_job_data
from treeherder.etl.push import store_push_data
-from treeherder.model.error_summary import get_error_summary
-from treeherder.model.models import Job, TextLogError
+from treeherder.model.error_summary import get_error_summary, bug_suggestions_line
+from treeherder.model.models import Job, TextLogError, Bugscache
from ..sampledata import SampleData
@@ -62,3 +63,86 @@ def test_create_error_summary(
)
for failure_line in bug_suggestions:
assert set(failure_line.keys()) == expected_keys
+
+
+@pytest.mark.django_db
+@patch(
+ "treeherder.model.error_summary.get_error_search_term_and_path",
+ return_value={
+ "search_term": ["browser_dbg-pretty-print-inline-scripts.js"],
+ "path_end": "devtools/client/debugger/test/mochitest/browser_dbg-pretty-print-inline-scripts.js",
+ },
+)
+def test_bug_suggestion_line(
+ search_mock, failure_classifications, jobs_with_local_log, sample_push, test_repository
+):
+ """
+ A test to verify similarity of search term (often test name) derived from
+ the failure line and bug summary gets taken into account. If it is equal
+ for every bug, the expected result won't be returned by the query because
+ of its higher bug ID.
+ """
+ store_push_data(test_repository, sample_push)
+ for job in jobs_with_local_log:
+ job["job"]["result"] = "testfailed"
+ job["revision"] = sample_push[0]["revision"]
+ store_job_data(test_repository, jobs_with_local_log)
+
+ job = Job.objects.get(id=1)
+
+ Bugscache.objects.create(
+ id=1775819,
+ status="2",
+ keywords="intermittent-failure,regression,test-verify-fail",
+ whiteboard="[retriggered][stockwell unknown]",
+ summary=(
+ "Intermittent devtools/client/debugger/test/mochitest/browser_dbg-pretty-print-inline-scripts.js "
+ "| single tracking bug"
+ ),
+ modified="2010-01-01 00:00:00",
+ )
+
+ # Create 50 other results with an inferior ID.
+ # The bug suggestions SQL query fetches up to 50 rows, ordered by match rank then ID.
+ # In case results are returned with a wrong rank (e.g. 0 for each result), above related suggestion will be lost.
+ Bugscache.objects.bulk_create(
+ [
+ Bugscache(
+ id=100 + i,
+ status="2",
+ keywords="intermittent-failure,intermittent-testcase",
+ summary=(
+ f"Intermittent devtools/client/debugger/test/mochitest/browser_unrelated-{i}.js "
+ "| single tracking bug"
+ ),
+ modified="2010-01-01 00:00:00",
+ )
+ for i in range(50)
+ ]
+ )
+
+ error = job.text_log_error.first()
+ summary, line_cache = bug_suggestions_line(
+ error,
+ project=job.repository,
+ logdate=job.submit_time,
+ term_cache={},
+ line_cache={str(job.submit_time.date()): {}},
+ revision=job.push.revision,
+ )
+ assert summary["bugs"]["open_recent"] == [
+ {
+ "crash_signature": "",
+ "dupe_of": None,
+ "id": 1775819,
+ "keywords": "intermittent-failure,regression,test-verify-fail",
+ "resolution": "",
+ "status": "2",
+ "whiteboard": "[retriggered][stockwell unknown]",
+ "summary": (
+ "Intermittent "
+ "devtools/client/debugger/test/mochitest/browser_dbg-pretty-print-inline-scripts.js "
+ "| single tracking bug"
+ ),
+ }
+ ]
diff --git a/tests/sample_data/bug_list.json b/tests/sample_data/bug_list.json
index 21e790c2b09..3989b0a5adf 100644
--- a/tests/sample_data/bug_list.json
+++ b/tests/sample_data/bug_list.json
@@ -135,7 +135,7 @@
{
"status": "NEW",
"id": 1054669,
- "summary": "Intermittent test_switch_frame.py TestSwitchFrame.test_should_be_able_to_carry_on_working_if_the_frame_is_deleted_from_under_us | TimeoutException: TimeoutException: Connection timed out",
+ "summary": "Intermittent test_switch_frame.py TestSwitchFrame.test_should_be_able_to_carry_on_working_if_the_frame_is_deleted_from_under_us | TimeoutException",
"dupe_of": null,
"duplicates": [],
"cf_crash_signature": "",
diff --git a/treeherder/model/migrations/0031_trigram_extension.py b/treeherder/model/migrations/0031_trigram_extension.py
new file mode 100644
index 00000000000..896165dcbeb
--- /dev/null
+++ b/treeherder/model/migrations/0031_trigram_extension.py
@@ -0,0 +1,13 @@
+# Generated by Django 4.1.13 on 2024-03-25 16:15
+
+from django.db import migrations
+from django.contrib.postgres.operations import TrigramExtension
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("model", "0030_group_durations"),
+ ]
+
+ operations = [TrigramExtension()]
diff --git a/treeherder/model/models.py b/treeherder/model/models.py
index db8f21bd1b3..a6c0db63f07 100644
--- a/treeherder/model/models.py
+++ b/treeherder/model/models.py
@@ -12,12 +12,12 @@
import newrelic.agent
from django.conf import settings
from django.contrib.auth.models import User
-from django.contrib.postgres.search import SearchQuery, SearchRank, SearchVector
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.validators import MinLengthValidator
from django.db import models, transaction
from django.db.models import Count, Max, Min, Q, Subquery
+from django.contrib.postgres.search import TrigramSimilarity
from django.db.utils import ProgrammingError
from django.forms import model_to_dict
from django.utils import timezone
@@ -248,11 +248,11 @@ def sanitized_search_term(cls, search_term):
def search(cls, search_term):
max_size = 50
- # Do not wrap a string in quotes to search as a phrase;
- # see https://bugzilla.mozilla.org/show_bug.cgi?id=1704311
- search_term_fulltext = cls.sanitized_search_term(search_term)
-
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
+ # Do not wrap a string in quotes to search as a phrase;
+ # see https://bugzilla.mozilla.org/show_bug.cgi?id=1704311
+ search_term_fulltext = cls.sanitized_search_term(search_term)
+
# Substitute escape and wildcard characters, so the search term is used
# literally in the LIKE statement.
search_term_like = (
@@ -275,12 +275,16 @@ def search(cls, search_term):
[search_term_fulltext, search_term_like, max_size],
)
else:
- # On PostgreSQL we can use the full text search features
- vector = SearchVector("summary")
- query = SearchQuery(search_term_fulltext)
- recent_qs = Bugscache.objects.annotate(rank=SearchRank(vector, query)).order_by(
- "-rank", "id"
- )[0:max_size]
+ # On PostgreSQL we can use the ORM directly, but NOT the full text search
+ # as the ranking algorithm expects english words, not paths
+ # So we use standard pattern matching AND trigram similarity to compare suite of characters
+ # instead of words
+ # Django already escapes special characters, so we do not need to handle that here
+ recent_qs = (
+ Bugscache.objects.filter(summary__icontains=search_term)
+ .annotate(similarity=TrigramSimilarity("summary", search_term))
+ .order_by("-similarity")[0:max_size]
+ )
exclude_fields = ["modified", "processed_update"]
try:
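
For readers who have not used Django's trigram support before, the PostgreSQL branch introduced above can be sketched in isolation. This is a minimal sketch, assuming PostgreSQL with the pg_trgm extension enabled by the 0031_trigram_extension migration; `Bugscache` and its `summary` field come from the patched models.py:

    from django.contrib.postgres.search import TrigramSimilarity

    from treeherder.model.models import Bugscache


    def search_bug_summaries(search_term, max_size=50):
        # Narrow candidates with a plain substring match; the ORM escapes the
        # term, so no manual sanitizing is needed on this code path.
        return (
            Bugscache.objects.filter(summary__icontains=search_term)
            # Rank by character-level trigram overlap instead of word-based
            # full-text ranking, which copes better with path-like summaries.
            .annotate(similarity=TrigramSimilarity("summary", search_term))
            .order_by("-similarity")[:max_size]
        )

Because TrigramSimilarity compares character trigrams rather than English words, long test-path summaries still rank sensibly, which is the behaviour the previous full-text ranking lacked.
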
From 32f294c91c250831e9a1856bb5d59f287f9243aa Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 1 Apr 2024 02:50:04 +0000
Subject: [PATCH 102/128] Update dependency docs/mkdocs-material to v9.5.16
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 45358536c7b..5012f177336 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ description = "Defaut package, used for development or readthedocs"
[project.optional-dependencies]
docs = [
"mkdocs==1.5.3",
- "mkdocs-material==9.5.15",
+ "mkdocs-material==9.5.16",
"mdx_truly_sane_lists==1.3",
]
From cbadddb4974756612f0ebee2851078f64de7e8a3 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 1 Apr 2024 06:23:00 +0000
Subject: [PATCH 103/128] Update python Docker tag to v3.9.19
---
docker/Dockerfile | 2 +-
docker/dev.Dockerfile | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 36a01f66dee..cc527d80e9c 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -15,7 +15,7 @@ RUN yarn build
## Backend stage
-FROM python:3.9.18-slim-bullseye
+FROM python:3.9.19-slim-bullseye
# libmysqlclient-dev is required for the mysqlclient Python package.
RUN apt-get update && apt-get install -y --no-install-recommends \
diff --git a/docker/dev.Dockerfile b/docker/dev.Dockerfile
index 03f5d3ac34e..b840298c609 100644
--- a/docker/dev.Dockerfile
+++ b/docker/dev.Dockerfile
@@ -1,4 +1,4 @@
-FROM python:3.9.18-bullseye
+FROM python:3.9.19-bullseye
# Variables that are not specific to a particular environment.
ENV NEW_RELIC_CONFIG_FILE newrelic.ini
From 3f4b7cfa96380899e218616b2b7c41b298310109 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 8 Apr 2024 00:53:01 +0000
Subject: [PATCH 104/128] Update dependency docs/mkdocs-material to v9.5.17
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 5012f177336..ddffefa1843 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ description = "Defaut package, used for development or readthedocs"
[project.optional-dependencies]
docs = [
"mkdocs==1.5.3",
- "mkdocs-material==9.5.16",
+ "mkdocs-material==9.5.17",
"mdx_truly_sane_lists==1.3",
]
From 933f9abfef6e60dfec6baf26d6d6c841322a13f5 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 8 Apr 2024 06:52:42 +0000
Subject: [PATCH 105/128] Update redis Docker tag to v7.0.15
---
docker-compose.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docker-compose.yml b/docker-compose.yml
index 75f9e10cabf..2edb4f2cf30 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -100,7 +100,7 @@ services:
redis:
container_name: redis
# https://hub.docker.com/_/redis/
- image: redis:7.0.14-alpine
+ image: redis:7.0.15-alpine
# Messages after starting the redis-server
# WARNING Memory overcommit must be enabled! Without it, a background save or replication may fail under low memory condition. Being disabled, it can can also cause failures without low memory condition, see https://github.com/jemalloc/jemalloc/issues/1328. To fix this issue add 'vm.overcommit_memory = 1' to /etc/sysctl.conf and then reboot or run the command 'sysctl vm.overcommit_memory=1' for this to take effect.
# Hide Redis `notice` log level startup output spam.
From 544381fac1e68156f9283913963444670c9c7e85 Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Mon, 8 Apr 2024 17:04:37 +0200
Subject: [PATCH 106/128] Bug 1890349 - only check for a crash signature to
 identify a failure line as a crash
Until now, 'application crashed' had been expected as the prefix of the crash
signature. Other text can be logged in its place, e.g. 'not thread-safe'.
---
ui/shared/BugFiler.jsx | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/ui/shared/BugFiler.jsx b/ui/shared/BugFiler.jsx
index 52f0ca7a1a6..add2635ccfb 100644
--- a/ui/shared/BugFiler.jsx
+++ b/ui/shared/BugFiler.jsx
@@ -287,9 +287,9 @@ export class BugFilerClass extends React.Component {
}
getCrashSignatures(failureLine) {
- const crashRegex = /application crashed \[@ (.+)\]/g;
- const crash = failureLine.search.match(crashRegex);
- return crash ? [crash[0].split('application crashed ')[1]] : [];
+ const crashRegex = /(\[@ .+\])/g;
+ const crashSignatures = failureLine.search.match(crashRegex);
+ return crashSignatures ? [crashSignatures[0]] : [];
}
getUnhelpfulSummaryReason(summary) {
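
The behaviour change above can be illustrated with an equivalent regular-expression check (a sketch in Python for brevity; the production code is JavaScript, and the log line and signature below are made up):

    import re

    old_pattern = re.compile(r"application crashed \[@ (.+)\]")
    new_pattern = re.compile(r"(\[@ .+\])")

    # Hypothetical failure line where the signature is not preceded by
    # 'application crashed'.
    line = "PROCESS-CRASH | not thread-safe [@ mozilla::dom::Example::Run]"

    assert old_pattern.search(line) is None  # old check misses this line
    assert new_pattern.search(line).group(1) == "[@ mozilla::dom::Example::Run]"
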
From a24031e665a392558a77fa6715755c0bfe1e4b05 Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Mon, 8 Apr 2024 21:50:44 +0200
Subject: [PATCH 107/128] Bug 1890441 - also log revision of ingested push
---
treeherder/etl/push_loader.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/treeherder/etl/push_loader.py b/treeherder/etl/push_loader.py
index e41687c19b9..4e3f0021272 100644
--- a/treeherder/etl/push_loader.py
+++ b/treeherder/etl/push_loader.py
@@ -46,7 +46,7 @@ def process(self, message_body, exchange, root_url):
transformed_data = transformer.transform(repo.name)
logger.info(
- "Storing push for %s %s %s", repo.name, transformer.repo_url, transformer.branch
+ f"Storing push for repository '{repo.name}' revision '{transformed_data['revision']}' branch '{transformer.branch}' url {transformer.repo_url}",
)
store_push_data(repo, [transformed_data])
From 256b9d83d4f9f6e0aab8c02735d9fae1078f2699 Mon Sep 17 00:00:00 2001
From: florinbilt <160469273+florinbilt@users.noreply.github.com>
Date: Tue, 9 Apr 2024 17:38:36 +0300
Subject: [PATCH 108/128] Added a copy icon for the alert's ID in Graphs View.
(#7973)
* Added a copy icon for the alert's ID in Graphs View.
---
tests/ui/mock/performance_summary.json | 8 ++
.../graphs-view/graphs_view_test.jsx | 97 +++++++++++++------
ui/perfherder/graphs/GraphTooltip.jsx | 10 ++
3 files changed, 87 insertions(+), 28 deletions(-)
diff --git a/tests/ui/mock/performance_summary.json b/tests/ui/mock/performance_summary.json
index 626e10485ce..53f24b7b940 100644
--- a/tests/ui/mock/performance_summary.json
+++ b/tests/ui/mock/performance_summary.json
@@ -54,6 +54,14 @@
"push_timestamp": "2019-08-11T09:56:40",
"push_id": 530521,
"revision": "e8fe8b0af1a7a0c64d28b4e08a9c5509d916759f"
+ },
+ {
+ "job_id": 260895769,
+ "id": 887279309,
+ "value": 211.24042970178886,
+ "push_timestamp": "2019-08-09T21:57:48",
+ "push_id": 477720,
+ "revision": "3afb892abb74c6d281f3e66431408cbb2e16b8c5"
}
]
}
diff --git a/tests/ui/perfherder/graphs-view/graphs_view_test.jsx b/tests/ui/perfherder/graphs-view/graphs_view_test.jsx
index 521600f83ac..6b77cff7b6d 100644
--- a/tests/ui/perfherder/graphs-view/graphs_view_test.jsx
+++ b/tests/ui/perfherder/graphs-view/graphs_view_test.jsx
@@ -6,6 +6,7 @@ import {
waitFor,
waitForElementToBeRemoved,
} from '@testing-library/react';
+import { BrowserRouter as Router } from 'react-router-dom';
import fetchMock from 'fetch-mock';
import queryString from 'query-string';
@@ -17,6 +18,7 @@ import {
import GraphsViewControls from '../../../../ui/perfherder/graphs/GraphsViewControls';
import repos from '../../mock/repositories';
import testData from '../../mock/performance_summary.json';
+import alertSummaries from '../../mock/alert_summaries.json';
import changelogData from '../../mock/infra_changelog.json';
import seriesData from '../../mock/performance_signature_formatted.json';
import seriesData2 from '../../mock/performance_signature_formatted2.json';
@@ -33,7 +35,7 @@ fetchMock.mock(`begin:${getApiUrl(endpoints.changelog)}`, changelogData);
const graphData = createGraphData(
testData,
- [],
+ alertSummaries,
[...graphColors],
[...graphSymbols],
[...commonAlerts],
@@ -76,37 +78,40 @@ const graphsViewControls = (
hasNoData = true,
replicates = false,
handleUpdateStateParams,
+ selectedDataPoint = {
+ signature_id: testData[0].signature_id,
+ dataPointId: testData[0].data[1].id,
+ },
) => {
const updateStateParams = () => {};
return render(
- {}}
- hasNoData={hasNoData}
- frameworks={frameworks}
- projects={repos}
- timeRange={{ value: 172800, text: 'Last two days' }}
- options={{}}
- getTestData={() => {}}
- testData={data}
- getInitialData={() => ({
- platforms,
- })}
- getSeriesData={mockGetSeriesData}
- showModal={Boolean(mockShowModal)}
- toggle={mockShowModal}
- selectedDataPoint={{
- signature_id: testData[0].signature_id,
- dataPointId: testData[0].data[1].id,
- }}
- user={{ isStaff: true }}
- updateData={() => {}}
- replicates={replicates}
- />,
+
+ {}}
+ hasNoData={hasNoData}
+ frameworks={frameworks}
+ projects={repos}
+ timeRange={{ value: 172800, text: 'Last two days' }}
+ options={{}}
+ getTestData={() => {}}
+ testData={data}
+ getInitialData={() => ({
+ platforms,
+ })}
+ getSeriesData={mockGetSeriesData}
+ showModal={Boolean(mockShowModal)}
+ toggle={mockShowModal}
+ selectedDataPoint={selectedDataPoint}
+ user={{ isStaff: true }}
+ updateData={() => {}}
+ replicates={replicates}
+ />
+ ,
);
};
afterEach(cleanup);
@@ -216,6 +221,42 @@ test('Using select query param displays tooltip for correct datapoint', async ()
expect(platform).toHaveTextContent(testData[0].platform);
});
+test("Alert's ID can be copied to clipboard from tooltip", async () => {
+ const selectedDataPoint = {
+ signature_id: testData[0].signature_id,
+ dataPointId: testData[0].data[5].id,
+ };
+
+ Object.assign(navigator, {
+ clipboard: {
+ writeText: jest.fn(),
+ },
+ });
+ const { getByTestId, queryByTitle } = graphsViewControls(
+ graphData,
+ false,
+ undefined,
+ undefined,
+ selectedDataPoint,
+ );
+
+ const graphContainer = await waitFor(() => getByTestId('graphContainer'));
+ expect(graphContainer).toBeInTheDocument();
+
+ const graphTooltip = await waitFor(() => getByTestId('graphTooltip'));
+ expect(graphTooltip).toBeInTheDocument();
+
+ const copyIdButton = await waitFor(() =>
+ queryByTitle('Copy Alert Summary id'),
+ );
+ expect(copyIdButton).toBeInTheDocument();
+
+ fireEvent.click(copyIdButton);
+
+ const alertID = alertSummaries[0].id;
+ expect(navigator.clipboard.writeText).toHaveBeenCalledWith(`${alertID}`);
+});
+
test('Using select query param displays tooltip for correct datapoint with replicates', async () => {
const { getByTestId, getByText } = graphsViewControls(graphData, false, true);
diff --git a/ui/perfherder/graphs/GraphTooltip.jsx b/ui/perfherder/graphs/GraphTooltip.jsx
index e394848fde4..7e825529bd9 100644
--- a/ui/perfherder/graphs/GraphTooltip.jsx
+++ b/ui/perfherder/graphs/GraphTooltip.jsx
@@ -276,6 +276,11 @@ const GraphTooltip = ({
)}
+
)}
{isCommonAlert && !dataPointDetails.alertSummary && (
@@ -292,6 +297,11 @@ const GraphTooltip = ({
{` Alert # ${datum.commonAlert.id}`}
{` - ${commonAlertStatus} `}
+
Common alert
)}
From 6389d25abaa8802ee0c8dbdf5249736981cc199b Mon Sep 17 00:00:00 2001
From: myeongjun
Date: Fri, 5 Apr 2024 23:40:37 +0900
Subject: [PATCH 109/128] Change the function name to comply with PEP 8 naming rule N802
---
treeherder/etl/management/commands/ingest.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/treeherder/etl/management/commands/ingest.py b/treeherder/etl/management/commands/ingest.py
index 1dfedc03a60..73297f35d3c 100644
--- a/treeherder/etl/management/commands/ingest.py
+++ b/treeherder/etl/management/commands/ingest.py
@@ -19,7 +19,7 @@
from treeherder.etl.job_loader import JobLoader, MissingPushError
from treeherder.etl.push_loader import PushLoader
from treeherder.etl.pushlog import HgPushlogProcess, last_push_id_from_server
-from treeherder.etl.taskcluster_pulse.handler import EXCHANGE_EVENT_MAP, handleMessage
+from treeherder.etl.taskcluster_pulse.handler import EXCHANGE_EVENT_MAP, handle_message
from treeherder.model.models import Repository
from treeherder.utils import github
from treeherder.utils.github import fetch_json
@@ -148,7 +148,7 @@ async def handle_task(task, root_url):
}
try:
- task_runs = await handleMessage(message, task["task"])
+ task_runs = await handle_message(message, task["task"])
except Exception as e:
logger.exception(e)
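
For context, the pep8-naming / Ruff rule N802 referenced in the commit message requires lowercase_with_underscores function names; a minimal illustration (the names mirror the rename in this patch, the bodies are placeholders):

    def handleMessage(message):  # old camelCase spelling, flagged by N802
        return message


    def handle_message(message):  # snake_case spelling used after this patch
        return message
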
From 99afab7e175aabeaf2bb1868663c63f921be9700 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 8 Apr 2024 02:06:45 +0000
Subject: [PATCH 110/128] Bump dockerflow from 2024.3.0 to 2024.4.1
Bumps [dockerflow](https://github.com/mozilla-services/python-dockerflow) from 2024.3.0 to 2024.4.1.
- [Release notes](https://github.com/mozilla-services/python-dockerflow/releases)
- [Changelog](https://github.com/mozilla-services/python-dockerflow/blob/main/docs/changelog.rst)
- [Commits](https://github.com/mozilla-services/python-dockerflow/compare/2024.3.0...2024.4.1)
---
updated-dependencies:
- dependency-name: dockerflow
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 718304dd31a..8b18c5956ba 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -37,7 +37,7 @@ django-cache-memoize==0.2.0 # Imported as cache_memoize
mozci[cache]==2.4.0
# Dockerflow/CloudOps APIs
-dockerflow==2024.3.0
+dockerflow==2024.4.1
# Measuring noise of perf data
moz-measure-noise==2.60.1
diff --git a/requirements/common.txt b/requirements/common.txt
index 2e0e1f70298..baba69ae640 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -392,9 +392,9 @@ djangorestframework==3.15.1 \
--hash=sha256:3ccc0475bce968608cf30d07fb17d8e52d1d7fc8bfe779c905463200750cbca6 \
--hash=sha256:f88fad74183dfc7144b2756d0d2ac716ea5b4c7c9840995ac3bfd8ec034333c1
# via -r requirements/common.in
-dockerflow==2024.3.0 \
- --hash=sha256:96678b00636dfd61fccf08f5f4102d0444e43bec3f8850175a060d8e83559e4c \
- --hash=sha256:e8cea4df7f7342aa551c9bfa12b401adfd3e28f7f928fc545ae657fc5614ebda
+dockerflow==2024.4.1 \
+ --hash=sha256:839b0b691ba258bb28bc775bfafbf709b7258053f8305bdc7b958995126ad433 \
+ --hash=sha256:c2910cc7d80f0890c818a3ea6d54340a0dff32a54159c6fa333d1111cd650ba0
# via -r requirements/common.in
ecdsa==0.18.0 \
--hash=sha256:190348041559e21b22a1d65cee485282ca11a6f81d503fddb84d5017e9ed1e49 \
From dbd801f7734e77280acecf9291971239c23a242c Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Wed, 3 Apr 2024 17:38:51 +0200
Subject: [PATCH 111/128] Revert djangorestframework from 3.15.1 to 3.14.0
The API documentation page was broken: https://github.com/encode/django-rest-framework/issues/9291
Revert "Bump djangorestframework from 3.15.0 to 3.15.1"
This reverts commit 995ce0322734bdec2c641b304a80b275549824f3.
Revert "Bump djangorestframework from 3.14.0 to 3.15.0"
This reverts commit 9b9a11f1e93bccdf7086f6fa6d6f07db9ed0643b.
---
requirements/common.in | 2 +-
requirements/common.txt | 10 +++++++---
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 8b18c5956ba..de4730b35e1 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -12,7 +12,7 @@ mysqlclient==2.2.4 # Required by Django
psycopg2-binary==2.9.9
jsonschema==4.21.1 # import jsonschema
-djangorestframework==3.15.1 # Imported as rest_framework
+djangorestframework==3.14.0 # Imported as rest_framework
django-cors-headers==4.3.1 # Listed as 3rd party app on settings.py
mozlog==8.0.0
diff --git a/requirements/common.txt b/requirements/common.txt
index baba69ae640..ab99635ec8d 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -388,9 +388,9 @@ django-redis==5.4.0 \
--hash=sha256:6a02abaa34b0fea8bf9b707d2c363ab6adc7409950b2db93602e6cb292818c42 \
--hash=sha256:ebc88df7da810732e2af9987f7f426c96204bf89319df4c6da6ca9a2942edd5b
# via -r requirements/common.in
-djangorestframework==3.15.1 \
- --hash=sha256:3ccc0475bce968608cf30d07fb17d8e52d1d7fc8bfe779c905463200750cbca6 \
- --hash=sha256:f88fad74183dfc7144b2756d0d2ac716ea5b4c7c9840995ac3bfd8ec034333c1
+djangorestframework==3.14.0 \
+ --hash=sha256:579a333e6256b09489cbe0a067e66abe55c6595d8926be6b99423786334350c8 \
+ --hash=sha256:eb63f58c9f218e1a7d064d17a70751f528ed4e1d35547fdade9aaf4cd103fd08
# via -r requirements/common.in
dockerflow==2024.4.1 \
--hash=sha256:839b0b691ba258bb28bc775bfafbf709b7258053f8305bdc7b958995126ad433 \
@@ -1037,6 +1037,10 @@ python-jose[pycryptodome]==3.3.0 \
python3-memcached==1.51 \
--hash=sha256:7cbe5951d68eef69d948b7a7ed7decfbd101e15e7f5be007dcd1219ccc584859
# via mozci
+pytz==2024.1 \
+ --hash=sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812 \
+ --hash=sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319
+ # via djangorestframework
pyyaml==6.0.1 \
--hash=sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5 \
--hash=sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc \
From d976c5f66fb4b31048bcc84bdc8a8dd7cc6c6499 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 28 Mar 2024 17:32:25 +0000
Subject: [PATCH 112/128] Bump express from 4.18.2 to 4.19.2
Bumps [express](https://github.com/expressjs/express) from 4.18.2 to 4.19.2.
- [Release notes](https://github.com/expressjs/express/releases)
- [Changelog](https://github.com/expressjs/express/blob/master/History.md)
- [Commits](https://github.com/expressjs/express/compare/4.18.2...4.19.2)
---
updated-dependencies:
- dependency-name: express
dependency-type: indirect
...
Signed-off-by: dependabot[bot]
---
yarn.lock | 53 +++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 43 insertions(+), 10 deletions(-)
diff --git a/yarn.lock b/yarn.lock
index fa52488313a..d65e7f5b15f 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -3309,7 +3309,25 @@ bn.js@^5.0.0, bn.js@^5.2.1:
resolved "https://registry.npmjs.org/bn.js/-/bn.js-5.2.1.tgz"
integrity sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==
-body-parser@1.20.1, body-parser@^1.19.0:
+body-parser@1.20.2:
+ version "1.20.2"
+ resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.2.tgz#6feb0e21c4724d06de7ff38da36dad4f57a747fd"
+ integrity sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==
+ dependencies:
+ bytes "3.1.2"
+ content-type "~1.0.5"
+ debug "2.6.9"
+ depd "2.0.0"
+ destroy "1.2.0"
+ http-errors "2.0.0"
+ iconv-lite "0.4.24"
+ on-finished "2.4.1"
+ qs "6.11.0"
+ raw-body "2.5.2"
+ type-is "~1.6.18"
+ unpipe "1.0.0"
+
+body-parser@^1.19.0:
version "1.20.1"
resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.1.tgz#b1812a8912c195cd371a3ee5e66faa2338a5c668"
integrity sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==
@@ -3865,6 +3883,11 @@ content-type@~1.0.4:
resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.4.tgz#e138cc75e040c727b1966fe5e5f8c9aee256fe3b"
integrity sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==
+content-type@~1.0.5:
+ version "1.0.5"
+ resolved "https://registry.yarnpkg.com/content-type/-/content-type-1.0.5.tgz#8b773162656d1d1086784c8f23a54ce6d73d7918"
+ integrity sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==
+
convert-source-map@^1.4.0, convert-source-map@^1.6.0, convert-source-map@^1.7.0:
version "1.8.0"
resolved "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.8.0.tgz"
@@ -3882,10 +3905,10 @@ cookie-signature@1.0.6:
resolved "https://registry.yarnpkg.com/cookie-signature/-/cookie-signature-1.0.6.tgz#e303a882b342cc3ee8ca513a79999734dab3ae2c"
integrity sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==
-cookie@0.5.0:
- version "0.5.0"
- resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.5.0.tgz#d1f5d71adec6558c58f389987c366aa47e994f8b"
- integrity sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==
+cookie@0.6.0:
+ version "0.6.0"
+ resolved "https://registry.yarnpkg.com/cookie/-/cookie-0.6.0.tgz#2798b04b071b0ecbff0dbb62a505a8efa4e19051"
+ integrity sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==
cookiejar@^2.1.3:
version "2.1.4"
@@ -5090,16 +5113,16 @@ expect@^28.1.3:
jest-util "^28.1.3"
express@^4.17.1, express@^4.17.3:
- version "4.18.2"
- resolved "https://registry.yarnpkg.com/express/-/express-4.18.2.tgz#3fabe08296e930c796c19e3c516979386ba9fd59"
- integrity sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==
+ version "4.19.2"
+ resolved "https://registry.yarnpkg.com/express/-/express-4.19.2.tgz#e25437827a3aa7f2a827bc8171bbbb664a356465"
+ integrity sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==
dependencies:
accepts "~1.3.8"
array-flatten "1.1.1"
- body-parser "1.20.1"
+ body-parser "1.20.2"
content-disposition "0.5.4"
content-type "~1.0.4"
- cookie "0.5.0"
+ cookie "0.6.0"
cookie-signature "1.0.6"
debug "2.6.9"
depd "2.0.0"
@@ -8775,6 +8798,16 @@ raw-body@2.5.1:
iconv-lite "0.4.24"
unpipe "1.0.0"
+raw-body@2.5.2:
+ version "2.5.2"
+ resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.2.tgz#99febd83b90e08975087e8f1f9419a149366b68a"
+ integrity sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==
+ dependencies:
+ bytes "3.1.2"
+ http-errors "2.0.0"
+ iconv-lite "0.4.24"
+ unpipe "1.0.0"
+
react-dates@21.5.1:
version "21.5.1"
resolved "https://registry.npmjs.org/react-dates/-/react-dates-21.5.1.tgz"
From fb23de42114622cce32effe93d16a88be49c055f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Thu, 28 Mar 2024 02:32:49 +0000
Subject: [PATCH 113/128] Bump newrelic from 9.7.1 to 9.8.0
Bumps [newrelic](https://github.com/newrelic/newrelic-python-agent) from 9.7.1 to 9.8.0.
- [Release notes](https://github.com/newrelic/newrelic-python-agent/releases)
- [Commits](https://github.com/newrelic/newrelic-python-agent/compare/v9.7.1...v9.8.0)
---
updated-dependencies:
- dependency-name: newrelic
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 60 ++++++++++++++++++++---------------------
2 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index de4730b35e1..6f71964e64a 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -5,7 +5,7 @@ Django==4.1.13
celery==5.3.6 # celery needed for data ingestion
cached-property==1.5.2 # needed for kombu with --require-hashes
simplejson==3.19.2 # import simplejson
-newrelic==9.7.1
+newrelic==9.8.0
certifi==2024.2.2
mysqlclient==2.2.4 # Required by Django
diff --git a/requirements/common.txt b/requirements/common.txt
index ab99635ec8d..98dc35e93eb 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -816,36 +816,36 @@ mysqlclient==2.2.4 \
--hash=sha256:d43987bb9626096a302ca6ddcdd81feaeca65ced1d5fe892a6a66b808326aa54 \
--hash=sha256:e1ebe3f41d152d7cb7c265349fdb7f1eca86ccb0ca24a90036cde48e00ceb2ab
# via -r requirements/common.in
-newrelic==9.7.1 \
- --hash=sha256:0798e85b738a24843da9aa0e4175b42441d9b10af6b17ee8de137cf83d5bb222 \
- --hash=sha256:08a062f6b0483de744b3085e70b88ccb7599ba4f242977bf1cbb602ed4385980 \
- --hash=sha256:0d85b7d08e7fe130951de1f2225e69c321ece620da18bbc4385905c72e0aa51b \
- --hash=sha256:102f6e8e65e6fa5044a0d433066a46ce5b382f96335576dfa16217c1855ebc2b \
- --hash=sha256:166e365a6334d6b591a6af91e07dd191f043fb10893474ad1b60ed0b99a78f4e \
- --hash=sha256:1c1d24be69d5316af7be99d6c87686d900e708bc421ca55977cb021fd29de0bd \
- --hash=sha256:28343d596de29b7b0adcbcd2b872a1657d85c2467482792d8190814faec46c80 \
- --hash=sha256:2a41340ce1d58bcc4dda39784d244e8a42c11b150665d8bec0527ea88bf02f53 \
- --hash=sha256:365ec3b3795f43a70895652dff4ece28c11ecf0337aabf8da762b746cfda4c2e \
- --hash=sha256:53273e8fbea3df48265b15ce3a5aee8e7950036a8463e973ef949d79072b5d74 \
- --hash=sha256:56a6b322b228ee0b3afbb59d686fad0c58b6b284fc6bb3227e7746ca0d458858 \
- --hash=sha256:5824e599b972b5931caa13f5f34eb60df4cf3c7048604d0efe34b9ad41923739 \
- --hash=sha256:6361b3be951e3520ea2b138ca56783b03f8a6c85085885fcf597d1ee28c59153 \
- --hash=sha256:647a5f4ff3514e7e2acbbc884319499b0ae90ec4ec93e83e7f41474cf8666e0e \
- --hash=sha256:783c560439a08715eb00c79be64cd9471ce843f07b194592d15978909a8c85ad \
- --hash=sha256:7c62934b8ae447bda4273a2dc4c3a65b0c7dc995af611b5003e75a34faa926f2 \
- --hash=sha256:7d7f79bd828ab56446b0be941a6acb0af287ad97fe4ac5052c59ad0518f5456d \
- --hash=sha256:82076bf4e84a1378ccd1c699e8890a8f469c3ebeec110ae5c4f03cfab25cd09b \
- --hash=sha256:8d61c374e4d698ee36eab80e33c485054514bd6f57c25cd8e2c9f0a40f159ebc \
- --hash=sha256:8dba147f13457bd22b4509bfa700ce12bfcb8294f8b9abd4c66d4e90f90cefc2 \
- --hash=sha256:8faecb1fce2a25201f5496ad96181933e60b4b833f99dc143a84d5d2494a46f6 \
- --hash=sha256:b4f741be5f34e17caa57c72924045776a865efd5f9deab6ebb3b7c4f1190273b \
- --hash=sha256:bbbd24bd0054a978b5d6c1be7794289c760b20d44fea526e3fb1078d060a6fe7 \
- --hash=sha256:d143adbc38df9a576a34220d18f564208ddf88f691a1aaaf7b78c7fc653f2428 \
- --hash=sha256:dfd572a79e1848b67307be25e15f0804b9e9fc30a0d669a0fad668f3678a8869 \
- --hash=sha256:e06c7991367e848c4f8e08e7197b0a71c145a8e32c5f92158ed64b4f6d5b4a22 \
- --hash=sha256:ed776ced21ebf57379cb38885485460ffd7df29cca9666197876d2045849b927 \
- --hash=sha256:feb83b708350947846fd898debb1538ab5e0458ff56627f01e2174e73c0fe079 \
- --hash=sha256:fed29c65100966472bef1603c479b3b60be47078810a0d1c407e5ee133e606d7
+newrelic==9.8.0 \
+ --hash=sha256:15ab0ff9c2526c73ad3538cb2451a651dc577369c049a379abedb946a3357a52 \
+ --hash=sha256:195640b93c3d8bc38fda5b8302313a98afc1e43dec6853355d59ba1a5441d5cb \
+ --hash=sha256:1bab7dbc54e08c7a20db455e9cd635cc2a0ac48f8cdcadf6b1b40c7c6a279b7a \
+ --hash=sha256:235d51008f2dfb63c783b5980e26214d71cdd22c8b89fe8b2640228ed2403e08 \
+ --hash=sha256:26f75e0bb749314a18e43aba54802e3753a08a446b326ebf6653f9ea2b66da63 \
+ --hash=sha256:294955819d2741fa36978a287698de7128bd18c9a6e9322b96b8c71967aa1c5d \
+ --hash=sha256:373ceaf8876019cbc8893c0d3eac979aab26a8476902e409937b34b5581510d1 \
+ --hash=sha256:3a4e0f3203fc983801b27a3f65a83323ee5108ba6f482bb3c82691d44223098a \
+ --hash=sha256:50ba95cfe20a0960911f6aa2c612e5b2e35e959d9ad43766eed8a2ea8377c606 \
+ --hash=sha256:5b917026043fac50e687c82cd9922759d849320bfd467daffee6392b7e874875 \
+ --hash=sha256:606e437b51bd6e41fc358d1c9895f0739bb3af7c5889180e05d56e1c2c3774a6 \
+ --hash=sha256:6a1e188aa29c8f8a9d12388778caab36b921a4b200475056df5895f7bd95fee0 \
+ --hash=sha256:6a78fa9a8938fc45c78e354818a4e9dd9be87c74df55ad38094afe1056d75488 \
+ --hash=sha256:6bcec1a613bb523278bf2356e207b882eee105f4226b06b62fc7e38e4d30189f \
+ --hash=sha256:6ecea5d54187aba8d911e7aaa0e3f7e8d332619d3837c90020c6fa41f03abe04 \
+ --hash=sha256:708dc11213cac17eaae2a0151a9c49febdbdeba0f20ca9e572b148ab77c5af97 \
+ --hash=sha256:75f2fc6260b4a049afa4229c20abfcbda3f6a0add79606fe7e0566af0b56b1b6 \
+ --hash=sha256:8978eb4a4f43af7f778b63251d4931519023ee1f188ff62a148e6f467ba925c5 \
+ --hash=sha256:8ff08e87f7706329a0b56996a49827135dfaa6e556c8ea11246af7085aea5d4d \
+ --hash=sha256:a1627e7ddcbb2f4c1b4157261188926e3da3db77be268c7306967cebc724aa92 \
+ --hash=sha256:a5d9e8f491c88ad2cb71f3d8b3de73540a497b4d2c2f0178573fabf0faf0676a \
+ --hash=sha256:ab58426f223d407354830d38adc00ca30e563cb629ba1deef20f02e8ae5a880a \
+ --hash=sha256:bd18c4b9b1e9cf3550ab19c384ec59a31e5f7832360d9d13a3de62fae171ce17 \
+ --hash=sha256:bffc9617cae1e3950c6eeb990691e0526217044f5a46a6f39b99d3459fb14430 \
+ --hash=sha256:d8968c1bbe2cb04bc0f07e56d3988dae22e535ee3ba585f6370384363f4b1dfb \
+ --hash=sha256:e4c0976af8c5d21bd331bff5b9ec780afcdb3a8bd8cbf1c4969d545b4fb2fa46 \
+ --hash=sha256:eb76abc5ef093b804c39c187241d71a7a708debd386484966f85b88fb2c79a63 \
+ --hash=sha256:f274ec466271f8c1ef76fdcf4cdf0a3dfe146aa696626e52bac452d432056de0 \
+ --hash=sha256:fab06501364befff11cb3e99426a2baba046e0c72e86b7a42c5319bd3a19d470
# via -r requirements/common.in
numpy==1.26.3 \
--hash=sha256:02f98011ba4ab17f46f80f7f8f1c291ee7d855fcef0a5a98db80767a468c85cd \
From d1faced7a39ef963c5efbad50e5c73387299cffd Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 9 Apr 2024 02:19:41 +0000
Subject: [PATCH 114/128] Bump taskcluster from 61.0.0 to 64.2.4
Bumps [taskcluster](https://github.com/taskcluster/taskcluster) from 61.0.0 to 64.2.4.
- [Release notes](https://github.com/taskcluster/taskcluster/releases)
- [Changelog](https://github.com/taskcluster/taskcluster/blob/main/CHANGELOG.md)
- [Commits](https://github.com/taskcluster/taskcluster/compare/v61.0.0...v64.2.4)
---
updated-dependencies:
- dependency-name: taskcluster
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 6f71964e64a..1d4a2497a11 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -25,7 +25,7 @@ python-dateutil==2.9.0.post0
django-filter==23.5 # Listed in DEFAULT_FILTER_BACKENDS on settings.py
django-redis==5.4.0 # Listed in CACHES on settings.py
-taskcluster==61.0.0 # import taskcluster
+taskcluster==64.2.4 # import taskcluster
python-jose[pycryptodome]==3.3.0 # from jose import jwt
furl==2.1.3 # Imported as furl
diff --git a/requirements/common.txt b/requirements/common.txt
index 98dc35e93eb..701943c5f7c 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -1469,9 +1469,9 @@ tabulate==0.9.0 \
--hash=sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c \
--hash=sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f
# via mozci
-taskcluster==61.0.0 \
- --hash=sha256:69022458e59bf3228394184c811dfe9983ac72bfd8c40133dee849c3a1f73a4d \
- --hash=sha256:acf6b64a7cf5db7fca1f626c0d9526df298a5ea5450831695844836eebec009d
+taskcluster==64.2.4 \
+ --hash=sha256:3247b81ecee6a889efb2fbdb40cdb3cadf4c91c2739c3d4b04f3a532d7ebc5fb \
+ --hash=sha256:8f33dbd688ba3cd937884adee24bc782de207366449ca576fea20be296a6f1f6
# via
# -r requirements/common.in
# mozci
From 7b5fa21720d7efe6ef5adab43f0ab931472f896f Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Fri, 12 Jan 2024 22:28:10 +0100
Subject: [PATCH 115/128] Bug 1890656 - use new Treestatus on non-production
instances
---
tests/ui/job-view/SecondaryNavBar_test.jsx | 17 +++++----
treeherder/middleware.py | 2 +-
ui/helpers/constants.js | 42 ++++++++++++++++++++++
ui/job-view/headerbars/InfraMenu.jsx | 3 +-
ui/job-view/headerbars/WatchedRepo.jsx | 6 ++--
ui/models/treeStatus.js | 28 ++++++++++++---
6 files changed, 82 insertions(+), 16 deletions(-)
diff --git a/tests/ui/job-view/SecondaryNavBar_test.jsx b/tests/ui/job-view/SecondaryNavBar_test.jsx
index 390c725d78e..5e5799e06b7 100644
--- a/tests/ui/job-view/SecondaryNavBar_test.jsx
+++ b/tests/ui/job-view/SecondaryNavBar_test.jsx
@@ -18,14 +18,17 @@ const history = createBrowserHistory();
const router = { location: history.location };
beforeEach(() => {
- fetchMock.get('https://treestatus.mozilla-releng.net/trees/autoland', {
- result: {
- message_of_the_day: '',
- reason: '',
- status: 'open',
- tree: 'autoland',
+ fetchMock.get(
+ 'https://treestatus.dev.lando.nonprod.cloudops.mozgcp.net/trees/autoland',
+ {
+ result: {
+ message_of_the_day: '',
+ reason: '',
+ status: 'open',
+ tree: 'autoland',
+ },
},
- });
+ );
});
afterEach(() => {
diff --git a/treeherder/middleware.py b/treeherder/middleware.py
index 320228fdf95..bda3f927c24 100644
--- a/treeherder/middleware.py
+++ b/treeherder/middleware.py
@@ -18,7 +18,7 @@
"font-src 'self' https://fonts.gstatic.com",
# The `data:` is required for images that were inlined by webpack's url-loader (as an optimisation).
"img-src 'self' data:",
- "connect-src 'self' https://community-tc.services.mozilla.com https://firefox-ci-tc.services.mozilla.com https://*.taskcluster-artifacts.net https://taskcluster-artifacts.net https://treestatus.mozilla-releng.net https://bugzilla.mozilla.org https://auth.mozilla.auth0.com https://stage.taskcluster.nonprod.cloudops.mozgcp.net https://insights-api.newrelic.com https://prototype.treeherder.nonprod.cloudops.mozgcp.net https://treeherder.allizom.org",
+ "connect-src 'self' https://community-tc.services.mozilla.com https://firefox-ci-tc.services.mozilla.com https://*.taskcluster-artifacts.net https://taskcluster-artifacts.net https://treestatus.mozilla-releng.net https://treestatus.dev.lando.nonprod.cloudops.mozgcp.net https://bugzilla.mozilla.org https://auth.mozilla.auth0.com https://stage.taskcluster.nonprod.cloudops.mozgcp.net https://insights-api.newrelic.com https://prototype.treeherder.nonprod.cloudops.mozgcp.net https://treeherder.allizom.org",
# Required since auth0-js performs session renewals in an iframe.
"frame-src 'self' https://auth.mozilla.auth0.com",
]
diff --git a/ui/helpers/constants.js b/ui/helpers/constants.js
index 21124220b32..29f2853402f 100644
--- a/ui/helpers/constants.js
+++ b/ui/helpers/constants.js
@@ -1,6 +1,48 @@
import treeFavicon from '../img/tree_open.png';
import closedTreeFavicon from '../img/tree_closed.png';
+export const thHosts = {
+ production: {
+ host: 'treeherder.mozilla.org',
+ treestatus: {
+ uiUrl: 'https://treestatus.mozilla-releng.net/static/ui/treestatus/',
+ apiUrl: 'https://treestatus.mozilla-releng.net/',
+ },
+ },
+ stage: {
+ host: 'treeherder.allizom.org',
+ treestatus: {
+ uiUrl: 'https://ui.dev.lando.nonprod.cloudops.mozgcp.net/treestatus/',
+ apiUrl: 'https://treestatus.dev.lando.nonprod.cloudops.mozgcp.net/',
+ },
+ },
+ prototype: {
+ host: 'prototype.treeherder.nonprod.cloudops.mozgcp.net',
+ treestatus: {
+ uiUrl: 'https://ui.dev.lando.nonprod.cloudops.mozgcp.net/treestatus/',
+ apiUrl: 'https://treestatus.dev.lando.nonprod.cloudops.mozgcp.net/',
+ },
+ },
+ localhost: {
+ host: 'localhost',
+ treestatus: {
+ uiUrl: 'https://ui.dev.lando.nonprod.cloudops.mozgcp.net/treestatus/',
+ apiUrl: 'https://treestatus.dev.lando.nonprod.cloudops.mozgcp.net/',
+ },
+ },
+ default: {
+ host: null,
+ treestatus: {
+ uiUrl: 'https://treestatus.mozilla-releng.net/static/ui/treestatus/',
+ apiUrl: 'https://treestatus.mozilla-releng.net/',
+ /*
+ uiUrl: 'https://ui.dev.lando.nonprod.cloudops.mozgcp.net/treestatus/',
+ apiUrl: 'https://treestatus.dev.lando.nonprod.cloudops.mozgcp.net/',
+ */
+ },
+ },
+};
+
// TODO: This file is a handy catch-all, but we could likely move some of these
// to a specific helper or into the classes that use them.
diff --git a/ui/job-view/headerbars/InfraMenu.jsx b/ui/job-view/headerbars/InfraMenu.jsx
index f5ac64b5650..ebf97be7622 100644
--- a/ui/job-view/headerbars/InfraMenu.jsx
+++ b/ui/job-view/headerbars/InfraMenu.jsx
@@ -7,6 +7,7 @@ import {
} from 'reactstrap';
import { prodFirefoxRootUrl } from '../../taskcluster-auth-callback/constants';
+import { treeStatusUiUrl } from '../../models/treeStatus';
const InfraMenu = () => (
@@ -26,7 +27,7 @@ const InfraMenu = () => (
Taskcluster Workers
diff --git a/ui/job-view/headerbars/WatchedRepo.jsx b/ui/job-view/headerbars/WatchedRepo.jsx
index 755c95072e1..79882afc931 100644
--- a/ui/job-view/headerbars/WatchedRepo.jsx
+++ b/ui/job-view/headerbars/WatchedRepo.jsx
@@ -19,7 +19,7 @@ import {
} from 'reactstrap';
import { Link } from 'react-router-dom';
-import TreeStatusModel from '../../models/treeStatus';
+import TreeStatusModel, { treeStatusUiUrl } from '../../models/treeStatus';
import BugLinkify from '../../shared/BugLinkify';
import { updateRepoParams } from '../../helpers/location';
@@ -171,7 +171,7 @@ export default class WatchedRepo extends React.Component {
@@ -199,7 +199,7 @@ export default class WatchedRepo extends React.Component {
)}
diff --git a/ui/models/treeStatus.js b/ui/models/treeStatus.js
index 57a6764944e..cd7fd93dfb1 100644
--- a/ui/models/treeStatus.js
+++ b/ui/models/treeStatus.js
@@ -1,8 +1,29 @@
-const uri = 'https://treestatus.mozilla-releng.net/trees/';
+import { thHosts } from '../helpers/constants';
+
+let _treeStatusApiUrl;
+let _treeStatusUiUrl;
+for (const [hostPrettyName, config] of Object.entries(thHosts)) {
+ if (config.host === window.location.hostname) {
+ _treeStatusApiUrl = thHosts[hostPrettyName].treestatus.apiUrl;
+ _treeStatusUiUrl = thHosts[hostPrettyName].treestatus.uiUrl;
+ }
+}
+if (_treeStatusApiUrl === undefined) {
+ _treeStatusApiUrl = thHosts.default.treestatus.apiUrl;
+}
+if (_treeStatusUiUrl === undefined) {
+ _treeStatusUiUrl = thHosts.default.treestatus.uiUrl;
+}
+
+export function treeStatusUiUrl() {
+ return _treeStatusUiUrl;
+}
+
+const apiUrl = `${_treeStatusApiUrl}trees/`;
export default class TreeStatusModel {
static get(repoName) {
- return fetch(`${uri}${repoName}`)
+ return fetch(`${apiUrl}${repoName}`)
.then(async (resp) => {
if (resp.ok) {
return resp.json();
@@ -24,8 +45,7 @@ export default class TreeStatusModel {
Promise.resolve({
result: {
status: 'error',
- message_of_the_day:
- 'Unable to connect to the https://treestatus.mozilla-releng.net/trees/ API',
+ message_of_the_day: `Unable to connect to the ${apiUrl} API`,
reason: reason.toString(),
tree: repoName,
},
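For reference, a minimal consumer of the host-aware Treestatus resolution introduced by this patch. This is an illustrative sketch, not part of the change; the import path and the 'autoland' repo name are assumptions.

import TreeStatusModel, { treeStatusUiUrl } from './ui/models/treeStatus';

// The model resolves the API host from window.location.hostname via thHosts
// (falling back to the `default` entry) and fetches `${apiUrl}trees/autoland`.
TreeStatusModel.get('autoland').then(({ result }) => {
  console.log(`${result.tree} is ${result.status}; status UI at ${treeStatusUiUrl()}`);
});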
From 0bf73311fd9ce68a52e9432901cb2d74439ad603 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 9 Apr 2024 22:33:57 +0000
Subject: [PATCH 116/128] Bump tar from 6.1.13 to 6.2.1
Bumps [tar](https://github.com/isaacs/node-tar) from 6.1.13 to 6.2.1.
- [Release notes](https://github.com/isaacs/node-tar/releases)
- [Changelog](https://github.com/isaacs/node-tar/blob/main/CHANGELOG.md)
- [Commits](https://github.com/isaacs/node-tar/compare/v6.1.13...v6.2.1)
---
updated-dependencies:
- dependency-name: tar
dependency-type: indirect
...
Signed-off-by: dependabot[bot]
---
yarn.lock | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/yarn.lock b/yarn.lock
index d65e7f5b15f..cbfbfc1ef1d 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -7696,6 +7696,11 @@ minipass@^4.0.0:
dependencies:
yallist "^4.0.0"
+minipass@^5.0.0:
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/minipass/-/minipass-5.0.0.tgz#3e9788ffb90b694a5d0ec94479a45b5d8738133d"
+ integrity sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==
+
"minipass@^5.0.0 || ^6.0.2":
version "6.0.2"
resolved "https://registry.yarnpkg.com/minipass/-/minipass-6.0.2.tgz#542844b6c4ce95b202c0995b0a471f1229de4c81"
@@ -10201,13 +10206,13 @@ tar-stream@^3.1.5:
streamx "^2.15.0"
tar@^6.1.11:
- version "6.1.13"
- resolved "https://registry.yarnpkg.com/tar/-/tar-6.1.13.tgz#46e22529000f612180601a6fe0680e7da508847b"
- integrity sha512-jdIBIN6LTIe2jqzay/2vtYLlBHa3JF42ot3h1dW8Q0PaAG4v8rm0cvpVePtau5C6OKXGGcgO9q2AMNSWxiLqKw==
+ version "6.2.1"
+ resolved "https://registry.yarnpkg.com/tar/-/tar-6.2.1.tgz#717549c541bc3c2af15751bea94b1dd068d4b03a"
+ integrity sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==
dependencies:
chownr "^2.0.0"
fs-minipass "^2.0.0"
- minipass "^4.0.0"
+ minipass "^5.0.0"
minizlib "^2.1.1"
mkdirp "^1.0.3"
yallist "^4.0.0"
From 81a3a0007f92cb33c697ccd08ec4cad1873d2c50 Mon Sep 17 00:00:00 2001
From: florinbilt <160469273+florinbilt@users.noreply.github.com>
Date: Thu, 11 Apr 2024 17:06:54 +0300
Subject: [PATCH 117/128] Change the message from "Copy summary" to be similar
 to comment 0… (#7998)
* Change the message from "Copy summary" to be similar to comment 0 when filing a new regression bug
* Added specific messages for regressions and improvements.
---
ui/perfherder/alerts/StatusDropdown.jsx | 27 ++++++++++++++++++++++---
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/ui/perfherder/alerts/StatusDropdown.jsx b/ui/perfherder/alerts/StatusDropdown.jsx
index 1a2d4acc980..a267f391051 100644
--- a/ui/perfherder/alerts/StatusDropdown.jsx
+++ b/ui/perfherder/alerts/StatusDropdown.jsx
@@ -161,18 +161,39 @@ export default class StatusDropdown extends React.Component {
};
copySummary = async () => {
- const { filteredAlerts, alertSummary, frameworks } = this.props;
+ const { alertSummary, repoModel, filteredAlerts, frameworks } = this.props;
const { browsertimeAlertsExtraData } = this.state;
const textualSummary = new TextualSummary(
frameworks,
filteredAlerts,
alertSummary,
- true,
+ null,
await browsertimeAlertsExtraData.enrichAndRetrieveAlerts(),
);
+
+ const templateArgs = {
+ bugType: 'defect',
+ framework: getFrameworkName(frameworks, alertSummary.framework),
+ revision: alertSummary.revision,
+ revisionHref: repoModel.getPushLogHref(alertSummary.revision),
+ alertHref: `${window.location.origin}/perfherder/alerts?id=${alertSummary.id}`,
+ alertSummary: textualSummary.markdown,
+ alertSummaryId: alertSummary.id,
+ };
+ const containsRegression = textualSummary.alerts.some(
+ (item) => item.is_regression === true,
+ );
+ const templateText = containsRegression
+ ? 'Perfherder has detected a {{ framework }} performance change from push [{{ revision }}]({{ revisionHref }}).\n\n{{ alertSummary }}\n\nAs author of one of the patches included in that push, we need your help to address this regression.\nDetails of the alert can be found in the [alert summary]({{ alertHref }}), including links to graphs and comparisons for each of the affected tests. Please follow our [guide to handling regression bugs](https://wiki.mozilla.org/TestEngineering/Performance/Handling_regression_bugs) and **let us know your plans within 3 business days, or the patch(es) may be backed out** in accordance with our [regression policy](https://www.mozilla.org/en-US/about/governance/policies/regressions/).\n\nIf you need the profiling jobs you can trigger them yourself from treeherder job view or ask a sheriff to do that for you.\n\nYou can run these tests on try with `./mach try perf --alert {{ alertSummaryId }}`\n\nFor more information on performance sheriffing please see our [FAQ](https://wiki.mozilla.org/TestEngineering/Performance/FAQ).\n'
+ : 'Perfherder has detected a {{ framework }} performance change from push [{{ revision }}]({{ revisionHref }}).\n\n{{ alertSummary }}\n\nDetails of the alert can be found in the [alert summary]({{ alertHref }}), including links to graphs and comparisons for each of the affected tests.\n\nIf you need the profiling jobs you can trigger them yourself from treeherder job view or ask a sheriff to do that for you.\n\nYou can run these tests on try with `./mach try perf --alert {{ alertSummaryId }}`\n\nFor more information on performance sheriffing please see our [FAQ](https://wiki.mozilla.org/TestEngineering/Performance/FAQ).\n';
+
+ templateSettings.interpolate = /{{([\s\S]+?)}}/g;
+ const fillTemplate = template(templateText);
+ const commentText = fillTemplate(templateArgs);
+
// can't access the clipboardData on event unless it's done from react's
// onCopy, onCut or onPaste props so using this workaround
- navigator.clipboard.writeText(textualSummary.markdown).then(() => {});
+ navigator.clipboard.writeText(commentText).then(() => {});
};
toggle = (state) => {
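For reference, a standalone sketch of the mustache-style interpolation used by copySummary above, assuming lodash's template and templateSettings (the patch imports them outside the hunk shown here); the template text and arguments below are simplified examples.

import { template, templateSettings } from 'lodash';

// Switch lodash interpolation to {{ ... }} placeholders, as in the patch.
templateSettings.interpolate = /{{([\s\S]+?)}}/g;
const fillTemplate = template(
  'Perfherder has detected a {{ framework }} performance change from push {{ revision }}.',
);
console.log(fillTemplate({ framework: 'browsertime', revision: 'abcdef012345' }));
// -> Perfherder has detected a browsertime performance change from push abcdef012345.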
From 49f69992a4aca259933f8d0a689e278888f84c7e Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Thu, 11 Apr 2024 13:37:19 +0200
Subject: [PATCH 118/128] Bug 1890981 - omit process ID of asserting process in
failure tab and for matching against known issues
---
tests/model/test_error_summary.py | 12 ++++++++++++
treeherder/model/error_summary.py | 8 +++++---
2 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/tests/model/test_error_summary.py b/tests/model/test_error_summary.py
index ca2e66ffc00..116c5e4e63d 100644
--- a/tests/model/test_error_summary.py
+++ b/tests/model/test_error_summary.py
@@ -19,6 +19,18 @@
"/builds/worker/checkouts/gecko/xpcom/threads/TimerThread.cpp:434"
),
),
+ (
+ (
+ "17:22:43 INFO - PID 2944 | [6132] Assertion failure: XRE_IsGPUProcess()"
+ " || gfxPlatform::GetPlatform()->DevicesInitialized(),"
+ " at /builds/worker/checkouts/gecko/gfx/thebes/DeviceManagerDx.cpp:1320"
+ ),
+ (
+ "Assertion failure: XRE_IsGPUProcess()"
+ " || gfxPlatform::GetPlatform()->DevicesInitialized(),"
+ " at /builds/worker/checkouts/gecko/gfx/thebes/DeviceManagerDx.cpp:1320"
+ ),
+ ),
)
diff --git a/treeherder/model/error_summary.py b/treeherder/model/error_summary.py
index d174fda4531..ac4d7abb579 100644
--- a/treeherder/model/error_summary.py
+++ b/treeherder/model/error_summary.py
@@ -20,7 +20,8 @@
CRASH_RE = re.compile(r".+ application crashed \[@ (.+)\] \|.+")
MOZHARNESS_RE = re.compile(r"^\d+:\d+:\d+[ ]+(?:DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL) - [ ]?")
MARIONETTE_RE = re.compile(r".+marionette([_harness/]?).*/test_.+.py ([A-Za-z]+).+")
-PROCESS_ID_RE = re.compile(r"(?:PID \d+|GECKO\(\d+\)) \| +")
+PROCESS_ID_RE_1 = re.compile(r"(?:PID \d+|GECKO\(\d+\)) \| +")
+PROCESS_ID_RE_2 = re.compile(r"^\[\d+\] +")
REFTEST_RE = re.compile(r"\s+[=!]=\s+.*")
PREFIX_PATTERN = r"^(TEST-UNEXPECTED-\S+|PROCESS-CRASH)\s+\|\s+"
@@ -196,9 +197,10 @@ def bug_suggestions_line(
def get_cleaned_line(line):
- """Strip possible mozharness bits from the given line."""
+ """Strip possible unwanted information from the given line."""
line_to_clean = MOZHARNESS_RE.sub("", line).strip()
- return PROCESS_ID_RE.sub("", line_to_clean)
+ line_to_clean = PROCESS_ID_RE_1.sub("", line_to_clean)
+ return PROCESS_ID_RE_2.sub("", line_to_clean)
def cache_clean_error_line(line):
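For reference, a standalone sketch of the two-pass cleaning added above; the regexes are copied from the patch and the sample line mirrors the new test case.

import re

MOZHARNESS_RE = re.compile(r"^\d+:\d+:\d+[ ]+(?:DEBUG|INFO|WARNING|ERROR|CRITICAL|FATAL) - [ ]?")
PROCESS_ID_RE_1 = re.compile(r"(?:PID \d+|GECKO\(\d+\)) \| +")
PROCESS_ID_RE_2 = re.compile(r"^\[\d+\] +")

def get_cleaned_line(line):
    """Strip mozharness prefixes and process IDs from the given line."""
    line_to_clean = MOZHARNESS_RE.sub("", line).strip()
    line_to_clean = PROCESS_ID_RE_1.sub("", line_to_clean)
    return PROCESS_ID_RE_2.sub("", line_to_clean)

print(get_cleaned_line("17:22:43 INFO - PID 2944 | [6132] Assertion failure: XRE_IsGPUProcess()"))
# -> Assertion failure: XRE_IsGPUProcess()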
From 97660c542451bb7a3a0d683b508928e975a15655 Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 15 Apr 2024 21:23:12 +0000
Subject: [PATCH 119/128] Update codecov orb to v4
---
.circleci/config.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 5fd7ddf2602..85bb1629bce 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -151,7 +151,7 @@ jobs:
orbs:
node: circleci/node@4.1.1
docker: circleci/docker@1.5.0
- codecov: codecov/codecov@3.2.5
+ codecov: codecov/codecov@4.1.0
version: 2.1
workflows:
run-tests:
From 4572baa6e30219896e0592df0776d388c1b7bd0c Mon Sep 17 00:00:00 2001
From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com>
Date: Mon, 15 Apr 2024 01:28:54 +0000
Subject: [PATCH 120/128] Update dependency @mozilla/glean to v5
---
package.json | 2 +-
yarn.lock | 14 ++++----------
2 files changed, 5 insertions(+), 11 deletions(-)
diff --git a/package.json b/package.json
index bb1eed9b511..9a1c0762173 100644
--- a/package.json
+++ b/package.json
@@ -16,7 +16,7 @@
"@fortawesome/free-regular-svg-icons": "6.2.1",
"@fortawesome/free-solid-svg-icons": "6.2.1",
"@fortawesome/react-fontawesome": "0.1.19",
- "@mozilla/glean": "2.0.5",
+ "@mozilla/glean": "5.0.0",
"@types/prop-types": "*",
"@types/react": "*",
"@types/react-dom": "*",
diff --git a/yarn.lock b/yarn.lock
index cbfbfc1ef1d..55e833ec5af 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1833,13 +1833,12 @@
dependencies:
"@types/whatwg-streams" "^0.0.7"
-"@mozilla/glean@2.0.5":
- version "2.0.5"
- resolved "https://registry.yarnpkg.com/@mozilla/glean/-/glean-2.0.5.tgz#20679c244c10710a54b98a72e0b20773932a3bdd"
- integrity sha512-9OKK+bUuhfIrDOt5CK/mXQdZ76uSjX68H25JlX0yXBw0b8k+Ft1vdA7ToTjlL4vkgrOymhPLfwMCmEsc1/kX5Q==
+"@mozilla/glean@5.0.0":
+ version "5.0.0"
+ resolved "https://registry.yarnpkg.com/@mozilla/glean/-/glean-5.0.0.tgz#ded7afd60ce7b3a63714545f1b410eb129c1d922"
+ integrity sha512-DstT6PI8QTixlULf7Is277sEmpX61Dz+z/7rtxQBBFwGaPsvWJJsMsnNysNtSzSESIlcXoPbdPZ9RoXOcHuSlA==
dependencies:
fflate "^0.8.0"
- jose "^4.0.4"
tslib "^2.3.1"
uuid "^9.0.0"
@@ -7045,11 +7044,6 @@ joi@^17.11.0:
"@sideway/formula" "^3.0.1"
"@sideway/pinpoint" "^2.0.0"
-jose@^4.0.4:
- version "4.15.5"
- resolved "https://registry.yarnpkg.com/jose/-/jose-4.15.5.tgz#6475d0f467ecd3c630a1b5dadd2735a7288df706"
- integrity sha512-jc7BFxgKPKi94uOvEmzlSWFFe2+vASyXaKUpdQKatWAESU2MWjDfFf0fdfc83CDKcA5QecabZeNLyfhe3yKNkg==
-
js-cookie@3.0.5:
version "3.0.5"
resolved "https://registry.yarnpkg.com/js-cookie/-/js-cookie-3.0.5.tgz#0b7e2fd0c01552c58ba86e0841f94dc2557dcdbc"
From 0a4acf5f043933bd2cc3b6c7edeb3118087102f4 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 15 Apr 2024 02:51:41 +0000
Subject: [PATCH 121/128] Bump black from 24.3.0 to 24.4.0
Bumps [black](https://github.com/psf/black) from 24.3.0 to 24.4.0.
- [Release notes](https://github.com/psf/black/releases)
- [Changelog](https://github.com/psf/black/blob/main/CHANGES.md)
- [Commits](https://github.com/psf/black/compare/24.3.0...24.4.0)
---
updated-dependencies:
- dependency-name: black
dependency-type: direct:development
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/dev.in | 2 +-
requirements/dev.txt | 46 ++++++++++++++++++++++----------------------
2 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/requirements/dev.in b/requirements/dev.in
index 63cbeccdc23..3842f3d2eab 100644
--- a/requirements/dev.in
+++ b/requirements/dev.in
@@ -16,7 +16,7 @@ pytest-watch==4.2.0
# Required by django-extension's runserver_plus command.
pytest-django==4.8.0
pytest==8.1.1
-black==24.3.0
+black==24.4.0
shellcheck-py==0.10.0.1
# To test async code
diff --git a/requirements/dev.txt b/requirements/dev.txt
index e9dd5d7c518..ae2fe03a533 100644
--- a/requirements/dev.txt
+++ b/requirements/dev.txt
@@ -24,29 +24,29 @@ betamax-serializers==0.2.1 \
--hash=sha256:1b23c46429c40a8873682854c88d805c787c72d252f3fa0c858e9c300682ceac \
--hash=sha256:345c419b1b73171f2951c62ac3c701775ac4b76e13e86464ebf0ff2a978e4949
# via -r requirements/dev.in
-black==24.3.0 \
- --hash=sha256:2818cf72dfd5d289e48f37ccfa08b460bf469e67fb7c4abb07edc2e9f16fb63f \
- --hash=sha256:41622020d7120e01d377f74249e677039d20e6344ff5851de8a10f11f513bf93 \
- --hash=sha256:4acf672def7eb1725f41f38bf6bf425c8237248bb0804faa3965c036f7672d11 \
- --hash=sha256:4be5bb28e090456adfc1255e03967fb67ca846a03be7aadf6249096100ee32d0 \
- --hash=sha256:4f1373a7808a8f135b774039f61d59e4be7eb56b2513d3d2f02a8b9365b8a8a9 \
- --hash=sha256:56f52cfbd3dabe2798d76dbdd299faa046a901041faf2cf33288bc4e6dae57b5 \
- --hash=sha256:65b76c275e4c1c5ce6e9870911384bff5ca31ab63d19c76811cb1fb162678213 \
- --hash=sha256:65c02e4ea2ae09d16314d30912a58ada9a5c4fdfedf9512d23326128ac08ac3d \
- --hash=sha256:6905238a754ceb7788a73f02b45637d820b2f5478b20fec82ea865e4f5d4d9f7 \
- --hash=sha256:79dcf34b33e38ed1b17434693763301d7ccbd1c5860674a8f871bd15139e7837 \
- --hash=sha256:7bb041dca0d784697af4646d3b62ba4a6b028276ae878e53f6b4f74ddd6db99f \
- --hash=sha256:7d5e026f8da0322b5662fa7a8e752b3fa2dac1c1cbc213c3d7ff9bdd0ab12395 \
- --hash=sha256:9f50ea1132e2189d8dff0115ab75b65590a3e97de1e143795adb4ce317934995 \
- --hash=sha256:a0c9c4a0771afc6919578cec71ce82a3e31e054904e7197deacbc9382671c41f \
- --hash=sha256:aadf7a02d947936ee418777e0247ea114f78aff0d0959461057cae8a04f20597 \
- --hash=sha256:b5991d523eee14756f3c8d5df5231550ae8993e2286b8014e2fdea7156ed0959 \
- --hash=sha256:bf21b7b230718a5f08bd32d5e4f1db7fc8788345c8aea1d155fc17852b3410f5 \
- --hash=sha256:c45f8dff244b3c431b36e3224b6be4a127c6aca780853574c00faf99258041eb \
- --hash=sha256:c7ed6668cbbfcd231fa0dc1b137d3e40c04c7f786e626b405c62bcd5db5857e4 \
- --hash=sha256:d7de8d330763c66663661a1ffd432274a2f92f07feeddd89ffd085b5744f85e7 \
- --hash=sha256:e19cb1c6365fd6dc38a6eae2dcb691d7d83935c10215aef8e6c38edee3f77abd \
- --hash=sha256:e2af80566f43c85f5797365077fb64a393861a3730bd110971ab7a0c94e873e7
+black==24.4.0 \
+ --hash=sha256:1bb9ca06e556a09f7f7177bc7cb604e5ed2d2df1e9119e4f7d2f1f7071c32e5d \
+ --hash=sha256:21f9407063ec71c5580b8ad975653c66508d6a9f57bd008bb8691d273705adcd \
+ --hash=sha256:4396ca365a4310beef84d446ca5016f671b10f07abdba3e4e4304218d2c71d33 \
+ --hash=sha256:44d99dfdf37a2a00a6f7a8dcbd19edf361d056ee51093b2445de7ca09adac965 \
+ --hash=sha256:5cd5b4f76056cecce3e69b0d4c228326d2595f506797f40b9233424e2524c070 \
+ --hash=sha256:64578cf99b6b46a6301bc28bdb89f9d6f9b592b1c5837818a177c98525dbe397 \
+ --hash=sha256:64e60a7edd71fd542a10a9643bf369bfd2644de95ec71e86790b063aa02ff745 \
+ --hash=sha256:652e55bb722ca026299eb74e53880ee2315b181dfdd44dca98e43448620ddec1 \
+ --hash=sha256:6644f97a7ef6f401a150cca551a1ff97e03c25d8519ee0bbc9b0058772882665 \
+ --hash=sha256:6ad001a9ddd9b8dfd1b434d566be39b1cd502802c8d38bbb1ba612afda2ef436 \
+ --hash=sha256:71d998b73c957444fb7c52096c3843875f4b6b47a54972598741fe9a7f737fcb \
+ --hash=sha256:74eb9b5420e26b42c00a3ff470dc0cd144b80a766128b1771d07643165e08d0e \
+ --hash=sha256:75a2d0b4f5eb81f7eebc31f788f9830a6ce10a68c91fbe0fade34fff7a2836e6 \
+ --hash=sha256:7852b05d02b5b9a8c893ab95863ef8986e4dda29af80bbbda94d7aee1abf8702 \
+ --hash=sha256:7f2966b9b2b3b7104fca9d75b2ee856fe3fdd7ed9e47c753a4bb1a675f2caab8 \
+ --hash=sha256:8e5537f456a22cf5cfcb2707803431d2feeb82ab3748ade280d6ccd0b40ed2e8 \
+ --hash=sha256:d4e71cdebdc8efeb6deaf5f2deb28325f8614d48426bed118ecc2dcaefb9ebf3 \
+ --hash=sha256:dae79397f367ac8d7adb6c779813328f6d690943f64b32983e896bcccd18cbad \
+ --hash=sha256:e3a3a092b8b756c643fe45f4624dbd5a389f770a4ac294cf4d0fce6af86addaf \
+ --hash=sha256:eb949f56a63c5e134dfdca12091e98ffb5fd446293ebae123d10fc1abad00b9e \
+ --hash=sha256:f07b69fda20578367eaebbd670ff8fc653ab181e1ff95d84497f9fa20e7d0641 \
+ --hash=sha256:f95cece33329dc4aa3b0e1a771c41075812e46cf3d6e3f1dfe3d91ff09826ed2
# via -r requirements/dev.in
build==1.0.3 \
--hash=sha256:538aab1b64f9828977f84bc63ae570b060a8ed1be419e7870b8b4fc5e6ea553b \
From 915f8dec3ec6e2afbb57936ee09a7766e8a65c83 Mon Sep 17 00:00:00 2001
From: florinbilt <160469273+florinbilt@users.noreply.github.com>
Date: Tue, 16 Apr 2024 23:20:55 +0300
Subject: [PATCH 122/128] Improve the assign input on Alerts View. (#8002)
---
tests/ui/perfherder/alerts-view/alerts_test.jsx | 8 ++++----
ui/perfherder/alerts/Assignee.jsx | 14 +++++++++-----
2 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/tests/ui/perfherder/alerts-view/alerts_test.jsx b/tests/ui/perfherder/alerts-view/alerts_test.jsx
index 48a43b9030b..19bede40e3a 100644
--- a/tests/ui/perfherder/alerts-view/alerts_test.jsx
+++ b/tests/ui/perfherder/alerts-view/alerts_test.jsx
@@ -516,10 +516,10 @@ test('setting an assignee on an already assigned summary is possible', async ()
fireEvent.click(unassignedBadge);
const inputField = await waitFor(() =>
- getByDisplayValue('mozilla-ldap/test_user@mozilla.com'),
+ getByDisplayValue('test_user@mozilla.com'),
);
fireEvent.change(inputField, {
- target: { value: 'mozilla-ldap/test_another_user@mozilla.com' },
+ target: { value: 'test_another_user@mozilla.com' },
});
// pressing 'Enter' has some issues on react-testing-library;
// found workaround on https://github.com/testing-library/react-testing-library/issues/269
@@ -536,7 +536,7 @@ test("'Escape' from partially editted assignee does not update original assignee
fireEvent.click(unassignedBadge);
const inputField = await waitFor(() =>
- getByDisplayValue('mozilla-ldap/test_user@mozilla.com'),
+ getByDisplayValue('test_user@mozilla.com'),
);
fireEvent.change(inputField, {
target: { value: 'mozilla-ldap/test_another_' },
@@ -559,7 +559,7 @@ test("Clicking on 'Take' prefills with logged in user", async () => {
fireEvent.click(takeButton);
// ensure it preffiled input field
- await waitFor(() => getByDisplayValue('mozilla-ldap/test_user@mozilla.com'));
+ await waitFor(() => getByDisplayValue('test_user@mozilla.com'));
});
test('Alerts retriggered by the backfill bot have a title', async () => {
diff --git a/ui/perfherder/alerts/Assignee.jsx b/ui/perfherder/alerts/Assignee.jsx
index f4046bf7faa..fdc3e7e3d92 100644
--- a/ui/perfherder/alerts/Assignee.jsx
+++ b/ui/perfherder/alerts/Assignee.jsx
@@ -33,7 +33,9 @@ export default class Assignee extends React.Component {
inEditMode: true,
// input prefills with this field, so
// we must have it prepared
- newAssigneeUsername: assigneeUsername,
+ newAssigneeUsername: assigneeUsername
+ ? assigneeUsername.split('/')[1]
+ : assigneeUsername,
});
}
};
@@ -43,11 +45,13 @@ export default class Assignee extends React.Component {
};
pressedEnter = async (event) => {
- event.preventDefault();
if (event.key === 'Enter') {
+ event.preventDefault();
const { updateAssignee } = this.props;
- const newAssigneeUsername = event.target.value;
-
+ const newAssigneeUsername =
+ event.target.value !== ''
+ ? `mozilla-ldap/${event.target.value}`
+ : event.target.value;
const { failureStatus } = await updateAssignee(newAssigneeUsername);
if (!failureStatus) {
@@ -63,7 +67,7 @@ export default class Assignee extends React.Component {
const { user } = this.props;
this.setState({
- newAssigneeUsername: user.username,
+ newAssigneeUsername: user.username.split('/')[1],
inEditMode: true,
});
};
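For reference, the prefix handling this patch introduces, extracted into a standalone illustrative sketch (the helper names are hypothetical): the input field displays the bare address, and the mozilla-ldap/ prefix is re-added before the assignee is saved.

// Display value shown in the input: drop the "mozilla-ldap/" prefix if present.
const toDisplayValue = (assigneeUsername) =>
  assigneeUsername ? assigneeUsername.split('/')[1] : assigneeUsername;

// Value sent to updateAssignee on Enter: re-add the prefix for non-empty input.
const toStoredValue = (inputValue) =>
  inputValue !== '' ? `mozilla-ldap/${inputValue}` : inputValue;

console.log(toDisplayValue('mozilla-ldap/test_user@mozilla.com')); // test_user@mozilla.com
console.log(toStoredValue('test_another_user@mozilla.com')); // mozilla-ldap/test_another_user@mozilla.com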
From 98805d7cd6b237394751f107ce9d7dd7277d8192 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 02:33:37 +0000
Subject: [PATCH 123/128] Bump newrelic from 9.8.0 to 9.9.0
Bumps [newrelic](https://github.com/newrelic/newrelic-python-agent) from 9.8.0 to 9.9.0.
- [Release notes](https://github.com/newrelic/newrelic-python-agent/releases)
- [Commits](https://github.com/newrelic/newrelic-python-agent/compare/v9.8.0...v9.9.0)
---
updated-dependencies:
- dependency-name: newrelic
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 60 ++++++++++++++++++++---------------------
2 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 1d4a2497a11..81cc034e76a 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -5,7 +5,7 @@ Django==4.1.13
celery==5.3.6 # celery needed for data ingestion
cached-property==1.5.2 # needed for kombu with --require-hashes
simplejson==3.19.2 # import simplejson
-newrelic==9.8.0
+newrelic==9.9.0
certifi==2024.2.2
mysqlclient==2.2.4 # Required by Django
diff --git a/requirements/common.txt b/requirements/common.txt
index 701943c5f7c..8eea783b78b 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -816,36 +816,36 @@ mysqlclient==2.2.4 \
--hash=sha256:d43987bb9626096a302ca6ddcdd81feaeca65ced1d5fe892a6a66b808326aa54 \
--hash=sha256:e1ebe3f41d152d7cb7c265349fdb7f1eca86ccb0ca24a90036cde48e00ceb2ab
# via -r requirements/common.in
-newrelic==9.8.0 \
- --hash=sha256:15ab0ff9c2526c73ad3538cb2451a651dc577369c049a379abedb946a3357a52 \
- --hash=sha256:195640b93c3d8bc38fda5b8302313a98afc1e43dec6853355d59ba1a5441d5cb \
- --hash=sha256:1bab7dbc54e08c7a20db455e9cd635cc2a0ac48f8cdcadf6b1b40c7c6a279b7a \
- --hash=sha256:235d51008f2dfb63c783b5980e26214d71cdd22c8b89fe8b2640228ed2403e08 \
- --hash=sha256:26f75e0bb749314a18e43aba54802e3753a08a446b326ebf6653f9ea2b66da63 \
- --hash=sha256:294955819d2741fa36978a287698de7128bd18c9a6e9322b96b8c71967aa1c5d \
- --hash=sha256:373ceaf8876019cbc8893c0d3eac979aab26a8476902e409937b34b5581510d1 \
- --hash=sha256:3a4e0f3203fc983801b27a3f65a83323ee5108ba6f482bb3c82691d44223098a \
- --hash=sha256:50ba95cfe20a0960911f6aa2c612e5b2e35e959d9ad43766eed8a2ea8377c606 \
- --hash=sha256:5b917026043fac50e687c82cd9922759d849320bfd467daffee6392b7e874875 \
- --hash=sha256:606e437b51bd6e41fc358d1c9895f0739bb3af7c5889180e05d56e1c2c3774a6 \
- --hash=sha256:6a1e188aa29c8f8a9d12388778caab36b921a4b200475056df5895f7bd95fee0 \
- --hash=sha256:6a78fa9a8938fc45c78e354818a4e9dd9be87c74df55ad38094afe1056d75488 \
- --hash=sha256:6bcec1a613bb523278bf2356e207b882eee105f4226b06b62fc7e38e4d30189f \
- --hash=sha256:6ecea5d54187aba8d911e7aaa0e3f7e8d332619d3837c90020c6fa41f03abe04 \
- --hash=sha256:708dc11213cac17eaae2a0151a9c49febdbdeba0f20ca9e572b148ab77c5af97 \
- --hash=sha256:75f2fc6260b4a049afa4229c20abfcbda3f6a0add79606fe7e0566af0b56b1b6 \
- --hash=sha256:8978eb4a4f43af7f778b63251d4931519023ee1f188ff62a148e6f467ba925c5 \
- --hash=sha256:8ff08e87f7706329a0b56996a49827135dfaa6e556c8ea11246af7085aea5d4d \
- --hash=sha256:a1627e7ddcbb2f4c1b4157261188926e3da3db77be268c7306967cebc724aa92 \
- --hash=sha256:a5d9e8f491c88ad2cb71f3d8b3de73540a497b4d2c2f0178573fabf0faf0676a \
- --hash=sha256:ab58426f223d407354830d38adc00ca30e563cb629ba1deef20f02e8ae5a880a \
- --hash=sha256:bd18c4b9b1e9cf3550ab19c384ec59a31e5f7832360d9d13a3de62fae171ce17 \
- --hash=sha256:bffc9617cae1e3950c6eeb990691e0526217044f5a46a6f39b99d3459fb14430 \
- --hash=sha256:d8968c1bbe2cb04bc0f07e56d3988dae22e535ee3ba585f6370384363f4b1dfb \
- --hash=sha256:e4c0976af8c5d21bd331bff5b9ec780afcdb3a8bd8cbf1c4969d545b4fb2fa46 \
- --hash=sha256:eb76abc5ef093b804c39c187241d71a7a708debd386484966f85b88fb2c79a63 \
- --hash=sha256:f274ec466271f8c1ef76fdcf4cdf0a3dfe146aa696626e52bac452d432056de0 \
- --hash=sha256:fab06501364befff11cb3e99426a2baba046e0c72e86b7a42c5319bd3a19d470
+newrelic==9.9.0 \
+ --hash=sha256:04cd3fc7087513a4786908a9b0a7475db154c888ac9d2de251f8abb93353a4a7 \
+ --hash=sha256:1743df0e72bf559b61112763a71c35e5d456a509ba4dde2bdbaa88d894f1812a \
+ --hash=sha256:2182673a01f04a0ed4a0bb3f49e8fa869044c37558c8f409c96de13105f58a57 \
+ --hash=sha256:26713f779cf23bb29c6b408436167059d0c8ee1475810dc1b0efe858fe578f25 \
+ --hash=sha256:2ffcbdb706de1bbaa36acd0c9b487a08895a420020bcf775be2d80c7df29b56c \
+ --hash=sha256:4356690cbc9e5e662defa2af15aba05901cf9b285a8d02aeb90718e84dd6d779 \
+ --hash=sha256:47efe8fc4dc14b0f265d635639f94ef5a071b5e5ebbf41ecf0946fce071c49e6 \
+ --hash=sha256:4cf5d85a4a8e8de6e0aeb7a76afad9264d0c0dc459bc3f1a8b02a0e48a9a26da \
+ --hash=sha256:57451807f600331a94ad1ec66e3981523b0516d5b2dd9fd078e7f3d6c9228913 \
+ --hash=sha256:5b40155f9712e75c00d03cdec8272f6cf8eaa05ea2ed22bb5ecc96ed86017b47 \
+ --hash=sha256:63b230dd5d093874c0137eddc738cb028e17326d2a8a98cbc12c665bbdf6ec67 \
+ --hash=sha256:834ce8de7550bc444aed6c2afc1436c04485998e46f429e41b89d66ab85f0fbb \
+ --hash=sha256:9dbf35914d0bbf1294d8eb6fa5357d072238c6c722726c2ee20b9c1e35b8253d \
+ --hash=sha256:a257995d832858cf7c56bcfb1911f3379f9d3e795d7357f56f035f1b60339ea0 \
+ --hash=sha256:a57ff176818037983589c15b6dca03841fcef1429c279f5948800caa333fb476 \
+ --hash=sha256:a91dea75f8c202a6a553339a1997983224465555a3f8d7294b24de1e2bee5f05 \
+ --hash=sha256:b60f66132a42ec8c67fd26b8082cc3a0626192283dc9b5716a66203a58f10d30 \
+ --hash=sha256:b64a61f2f228b70f91c06a0bd82e2645c6b75ddbd50587f94a67c89ef6d5d854 \
+ --hash=sha256:b773ee74d869bf632ce1e12903cc8e7ae8b5697ef9ae97169ed263a5d3a87f76 \
+ --hash=sha256:c4e12ead3602ca2c188528fde444f8ab953b504b095d70265303bbf132908eb7 \
+ --hash=sha256:cf3c13d264cd089d467e9848fb6875907940202d22475b506a70683f04ef82af \
+ --hash=sha256:d8304317ff27bb50fd94f1e6e8c3ae0c59151ee85de2ea0269dbe7e982512c45 \
+ --hash=sha256:dac3b74bd801513e8221f05a01a294405eda7f4922fce5b174e5e33c222ae09d \
+ --hash=sha256:db32fa04d69bbb742401c124a6cec158e6237a21af4602dbf53e4630ea9dd068 \
+ --hash=sha256:de2ac509f8730fc6f6819f13a9ebbe52865397d526ca4dbe963a0e9865bb0500 \
+ --hash=sha256:df6198259dae01212b39079add58e0ef7311cf01734adea51fec4d2f7a9fafec \
+ --hash=sha256:e6cb86aa2f7230ee9dcb5f9f8821c7090566419def5537a44240f978b680c4f7 \
+ --hash=sha256:f0d8c8f66aba3629f0f17a1d2314beb2984ad7c485dd318ef2d5f257c040981d \
+ --hash=sha256:f48898e268dcaa14aa1b6d5c8b8d10f3f4396589a37be10a06bb5ba262ef0541
# via -r requirements/common.in
numpy==1.26.3 \
--hash=sha256:02f98011ba4ab17f46f80f7f8f1c291ee7d855fcef0a5a98db80767a468c85cd \
From fecc63d83ecfc84b773b3a0621f112acd0bfba3f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Fri, 19 Apr 2024 12:41:38 +0000
Subject: [PATCH 124/128] Bump celery from 5.3.6 to 5.4.0
Bumps [celery](https://github.com/celery/celery) from 5.3.6 to 5.4.0.
- [Release notes](https://github.com/celery/celery/releases)
- [Changelog](https://github.com/celery/celery/blob/main/Changelog.rst)
- [Commits](https://github.com/celery/celery/compare/v5.3.6...v5.4.0)
---
updated-dependencies:
- dependency-name: celery
dependency-type: direct:production
update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 81cc034e76a..0dd21f16c98 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -2,7 +2,7 @@
gunicorn==21.2.0
whitenoise[brotli]==6.6.0 # Used by Whitenoise to provide Brotli-compressed versions of static files.
Django==4.1.13
-celery==5.3.6 # celery needed for data ingestion
+celery==5.4.0 # celery needed for data ingestion
cached-property==1.5.2 # needed for kombu with --require-hashes
simplejson==3.19.2 # import simplejson
newrelic==9.9.0
diff --git a/requirements/common.txt b/requirements/common.txt
index 8eea783b78b..a3fb4ffee93 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -229,9 +229,9 @@ cachy==0.3.0 \
--hash=sha256:186581f4ceb42a0bbe040c407da73c14092379b1e4c0e327fdb72ae4a9b269b1 \
--hash=sha256:338ca09c8860e76b275aff52374330efedc4d5a5e45dc1c5b539c1ead0786fe7
# via mozci
-celery==5.3.6 \
- --hash=sha256:870cc71d737c0200c397290d730344cc991d13a057534353d124c9380267aab9 \
- --hash=sha256:9da4ea0118d232ce97dff5ed4974587fb1c0ff5c10042eb15278487cdd27d1af
+celery==5.4.0 \
+ --hash=sha256:369631eb580cf8c51a82721ec538684994f8277637edde2dfc0dacd73ed97f64 \
+ --hash=sha256:504a19140e8d3029d5acad88330c541d4c3f64c789d85f94756762d8bca7e706
# via -r requirements/common.in
certifi==2024.2.2 \
--hash=sha256:0569859f95fc761b18b45ef421b1290a0f65f147e92a1e5eb3e635f9a5e4e66f \
From f6ecd09fd319312c2ea2afaae974d9965a1be81f Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 17 Apr 2024 02:32:29 +0000
Subject: [PATCH 125/128] Bump mkdocs-material from 9.5.17 to 9.5.18
Bumps [mkdocs-material](https://github.com/squidfunk/mkdocs-material) from 9.5.17 to 9.5.18.
- [Release notes](https://github.com/squidfunk/mkdocs-material/releases)
- [Changelog](https://github.com/squidfunk/mkdocs-material/blob/master/CHANGELOG)
- [Commits](https://github.com/squidfunk/mkdocs-material/compare/9.5.17...9.5.18)
---
updated-dependencies:
- dependency-name: mkdocs-material
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index ddffefa1843..d804bac9e6f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,7 +6,7 @@ description = "Defaut package, used for development or readthedocs"
[project.optional-dependencies]
docs = [
"mkdocs==1.5.3",
- "mkdocs-material==9.5.17",
+ "mkdocs-material==9.5.18",
"mdx_truly_sane_lists==1.3",
]
From 99aa314d4932e5b152351a32880bc7a4f645958e Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 17 Apr 2024 02:25:11 +0000
Subject: [PATCH 126/128] Bump taskcluster from 64.2.4 to 64.2.5
Bumps [taskcluster](https://github.com/taskcluster/taskcluster) from 64.2.4 to 64.2.5.
- [Release notes](https://github.com/taskcluster/taskcluster/releases)
- [Changelog](https://github.com/taskcluster/taskcluster/blob/main/CHANGELOG.md)
- [Commits](https://github.com/taskcluster/taskcluster/compare/v64.2.4...v64.2.5)
---
updated-dependencies:
- dependency-name: taskcluster
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 0dd21f16c98..9470d47edd5 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -25,7 +25,7 @@ python-dateutil==2.9.0.post0
django-filter==23.5 # Listed in DEFAULT_FILTER_BACKENDS on settings.py
django-redis==5.4.0 # Listed in CACHES on settings.py
-taskcluster==64.2.4 # import taskcluster
+taskcluster==64.2.5 # import taskcluster
python-jose[pycryptodome]==3.3.0 # from jose import jwt
furl==2.1.3 # Imported as furl
diff --git a/requirements/common.txt b/requirements/common.txt
index a3fb4ffee93..cbdc92860d3 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -1469,9 +1469,9 @@ tabulate==0.9.0 \
--hash=sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c \
--hash=sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f
# via mozci
-taskcluster==64.2.4 \
- --hash=sha256:3247b81ecee6a889efb2fbdb40cdb3cadf4c91c2739c3d4b04f3a532d7ebc5fb \
- --hash=sha256:8f33dbd688ba3cd937884adee24bc782de207366449ca576fea20be296a6f1f6
+taskcluster==64.2.5 \
+ --hash=sha256:0972813cef47a6afca14445e945a978cfcb30dad401510a333b22c00803230d2 \
+ --hash=sha256:cd419b0d2b3608b676bd07c415e2600a7bf0a316d92fc4d965bc652e937066a2
# via
# -r requirements/common.in
# mozci
From efbe61874702eea49dbfe05ebf20499b84809ce9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 17 Apr 2024 02:23:37 +0000
Subject: [PATCH 127/128] Bump gunicorn from 21.2.0 to 22.0.0
Bumps [gunicorn](https://github.com/benoitc/gunicorn) from 21.2.0 to 22.0.0.
- [Release notes](https://github.com/benoitc/gunicorn/releases)
- [Commits](https://github.com/benoitc/gunicorn/compare/21.2.0...22.0.0)
---
updated-dependencies:
- dependency-name: gunicorn
dependency-type: direct:production
update-type: version-update:semver-major
...
Signed-off-by: dependabot[bot]
---
requirements/common.in | 2 +-
requirements/common.txt | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/requirements/common.in b/requirements/common.in
index 9470d47edd5..a5a83ad14da 100644
--- a/requirements/common.in
+++ b/requirements/common.in
@@ -1,5 +1,5 @@
# Packages that are shared between deployment and dev environments.
-gunicorn==21.2.0
+gunicorn==22.0.0
whitenoise[brotli]==6.6.0 # Used by Whitenoise to provide Brotli-compressed versions of static files.
Django==4.1.13
celery==5.4.0 # celery needed for data ingestion
diff --git a/requirements/common.txt b/requirements/common.txt
index cbdc92860d3..376a4ae40b4 100644
--- a/requirements/common.txt
+++ b/requirements/common.txt
@@ -493,9 +493,9 @@ furl==2.1.3 \
--hash=sha256:5a6188fe2666c484a12159c18be97a1977a71d632ef5bb867ef15f54af39cc4e \
--hash=sha256:9ab425062c4217f9802508e45feb4a83e54324273ac4b202f1850363309666c0
# via -r requirements/common.in
-gunicorn==21.2.0 \
- --hash=sha256:3213aa5e8c24949e792bcacfc176fef362e7aac80b76c56f6b5122bf350722f0 \
- --hash=sha256:88ec8bff1d634f98e61b9f65bc4bf3cd918a90806c6f5c48bc5603849ec81033
+gunicorn==22.0.0 \
+ --hash=sha256:350679f91b24062c86e386e198a15438d53a7a8207235a78ba1b53df4c4378d9 \
+ --hash=sha256:4a0b436239ff76fb33f11c07a16482c521a7e09c1ce3cc293c2330afe01bec63
# via -r requirements/common.in
idna==3.6 \
--hash=sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca \
From 9b2adc97d075d3fe4f41f77cfcb88a6a25b9ecfa Mon Sep 17 00:00:00 2001
From: Sebastian Hengst
Date: Thu, 18 Jan 2024 19:52:08 +0100
Subject: [PATCH 128/128] Bug 1875340 - use JavaScript's DecompressionStream
instead of 'pako' package
---
package.json | 1 -
tests/ui/helpers/gzip_test.js | 12 ------------
ui/helpers/gzip.js | 16 ++++++++--------
ui/job-view/pushes/Push.jsx | 3 +--
4 files changed, 9 insertions(+), 23 deletions(-)
delete mode 100644 tests/ui/helpers/gzip_test.js
diff --git a/package.json b/package.json
index 9a1c0762173..f1d2203bd82 100644
--- a/package.json
+++ b/package.json
@@ -41,7 +41,6 @@
"mobx": "6.10.2",
"moment": "2.29.4",
"numeral": "2.0.6",
- "pako": "2.0.4",
"prop-types": "15.7.2",
"query-string": "7.0.1",
"react": "17.0.2",
diff --git a/tests/ui/helpers/gzip_test.js b/tests/ui/helpers/gzip_test.js
deleted file mode 100644
index 02f67411cac..00000000000
--- a/tests/ui/helpers/gzip_test.js
+++ /dev/null
@@ -1,12 +0,0 @@
-import { gzip } from 'pako';
-
-import decompress from '../../../ui/helpers/gzip';
-
-describe('gzip related functions', () => {
- test('compress and decompress', async () => {
- const str = JSON.stringify({ foo: 'bar' });
- const compressed = await gzip(str);
- const decompressed = await decompress(compressed);
- expect(JSON.stringify(decompressed)).toBe(str);
- });
-});
diff --git a/ui/helpers/gzip.js b/ui/helpers/gzip.js
index 38daa01d273..7817e0dcbf2 100644
--- a/ui/helpers/gzip.js
+++ b/ui/helpers/gzip.js
@@ -1,8 +1,8 @@
-import { inflate } from 'pako';
-
-export const unGzip = async (binData) => {
- const decompressed = await inflate(binData, { to: 'string' });
- return JSON.parse(decompressed);
-};
-
-export default unGzip;
+export default async function unGzip(blob) {
+ const decompressionStream = new DecompressionStream('gzip');
+ const decompressedStream = blob.stream().pipeThrough(decompressionStream);
+ const payloadText = await (
+ await new Response(decompressedStream).blob()
+ ).text();
+ return JSON.parse(payloadText);
+}
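For reference, an illustrative round-trip for the new helper using the matching CompressionStream API (available in modern browsers and recent Node releases); this sketch is not part of the patch and assumes unGzip is in scope.

async function roundTrip() {
  const payload = JSON.stringify({ foo: 'bar' });
  // Compress with CompressionStream, mirroring what unGzip undoes with DecompressionStream.
  const compressedStream = new Blob([payload])
    .stream()
    .pipeThrough(new CompressionStream('gzip'));
  const gzippedBlob = await new Response(compressedStream).blob();
  const decoded = await unGzip(gzippedBlob);
  console.log(decoded.foo); // "bar"
}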
diff --git a/ui/job-view/pushes/Push.jsx b/ui/job-view/pushes/Push.jsx
index d28ff045c48..8a3b4c679af 100644
--- a/ui/job-view/pushes/Push.jsx
+++ b/ui/job-view/pushes/Push.jsx
@@ -79,8 +79,7 @@ const fetchGeckoDecisionArtifact = async (project, revision, filePath) => {
if (url.endsWith('.gz')) {
if ([200, 303, 304].includes(response.status)) {
const blob = await response.blob();
- const binData = await blob.arrayBuffer();
- artifactContents = await decompress(binData);
+ artifactContents = await decompress(blob);
}
} else if (url.endsWith('.json')) {
if ([200, 303, 304].includes(response.status)) {