diff --git a/RLTest/__main__.py b/RLTest/__main__.py index 63a0c85..f0ac085 100644 --- a/RLTest/__main__.py +++ b/RLTest/__main__.py @@ -20,6 +20,10 @@ from RLTest.Enterprise import binaryrepo from RLTest import debuggers from RLTest._version import __version__ +from contextlib import redirect_stdout +from progressbar import progressbar, ProgressBar +import threading +import signal import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) @@ -209,6 +213,10 @@ def do_normal_conn(self, line): '--shards-count', default=1, type=int, help='Number shards in bdb') +parser.add_argument( + '--test-timeout', default=0, type=int, + help='Test timeout, 0 means no timeout.') + parser.add_argument( '--download-enterprise-binaries', action='store_const', const=True, default=False, help='run env with slaves enabled') @@ -272,7 +280,11 @@ def do_normal_conn(self, line): parser.add_argument( '-s', '--no-output-catch', action='store_const', const=True, default=False, - help='all output will be written to the stdout, no log files.') + help='all output will be written to the stdout, no log files. Implies --no-progress.') + +parser.add_argument( + '--no-progress', action='store_const', const=True, default=False, + help='Do not show progress bar.') parser.add_argument( '--verbose-information-on-failure', action='store_const', const=True, default=False, @@ -336,6 +348,58 @@ def __enter__(self): def __exit__(self, type, value, traceback): self.runner.takeEnvDown() +class TestTimeLimit(object): + """ + A test timeout watcher. The watcher opens thread that sleep for the + required timeout and then wake up and send SIGUSR1 signal to the main thread + causing it to enter a timeout phase. When enter a timeout phase, the main thread + prints its trace and enter a deep sleep. The watcher thread continue collecting + environment stats and when done kills the processes. 
+ """ + + def __init__(self, timeout, timeout_func): + self.timeout = timeout + self.timeout_func = timeout_func + self.condition = threading.Condition() + self.thread = None + self.is_done = False + self.trace_printed = False + + def on_timeout(self, signum, frame): + for line in traceback.format_stack(): + print(line.strip()) + self.trace_printed = True + time.sleep(1000) # sleep forever process will be killed soon + + def watcher_thread(self): + self.condition.acquire() + self.condition.wait(timeout=self.timeout) + if not self.is_done: + print(Colors.Bred('Test Timeout, printing trace.')) + os.kill(os.getpid(), signal.SIGUSR1) + while not self.trace_printed: + time.sleep(0.1) + try: + self.timeout_func() + except Exception as e: + print(Colors.Bred("Failed on timeout function, %s" % str(e))) + os._exit(1) + + def __enter__(self): + if self.timeout == 0: + return + signal.signal(signal.SIGUSR1, self.on_timeout) + self.thread = threading.Thread(target=self.watcher_thread) + self.thread.start() + + def __exit__(self, exc_type, exc_value, traceback): + if self.timeout == 0: + return + self.condition.acquire() + self.is_done = True + self.condition.notify(1) + self.condition.release() + class RLTest: def __init__(self): @@ -468,7 +532,7 @@ def __init__(self): raise Exception('Cannot use unix sockets with slaves') self.tests = [] - self.testsFailed = [] + self.testsFailed = {} self.currEnv = None self.loader = TestLoader() if self.args.test is not None: @@ -503,6 +567,11 @@ def __init__(self): def _convertArgsType(self): pass + def stopEnvWithSegFault(self): + if not self.currEnv: + return + self.currEnv.stopEnvWithSegFault() + def takeEnvDown(self, fullShutDown=False): if not self.currEnv: return @@ -558,15 +627,12 @@ def addFailure(self, name, failures=None): failures = [failures] if not failures: failures = [] - self.testsFailed.append([name, failures]) + self.testsFailed.setdefault(name, []).extend(failures) - def getTotalFailureCount(self): - ret = 0 - for _, 
failures in self.testsFailed: - ret += len(failures) - return ret + def getFailedTestsCount(self): + return len(self.testsFailed) - def handleFailure(self, testFullName=None, exception=None, prefix='', testname=None, env=None): + def handleFailure(self, testFullName=None, exception=None, prefix='', testname=None, env=None, error_msg=None): """ Failure omni-function. @@ -596,6 +662,8 @@ def handleFailure(self, testFullName=None, exception=None, prefix='', testname=N self.addFailuresFromEnv(testname, env) elif exception: self.addFailure(testname, str(exception)) + elif error_msg: + self.addFailure(testname, str(error_msg)) else: self.addFailure(testname, '') @@ -699,6 +767,88 @@ def printPass(self, name): def envScopeGuard(self): return EnvScopeGuard(self) + + def killEnvWithSegFault(self): + if self.currEnv and Defaults.print_verbose_information_on_failure: + try: + verboseInfo = {} + # It is not safe to get the information before dispose, Redis might be stuck and will not reply. + # It would cause us to hang here forever. We will only get the information after dispose, this should be + # enough as we kill Redis with segfault which means that it should provide us with all the required details. 
+ self.stopEnvWithSegFault() + verboseInfo['after_dispose'] = self.currEnv.getInformationAfterDispose() + self.currEnv.debugPrint(json.dumps(verboseInfo, indent=2).replace('\\n', '\n'), force=True) + except Exception as e: + print('Failed %s' % str(e)) + else: + self.stopEnvWithSegFault() + + def run_single_test(self, test, on_timeout_func): + done = 0 + with self.envScopeGuard(): + if test.is_class: + test.initialize() + + Defaults.curr_test_name = test.name + try: + obj = test.create_instance() + + except unittest.SkipTest: + self.printSkip(test.name) + return + + except Exception as e: + self.printException(e) + self.addFailure(test.name + " [__init__]") + return + + failures = 0 + before = getattr(obj, 'setUp', None) + after = getattr(obj, 'tearDown', None) + for subtest in test.get_functions(obj): + with TestTimeLimit(self.args.test_timeout, on_timeout_func): + failures += self._runTest(subtest, prefix='\t', + numberOfAssertionFailed=failures, + before=before, after=after) + done += 1 + + else: + with TestTimeLimit(self.args.test_timeout, on_timeout_func): + failures = self._runTest(test) + done += 1 + + verboseInfo = {} + if failures > 0 and Defaults.print_verbose_information_on_failure: + lastEnv = self.currEnv + verboseInfo['before_dispose'] = lastEnv.getInformationBeforeDispose() + + # here the env is down so lets collect more info and print it + if failures > 0 and Defaults.print_verbose_information_on_failure: + verboseInfo['after_dispose'] = lastEnv.getInformationAfterDispose() + lastEnv.debugPrint(json.dumps(verboseInfo, indent=2).replace('\\n', '\n'), force=True) + return done + + def print_failures(self): + for group, failures in self.testsFailed.items(): + print('\t' + Colors.Bold(group)) + if not failures: + print('\t\t' + Colors.Bred('Exception raised during test execution. 
See logs')) + for failure in failures: + print('\t\t' + failure) + + def disable_progress_bar(self): + return self.args.no_output_catch or self.args.no_progress or not sys.stdout.isatty() + + def progressbar(self, num_elements): + bar = None + if not self.disable_progress_bar(): + bar = ProgressBar(max_value=num_elements, redirect_stdout=True) + for i in range(num_elements): + bar.update(i) + yield i + bar.update(num_elements) + else: + yield from range(num_elements) def execute(self): Env.RTestInstance = self @@ -722,91 +872,117 @@ def execute(self): sys.exit(1) jobs = Queue() + n_jobs = 0 for test in self.loader: jobs.put(test, block=False) + n_jobs += 1 - def run_jobs(jobs, results, port): - Defaults.port = port - done = 0 - while True: + def run_jobs_main_thread(jobs): + nonlocal done + bar = self.progressbar(n_jobs) + for _ in bar: try: test = jobs.get(timeout=0.1) except Exception as e: break - with self.envScopeGuard(): - if test.is_class: - test.initialize() - - Defaults.curr_test_name = test.name - try: - obj = test.create_instance() + def on_timeout(): + nonlocal done + try: + done += 1 + self.killEnvWithSegFault() + self.handleFailure(testFullName=test.name, testname=test.name, error_msg=Colors.Bred('Test timeout')) + self.print_failures() + finally: + # we must update the bar anyway to see output + bar.__next__() - except unittest.SkipTest: - self.printSkip(test.name) - continue + done += self.run_single_test(test, on_timeout) - except Exception as e: - self.printException(e) - self.addFailure(test.name + " [__init__]") - continue - - failures = 0 - before = getattr(obj, 'setUp', None) - after = getattr(obj, 'tearDown', None) - for subtest in test.get_functions(obj): - failures += self._runTest(subtest, prefix='\t', - numberOfAssertionFailed=failures, - before=before, after=after) - done += 1 - - else: - failures = self._runTest(test) - done += 1 + self.takeEnvDown(fullShutDown=True) - verboseInfo = {} - if failures > 0 and 
Defaults.print_verbose_information_on_failure: - lastEnv = self.currEnv - verboseInfo['before_dispose'] = lastEnv.getInformationBeforeDispose() + def run_jobs(jobs, results, summary, port): + Defaults.port = port + done = 0 + while True: + try: + test = jobs.get(timeout=0.1) + except Exception as e: + break - # here the env is down so lets collect more info and print it - if failures > 0 and Defaults.print_verbose_information_on_failure: - verboseInfo['after_dispose'] = lastEnv.getInformationAfterDispose() - lastEnv.debugPrint(json.dumps(verboseInfo, indent=2).replace('\\n', '\n'), force=True) + + output = io.StringIO() + with redirect_stdout(output): + def on_timeout(): + nonlocal done + try: + done += 1 + self.killEnvWithSegFault() + self.handleFailure(testFullName=test.name, testname=test.name, error_msg=Colors.Bred('Test timeout')) + except Exception as e: + self.handleFailure(testFullName=test.name, testname=test.name, error_msg=Colors.Bred('Exception on timeout function %s' % str(e))) + finally: + results.put({'test_name': test.name, "output": output.getvalue()}, block=False) + summary.put({'done': done, 'failures': self.testsFailed}, block=False) + # After we return the processes will be killed, so we must make sure the queues are drained properly. 
+ results.close() + summary.close() + summary.join_thread() + results.join_thread() + done += self.run_single_test(test, on_timeout) + + results.put({'test_name': test.name, "output": output.getvalue()}, block=False) self.takeEnvDown(fullShutDown=True) # serialized the results back - results.put({'done': done, 'failures': self.testsFailed}, block=False) + summary.put({'done': done, 'failures': self.testsFailed}, block=False) results = Queue() + summary = Queue() if self.parallelism == 1: - run_jobs(jobs, results, Defaults.port) + run_jobs_main_thread(jobs) else : processes = [] currPort = Defaults.port for i in range(self.parallelism): - p = Process(target=run_jobs, args=(jobs,results,currPort)) + p = Process(target=run_jobs, args=(jobs,results,summary,currPort)) currPort += 30 # safe distance for cluster and replicas processes.append(p) p.start() + for _ in self.progressbar(n_jobs): + while True: + # check if we have some lives executors + has_live_processor = False + for p in processes: + if p.is_alive(): + has_live_processor = True + break + try: + res = results.get(timeout=1) + break + except Exception as e: + if not has_live_processor: + raise Exception('Failed to get job result and no more processors is alive') + output = res['output'] + print('%s' % output, end="") for p in processes: p.join() - # join results - while True: - try: - res = results.get(timeout=0.1) - except Exception as e: - break - done += res['done'] - self.testsFailed.extend(res['failures']) + # join results + while True: + try: + res = summary.get(timeout=1) + except Exception as e: + break + done += res['done'] + self.testsFailed.update(res['failures']) endTime = time.time() - print(Colors.Bold('Test Took: %d sec' % (endTime - startTime))) - print(Colors.Bold('Total Tests Run: %d, Total Tests Failed: %d, Total Tests Passed: %d' % (done, self.getTotalFailureCount(), done - self.getTotalFailureCount()))) + print(Colors.Bold('\nTest Took: %d sec' % (endTime - startTime))) + 
print(Colors.Bold('Total Tests Run: %d, Total Tests Failed: %d, Total Tests Passed: %d' % (done, self.getFailedTestsCount(), done - self.getFailedTestsCount()))) if self.testsFailed: if self.args.failed_tests_file: with open(self.args.failed_tests_file, 'w') as file: @@ -814,12 +990,7 @@ def run_jobs(jobs, results, port): file.write(test.split(' ')[0] + "\n") print(Colors.Bold('Failed Tests Summary:')) - for group, failures in self.testsFailed: - print('\t' + Colors.Bold(group)) - if not failures: - print('\t\t' + Colors.Bred('Exception raised during test execution. See logs')) - for failure in failures: - print('\t\t' + failure) + self.print_failures() sys.exit(1) else: if self.args.failed_tests_file: diff --git a/RLTest/env.py b/RLTest/env.py index a649349..e2cb8bd 100644 --- a/RLTest/env.py +++ b/RLTest/env.py @@ -372,6 +372,9 @@ def start(self, masters = True, slaves = True ): def stop(self, masters = True, slaves = True): self.envRunner.stopEnv(masters, slaves) + def stopEnvWithSegFault(self, masters = True, slaves = True): + self.envRunner.stopEnvWithSegFault(masters, slaves) + def getEnvStr(self): return self.env diff --git a/RLTest/redis_cluster.py b/RLTest/redis_cluster.py index 9c187ed..ea3fa14 100644 --- a/RLTest/redis_cluster.py +++ b/RLTest/redis_cluster.py @@ -114,6 +114,10 @@ def startEnv(self, masters=True, slaves=True): self.envIsUp = True self.envIsHealthy = True + def stopEnvWithSegFault(self, masters=True, slaves=True): + for shard in self.shards: + shard.stopEnvWithSegFault(masters, slaves) + def stopEnv(self, masters=True, slaves=True): self.envIsUp = False self.envIsHealthy = False diff --git a/RLTest/redis_std.py b/RLTest/redis_std.py index adf84a0..ea14d85 100644 --- a/RLTest/redis_std.py +++ b/RLTest/redis_std.py @@ -7,7 +7,7 @@ import uuid import platform import psutil - +import signal import redis from .random_port import get_random_port @@ -310,20 +310,13 @@ def getInformationBeforeDispose(self): if self.useSlaves: 
instances.append((SLAVE, self.getSlaveConnection(), self.slaveProcess)) for role, conn, proc in instances: - logs = None info = None - try: - with open(os.path.join(self.dbDirPath, self._getFileName(role, '.log'))) as f: - logs = f.read() - except os.FileNoteFoundError: - pass try: info = conn.execute_command('info', 'everything') except redis.exceptions.RedisError: pass res[role] = { - 'info': info, - 'logs': logs, + 'info': info } return res @@ -335,6 +328,7 @@ def getInformationAfterDispose(self): for role, stdout, stderr in instances: stdoutStr = None stderrStr = None + logs = None try: stdoutStr = stdout.read().decode('utf8') except (NameError, AttributeError): pass @@ -345,9 +339,16 @@ except (NameError, AttributeError): pass + try: + with open(os.path.join(self.dbDirPath, self._getFileName(role, '.log'))) as f: + logs = f.read() + except FileNotFoundError: + pass + res[role] = { 'stdout': stdoutStr, 'stderr': stderrStr, + 'logs': logs, } return res @@ -409,6 +410,31 @@ def _isAlive(self, process): return True return False + def _segfault(self, role, retries=3): + process = self.masterProcess if role == MASTER else self.slaveProcess + if not self._isAlive(process): + return + for _ in range(retries): + if process.poll() is None: # None returns if the processes is not finished yet, retry until redis exits + time.sleep(1) + process.send_signal(signal.SIGSEGV) + else: + return + print(Colors.Bred('Failed killing processes with sigsegv, forcibly kill the processes.')) + for _ in range(retries): + if process.poll() is None: # None returns if the processes is not finished yet, retry until redis exits + time.sleep(1) + process.kill() + else: + return + print(Colors.Bred('Failed killing processes with sigkill.')) + + def stopEnvWithSegFault(self, masters = True, slaves = True): + if self.masterProcess is not None and masters is True: + self._segfault(MASTER) + if self.useSlaves and self.slaveProcess is not None and slaves is True: 
+ self._segfault(SLAVE) + def _stopProcess(self, role): process = self.masterProcess if role == MASTER else self.slaveProcess serverId = self.masterServerId if role == MASTER else self.slaveServerId @@ -490,7 +516,6 @@ def stopEnv(self, masters = True, slaves = True): self.envIsUp = self.masterProcess is not None or self.slaveProcess is not None self.envIsHealthy = self.masterProcess is not None and (self.slaveProcess is not None if self.useSlaves else True) - def _getConnection(self, role): if self.useUnix: return redis.StrictRedis(unix_socket_path=self.getUnixPath(role), diff --git a/poetry.lock b/poetry.lock index f9828e8..5462ba6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.5.0 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. [[package]] name = "async-timeout" @@ -113,13 +113,13 @@ files = [ [[package]] name = "exceptiongroup" -version = "1.1.2" +version = "1.2.0" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" files = [ - {file = "exceptiongroup-1.1.2-py3-none-any.whl", hash = "sha256:e346e69d186172ca7cf029c8c1d16235aa0e04035e5750b4b95039e65204328f"}, - {file = "exceptiongroup-1.1.2.tar.gz", hash = "sha256:12c3e887d6485d16943a309616de20ae5582633e0a2eda17f4e10fd61c1e8af5"}, + {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"}, + {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"}, ] [package.extras] @@ -158,13 +158,13 @@ files = [ [[package]] name = "packaging" -version = "23.1" +version = "23.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, 
- {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, + {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"}, + {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] [[package]] @@ -185,27 +185,47 @@ importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "progressbar2" +version = "4.2.0" +description = "A Python Progressbar library to provide visual (yet text based) progress to long running operations." +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "progressbar2-4.2.0-py2.py3-none-any.whl", hash = "sha256:1a8e201211f99a85df55f720b3b6da7fb5c8cdef56792c4547205be2de5ea606"}, + {file = "progressbar2-4.2.0.tar.gz", hash = "sha256:1393922fcb64598944ad457569fbeb4b3ac189ef50b5adb9cef3284e87e394ce"}, +] + +[package.dependencies] +python-utils = ">=3.0.0" + +[package.extras] +docs = ["sphinx (>=1.8.5)"] +tests = ["flake8 (>=3.7.7)", "freezegun (>=0.3.11)", "pytest (>=4.6.9)", "pytest-cov (>=2.6.1)", "pytest-mypy", "sphinx (>=1.8.5)"] + [[package]] name = "psutil" -version = "5.9.5" +version = "5.9.6" description = "Cross-platform lib for process and system monitoring in Python." 
optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ - {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"}, - {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"}, - {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"}, - {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"}, - {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"}, - {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"}, - {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"}, - {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"}, - {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"}, - {file = 
"psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"}, - {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"}, + {file = "psutil-5.9.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:fb8a697f11b0f5994550555fcfe3e69799e5b060c8ecf9e2f75c69302cc35c0d"}, + {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:91ecd2d9c00db9817a4b4192107cf6954addb5d9d67a969a4f436dbc9200f88c"}, + {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:10e8c17b4f898d64b121149afb136c53ea8b68c7531155147867b7b1ac9e7e28"}, + {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:18cd22c5db486f33998f37e2bb054cc62fd06646995285e02a51b1e08da97017"}, + {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:ca2780f5e038379e520281e4c032dddd086906ddff9ef0d1b9dcf00710e5071c"}, + {file = "psutil-5.9.6-cp27-none-win32.whl", hash = "sha256:70cb3beb98bc3fd5ac9ac617a327af7e7f826373ee64c80efd4eb2856e5051e9"}, + {file = "psutil-5.9.6-cp27-none-win_amd64.whl", hash = "sha256:51dc3d54607c73148f63732c727856f5febec1c7c336f8f41fcbd6315cce76ac"}, + {file = "psutil-5.9.6-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c69596f9fc2f8acd574a12d5f8b7b1ba3765a641ea5d60fb4736bf3c08a8214a"}, + {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92e0cc43c524834af53e9d3369245e6cc3b130e78e26100d1f63cdb0abeb3d3c"}, + {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:748c9dd2583ed86347ed65d0035f45fa8c851e8d90354c122ab72319b5f366f4"}, + {file = "psutil-5.9.6-cp36-cp36m-win32.whl", hash = "sha256:3ebf2158c16cc69db777e3c7decb3c0f43a7af94a60d72e87b2823aebac3d602"}, + {file = "psutil-5.9.6-cp36-cp36m-win_amd64.whl", hash = 
"sha256:ff18b8d1a784b810df0b0fff3bcb50ab941c3b8e2c8de5726f9c71c601c611aa"}, + {file = "psutil-5.9.6-cp37-abi3-win32.whl", hash = "sha256:a6f01f03bf1843280f4ad16f4bde26b817847b4c1a0db59bf6419807bc5ce05c"}, + {file = "psutil-5.9.6-cp37-abi3-win_amd64.whl", hash = "sha256:6e5fb8dc711a514da83098bc5234264e551ad980cec5f85dabf4d38ed6f15e9a"}, + {file = "psutil-5.9.6-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:daecbcbd29b289aac14ece28eca6a3e60aa361754cf6da3dfb20d4d32b6c7f57"}, + {file = "psutil-5.9.6.tar.gz", hash = "sha256:e4b92ddcd7dd4cdd3f900180ea1e104932c7bce234fb88976e2a3b296441225a"}, ] [package.extras] @@ -213,13 +233,13 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] [[package]] name = "pytest" -version = "7.4.0" +version = "7.4.3" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" files = [ - {file = "pytest-7.4.0-py3-none-any.whl", hash = "sha256:78bf16451a2eb8c7a2ea98e32dc119fd2aa758f1d5d66dbf0a59d69a3969df32"}, - {file = "pytest-7.4.0.tar.gz", hash = "sha256:b4bf8c45bd59934ed84001ad51e11b4ee40d40a1229d2c79f9c592b0a3f6bd8a"}, + {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"}, + {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"}, ] [package.dependencies] @@ -252,15 +272,34 @@ pytest = ">=4.6" [package.extras] testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"] +[[package]] +name = "python-utils" +version = "3.5.2" +description = "Python Utils is a module with some convenient utilities not included with the standard Python install" +optional = false +python-versions = ">3.6.0" +files = [ + {file = "python-utils-3.5.2.tar.gz", hash = "sha256:68198854fc276bc4b2403b261703c218e01ef564dcb072a7096ed9ea7aa5130c"}, + {file = "python_utils-3.5.2-py2.py3-none-any.whl", hash = 
"sha256:8bfefc3430f1c48408fa0e5958eee51d39840a5a987c2181a579e99ab6fe5ca6"}, +] + +[package.dependencies] +typing-extensions = {version = "*", markers = "python_version < \"3.8\""} + +[package.extras] +docs = ["mock", "python-utils", "sphinx"] +loguru = ["loguru"] +tests = ["flake8", "loguru", "pytest", "pytest-asyncio", "pytest-cov", "pytest-mypy", "sphinx", "types-setuptools"] + [[package]] name = "redis" -version = "5.0.0rc2" +version = "5.0.1" description = "Python client for Redis database and key-value store" optional = false python-versions = ">=3.7" files = [ - {file = "redis-5.0.0rc2-py3-none-any.whl", hash = "sha256:b8fe448639fc9c27742194d9295687371a0ba2d1c7e3aa068611a64dcbab9236"}, - {file = "redis-5.0.0rc2.tar.gz", hash = "sha256:c728ec06056a4bf069dbb64b6fa1cfd76b6721100ec357e578fb516064f64670"}, + {file = "redis-5.0.1-py3-none-any.whl", hash = "sha256:ed4802971884ae19d640775ba3b03aa2e7bd5e8fb8dfaed2decce4d0fc48391f"}, + {file = "redis-5.0.1.tar.gz", hash = "sha256:0dab495cd5753069d3bc650a0dde8a8f9edde16fc5691b689a566eda58100d0f"}, ] [package.dependencies] @@ -312,4 +351,4 @@ testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more [metadata] lock-version = "2.0" python-versions = ">= 3.7.0" -content-hash = "b4df4ce3830f6976fd1acd6e18325e197322b98de3e7b4880e9a8024a07a167d" +content-hash = "ec234463c786c65c728a9f05c5e8ca424d70221587a9bdb7595b8a8f63b5414b" diff --git a/pyproject.toml b/pyproject.toml index 92cd3b2..d2beb7f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,7 @@ redis = "^5.0.0rc2" psutil = "^5.9.5" pytest-cov = "^4.1.0" pytest = "^7.4" +progressbar2 = "4.2" [tool.poetry.urls] repository = "https://github.com/RedisLabsModules/RLTest"