From 405cef42af1c0b7640e871ecbb4fb0f2970aa442 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Le=C5=9Bniewski?= Date: Fri, 9 Sep 2022 12:13:13 +0200 Subject: [PATCH 1/7] Fix bucket leaks in integration tests --- CHANGELOG.md | 1 + test/integration/cleanup_buckets.py | 2 +- test/integration/conftest.py | 71 ++--- test/integration/helpers.py | 215 +++++++------- test/integration/test_b2_command_line.py | 360 ++++++++--------------- 5 files changed, 274 insertions(+), 375 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 59a09e502..a1dfe421c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Infrastructure * GitHub CI got checkout action updated to v3 and setup-python to v4 * Ensured that changelog validation only happens on pull requests +* Fix bucket leaks in integration tests ## [3.6.0] - 2022-09-20 diff --git a/test/integration/cleanup_buckets.py b/test/integration/cleanup_buckets.py index 45adf743a..0bdaec745 100644 --- a/test/integration/cleanup_buckets.py +++ b/test/integration/cleanup_buckets.py @@ -13,4 +13,4 @@ def test_cleanup_buckets(b2_api): # this is not a test, but it is intended to be called # via pytest because it reuses fixtures which have everything # set up - b2_api.clean_buckets() + b2_api.clean_all_buckets() diff --git a/test/integration/conftest.py b/test/integration/conftest.py index 96cb9a7b1..109aca821 100644 --- a/test/integration/conftest.py +++ b/test/integration/conftest.py @@ -17,11 +17,9 @@ import pytest from b2sdk.v2 import B2_ACCOUNT_INFO_ENV_VAR, XDG_CONFIG_HOME_ENV_VAR -from b2sdk.exception import BucketIdNotFound +from b2sdk.exception import BucketIdNotFound, NonExistentBucket -from .helpers import Api, CommandLine, bucket_name_part - -GENERAL_BUCKET_NAME_PREFIX = 'clitst' +from .helpers import Api, CommandLine @pytest.hookimpl @@ -52,32 +50,16 @@ def realm() -> str: @pytest.fixture(scope='function') -def 
bucket_name(b2_api) -> str: - bucket = b2_api.create_bucket() - yield bucket.name - with contextlib.suppress(BucketIdNotFound): - b2_api.clean_bucket(bucket) - - -@pytest.fixture(scope='function') # , autouse=True) -def debug_print_buckets(b2_api): - print('-' * 30) - print('Buckets before test ' + environ['PYTEST_CURRENT_TEST']) - num_buckets = b2_api.count_and_print_buckets() - print('-' * 30) - try: - yield - finally: - print('-' * 30) - print('Buckets after test ' + environ['PYTEST_CURRENT_TEST']) - delta = b2_api.count_and_print_buckets() - num_buckets - print(f'DELTA: {delta}') - print('-' * 30) +def create_test_bucket(b2_api): + def factory(*args, **kwargs): + return b2_api.create_test_bucket(*args, **kwargs).name + return factory -@pytest.fixture(scope='session') -def this_run_bucket_name_prefix() -> str: - yield GENERAL_BUCKET_NAME_PREFIX + bucket_name_part(8) + +@pytest.fixture(scope='function') +def bucket_name(create_test_bucket) -> str: + return create_test_bucket() @pytest.fixture(scope='module') @@ -108,23 +90,14 @@ def auto_change_account_info_dir(monkey_patch) -> dir: @pytest.fixture(scope='module') -def b2_api(application_key_id, application_key, realm, this_run_bucket_name_prefix) -> Api: - yield Api( - application_key_id, application_key, realm, GENERAL_BUCKET_NAME_PREFIX, - this_run_bucket_name_prefix - ) +def b2_api(application_key_id, application_key, realm) -> Api: + return Api(application_key_id, application_key, realm) @pytest.fixture(scope='module') -def b2_tool( - request, application_key_id, application_key, realm, this_run_bucket_name_prefix -) -> CommandLine: +def b2_tool(request, application_key_id, application_key, realm) -> CommandLine: tool = CommandLine( - request.config.getoption('--sut'), - application_key_id, - application_key, - realm, - this_run_bucket_name_prefix, + request.config.getoption('--sut'), application_key_id, application_key, realm ) tool.reauthorize(check_key_capabilities=True) # reauthorize for the first time 
(with check) return tool @@ -134,3 +107,19 @@ def b2_tool( def auto_reauthorize(request, b2_tool): """ Automatically reauthorize for each test (without check) """ b2_tool.reauthorize(check_key_capabilities=False) + + +@pytest.fixture(scope='function', autouse=True) +def auto_clean_buckets(b2_api, b2_tool): + """Automatically delete created buckets after each test case""" + yield + + # remove buckets created using the CLI + while b2_tool.buckets: + with contextlib.suppress(BucketIdNotFound, NonExistentBucket): + # The buckets were created with the CLI tool, but we still delete them using the API as it will handle + # corner cases properly (like retries or deleting non-empty buckets). + b2_api.clean_bucket(b2_tool.buckets.pop()) + + # remove buckets created using the API + b2_api.clean_buckets() diff --git a/test/integration/helpers.py b/test/integration/helpers.py index 1f0ea4eb3..68820aeba 100644 --- a/test/integration/helpers.py +++ b/test/integration/helpers.py @@ -8,6 +8,9 @@ # ###################################################################### +import argparse +import contextlib +import dataclasses import json import os import platform @@ -19,7 +22,6 @@ import sys import threading -from dataclasses import dataclass from datetime import datetime from os import environ, linesep, path from pathlib import Path @@ -29,18 +31,16 @@ import backoff from b2sdk._v3.exception import BucketIdNotFound as v3BucketIdNotFound -from b2sdk.v2 import ALL_CAPABILITIES, NO_RETENTION_FILE_SETTING, B2Api, Bucket, EncryptionAlgorithm, EncryptionKey, EncryptionMode, EncryptionSetting, InMemoryAccountInfo, InMemoryCache, LegalHold, RetentionMode, SqliteAccountInfo, fix_windows_path_limit -from b2sdk.v2.exception import BucketIdNotFound, DuplicateBucketName, FileNotPresent, TooManyRequests +from b2sdk.v2 import ALL_CAPABILITIES, NO_RETENTION_FILE_SETTING, B2Api, Bucket, EncryptionAlgorithm, EncryptionKey, EncryptionMode, EncryptionSetting, InMemoryAccountInfo, InMemoryCache, 
LegalHold, RetentionMode, BucketTrackingMixin, SqliteAccountInfo, fix_windows_path_limit +from b2sdk.v2.exception import BucketIdNotFound, DuplicateBucketName, FileNotPresent, TooManyRequests, NonExistentBucket from b2.console_tool import Command, current_time_millis -BUCKET_CLEANUP_PERIOD_MILLIS = 0 ONE_HOUR_MILLIS = 60 * 60 * 1000 ONE_DAY_MILLIS = ONE_HOUR_MILLIS * 24 BUCKET_NAME_LENGTH = 50 BUCKET_NAME_CHARS = string.ascii_letters + string.digits + '-' -BUCKET_CREATED_AT_MILLIS = 'created_at_millis' SSE_NONE = EncryptionSetting(mode=EncryptionMode.NONE,) SSE_B2_AES = EncryptionSetting( @@ -58,78 +58,35 @@ key=EncryptionKey(secret=os.urandom(32), key_id='another-user-generated-key-id') ) +BUCKET_NAME_PREFIX = 'clitst' +BUCKET_NAME_PREFIX_OLD = 'test-b2-cli-' # TODO: remove this when sure that there are no more old buckets -def bucket_name_part(length: int) -> str: - return ''.join(random.choice(BUCKET_NAME_CHARS) for _ in range(length)) +def generate_bucket_name(): + suffix_length = BUCKET_NAME_LENGTH - len(BUCKET_NAME_PREFIX) + suffix = ''.join(random.choice(BUCKET_NAME_CHARS) for _ in range(suffix_length)) + return BUCKET_NAME_PREFIX + suffix -@dataclass -class Api: - account_id: str - application_key: str - realm: str - general_bucket_name_prefix: str - this_run_bucket_name_prefix: str - api: B2Api = None - - def __post_init__(self): +class Api(BucketTrackingMixin, B2Api): + def __init__(self, account_id, application_key, realm): info = InMemoryAccountInfo() cache = InMemoryCache() - self.api = B2Api(info, cache=cache) - self.api.authorize_account(self.realm, self.account_id, self.application_key) + super().__init__(info, cache=cache) + self.authorize_account(realm, account_id, application_key) - def create_bucket(self) -> Bucket: + def create_test_bucket(self, bucket_type="allPublic", **kwargs) -> Bucket: for _ in range(10): - bucket_name = self.this_run_bucket_name_prefix + bucket_name_part( - BUCKET_NAME_LENGTH - len(self.this_run_bucket_name_prefix) - ) + 
bucket_name = generate_bucket_name() print('Creating bucket:', bucket_name) try: - return self.api.create_bucket( - bucket_name, - 'allPublic', - bucket_info={BUCKET_CREATED_AT_MILLIS: str(current_time_millis())}, - ) + return self.create_bucket(bucket_name, bucket_type, **kwargs) except DuplicateBucketName: pass print() raise ValueError('Failed to create bucket due to name collision') - def _should_remove_bucket(self, bucket: Bucket): - if bucket.name.startswith(self.this_run_bucket_name_prefix): - return True, 'it is a bucket for this very run' - OLD_PATTERN = 'test-b2-cli-' - if bucket.name.startswith(self.general_bucket_name_prefix) or bucket.name.startswith(OLD_PATTERN): # yapf: disable - if BUCKET_CREATED_AT_MILLIS in bucket.bucket_info: - delete_older_than = current_time_millis() - BUCKET_CLEANUP_PERIOD_MILLIS - this_bucket_creation_time = bucket.bucket_info[BUCKET_CREATED_AT_MILLIS] - if int(this_bucket_creation_time) < delete_older_than: - return True, f"this_bucket_creation_time={this_bucket_creation_time} < delete_older_than={delete_older_than}" - else: - return True, 'undefined ' + BUCKET_CREATED_AT_MILLIS - return False, '' - - def clean_buckets(self): - buckets = self.api.list_buckets() - print('Total bucket count:', len(buckets)) - for bucket in buckets: - should_remove, why = self._should_remove_bucket(bucket) - if not should_remove: - print(f'Skipping bucket removal: "{bucket.name}"') - continue - - print('Trying to remove bucket:', bucket.name, 'because', why) - try: - self.clean_bucket(bucket) - except (BucketIdNotFound, v3BucketIdNotFound): - print('It seems that bucket %s has already been removed' % (bucket.name,)) - buckets = self.api.list_buckets() - print('Total bucket count after cleanup:', len(buckets)) - for bucket in buckets: - print(bucket) - @backoff.on_exception( backoff.expo, TooManyRequests, @@ -137,7 +94,7 @@ def clean_buckets(self): ) def clean_bucket(self, bucket: Union[Bucket, str]): if isinstance(bucket, str): - bucket = 
self.api.get_bucket_by_name(bucket) + bucket = self.get_bucket_by_name(bucket) files_leftover = False file_versions = bucket.ls(latest_only=False, recursive=True) @@ -146,7 +103,7 @@ def clean_bucket(self, bucket: Union[Bucket, str]): if file_version_info.file_retention: if file_version_info.file_retention.mode == RetentionMode.GOVERNANCE: print('Removing retention from file version:', file_version_info.id_) - self.api.update_file_retention( + self.update_file_retention( file_version_info.id_, file_version_info.file_name, NO_RETENTION_FILE_SETTING, True ) @@ -166,12 +123,12 @@ def clean_bucket(self, bucket: Union[Bucket, str]): ) if file_version_info.legal_hold.is_on(): print('Removing legal hold from file version:', file_version_info.id_) - self.api.update_file_legal_hold( + self.update_file_legal_hold( file_version_info.id_, file_version_info.file_name, LegalHold.OFF ) print('Removing file version:', file_version_info.id_) try: - self.api.delete_file_version(file_version_info.id_, file_version_info.file_name) + self.delete_file_version(file_version_info.id_, file_version_info.file_name) except FileNotPresent: print( 'It seems that file version %s has already been removed' % @@ -183,13 +140,39 @@ def clean_bucket(self, bucket: Union[Bucket, str]): else: print('Removing bucket:', bucket.name) try: - self.api.delete_bucket(bucket) + self.delete_bucket(bucket) except BucketIdNotFound: print('It seems that bucket %s has already been removed' % (bucket.name,)) print() + def clean_buckets(self): + for bucket in self.buckets: + with contextlib.suppress(BucketIdNotFound, v3BucketIdNotFound, NonExistentBucket): + self.clean_bucket(bucket) + self.buckets = [] + + def clean_all_buckets(self): + buckets = self.list_buckets() + print(f'Total bucket count: {len(buckets)}') + + for bucket in buckets: + if not bucket.name.startswith((BUCKET_NAME_PREFIX, BUCKET_NAME_PREFIX_OLD)): + print(f'Skipping bucket removal: "{bucket.name}"') + continue + + print(f'Removing bucket: 
"{bucket.name}"') + try: + self.clean_bucket(bucket) + except (BucketIdNotFound, v3BucketIdNotFound): + print(f'It seems that bucket "{bucket.name}" has already been removed') + + buckets = self.list_buckets() + print(f'Total bucket count after cleanup: {len(buckets)}') + for bucket in buckets: + print(bucket) + def count_and_print_buckets(self) -> int: - buckets = self.api.list_buckets() + buckets = self.list_buckets() count = len(buckets) print(f'Total bucket count at {datetime.now()}: {count}') for i, bucket in enumerate(buckets, start=1): @@ -248,11 +231,30 @@ def read_from(self, f): self.string = str(e) +@dataclasses.dataclass +class CommandResult: + status: int + stdout: str + stderr: str + + @property + def success(self): + return self.status == 0 + + @property + def failure(self): + return not self.success + + @property + def output(self): + return self.stdout + self.stderr + + def run_command( cmd: str, args: Optional[List[Union[str, Path, int]]] = None, additional_env: Optional[dict] = None, -): +) -> CommandResult: """ :param cmd: a command to run :param args: command's arguments @@ -296,7 +298,8 @@ def run_command( stderr_decoded = remove_warnings(stderr.get_string().decode('utf-8', errors='replace')) print_output(p.returncode, stdout_decoded, stderr_decoded) - return p.returncode, stdout_decoded, stderr_decoded + + return CommandResult(p.returncode, stdout_decoded, stderr_decoded) class EnvVarTestContext: @@ -350,27 +353,41 @@ class CommandLine: re.compile(r'Trying to print: .*'), ] - def __init__(self, command, account_id, application_key, realm, bucket_name_prefix): + def __init__(self, command, account_id, application_key, realm): self.command = command self.account_id = account_id self.application_key = application_key self.realm = realm - self.bucket_name_prefix = bucket_name_prefix self.env_var_test_context = EnvVarTestContext(SqliteAccountInfo().filename) self.account_info_file_name = SqliteAccountInfo().filename - - def 
generate_bucket_name(self): - return self.bucket_name_prefix + bucket_name_part( - BUCKET_NAME_LENGTH - len(self.bucket_name_prefix) - ) - - def run_command(self, args, additional_env: Optional[dict] = None): - """ - Runs the command with the given arguments, returns a tuple in form of - (succeeded, stdout) - """ - status, stdout, stderr = run_command(self.command, args, additional_env) - return status == 0 and stderr == '', stdout + self.buckets = set() + + def run(self, args, additional_env: Optional[dict] = None) -> CommandResult: + if args: + if args[0] == 'create-bucket': + raise ValueError(f'use {type(self).__name__}.create_bucket instead') + elif args[0] == 'delete-bucket': + raise ValueError(f'use {type(self).__name__}.delete_bucket instead') + + return run_command(self.command, args, additional_env) + + def create_bucket( + self, bucket_name, *args, additional_env: Optional[dict] = None + ) -> CommandResult: + args = ['create-bucket', bucket_name] + [str(arg) for arg in args] + result = run_command(self.command, args, additional_env) + if result.success: + self.buckets.add(bucket_name) + return result + + def delete_bucket( + self, bucket_name, *args, additional_env: Optional[dict] = None + ) -> CommandResult: + args = ['delete-bucket', bucket_name] + [str(arg) for arg in args] + result = run_command(self.command, args, additional_env) + if result.success: + self.buckets.discard(bucket_name) + return result def should_succeed( self, @@ -383,21 +400,21 @@ def should_succeed( if there was an error; otherwise, returns the stdout of the command as as string. 
""" - status, stdout, stderr = run_command(self.command, args, additional_env) - assert status == 0, f'FAILED with status {status}, stderr={stderr}' + result = self.run(args, additional_env) + assert result.success, f'FAILED with status {result.status}, stderr={result.stderr}' - if stderr != '': - for line in (s.strip() for s in stderr.split(os.linesep)): + if result.stderr: + for line in (s.strip() for s in result.stderr.split(os.linesep)): assert any(p.match(line) for p in self.EXPECTED_STDERR_PATTERNS), \ f'Unexpected stderr line: {repr(line)}' - if expected_pattern is not None: - assert re.search(expected_pattern, stdout), \ - f'did not match pattern="{expected_pattern}", stdout="{stdout}"' + if expected_pattern: + assert re.search(expected_pattern, result.stdout), \ + f'did not match pattern="{expected_pattern}", stdout="{result.stdout}"' - return stdout + return result.stdout - def should_succeed_json(self, args, additional_env: Optional[dict] = None): + def should_succeed_json(self, args, additional_env: Optional[dict] = None) -> dict: """ Runs the command-line with the given arguments. Raises an exception if there was an error; otherwise, treats the stdout as JSON and returns @@ -405,18 +422,18 @@ def should_succeed_json(self, args, additional_env: Optional[dict] = None): """ return json.loads(self.should_succeed(args, additional_env=additional_env)) - def should_fail(self, args, expected_pattern, additional_env: Optional[dict] = None): + def should_fail(self, args, expected_pattern, additional_env: Optional[dict] = None) -> None: """ Runs the command-line with the given args, expecting the given pattern to appear in stderr. 
""" - status, stdout, stderr = run_command(self.command, args, additional_env) - assert status != 0, 'ERROR: should have failed' + result = self.run(args, additional_env) + assert result.failure, 'ERROR: should have failed' - assert re.search(expected_pattern, stdout + stderr), \ - f'did not match pattern="{expected_pattern}", stdout="{stdout}", stderr="{stderr}"' + assert re.search(expected_pattern, result.output), \ + f'did not match pattern="{expected_pattern}", stdout="{result.stdout}", stderr="{result.stderr}"' - def reauthorize(self, check_key_capabilities=False): + def reauthorize(self, check_key_capabilities=False) -> None: """Clear and authorize again to the account.""" self.should_succeed(['clear-account']) self.should_succeed( diff --git a/test/integration/test_b2_command_line.py b/test/integration/test_b2_command_line.py index c3ef169d0..ad612437d 100644 --- a/test/integration/test_b2_command_line.py +++ b/test/integration/test_b2_command_line.py @@ -18,7 +18,7 @@ import re from pathlib import Path -from typing import Optional, Tuple +from typing import Optional import pytest @@ -26,11 +26,7 @@ from b2.console_tool import current_time_millis -from .helpers import BUCKET_CREATED_AT_MILLIS, ONE_DAY_MILLIS, ONE_HOUR_MILLIS, SSE_B2_AES, SSE_C_AES, SSE_C_AES_2, SSE_NONE, TempDir, file_mod_time_millis, random_hex, read_file, set_file_mod_time_millis, should_equal, write_file - - -def get_bucketinfo() -> Tuple[str, str]: - return '--bucketInfo', json.dumps({BUCKET_CREATED_AT_MILLIS: str(current_time_millis())}), +from .helpers import ONE_DAY_MILLIS, ONE_HOUR_MILLIS, SSE_B2_AES, SSE_C_AES, SSE_C_AES_2, SSE_NONE, TempDir, file_mod_time_millis, generate_bucket_name, random_hex, read_file, set_file_mod_time_millis, should_equal, write_file def test_download(b2_tool, bucket_name): @@ -160,25 +156,20 @@ def test_basic(b2_tool, bucket_name): file_to_upload, ), ) # \r? 
is for Windows, as $ doesn't match \r\n - to_be_removed_bucket_name = b2_tool.generate_bucket_name() - b2_tool.should_succeed( - [ - 'create-bucket', - to_be_removed_bucket_name, - 'allPublic', - *get_bucketinfo(), - ], - ) - b2_tool.should_succeed(['delete-bucket', to_be_removed_bucket_name],) - b2_tool.should_fail( - ['delete-bucket', to_be_removed_bucket_name], - re.compile(r'^ERROR: Bucket with id=\w* not found\s*$') - ) + + to_be_removed_bucket_name = generate_bucket_name() + assert b2_tool.create_bucket(to_be_removed_bucket_name, 'allPublic').success + assert b2_tool.delete_bucket(to_be_removed_bucket_name).success + + result = b2_tool.delete_bucket(to_be_removed_bucket_name) + assert result.failure + assert re.search(r'^ERROR: Bucket with id=\w* not found\s*$', result.output) + # Check logging settings - b2_tool.should_fail( - ['delete-bucket', to_be_removed_bucket_name, '--debugLogs'], - re.compile(r'^ERROR: Bucket with id=\w* not found\s*$') - ) + result = b2_tool.delete_bucket(to_be_removed_bucket_name, '--debugLogs') + assert result.failure + assert re.search(r'^ERROR: Bucket with id=\w* not found\s*$', result.output) + stack_trace_in_log = r'Traceback \(most recent call last\):.*Bucket with id=\w* not found' # the two regexes below depend on log message from urllib3, which is not perfect, but this test needs to @@ -198,12 +189,14 @@ def test_basic(b2_tool, bucket_name): assert re.search(log_file_regex, log), log os.remove('b2_cli.log') - b2_tool.should_fail(['delete-bucket', to_be_removed_bucket_name, '--verbose'], stderr_regex) + result = b2_tool.delete_bucket(to_be_removed_bucket_name, '--verbose') + assert result.failure + assert re.search(stderr_regex, result.output) assert not os.path.exists('b2_cli.log') - b2_tool.should_fail( - ['delete-bucket', to_be_removed_bucket_name, '--verbose', '--debugLogs'], stderr_regex - ) + result = b2_tool.delete_bucket(to_be_removed_bucket_name, '--verbose', '--debugLogs') + assert result.failure + assert 
re.search(stderr_regex, result.output) with open('b2_cli.log', 'r') as logfile: log = logfile.read() assert re.search(log_file_regex, log), log @@ -216,7 +209,7 @@ def test_bucket(b2_tool, bucket_name): "fileNamePrefix": "" }]""" output = b2_tool.should_succeed_json( - ['update-bucket', '--lifecycleRules', rules, bucket_name, 'allPublic', *get_bucketinfo()], + ['update-bucket', '--lifecycleRules', rules, bucket_name, 'allPublic'], ) assert output["lifecycleRules"] == [ { @@ -227,10 +220,9 @@ def test_bucket(b2_tool, bucket_name): ] -def test_key_restrictions(b2_api, b2_tool, bucket_name): - - second_bucket_name = b2_tool.generate_bucket_name() - b2_tool.should_succeed(['create-bucket', second_bucket_name, 'allPublic', *get_bucketinfo()],) +def test_key_restrictions(b2_tool, create_test_bucket): + first_bucket_name = create_test_bucket() + second_bucket_name = create_test_bucket() key_one_name = 'clt-testKey-01' + random_hex(6) created_key_stdout = b2_tool.should_succeed( @@ -246,7 +238,7 @@ def test_key_restrictions(b2_api, b2_tool, bucket_name): ['authorize-account', '--environment', b2_tool.realm, key_one_id, key_one], ) - b2_tool.should_succeed(['get-bucket', bucket_name],) + b2_tool.should_succeed(['get-bucket', first_bucket_name],) b2_tool.should_succeed(['get-bucket', second_bucket_name],) key_two_name = 'clt-testKey-02' + random_hex(6) @@ -254,7 +246,7 @@ def test_key_restrictions(b2_api, b2_tool, bucket_name): [ 'create-key', '--bucket', - bucket_name, + first_bucket_name, key_two_name, 'listFiles,listBuckets,readFiles', ] @@ -264,13 +256,13 @@ def test_key_restrictions(b2_api, b2_tool, bucket_name): b2_tool.should_succeed( ['authorize-account', '--environment', b2_tool.realm, key_two_id, key_two], ) - b2_tool.should_succeed(['get-bucket', bucket_name],) - b2_tool.should_succeed(['ls', bucket_name],) + b2_tool.should_succeed(['get-bucket', first_bucket_name],) + b2_tool.should_succeed(['ls', first_bucket_name],) - failed_bucket_err = r'ERROR: Application 
key is restricted to bucket: ' + bucket_name + failed_bucket_err = r'ERROR: Application key is restricted to bucket: ' + first_bucket_name b2_tool.should_fail(['get-bucket', second_bucket_name], failed_bucket_err) - failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name + failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + first_bucket_name b2_tool.should_fail(['ls', second_bucket_name], failed_list_files_err) # reauthorize with more capabilities for clean up @@ -280,19 +272,20 @@ def test_key_restrictions(b2_api, b2_tool, bucket_name): b2_tool.application_key ] ) - b2_api.clean_bucket(second_bucket_name) b2_tool.should_succeed(['delete-key', key_one_id]) b2_tool.should_succeed(['delete-key', key_two_id]) -def test_account(b2_tool, bucket_name): +def test_account(b2_tool, create_test_bucket): # actually a high level operations test - we run bucket tests here since this test doesn't use it - b2_tool.should_succeed(['delete-bucket', bucket_name]) - new_bucket_name = b2_tool.generate_bucket_name() + bucket_name = create_test_bucket() + assert b2_tool.delete_bucket(bucket_name).success + new_bucket_name = generate_bucket_name() + # apparently server behaves erratically when we delete a bucket and recreate it right away - b2_tool.should_succeed(['create-bucket', new_bucket_name, 'allPrivate', *get_bucketinfo()]) + assert b2_tool.create_bucket(new_bucket_name, 'allPrivate').success b2_tool.should_succeed(['update-bucket', new_bucket_name, 'allPublic']) - b2_tool.should_succeed(['delete-bucket', new_bucket_name]) + assert b2_tool.delete_bucket(new_bucket_name).success with b2_tool.env_var_test_context: b2_tool.should_succeed(['clear-account']) @@ -316,13 +309,15 @@ def test_account(b2_tool, bucket_name): # first, let's make sure "create-bucket" doesn't work without auth data - i.e. 
that the sqlite file hs been # successfully removed - bucket_name = b2_tool.generate_bucket_name() - b2_tool.should_fail( - ['create-bucket', bucket_name, 'allPrivate'], + bucket_name = generate_bucket_name() + result = b2_tool.create_bucket(bucket_name, 'allPrivate') + assert result.failure + assert re.search( r'ERROR: Missing account data: \'NoneType\' object is not subscriptable (\(key 0\) )? ' r'Use: b2(\.exe)? authorize-account or provide auth data with "B2_APPLICATION_KEY_ID" and ' - r'"B2_APPLICATION_KEY" environment variables' + r'"B2_APPLICATION_KEY" environment variables', result.output ) + os.remove(new_creds) # then, let's see that auth data from env vars works @@ -330,9 +325,9 @@ def test_account(b2_tool, bucket_name): os.environ['B2_APPLICATION_KEY_ID'] = os.environ['B2_TEST_APPLICATION_KEY_ID'] os.environ['B2_ENVIRONMENT'] = b2_tool.realm - bucket_name = b2_tool.generate_bucket_name() - b2_tool.should_succeed(['create-bucket', bucket_name, 'allPrivate', *get_bucketinfo()]) - b2_tool.should_succeed(['delete-bucket', bucket_name]) + bucket_name = generate_bucket_name() + assert b2_tool.create_bucket(bucket_name, 'allPrivate').success + assert b2_tool.delete_bucket(bucket_name).success assert os.path.exists(new_creds), 'sqlite file not created' os.environ.pop('B2_APPLICATION_KEY') @@ -340,16 +335,22 @@ def test_account(b2_tool, bucket_name): # last, let's see that providing only one of the env vars results in a failure os.environ['B2_APPLICATION_KEY'] = os.environ['B2_TEST_APPLICATION_KEY'] - b2_tool.should_fail( - ['create-bucket', bucket_name, 'allPrivate'], - r'Please provide both "B2_APPLICATION_KEY" and "B2_APPLICATION_KEY_ID" environment variables or none of them' + result = b2_tool.create_bucket(bucket_name, 'allPrivate') + assert result.failure + assert re.search( + 'Please provide both "B2_APPLICATION_KEY" and "B2_APPLICATION_KEY_ID" environment variables or none of them', + result.output ) + os.environ.pop('B2_APPLICATION_KEY') 
os.environ['B2_APPLICATION_KEY_ID'] = os.environ['B2_TEST_APPLICATION_KEY_ID'] - b2_tool.should_fail( - ['create-bucket', bucket_name, 'allPrivate'], - r'Please provide both "B2_APPLICATION_KEY" and "B2_APPLICATION_KEY_ID" environment variables or none of them' + + result = b2_tool.create_bucket(bucket_name, 'allPrivate') + assert result.failure + assert re.search( + 'Please provide both "B2_APPLICATION_KEY" and "B2_APPLICATION_KEY_ID" environment variables or none of them', + result.output ) os.environ.pop('B2_APPLICATION_KEY_ID') @@ -415,23 +416,15 @@ def encryption_summary(sse_dict, file_info): return encryption -def test_sync_up(b2_tool, bucket_name): - sync_up_helper(b2_tool, bucket_name, 'sync') - - -def test_sync_up_sse_b2(b2_tool, bucket_name): - sync_up_helper(b2_tool, bucket_name, 'sync', encryption=SSE_B2_AES) - - -def test_sync_up_sse_c(b2_tool, bucket_name): - sync_up_helper(b2_tool, bucket_name, 'sync', encryption=SSE_C_AES) - - -def test_sync_up_no_prefix(b2_tool, bucket_name): - sync_up_helper(b2_tool, bucket_name, '') - - -def sync_up_helper(b2_tool, bucket_name, dir_, encryption=None): +@pytest.mark.parametrize( + "dir_,encryption", [ + ('sync', None), + ('sync', SSE_B2_AES), + ('sync', SSE_C_AES), + ('', None), + ] +) +def test_sync_up(b2_tool, bucket_name, dir_, encryption): sync_point_parts = [bucket_name] if dir_: sync_point_parts.append(dir_) @@ -670,19 +663,14 @@ def sync_up_helper(b2_tool, bucket_name, dir_, encryption=None): ) -def test_sync_down(b2_tool, bucket_name): - sync_down_helper(b2_tool, bucket_name, 'sync') - - -def test_sync_down_no_prefix(b2_tool, bucket_name): - sync_down_helper(b2_tool, bucket_name, '') - - -def test_sync_down_sse_c_no_prefix(b2_tool, bucket_name): - sync_down_helper(b2_tool, bucket_name, '', SSE_C_AES) - - -def sync_down_helper(b2_tool, bucket_name, folder_in_bucket, encryption=None): +@pytest.mark.parametrize( + 'folder_in_bucket,encryption', [ + ('sync', None), + ('', None), + ('', SSE_C_AES), + ] +) +def 
test_sync_down(b2_tool, bucket_name, folder_in_bucket, encryption): file_to_upload = 'README.md' @@ -772,50 +760,6 @@ def sync_down_helper(b2_tool, bucket_name, folder_in_bucket, encryption=None): ) -def test_sync_copy(b2_api, b2_tool, bucket_name): - prepare_and_run_sync_copy_tests(b2_api, b2_tool, bucket_name, 'sync') - - -def test_sync_copy_no_prefix_default_encryption(b2_api, b2_tool, bucket_name): - prepare_and_run_sync_copy_tests( - b2_api, b2_tool, bucket_name, '', destination_encryption=None, expected_encryption=SSE_NONE - ) - - -def test_sync_copy_no_prefix_no_encryption(b2_api, b2_tool, bucket_name): - prepare_and_run_sync_copy_tests( - b2_api, - b2_tool, - bucket_name, - '', - destination_encryption=SSE_NONE, - expected_encryption=SSE_NONE - ) - - -def test_sync_copy_no_prefix_sse_b2(b2_api, b2_tool, bucket_name): - prepare_and_run_sync_copy_tests( - b2_api, - b2_tool, - bucket_name, - '', - destination_encryption=SSE_B2_AES, - expected_encryption=SSE_B2_AES, - ) - - -def test_sync_copy_no_prefix_sse_c(b2_api, b2_tool, bucket_name): - prepare_and_run_sync_copy_tests( - b2_api, - b2_tool, - bucket_name, - '', - destination_encryption=SSE_C_AES, - expected_encryption=SSE_C_AES, - source_encryption=SSE_C_AES_2, - ) - - def test_sync_copy_sse_c_single_bucket(b2_tool, bucket_name): run_sync_copy_with_basic_checks( b2_tool=b2_tool, @@ -847,15 +791,26 @@ def test_sync_copy_sse_c_single_bucket(b2_tool, bucket_name): ) -def prepare_and_run_sync_copy_tests( - b2_api, +@pytest.mark.parametrize( + "folder_in_bucket,destination_encryption,expected_encryption,source_encryption", [ + ('sync', None, SSE_NONE, None), + ('', None, SSE_NONE, None), + ('', SSE_NONE, SSE_NONE, None), + ('', SSE_B2_AES, SSE_B2_AES, None), + ('', SSE_C_AES, SSE_C_AES, SSE_C_AES_2), + ] +) +def test_sync_copy( b2_tool, - bucket_name, + create_test_bucket, folder_in_bucket, - destination_encryption=None, - expected_encryption=SSE_NONE, - source_encryption=None, + destination_encryption, + 
expected_encryption, + source_encryption, ): + bucket_name = create_test_bucket() + other_bucket_name = create_test_bucket() + b2_sync_point = 'b2:%s' % bucket_name if folder_in_bucket: b2_sync_point += '/' + folder_in_bucket @@ -863,11 +818,6 @@ def prepare_and_run_sync_copy_tests( else: b2_file_prefix = '' - other_bucket_name = b2_tool.generate_bucket_name() - success, _ = b2_tool.run_command( - ['create-bucket', other_bucket_name, 'allPublic', *get_bucketinfo()] - ) - other_b2_sync_point = 'b2:%s' % other_bucket_name if folder_in_bucket: other_b2_sync_point += '/' + folder_in_bucket @@ -901,8 +851,6 @@ def prepare_and_run_sync_copy_tests( file_version_summary_with_encryption(file_versions), ) - b2_api.clean_bucket(other_bucket_name) - def run_sync_copy_with_basic_checks( b2_tool, @@ -1020,7 +968,7 @@ def test_sync_long_path(b2_tool, bucket_name): should_equal(['+ ' + long_path], file_version_summary(file_versions)) -def test_default_sse_b2(b2_api, b2_tool, bucket_name): +def test_default_sse_b2(b2_tool, bucket_name): # Set default encryption via update-bucket bucket_info = b2_tool.should_succeed_json(['get-bucket', bucket_name]) bucket_default_sse = {'mode': 'none'} @@ -1043,23 +991,16 @@ def test_default_sse_b2(b2_api, b2_tool, bucket_name): should_equal(bucket_default_sse, bucket_info['defaultServerSideEncryption']) # Set default encryption via create-bucket - second_bucket_name = b2_tool.generate_bucket_name() - b2_tool.should_succeed( - [ - 'create-bucket', - '--defaultServerSideEncryption=SSE-B2', - second_bucket_name, - 'allPublic', - *get_bucketinfo(), - ] - ) + second_bucket_name = generate_bucket_name() + assert b2_tool.create_bucket( + second_bucket_name, '--defaultServerSideEncryption=SSE-B2', 'allPublic' + ).success second_bucket_info = b2_tool.should_succeed_json(['get-bucket', second_bucket_name]) second_bucket_default_sse = { 'algorithm': 'AES256', 'mode': 'SSE-B2', } should_equal(second_bucket_default_sse, 
second_bucket_info['defaultServerSideEncryption']) - b2_api.clean_bucket(second_bucket_name) def test_sse_b2(b2_tool, bucket_name): @@ -1428,16 +1369,8 @@ def test_sse_c(b2_tool, bucket_name): ) -def test_file_lock(b2_tool, application_key_id, application_key, b2_api): - lock_disabled_bucket_name = b2_tool.generate_bucket_name() - b2_tool.should_succeed( - [ - 'create-bucket', - lock_disabled_bucket_name, - 'allPrivate', - *get_bucketinfo(), - ], - ) +def test_file_lock(b2_tool, create_test_bucket): + lock_disabled_bucket_name = create_test_bucket("allPrivate") file_to_upload = 'README.md' now_millis = current_time_millis() @@ -1482,16 +1415,11 @@ def test_file_lock(b2_tool, application_key_id, application_key, b2_api): 'compliance', '--defaultRetentionPeriod', '7 days' ], r'ERROR: The bucket is not file lock enabled \(bucket_missing_file_lock\)' ) - lock_enabled_bucket_name = b2_tool.generate_bucket_name() - b2_tool.should_succeed( - [ - 'create-bucket', - lock_enabled_bucket_name, - 'allPrivate', - '--fileLockEnabled', - *get_bucketinfo(), - ], - ) + lock_enabled_bucket_name = generate_bucket_name() + assert b2_tool.create_bucket( + lock_enabled_bucket_name, 'allPrivate', '--fileLockEnabled' + ).success + updated_bucket = b2_tool.should_succeed_json( [ 'update-bucket', @@ -1699,18 +1627,6 @@ def test_file_lock(b2_tool, application_key_id, application_key, b2_api): not_lockable_file['fileId'] ) - # ---- perform test cleanup ---- - b2_tool.should_succeed( - ['authorize-account', '--environment', b2_tool.realm, application_key_id, application_key], - ) - # b2_tool.reauthorize(check_key_capabilities=False) - buckets = [ - bucket for bucket in b2_api.api.list_buckets() - if bucket.name in {lock_enabled_bucket_name, lock_disabled_bucket_name} - ] - for bucket in buckets: - b2_api.clean_bucket(bucket) - def file_lock_without_perms_test( b2_tool, lock_enabled_bucket_name, lock_disabled_bucket_name, lockable_file_id, @@ -1905,7 +1821,7 @@ def test_profile_switch(b2_tool): 
os.environ[B2_ACCOUNT_INFO_ENV_VAR] = B2_ACCOUNT_INFO -def test_replication_basic(b2_api, b2_tool, bucket_name): +def test_replication_basic(b2_tool, bucket_name): key_one_name = 'clt-testKey-01' + random_hex(6) created_key_stdout = b2_tool.should_succeed( [ @@ -1988,17 +1904,13 @@ def test_replication_basic(b2_api, b2_tool, bucket_name): source_replication_configuration_json = json.dumps(source_replication_configuration) # create a source bucket and set up replication to destination bucket - source_bucket_name = b2_tool.generate_bucket_name() - b2_tool.should_succeed( - [ - 'create-bucket', - source_bucket_name, - 'allPublic', - '--replication', - source_replication_configuration_json, - *get_bucketinfo(), - ] - ) + source_bucket_name = generate_bucket_name() + assert b2_tool.create_bucket( + source_bucket_name, + 'allPublic', + '--replication', + source_replication_configuration_json, + ).success source_bucket = b2_tool.should_succeed_json(['get-bucket', source_bucket_name]) # test that all replication rules are present in source bucket @@ -2054,21 +1966,12 @@ def test_replication_basic(b2_api, b2_tool, bucket_name): b2_tool.should_succeed(['delete-key', key_one_id]) b2_tool.should_succeed(['delete-key', key_two_id]) - b2_api.clean_bucket(source_bucket_name) -def test_replication_setup(b2_api, b2_tool, bucket_name): - source_bucket_name = b2_tool.generate_bucket_name() - b2_tool.should_succeed( - [ - 'create-bucket', - source_bucket_name, - 'allPublic', - '--fileLockEnabled', - *get_bucketinfo(), - ] - ) - destination_bucket_name = bucket_name +def test_replication_setup(b2_tool, create_test_bucket): + destination_bucket_name = create_test_bucket() + source_bucket_name = create_test_bucket(is_file_lock_enabled=True) + b2_tool.should_succeed(['replication-setup', source_bucket_name, destination_bucket_name]) destination_bucket_old = b2_tool.should_succeed_json(['get-bucket', destination_bucket_name]) @@ -2110,13 +2013,12 @@ def test_replication_setup(b2_api, 
b2_tool, bucket_name): 'sourceToDestinationKeyMapping'].items(): b2_tool.should_succeed(['delete-key', key_one_id]) b2_tool.should_succeed(['delete-key', key_two_id]) - b2_api.clean_bucket(source_bucket_name) assert destination_bucket_old['replication']['asReplicationDestination'][ 'sourceToDestinationKeyMapping'] == destination_bucket['replication'][ 'asReplicationDestination']['sourceToDestinationKeyMapping'] -def test_replication_monitoring(b2_tool, bucket_name, b2_api): +def test_replication_monitoring(b2_tool, bucket_name): # ---------------- set up keys ---------------- key_one_name = 'clt-testKey-01' + random_hex(6) @@ -2195,18 +2097,14 @@ def test_replication_monitoring(b2_tool, bucket_name, b2_api): source_replication_configuration_json = json.dumps(source_replication_configuration) # create a source bucket and set up replication to destination bucket - source_bucket_name = b2_tool.generate_bucket_name() - b2_tool.should_succeed( - [ - 'create-bucket', - source_bucket_name, - 'allPublic', - '--fileLockEnabled', - '--replication', - source_replication_configuration_json, - *get_bucketinfo(), - ] - ) + source_bucket_name = generate_bucket_name() + assert b2_tool.create_bucket( + source_bucket_name, + 'allPublic', + '--fileLockEnabled', + '--replication', + source_replication_configuration_json, + ).success # make test data uploaded_a = b2_tool.should_succeed_json( @@ -2347,8 +2245,6 @@ def test_replication_monitoring(b2_tool, bucket_name, b2_api): } for first, second in itertools.product(['FAILED', 'PENDING'], ['FAILED', 'PENDING']) ] - b2_api.clean_bucket(source_bucket_name) - def test_enable_file_lock_first_retention_second(b2_tool, b2_api, bucket_name): # enable file lock only @@ -2365,8 +2261,6 @@ def test_enable_file_lock_first_retention_second(b2_tool, b2_api, bucket_name): # attempt to re-enable should be a noop b2_tool.should_succeed(['update-bucket', bucket_name, '--fileLockEnabled']) - b2_api.clean_bucket(bucket_name) - def 
test_enable_file_lock_and_set_retention_at_once(b2_tool, b2_api, bucket_name): # attempt setting retention without file lock enabled @@ -2388,8 +2282,6 @@ def test_enable_file_lock_and_set_retention_at_once(b2_tool, b2_api, bucket_name # attempt to re-enable should be a noop b2_tool.should_succeed(['update-bucket', bucket_name, '--fileLockEnabled']) - b2_api.clean_bucket(bucket_name) - def _assert_file_lock_configuration( b2_tool, From 3019a63ea237dbb2abc211bf7e1ea1d81ca23e82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Le=C5=9Bniewski?= Date: Wed, 21 Sep 2022 08:51:29 +0200 Subject: [PATCH 2/7] Unify error messages printed in failed tests --- test/integration/helpers.py | 18 +++++----- test/integration/test_b2_command_line.py | 46 ++++++++++++------------ 2 files changed, 30 insertions(+), 34 deletions(-) diff --git a/test/integration/helpers.py b/test/integration/helpers.py index 68820aeba..0a80c541b 100644 --- a/test/integration/helpers.py +++ b/test/integration/helpers.py @@ -237,13 +237,11 @@ class CommandResult: stdout: str stderr: str - @property - def success(self): - return self.status == 0 + def should_succeed(self): + assert self.status == 0, f'FAILED with status {self.status}, stderr={self.stderr}' - @property - def failure(self): - return not self.success + def should_fail(self): + assert self.status != 0, 'ERROR: should have failed' @property def output(self): @@ -376,7 +374,7 @@ def create_bucket( ) -> CommandResult: args = ['create-bucket', bucket_name] + [str(arg) for arg in args] result = run_command(self.command, args, additional_env) - if result.success: + if result.status == 0: self.buckets.add(bucket_name) return result @@ -385,7 +383,7 @@ def delete_bucket( ) -> CommandResult: args = ['delete-bucket', bucket_name] + [str(arg) for arg in args] result = run_command(self.command, args, additional_env) - if result.success: + if result.status == 0: self.buckets.discard(bucket_name) return result @@ -401,7 +399,7 @@ def should_succeed( as 
as string. """ result = self.run(args, additional_env) - assert result.success, f'FAILED with status {result.status}, stderr={result.stderr}' + result.should_succeed() if result.stderr: for line in (s.strip() for s in result.stderr.split(os.linesep)): @@ -428,7 +426,7 @@ def should_fail(self, args, expected_pattern, additional_env: Optional[dict] = N to appear in stderr. """ result = self.run(args, additional_env) - assert result.failure, 'ERROR: should have failed' + result.should_fail() assert re.search(expected_pattern, result.output), \ f'did not match pattern="{expected_pattern}", stdout="{result.stdout}", stderr="{result.stderr}"' diff --git a/test/integration/test_b2_command_line.py b/test/integration/test_b2_command_line.py index ad612437d..dffcdbcf4 100644 --- a/test/integration/test_b2_command_line.py +++ b/test/integration/test_b2_command_line.py @@ -158,16 +158,16 @@ def test_basic(b2_tool, bucket_name): ) # \r? is for Windows, as $ doesn't match \r\n to_be_removed_bucket_name = generate_bucket_name() - assert b2_tool.create_bucket(to_be_removed_bucket_name, 'allPublic').success - assert b2_tool.delete_bucket(to_be_removed_bucket_name).success + b2_tool.create_bucket(to_be_removed_bucket_name, 'allPublic').should_succeed() + b2_tool.delete_bucket(to_be_removed_bucket_name).should_succeed() result = b2_tool.delete_bucket(to_be_removed_bucket_name) - assert result.failure + result.should_fail() assert re.search(r'^ERROR: Bucket with id=\w* not found\s*$', result.output) # Check logging settings result = b2_tool.delete_bucket(to_be_removed_bucket_name, '--debugLogs') - assert result.failure + result.should_fail() assert re.search(r'^ERROR: Bucket with id=\w* not found\s*$', result.output) stack_trace_in_log = r'Traceback \(most recent call last\):.*Bucket with id=\w* not found' @@ -190,12 +190,12 @@ def test_basic(b2_tool, bucket_name): os.remove('b2_cli.log') result = b2_tool.delete_bucket(to_be_removed_bucket_name, '--verbose') - assert result.failure + 
result.should_fail() assert re.search(stderr_regex, result.output) assert not os.path.exists('b2_cli.log') result = b2_tool.delete_bucket(to_be_removed_bucket_name, '--verbose', '--debugLogs') - assert result.failure + result.should_fail() assert re.search(stderr_regex, result.output) with open('b2_cli.log', 'r') as logfile: log = logfile.read() @@ -279,13 +279,13 @@ def test_key_restrictions(b2_tool, create_test_bucket): def test_account(b2_tool, create_test_bucket): # actually a high level operations test - we run bucket tests here since this test doesn't use it bucket_name = create_test_bucket() - assert b2_tool.delete_bucket(bucket_name).success + b2_tool.delete_bucket(bucket_name).should_succeed() new_bucket_name = generate_bucket_name() # apparently server behaves erratically when we delete a bucket and recreate it right away - assert b2_tool.create_bucket(new_bucket_name, 'allPrivate').success + b2_tool.create_bucket(new_bucket_name, 'allPrivate').should_succeed() b2_tool.should_succeed(['update-bucket', new_bucket_name, 'allPublic']) - assert b2_tool.delete_bucket(new_bucket_name).success + b2_tool.delete_bucket(new_bucket_name).should_succeed() with b2_tool.env_var_test_context: b2_tool.should_succeed(['clear-account']) @@ -311,7 +311,7 @@ def test_account(b2_tool, create_test_bucket): # successfully removed bucket_name = generate_bucket_name() result = b2_tool.create_bucket(bucket_name, 'allPrivate') - assert result.failure + result.should_fail() assert re.search( r'ERROR: Missing account data: \'NoneType\' object is not subscriptable (\(key 0\) )? ' r'Use: b2(\.exe)? 
authorize-account or provide auth data with "B2_APPLICATION_KEY_ID" and ' @@ -326,8 +326,8 @@ def test_account(b2_tool, create_test_bucket): os.environ['B2_ENVIRONMENT'] = b2_tool.realm bucket_name = generate_bucket_name() - assert b2_tool.create_bucket(bucket_name, 'allPrivate').success - assert b2_tool.delete_bucket(bucket_name).success + b2_tool.create_bucket(bucket_name, 'allPrivate').should_succeed() + b2_tool.delete_bucket(bucket_name).should_succeed() assert os.path.exists(new_creds), 'sqlite file not created' os.environ.pop('B2_APPLICATION_KEY') @@ -336,7 +336,7 @@ def test_account(b2_tool, create_test_bucket): # last, let's see that providing only one of the env vars results in a failure os.environ['B2_APPLICATION_KEY'] = os.environ['B2_TEST_APPLICATION_KEY'] result = b2_tool.create_bucket(bucket_name, 'allPrivate') - assert result.failure + result.should_fail() assert re.search( 'Please provide both "B2_APPLICATION_KEY" and "B2_APPLICATION_KEY_ID" environment variables or none of them', result.output @@ -347,7 +347,7 @@ def test_account(b2_tool, create_test_bucket): os.environ['B2_APPLICATION_KEY_ID'] = os.environ['B2_TEST_APPLICATION_KEY_ID'] result = b2_tool.create_bucket(bucket_name, 'allPrivate') - assert result.failure + result.should_fail() assert re.search( 'Please provide both "B2_APPLICATION_KEY" and "B2_APPLICATION_KEY_ID" environment variables or none of them', result.output @@ -992,9 +992,8 @@ def test_default_sse_b2(b2_tool, bucket_name): # Set default encryption via create-bucket second_bucket_name = generate_bucket_name() - assert b2_tool.create_bucket( - second_bucket_name, '--defaultServerSideEncryption=SSE-B2', 'allPublic' - ).success + b2_tool.create_bucket(second_bucket_name, '--defaultServerSideEncryption=SSE-B2', + 'allPublic').should_succeed() second_bucket_info = b2_tool.should_succeed_json(['get-bucket', second_bucket_name]) second_bucket_default_sse = { 'algorithm': 'AES256', @@ -1416,9 +1415,8 @@ def test_file_lock(b2_tool, 
create_test_bucket): ], r'ERROR: The bucket is not file lock enabled \(bucket_missing_file_lock\)' ) lock_enabled_bucket_name = generate_bucket_name() - assert b2_tool.create_bucket( - lock_enabled_bucket_name, 'allPrivate', '--fileLockEnabled' - ).success + b2_tool.create_bucket(lock_enabled_bucket_name, 'allPrivate', + '--fileLockEnabled').should_succeed() updated_bucket = b2_tool.should_succeed_json( [ @@ -1905,12 +1903,12 @@ def test_replication_basic(b2_tool, bucket_name): # create a source bucket and set up replication to destination bucket source_bucket_name = generate_bucket_name() - assert b2_tool.create_bucket( + b2_tool.create_bucket( source_bucket_name, 'allPublic', '--replication', source_replication_configuration_json, - ).success + ).should_succeed() source_bucket = b2_tool.should_succeed_json(['get-bucket', source_bucket_name]) # test that all replication rules are present in source bucket @@ -2098,13 +2096,13 @@ def test_replication_monitoring(b2_tool, bucket_name): # create a source bucket and set up replication to destination bucket source_bucket_name = generate_bucket_name() - assert b2_tool.create_bucket( + b2_tool.create_bucket( source_bucket_name, 'allPublic', '--fileLockEnabled', '--replication', source_replication_configuration_json, - ).success + ).should_succeed() # make test data uploaded_a = b2_tool.should_succeed_json( From aa945e466a088b70fcd1d117b83f1cd4b492c976 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Le=C5=9Bniewski?= Date: Sun, 11 Sep 2022 14:33:01 +0200 Subject: [PATCH 3/7] Allow only one CI workflow at a time --- .github/workflows/ci.yml | 2 ++ CHANGELOG.md | 1 + 2 files changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a7ad11aad..d4d580b47 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,6 +9,8 @@ on: env: PYTHON_DEFAULT_VERSION: "3.10" +concurrency: continuous-integration + jobs: lint: runs-on: ubuntu-latest diff --git a/CHANGELOG.md 
b/CHANGELOG.md index a1dfe421c..8762ea785 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * GitHub CI got checkout action updated to v3 and setup-python to v4 * Ensured that changelog validation only happens on pull requests * Fix bucket leaks in integration tests +* Allow only one CI workflow at a time ## [3.6.0] - 2022-09-20 From 8f4f431f35a250b359bacd753d959d8b4e0f24fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Le=C5=9Bniewski?= Date: Sun, 11 Sep 2022 14:34:14 +0200 Subject: [PATCH 4/7] Re-enable pytest-xdist for integration tests --- CHANGELOG.md | 1 + noxfile.py | 3 +-- test/integration/helpers.py | 8 +++++--- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8762ea785..65ca7944b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 * Ensured that changelog validation only happens on pull requests * Fix bucket leaks in integration tests * Allow only one CI workflow at a time +* Re-enable pytest-xdist for integration tests ## [3.6.0] - 2022-09-20 diff --git a/noxfile.py b/noxfile.py index 25cf8de10..ea54753c0 100644 --- a/noxfile.py +++ b/noxfile.py @@ -180,9 +180,8 @@ def integration(session): """Run integration tests.""" install_myself(session) session.run('pip', 'install', *REQUIREMENTS_TEST) - #session.run('pytest', '-s', '-x', '-v', '-n', '4', *session.posargs, 'test/integration') session.run( - 'pytest', '-s', '-x', '-v', '-W', 'ignore::DeprecationWarning:rst2ansi.visitor:', + 'pytest', '-s', '-x', '-v', '-n', '4', '-W', 'ignore::DeprecationWarning:rst2ansi.visitor:', *session.posargs, 'test/integration' ) diff --git a/test/integration/helpers.py b/test/integration/helpers.py index 0a80c541b..77cc3bb02 100644 --- a/test/integration/helpers.py +++ b/test/integration/helpers.py @@ -231,7 +231,8 @@ def read_from(self, 
f): self.string = str(e) -@dataclasses.dataclass +# TODO: add slots=True when dropping support for Python 3.9 +@dataclasses.dataclass(frozen=True) class CommandResult: status: int stdout: str @@ -362,9 +363,10 @@ def __init__(self, command, account_id, application_key, realm): def run(self, args, additional_env: Optional[dict] = None) -> CommandResult: if args: - if args[0] == 'create-bucket': + subcommand = args[0].replace('_', '-') + if subcommand == 'create-bucket': raise ValueError(f'use {type(self).__name__}.create_bucket instead') - elif args[0] == 'delete-bucket': + elif subcommand == 'delete-bucket': raise ValueError(f'use {type(self).__name__}.delete_bucket instead') return run_command(self.command, args, additional_env) From fba037141c68b6cca121d53255eff52142f8396e Mon Sep 17 00:00:00 2001 From: Krzysztof Kalinowski Date: Fri, 3 Feb 2023 09:58:40 +0100 Subject: [PATCH 5/7] Ensured proper random seed at start of integration tests Also limited number of parallel tests in CI --- .github/workflows/ci.yml | 1 + test/integration/helpers.py | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3cf8e84b7..5f6ef5815 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -80,6 +80,7 @@ jobs: B2_TEST_APPLICATION_KEY_ID: ${{ secrets.B2_TEST_APPLICATION_KEY_ID }} runs-on: ${{ matrix.os }} strategy: + max-parallel: 5 fail-fast: false matrix: os: ["ubuntu-latest", "macos-latest", "windows-latest"] diff --git a/test/integration/helpers.py b/test/integration/helpers.py index 77cc3bb02..e7e8f701e 100644 --- a/test/integration/helpers.py +++ b/test/integration/helpers.py @@ -21,6 +21,7 @@ import subprocess import sys import threading +import time from datetime import datetime from os import environ, linesep, path @@ -61,6 +62,12 @@ BUCKET_NAME_PREFIX = 'clitst' BUCKET_NAME_PREFIX_OLD = 'test-b2-cli-' # TODO: remove this when sure that there are no more old buckets +# RUNNER_NAME is the only 
variable exposed by the GitHub CI that was changing for each matrix entry.
+# Example values are "GitHub Actions N" (with N being a whole number, starting from 2) and "Hosted Agent".
+# Here, we're using these names along with the current time as seeds for the random number generator.
+# The name part differentiates runners within the same matrix; the time part differentiates runners across different runs.
+random.seed(environ.get('RUNNER_NAME', 'local') + '--' + str(time.time_ns()))
+

 def generate_bucket_name():
     suffix_length = BUCKET_NAME_LENGTH - len(BUCKET_NAME_PREFIX)

From 2903c8e9bdc51d93fb930bd92e3aa2dbbb43424f Mon Sep 17 00:00:00 2001
From: Krzysztof Kalinowski
Date: Fri, 3 Feb 2023 10:28:44 +0100
Subject: [PATCH 6/7] Linter fix

---
 test/integration/test_b2_command_line.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/test/integration/test_b2_command_line.py b/test/integration/test_b2_command_line.py
index 1324d5b77..803702873 100644
--- a/test/integration/test_b2_command_line.py
+++ b/test/integration/test_b2_command_line.py
@@ -278,7 +278,10 @@ def test_key_restrictions(b2_tool, create_test_bucket):
         r'application key with capabilities ' \
         r"'(.*listFiles.*|.*listBuckets.*|.*readFiles.*){3}', " \
         r"restricted to bucket '%s' \(unauthorized\)" % second_bucket_name
-    b2_tool.should_fail(['rm', '--recursive', '--noProgress', second_bucket_name], failed_bucket_err)
+    b2_tool.should_fail(
+        ['rm', '--recursive', '--noProgress', second_bucket_name],
+        failed_bucket_err,
+    )
 
     failed_bucket_err = r'ERROR: Application key is restricted to bucket: ' + first_bucket_name
     b2_tool.should_fail(['get-bucket', second_bucket_name], failed_bucket_err)

From 3331c52989619eb99e72fffb1f735df37951cda2 Mon Sep 17 00:00:00 2001
From: Krzysztof Kalinowski
Date: Fri, 3 Feb 2023 11:26:05 +0100
Subject: [PATCH 7/7] PR fixes

- Replaced dataclass with namedtuple
- Fixed key restrictions test with rm

---
 test/integration/helpers.py              | 14 +++++---------
test/integration/test_b2_command_line.py | 6 +++--- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/test/integration/helpers.py b/test/integration/helpers.py index e7e8f701e..c398ec714 100644 --- a/test/integration/helpers.py +++ b/test/integration/helpers.py @@ -8,9 +8,7 @@ # ###################################################################### -import argparse import contextlib -import dataclasses import json import os import platform @@ -27,7 +25,7 @@ from os import environ, linesep, path from pathlib import Path from tempfile import gettempdir, mkdtemp -from typing import List, Optional, Union +from typing import List, NamedTuple, Optional, Union import backoff @@ -238,21 +236,19 @@ def read_from(self, f): self.string = str(e) -# TODO: add slots=True when dropping support for Python 3.9 -@dataclasses.dataclass(frozen=True) -class CommandResult: +class CommandResult(NamedTuple): status: int stdout: str stderr: str - def should_succeed(self): + def should_succeed(self) -> None: assert self.status == 0, f'FAILED with status {self.status}, stderr={self.stderr}' - def should_fail(self): + def should_fail(self) -> None: assert self.status != 0, 'ERROR: should have failed' @property - def output(self): + def output(self) -> str: return self.stdout + self.stderr diff --git a/test/integration/test_b2_command_line.py b/test/integration/test_b2_command_line.py index 803702873..d1e169556 100644 --- a/test/integration/test_b2_command_line.py +++ b/test/integration/test_b2_command_line.py @@ -235,7 +235,7 @@ def test_key_restrictions(b2_tool, create_test_bucket): second_bucket_name = create_test_bucket() # A single file for rm to fail on. 
- b2_tool.should_succeed(['upload-file', '--noProgress', second_bucket_name, 'README.md', 'test']) + b2_tool.should_succeed(['upload-file', '--noProgress', first_bucket_name, 'README.md', 'test']) key_one_name = 'clt-testKey-01' + random_hex(6) created_key_stdout = b2_tool.should_succeed( @@ -277,9 +277,9 @@ def test_key_restrictions(b2_tool, create_test_bucket): failed_bucket_err = r'Deletion of file "test" \([^\)]+\) failed: unauthorized for ' \ r'application key with capabilities ' \ r"'(.*listFiles.*|.*listBuckets.*|.*readFiles.*){3}', " \ - r"restricted to bucket '%s' \(unauthorized\)" % second_bucket_name + r"restricted to bucket '%s' \(unauthorized\)" % first_bucket_name b2_tool.should_fail( - ['rm', '--recursive', '--noProgress', second_bucket_name], + ['rm', '--recursive', '--noProgress', first_bucket_name], failed_bucket_err, )