From 4761462ef835320cefce1ed8255ce4c16236eb4a Mon Sep 17 00:00:00 2001 From: Joaquin Vanschoren Date: Mon, 24 Nov 2025 14:10:14 +0100 Subject: [PATCH 1/7] key update for new test server --- openml/config.py | 2 +- openml/testing.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/openml/config.py b/openml/config.py index 3dde45bdd..8b768376b 100644 --- a/openml/config.py +++ b/openml/config.py @@ -212,7 +212,7 @@ class ConfigurationForExamples: _last_used_key = None _start_last_called = False _test_server = "https://test.openml.org/api/v1/xml" - _test_apikey = "c0c42819af31e706efe1f4b88c23c6c1" + _test_apikey = "normaluser" @classmethod def start_using_configuration_for_example(cls) -> None: diff --git a/openml/testing.py b/openml/testing.py index 2003bb1b9..88314c4f8 100644 --- a/openml/testing.py +++ b/openml/testing.py @@ -49,7 +49,7 @@ class TestBase(unittest.TestCase): flow_name_tracker: ClassVar[list[str]] = [] test_server = "https://test.openml.org/api/v1/xml" # amueller's read/write key that he will throw away later - apikey = "610344db6388d9ba34f6db45a3cf71de" + apikey = "abc" # creating logger for tracking files uploaded to test server logger = logging.getLogger("unit_tests_published_entities") From a58c06742edade9fd99e444ae200e2810d6cf453 Mon Sep 17 00:00:00 2001 From: Joaquin Vanschoren Date: Mon, 24 Nov 2025 14:28:09 +0100 Subject: [PATCH 2/7] Update to new test server API keys --- tests/conftest.py | 4 ++-- tests/test_openml/test_config.py | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 40a801e86..877602e3f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -251,7 +251,7 @@ def test_files_directory() -> Path: @pytest.fixture(scope="session") def test_api_key() -> str: - return "c0c42819af31e706efe1f4b88c23c6c1" + return "normaluser" @pytest.fixture(autouse=True, scope="function") @@ -277,7 +277,7 @@ def with_server(request): yield return 
openml.config.server = "https://test.openml.org/api/v1/xml" - openml.config.apikey = "c0c42819af31e706efe1f4b88c23c6c1" + openml.config.apikey = "normaluser" yield diff --git a/tests/test_openml/test_config.py b/tests/test_openml/test_config.py index 0324545a7..5560cf677 100644 --- a/tests/test_openml/test_config.py +++ b/tests/test_openml/test_config.py @@ -76,7 +76,7 @@ def test_get_config_as_dict(self): """Checks if the current configuration is returned accurately as a dict.""" config = openml.config.get_config_as_dict() _config = {} - _config["apikey"] = "610344db6388d9ba34f6db45a3cf71de" + _config["apikey"] = "abc" _config["server"] = "https://test.openml.org/api/v1/xml" _config["cachedir"] = self.workdir _config["avoid_duplicate_runs"] = False @@ -90,7 +90,7 @@ def test_get_config_as_dict(self): def test_setup_with_config(self): """Checks if the OpenML configuration can be updated using _setup().""" _config = {} - _config["apikey"] = "610344db6388d9ba34f6db45a3cf71de" + _config["apikey"] = "abc" _config["server"] = "https://www.openml.org/api/v1/xml" _config["cachedir"] = self.workdir _config["avoid_duplicate_runs"] = True @@ -109,25 +109,25 @@ class TestConfigurationForExamples(openml.testing.TestBase): def test_switch_to_example_configuration(self): """Verifies the test configuration is loaded properly.""" # Below is the default test key which would be used anyway, but just for clarity: - openml.config.apikey = "610344db6388d9ba34f6db45a3cf71de" + openml.config.apikey = "abc" openml.config.server = self.production_server openml.config.start_using_configuration_for_example() - assert openml.config.apikey == "c0c42819af31e706efe1f4b88c23c6c1" + assert openml.config.apikey == "normaluser" assert openml.config.server == self.test_server @pytest.mark.production() def test_switch_from_example_configuration(self): """Verifies the previous configuration is loaded after stopping.""" # Below is the default test key which would be used anyway, but just for clarity: - 
openml.config.apikey = "610344db6388d9ba34f6db45a3cf71de" + openml.config.apikey = "abc" openml.config.server = self.production_server openml.config.start_using_configuration_for_example() openml.config.stop_using_configuration_for_example() - assert openml.config.apikey == "610344db6388d9ba34f6db45a3cf71de" + assert openml.config.apikey == "abc" assert openml.config.server == self.production_server def test_example_configuration_stop_before_start(self): @@ -145,14 +145,14 @@ def test_example_configuration_stop_before_start(self): @pytest.mark.production() def test_example_configuration_start_twice(self): """Checks that the original config can be returned to if `start..` is called twice.""" - openml.config.apikey = "610344db6388d9ba34f6db45a3cf71de" + openml.config.apikey = "abc" openml.config.server = self.production_server openml.config.start_using_configuration_for_example() openml.config.start_using_configuration_for_example() openml.config.stop_using_configuration_for_example() - assert openml.config.apikey == "610344db6388d9ba34f6db45a3cf71de" + assert openml.config.apikey == "abc" assert openml.config.server == self.production_server From 60f8b203bfb742d759585585d13d58a864a87ed7 Mon Sep 17 00:00:00 2001 From: PGijsbers Date: Thu, 11 Dec 2025 15:43:14 +0100 Subject: [PATCH 3/7] Fix further issues caused by the production server updates --- openml/testing.py | 2 ++ tests/conftest.py | 7 +++---- tests/test_datasets/test_dataset_functions.py | 6 ++++-- tests/test_flows/test_flow_functions.py | 1 - tests/test_openml/test_config.py | 17 +++++++++-------- tests/test_runs/test_run_functions.py | 8 ++++---- tests/test_setups/test_setup_functions.py | 3 +-- tests/test_tasks/test_task_functions.py | 6 +++--- tests/test_utils/test_utils.py | 6 +++--- 9 files changed, 29 insertions(+), 27 deletions(-) diff --git a/openml/testing.py b/openml/testing.py index 88314c4f8..8c906e87f 100644 --- a/openml/testing.py +++ b/openml/testing.py @@ -50,6 +50,8 @@ class 
TestBase(unittest.TestCase): test_server = "https://test.openml.org/api/v1/xml" # amueller's read/write key that he will throw away later apikey = "abc" + admin_key = "abc" + user_key = "normaluser" # creating logger for tracking files uploaded to test server logger = logging.getLogger("unit_tests_published_entities") diff --git a/tests/conftest.py b/tests/conftest.py index 877602e3f..1fabbb375 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -251,7 +251,7 @@ def test_files_directory() -> Path: @pytest.fixture(scope="session") def test_api_key() -> str: - return "normaluser" + return TestBase.user_key @pytest.fixture(autouse=True, scope="function") @@ -274,10 +274,11 @@ def as_robot() -> Iterator[None]: def with_server(request): if "production" in request.keywords: openml.config.server = "https://www.openml.org/api/v1/xml" + openml.config.apikey = None yield return openml.config.server = "https://test.openml.org/api/v1/xml" - openml.config.apikey = "normaluser" + openml.config.apikey = TestBase.user_key yield @@ -295,11 +296,9 @@ def with_test_cache(test_files_directory, request): if tmp_cache.exists(): shutil.rmtree(tmp_cache) - @pytest.fixture def static_cache_dir(): - return Path(__file__).parent / "files" @pytest.fixture diff --git a/tests/test_datasets/test_dataset_functions.py b/tests/test_datasets/test_dataset_functions.py index 4145b86ad..bcf34c8f4 100644 --- a/tests/test_datasets/test_dataset_functions.py +++ b/tests/test_datasets/test_dataset_functions.py @@ -586,9 +586,9 @@ def test_data_status(self): TestBase.logger.info(f"collected from {__file__.split('/')[-1]}: {dataset.id}") did = dataset.id - # admin key for test server (only adminds can activate datasets. + # admin key for test server (only admins can activate datasets. 
# all users can deactivate their own datasets) - openml.config.apikey = "d488d8afd93b32331cf6ea9d7003d4c3" + openml.config.apikey = TestBase.admin_key openml.datasets.status_update(did, "active") self._assert_status_of_dataset(did=did, status="active") @@ -869,6 +869,7 @@ def test_get_online_dataset_arff(self): ), "ARFF files are not equal" def test_topic_api_error(self): + openml.config.apikey = TestBase.user_key # Check server exception when non-admin accessses apis self.assertRaisesRegex( OpenMLServerException, @@ -1475,6 +1476,7 @@ def test_data_edit_cannot_edit_critical_field_if_dataset_has_task(self): ) def test_edit_data_user_cannot_edit_critical_field_of_other_users_dataset(self): + openml.config.apikey = TestBase.user_key # Check server exception when a non-owner or non-admin tries to edit critical fields self.assertRaisesRegex( OpenMLServerException, diff --git a/tests/test_flows/test_flow_functions.py b/tests/test_flows/test_flow_functions.py index ef4759e54..5acdce87d 100644 --- a/tests/test_flows/test_flow_functions.py +++ b/tests/test_flows/test_flow_functions.py @@ -69,7 +69,6 @@ def test_list_flows_output_format(self): @pytest.mark.production() def test_list_flows_empty(self): self.use_production_server() - openml.config.server = self.production_server flows = openml.flows.list_flows(tag="NoOneEverUsesThisTag123") assert flows.empty diff --git a/tests/test_openml/test_config.py b/tests/test_openml/test_config.py index 5560cf677..dba3e1702 100644 --- a/tests/test_openml/test_config.py +++ b/tests/test_openml/test_config.py @@ -14,6 +14,7 @@ import openml.config import openml.testing +from openml.testing import TestBase @contextmanager @@ -76,7 +77,7 @@ def test_get_config_as_dict(self): """Checks if the current configuration is returned accurately as a dict.""" config = openml.config.get_config_as_dict() _config = {} - _config["apikey"] = "abc" + _config["apikey"] = TestBase.apikey _config["server"] = "https://test.openml.org/api/v1/xml" 
_config["cachedir"] = self.workdir _config["avoid_duplicate_runs"] = False @@ -90,7 +91,7 @@ def test_get_config_as_dict(self): def test_setup_with_config(self): """Checks if the OpenML configuration can be updated using _setup().""" _config = {} - _config["apikey"] = "abc" + _config["apikey"] = TestBase.apikey _config["server"] = "https://www.openml.org/api/v1/xml" _config["cachedir"] = self.workdir _config["avoid_duplicate_runs"] = True @@ -109,25 +110,25 @@ class TestConfigurationForExamples(openml.testing.TestBase): def test_switch_to_example_configuration(self): """Verifies the test configuration is loaded properly.""" # Below is the default test key which would be used anyway, but just for clarity: - openml.config.apikey = "abc" + openml.config.apikey = TestBase.apikey openml.config.server = self.production_server openml.config.start_using_configuration_for_example() - assert openml.config.apikey == "normaluser" + assert openml.config.apikey == TestBase.user_key assert openml.config.server == self.test_server @pytest.mark.production() def test_switch_from_example_configuration(self): """Verifies the previous configuration is loaded after stopping.""" # Below is the default test key which would be used anyway, but just for clarity: - openml.config.apikey = "abc" + openml.config.apikey = TestBase.apikey openml.config.server = self.production_server openml.config.start_using_configuration_for_example() openml.config.stop_using_configuration_for_example() - assert openml.config.apikey == "abc" + assert openml.config.apikey == TestBase.apikey assert openml.config.server == self.production_server def test_example_configuration_stop_before_start(self): @@ -145,14 +146,14 @@ def test_example_configuration_stop_before_start(self): @pytest.mark.production() def test_example_configuration_start_twice(self): """Checks that the original config can be returned to if `start..` is called twice.""" - openml.config.apikey = "abc" + openml.config.apikey = TestBase.apikey 
openml.config.server = self.production_server openml.config.start_using_configuration_for_example() openml.config.start_using_configuration_for_example() openml.config.stop_using_configuration_for_example() - assert openml.config.apikey == "abc" + assert openml.config.apikey == TestBase.apikey assert openml.config.server == self.production_server diff --git a/tests/test_runs/test_run_functions.py b/tests/test_runs/test_run_functions.py index b02acdf51..bf43a8024 100644 --- a/tests/test_runs/test_run_functions.py +++ b/tests/test_runs/test_run_functions.py @@ -1407,9 +1407,8 @@ def test_get_run(self): assert run.fold_evaluations["f_measure"][0][i] == value assert "weka" in run.tags assert "weka_3.7.12" in run.tags - assert run.predictions_url == ( - "https://api.openml.org/data/download/1667125/" - "weka_generated_predictions4575715871712251329.arff" + assert run.predictions_url.endswith( + "/data/download/1667125/weka_generated_predictions4575715871712251329.arff" ) def _check_run(self, run): @@ -1550,7 +1549,7 @@ def test_get_runs_list_by_tag(self): # Unit test works on production server only self.use_production_server() - runs = openml.runs.list_runs(tag="curves") + runs = openml.runs.list_runs(tag="curves", size=2) assert len(runs) >= 1 @pytest.mark.sklearn() @@ -1766,6 +1765,7 @@ def test_delete_run(self): _run_id = run.run_id assert delete_run(_run_id) + @pytest.mark.skip(reason="run id is in problematic state on test server due to PR#1454") @unittest.skipIf( Version(sklearn.__version__) < Version("0.20"), reason="SimpleImputer doesn't handle mixed type DataFrame as input", diff --git a/tests/test_setups/test_setup_functions.py b/tests/test_setups/test_setup_functions.py index 6fd11638f..42af5362b 100644 --- a/tests/test_setups/test_setup_functions.py +++ b/tests/test_setups/test_setup_functions.py @@ -116,9 +116,8 @@ def test_existing_setup_exists_3(self): @pytest.mark.production() def test_get_setup(self): + self.use_production_server() # no setups in 
default test server - openml.config.server = "https://www.openml.org/api/v1/xml/" - # contains all special cases, 0 params, 1 param, n params. # Non scikitlearn flows. setups = [18, 19, 20, 118] diff --git a/tests/test_tasks/test_task_functions.py b/tests/test_tasks/test_task_functions.py index 856352ac2..5f1d577c0 100644 --- a/tests/test_tasks/test_task_functions.py +++ b/tests/test_tasks/test_task_functions.py @@ -55,8 +55,8 @@ def test__get_estimation_procedure_list(self): @pytest.mark.production() def test_list_clustering_task(self): + self.use_production_server() # as shown by #383, clustering tasks can give list/dict casting problems - openml.config.server = self.production_server openml.tasks.list_tasks(task_type=TaskType.CLUSTERING, size=10) # the expected outcome is that it doesn't crash. No assertions. @@ -134,9 +134,9 @@ def test__get_task(self): ) @pytest.mark.production() def test__get_task_live(self): + self.use_production_server() # Test the following task as it used to throw an Unicode Error. 
# https://github.com/openml/openml-python/issues/378 openml.tasks.get_task(34536) def test_get_task(self): @@ -198,7 +198,7 @@ def test_get_task_with_cache(self): @pytest.mark.production() def test_get_task_different_types(self): - openml.config.server = self.production_server + self.use_production_server() # Regression task openml.tasks.functions.get_task(5001) # Learning curve diff --git a/tests/test_utils/test_utils.py b/tests/test_utils/test_utils.py index 3b4a34b57..35be84903 100644 --- a/tests/test_utils/test_utils.py +++ b/tests/test_utils/test_utils.py @@ -27,7 +27,7 @@ def min_number_flows_on_test_server() -> int: @pytest.fixture() def min_number_setups_on_test_server() -> int: - """After a reset at least 50 setups are on the test server""" - return 50 + """After a reset at least 20 setups are on the test server""" + return 20 @pytest.fixture() @@ -39,8 +39,8 @@ def min_number_runs_on_test_server() -> int: @pytest.fixture() def min_number_evaluations_on_test_server() -> int: - """After a reset at least 22 evaluations are on the test server""" - return 22 + """After a reset at least 8 evaluations are on the test server""" + return 8 def _mocked_perform_api_call(call, request_method): From 6f5fc6f61451126b2659abc09ea1feb137f65b5a Mon Sep 17 00:00:00 2001 From: PGijsbers Date: Thu, 11 Dec 2025 15:57:15 +0100 Subject: [PATCH 4/7] default to normal read/write key instead of admin key --- openml/testing.py | 4 +--- tests/conftest.py | 2 +- tests/test_datasets/test_dataset_functions.py | 2 -- tests/test_openml/test_config.py | 14 +++++++------- 4 files changed, 9 insertions(+), 13 deletions(-) diff --git a/openml/testing.py b/openml/testing.py index 8c906e87f..16d7e65bf 100644 --- a/openml/testing.py +++ b/openml/testing.py @@ -48,8 +48,6 @@ class TestBase(unittest.TestCase): } flow_name_tracker: ClassVar[list[str]] = [] test_server = "https://test.openml.org/api/v1/xml" - # amueller's read/write key that he will throw away later - apikey = "abc"
admin_key = "abc" user_key = "normaluser" @@ -101,7 +99,7 @@ def setUp(self, n_levels: int = 1, tmpdir_suffix: str = "") -> None: os.chdir(self.workdir) self.cached = True - openml.config.apikey = TestBase.apikey + openml.config.apikey = TestBase.user_key self.production_server = "https://www.openml.org/api/v1/xml" openml.config.set_root_cache_directory(str(self.workdir)) diff --git a/tests/conftest.py b/tests/conftest.py index 1fabbb375..bd974f3f3 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -98,7 +98,7 @@ def delete_remote_files(tracker, flow_names) -> None: :return: None """ openml.config.server = TestBase.test_server - openml.config.apikey = TestBase.apikey + openml.config.apikey = TestBase.user_key # reordering to delete sub flows at the end of flows # sub-flows have shorter names, hence, sorting by descending order of flow name length diff --git a/tests/test_datasets/test_dataset_functions.py b/tests/test_datasets/test_dataset_functions.py index bcf34c8f4..266a6f6f7 100644 --- a/tests/test_datasets/test_dataset_functions.py +++ b/tests/test_datasets/test_dataset_functions.py @@ -869,7 +869,6 @@ def test_get_online_dataset_arff(self): ), "ARFF files are not equal" def test_topic_api_error(self): - openml.config.apikey = TestBase.user_key # Check server exception when non-admin accessses apis self.assertRaisesRegex( OpenMLServerException, @@ -1476,7 +1475,6 @@ def test_data_edit_cannot_edit_critical_field_if_dataset_has_task(self): ) def test_edit_data_user_cannot_edit_critical_field_of_other_users_dataset(self): - openml.config.apikey = TestBase.user_key # Check server exception when a non-owner or non-admin tries to edit critical fields self.assertRaisesRegex( OpenMLServerException, diff --git a/tests/test_openml/test_config.py b/tests/test_openml/test_config.py index dba3e1702..7ef223504 100644 --- a/tests/test_openml/test_config.py +++ b/tests/test_openml/test_config.py @@ -77,7 +77,7 @@ def test_get_config_as_dict(self): """Checks if the 
current configuration is returned accurately as a dict.""" config = openml.config.get_config_as_dict() _config = {} - _config["apikey"] = TestBase.apikey + _config["apikey"] = TestBase.user_key _config["server"] = "https://test.openml.org/api/v1/xml" _config["cachedir"] = self.workdir _config["avoid_duplicate_runs"] = False @@ -91,7 +91,7 @@ def test_get_config_as_dict(self): def test_setup_with_config(self): """Checks if the OpenML configuration can be updated using _setup().""" _config = {} - _config["apikey"] = TestBase.apikey + _config["apikey"] = TestBase.user_key _config["server"] = "https://www.openml.org/api/v1/xml" _config["cachedir"] = self.workdir _config["avoid_duplicate_runs"] = True @@ -110,7 +110,7 @@ class TestConfigurationForExamples(openml.testing.TestBase): def test_switch_to_example_configuration(self): """Verifies the test configuration is loaded properly.""" # Below is the default test key which would be used anyway, but just for clarity: - openml.config.apikey = TestBase.apikey + openml.config.apikey = TestBase.admin_key openml.config.server = self.production_server openml.config.start_using_configuration_for_example() @@ -122,13 +122,13 @@ def test_switch_to_example_configuration(self): def test_switch_from_example_configuration(self): """Verifies the previous configuration is loaded after stopping.""" # Below is the default test key which would be used anyway, but just for clarity: - openml.config.apikey = TestBase.apikey + openml.config.apikey = TestBase.user_key openml.config.server = self.production_server openml.config.start_using_configuration_for_example() openml.config.stop_using_configuration_for_example() - assert openml.config.apikey == TestBase.apikey + assert openml.config.apikey == TestBase.user_key assert openml.config.server == self.production_server def test_example_configuration_stop_before_start(self): @@ -146,14 +146,14 @@ def test_example_configuration_stop_before_start(self): @pytest.mark.production() def 
test_example_configuration_start_twice(self): """Checks that the original config can be returned to if `start..` is called twice.""" - openml.config.apikey = TestBase.apikey + openml.config.apikey = TestBase.user_key openml.config.server = self.production_server openml.config.start_using_configuration_for_example() openml.config.start_using_configuration_for_example() openml.config.stop_using_configuration_for_example() - assert openml.config.apikey == TestBase.apikey + assert openml.config.apikey == TestBase.user_key assert openml.config.server == self.production_server From baa4fbf16c60c78069a418f1ede782bcd92a6b87 Mon Sep 17 00:00:00 2001 From: PGijsbers Date: Thu, 11 Dec 2025 17:32:15 +0100 Subject: [PATCH 5/7] Skip a check that doesn't make sense? --- tests/test_flows/test_flow_functions.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_flows/test_flow_functions.py b/tests/test_flows/test_flow_functions.py index 5acdce87d..9f8ec5e36 100644 --- a/tests/test_flows/test_flow_functions.py +++ b/tests/test_flows/test_flow_functions.py @@ -416,8 +416,11 @@ def test_get_flow_id(self): name=flow.name, exact_version=False, ) - assert flow_ids_exact_version_True == flow_ids_exact_version_False assert flow.flow_id in flow_ids_exact_version_True + assert set(flow_ids_exact_version_True).issubset(set(flow_ids_exact_version_False)) + # instead of the assertion above, the assertion below used to be used. 
+ pytest.skip(reason="Not sure why there should only be one version of this flow.") + assert flow_ids_exact_version_True == flow_ids_exact_version_False def test_delete_flow(self): flow = openml.OpenMLFlow( From 5a86909605dd3ea4271a8f2318e3928097dbbc94 Mon Sep 17 00:00:00 2001 From: PGijsbers Date: Fri, 12 Dec 2025 09:38:08 +0100 Subject: [PATCH 6/7] [skip ci] explain use of production and size --- tests/test_runs/test_run_functions.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/test_runs/test_run_functions.py b/tests/test_runs/test_run_functions.py index bf43a8024..94ffa5001 100644 --- a/tests/test_runs/test_run_functions.py +++ b/tests/test_runs/test_run_functions.py @@ -1545,10 +1545,9 @@ def test_get_runs_list_by_filters(self): @pytest.mark.production() def test_get_runs_list_by_tag(self): - # TODO: comes from live, no such lists on test - # Unit test works on production server only - + # We don't have tagged runs on the test server self.use_production_server() + # Don't remove the size restriction: this query is too expensive without it runs = openml.runs.list_runs(tag="curves", size=2) assert len(runs) >= 1 From 4b4c274df1659784929ba2b3339c1f7ac2ea10a2 Mon Sep 17 00:00:00 2001 From: PGijsbers Date: Fri, 12 Dec 2025 09:41:08 +0100 Subject: [PATCH 7/7] Centralize definition of test server normal user key --- openml/config.py | 3 ++- openml/testing.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/openml/config.py b/openml/config.py index 8b768376b..cf66a6346 100644 --- a/openml/config.py +++ b/openml/config.py @@ -24,6 +24,7 @@ OPENML_CACHE_DIR_ENV_VAR = "OPENML_CACHE_DIR" OPENML_SKIP_PARQUET_ENV_VAR = "OPENML_SKIP_PARQUET" +_TEST_SERVER_NORMAL_USER_KEY = "normaluser" class _Config(TypedDict): @@ -212,7 +213,7 @@ class ConfigurationForExamples: _last_used_key = None _start_last_called = False _test_server = "https://test.openml.org/api/v1/xml" - _test_apikey = "normaluser" + _test_apikey = 
_TEST_SERVER_NORMAL_USER_KEY @classmethod def start_using_configuration_for_example(cls) -> None: diff --git a/openml/testing.py b/openml/testing.py index 16d7e65bf..d1da16876 100644 --- a/openml/testing.py +++ b/openml/testing.py @@ -49,7 +49,7 @@ class TestBase(unittest.TestCase): flow_name_tracker: ClassVar[list[str]] = [] test_server = "https://test.openml.org/api/v1/xml" admin_key = "abc" - user_key = "normaluser" + user_key = openml.config._TEST_SERVER_NORMAL_USER_KEY # creating logger for tracking files uploaded to test server logger = logging.getLogger("unit_tests_published_entities")