From 69e4690a2a7bd95e6be678c2a1ddf2b54340ac76 Mon Sep 17 00:00:00 2001
From: strtgbb <146047128+strtgbb@users.noreply.github.com>
Date: Wed, 26 Nov 2025 11:05:49 -0500
Subject: [PATCH 01/15] Rebase CICD on v25.8.12.129-lts
---
.../10_project-antalya-bug-report.md | 36 +
.github/ISSUE_TEMPLATE/10_question.yaml | 20 -
.../ISSUE_TEMPLATE/20_feature-request.yaml | 38 -
.../20_project-antalya-feature-request.md | 20 +
.../30_project-antalya-question.md | 16 +
.../30_unexpected-behaviour.yaml | 55 -
.../35_incomplete_implementation.yaml | 50 -
.../40_altinity-stable-bug-report.md | 50 +
.../ISSUE_TEMPLATE/45_usability-issue.yaml | 48 -
.../50_altinity-stable-question.md | 16 +
.github/ISSUE_TEMPLATE/50_build-issue.yaml | 50 -
.../60_documentation-issue.yaml | 26 -
.../ISSUE_TEMPLATE/70_performance-issue.yaml | 48 -
.../80_backward-compatibility.yaml | 48 -
.github/ISSUE_TEMPLATE/85_bug-report.yaml | 76 -
.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml | 26 -
.../ISSUE_TEMPLATE/95_sanitizer-report.yaml | 26 -
.../96_installation-issues.yaml | 46 -
.github/PULL_REQUEST_TEMPLATE.md | 31 +-
.github/actionlint.yml | 10 +-
.../actions/create_workflow_report/action.yml | 52 +
.../ci_run_report.html.jinja | 272 ++
.../create_workflow_report.py | 916 +++++
.../workflow_report_hook.sh | 7 +
.github/actions/docker_setup/action.yml | 32 +
.github/actions/runner_setup/action.yml | 19 +
.github/grype/parse_vulnerabilities_grype.py | 32 +
.github/grype/run_grype_scan.sh | 18 +
.../grype/transform_and_upload_results_s3.sh | 20 +
.github/retry.sh | 22 +
.github/workflows/README.md | 13 +
.github/workflows/backport_branches.yml | 324 +-
.github/workflows/cancel.yml | 19 +
.github/workflows/cherry_pick.yml | 2 +-
.github/workflows/compare_fails.yml | 110 +
.github/workflows/create_release.yml | 2 +-
.github/workflows/docker_publish.yml | 150 +
.github/workflows/grype_scan.yml | 154 +
.github/workflows/init_praktika.yml | 27 +
.github/workflows/master.yml | 3124 ++++++++---------
.github/workflows/merge_queue.yml | 121 +-
.github/workflows/nightly_fuzzers.yml | 67 +-
.github/workflows/nightly_jepsen.yml | 67 +-
.github/workflows/nightly_statistics.yml | 31 +-
.github/workflows/pull_request.yml | 3022 ++++++++--------
.github/workflows/regression.yml | 1081 ++++++
.github/workflows/release_branches.yml | 473 ++-
.github/workflows/release_builds.yml | 1245 +++++++
.github/workflows/repo-sanity-checks.yml | 150 +
.github/workflows/reusable_sign.yml | 166 +
.github/workflows/reusable_simple_job.yml | 2 +-
.github/workflows/scheduled_runs.yml | 55 +
.github/workflows/sign_and_release.yml | 567 +++
.github/workflows/vectorsearchstress.yml | 40 +-
ci/defs/defs.py | 203 +-
ci/defs/job_configs.py | 119 +-
ci/docker/binary-builder/Dockerfile | 8 +-
ci/docker/cctools/Dockerfile | 4 +-
ci/docker/compatibility/centos/Dockerfile | 2 +-
ci/docker/compatibility/ubuntu/Dockerfile | 2 +-
ci/docker/docs-builder/Dockerfile | 2 +-
ci/docker/fasttest/Dockerfile | 4 +-
ci/docker/fuzzer/Dockerfile | 2 +-
ci/docker/integration/arrowflight/Dockerfile | 2 +-
ci/docker/integration/base/Dockerfile | 8 +-
.../clickhouse_with_hms_catalog/Dockerfile | 2 +-
.../clickhouse_with_unity_catalog/Dockerfile | 2 +-
.../integration/helper_container/Dockerfile | 2 +-
ci/docker/integration/kerberos_kdc/Dockerfile | 2 +-
.../mysql_dotnet_client/Dockerfile | 2 +-
.../mysql_golang_client/Dockerfile | 2 +-
.../integration/mysql_java_client/Dockerfile | 2 +-
.../integration/mysql_js_client/Dockerfile | 2 +-
.../integration/mysql_php_client/Dockerfile | 2 +-
.../postgresql_java_client/Dockerfile | 2 +-
ci/docker/integration/resolver/Dockerfile | 2 +-
ci/docker/integration/runner/Dockerfile | 8 +-
.../integration/runner/dockerd-entrypoint.sh | 4 +-
ci/docker/integration/s3_proxy/Dockerfile | 2 +-
ci/docker/keeper-jepsen-test/Dockerfile | 2 +-
ci/docker/libfuzzer/Dockerfile | 2 +-
ci/docker/performance-comparison/Dockerfile | 4 +-
ci/docker/server-jepsen-test/Dockerfile | 2 +-
ci/docker/sqlancer-test/Dockerfile | 2 +-
ci/docker/stateless-test/Dockerfile | 8 +-
ci/docker/stress-test/Dockerfile | 2 +-
ci/docker/style-test/Dockerfile | 2 +-
ci/docker/test-base/Dockerfile | 6 +-
ci/jobs/build_clickhouse.py | 21 +-
ci/jobs/clickbench.py | 4 +-
ci/jobs/fast_test.py | 4 +-
ci/jobs/functional_tests.py | 32 +-
ci/jobs/install_check.py | 9 +-
ci/jobs/integration_test_check.py | 6 +-
ci/jobs/scripts/clickhouse_proc.py | 25 +-
ci/jobs/scripts/clickhouse_version.py | 73 +-
ci/jobs/scripts/functional_tests_results.py | 128 +-
ci/jobs/scripts/fuzzer/run-fuzzer.sh | 7 +-
ci/jobs/scripts/integration_tests_runner.py | 251 +-
ci/jobs/scripts/workflow_hooks/filter_job.py | 5 +
.../scripts/workflow_hooks/parse_ci_tags.py | 18 +
ci/jobs/scripts/workflow_hooks/version_log.py | 35 +-
ci/praktika/_environment.py | 39 +-
ci/praktika/execution/__main__.py | 4 +
ci/praktika/execution/execution_settings.py | 2 +-
ci/praktika/gh.py | 4 +-
ci/praktika/hook_cache.py | 2 +-
ci/praktika/job.py | 2 +-
ci/praktika/native_jobs.py | 39 +-
ci/praktika/parser.py | 4 +
ci/praktika/result.py | 2 +
ci/praktika/runner.py | 12 +
ci/praktika/s3.py | 41 +
ci/praktika/workflow.py | 1 +
ci/praktika/yaml_additional_templates.py | 168 +
ci/praktika/yaml_generator.py | 66 +-
ci/settings/altinity_overrides.py | 55 +
ci/settings/settings.py | 8 +-
ci/workflows/VectorSearchStress.py | 2 +-
ci/workflows/backport_branches.py | 1 +
ci/workflows/master.py | 35 +-
ci/workflows/merge_queue.py | 4 +-
ci/workflows/pull_request.py | 67 +-
ci/workflows/release_branches.py | 1 +
ci/workflows/release_builds.py | 67 +
cmake/autogenerated_versions.txt | 9 +-
cmake/version.cmake | 9 +-
docker/keeper/Dockerfile | 2 +-
docker/server/README.md | 2 +-
docker/server/README.src/github-repo | 2 +-
docker/server/README.src/license.md | 2 +-
docker/server/README.src/logo.svg | 56 +-
docker/server/README.src/maintainer.md | 2 +-
docker/test/upgrade/Dockerfile | 29 +
packages/clickhouse-client.yaml | 6 +-
packages/clickhouse-common-static-dbg.yaml | 6 +-
packages/clickhouse-common-static.yaml | 6 +-
packages/clickhouse-keeper-dbg.yaml | 6 +-
packages/clickhouse-keeper.yaml | 6 +-
packages/clickhouse-server.yaml | 6 +-
programs/server/Server.cpp | 24 +
programs/server/binary.html | 5 +-
programs/server/config.xml | 6 +-
programs/server/dashboard.html | 2 +-
programs/server/index.html | 109 +-
programs/server/merges.html | 2 +-
programs/server/play.html | 46 +-
src/Common/SignalHandlers.cpp | 6 +-
tests/broken_tests.yaml | 193 +
tests/ci/changelog.py | 56 +-
tests/ci/ci.py | 6 +-
tests/ci/ci_buddy.py | 4 +-
tests/ci/ci_cache.py | 3 +-
tests/ci/ci_config.py | 24 +-
tests/ci/ci_definitions.py | 45 +-
tests/ci/ci_fuzzer_check.py | 3 +-
tests/ci/clickhouse_helper.py | 18 +-
tests/ci/commit_status_helper.py | 9 +-
tests/ci/compatibility_check.py | 4 +-
tests/ci/create_release.py | 2 +-
tests/ci/docker_images_helper.py | 7 +-
tests/ci/docker_server.py | 13 +-
tests/ci/env_helper.py | 17 +-
tests/ci/get_robot_token.py | 14 +-
tests/ci/git_helper.py | 44 +-
tests/ci/jepsen_check.py | 4 +-
tests/ci/libfuzzer_test_check.py | 2 +-
tests/ci/pr_info.py | 26 +-
.../packaging/ansible/inventory/localhost.yml | 73 +
.../roles/get_cloudfront_info/tasks/main.yml | 34 +
.../ansible/roles/publish_pkgs/tasks/main.yml | 98 +
.../roles/update_bin_repo/tasks/main.yml | 52 +
.../roles/update_deb_repo/tasks/main.yml | 61 +
.../templates/apt-ftparchive-stable.conf | 6 +
.../templates/apt-ftparchive.conf | 17 +
.../roles/update_rpm_repo/tasks/main.yml | 51 +
.../roles/update_rpm_repo/templates/repo.j2 | 7 +
.../update_rpm_repo/templates/rpmmacros.j2 | 1 +
.../roles/update_tar_repo/tasks/main.yml | 61 +
.../packaging/ansible/sign-and-release.yml | 8 +
.../release/packaging/dirindex/dirindexgen.py | 122 +
.../packaging/static/bootstrap.bundle.min.js | 7 +
tests/ci/report.py | 5 +-
tests/ci/s3_helper.py | 41 +
tests/ci/sign_release.py | 97 +
tests/ci/sqltest.py | 2 +-
tests/ci/stress_check.py | 30 +-
tests/ci/version_helper.py | 140 +-
tests/config/config.d/azure_storage_conf.xml | 8 +-
.../compose/docker_compose_arrowflight.yml | 2 +-
.../compose/docker_compose_azurite.yml | 2 +-
.../compose/docker_compose_clickhouse.yml | 2 +-
.../compose/docker_compose_dotnet_client.yml | 2 +-
.../docker_compose_iceberg_hms_catalog.yml | 2 +-
.../compose/docker_compose_jdbc_bridge.yml | 1 +
.../compose/docker_compose_keeper.yml | 6 +-
.../docker_compose_kerberized_kafka.yml | 2 +-
.../compose/docker_compose_kerberos_kdc.yml | 2 +-
.../compose/docker_compose_minio.yml | 6 +-
.../docker_compose_mysql_dotnet_client.yml | 2 +-
.../docker_compose_mysql_golang_client.yml | 2 +-
.../docker_compose_mysql_java_client.yml | 2 +-
.../docker_compose_mysql_js_client.yml | 2 +-
.../docker_compose_mysql_php_client.yml | 2 +-
.../compose/docker_compose_nginx.yml | 2 +-
.../docker_compose_postgresql_java_client.yml | 2 +-
tests/integration/helpers/cluster.py | 11 +-
tests/integration/helpers/iceberg_utils.py | 126 +-
tests/integration/helpers/network.py | 2 +-
tests/integration/integration_test_images.py | 32 +-
tests/integration/runner | 2 +-
.../test_attach_partition_using_copy/test.py | 4 +-
.../test_backward_compatibility/test.py | 2 +-
.../test_aggregate_fixed_key.py | 2 +-
.../test_aggregate_function_state.py | 4 +-
.../test_convert_ordinary.py | 2 +-
.../test_cte_distributed.py | 2 +-
.../test_functions.py | 2 +-
.../test_insert_profile_events.py | 2 +-
.../test_ip_types_binary_compatibility.py | 2 +-
.../test_memory_bound_aggregation.py | 4 +-
.../test_normalized_count_comparison.py | 2 +-
.../test_rocksdb_upgrade.py | 2 +-
.../test_select_aggregate_alias_column.py | 2 +-
.../test_short_strings_aggregation.py | 12 +-
...test_vertical_merges_from_compact_parts.py | 2 +-
tests/integration/test_cow_policy/test.py | 4 +-
tests/integration/test_database_delta/test.py | 2 +-
.../test_database_iceberg/configs/cluster.xml | 12 +
.../integration/test_database_iceberg/test.py | 249 +-
.../test_disk_over_web_server/test.py | 2 +-
.../test.py | 2 +-
.../__init__.py | 0
.../configs/named_collections.xml | 9 +
.../test.py | 131 +
.../__init__.py | 0
.../allow_experimental_export_partition.xml | 3 +
.../disable_experimental_export_partition.xml | 3 +
.../configs/named_collections.xml | 9 +
.../configs/users.d/profile.xml | 8 +
.../test.py | 749 ++++
.../test_lightweight_updates/test.py | 2 +-
.../test_mask_sensitive_info/test.py | 121 +-
tests/integration/test_old_versions/test.py | 2 +-
.../test_polymorphic_parts/test.py | 2 +-
.../test.py | 4 +-
.../test_replicating_constants/test.py | 4 +-
.../test_s3_cache_locality/__init__.py | 0
.../configs/cluster.xml | 126 +
.../configs/named_collections.xml | 10 +
.../test_s3_cache_locality/configs/users.xml | 9 +
.../test_s3_cache_locality/test.py | 195 +
.../test_s3_cluster/data/graceful/part0.csv | 1 +
.../test_s3_cluster/data/graceful/part1.csv | 1 +
.../test_s3_cluster/data/graceful/part2.csv | 1 +
.../test_s3_cluster/data/graceful/part3.csv | 1 +
.../test_s3_cluster/data/graceful/part4.csv | 1 +
.../test_s3_cluster/data/graceful/part5.csv | 1 +
.../test_s3_cluster/data/graceful/part6.csv | 1 +
.../test_s3_cluster/data/graceful/part7.csv | 1 +
.../test_s3_cluster/data/graceful/part8.csv | 1 +
.../test_s3_cluster/data/graceful/part9.csv | 1 +
.../test_s3_cluster/data/graceful/partA.csv | 1 +
.../test_s3_cluster/data/graceful/partB.csv | 1 +
.../test_s3_cluster/data/graceful/partC.csv | 1 +
.../test_s3_cluster/data/graceful/partD.csv | 1 +
.../test_s3_cluster/data/graceful/partE.csv | 1 +
.../test_s3_cluster/data/graceful/partF.csv | 1 +
tests/integration/test_s3_cluster/test.py | 727 ++++
.../disable_parquet_metadata_caching.xml | 7 +
tests/integration/test_storage_delta/test.py | 7 +-
.../configs/config.d/named_collections.xml | 14 +
.../test_trace_log_build_id/test.py | 2 +-
tests/integration/test_ttl_replicated/test.py | 6 +-
tests/integration/test_version_update/test.py | 2 +-
.../test.py | 6 +-
.../queries/0_stateless/01528_play.reference | 2 +-
tests/queries/0_stateless/01528_play.sh | 2 +-
utils/tests-visualizer/index.html | 2 +-
279 files changed, 14356 insertions(+), 4923 deletions(-)
create mode 100644 .github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md
delete mode 100644 .github/ISSUE_TEMPLATE/10_question.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/20_feature-request.yaml
create mode 100644 .github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md
create mode 100644 .github/ISSUE_TEMPLATE/30_project-antalya-question.md
delete mode 100644 .github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml
create mode 100644 .github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md
delete mode 100644 .github/ISSUE_TEMPLATE/45_usability-issue.yaml
create mode 100644 .github/ISSUE_TEMPLATE/50_altinity-stable-question.md
delete mode 100644 .github/ISSUE_TEMPLATE/50_build-issue.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/60_documentation-issue.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/70_performance-issue.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/80_backward-compatibility.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/85_bug-report.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/90_fuzzing-report.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/95_sanitizer-report.yaml
delete mode 100644 .github/ISSUE_TEMPLATE/96_installation-issues.yaml
create mode 100644 .github/actions/create_workflow_report/action.yml
create mode 100644 .github/actions/create_workflow_report/ci_run_report.html.jinja
create mode 100755 .github/actions/create_workflow_report/create_workflow_report.py
create mode 100755 .github/actions/create_workflow_report/workflow_report_hook.sh
create mode 100644 .github/actions/docker_setup/action.yml
create mode 100644 .github/actions/runner_setup/action.yml
create mode 100644 .github/grype/parse_vulnerabilities_grype.py
create mode 100755 .github/grype/run_grype_scan.sh
create mode 100755 .github/grype/transform_and_upload_results_s3.sh
create mode 100755 .github/retry.sh
create mode 100644 .github/workflows/README.md
create mode 100644 .github/workflows/cancel.yml
create mode 100644 .github/workflows/compare_fails.yml
create mode 100644 .github/workflows/docker_publish.yml
create mode 100644 .github/workflows/grype_scan.yml
create mode 100644 .github/workflows/init_praktika.yml
create mode 100644 .github/workflows/regression.yml
create mode 100644 .github/workflows/release_builds.yml
create mode 100644 .github/workflows/repo-sanity-checks.yml
create mode 100644 .github/workflows/reusable_sign.yml
create mode 100644 .github/workflows/scheduled_runs.yml
create mode 100644 .github/workflows/sign_and_release.yml
create mode 100644 ci/jobs/scripts/workflow_hooks/parse_ci_tags.py
create mode 100644 ci/praktika/yaml_additional_templates.py
create mode 100644 ci/settings/altinity_overrides.py
create mode 100644 ci/workflows/release_builds.py
create mode 100644 docker/test/upgrade/Dockerfile
create mode 100644 tests/broken_tests.yaml
create mode 100644 tests/ci/release/packaging/ansible/inventory/localhost.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/get_cloudfront_info/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/publish_pkgs/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/update_bin_repo/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive-stable.conf
create mode 100644 tests/ci/release/packaging/ansible/roles/update_deb_repo/templates/apt-ftparchive.conf
create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/repo.j2
create mode 100644 tests/ci/release/packaging/ansible/roles/update_rpm_repo/templates/rpmmacros.j2
create mode 100644 tests/ci/release/packaging/ansible/roles/update_tar_repo/tasks/main.yml
create mode 100644 tests/ci/release/packaging/ansible/sign-and-release.yml
create mode 100755 tests/ci/release/packaging/dirindex/dirindexgen.py
create mode 100644 tests/ci/release/packaging/static/bootstrap.bundle.min.js
create mode 100644 tests/ci/sign_release.py
create mode 100644 tests/integration/test_database_iceberg/configs/cluster.xml
create mode 100644 tests/integration/test_export_merge_tree_part_to_object_storage/__init__.py
create mode 100644 tests/integration/test_export_merge_tree_part_to_object_storage/configs/named_collections.xml
create mode 100644 tests/integration/test_export_merge_tree_part_to_object_storage/test.py
create mode 100644 tests/integration/test_export_replicated_mt_partition_to_object_storage/__init__.py
create mode 100644 tests/integration/test_export_replicated_mt_partition_to_object_storage/configs/allow_experimental_export_partition.xml
create mode 100644 tests/integration/test_export_replicated_mt_partition_to_object_storage/configs/disable_experimental_export_partition.xml
create mode 100644 tests/integration/test_export_replicated_mt_partition_to_object_storage/configs/named_collections.xml
create mode 100644 tests/integration/test_export_replicated_mt_partition_to_object_storage/configs/users.d/profile.xml
create mode 100644 tests/integration/test_export_replicated_mt_partition_to_object_storage/test.py
create mode 100644 tests/integration/test_s3_cache_locality/__init__.py
create mode 100644 tests/integration/test_s3_cache_locality/configs/cluster.xml
create mode 100644 tests/integration/test_s3_cache_locality/configs/named_collections.xml
create mode 100644 tests/integration/test_s3_cache_locality/configs/users.xml
create mode 100644 tests/integration/test_s3_cache_locality/test.py
create mode 100644 tests/integration/test_s3_cluster/data/graceful/part0.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/part1.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/part2.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/part3.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/part4.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/part5.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/part6.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/part7.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/part8.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/part9.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/partA.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/partB.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/partC.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/partD.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/partE.csv
create mode 100644 tests/integration/test_s3_cluster/data/graceful/partF.csv
create mode 100644 tests/integration/test_storage_delta/configs/users.d/disable_parquet_metadata_caching.xml
diff --git a/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md
new file mode 100644
index 000000000000..0c8c15a05eaf
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/10_project-antalya-bug-report.md
@@ -0,0 +1,36 @@
+---
+name: Project Antalya Bug Report
+about: Help us improve Project Antalya
+title: ''
+labels: antalya
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+
+**To Reproduce**
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Key information**
+Provide relevant runtime details.
+ - Project Antalya Build Version
+ - Cloud provider, e.g., AWS
+ - Kubernetes provider, e.g., GKE or Minikube
+ - Object storage, e.g., AWS S3 or Minio
+ - Iceberg catalog, e.g., Glue with REST Proxy
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/10_question.yaml b/.github/ISSUE_TEMPLATE/10_question.yaml
deleted file mode 100644
index 71a3d3da6425..000000000000
--- a/.github/ISSUE_TEMPLATE/10_question.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-name: Question
-description: Ask a question about ClickHouse
-labels: ["question"]
-body:
- - type: markdown
- attributes:
- value: |
- > Make sure to check documentation https://clickhouse.com/docs/ first. If the question is concise and probably has a short answer, asking it in [community Slack](https://join.slack.com/t/clickhousedb/shared_invite/zt-1gh9ds7f4-PgDhJAaF8ad5RbWBAAjzFg) is probably the fastest way to find the answer. For more complicated questions, consider asking them on StackOverflow with "clickhouse" tag https://stackoverflow.com/questions/tagged/clickhouse
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Question
- description: Please put your question here.
- validations:
- required: true
diff --git a/.github/ISSUE_TEMPLATE/20_feature-request.yaml b/.github/ISSUE_TEMPLATE/20_feature-request.yaml
deleted file mode 100644
index 054efc2d61ee..000000000000
--- a/.github/ISSUE_TEMPLATE/20_feature-request.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
-name: Feature request
-description: Suggest an idea for ClickHouse
-labels: ["feature"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Use case
- description: A clear and concise description of what the intended usage scenario is.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Describe the solution you'd like
- description: A clear and concise description of what you want to happen.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Describe alternatives you've considered
- description: A clear and concise description of any alternative solutions or features you've considered.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context or screenshots about the feature request here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md
new file mode 100644
index 000000000000..603584bf4428
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/20_project-antalya-feature-request.md
@@ -0,0 +1,20 @@
+---
+name: Project Antalya Feature request
+about: Suggest an idea for Project Antalya
+title: ''
+labels: antalya, enhancement
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/ISSUE_TEMPLATE/30_project-antalya-question.md b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md
new file mode 100644
index 000000000000..c77cee4a916b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/30_project-antalya-question.md
@@ -0,0 +1,16 @@
+---
+name: Project Antalya Question
+about: Ask a question about Project Antalya
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first.
+
+If your question is concise and probably has a short answer, asking it in [the Altinity Slack workspace](https://altinity.com/slack) is usually the fastest way to get one. Use the #antalya channel.
+
+If you'd rather file a GitHub issue, remove all this text and ask your question here.
+
+Please include relevant environment information as applicable.
diff --git a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml b/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml
deleted file mode 100644
index 7a34c4bb7ba8..000000000000
--- a/.github/ISSUE_TEMPLATE/30_unexpected-behaviour.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-name: Unexpected behaviour
-description: Some feature is working in non-obvious way
-labels: ["unexpected behaviour"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what doesn't work as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Which ClickHouse versions are affected?
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml b/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml
deleted file mode 100644
index 969c1893e6f5..000000000000
--- a/.github/ISSUE_TEMPLATE/35_incomplete_implementation.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Incomplete implementation
-description: Implementation of existing feature is not finished
-labels: ["unfinished code"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what works not as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md
new file mode 100644
index 000000000000..90bf241dc195
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/40_altinity-stable-bug-report.md
@@ -0,0 +1,50 @@
+---
+name: Altinity Stable Bug report
+about: Report something broken in an Altinity Stable Build
+title: ''
+labels: stable
+assignees: ''
+
+---
+
+✅ *I checked [the Altinity Stable Builds lifecycle table](https://docs.altinity.com/altinitystablebuilds/#altinity-stable-builds-life-cycle-table), and the Altinity Stable Build version I'm using is still supported.*
+
+## Type of problem
+Choose one of the following items, then delete the others:
+
+**Bug report** - something's broken
+
+**Incomplete implementation** - something's not quite right
+
+**Performance issue** - something works, just not as quickly as it should
+
+**Backwards compatibility issue** - something used to work, but now it doesn't
+
+**Unexpected behavior** - something surprising happened, but it wasn't the good kind of surprise
+
+**Installation issue** - something doesn't install the way it should
+
+**Usability issue** - something works, but it could be a lot easier
+
+**Documentation issue** - something in the docs is wrong, incomplete, or confusing
+
+## Describe the situation
+A clear, concise description of what's happening. Can you reproduce it in a ClickHouse Official build of the same version?
+
+## How to reproduce the behavior
+
+* Which Altinity Stable Build version to use
+* Which interface to use, if it matters
+* Non-default settings, if any
+* `CREATE TABLE` statements for all tables involved
+* Sample data for all these tables, use the [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/31fd4f5eb41d5ec26724fc645c11fe4d62eae07f/programs/obfuscator/README.md) if necessary
+* Queries to run that lead to an unexpected result
+
+## Expected behavior
+A clear, concise description of what you expected to happen.
+
+## Logs, error messages, stacktraces, screenshots...
+Add any details that might explain the issue.
+
+## Additional context
+Add any other context about the issue here.
diff --git a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml b/.github/ISSUE_TEMPLATE/45_usability-issue.yaml
deleted file mode 100644
index 0d2ae1a580e5..000000000000
--- a/.github/ISSUE_TEMPLATE/45_usability-issue.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-name: Usability improvement request
-description: Report something can be made more convenient to use
-labels: ["usability"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the improvement
- description: A clear and concise description of what you want to happen
- validations:
- required: true
- - type: textarea
- attributes:
- label: Which ClickHouse versions are affected?
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md
new file mode 100644
index 000000000000..027970e25a02
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/50_altinity-stable-question.md
@@ -0,0 +1,16 @@
+---
+name: Altinity Stable Question
+about: Ask a question about an Altinity Stable Build
+title: ''
+labels: question, stable
+assignees: ''
+
+---
+
+Make sure to check the [Altinity documentation](https://docs.altinity.com/) and the [Altinity Knowledge Base](https://kb.altinity.com/) first.
+
+If your question is concise and probably has a short answer, asking it in [the Altinity Slack workspace](https://altinity.com/slack) is usually the fastest way to get one.
+
+For more complicated questions, consider [asking them on StackOverflow with the tag "clickhouse"](https://stackoverflow.com/questions/tagged/clickhouse).
+
+If you'd rather file a GitHub issue, remove all this text and ask your question here.
diff --git a/.github/ISSUE_TEMPLATE/50_build-issue.yaml b/.github/ISSUE_TEMPLATE/50_build-issue.yaml
deleted file mode 100644
index 0549944c0bb2..000000000000
--- a/.github/ISSUE_TEMPLATE/50_build-issue.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: Build issue
-description: Report failed ClickHouse build from master
-labels: ["build"]
-body:
- - type: markdown
- attributes:
- value: |
- > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the problem
- description: A clear and concise description of what doesn't work as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Operating system
- description: OS kind or distribution, specific version/release, non-standard kernel if any. If you are trying to build inside virtual machine, please mention it too.
- validations:
- required: false
- - type: textarea
- attributes:
- label: CMake version
- description: The output of `cmake --version`.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Ninja version
- description: The output of `ninja --version`.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Compiler name and version
- description: We recommend to use clang. The version can be obtained via `clang --version`.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Full cmake and/or ninja output with the error
- description: Please include everything (use https://pastila.nl/ for large output)!
- validations:
- required: true
diff --git a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml b/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml
deleted file mode 100644
index bba6df87a783..000000000000
--- a/.github/ISSUE_TEMPLATE/60_documentation-issue.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Documentation issue
-description: Report something incorrect or missing in documentation
-labels: ["comp-documentation"]
-body:
- - type: markdown
- attributes:
- value: |
- > Make sure that `git diff` result is empty and you've just pulled fresh master. Try cleaning up cmake cache. Just in case, official build instructions are published here: https://clickhouse.com/docs/en/development/build/
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the issue
- description: A clear and concise description of what's wrong in documentation.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml b/.github/ISSUE_TEMPLATE/70_performance-issue.yaml
deleted file mode 100644
index 1df99dc76fda..000000000000
--- a/.github/ISSUE_TEMPLATE/70_performance-issue.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-name: Performance issue
-description: Report something working slower than expected
-labels: ["performance"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the situation
- description: What exactly works slower than expected?
- validations:
- required: true
- - type: textarea
- attributes:
- label: Which ClickHouse versions are affected?
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected performance
- description: What are your performance expectation, why do you think they are realistic? Has it been working faster in older ClickHouse releases? Is it working faster in some specific other system?
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml b/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml
deleted file mode 100644
index 72f56d781979..000000000000
--- a/.github/ISSUE_TEMPLATE/80_backward-compatibility.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
-name: Backward compatibility issue
-description: Report the case when the behaviour of a new version can break existing use cases
-labels: ["backward compatibility"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe the unexpected behaviour
- description: A clear and concise description of what works not as it is supposed to.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Which ClickHouse versions are affected?
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/85_bug-report.yaml b/.github/ISSUE_TEMPLATE/85_bug-report.yaml
deleted file mode 100644
index 349bf82a3a4e..000000000000
--- a/.github/ISSUE_TEMPLATE/85_bug-report.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-name: Bug report
-description: Wrong behavior (visible to users) in the official ClickHouse release.
-labels: ["potential bug"]
-body:
- - type: markdown
- attributes:
- value: |
- > Please make sure that the version you're using is still supported (you can find the list [here](https://github.com/ClickHouse/ClickHouse/blob/master/SECURITY.md#scope-and-supported-versions)).
- > You have to provide the following information whenever possible.
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Describe what's wrong
- description: |
- * A clear and concise description of what works not as it is supposed to.
- * A link to reproducer in [https://fiddle.clickhouse.com/](https://fiddle.clickhouse.com/).
- validations:
- required: true
- - type: dropdown
- attributes:
- label: Does it reproduce on the most recent release?
- description: |
- [The list of releases](https://github.com/ClickHouse/ClickHouse/blob/master/utils/list-versions/version_date.tsv)
- options:
- - 'Yes'
- - 'No'
- validations:
- required: true
- - type: markdown
- attributes:
- value: |
- -----
- > Change "enabled" to true in "send_crash_reports" section in `config.xml`:
- ```xml
-      <send_crash_reports>
-          <!-- Changing <enabled> to true allows sending crash reports to -->
-          <!-- the ClickHouse core developers team via Sentry https://sentry.io -->
-          <enabled>true</enabled>
-      </send_crash_reports>
- ```
- -----
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * Which ClickHouse server version to use
- * Which interface to use, if matters
- * Non-default settings, if any
- * `CREATE TABLE` statements for all tables involved
- * Sample data for all these tables, use [clickhouse-obfuscator](https://github.com/ClickHouse/ClickHouse/blob/c81bec37a58757be1e2b1ac6f20a62b3f14a31f1/programs/obfuscator/Obfuscator.cpp#L55-L95) if necessary
- * Queries to run that lead to unexpected result
- validations:
- required: true
- - type: textarea
- attributes:
- label: Expected behavior
- description: A clear and concise description of what you expected to happen.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: If applicable, add screenshots to help explain your problem.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Additional context
- description: Add any other context about the problem here.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml b/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml
deleted file mode 100644
index 84dc8a372e5a..000000000000
--- a/.github/ISSUE_TEMPLATE/90_fuzzing-report.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Assertion found via fuzzing
-description: Potential issue has been found via Fuzzer or Stress tests
-labels: ["fuzz"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Describe the bug
- description: A link to the report.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: Try to reproduce the report and copy the tables and queries involved.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: You can find additional information in server logs.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml b/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml
deleted file mode 100644
index 7bb47e2b824b..000000000000
--- a/.github/ISSUE_TEMPLATE/95_sanitizer-report.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Sanitizer alert
-description: Potential issue has been found by special code instrumentation
-labels: ["testing"]
-body:
- - type: markdown
- attributes:
- value: |
- > (you don't have to strictly follow this form)
- - type: textarea
- attributes:
- label: Describe the bug
- description: A link to the report.
- validations:
- required: true
- - type: textarea
- attributes:
- label: How to reproduce
- description: Try to reproduce the report and copy the tables and queries involved.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: You can find additional information in server logs.
- validations:
- required: false
diff --git a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml b/.github/ISSUE_TEMPLATE/96_installation-issues.yaml
deleted file mode 100644
index f71f6079453e..000000000000
--- a/.github/ISSUE_TEMPLATE/96_installation-issues.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-name: Installation issue
-description: Issue with ClickHouse installation from https://clickhouse.com/docs/en/install/
-labels: ["comp-install"]
-body:
- - type: markdown
- attributes:
- value: |
- > **I have tried the following solutions**: https://clickhouse.com/docs/en/faq/troubleshooting/#troubleshooting-installation-errors
- - type: textarea
- attributes:
- label: Company or project name
- description: Put your company name or project description here.
- validations:
- required: false
- - type: textarea
- attributes:
- label: Installation type
- description: Packages, docker, single binary, curl?
- validations:
- required: true
- - type: textarea
- attributes:
- label: Source of the ClickHouse
- description: A link to the source. Or the command you've tried.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Describe the problem.
- description: What went wrong and what is the expected result?
- validations:
- required: true
- - type: textarea
- attributes:
- label: Error message and/or stacktrace
- description: You can find additional information in server logs.
- validations:
- required: false
- - type: textarea
- attributes:
- label: How to reproduce
- description: |
- * For Linux-based operating systems: provide a script for clear docker container from the official image
- * For anything else: steps to reproduce on as much as possible clear system
- validations:
- required: false
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 7172463a5781..03b95568dea6 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -22,20 +22,19 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
...
### Documentation entry for user-facing changes
+...
-- [ ] Documentation is written (mandatory for new features)
-
-
+#### Exclude tests:
+- [ ] Fast test
+- [ ] Integration Tests
+- [ ] Stateless tests
+- [ ] Stateful tests
+- [ ] Performance tests
+- [ ] All with ASAN
+- [ ] All with TSAN
+- [ ] All with MSAN
+- [ ] All with UBSAN
+- [ ] All with Coverage
+- [ ] All with Aarch64
+- [ ] All Regression
+- [ ] Disable CI Cache
diff --git a/.github/actionlint.yml b/.github/actionlint.yml
index cf5f575e3c74..904a548dadd5 100644
--- a/.github/actionlint.yml
+++ b/.github/actionlint.yml
@@ -1,9 +1,9 @@
 self-hosted-runner:
   labels:
-    - builder
-    - func-tester
-    - func-tester-aarch64
+    - altinity-builder
+    - altinity-func-tester
+    - altinity-func-tester-aarch64
     - fuzzer-unit-tester
-    - style-checker
-    - style-checker-aarch64
+    - altinity-style-checker
+    - altinity-style-checker-aarch64
     - release-maker
diff --git a/.github/actions/create_workflow_report/action.yml b/.github/actions/create_workflow_report/action.yml
new file mode 100644
index 000000000000..8c975ec345c5
--- /dev/null
+++ b/.github/actions/create_workflow_report/action.yml
@@ -0,0 +1,52 @@
+name: Create and Upload Combined Report
+description: Create and upload a combined CI report
+inputs:
+  workflow_config:
+    description: "Workflow config"
+    required: true
+  final:
+    description: "Control whether the report is final or a preview"
+    required: false
+    default: "false"
+runs:
+  using: "composite"
+  steps:
+    - name: Create workflow config
+      shell: bash
+      run: |
+        mkdir -p ./ci/tmp
+        cat > ./ci/tmp/workflow_config.json << 'EOF'
+        ${{ inputs.workflow_config }}
+        EOF
+
+    - name: Create and upload workflow report
+      env:
+        PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+        ACTIONS_RUN_URL: ${{ github.event.repository.html_url }}/actions/runs/${{ github.run_id }}
+        COMMIT_SHA: ${{ steps.set_version.outputs.commit_sha || github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+        FINAL: ${{ inputs.final }}
+      shell: bash
+      run: |
+        pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5
+
+        CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py"
+        ARGS="--actions-run-url $ACTIONS_RUN_URL --known-fails tests/broken_tests.yaml --cves --pr-number $PR_NUMBER"
+
+        set +e -x
+        if [[ "$FINAL" == "false" ]]; then
+          REPORT_LINK=$($CMD $ARGS --mark-preview)
+        else
+          REPORT_LINK=$($CMD $ARGS)
+        fi
+
+        echo "$REPORT_LINK"
+
+        if [[ "$FINAL" == "true" ]]; then
+          IS_VALID_URL=$(echo "$REPORT_LINK" | grep -E '^https?://')
+          if [[ -n $IS_VALID_URL ]]; then
+            echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+          else
+            echo "Error: $REPORT_LINK" >> $GITHUB_STEP_SUMMARY
+            exit 1
+          fi
+        fi
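+
+# A minimal sketch of a calling step (hypothetical caller; where the
+# workflow_config JSON comes from is an assumption, not defined by this action):
+#
+#   - name: Create and upload combined report
+#     uses: ./.github/actions/create_workflow_report
+#     with:
+#       workflow_config: ${{ needs.ci_config.outputs.workflow_config }}
+#       final: "true"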
diff --git a/.github/actions/create_workflow_report/ci_run_report.html.jinja b/.github/actions/create_workflow_report/ci_run_report.html.jinja
new file mode 100644
index 000000000000..ac9a7a70bb01
--- /dev/null
+++ b/.github/actions/create_workflow_report/ci_run_report.html.jinja
@@ -0,0 +1,272 @@
+
+
+
+
+
+
+
+ {%- if is_preview %}
+
+ {%- endif %}
+
+ {{ title }}
+
+
+
+
+
+
+ {{ title }}
+
+
+
+ Pull Request
+ {{ pr_info_html }}
+
+
+ Workflow Run
+ {{ workflow_id }}
+
+
+ Commit
+ {{ commit_sha }}
+
+
+ Build Report
+ {% for job_name, link in build_report_links.items() %}[{{ job_name }}] {% endfor %}
+
+
+ Date
+ {{ date }}
+
+
+
+ {% if is_preview %}
+ This is a preview. The workflow is not yet finished.
+ {% endif %}
+ Table of Contents
+
+
+ {%- if pr_number != 0 -%}
+ New Fails in PR
+ Compared with base sha {{ base_sha }}
+ {{ new_fails_html }}
+ {%- endif %}
+
+ CI Jobs Status
+ {{ ci_jobs_status_html }}
+
+ Checks Errors
+ {{ checks_errors_html }}
+
+ Checks New Fails
+ {{ checks_fails_html }}
+
+ Regression New Fails
+ {{ regression_fails_html }}
+
+ Docker Images CVEs
+ {{ docker_images_cves_html }}
+
+ Checks Known Fails
+
+ Fail reason conventions:
+ KNOWN - Accepted fail; a fix is not planned
+ INVESTIGATE - We don't know why it fails
+ NEEDSFIX - Investigation done and a fix is needed to make it pass
+
+ {{ checks_known_fails_html }}
+
+
+
+
\ No newline at end of file
diff --git a/.github/actions/create_workflow_report/create_workflow_report.py b/.github/actions/create_workflow_report/create_workflow_report.py
new file mode 100755
index 000000000000..94bc426e7e5f
--- /dev/null
+++ b/.github/actions/create_workflow_report/create_workflow_report.py
@@ -0,0 +1,916 @@
+#!/usr/bin/env python3
+import argparse
+import os
+from pathlib import Path
+from itertools import combinations
+import json
+from datetime import datetime
+from functools import lru_cache
+from glob import glob
+import urllib.parse
+import re
+
+import pandas as pd
+from jinja2 import Environment, FileSystemLoader
+import requests
+from clickhouse_driver import Client
+import boto3
+from botocore.exceptions import NoCredentialsError
+import yaml
+
+
+DATABASE_HOST_VAR = "CHECKS_DATABASE_HOST"
+DATABASE_USER_VAR = "CLICKHOUSE_TEST_STAT_LOGIN"
+DATABASE_PASSWORD_VAR = "CLICKHOUSE_TEST_STAT_PASSWORD"
+S3_BUCKET = "altinity-build-artifacts"
+GITHUB_REPO = "Altinity/ClickHouse"
+GITHUB_TOKEN = os.getenv("GITHUB_TOKEN") or os.getenv("GH_TOKEN")
+
+def get_commit_statuses(sha: str) -> pd.DataFrame:
+    """
+    Fetch commit statuses for a given SHA and return as a pandas DataFrame.
+    Handles pagination to get all statuses.
+
+    Args:
+        sha (str): Commit SHA to fetch statuses for.
+
+    Returns:
+        pd.DataFrame: DataFrame containing all statuses.
+    """
+    headers = {
+        "Authorization": f"token {GITHUB_TOKEN}",
+        "Accept": "application/vnd.github.v3+json",
+    }
+
+    url = f"https://api.github.com/repos/{GITHUB_REPO}/commits/{sha}/statuses"
+
+    all_data = []
+
+    while url:
+        response = requests.get(url, headers=headers)
+
+        if response.status_code != 200:
+            raise Exception(
+                f"Failed to fetch statuses: {response.status_code} {response.text}"
+            )
+
+        data = response.json()
+        all_data.extend(data)
+
+        # Check for pagination links in the response headers
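+        # A typical Link header value looks like (illustrative):
+        #   <https://api.github.com/...&page=2>; rel="next",
+        #   <https://api.github.com/...&page=5>; rel="last"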
+        if "Link" in response.headers:
+            links = response.headers["Link"].split(",")
+            next_url = None
+
+            for link in links:
+                parts = link.strip().split(";")
+                if len(parts) == 2 and 'rel="next"' in parts[1]:
+                    next_url = parts[0].strip("<>")
+                    break
+
+            url = next_url
+        else:
+            url = None
+
+    # Parse relevant fields
+    parsed = [
+        {
+            "job_name": item["context"],
+            "job_status": item["state"],
+            "message": item["description"],
+            "results_link": item["target_url"],
+        }
+        for item in all_data
+    ]
+
+    # Create DataFrame
+    df = pd.DataFrame(parsed)
+
+    # Drop duplicates keeping the first occurrence (newest status for each context)
+    # GitHub returns statuses in reverse chronological order
+    df = df.drop_duplicates(subset=["job_name"], keep="first")
+
+    # Sort by status and job name
+    return df.sort_values(
+        by=["job_status", "job_name"], ascending=[True, True]
+    ).reset_index(drop=True)
+
+
+def get_pr_info_from_number(pr_number: str) -> dict:
+    """
+    Fetch pull request information for a given PR number.
+
+    Args:
+        pr_number (str): Pull request number to fetch information for.
+
+    Returns:
+        dict: Dictionary containing PR information.
+    """
+    headers = {
+        "Authorization": f"token {GITHUB_TOKEN}",
+        "Accept": "application/vnd.github.v3+json",
+    }
+
+    url = f"https://api.github.com/repos/{GITHUB_REPO}/pulls/{pr_number}"
+    response = requests.get(url, headers=headers)
+
+    if response.status_code != 200:
+        raise Exception(
+            f"Failed to fetch pull request info: {response.status_code} {response.text}"
+        )
+
+    return response.json()
+
+
+def get_run_details(run_id: str) -> dict:
+    """
+    Fetch run details for a given run ID.
+    """
+    headers = {
+        "Authorization": f"token {GITHUB_TOKEN}",
+        "Accept": "application/vnd.github.v3+json",
+    }
+
+    url = f"https://api.github.com/repos/{GITHUB_REPO}/actions/runs/{run_id}"
+    response = requests.get(url, headers=headers)
+
+    if response.status_code != 200:
+        raise Exception(
+            f"Failed to fetch run details: {response.status_code} {response.text}"
+        )
+
+    return response.json()
+
+
+def get_checks_fails(client: Client, commit_sha: str, branch_name: str):
+    """
+    Get tests that did not succeed for the given commit and branch.
+    Exclude checks that have status 'error' as they are counted in get_checks_errors.
+    """
+    # argMax(..., check_start_time) keeps the most recent status per test,
+    # so reruns override earlier results
+    query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link
+    FROM (
+        SELECT
+            argMax(check_status, check_start_time) as job_status,
+            check_name as job_name,
+            argMax(test_status, check_start_time) as status,
+            test_name,
+            report_url as results_link,
+            task_url
+        FROM `gh-data`.checks
+        WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}'
+        GROUP BY check_name, test_name, report_url, task_url
+    )
+    WHERE test_status IN ('FAIL', 'ERROR')
+    AND job_status!='error'
+    ORDER BY job_name, test_name
+    """
+    return client.query_dataframe(query)
+
+
+def get_broken_tests_rules(broken_tests_file_path):
+    with open(broken_tests_file_path, "r", encoding="utf-8") as broken_tests_file:
+        broken_tests = yaml.safe_load(broken_tests_file)
+
+    compiled_rules = {"exact": {}, "pattern": {}}
+
+    for test in broken_tests:
+        regex = test.get("regex") is True
+        rule = {
+            "reason": test["reason"],
+        }
+
+        if test.get("check_types"):
+            rule["check_types"] = test["check_types"]
+
+        if regex:
+            rule["regex"] = True
+            compiled_rules["pattern"][re.compile(test["name"])] = rule
+        else:
+            compiled_rules["exact"][test["name"]] = rule
+
+    return compiled_rules
+
+
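+# A minimal sketch of a broken_tests.yaml entry consumed by
+# get_broken_tests_rules above (values are illustrative, not taken from the
+# real file; "check_types" is optional, and "regex: true" switches matching
+# from exact test name to full-regex):
+#
+#   - name: "test_storage_s3/test.py::test_multipart.*"
+#     regex: true
+#     check_types:
+#       - "Integration tests (asan)"
+#     reason: "KNOWN - fix not planned"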
+def get_known_fail_reason(test_name: str, check_name: str, known_fails: dict):
+    """
+    Returns the reason why a test is known to fail based on its name and build context.
+
+    - Exact-name rules are checked first.
+    - Pattern-name rules are checked next (first match wins).
+    - Message/not_message conditions are ignored.
+    """
+    # 1. Exact-name rules
+    rule_data = known_fails["exact"].get(test_name)
+    if rule_data:
+        check_types = rule_data.get("check_types", [])
+        if not check_types or any(
+            check_type in check_name for check_type in check_types
+        ):
+            return rule_data["reason"]
+
+    # 2. Pattern-name rules
+    for name_re, rule_data in known_fails["pattern"].items():
+        if name_re.fullmatch(test_name):
+            check_types = rule_data.get("check_types", [])
+            if not check_types or any(
+                check_type in check_name for check_type in check_types
+            ):
+                return rule_data["reason"]
+
+    return "No reason given"
+
+
+def get_checks_known_fails(
+ client: Client, commit_sha: str, branch_name: str, known_fails: dict
+):
+ """
+ Get tests that are known to fail for the given commit and branch.
+ """
+ if len(known_fails) == 0:
+ return pd.DataFrame()
+
+ query = f"""SELECT job_name, status as test_status, test_name, results_link
+ FROM (
+ SELECT
+ check_name as job_name,
+ argMax(test_status, check_start_time) as status,
+ test_name,
+ report_url as results_link,
+ task_url
+ FROM `gh-data`.checks
+ WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}'
+ GROUP BY check_name, test_name, report_url, task_url
+ )
+ WHERE test_status='BROKEN'
+ ORDER BY job_name, test_name
+ """
+
+ df = client.query_dataframe(query)
+
+ if df.shape[0] == 0:
+ return df
+
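+    # Attach the known-fail reason as a new column just before results_link.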
+ df.insert(
+ len(df.columns) - 1,
+ "reason",
+ df.apply(
+ lambda row: get_known_fail_reason(
+ row["test_name"], row["job_name"], known_fails
+ ),
+ axis=1,
+ ),
+ )
+
+ return df
+
+
+def get_checks_errors(client: Client, commit_sha: str, branch_name: str):
+ """
+ Get checks that have status 'error' for the given commit and branch.
+ """
+ query = f"""SELECT job_status, job_name, status as test_status, test_name, results_link
+ FROM (
+ SELECT
+ argMax(check_status, check_start_time) as job_status,
+ check_name as job_name,
+ argMax(test_status, check_start_time) as status,
+ test_name,
+ report_url as results_link,
+ task_url
+ FROM `gh-data`.checks
+ WHERE commit_sha='{commit_sha}' AND head_ref='{branch_name}'
+ GROUP BY check_name, test_name, report_url, task_url
+ )
+    WHERE job_status='error'
+ ORDER BY job_name, test_name
+ """
+ return client.query_dataframe(query)
+
+
+def drop_prefix_rows(df, column_to_clean):
+ """
+ Drop rows from the dataframe if:
+ - the row matches another row completely except for the specified column
+ - the specified column of that row is a prefix of the same column in another row
+ """
+ to_drop = set()
+ reference_columns = [col for col in df.columns if col != column_to_clean]
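+    # Pairwise comparison is O(n^2); the fail tables this receives are expected to be small.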
+ for (i, row_1), (j, row_2) in combinations(df.iterrows(), 2):
+ if all(row_1[col] == row_2[col] for col in reference_columns):
+ if row_2[column_to_clean].startswith(row_1[column_to_clean]):
+ to_drop.add(i)
+ elif row_1[column_to_clean].startswith(row_2[column_to_clean]):
+ to_drop.add(j)
+ return df.drop(to_drop)
+
+
+def get_regression_fails(client: Client, job_url: str):
+ """
+ Get regression tests that did not succeed for the given job URL.
+ """
+ # If you rename the alias for report_url, also update the formatters in format_results_as_html_table
+ # Nested SELECT handles test reruns
+ query = f"""SELECT arch, job_name, status, test_name, results_link
+ FROM (
+ SELECT
+ architecture as arch,
+ test_name,
+ argMax(result, start_time) AS status,
+ job_name,
+ report_url as results_link,
+ job_url
+ FROM `gh-data`.clickhouse_regression_results
+ GROUP BY architecture, test_name, job_url, job_name, report_url
+ ORDER BY length(test_name) DESC
+ )
+ WHERE job_url LIKE '{job_url}%'
+ AND status IN ('Fail', 'Error')
+ """
+ df = client.query_dataframe(query)
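+    # Drop rows whose test_name is merely a prefix of another otherwise-identical row, keeping the most specific test.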
+ df = drop_prefix_rows(df, "test_name")
+ df["job_name"] = df["job_name"].str.title()
+ return df
+
+
+def get_new_fails_this_pr(
+ client: Client,
+ pr_info: dict,
+ checks_fails: pd.DataFrame,
+ regression_fails: pd.DataFrame,
+):
+ """
+ Get tests that failed in the PR but passed in the base branch.
+ Compares both checks and regression test results.
+ """
+ base_sha = pr_info.get("base", {}).get("sha")
+ if not base_sha:
+ raise Exception("No base SHA found for PR")
+
+ # Modify tables to have the same columns
+ if len(checks_fails) > 0:
+ checks_fails = checks_fails.copy().drop(columns=["job_status"])
+ if len(regression_fails) > 0:
+ regression_fails = regression_fails.copy()
+ regression_fails["job_name"] = regression_fails.apply(
+ lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1
+ )
+ regression_fails["test_status"] = regression_fails["status"]
+
+ # Combine both types of fails and select only desired columns
+ desired_columns = ["job_name", "test_name", "test_status", "results_link"]
+ all_pr_fails = pd.concat([checks_fails, regression_fails], ignore_index=True)[
+ desired_columns
+ ]
+ if len(all_pr_fails) == 0:
+ return pd.DataFrame()
+
+ # Get all checks from the base branch that didn't fail
+ base_checks_query = f"""SELECT job_name, status as test_status, test_name, results_link
+ FROM (
+ SELECT
+ check_name as job_name,
+ argMax(test_status, check_start_time) as status,
+ test_name,
+ report_url as results_link,
+ task_url
+ FROM `gh-data`.checks
+ WHERE commit_sha='{base_sha}'
+ GROUP BY check_name, test_name, report_url, task_url
+ )
+ WHERE test_status NOT IN ('FAIL', 'ERROR')
+ ORDER BY job_name, test_name
+ """
+ base_checks = client.query_dataframe(base_checks_query)
+
+ # Get regression results from base branch that didn't fail
+ base_regression_query = f"""SELECT arch, job_name, status, test_name, results_link
+ FROM (
+ SELECT
+ architecture as arch,
+ test_name,
+ argMax(result, start_time) AS status,
+ job_url,
+ job_name,
+ report_url as results_link
+ FROM `gh-data`.clickhouse_regression_results
+        WHERE results_link LIKE '%/{base_sha}/%'
+ GROUP BY architecture, test_name, job_url, job_name, report_url
+ ORDER BY length(test_name) DESC
+ )
+ WHERE status NOT IN ('Fail', 'Error')
+ """
+ base_regression = client.query_dataframe(base_regression_query)
+ if len(base_regression) > 0:
+ base_regression["job_name"] = base_regression.apply(
+ lambda row: f"{row['arch']} {row['job_name']}".strip(), axis=1
+ )
+ base_regression["test_status"] = base_regression["status"]
+ base_regression = base_regression.drop(columns=["arch", "status"])
+
+ # Combine base results
+ base_results = pd.concat([base_checks, base_regression], ignore_index=True)
+
+ # Find tests that failed in PR but passed in base
+ pr_failed_tests = set(zip(all_pr_fails["job_name"], all_pr_fails["test_name"]))
+ base_passed_tests = set(zip(base_results["job_name"], base_results["test_name"]))
+
+ new_fails = pr_failed_tests.intersection(base_passed_tests)
+
+ # Filter PR results to only include new fails
+ mask = all_pr_fails.apply(
+ lambda row: (row["job_name"], row["test_name"]) in new_fails, axis=1
+ )
+ new_fails_df = all_pr_fails[mask]
+
+ return new_fails_df
+
+
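+# Exactly one workflow_config*.json is expected under ./ci/tmp; read it once and cache the result.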
+@lru_cache
+def get_workflow_config() -> dict:
+ workflow_config_files = glob("./ci/tmp/workflow_config*.json")
+ if len(workflow_config_files) == 0:
+ raise Exception("No workflow config file found")
+ if len(workflow_config_files) > 1:
+ raise Exception("Multiple workflow config files found")
+ with open(workflow_config_files[0], "r") as f:
+ return json.load(f)
+
+
+def get_cached_job(job_name: str) -> dict:
+ workflow_config = get_workflow_config()
+ return workflow_config["cache_jobs"].get(job_name, {})
+
+
+def get_cves(pr_number, commit_sha, branch):
+ """
+ Fetch Grype results from S3.
+
+ If no results are available for download, returns ... (Ellipsis).
+ """
+ s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL"))
+ prefixes_to_check = set()
+
+ def format_prefix(pr_number, commit_sha, branch):
+ if pr_number == 0:
+ return f"REFs/{branch}/{commit_sha}/grype/"
+ else:
+ return f"PRs/{pr_number}/{commit_sha}/grype/"
+
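+    # When the Docker image jobs were cache hits, their Grype results live under the run that
+    # originally built the images, so prefer those prefixes.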
+ cached_server_job = get_cached_job("Docker server image")
+ if cached_server_job:
+ prefixes_to_check.add(
+ format_prefix(
+ cached_server_job["pr_number"],
+ cached_server_job["sha"],
+ cached_server_job["branch"],
+ )
+ )
+ cached_keeper_job = get_cached_job("Docker keeper image")
+ if cached_keeper_job:
+ prefixes_to_check.add(
+ format_prefix(
+ cached_keeper_job["pr_number"],
+ cached_keeper_job["sha"],
+ cached_keeper_job["branch"],
+ )
+ )
+
+ if not prefixes_to_check:
+ prefixes_to_check = {format_prefix(pr_number, commit_sha, branch)}
+
+ grype_result_dirs = []
+ for s3_prefix in prefixes_to_check:
+ try:
+ response = s3_client.list_objects_v2(
+ Bucket=S3_BUCKET, Prefix=s3_prefix, Delimiter="/"
+ )
+ grype_result_dirs.extend(
+ content["Prefix"] for content in response.get("CommonPrefixes", [])
+ )
+ except Exception as e:
+ print(f"Error listing S3 objects at {s3_prefix}: {e}")
+ continue
+
+ if len(grype_result_dirs) == 0:
+ # We were asked to check the CVE data, but none was found,
+ # maybe this is a preview report and grype results are not available yet
+ return ...
+
+ results = []
+ for path in grype_result_dirs:
+ file_key = f"{path}result.json"
+ try:
+ file_response = s3_client.get_object(Bucket=S3_BUCKET, Key=file_key)
+ content = file_response["Body"].read().decode("utf-8")
+ results.append(json.loads(content))
+ except Exception as e:
+ print(f"Error getting S3 object at {file_key}: {e}")
+ continue
+
+ rows = []
+ for scan_result in results:
+ for match in scan_result["matches"]:
+ rows.append(
+ {
+ "docker_image": scan_result["source"]["target"]["userInput"],
+ "severity": match["vulnerability"]["severity"],
+ "identifier": match["vulnerability"]["id"],
+ "namespace": match["vulnerability"]["namespace"],
+ }
+ )
+
+ if len(rows) == 0:
+ return pd.DataFrame()
+
+ df = pd.DataFrame(rows).drop_duplicates()
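+    # Order rows by severity rank, most severe first.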
+ df = df.sort_values(
+ by="severity",
+ key=lambda col: col.str.lower().map(
+ {"critical": 1, "high": 2, "medium": 3, "low": 4, "negligible": 5}
+ ),
+ )
+ return df
+
+
+def url_to_html_link(url: str) -> str:
+    if not url:
+        return ""
+    # Use the last path segment, minus any query string, as the link text
+    text = url.split("/")[-1].split("?")[0]
+    if not text:
+        text = "results"
+    return f'<a href="{url}">{text}</a>'
+
+
+def format_test_name_for_linewrap(text: str) -> str:
+    """Tweak the test name to improve line wrapping."""
+    return f'<div class="test-name">{text}</div>'
+
+
+def format_test_status(text: str) -> str:
+ """Format the test status for better readability."""
+ if text.lower().startswith("fail"):
+ color = "red"
+ elif text.lower() == "skipped":
+ color = "grey"
+ elif text.lower() in ("success", "ok", "passed", "pass"):
+ color = "green"
+ else:
+ color = "orange"
+
+    return f'<span style="color: {color}">{text}</span>'
+
+
+def format_results_as_html_table(results) -> str:
+ if len(results) == 0:
+ return "Nothing to report
"
+ results.columns = [col.replace("_", " ").title() for col in results.columns]
+ html = results.to_html(
+ index=False,
+ formatters={
+ "Results Link": url_to_html_link,
+ "Test Name": format_test_name_for_linewrap,
+ "Test Status": format_test_status,
+ "Job Status": format_test_status,
+ "Status": format_test_status,
+ "Message": lambda m: m.replace("\n", " "),
+ "Identifier": lambda i: url_to_html_link(
+ "https://nvd.nist.gov/vuln/detail/" + i
+ ),
+ },
+ escape=False,
+ border=0,
+ classes=["test-results-table"],
+ )
+ return html
+
+
+def backfill_skipped_statuses(
+ job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str
+):
+ """
+ Fill in the job statuses for skipped jobs.
+ """
+
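+    # Skipped jobs do not post commit statuses, so recover them from the workflow result JSON uploaded to S3.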
+ if pr_number == 0:
+ ref_param = f"REF={branch}"
+ workflow_name = "MasterCI"
+ else:
+ ref_param = f"PR={pr_number}"
+ workflow_name = "PR"
+
+ status_file = f"result_{workflow_name.lower()}.json"
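+    # Maps "PR=123" -> "PRs/123" and "REF=branch" -> "REFs/branch" to match the S3 upload layout.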
+ s3_path = f"https://{S3_BUCKET}.s3.amazonaws.com/{ref_param.replace('=', 's/')}/{commit_sha}/{status_file}"
+ response = requests.get(s3_path)
+
+ if response.status_code != 200:
+ return job_statuses
+
+ status_data = response.json()
+ skipped_jobs = []
+ for job in status_data["results"]:
+ if job["status"] == "skipped" and len(job["links"]) > 0:
+ skipped_jobs.append(
+ {
+ "job_name": job["name"],
+ "job_status": job["status"],
+ "message": job["info"],
+ "results_link": job["links"][0],
+ }
+ )
+
+ return pd.concat([job_statuses, pd.DataFrame(skipped_jobs)], ignore_index=True)
+
+
+def get_build_report_links(
+ job_statuses: pd.DataFrame, pr_number: int, branch: str, commit_sha: str
+):
+ """
+ Get the build report links for the given PR number, branch, and commit SHA.
+
+ First checks if a build job submitted a success or skipped status.
+ If not available, it guesses the links.
+ """
+ build_job_names = [
+ "Build (amd_release)",
+ "Build (arm_release)",
+ "Docker server image",
+ "Docker keeper image",
+ ]
+ build_report_links = {}
+
+ for job in job_statuses.itertuples():
+ if (
+ job.job_name in build_job_names
+ and job.job_status
+ in (
+ "success",
+ "skipped",
+ )
+ and job.results_link
+ ):
+ build_report_links[job.job_name] = job.results_link
+
+ if 0 < len(build_report_links) < len(build_job_names):
+ # Only have some of the build jobs, guess the rest.
+ # (It was straightforward to force the build jobs to always appear in the cache,
+ # however doing the same for the docker image jobs is difficult.)
+ ref_job, ref_link = list(build_report_links.items())[0]
+ link_template = ref_link.replace(
+ urllib.parse.quote(ref_job, safe=""), "{job_name}"
+ )
+ for job in build_job_names:
+ if job not in build_report_links:
+ build_report_links[job] = link_template.format(job_name=job)
+
+ if len(build_report_links) > 0:
+ return build_report_links
+
+ # No cache or build result was found, guess the links
+ if pr_number == 0:
+ ref_param = f"REF={branch}"
+ workflow_name = "MasterCI"
+ else:
+ ref_param = f"PR={pr_number}"
+ workflow_name = "PR"
+
+ build_report_link_base = f"https://{S3_BUCKET}.s3.amazonaws.com/json.html?{ref_param}&sha={commit_sha}&name_0={urllib.parse.quote(workflow_name, safe='')}"
+ build_report_links = {
+ job_name: f"{build_report_link_base}&name_1={urllib.parse.quote(job_name, safe='')}"
+ for job_name in build_job_names
+ }
+ return build_report_links
+
+
+def parse_args() -> argparse.Namespace:
+ parser = argparse.ArgumentParser(description="Create a combined CI report.")
+ parser.add_argument( # Need the full URL rather than just the ID to query the databases
+ "--actions-run-url", required=True, help="URL of the actions run"
+ )
+ parser.add_argument(
+ "--pr-number", help="Pull request number for the S3 path", type=int
+ )
+ parser.add_argument("--commit-sha", help="Commit SHA for the S3 path")
+ parser.add_argument(
+ "--no-upload", action="store_true", help="Do not upload the report"
+ )
+ parser.add_argument(
+ "--known-fails", type=str, help="Path to the file with known fails"
+ )
+ parser.add_argument(
+ "--cves", action="store_true", help="Get CVEs from Grype results"
+ )
+ parser.add_argument(
+ "--mark-preview", action="store_true", help="Mark the report as a preview"
+ )
+ return parser.parse_args()
+
+
+def create_workflow_report(
+ actions_run_url: str,
+ pr_number: int = None,
+ commit_sha: str = None,
+ no_upload: bool = False,
+ known_fails_file_path: str = None,
+ check_cves: bool = False,
+ mark_preview: bool = False,
+) -> str:
+
+ host = os.getenv(DATABASE_HOST_VAR)
+ if not host:
+ print(f"{DATABASE_HOST_VAR} is not set")
+ user = os.getenv(DATABASE_USER_VAR)
+ if not user:
+ print(f"{DATABASE_USER_VAR} is not set")
+ password = os.getenv(DATABASE_PASSWORD_VAR)
+ if not password:
+ print(f"{DATABASE_PASSWORD_VAR} is not set")
+ if not GITHUB_TOKEN:
+ print("GITHUB_TOKEN is not set")
+ if not all([host, user, password, GITHUB_TOKEN]):
+ raise Exception("Required environment variables are not set")
+
+ run_id = actions_run_url.split("/")[-1]
+
+ run_details = get_run_details(run_id)
+ branch_name = run_details.get("head_branch", "unknown branch")
+ if pr_number is None or commit_sha is None:
+ if pr_number is None:
+ if len(run_details["pull_requests"]) > 0:
+ pr_number = run_details["pull_requests"][0]["number"]
+ else:
+ pr_number = 0
+ if commit_sha is None:
+ commit_sha = run_details["head_commit"]["id"]
+
+ db_client = Client(
+ host=host,
+ user=user,
+ password=password,
+ port=9440,
+ secure="y",
+ verify=False,
+ settings={"use_numpy": True},
+ )
+
+ fail_results = {
+ "job_statuses": get_commit_statuses(commit_sha),
+ "checks_fails": get_checks_fails(db_client, commit_sha, branch_name),
+ "checks_known_fails": [],
+ "pr_new_fails": [],
+ "checks_errors": get_checks_errors(db_client, commit_sha, branch_name),
+ "regression_fails": get_regression_fails(db_client, actions_run_url),
+ "docker_images_cves": (
+ [] if not check_cves else get_cves(pr_number, commit_sha, branch_name)
+ ),
+ }
+
+ # get_cves returns ... in the case where no Grype result files were found.
+ # This might occur when run in preview mode.
+ cves_not_checked = not check_cves or fail_results["docker_images_cves"] is ...
+
+    known_fails = None
+    if known_fails_file_path:
+        if not os.path.exists(known_fails_file_path):
+            print(f"WARNING: Known fails file {known_fails_file_path} not found.")
+ else:
+ known_fails = get_broken_tests_rules(known_fails_file_path)
+
+ fail_results["checks_known_fails"] = get_checks_known_fails(
+ db_client, commit_sha, branch_name, known_fails
+ )
+
+ if pr_number == 0:
+ pr_info_html = f"Release ({branch_name})"
+ else:
+ try:
+ pr_info = get_pr_info_from_number(pr_number)
+ pr_info_html = f"""
+ #{pr_info.get("number")} ({pr_info.get("base", {}).get('ref')} <- {pr_info.get("head", {}).get('ref')}) {pr_info.get("title")}
+ """
+ fail_results["pr_new_fails"] = get_new_fails_this_pr(
+ db_client,
+ pr_info,
+ fail_results["checks_fails"],
+ fail_results["regression_fails"],
+ )
+        except Exception as e:
+            pr_info = {}
+            pr_info_html = f"Failed to fetch PR info: {e}"
+
+ fail_results["job_statuses"] = backfill_skipped_statuses(
+ fail_results["job_statuses"], pr_number, branch_name, commit_sha
+ )
+
+ high_cve_count = 0
+ if not cves_not_checked and len(fail_results["docker_images_cves"]) > 0:
+ high_cve_count = (
+ fail_results["docker_images_cves"]["severity"]
+ .str.lower()
+ .isin(("high", "critical"))
+ .sum()
+ )
+
+ # Load the template
+ template = Environment(
+ loader=FileSystemLoader(os.path.dirname(__file__))
+ ).get_template("ci_run_report.html.jinja")
+
+ # Define the context for rendering
+ context = {
+ "title": "ClickHouse® CI Workflow Run Report",
+ "github_repo": GITHUB_REPO,
+ "s3_bucket": S3_BUCKET,
+ "pr_info_html": pr_info_html,
+ "pr_number": pr_number,
+ "workflow_id": run_id,
+ "commit_sha": commit_sha,
+ "base_sha": "" if pr_number == 0 else pr_info.get("base", {}).get("sha"),
+ "date": f"{datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')} UTC",
+ "is_preview": mark_preview,
+ "counts": {
+ "jobs_status": f"{sum(fail_results['job_statuses']['job_status'].value_counts().get(x, 0) for x in ('failure', 'error'))} fail/error",
+ "checks_errors": len(fail_results["checks_errors"]),
+ "checks_new_fails": len(fail_results["checks_fails"]),
+ "regression_new_fails": len(fail_results["regression_fails"]),
+ "cves": "N/A" if cves_not_checked else f"{high_cve_count} high/critical",
+ "checks_known_fails": (
+ "N/A" if not known_fails else len(fail_results["checks_known_fails"])
+ ),
+ "pr_new_fails": len(fail_results["pr_new_fails"]),
+ },
+ "build_report_links": get_build_report_links(
+ fail_results["job_statuses"], pr_number, branch_name, commit_sha
+ ),
+ "ci_jobs_status_html": format_results_as_html_table(
+ fail_results["job_statuses"]
+ ),
+ "checks_errors_html": format_results_as_html_table(
+ fail_results["checks_errors"]
+ ),
+ "checks_fails_html": format_results_as_html_table(fail_results["checks_fails"]),
+ "regression_fails_html": format_results_as_html_table(
+ fail_results["regression_fails"]
+ ),
+ "docker_images_cves_html": (
+ "Not Checked
"
+ if cves_not_checked
+ else format_results_as_html_table(fail_results["docker_images_cves"])
+ ),
+ "checks_known_fails_html": (
+ "Not Checked
"
+ if not known_fails
+ else format_results_as_html_table(fail_results["checks_known_fails"])
+ ),
+ "new_fails_html": format_results_as_html_table(fail_results["pr_new_fails"]),
+ }
+
+ # Render the template with the context
+ rendered_html = template.render(context)
+
+ report_name = "ci_run_report.html"
+ report_path = Path(report_name)
+ report_path.write_text(rendered_html, encoding="utf-8")
+
+ if no_upload:
+ print(f"Report saved to {report_path}")
+ exit(0)
+
+ if pr_number == 0:
+ report_destination_key = f"REFs/{branch_name}/{commit_sha}"
+ else:
+ report_destination_key = f"PRs/{pr_number}/{commit_sha}"
+
+ report_destination_key += f"/{run_id}/{report_name}"
+
+ # Upload the report to S3
+ s3_client = boto3.client("s3", endpoint_url=os.getenv("S3_URL"))
+
+ try:
+ s3_client.put_object(
+ Bucket=S3_BUCKET,
+ Key=report_destination_key,
+ Body=rendered_html,
+ ContentType="text/html; charset=utf-8",
+ )
+ except NoCredentialsError:
+ print("Credentials not available for S3 upload.")
+
+ return f"https://s3.amazonaws.com/{S3_BUCKET}/" + report_destination_key
+
+
+def main():
+ args = parse_args()
+
+ report_url = create_workflow_report(
+ args.actions_run_url,
+ args.pr_number,
+ args.commit_sha,
+ args.no_upload,
+ args.known_fails,
+ args.cves,
+ args.mark_preview,
+ )
+
+ print(report_url)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.github/actions/create_workflow_report/workflow_report_hook.sh b/.github/actions/create_workflow_report/workflow_report_hook.sh
new file mode 100755
index 000000000000..04a09a9ee3ca
--- /dev/null
+++ b/.github/actions/create_workflow_report/workflow_report_hook.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# This script is for generating preview reports when invoked as a post-hook from a praktika job
+pip install clickhouse-driver==0.2.8 numpy==1.26.4 pandas==2.0.3 jinja2==3.1.5
+ARGS="--mark-preview --known-fails tests/broken_tests.yaml --cves --actions-run-url $GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID --pr-number $PR_NUMBER"
+CMD="python3 .github/actions/create_workflow_report/create_workflow_report.py"
+$CMD $ARGS
+
diff --git a/.github/actions/docker_setup/action.yml b/.github/actions/docker_setup/action.yml
new file mode 100644
index 000000000000..56f713fa59d1
--- /dev/null
+++ b/.github/actions/docker_setup/action.yml
@@ -0,0 +1,32 @@
+name: Docker setup
+description: Setup docker
+inputs:
+ test_name:
+ description: name of the test, used in determining ipv6 configs.
+ default: None
+ type: string
+runs:
+ using: "composite"
+ steps:
+ - name: Docker IPv6 configuration
+ shell: bash
+ if: ${{ contains(inputs.test_name, 'Stateless') }}
+ env:
+ ipv6_subnet: 2001:3984:3989::/64
+ run: |
+ # make sure docker uses proper IPv6 config
+ sudo touch /etc/docker/daemon.json
+ sudo chown ubuntu:ubuntu /etc/docker/daemon.json
+        sudo cat <<EOT > /etc/docker/daemon.json
+ {
+ "ipv6": true,
+ "fixed-cidr-v6": "${{ env.ipv6_subnet }}"
+ }
+ EOT
+ sudo chown root:root /etc/docker/daemon.json
+ sudo systemctl restart docker
+ sudo systemctl status docker
+ - name: Docker info
+ shell: bash
+ run: |
+ docker info
diff --git a/.github/actions/runner_setup/action.yml b/.github/actions/runner_setup/action.yml
new file mode 100644
index 000000000000..5a229fdd47e7
--- /dev/null
+++ b/.github/actions/runner_setup/action.yml
@@ -0,0 +1,19 @@
+name: Setup
+description: Setup environment
+runs:
+ using: "composite"
+ steps:
+ - name: Setup zram
+ shell: bash
+ run: |
+ sudo modprobe zram
+ MemTotal=$(grep -Po "(?<=MemTotal:)\s+\d+" /proc/meminfo) # KiB
+ Percent=200
+ ZRAM_SIZE=$(($MemTotal / 1024 / 1024 * $Percent / 100)) # Convert to GiB
+ .github/retry.sh 30 2 sudo zramctl --size ${ZRAM_SIZE}GiB --algorithm zstd /dev/zram0
+ sudo mkswap /dev/zram0 && sudo swapon -p 100 /dev/zram0
+ sudo sysctl vm.swappiness=200
+ - name: Install awscli
+ shell: bash
+ run: |
+ .github/retry.sh 10 30 sudo apt-get install -y awscli
diff --git a/.github/grype/parse_vulnerabilities_grype.py b/.github/grype/parse_vulnerabilities_grype.py
new file mode 100644
index 000000000000..fec2ef3bfac7
--- /dev/null
+++ b/.github/grype/parse_vulnerabilities_grype.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python3
+import json
+
+from testflows.core import *
+
+xfails = {}
+
+
+@Name("docker vulnerabilities")
+@XFails(xfails)
+@TestModule
+def docker_vulnerabilities(self):
+ with Given("I gather grype scan results"):
+ with open("./result.json", "r") as f:
+ results = json.load(f)
+
+ for vulnerability in results["matches"]:
+ with Test(
+ f"{vulnerability['vulnerability']['id']}@{vulnerability['vulnerability']['namespace']},{vulnerability['vulnerability']['severity']}",
+ flags=TE,
+ ):
+ note(vulnerability)
+ critical_levels = set(["HIGH", "CRITICAL"])
+ if vulnerability['vulnerability']["severity"].upper() in critical_levels:
+ with Then(
+ f"Found vulnerability of {vulnerability['vulnerability']['severity']} severity"
+ ):
+ result(Fail)
+
+
+if main():
+ docker_vulnerabilities()
diff --git a/.github/grype/run_grype_scan.sh b/.github/grype/run_grype_scan.sh
new file mode 100755
index 000000000000..af428e37d669
--- /dev/null
+++ b/.github/grype/run_grype_scan.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -x
+set -e
+
+IMAGE=$1
+
+GRYPE_VERSION=${GRYPE_VERSION:-"v0.92.2"}
+
+docker pull $IMAGE
+docker pull anchore/grype:${GRYPE_VERSION}
+
+docker run \
+ --rm --volume /var/run/docker.sock:/var/run/docker.sock \
+ --name Grype anchore/grype:${GRYPE_VERSION} \
+ --scope all-layers \
+ -o json \
+ $IMAGE > result.json
+
+ls -sh
diff --git a/.github/grype/transform_and_upload_results_s3.sh b/.github/grype/transform_and_upload_results_s3.sh
new file mode 100755
index 000000000000..38674d7a2a26
--- /dev/null
+++ b/.github/grype/transform_and_upload_results_s3.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+DOCKER_IMAGE=$(echo "$DOCKER_IMAGE" | sed 's/[\/:]/_/g')
+
+if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/grype/$DOCKER_IMAGE"
+else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA/grype/$DOCKER_IMAGE"
+fi
+
+S3_PATH="s3://$S3_BUCKET/$PREFIX"
+HTTPS_RESULTS_PATH="https://$S3_BUCKET.s3.amazonaws.com/index.html#$PREFIX/"
+HTTPS_REPORT_PATH="https://s3.amazonaws.com/$S3_BUCKET/$PREFIX/results.html"
+echo "https_report_path=$HTTPS_REPORT_PATH" >> $GITHUB_OUTPUT
+
+tfs --no-colors transform nice raw.log nice.log.txt
+tfs --no-colors report results -a $HTTPS_RESULTS_PATH raw.log - --copyright "Altinity LTD" | tfs --no-colors document convert > results.html
+
+aws s3 cp --no-progress nice.log.txt $S3_PATH/nice.log.txt --content-type "text/plain; charset=utf-8" || echo "nice log file not found."
+aws s3 cp --no-progress results.html $S3_PATH/results.html || echo "results file not found."
+aws s3 cp --no-progress raw.log $S3_PATH/raw.log || echo "raw.log file not found."
+aws s3 cp --no-progress result.json $S3_PATH/result.json --content-type "text/plain; charset=utf-8" || echo "result.json not found."
\ No newline at end of file
diff --git a/.github/retry.sh b/.github/retry.sh
new file mode 100755
index 000000000000..566c2cf11315
--- /dev/null
+++ b/.github/retry.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Execute command until exitcode is 0 or
+# maximum number of retries is reached
+# Example:
+#   ./retry.sh <retries> <delay-seconds> <command...>
+retries=$1
+delay=$2
+command="${@:3}"
+exitcode=0
+try=0
+until [ "$try" -ge $retries ]
+do
+ echo "$command"
+ eval "$command"
+ exitcode=$?
+ if [ $exitcode -eq 0 ]; then
+ break
+ fi
+ try=$((try+1))
+  sleep "$delay"
+done
+exit $exitcode
diff --git a/.github/workflows/README.md b/.github/workflows/README.md
new file mode 100644
index 000000000000..56415c2a7478
--- /dev/null
+++ b/.github/workflows/README.md
@@ -0,0 +1,13 @@
+## Scheduled Build Run Results
+
+Results for **the latest** release_workflow scheduled runs.
+
+| Branch | Status |
+| ------ | ------ |
+| **`antalya`** | [![antalya](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=antalya)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aantalya) |
+| **`project-antalya-24.12.2`** | [![project-antalya-24.12.2](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=project-antalya-24.12.2)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Aproject-antalya-24.12.2) |
+| **`customizations/22.8.21`** | [![customizations/22.8.21](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations%2F22.8.21)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/22.8.21) |
+| **`customizations/23.3.19`** | [![customizations/23.3.19](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations%2F23.3.19)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.3.19) |
+| **`customizations/23.8.16`** | [![customizations/23.8.16](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations%2F23.8.16)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/23.8.16) |
+| **`customizations/24.3.14`** | [![customizations/24.3.14](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations%2F24.3.14)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.3.14) |
+| **`customizations/24.8.11`** | [![customizations/24.8.11](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml/badge.svg?branch=customizations%2F24.8.11)](https://github.com/Altinity/ClickHouse/actions/workflows/release_branches.yml?query=branch%3Acustomizations/24.8.11) |
diff --git a/.github/workflows/backport_branches.yml b/.github/workflows/backport_branches.yml
index 294557a38b0f..549efdf6bc38 100644
--- a/.github/workflows/backport_branches.yml
+++ b/.github/workflows/backport_branches.yml
@@ -3,6 +3,13 @@
name: BackportPR
on:
+ workflow_dispatch:
+ inputs:
+ no_cache:
+ description: Run without cache
+ required: false
+ type: boolean
+ default: false
pull_request:
branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]']
@@ -10,8 +17,22 @@ env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }}
- DISABLE_CI_CACHE: ${{ vars.DISABLE_CI_CACHE || '0' }}
+ DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }}
CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }}
+ AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }}
+ AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }}
+ AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/"
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+ GH_TOKEN: ${{ github.token }}
# Allow updating GH commit statuses and PR comments to post an actual job reports link
permissions: write-all
@@ -19,7 +40,7 @@ permissions: write-all
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: []
name: "Config Workflow"
outputs:
@@ -30,6 +51,26 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Config Workflow"
+
+ - name: Note report location to summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+ else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+ fi
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -56,7 +97,7 @@ jobs:
fi
dockers_build_amd:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }}
name: "Dockers Build (amd)"
@@ -68,6 +109,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (amd)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -94,7 +142,7 @@ jobs:
fi
dockers_build_arm:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
name: "Dockers Build (arm)"
@@ -106,6 +154,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (arm)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -132,7 +187,7 @@ jobs:
fi
build_amd_debug:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
name: "Build (amd_debug)"
@@ -144,6 +199,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -170,7 +232,7 @@ jobs:
fi
build_amd_release:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
name: "Build (amd_release)"
@@ -182,6 +244,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -208,7 +277,7 @@ jobs:
fi
build_amd_asan:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
name: "Build (amd_asan)"
@@ -220,6 +289,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_asan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -246,7 +322,7 @@ jobs:
fi
build_amd_tsan:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
name: "Build (amd_tsan)"
@@ -258,6 +334,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -284,7 +367,7 @@ jobs:
fi
build_arm_release:
- runs-on: [self-hosted, builder-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
name: "Build (arm_release)"
@@ -296,6 +379,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -322,7 +412,7 @@ jobs:
fi
build_amd_darwin:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
name: "Build (amd_darwin)"
@@ -334,6 +424,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_darwin)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -360,7 +457,7 @@ jobs:
fi
build_arm_darwin:
- runs-on: [self-hosted, builder-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
name: "Build (arm_darwin)"
@@ -372,6 +469,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_darwin)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -398,7 +502,7 @@ jobs:
fi
docker_server_image:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
name: "Docker server image"
@@ -410,6 +514,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Docker server image"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -436,7 +547,7 @@ jobs:
fi
docker_keeper_image:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
name: "Docker keeper image"
@@ -448,6 +559,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Docker keeper image"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -474,7 +592,7 @@ jobs:
fi
install_packages_amd_debug:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX2RlYnVnKQ==') }}
name: "Install packages (amd_debug)"
@@ -486,6 +604,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Install packages (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -512,7 +637,7 @@ jobs:
fi
compatibility_check_release:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }}
name: "Compatibility check (release)"
@@ -524,6 +649,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Compatibility check (release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -550,7 +682,7 @@ jobs:
fi
compatibility_check_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_release]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }}
name: "Compatibility check (aarch64)"
@@ -562,6 +694,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Compatibility check (aarch64)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -588,7 +727,7 @@ jobs:
fi
stateless_tests_amd_asan_distributed_plan_parallel_1_2:
- runs-on: [self-hosted, amd-medium-cpu]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }}
name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
@@ -600,6 +739,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -626,7 +772,7 @@ jobs:
fi
stateless_tests_amd_asan_distributed_plan_parallel_2_2:
- runs-on: [self-hosted, amd-medium-cpu]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }}
name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
@@ -638,6 +784,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -664,7 +817,7 @@ jobs:
fi
stateless_tests_amd_asan_distributed_plan_sequential:
- runs-on: [self-hosted, amd-small-mem]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }}
name: "Stateless tests (amd_asan, distributed plan, sequential)"
@@ -676,6 +829,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -702,7 +862,7 @@ jobs:
fi
stress_test_amd_tsan:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }}
name: "Stress test (amd_tsan)"
@@ -714,6 +874,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -740,7 +907,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_1_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 1/6)"
@@ -752,6 +919,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 1/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -778,7 +952,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_2_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 2/6)"
@@ -790,6 +964,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 2/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -816,7 +997,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_3_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 3/6)"
@@ -828,6 +1009,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 3/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -854,7 +1042,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_4_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 4/6)"
@@ -866,6 +1054,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 4/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -892,7 +1087,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_5_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 5/6)"
@@ -904,6 +1099,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 5/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -930,7 +1132,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_6_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 6/6)"
@@ -942,6 +1144,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 6/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -968,7 +1177,7 @@ jobs:
fi
integration_tests_amd_tsan_1_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }}
name: "Integration tests (amd_tsan, 1/6)"
@@ -980,6 +1189,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 1/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1006,7 +1222,7 @@ jobs:
fi
integration_tests_amd_tsan_2_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }}
name: "Integration tests (amd_tsan, 2/6)"
@@ -1018,6 +1234,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 2/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1044,7 +1267,7 @@ jobs:
fi
integration_tests_amd_tsan_3_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }}
name: "Integration tests (amd_tsan, 3/6)"
@@ -1056,6 +1279,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 3/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1082,7 +1312,7 @@ jobs:
fi
integration_tests_amd_tsan_4_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }}
name: "Integration tests (amd_tsan, 4/6)"
@@ -1094,6 +1324,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 4/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1120,7 +1357,7 @@ jobs:
fi
integration_tests_amd_tsan_5_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }}
name: "Integration tests (amd_tsan, 5/6)"
@@ -1132,6 +1369,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 5/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1158,7 +1402,7 @@ jobs:
fi
integration_tests_amd_tsan_6_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }}
name: "Integration tests (amd_tsan, 6/6)"
@@ -1170,6 +1414,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 6/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1196,7 +1447,7 @@ jobs:
fi
finish_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_arm_release, build_amd_darwin, build_arm_darwin, docker_server_image, docker_keeper_image, install_packages_amd_debug, compatibility_check_release, compatibility_check_aarch64, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, stress_test_amd_tsan, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6]
if: ${{ !cancelled() }}
name: "Finish Workflow"
@@ -1208,6 +1459,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Finish Workflow"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
diff --git a/.github/workflows/cancel.yml b/.github/workflows/cancel.yml
new file mode 100644
index 000000000000..c1e11ef212cd
--- /dev/null
+++ b/.github/workflows/cancel.yml
@@ -0,0 +1,19 @@
+name: Cancel
+
+env:
+ # Force the stdout and stderr streams to be unbuffered
+ PYTHONUNBUFFERED: 1
+
+on: # yamllint disable-line rule:truthy
+ workflow_run:
+ workflows: ["PR","PullRequestCI", "ReleaseBranchCI", "DocsCheck", "BackportPR"]
+ types:
+ - requested
+jobs:
+ cancel:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: styfle/cancel-workflow-action@0.9.1
+ with:
+ all_but_latest: true
+ workflow_id: ${{ github.event.workflow.id }}
diff --git a/.github/workflows/cherry_pick.yml b/.github/workflows/cherry_pick.yml
index 315673d4abcc..8e5191eb33cc 100644
--- a/.github/workflows/cherry_pick.yml
+++ b/.github/workflows/cherry_pick.yml
@@ -28,7 +28,7 @@ jobs:
REPO_TEAM=core
EOF
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
with:
clear-repository: true
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
diff --git a/.github/workflows/compare_fails.yml b/.github/workflows/compare_fails.yml
new file mode 100644
index 000000000000..1f734845ac1a
--- /dev/null
+++ b/.github/workflows/compare_fails.yml
@@ -0,0 +1,110 @@
+name: Compare CI Failures
+
+on:
+ workflow_dispatch:
+ inputs:
+ current_ref:
+ description: 'Current reference (commit hash or git tag) (default: current commit on selected branch)'
+ required: false
+ type: string
+ previous_ref:
+ description: 'Previous reference to compare with (commit hash, git tag or workflow url) (default: previous stable tag for current reference)'
+ required: false
+ type: string
+ upstream_ref:
+ description: 'Upstream reference to compare with: commit hash, git tag, or MAJOR.MINOR version (default: previous lts tag for the current reference)'
+ required: false
+ type: string
+ include_broken:
+ description: 'Include BROKEN tests in comparison'
+ required: false
+ type: boolean
+ default: false
+ push:
+ tags:
+ - 'v*.altinity*'
+
+env:
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+
+jobs:
+ Compare:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Check commit status
+ run: |
+ if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ inputs.current_ref }}" ]]; then
+ # For workflow_dispatch with custom ref, skip the check
+ exit 0
+ fi
+
+ # Query GitHub API for commit status
+ STATUSES=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ "https://api.github.com/repos/${{ github.repository }}/commits/${{ github.sha }}/status")
+
+ # Check if there are any statuses
+ if [ "$(echo "$STATUSES" | jq -r '.total_count')" -eq 0 ]; then
+ echo "No commit statuses found for ${{ github.sha }}. Assuming tests have not run yet. Aborting workflow."
+ exit 1
+ fi
+
+ echo "Found commit statuses, proceeding with comparison."
+
+ - name: Check out repository code
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ ref: ${{ inputs.current_ref || github.ref }}
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
+ cache: 'pip'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install clickhouse-driver requests pandas tabulate
+
+ - name: Set default refs
+ id: default_refs
+ run: |
+ VERSION=$(git describe --tags --abbrev=0 | sed 's/v\([0-9]\+\.[0-9]\+\).*/\1/')
+ echo "Detected version: $VERSION"
+
+ CURRENT_TAG=$(git tag --contains ${{ inputs.current_ref || github.sha }} | sort -r | grep -m 1 'altinity' || echo '')
+ echo "CURRENT_TAG: '$CURRENT_TAG' ${{ inputs.current_ref || github.sha }}"
+ PREVIOUS_TAG_COMMIT=$(git log -1 --until=yesterday --tags="v${VERSION}*.altinity*" | grep -Po "(?<=commit ).*")
+ PREVIOUS_TAG=$(git tag --contains $PREVIOUS_TAG_COMMIT | sort -r | grep -m 1 'altinity')
+ echo "PREVIOUS_TAG: '$PREVIOUS_TAG' $PREVIOUS_TAG_COMMIT"
+ UPSTREAM_TAG_COMMIT=$(git log -1 --tags="v${VERSION}*-lts" | grep -Po "(?<=commit ).*")
+ UPSTREAM_TAG=$(git tag --contains $UPSTREAM_TAG_COMMIT | sort -r | grep -m 1 'lts')
+ echo "UPSTREAM_TAG: '$UPSTREAM_TAG' $UPSTREAM_TAG_COMMIT"
+
+ echo "PREVIOUS_TAG=$PREVIOUS_TAG" >> $GITHUB_OUTPUT
+ echo "PREVIOUS_TAG_COMMIT=$PREVIOUS_TAG_COMMIT" >> $GITHUB_OUTPUT
+ echo "UPSTREAM_TAG=$UPSTREAM_TAG" >> $GITHUB_OUTPUT
+ echo "UPSTREAM_TAG_COMMIT=$UPSTREAM_TAG_COMMIT" >> $GITHUB_OUTPUT
+ echo "CURRENT_TAG=$CURRENT_TAG" >> $GITHUB_OUTPUT
+ - name: Comparison report
+ if: ${{ !cancelled() }}
+ run: |
+ git clone https://github.com/Altinity/actions.git
+ cd actions
+ git checkout 4623f919ee2738bea69aad405879562476736932
+ python3 scripts/compare_ci_fails.py \
+ --current-ref ${{ steps.default_refs.outputs.CURRENT_TAG || inputs.current_ref || github.sha }} \
+ --previous-ref ${{ steps.default_refs.outputs.PREVIOUS_TAG || inputs.previous_ref || steps.default_refs.outputs.PREVIOUS_TAG_COMMIT }} \
+ --upstream-ref ${{ steps.default_refs.outputs.UPSTREAM_TAG || inputs.upstream_ref || steps.default_refs.outputs.UPSTREAM_TAG_COMMIT }} \
+ ${{ inputs.include_broken && '--broken' || '' }}
+ cat comparison_results.md >> $GITHUB_STEP_SUMMARY
+
+ - name: Upload comparison results
+ uses: actions/upload-artifact@v4
+ with:
+ name: comparison-results
+ path: |
+ actions/comparison_results.md
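Editorial note: the "Set default refs" step above derives its three refs from the tag layout. A local dry run of the same derivation, assuming Altinity-style tags (vMAJOR.MINOR.*.altinity* and vMAJOR.MINOR.*-lts) and simplified to `--format=%H` instead of grepping `git log` output, might look like:

```bash
# Dry run of the default-ref derivation (sketch, not the workflow itself).
VERSION=$(git describe --tags --abbrev=0 | sed 's/v\([0-9]\+\.[0-9]\+\).*/\1/')
CURRENT_TAG=$(git tag --contains HEAD | sort -r | grep -m1 'altinity' || true)
PREVIOUS_TAG_COMMIT=$(git log -1 --until=yesterday --tags="v${VERSION}*.altinity*" --format=%H)
UPSTREAM_TAG_COMMIT=$(git log -1 --tags="v${VERSION}*-lts" --format=%H)
echo "version=$VERSION current=$CURRENT_TAG previous=$PREVIOUS_TAG_COMMIT upstream=$UPSTREAM_TAG_COMMIT"
```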
diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml
index dc708514dfd5..421261fb436f 100644
--- a/.github/workflows/create_release.yml
+++ b/.github/workflows/create_release.yml
@@ -70,7 +70,7 @@ jobs:
runs-on: [self-hosted, release-maker]
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f
with:
token: ${{secrets.ROBOT_CLICKHOUSE_COMMIT_TOKEN}}
fetch-depth: 0
diff --git a/.github/workflows/docker_publish.yml b/.github/workflows/docker_publish.yml
new file mode 100644
index 000000000000..1e59aa8b5b8d
--- /dev/null
+++ b/.github/workflows/docker_publish.yml
@@ -0,0 +1,150 @@
+name: Republish Multiarch Docker Image
+
+on:
+ workflow_dispatch:
+ inputs:
+ docker_image:
+ description: 'Multiarch Docker image with tag'
+ required: true
+ release_environment:
+ description: 'Release environment: "staging" or "production"'
+ type: choice
+ default: 'staging'
+ options:
+ - staging
+ - production
+ upload_artifacts:
+ description: 'Upload artifacts directly in this workflow'
+ type: boolean
+ default: true
+ s3_upload_path:
+ description: 'S3 path to upload artifacts to'
+ type: string
+ required: false
+ workflow_call:
+ inputs:
+ docker_image:
+ type: string
+ required: true
+ release_environment:
+ type: string
+ required: false
+ default: 'staging'
+ upload_artifacts:
+ type: boolean
+ required: false
+ default: false
+ s3_upload_path:
+ type: string
+ required: false
+ outputs:
+ image_archives_path:
+ description: 'Path to the image archives directory'
+ value: ${{ jobs.republish.outputs.image_archives_path }}
+
+env:
+ IMAGE: ${{ github.event.inputs.docker_image || inputs.docker_image }}
+
+jobs:
+ republish:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ outputs:
+ image_archives_path: ${{ steps.set_path.outputs.image_archives_path }}
+ steps:
+ - name: Docker Hub Login
+ uses: docker/login-action@v2
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_TOKEN }}
+
+ - name: Set clickhouse-server version as new tag
+ run: |
+ # Determine "clickhouse-server" or "clickhouse-keeper"
+ echo "Input IMAGE: $IMAGE"
+ COMPONENT=$(echo "$IMAGE" | sed -E 's|.*/(clickhouse-[^:]+):.*|\1|')
+ echo "Component determined: $COMPONENT"
+ echo "COMPONENT=$COMPONENT" >> $GITHUB_ENV
+
+ # Pull the image
+ echo "Pulling the image"
+ docker pull $IMAGE
+
+ # Get version and clean it up
+ echo "Getting version from image..."
+ VERSION_OUTPUT=$(docker run --rm $IMAGE $COMPONENT --version)
+ echo "Raw version output: $VERSION_OUTPUT"
+
+ # Extract just the version number
+ NEW_TAG=$(echo "$VERSION_OUTPUT" | sed -E 's/.*version ([0-9.]+[^ ]*).*/\1/')
+ echo "Cleaned version: $NEW_TAG"
+
+ # Append "-prerelease" if necessary
+ if [ "${{ github.event.inputs.release_environment || inputs.release_environment }}" = "staging" ]; then
+ NEW_TAG="${NEW_TAG}-prerelease"
+ fi
+
+ if [[ "$IMAGE" == *-alpine* ]]; then
+ NEW_TAG="${NEW_TAG}-alpine"
+ fi
+ echo "New tag: $NEW_TAG"
+
+ # Export the new tag
+ echo "NEW_TAG=$NEW_TAG" >> $GITHUB_ENV
+
+ - name: Process multiarch manifest
+ run: |
+ echo "Re-tag multiarch image $IMAGE to altinity/$COMPONENT:$NEW_TAG"
+ docker buildx imagetools create --tag "altinity/$COMPONENT:$NEW_TAG" "$IMAGE"
+
+ # Create directory for image archives
+ mkdir -p image_archives
+
+ # Pull and save platform-specific images
+ for PLATFORM in "linux/amd64" "linux/arm64"; do
+ echo "Pulling and saving image for $PLATFORM..."
+ # Pull the specific platform image
+ docker pull --platform $PLATFORM "altinity/$COMPONENT:$NEW_TAG"
+
+ # Save the image to a tar file
+ ARCH=$(echo $PLATFORM | cut -d'/' -f2)
+ docker save "altinity/$COMPONENT:$NEW_TAG" -o "image_archives/${COMPONENT}-${NEW_TAG}-${ARCH}.tar"
+ done
+
+ # Save manifest inspection
+ docker buildx imagetools inspect "altinity/$COMPONENT:$NEW_TAG" > image_archives/manifest.txt
+
+ # Compress the archives
+ cd image_archives
+ for file in *.tar; do
+ gzip "$file"
+ done
+ cd ..
+
+ - name: Set image archives path
+ id: set_path
+ run: |
+ echo "image_archives_path=${{ github.workspace }}/image_archives" >> $GITHUB_OUTPUT
+
+ - name: Upload image archives
+ if: ${{ github.event.inputs.upload_artifacts || inputs.upload_artifacts }}
+ uses: actions/upload-artifact@v4
+ with:
+ name: docker-images-backup
+ path: image_archives/
+ retention-days: 90
+
+ - name: Install aws cli
+ if: ${{ inputs.s3_upload_path != '' }}
+ uses: unfor19/install-aws-cli-action@v1
+ with:
+ version: 2
+ arch: arm64
+
+ - name: Upload to S3
+ if: ${{ inputs.s3_upload_path != '' }}
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ run: |
+ aws s3 sync image_archives/ "${{ inputs.s3_upload_path }}"
+
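Editorial note: the core of the republish job is reading the release version out of the binary shipped inside the image, then retagging the multiarch manifest. Condensed into a standalone sketch with a hypothetical image name:

```bash
# Sketch of the retag flow above; the image name is a hypothetical input.
IMAGE="example/clickhouse-server:test"
COMPONENT=$(echo "$IMAGE" | sed -E 's|.*/(clickhouse-[^:]+):.*|\1|')
docker pull "$IMAGE"
# Ask the binary inside the image for its own version string.
NEW_TAG=$(docker run --rm "$IMAGE" "$COMPONENT" --version |
          sed -E 's/.*version ([0-9.]+[^ ]*).*/\1/')
# Retag the multiarch manifest without rebuilding either platform image.
docker buildx imagetools create --tag "altinity/$COMPONENT:$NEW_TAG" "$IMAGE"
```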
diff --git a/.github/workflows/grype_scan.yml b/.github/workflows/grype_scan.yml
new file mode 100644
index 000000000000..a92fec5f9879
--- /dev/null
+++ b/.github/workflows/grype_scan.yml
@@ -0,0 +1,154 @@
+name: Grype Scan
+run-name: Grype Scan ${{ inputs.docker_image }}
+
+on:
+ workflow_dispatch:
+ # Inputs for manual run
+ inputs:
+ docker_image:
+ description: 'Docker image. If no tag is given, it will be determined by version_helper.py'
+ required: true
+ workflow_call:
+ # Inputs for workflow call
+ inputs:
+ docker_image:
+ description: 'Docker image. If no tag is given, it will be determined by version_helper.py'
+ required: true
+ type: string
+ version:
+ description: 'Version tag. If not given, it will be determined by version_helper.py'
+ required: false
+ type: string
+ default: ""
+ tag-suffix:
+ description: 'Tag suffix. To be appended to the version from version_helper.py'
+ required: false
+ type: string
+ default: ""
+env:
+ PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ GRYPE_VERSION: "v0.92.2-arm64v8"
+
+jobs:
+ grype_scan:
+ name: Grype Scan
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Docker
+ uses: docker/setup-buildx-action@v3
+
+ - name: Set up Python
+ run: |
+ export TESTFLOWS_VERSION="2.4.19"
+ sudo apt-get update
+ sudo apt-get install -y python3-pip python3-venv
+ python3 -m venv venv
+ source venv/bin/activate
+ pip install --upgrade requests chardet urllib3 unidiff boto3 PyGithub
+ pip install testflows==$TESTFLOWS_VERSION awscli==1.33.28
+ echo "PATH=$PATH" >> $GITHUB_ENV
+
+ - name: Set image tag if not given
+ if: ${{ !contains(inputs.docker_image, ':') }}
+ id: set_version
+ env:
+ TAG_SUFFIX: ${{ inputs.tag-suffix }}
+ SPECIFIED_VERSION: ${{ inputs.version }}
+ run: |
+ python3 ./tests/ci/version_helper.py | grep = | tee /tmp/version_info
+ source /tmp/version_info
+ if [ -z "$SPECIFIED_VERSION" ]; then
+ VERSION=$CLICKHOUSE_VERSION_STRING
+ else
+ VERSION=$SPECIFIED_VERSION
+ fi
+ echo "docker_image=${{ inputs.docker_image }}:$PR_NUMBER-$VERSION$TAG_SUFFIX" >> $GITHUB_OUTPUT
+
+ - name: Run Grype Scan
+ run: |
+ DOCKER_IMAGE=${{ steps.set_version.outputs.docker_image || inputs.docker_image }}
+ ./.github/grype/run_grype_scan.sh $DOCKER_IMAGE
+
+ - name: Parse grype results
+ run: |
+ python3 -u ./.github/grype/parse_vulnerabilities_grype.py -o nice --no-colors --log raw.log --test-to-end
+
+ - name: Transform and Upload Grype Results
+ if: always()
+ id: upload_results
+ env:
+ S3_BUCKET: "altinity-build-artifacts"
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ PR_NUMBER: ${{ env.PR_NUMBER || github.event.pull_request.number || 0 }}
+ DOCKER_IMAGE: ${{ steps.set_version.outputs.docker_image || inputs.docker_image }}
+ run: |
+ echo "PR_NUMBER=$PR_NUMBER"
+ ./.github/grype/transform_and_upload_results_s3.sh
+
+ - name: Create step summary
+ if: always()
+ id: create_summary
+ run: |
+ jq -r '"**Image**: \(.source.target.userInput)"' result.json >> $GITHUB_STEP_SUMMARY
+ jq -r '.distro | "**Distro**: \(.name):\(.version)"' result.json >> $GITHUB_STEP_SUMMARY
+ if jq -e '.matches | length == 0' result.json > /dev/null; then
+ echo "No CVEs" >> $GITHUB_STEP_SUMMARY
+ else
+ echo "| Severity | Count |" >> $GITHUB_STEP_SUMMARY
+ echo "|------------|-------|" >> $GITHUB_STEP_SUMMARY
+ jq -r '
+ .matches |
+ map(.vulnerability.severity) |
+ group_by(.) |
+ map({severity: .[0], count: length}) |
+ sort_by(.severity) |
+ map("| \(.severity) | \(.count) |") |
+ .[]
+ ' result.json >> $GITHUB_STEP_SUMMARY
+ fi
+
+ HIGH_COUNT=$(jq -r '.matches | map(.vulnerability) | unique_by(.id) | map(.severity) | map(select(. == "High")) | length' result.json)
+ CRITICAL_COUNT=$(jq -r '.matches | map(.vulnerability) | unique_by(.id) | map(.severity) | map(select(. == "Critical")) | length' result.json)
+ TOTAL_HIGH_CRITICAL=$((HIGH_COUNT + CRITICAL_COUNT))
+ echo "total_high_critical=$TOTAL_HIGH_CRITICAL" >> $GITHUB_OUTPUT
+
+ if [ $TOTAL_HIGH_CRITICAL -gt 0 ]; then
+ echo '## High and Critical vulnerabilities found' >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ cat raw.log | tfs --no-colors show tests | grep -Pi 'High|Critical' >> $GITHUB_STEP_SUMMARY
+ echo '```' >> $GITHUB_STEP_SUMMARY
+ fi
+
+ - name: Set commit status
+ if: always()
+ uses: actions/github-script@v7
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ const totalHighCritical = '${{ steps.create_summary.outputs.total_high_critical }}';
+ const hasError = totalHighCritical === '';
+ const hasVulnerabilities = parseInt(totalHighCritical) > 0;
+ github.rest.repos.createCommitStatus({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ sha: '${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}',
+ state: hasError ? 'error' : hasVulnerabilities ? 'failure' : 'success',
+ target_url: '${{ steps.upload_results.outputs.https_report_path }}',
+ description: hasError ? 'An error occurred' : `Grype Scan Completed with ${totalHighCritical} high/critical vulnerabilities`,
+ context: 'Grype Scan ${{ steps.set_version.outputs.docker_image || inputs.docker_image }}'
+ });
+
+ - name: Upload artifacts
+ if: always()
+ uses: actions/upload-artifact@v4
+ with:
+ name: grype-results-${{ hashFiles('raw.log') }}
+ path: |
+ result.json
+ nice.log.txt
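Editorial note: the severity roll-up in "Create step summary" can be exercised standalone against any grype JSON report; a sketch, assuming result.json came from a grype scan:

```bash
# Per-severity counts, equivalent to the markdown table in the summary step.
jq -r '
  .matches
  | map(.vulnerability.severity)
  | group_by(.)
  | map("\(.[0]): \(length)")
  | .[]
' result.json
# De-duplicated high/critical count, as used for the commit status gate.
jq -r '[.matches[].vulnerability] | unique_by(.id)
       | map(select(.severity == "High" or .severity == "Critical")) | length' result.json
```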
diff --git a/.github/workflows/init_praktika.yml b/.github/workflows/init_praktika.yml
new file mode 100644
index 000000000000..e9f56e0d2396
--- /dev/null
+++ b/.github/workflows/init_praktika.yml
@@ -0,0 +1,27 @@
+name: InitPraktikaReport
+
+# This workflow initializes or updates the praktika report in S3.
+# It does not need to run often; once per new release should be plenty.
+
+on:
+ workflow_dispatch:
+
+env:
+ # Force the stdout and stderr streams to be unbuffered
+ PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+
+jobs:
+
+ init_praktika:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ name: "Init praktika"
+ steps:
+ - name: Init praktika report
+ run: |
+ export PYTHONPATH=./ci:.:
+ pip install htmlmin
+ python3 -m praktika html
+
\ No newline at end of file
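Editorial note: the same initialization can be run locally, assuming AWS credentials in the environment and a checkout with the ./ci directory present:

```bash
# Mirrors the workflow's "Init praktika report" step (local sketch).
export PYTHONPATH=./ci:.
pip install htmlmin
python3 -m praktika html
```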
diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml
index 00dbccb41836..b50b96d467e2 100644
--- a/.github/workflows/master.yml
+++ b/.github/workflows/master.yml
@@ -3,13 +3,35 @@
name: MasterCI
on:
+ workflow_dispatch:
+ inputs:
+ no_cache:
+ description: Run without cache
+ required: false
+ type: boolean
+ default: false
push:
- branches: ['master']
+ branches: ['antalya', 'releases/*', 'antalya-*']
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
+ DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }}
CHECKOUT_REF: ""
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }}
+ AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }}
+ AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }}
+ AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/"
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+ GH_TOKEN: ${{ github.token }}
# Allow updating GH commit statuses and PR comments to post an actual job reports link
permissions: write-all
@@ -17,7 +39,7 @@ permissions: write-all
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: []
name: "Config Workflow"
outputs:
@@ -28,6 +50,26 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Config Workflow"
+
+ - name: Note report location to summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+ else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+ fi
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -54,7 +96,7 @@ jobs:
fi
dockers_build_amd:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }}
name: "Dockers Build (amd)"
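Editorial note: the cache_success_base64 guards used throughout these jobs match base64-encoded job names, so each `contains(...)` condition is just a skip-if-cached check for that job. Decoding one makes the condition readable:

```bash
# Decode / encode the cache key for the job above (illustration).
echo 'RG9ja2VycyBCdWlsZCAoYW1kKQ==' | base64 -d   # -> Dockers Build (amd)
echo -n 'Dockers Build (amd)' | base64            # -> RG9ja2VycyBCdWlsZCAoYW1kKQ==
```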
@@ -66,6 +108,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (amd)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -92,7 +141,7 @@ jobs:
fi
dockers_build_arm:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
name: "Dockers Build (arm)"
@@ -104,6 +153,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (arm)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -130,7 +186,7 @@ jobs:
fi
dockers_build_multiplatform_manifest:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAobXVsdGlwbGF0Zm9ybSBtYW5pZmVzdCk=') }}
name: "Dockers Build (multiplatform manifest)"
@@ -142,43 +198,12 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_tidy:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV90aWR5KQ==') }}
- name: "Build (arm_tidy)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
with:
- ref: ${{ env.CHECKOUT_REF }}
+ test_name: "Dockers Build (multiplatform manifest)"
- name: Prepare env script
run: |
@@ -200,13 +225,13 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_tidy)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_tidy)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
build_amd_debug:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
name: "Build (amd_debug)"
@@ -218,6 +243,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -244,7 +276,7 @@ jobs:
fi
build_amd_release:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
name: "Build (amd_release)"
@@ -256,6 +288,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -282,7 +321,7 @@ jobs:
fi
build_amd_asan:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
name: "Build (amd_asan)"
@@ -294,6 +333,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_asan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -320,7 +366,7 @@ jobs:
fi
build_amd_tsan:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
name: "Build (amd_tsan)"
@@ -332,6 +378,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -358,7 +411,7 @@ jobs:
fi
build_amd_msan:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }}
name: "Build (amd_msan)"
@@ -370,6 +423,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -396,7 +456,7 @@ jobs:
fi
build_amd_ubsan:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }}
name: "Build (amd_ubsan)"
@@ -408,6 +468,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -434,7 +501,7 @@ jobs:
fi
build_amd_binary:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
name: "Build (amd_binary)"
@@ -446,6 +513,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_binary)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -472,7 +546,7 @@ jobs:
fi
build_arm_release:
- runs-on: [self-hosted, builder-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
name: "Build (arm_release)"
@@ -484,6 +558,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -509,11 +590,11 @@ jobs:
python3 -m praktika run 'Build (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_arm_asan:
- runs-on: [self-hosted, builder-aarch64]
+ build_arm_coverage:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }}
- name: "Build (arm_asan)"
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9jb3ZlcmFnZSk=') }}
+ name: "Build (arm_coverage)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -522,6 +603,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_coverage)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -542,16 +630,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Build (arm_coverage)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Build (arm_coverage)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_arm_coverage:
- runs-on: [self-hosted, builder-aarch64]
+ build_arm_binary:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9jb3ZlcmFnZSk=') }}
- name: "Build (arm_coverage)"
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }}
+ name: "Build (arm_binary)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -560,6 +648,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_binary)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -580,16 +675,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_coverage)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_coverage)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_arm_binary:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }}
- name: "Build (arm_binary)"
+ unit_tests_asan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }}
+ name: "Unit tests (asan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -598,6 +693,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Unit tests (asan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -618,16 +720,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_binary)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_amd_darwin:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
- name: "Build (amd_darwin)"
+ unit_tests_tsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }}
+ name: "Unit tests (tsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -636,6 +738,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Unit tests (tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -656,16 +765,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (amd_darwin)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_arm_darwin:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
- name: "Build (arm_darwin)"
+ unit_tests_msan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }}
+ name: "Unit tests (msan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -674,6 +783,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Unit tests (msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -694,16 +810,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_darwin)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_arm_v80compat:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV92ODBjb21wYXQp') }}
- name: "Build (arm_v80compat)"
+ unit_tests_ubsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }}
+ name: "Unit tests (ubsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -712,6 +828,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Unit tests (ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -732,16 +855,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_v80compat)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_amd_freebsd:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9mcmVlYnNkKQ==') }}
- name: "Build (amd_freebsd)"
+ docker_server_image:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
+ name: "Docker server image"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -750,6 +873,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Docker server image"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -770,16 +900,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (amd_freebsd)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_ppc64le:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHBwYzY0bGUp') }}
- name: "Build (ppc64le)"
+ docker_keeper_image:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
+ name: "Docker keeper image"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -788,6 +918,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Docker keeper image"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -808,16 +945,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (ppc64le)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_amd_compat:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9jb21wYXQp') }}
- name: "Build (amd_compat)"
+ install_packages_amd_release:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }}
+ name: "Install packages (amd_release)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -826,6 +963,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Install packages (amd_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -846,16 +990,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (amd_compat)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_amd_musl:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tdXNsKQ==') }}
- name: "Build (amd_musl)"
+ install_packages_arm_release:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }}
+ name: "Install packages (arm_release)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -864,6 +1008,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Install packages (arm_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -884,16 +1035,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (amd_musl)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_riscv64:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHJpc2N2NjQp') }}
- name: "Build (riscv64)"
+ compatibility_check_release:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }}
+ name: "Compatibility check (release)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -902,6 +1053,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Compatibility check (release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -922,16 +1080,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (riscv64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_s390x:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHMzOTB4KQ==') }}
- name: "Build (s390x)"
+ compatibility_check_aarch64:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }}
+ name: "Compatibility check (aarch64)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -940,6 +1098,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Compatibility check (aarch64)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -960,16 +1125,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (s390x)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_loongarch64:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGxvb25nYXJjaDY0KQ==') }}
- name: "Build (loongarch64)"
+ stateless_tests_amd_asan_distributed_plan_parallel_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }}
+ name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -978,6 +1143,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -998,16 +1170,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (loongarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- build_fuzzers:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGZ1enplcnMp') }}
- name: "Build (fuzzers)"
+ stateless_tests_amd_asan_distributed_plan_parallel_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }}
+ name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1016,6 +1188,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1036,16 +1215,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (fuzzers)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (fuzzers)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- unit_tests_asan:
- runs-on: [self-hosted, builder]
+ stateless_tests_amd_asan_distributed_plan_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }}
- name: "Unit tests (asan)"
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }}
+ name: "Stateless tests (amd_asan, distributed plan, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1054,6 +1233,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1074,16 +1260,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Unit tests (asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- unit_tests_tsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }}
- name: "Unit tests (tsan)"
+ stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }}
+ name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1092,6 +1278,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1112,16 +1305,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Unit tests (tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- unit_tests_msan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }}
- name: "Unit tests (msan)"
+ stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }}
+ name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1130,6 +1323,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1150,16 +1350,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Unit tests (msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- unit_tests_ubsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }}
- name: "Unit tests (ubsan)"
+ stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
+ name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1168,6 +1368,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1188,16 +1395,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Unit tests (ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- docker_server_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
- name: "Docker server image"
+ stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
+ name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1206,6 +1413,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1226,1080 +1440,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Docker server image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- docker_keeper_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
- name: "Docker keeper image"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Docker keeper image' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_amd_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }}
- name: "Install packages (amd_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- install_packages_arm_release:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }}
- name: "Install packages (arm_release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Install packages (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- compatibility_check_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }}
- name: "Compatibility check (release)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Compatibility check (release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- compatibility_check_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }}
- name: "Compatibility check (aarch64)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_asan_distributed_plan_parallel_1_2:
- runs-on: [self-hosted, amd-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }}
- name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_asan_distributed_plan_parallel_2_2:
- runs-on: [self-hosted, amd-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }}
- name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_asan_distributed_plan_sequential:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }}
- name: "Stateless tests (amd_asan, distributed plan, sequential)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }}
- name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }}
- name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
- name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
- name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_debug_asyncinsert_s3_storage_parallel:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
- name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_debug_asyncinsert_s3_storage_sequential:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
- name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_debug_parallel:
- runs-on: [self-hosted, amd-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }}
- name: "Stateless tests (amd_debug, parallel)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_debug_sequential:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }}
- name: "Stateless tests (amd_debug, sequential)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_tsan_parallel_1_2:
- runs-on: [self-hosted, amd-large]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }}
- name: "Stateless tests (amd_tsan, parallel, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_tsan_parallel_2_2:
- runs-on: [self-hosted, amd-large]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }}
- name: "Stateless tests (amd_tsan, parallel, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_tsan_sequential_1_2:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }}
- name: "Stateless tests (amd_tsan, sequential, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_tsan_sequential_2_2:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }}
- name: "Stateless tests (amd_tsan, sequential, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_msan_parallel_1_2:
- runs-on: [self-hosted, amd-large]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }}
- name: "Stateless tests (amd_msan, parallel, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_msan_parallel_2_2:
- runs-on: [self-hosted, amd-large]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }}
- name: "Stateless tests (amd_msan, parallel, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_msan_sequential_1_2:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }}
- name: "Stateless tests (amd_msan, sequential, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_msan_sequential_2_2:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }}
- name: "Stateless tests (amd_msan, sequential, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_ubsan_parallel:
- runs-on: [self-hosted, amd-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }}
- name: "Stateless tests (amd_ubsan, parallel)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_ubsan_sequential:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }}
- name: "Stateless tests (amd_ubsan, sequential)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_debug_distributed_plan_s3_storage_parallel:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }}
- name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_debug_distributed_plan_s3_storage_sequential:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }}
- name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_tsan_s3_storage_parallel:
- runs-on: [self-hosted, amd-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwp') }}
- name: "Stateless tests (amd_tsan, s3 storage, parallel)"
+ stateless_tests_amd_debug_asyncinsert_s3_storage_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
+ name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2308,43 +1458,12 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_masterci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_tsan_s3_storage_sequential_1_2:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }}
- name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
with:
- ref: ${{ env.CHECKOUT_REF }}
+ test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)"
- name: Prepare env script
run: |
@@ -2366,16 +1485,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_tsan_s3_storage_sequential_2_2:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }}
- name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)"
+ stateless_tests_amd_debug_asyncinsert_s3_storage_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
+ name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2384,6 +1503,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2404,16 +1530,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_arm_binary_parallel:
- runs-on: [self-hosted, arm-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }}
- name: "Stateless tests (arm_binary, parallel)"
+ stateless_tests_amd_debug_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }}
+ name: "Stateless tests (amd_debug, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2422,6 +1548,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2442,16 +1575,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_arm_binary_sequential:
- runs-on: [self-hosted, arm-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }}
- name: "Stateless tests (arm_binary, sequential)"
+ stateless_tests_amd_debug_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }}
+ name: "Stateless tests (amd_debug, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2460,6 +1593,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2480,16 +1620,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_arm_asan_azure_parallel:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHBhcmFsbGVsKQ==') }}
- name: "Stateless tests (arm_asan, azure, parallel)"
+ stateless_tests_amd_tsan_parallel_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }}
+ name: "Stateless tests (amd_tsan, parallel, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2498,6 +1638,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, parallel, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2518,16 +1665,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (arm_asan, azure, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (arm_asan, azure, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_arm_asan_azure_sequential:
- runs-on: [self-hosted, arm-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYXNhbiwgYXp1cmUsIHNlcXVlbnRpYWwp') }}
- name: "Stateless tests (arm_asan, azure, sequential)"
+ stateless_tests_amd_tsan_parallel_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }}
+ name: "Stateless tests (amd_tsan, parallel, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2536,6 +1683,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, parallel, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2556,16 +1710,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (arm_asan, azure, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (arm_asan, azure, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
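Each test job gates on three conditions: `!failure() && !cancelled()` lets it start even when some of its `needs` were skipped (the implicit `success()` gate would skip it as well), and the `contains()` clause implements the result cache described above. A minimal sketch of the pattern, with hypothetical job names ('am9i' is base64 for "job"):

    jobs:
      upstream:
        # ... publishes outputs.data carrying a cache_success_base64 field
      downstream:
        needs: [upstream]
        # run unless something failed or the run was cancelled, and skip
        # when this job's base64-encoded name is already cached as successful
        if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.upstream.outputs.data).cache_success_base64, 'am9i') }}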
- integration_tests_amd_asan_old_analyzer_1_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 1/6)"
+ stateless_tests_amd_tsan_sequential_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }}
+ name: "Stateless tests (amd_tsan, sequential, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2574,6 +1728,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, sequential, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2594,16 +1755,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_asan_old_analyzer_2_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 2/6)"
+ stateless_tests_amd_tsan_sequential_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }}
+ name: "Stateless tests (amd_tsan, sequential, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2612,6 +1773,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, sequential, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2632,16 +1800,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_asan_old_analyzer_3_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 3/6)"
+ stateless_tests_amd_msan_parallel_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }}
+ name: "Stateless tests (amd_msan, parallel, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2650,6 +1818,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_msan, parallel, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2670,16 +1845,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_asan_old_analyzer_4_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 4/6)"
+ stateless_tests_amd_msan_parallel_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }}
+ name: "Stateless tests (amd_msan, parallel, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2688,6 +1863,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_msan, parallel, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2708,16 +1890,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_asan_old_analyzer_5_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 5/6)"
+ stateless_tests_amd_msan_sequential_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }}
+ name: "Stateless tests (amd_msan, sequential, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2726,6 +1908,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_msan, sequential, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2746,16 +1935,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_asan_old_analyzer_6_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 6/6)"
+ stateless_tests_amd_msan_sequential_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }}
+ name: "Stateless tests (amd_msan, sequential, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2764,6 +1953,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_msan, sequential, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2784,16 +1980,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_binary_1_5:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }}
- name: "Integration tests (amd_binary, 1/5)"
+ stateless_tests_amd_ubsan_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }}
+ name: "Stateless tests (amd_ubsan, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2802,6 +1998,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_ubsan, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2822,16 +2025,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_binary_2_5:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }}
- name: "Integration tests (amd_binary, 2/5)"
+ stateless_tests_amd_ubsan_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }}
+ name: "Stateless tests (amd_ubsan, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2840,6 +2043,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_ubsan, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2860,16 +2070,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_binary_3_5:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }}
- name: "Integration tests (amd_binary, 3/5)"
+ stateless_tests_amd_debug_distributed_plan_s3_storage_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }}
+ name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2878,6 +2088,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2898,16 +2115,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_binary_4_5:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }}
- name: "Integration tests (amd_binary, 4/5)"
+ stateless_tests_amd_debug_distributed_plan_s3_storage_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }}
+ name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2916,6 +2133,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2936,16 +2160,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_binary_5_5:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }}
- name: "Integration tests (amd_binary, 5/5)"
+ stateless_tests_amd_tsan_s3_storage_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwp') }}
+ name: "Stateless tests (amd_tsan, s3 storage, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2954,6 +2178,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, s3 storage, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2974,16 +2205,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_arm_binary_distributed_plan_1_4:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }}
- name: "Integration tests (arm_binary, distributed plan, 1/4)"
+ stateless_tests_amd_tsan_s3_storage_sequential_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }}
+ name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2992,6 +2223,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3012,16 +2250,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_arm_binary_distributed_plan_2_4:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }}
- name: "Integration tests (arm_binary, distributed plan, 2/4)"
+ stateless_tests_amd_tsan_s3_storage_sequential_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }}
+ name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3030,6 +2268,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3050,16 +2295,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_arm_binary_distributed_plan_3_4:
- runs-on: [self-hosted, arm-medium]
+ stateless_tests_arm_binary_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }}
- name: "Integration tests (arm_binary, distributed plan, 3/4)"
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }}
+ name: "Stateless tests (arm_binary, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3068,6 +2313,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (arm_binary, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3088,16 +2340,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_arm_binary_distributed_plan_4_4:
- runs-on: [self-hosted, arm-medium]
+ stateless_tests_arm_binary_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }}
- name: "Integration tests (arm_binary, distributed plan, 4/4)"
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }}
+ name: "Stateless tests (arm_binary, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3106,6 +2358,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (arm_binary, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3126,16 +2385,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
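On scheduling: for self-hosted runners an array-valued `runs-on` is a label conjunction, so

    runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]

only matches a runner registered with all three labels; judging by the label names, the `-aarch64` variant is what keeps the `arm_binary` jobs on ARM capacity while the sanitizer jobs run on amd64 runners.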
- integration_tests_amd_tsan_1_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }}
- name: "Integration tests (amd_tsan, 1/6)"
+ integration_tests_amd_asan_old_analyzer_1_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 1/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3144,6 +2403,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 1/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3164,16 +2430,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_2_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }}
- name: "Integration tests (amd_tsan, 2/6)"
+ integration_tests_amd_asan_old_analyzer_2_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 2/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3182,6 +2448,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 2/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3202,16 +2475,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_3_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }}
- name: "Integration tests (amd_tsan, 3/6)"
+ integration_tests_amd_asan_old_analyzer_3_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 3/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3220,6 +2493,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 3/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3240,16 +2520,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_4_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }}
- name: "Integration tests (amd_tsan, 4/6)"
+ integration_tests_amd_asan_old_analyzer_4_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 4/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3258,6 +2538,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 4/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3278,16 +2565,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_5_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }}
- name: "Integration tests (amd_tsan, 5/6)"
+ integration_tests_amd_asan_old_analyzer_5_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 5/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3296,6 +2583,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 5/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3316,16 +2610,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_6_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }}
- name: "Integration tests (amd_tsan, 6/6)"
+ integration_tests_amd_asan_old_analyzer_6_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 6/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3334,6 +2628,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 6/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3354,16 +2655,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_arm_coverage_parallel:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_coverage]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fY292ZXJhZ2UsIHBhcmFsbGVsKQ==') }}
- name: "Stateless tests (arm_coverage, parallel)"
+ integration_tests_amd_binary_1_5:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }}
+ name: "Integration tests (amd_binary, 1/5)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3372,6 +2673,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_binary, 1/5)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3392,16 +2700,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (arm_coverage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (arm_coverage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_arm_coverage_sequential:
- runs-on: [self-hosted, arm-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_coverage]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fY292ZXJhZ2UsIHNlcXVlbnRpYWwp') }}
- name: "Stateless tests (arm_coverage, sequential)"
+ integration_tests_amd_binary_2_5:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }}
+ name: "Integration tests (amd_binary, 2/5)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3410,6 +2718,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_binary, 2/5)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3430,16 +2745,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (arm_coverage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (arm_coverage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_amd_debug:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }}
- name: "Stress test (amd_debug)"
+ integration_tests_amd_binary_3_5:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }}
+ name: "Integration tests (amd_binary, 3/5)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3448,6 +2763,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_binary, 3/5)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3468,16 +2790,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_amd_tsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }}
- name: "Stress test (amd_tsan)"
+ integration_tests_amd_binary_4_5:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }}
+ name: "Integration tests (amd_binary, 4/5)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3486,6 +2808,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_binary, 4/5)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3506,16 +2835,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_arm_asan:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }}
- name: "Stress test (arm_asan)"
+ integration_tests_amd_binary_5_5:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }}
+ name: "Integration tests (amd_binary, 5/5)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3524,6 +2853,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_binary, 5/5)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3544,16 +2880,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_amd_ubsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }}
- name: "Stress test (amd_ubsan)"
+ integration_tests_arm_binary_distributed_plan_1_4:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }}
+ name: "Integration tests (arm_binary, distributed plan, 1/4)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3562,6 +2898,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (arm_binary, distributed plan, 1/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3582,16 +2925,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_amd_msan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }}
- name: "Stress test (amd_msan)"
+ integration_tests_arm_binary_distributed_plan_2_4:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }}
+ name: "Integration tests (arm_binary, distributed plan, 2/4)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3600,6 +2943,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (arm_binary, distributed plan, 2/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3620,16 +2970,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_azure_tsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCB0c2FuKQ==') }}
- name: "Stress test (azure, tsan)"
+ integration_tests_arm_binary_distributed_plan_3_4:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }}
+ name: "Integration tests (arm_binary, distributed plan, 3/4)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3638,6 +2988,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (arm_binary, distributed plan, 3/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3658,16 +3015,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (azure, tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (azure, tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_azure_msan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGF6dXJlLCBtc2FuKQ==') }}
- name: "Stress test (azure, msan)"
+ integration_tests_arm_binary_distributed_plan_4_4:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }}
+ name: "Integration tests (arm_binary, distributed plan, 4/4)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3676,6 +3033,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (arm_binary, distributed plan, 4/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3696,16 +3060,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (azure, msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (azure, msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- ast_fuzzer_amd_debug:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }}
- name: "AST fuzzer (amd_debug)"
+ integration_tests_amd_tsan_1_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }}
+ name: "Integration tests (amd_tsan, 1/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3714,6 +3078,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 1/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3734,16 +3105,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- ast_fuzzer_arm_asan:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }}
- name: "AST fuzzer (arm_asan)"
+ integration_tests_amd_tsan_2_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }}
+ name: "Integration tests (amd_tsan, 2/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3752,6 +3123,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 2/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3772,16 +3150,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- ast_fuzzer_amd_tsan:
- runs-on: [self-hosted, amd-medium]
+ integration_tests_amd_tsan_3_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }}
- name: "AST fuzzer (amd_tsan)"
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }}
+ name: "Integration tests (amd_tsan, 3/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3790,6 +3168,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 3/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3810,16 +3195,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- ast_fuzzer_amd_msan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }}
- name: "AST fuzzer (amd_msan)"
+ integration_tests_amd_tsan_4_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }}
+ name: "Integration tests (amd_tsan, 4/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3828,6 +3213,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 4/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3848,16 +3240,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- ast_fuzzer_amd_ubsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }}
- name: "AST fuzzer (amd_ubsan)"
+ integration_tests_amd_tsan_5_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }}
+ name: "Integration tests (amd_tsan, 5/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3866,6 +3258,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 5/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3886,16 +3285,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- buzzhouse_amd_debug:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }}
- name: "BuzzHouse (amd_debug)"
+ integration_tests_amd_tsan_6_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }}
+ name: "Integration tests (amd_tsan, 6/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3904,6 +3303,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 6/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3924,16 +3330,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- buzzhouse_arm_asan:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }}
- name: "BuzzHouse (arm_asan)"
+ stateless_tests_arm_coverage_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_coverage]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fY292ZXJhZ2UsIHBhcmFsbGVsKQ==') }}
+ name: "Stateless tests (arm_coverage, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3942,6 +3348,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (arm_coverage, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3962,16 +3375,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_coverage, parallel)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_coverage, parallel)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- buzzhouse_amd_tsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }}
- name: "BuzzHouse (amd_tsan)"
+ stateless_tests_arm_coverage_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_coverage]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fY292ZXJhZ2UsIHNlcXVlbnRpYWwp') }}
+ name: "Stateless tests (arm_coverage, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3980,6 +3393,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (arm_coverage, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4000,16 +3420,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_coverage, sequential)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_coverage, sequential)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- buzzhouse_amd_msan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }}
- name: "BuzzHouse (amd_msan)"
+ stress_test_amd_debug:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }}
+ name: "Stress test (amd_debug)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4018,6 +3438,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4038,16 +3465,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- buzzhouse_amd_ubsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }}
- name: "BuzzHouse (amd_ubsan)"
+ stress_test_amd_tsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }}
+ name: "Stress test (amd_tsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4056,6 +3483,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4076,16 +3510,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_amd_release_master_head_1_3:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzMp') }}
- name: "Performance Comparison (amd_release, master_head, 1/3)"
+ stress_test_amd_ubsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }}
+ name: "Stress test (amd_ubsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4094,6 +3528,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4114,16 +3555,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_amd_release_master_head_2_3:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzMp') }}
- name: "Performance Comparison (amd_release, master_head, 2/3)"
+ stress_test_amd_msan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }}
+ name: "Stress test (amd_msan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4132,6 +3573,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4152,16 +3600,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_amd_release_master_head_3_3:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzMp') }}
- name: "Performance Comparison (amd_release, master_head, 3/3)"
+ ast_fuzzer_amd_debug:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }}
+ name: "AST fuzzer (amd_debug)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4170,6 +3618,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "AST fuzzer (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4190,16 +3645,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_arm_release_master_head_1_3:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzMp') }}
- name: "Performance Comparison (arm_release, master_head, 1/3)"
+ ast_fuzzer_amd_tsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }}
+ name: "AST fuzzer (amd_tsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4208,6 +3663,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "AST fuzzer (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4228,16 +3690,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_arm_release_master_head_2_3:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzMp') }}
- name: "Performance Comparison (arm_release, master_head, 2/3)"
+ ast_fuzzer_amd_msan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }}
+ name: "AST fuzzer (amd_msan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4246,6 +3708,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "AST fuzzer (amd_msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4266,16 +3735,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_arm_release_master_head_3_3:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzMp') }}
- name: "Performance Comparison (arm_release, master_head, 3/3)"
+ ast_fuzzer_amd_ubsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }}
+ name: "AST fuzzer (amd_ubsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4284,6 +3753,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "AST fuzzer (amd_ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4304,16 +3780,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_arm_release_release_base_1_3:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMS8zKQ==') }}
- name: "Performance Comparison (arm_release, release_base, 1/3)"
+ buzzhouse_amd_debug:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }}
+ name: "BuzzHouse (amd_debug)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4322,6 +3798,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "BuzzHouse (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4342,16 +3825,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (arm_release, release_base, 1/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (arm_release, release_base, 1/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_arm_release_release_base_2_3:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMi8zKQ==') }}
- name: "Performance Comparison (arm_release, release_base, 2/3)"
+ buzzhouse_amd_tsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }}
+ name: "BuzzHouse (amd_tsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4360,6 +3843,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "BuzzHouse (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4380,16 +3870,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (arm_release, release_base, 2/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (arm_release, release_base, 2/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_arm_release_release_base_3_3:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIHJlbGVhc2VfYmFzZSwgMy8zKQ==') }}
- name: "Performance Comparison (arm_release, release_base, 3/3)"
+ buzzhouse_amd_msan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }}
+ name: "BuzzHouse (amd_msan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4398,6 +3888,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "BuzzHouse (amd_msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4418,16 +3915,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (arm_release, release_base, 3/3)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (arm_release, release_base, 3/3)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- clickbench_amd_release:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tCZW5jaCAoYW1kX3JlbGVhc2Up') }}
- name: "ClickBench (amd_release)"
+ buzzhouse_amd_ubsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }}
+ name: "BuzzHouse (amd_ubsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4436,6 +3933,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "BuzzHouse (amd_ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4456,16 +3960,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'ClickBench (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'ClickBench (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- clickbench_arm_release:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tCZW5jaCAoYXJtX3JlbGVhc2Up') }}
- name: "ClickBench (arm_release)"
+ clickbench_amd_release:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tCZW5jaCAoYW1kX3JlbGVhc2Up') }}
+ name: "ClickBench (amd_release)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4474,6 +3978,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "ClickBench (amd_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4494,16 +4005,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'ClickBench (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'ClickBench (amd_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'ClickBench (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'ClickBench (amd_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
- sqlancer_amd_debug:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U1FMYW5jZXIgKGFtZF9kZWJ1Zyk=') }}
- name: "SQLancer (amd_debug)"
+ clickbench_arm_release:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tCZW5jaCAoYXJtX3JlbGVhc2Up') }}
+ name: "ClickBench (arm_release)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4512,6 +4023,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "ClickBench (arm_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4532,13 +4050,13 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'SQLancer (amd_debug)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'ClickBench (arm_release)' --workflow "MasterCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'SQLancer (amd_debug)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'ClickBench (arm_release)' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
sqltest:
- runs-on: [self-hosted, arm-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U1FMVGVzdA==') }}
name: "SQLTest"
@@ -4550,6 +4068,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "SQLTest"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4576,8 +4101,8 @@ jobs:
fi
finish_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary, build_amd_darwin, build_arm_darwin, build_arm_v80compat, build_amd_freebsd, build_ppc64le, build_amd_compat, build_amd_musl, build_riscv64, build_s390x, build_loongarch64, build_fuzzers, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_release, compatibility_check_aarch64, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, stateless_tests_arm_asan_azure_parallel, stateless_tests_arm_asan_azure_sequential, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, stateless_tests_arm_coverage_parallel, stateless_tests_arm_coverage_sequential, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_amd_ubsan, stress_test_amd_msan, stress_test_azure_tsan, stress_test_azure_msan, ast_fuzzer_amd_debug, ast_fuzzer_arm_asan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_arm_asan, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan, performance_comparison_amd_release_master_head_1_3, performance_comparison_amd_release_master_head_2_3, performance_comparison_amd_release_master_head_3_3, performance_comparison_arm_release_master_head_1_3, performance_comparison_arm_release_master_head_2_3, performance_comparison_arm_release_master_head_3_3, performance_comparison_arm_release_release_base_1_3, performance_comparison_arm_release_release_base_2_3, performance_comparison_arm_release_release_base_3_3, clickbench_amd_release, clickbench_arm_release, sqlancer_amd_debug, sqltest]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_coverage, build_arm_binary, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, compatibility_check_release, compatibility_check_aarch64, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, stateless_tests_arm_coverage_parallel, stateless_tests_arm_coverage_sequential, stress_test_amd_debug, stress_test_amd_tsan, stress_test_amd_ubsan, stress_test_amd_msan, ast_fuzzer_amd_debug, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan, clickbench_amd_release, clickbench_arm_release, sqltest]
if: ${{ !cancelled() }}
name: "Finish Workflow"
outputs:
@@ -4588,6 +4113,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Finish Workflow"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4612,3 +4144,221 @@ jobs:
else
python3 -m praktika run 'Finish Workflow' --workflow "MasterCI" --ci |& tee ./ci/tmp/job.log
fi
+
+##########################################################################################
+##################################### ALTINITY JOBS ######################################
+##########################################################################################
+
+ GrypeScanServer:
+ needs: [config_workflow, docker_server_image]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
+ strategy:
+ fail-fast: false
+ matrix:
+ suffix: ['', '-alpine']
+ uses: ./.github/workflows/grype_scan.yml
+ secrets: inherit
+ with:
+ docker_image: altinityinfra/clickhouse-server
+ version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+ tag-suffix: ${{ matrix.suffix }}
+ GrypeScanKeeper:
+ needs: [config_workflow, docker_keeper_image]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
+ uses: ./.github/workflows/grype_scan.yml
+ secrets: inherit
+ with:
+ docker_image: altinityinfra/clickhouse-keeper
+ version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+
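The suffix matrix fans GrypeScanServer out into one reusable grype_scan.yml run per server image tag, while GrypeScanKeeper scans only the default keeper tag. Assuming the tag is the version string plus the suffix, the expansion is roughly:

    # Hypothetical expansion of the suffix matrix; the actual scan runs via
    # .github/grype/run_grype_scan.sh inside grype_scan.yml.
    for suffix in '' '-alpine'; do
        grype "altinityinfra/clickhouse-server:${VERSION}${suffix}"
    done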
+ RegressionTestsRelease:
+ needs: [config_workflow, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'regression') }}
+ uses: ./.github/workflows/regression.yml
+ secrets: inherit
+ with:
+ runner_type: altinity-on-demand, altinity-regression-tester
+ commit: fc19ce3a7322a10ab791de755c950a56744a12e7
+ arch: release
+ build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ timeout_minutes: 300
+ workflow_config: ${{ needs.config_workflow.outputs.data }}
+ RegressionTestsAarch64:
+ needs: [config_workflow, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'regression') && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'aarch64') }}
+ uses: ./.github/workflows/regression.yml
+ secrets: inherit
+ with:
+ runner_type: altinity-on-demand, altinity-regression-tester-aarch64
+ commit: fc19ce3a7322a10ab791de755c950a56744a12e7
+ arch: aarch64
+ build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ timeout_minutes: 300
+ workflow_config: ${{ needs.config_workflow.outputs.data }}
+
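Both regression jobs resolve build_sha with the same GitHub expression: the &&/|| idiom selects the pull request's head commit on pull_request events and falls back to the pushed commit otherwise. An equivalent shell sketch, with PR_HEAD_SHA as a hypothetical stand-in for github.event.pull_request.head.sha:

    # Equivalent of:
    #   github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha
    if [ "$GITHUB_EVENT_NAME" = "pull_request" ]; then
        BUILD_SHA="$PR_HEAD_SHA"   # hypothetical variable for the PR head sha
    else
        BUILD_SHA="$GITHUB_SHA"
    fi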
+ SignRelease:
+ needs: [config_workflow, build_amd_release]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_sign.yml
+ secrets: inherit
+ with:
+ test_name: Sign release
+ runner_type: altinity-style-checker
+ data: ${{ needs.config_workflow.outputs.data }}
+ SignAarch64:
+ needs: [config_workflow, build_arm_release]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_sign.yml
+ secrets: inherit
+ with:
+ test_name: Sign aarch64
+ runner_type: altinity-style-checker-aarch64
+ data: ${{ needs.config_workflow.outputs.data }}
+
+ FinishCIReport:
+ if: ${{ !cancelled() }}
+ needs:
+ - config_workflow
+ - dockers_build_amd
+ - dockers_build_arm
+ - dockers_build_multiplatform_manifest
+ - build_amd_debug
+ - build_amd_release
+ - build_amd_asan
+ - build_amd_tsan
+ - build_amd_msan
+ - build_amd_ubsan
+ - build_amd_binary
+ - build_arm_release
+ - build_arm_coverage
+ - build_arm_binary
+ - unit_tests_asan
+ - unit_tests_tsan
+ - unit_tests_msan
+ - unit_tests_ubsan
+ - docker_server_image
+ - docker_keeper_image
+ - install_packages_amd_release
+ - install_packages_arm_release
+ - compatibility_check_release
+ - compatibility_check_aarch64
+ - stateless_tests_amd_asan_distributed_plan_parallel_1_2
+ - stateless_tests_amd_asan_distributed_plan_parallel_2_2
+ - stateless_tests_amd_asan_distributed_plan_sequential
+ - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel
+ - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential
+ - stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel
+ - stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential
+ - stateless_tests_amd_debug_asyncinsert_s3_storage_parallel
+ - stateless_tests_amd_debug_asyncinsert_s3_storage_sequential
+ - stateless_tests_amd_debug_parallel
+ - stateless_tests_amd_debug_sequential
+ - stateless_tests_amd_tsan_parallel_1_2
+ - stateless_tests_amd_tsan_parallel_2_2
+ - stateless_tests_amd_tsan_sequential_1_2
+ - stateless_tests_amd_tsan_sequential_2_2
+ - stateless_tests_amd_msan_parallel_1_2
+ - stateless_tests_amd_msan_parallel_2_2
+ - stateless_tests_amd_msan_sequential_1_2
+ - stateless_tests_amd_msan_sequential_2_2
+ - stateless_tests_amd_ubsan_parallel
+ - stateless_tests_amd_ubsan_sequential
+ - stateless_tests_amd_debug_distributed_plan_s3_storage_parallel
+ - stateless_tests_amd_debug_distributed_plan_s3_storage_sequential
+ - stateless_tests_amd_tsan_s3_storage_parallel
+ - stateless_tests_amd_tsan_s3_storage_sequential_1_2
+ - stateless_tests_amd_tsan_s3_storage_sequential_2_2
+ - stateless_tests_arm_binary_parallel
+ - stateless_tests_arm_binary_sequential
+ - integration_tests_amd_asan_old_analyzer_1_6
+ - integration_tests_amd_asan_old_analyzer_2_6
+ - integration_tests_amd_asan_old_analyzer_3_6
+ - integration_tests_amd_asan_old_analyzer_4_6
+ - integration_tests_amd_asan_old_analyzer_5_6
+ - integration_tests_amd_asan_old_analyzer_6_6
+ - integration_tests_amd_binary_1_5
+ - integration_tests_amd_binary_2_5
+ - integration_tests_amd_binary_3_5
+ - integration_tests_amd_binary_4_5
+ - integration_tests_amd_binary_5_5
+ - integration_tests_arm_binary_distributed_plan_1_4
+ - integration_tests_arm_binary_distributed_plan_2_4
+ - integration_tests_arm_binary_distributed_plan_3_4
+ - integration_tests_arm_binary_distributed_plan_4_4
+ - integration_tests_amd_tsan_1_6
+ - integration_tests_amd_tsan_2_6
+ - integration_tests_amd_tsan_3_6
+ - integration_tests_amd_tsan_4_6
+ - integration_tests_amd_tsan_5_6
+ - integration_tests_amd_tsan_6_6
+ - stateless_tests_arm_coverage_parallel
+ - stateless_tests_arm_coverage_sequential
+ - stress_test_amd_debug
+ - stress_test_amd_tsan
+ - stress_test_amd_ubsan
+ - stress_test_amd_msan
+ - ast_fuzzer_amd_debug
+ - ast_fuzzer_amd_tsan
+ - ast_fuzzer_amd_msan
+ - ast_fuzzer_amd_ubsan
+ - buzzhouse_amd_debug
+ - buzzhouse_amd_tsan
+ - buzzhouse_amd_msan
+ - buzzhouse_amd_ubsan
+ - clickbench_amd_release
+ - clickbench_arm_release
+ - sqltest
+ - finish_workflow
+ - GrypeScanServer
+ - GrypeScanKeeper
+ - RegressionTestsRelease
+ - RegressionTestsAarch64
+ - SignRelease
+ - SignAarch64
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ - name: Finalize workflow report
+ if: ${{ !cancelled() }}
+ uses: ./.github/actions/create_workflow_report
+ with:
+ workflow_config: ${{ needs.config_workflow.outputs.data }}
+ final: true
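+          # 'final: true' is assumed to tell create_workflow_report to publish the
+          # completed ci_run_report.html rather than an interim snapshot.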
+
+ SourceUpload:
+ needs: [config_workflow, build_amd_release]
+ if: ${{ !failure() && !cancelled() }}
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ env:
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ VERSION: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ ref: ${{ fromJson(needs.config_workflow.outputs.data).git_ref }}
+ submodules: true
+ fetch-depth: 0
+ filter: tree:0
+ - name: Install aws cli
+ uses: unfor19/install-aws-cli-action@v1
+ with:
+ version: 2
+ arch: arm64
+ - name: Create source tar
+ run: |
+          cd .. && tar czf "$RUNNER_TEMP/build_source.src.tar.gz" ClickHouse/
+ - name: Upload source tar
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ S3_PATH="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/build_amd_release"
+ else
+ S3_PATH="PRs/$PR_NUMBER/$COMMIT_SHA/build_amd_release"
+ fi
+
+          aws s3 cp "$RUNNER_TEMP/build_source.src.tar.gz" "s3://altinity-build-artifacts/$S3_PATH/clickhouse-$VERSION.src.tar.gz"
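+
+          # Sketch (hypothetical values): for PR 123 at commit deadbeef this publishes
+          #   s3://altinity-build-artifacts/PRs/123/deadbeef/build_amd_release/clickhouse-<version>.src.tar.gz
+          # which can be checked afterwards with:
+          #   aws s3 ls s3://altinity-build-artifacts/PRs/123/deadbeef/build_amd_release/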
diff --git a/.github/workflows/merge_queue.yml b/.github/workflows/merge_queue.yml
index 3c074f4e47d5..aa7066382f26 100644
--- a/.github/workflows/merge_queue.yml
+++ b/.github/workflows/merge_queue.yml
@@ -9,12 +9,26 @@ env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
CHECKOUT_REF: ""
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }}
+ AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }}
+ AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }}
+ AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/"
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+ GH_TOKEN: ${{ github.token }}
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: []
name: "Config Workflow"
outputs:
@@ -25,6 +39,26 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Config Workflow"
+
+      - name: Note report location in summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+ else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+ fi
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
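+          # Example (hypothetical run): for PR 123 at commit deadbeef and run ID 42 the link is
+          #   https://s3.amazonaws.com/altinity-build-artifacts/PRs/123/deadbeef/42/ci_run_report.html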
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -51,7 +85,7 @@ jobs:
fi
dockers_build_amd:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }}
name: "Dockers Build (amd)"
@@ -63,6 +97,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (amd)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -89,7 +130,7 @@ jobs:
fi
dockers_build_arm:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
name: "Dockers Build (arm)"
@@ -101,43 +142,12 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_mergequeueci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- style_check:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3R5bGUgY2hlY2s=') }}
- name: "Style check"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
with:
- ref: ${{ env.CHECKOUT_REF }}
+ test_name: "Dockers Build (arm)"
- name: Prepare env script
run: |
@@ -159,13 +169,13 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Style check' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Style check' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Dockers Build (arm)' --workflow "MergeQueueCI" --ci |& tee ./ci/tmp/job.log
fi
fast_test:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RmFzdCB0ZXN0') }}
name: "Fast test"
@@ -177,6 +187,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Fast test"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -203,7 +220,7 @@ jobs:
fi
build_amd_binary:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
name: "Build (amd_binary)"
@@ -215,6 +232,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_binary)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -241,8 +265,8 @@ jobs:
fi
finish_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, style_check, fast_test, build_amd_binary]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, fast_test, build_amd_binary]
if: ${{ !cancelled() }}
name: "Finish Workflow"
outputs:
@@ -253,6 +277,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Finish Workflow"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
diff --git a/.github/workflows/nightly_fuzzers.yml b/.github/workflows/nightly_fuzzers.yml
index 66b6f76b35cd..84cb1e8e02b9 100644
--- a/.github/workflows/nightly_fuzzers.yml
+++ b/.github/workflows/nightly_fuzzers.yml
@@ -16,7 +16,7 @@ env:
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: []
name: "Config Workflow"
outputs:
@@ -27,6 +27,26 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Config Workflow"
+
+      - name: Note report location in summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+ else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+ fi
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -53,7 +73,7 @@ jobs:
fi
dockers_build_amd:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }}
name: "Dockers Build (amd)"
@@ -65,6 +85,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (amd)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -91,7 +118,7 @@ jobs:
fi
dockers_build_arm:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
name: "Dockers Build (arm)"
@@ -103,6 +130,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (arm)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -129,7 +163,7 @@ jobs:
fi
build_fuzzers:
- runs-on: [self-hosted, builder-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGZ1enplcnMp') }}
name: "Build (fuzzers)"
@@ -141,6 +175,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (fuzzers)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -167,7 +208,7 @@ jobs:
fi
libfuzzer_tests:
- runs-on: [self-hosted, arm-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_fuzzers]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'bGliRnV6emVyIHRlc3Rz') }}
name: "libFuzzer tests"
@@ -179,6 +220,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "libFuzzer tests"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -205,7 +253,7 @@ jobs:
fi
finish_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_fuzzers, libfuzzer_tests]
if: ${{ !cancelled() }}
name: "Finish Workflow"
@@ -217,6 +265,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Finish Workflow"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
diff --git a/.github/workflows/nightly_jepsen.yml b/.github/workflows/nightly_jepsen.yml
index b26728880b3e..1ff46f516f75 100644
--- a/.github/workflows/nightly_jepsen.yml
+++ b/.github/workflows/nightly_jepsen.yml
@@ -16,7 +16,7 @@ env:
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: []
name: "Config Workflow"
outputs:
@@ -27,6 +27,26 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Config Workflow"
+
+      - name: Note report location in summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+ else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+ fi
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -53,7 +73,7 @@ jobs:
fi
dockers_build_amd:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }}
name: "Dockers Build (amd)"
@@ -65,6 +85,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (amd)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -91,7 +118,7 @@ jobs:
fi
dockers_build_arm:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
name: "Dockers Build (arm)"
@@ -103,6 +130,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (arm)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -129,7 +163,7 @@ jobs:
fi
build_amd_binary:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
name: "Build (amd_binary)"
@@ -141,6 +175,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_binary)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -167,7 +208,7 @@ jobs:
fi
clickhouse_keeper_jepsen:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q2xpY2tIb3VzZSBLZWVwZXIgSmVwc2Vu') }}
name: "ClickHouse Keeper Jepsen"
@@ -179,6 +220,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "ClickHouse Keeper Jepsen"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -205,7 +253,7 @@ jobs:
fi
finish_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, clickhouse_keeper_jepsen]
if: ${{ !cancelled() }}
name: "Finish Workflow"
@@ -217,6 +265,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Finish Workflow"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
diff --git a/.github/workflows/nightly_statistics.yml b/.github/workflows/nightly_statistics.yml
index 6d64f8f8876c..8a0e96858eb9 100644
--- a/.github/workflows/nightly_statistics.yml
+++ b/.github/workflows/nightly_statistics.yml
@@ -16,7 +16,7 @@ env:
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: []
name: "Config Workflow"
outputs:
@@ -27,6 +27,26 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Config Workflow"
+
+      - name: Note report location in summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+ else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+ fi
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -53,7 +73,7 @@ jobs:
fi
collect_statistics:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow]
name: "Collect Statistics"
outputs:
@@ -64,6 +84,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Collect Statistics"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml
index 1aa7b0c3445a..f753ebb183cb 100644
--- a/.github/workflows/pull_request.yml
+++ b/.github/workflows/pull_request.yml
@@ -3,15 +3,36 @@
name: PR
on:
+ workflow_dispatch:
+ inputs:
+ no_cache:
+ description: Run without cache
+ required: false
+ type: boolean
+ default: false
pull_request:
- branches: ['master']
+ branches: ['antalya', 'releases/*', 'antalya-*']
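+    # PRs now target the Altinity project branches instead of upstream 'master'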
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
DISABLE_CI_MERGE_COMMIT: ${{ vars.DISABLE_CI_MERGE_COMMIT || '0' }}
- DISABLE_CI_CACHE: ${{ vars.DISABLE_CI_CACHE || '0' }}
+ DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }}
CHECKOUT_REF: ${{ vars.DISABLE_CI_MERGE_COMMIT == '1' && github.event.pull_request.head.sha || '' }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }}
+ AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }}
+ AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }}
+ AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/"
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+ GH_TOKEN: ${{ github.token }}
# Allow updating GH commit statuses and PR comments to post an actual job reports link
permissions: write-all
@@ -19,7 +40,7 @@ permissions: write-all
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: []
name: "Config Workflow"
outputs:
@@ -30,6 +51,26 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Config Workflow"
+
+      - name: Note report location in summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+ else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+ fi
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -56,7 +97,7 @@ jobs:
fi
dockers_build_amd:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }}
name: "Dockers Build (amd)"
@@ -68,6 +109,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (amd)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -94,7 +142,7 @@ jobs:
fi
dockers_build_arm:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
name: "Dockers Build (arm)"
@@ -106,6 +154,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (arm)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -132,7 +187,7 @@ jobs:
fi
dockers_build_multiplatform_manifest:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAobXVsdGlwbGF0Zm9ybSBtYW5pZmVzdCk=') }}
name: "Dockers Build (multiplatform manifest)"
@@ -144,81 +199,12 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- style_check:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3R5bGUgY2hlY2s=') }}
- name: "Style check"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Style check' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Style check' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- docs_check:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9jcyBjaGVjaw==') }}
- name: "Docs check"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
with:
- ref: ${{ env.CHECKOUT_REF }}
+ test_name: "Dockers Build (multiplatform manifest)"
- name: Prepare env script
run: |
@@ -240,13 +226,13 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docs check' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Docs check' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Dockers Build (multiplatform manifest)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
fast_test:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RmFzdCB0ZXN0') }}
name: "Fast test"
@@ -258,43 +244,12 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Fast test' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Fast test' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_tidy:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV90aWR5KQ==') }}
- name: "Build (arm_tidy)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
with:
- ref: ${{ env.CHECKOUT_REF }}
+ test_name: "Fast test"
- name: Prepare env script
run: |
@@ -316,14 +271,14 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_tidy)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Fast test' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_tidy)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Fast test' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
build_amd_debug:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
name: "Build (amd_debug)"
outputs:
@@ -334,6 +289,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -360,8 +322,8 @@ jobs:
fi
build_amd_release:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
name: "Build (amd_release)"
outputs:
@@ -372,6 +334,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -398,8 +367,8 @@ jobs:
fi
build_amd_asan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
name: "Build (amd_asan)"
outputs:
@@ -410,6 +379,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_asan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -436,8 +412,8 @@ jobs:
fi
build_amd_tsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
name: "Build (amd_tsan)"
outputs:
@@ -448,6 +424,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -474,8 +457,8 @@ jobs:
fi
build_amd_msan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }}
name: "Build (amd_msan)"
outputs:
@@ -486,6 +469,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -512,8 +502,8 @@ jobs:
fi
build_amd_ubsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }}
name: "Build (amd_ubsan)"
outputs:
@@ -524,6 +514,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -550,8 +547,8 @@ jobs:
fi
build_amd_binary:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9iaW5hcnkp') }}
name: "Build (amd_binary)"
outputs:
@@ -562,6 +559,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_binary)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -588,8 +592,8 @@ jobs:
fi
build_arm_release:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
name: "Build (arm_release)"
outputs:
@@ -600,6 +604,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -625,11 +636,11 @@ jobs:
python3 -m praktika run 'Build (arm_release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_arm_asan:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }}
- name: "Build (arm_asan)"
+ build_arm_coverage:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9jb3ZlcmFnZSk=') }}
+ name: "Build (arm_coverage)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -638,6 +649,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_coverage)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -658,16 +676,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Build (arm_coverage)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Build (arm_coverage)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_arm_coverage:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9jb3ZlcmFnZSk=') }}
- name: "Build (arm_coverage)"
+ build_arm_binary:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }}
+ name: "Build (arm_binary)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -676,6 +694,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_binary)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -696,16 +721,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_coverage)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Build (arm_binary)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_coverage)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Build (arm_binary)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_arm_binary:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9iaW5hcnkp') }}
- name: "Build (arm_binary)"
+ unit_tests_asan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }}
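+    # The base64 payload decodes to the job's display name ('Unit tests (asan)');
+    # praktika appears to record finished jobs in cache_success_base64 by their
+    # base64-encoded names so already-successful jobs can be skipped on re-runs.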
+ name: "Unit tests (asan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -714,6 +739,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Unit tests (asan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -734,16 +766,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_binary)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_binary)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_amd_darwin:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
- name: "Build (amd_darwin)"
+ unit_tests_tsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }}
+ name: "Unit tests (tsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -752,6 +784,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Unit tests (tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -772,16 +811,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_darwin)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (amd_darwin)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_arm_darwin:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
- name: "Build (arm_darwin)"
+ unit_tests_msan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }}
+ name: "Unit tests (msan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -790,6 +829,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Unit tests (msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -810,16 +856,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_darwin)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_darwin)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_arm_v80compat:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV92ODBjb21wYXQp') }}
- name: "Build (arm_v80compat)"
+ unit_tests_ubsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }}
+ name: "Unit tests (ubsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -828,6 +874,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Unit tests (ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -848,16 +901,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_v80compat)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_v80compat)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Unit tests (ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_amd_freebsd:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9mcmVlYnNkKQ==') }}
- name: "Build (amd_freebsd)"
+ stateless_tests_amd_asan_distributed_plan_parallel_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }}
+ name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -866,6 +919,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -886,16 +946,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_freebsd)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (amd_freebsd)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_ppc64le:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHBwYzY0bGUp') }}
- name: "Build (ppc64le)"
+ stateless_tests_amd_asan_distributed_plan_parallel_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }}
+ name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -904,6 +964,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -924,16 +991,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (ppc64le)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (ppc64le)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_amd_compat:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9jb21wYXQp') }}
- name: "Build (amd_compat)"
+ stateless_tests_amd_asan_distributed_plan_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }}
+ name: "Stateless tests (amd_asan, distributed plan, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -942,6 +1009,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -962,16 +1036,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_compat)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (amd_compat)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_amd_musl:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tdXNsKQ==') }}
- name: "Build (amd_musl)"
+ stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }}
+ name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -980,6 +1054,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1000,16 +1081,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (amd_musl)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (amd_musl)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_riscv64:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHJpc2N2NjQp') }}
- name: "Build (riscv64)"
+ stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }}
+ name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1018,6 +1099,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1038,16 +1126,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (riscv64)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (riscv64)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
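
The Run step's logging pipeline is identical in every job: `set -o pipefail` makes the step fail when praktika fails even though its output is piped onward, `|&` is bash shorthand for `2>&1 |`, and `ts` from moreutils (used only when installed on the runner) prefixes each log line with a timestamp. A standalone sketch of the pattern, with `some_command` standing in for the praktika invocation:

set -o pipefail                    # propagate the command's exit status through the pipe
if command -v ts &> /dev/null; then
  some_command |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./job.log   # timestamped capture
else
  some_command |& tee ./job.log                              # plain capture fallback
fi
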
- build_s390x:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKHMzOTB4KQ==') }}
- name: "Build (s390x)"
+ stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
+ name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1056,6 +1144,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1076,16 +1171,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (s390x)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (s390x)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_loongarch64:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGxvb25nYXJjaDY0KQ==') }}
- name: "Build (loongarch64)"
+ stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
+ name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1094,6 +1189,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1114,16 +1216,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (loongarch64)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (loongarch64)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- build_fuzzers:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGZ1enplcnMp') }}
- name: "Build (fuzzers)"
+ stateless_tests_amd_debug_asyncinsert_s3_storage_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
+ name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1132,6 +1234,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1152,16 +1261,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (fuzzers)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (fuzzers)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- unit_tests_asan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAoYXNhbik=') }}
- name: "Unit tests (asan)"
+ stateless_tests_amd_debug_asyncinsert_s3_storage_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
+ name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1170,6 +1279,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1190,16 +1306,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Unit tests (asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- unit_tests_tsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodHNhbik=') }}
- name: "Unit tests (tsan)"
+ stateless_tests_amd_debug_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }}
+ name: "Stateless tests (amd_debug, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1208,6 +1324,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1228,16 +1351,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Unit tests (tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- unit_tests_msan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAobXNhbik=') }}
- name: "Unit tests (msan)"
+ stateless_tests_amd_debug_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }}
+ name: "Stateless tests (amd_debug, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1246,6 +1369,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1266,16 +1396,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Unit tests (msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- unit_tests_ubsan:
- runs-on: [self-hosted, builder]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VW5pdCB0ZXN0cyAodWJzYW4p') }}
- name: "Unit tests (ubsan)"
+ stateless_tests_amd_tsan_parallel_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }}
+ name: "Stateless tests (amd_tsan, parallel, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1284,6 +1414,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, parallel, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1304,16 +1441,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Unit tests (ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Unit tests (ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_asan_distributed_plan_parallel_1_2:
- runs-on: [self-hosted, amd-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }}
- name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
+ stateless_tests_amd_tsan_parallel_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }}
+ name: "Stateless tests (amd_tsan, parallel, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1322,6 +1459,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, parallel, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1342,16 +1486,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_asan_distributed_plan_parallel_2_2:
- runs-on: [self-hosted, amd-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }}
- name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
+ stateless_tests_amd_tsan_sequential_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }}
+ name: "Stateless tests (amd_tsan, sequential, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1360,6 +1504,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, sequential, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1380,16 +1531,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_asan_distributed_plan_sequential:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }}
- name: "Stateless tests (amd_asan, distributed plan, sequential)"
+ stateless_tests_amd_tsan_sequential_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }}
+ name: "Stateless tests (amd_tsan, sequential, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1398,6 +1549,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, sequential, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1418,16 +1576,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_asan, distributed plan, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgcGFyYWxsZWwp') }}
- name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
+ stateless_tests_amd_msan_parallel_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }}
+ name: "Stateless tests (amd_msan, parallel, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1436,6 +1594,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_msan, parallel, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1456,16 +1621,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBvbGQgYW5hbHl6ZXIsIHMzIHN0b3JhZ2UsIERhdGFiYXNlUmVwbGljYXRlZCwgc2VxdWVudGlhbCk=') }}
- name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)"
+ stateless_tests_amd_msan_parallel_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }}
+ name: "Stateless tests (amd_msan, parallel, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1474,6 +1639,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_msan, parallel, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1494,16 +1666,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
- name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
+ stateless_tests_amd_msan_sequential_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }}
+ name: "Stateless tests (amd_msan, sequential, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1512,6 +1684,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_msan, sequential, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1532,16 +1711,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYmluYXJ5LCBQYXJhbGxlbFJlcGxpY2FzLCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
- name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
+ stateless_tests_amd_msan_sequential_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }}
+ name: "Stateless tests (amd_msan, sequential, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1550,6 +1729,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_msan, sequential, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1570,16 +1756,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_debug_asyncinsert_s3_storage_parallel:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBwYXJhbGxlbCk=') }}
- name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)"
+ stateless_tests_amd_ubsan_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }}
+ name: "Stateless tests (amd_ubsan, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1588,6 +1774,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_ubsan, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1608,16 +1801,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_debug_asyncinsert_s3_storage_sequential:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIEFzeW5jSW5zZXJ0LCBzMyBzdG9yYWdlLCBzZXF1ZW50aWFsKQ==') }}
- name: "Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)"
+ stateless_tests_amd_ubsan_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }}
+ name: "Stateless tests (amd_ubsan, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1626,6 +1819,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_ubsan, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1646,16 +1846,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_debug, AsyncInsert, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_debug_parallel:
- runs-on: [self-hosted, amd-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHBhcmFsbGVsKQ==') }}
- name: "Stateless tests (amd_debug, parallel)"
+ stateless_tests_amd_debug_distributed_plan_s3_storage_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }}
+ name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1664,6 +1864,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1684,16 +1891,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_debug, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_debug_sequential:
- runs-on: [self-hosted, amd-small]
+ stateless_tests_amd_debug_distributed_plan_s3_storage_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIHNlcXVlbnRpYWwp') }}
- name: "Stateless tests (amd_debug, sequential)"
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }}
+ name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1702,6 +1909,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1722,16 +1936,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_debug, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_tsan_parallel_1_2:
- runs-on: [self-hosted, amd-large]
+ stateless_tests_amd_tsan_s3_storage_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDEvMik=') }}
- name: "Stateless tests (amd_tsan, parallel, 1/2)"
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwp') }}
+ name: "Stateless tests (amd_tsan, s3 storage, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1740,6 +1954,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, s3 storage, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1760,16 +1981,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_tsan_parallel_2_2:
- runs-on: [self-hosted, amd-large]
+ stateless_tests_amd_tsan_s3_storage_sequential_1_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgcGFyYWxsZWwsIDIvMik=') }}
- name: "Stateless tests (amd_tsan, parallel, 2/2)"
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }}
+ name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1778,6 +1999,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1798,16 +2026,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_tsan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_tsan_sequential_1_2:
- runs-on: [self-hosted, amd-small]
+ stateless_tests_amd_tsan_s3_storage_sequential_2_2:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }}
- name: "Stateless tests (amd_tsan, sequential, 1/2)"
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }}
+ name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1816,6 +2044,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1836,16 +2071,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stateless_tests_amd_tsan_sequential_2_2:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }}
- name: "Stateless tests (amd_tsan, sequential, 2/2)"
+ stateless_tests_arm_binary_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }}
+ name: "Stateless tests (arm_binary, parallel)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -1854,6 +2089,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (arm_binary, parallel)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1874,890 +2116,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stateless tests (amd_tsan, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_msan_parallel_1_2:
- runs-on: [self-hosted, amd-large]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDEvMik=') }}
- name: "Stateless tests (amd_msan, parallel, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_msan, parallel, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
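
The "Prepare env script" step shown in full above relies on nested heredocs: the outer delimiter is quoted (`'ENV_SETUP_SCRIPT_EOF'`), so the shell writes the body into `praktika_setup_env.sh` verbatim and the inner `cat ... << 'EOF'` blocks execute only later, when the Run step sources that script; the `${{ needs.config_workflow.outputs.data }}` expression is substituted by GitHub Actions before the shell ever runs. A minimal standalone sketch of the same trick, where `$PAYLOAD` stands in for the Actions template expansion (hence the outer delimiter is left unquoted here so the shell performs the substitution instead):

PAYLOAD='{"cache_success_base64": []}'
cat > ./setup_env.sh << SETUP_EOF
cat > ./workflow_config.json << 'EOF'
$PAYLOAD
EOF
SETUP_EOF
. ./setup_env.sh            # sourcing the generated script writes workflow_config.json
cat ./workflow_config.json
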
- stateless_tests_amd_msan_parallel_2_2:
- runs-on: [self-hosted, amd-large]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgcGFyYWxsZWwsIDIvMik=') }}
- name: "Stateless tests (amd_msan, parallel, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_msan, parallel, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_msan_sequential_1_2:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMS8yKQ==') }}
- name: "Stateless tests (amd_msan, sequential, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_msan, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_msan_sequential_2_2:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfbXNhbiwgc2VxdWVudGlhbCwgMi8yKQ==') }}
- name: "Stateless tests (amd_msan, sequential, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_msan, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_ubsan_parallel:
- runs-on: [self-hosted, amd-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHBhcmFsbGVsKQ==') }}
- name: "Stateless tests (amd_ubsan, parallel)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_ubsan, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_ubsan_sequential:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdWJzYW4sIHNlcXVlbnRpYWwp') }}
- name: "Stateless tests (amd_ubsan, sequential)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_ubsan, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_debug_distributed_plan_s3_storage_parallel:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHBhcmFsbGVsKQ==') }}
- name: "Stateless tests (amd_debug, distributed plan, s3 storage, parallel)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_debug_distributed_plan_s3_storage_sequential:
- runs-on: [self-hosted, amd-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfZGVidWcsIGRpc3RyaWJ1dGVkIHBsYW4sIHMzIHN0b3JhZ2UsIHNlcXVlbnRpYWwp') }}
- name: "Stateless tests (amd_debug, distributed plan, s3 storage, sequential)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_debug, distributed plan, s3 storage, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_tsan_s3_storage_parallel:
- runs-on: [self-hosted, amd-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgcGFyYWxsZWwp') }}
- name: "Stateless tests (amd_tsan, s3 storage, parallel)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_tsan_s3_storage_sequential_1_2:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMS8yKQ==') }}
- name: "Stateless tests (amd_tsan, s3 storage, sequential, 1/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 1/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_tsan_s3_storage_sequential_2_2:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfdHNhbiwgczMgc3RvcmFnZSwgc2VxdWVudGlhbCwgMi8yKQ==') }}
- name: "Stateless tests (amd_tsan, s3 storage, sequential, 2/2)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_tsan, s3 storage, sequential, 2/2)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_arm_binary_parallel:
- runs-on: [self-hosted, arm-medium-cpu]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBwYXJhbGxlbCk=') }}
- name: "Stateless tests (arm_binary, parallel)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_arm_binary_sequential:
- runs-on: [self-hosted, arm-small]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }}
- name: "Stateless tests (arm_binary, sequential)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- bugfix_validation_integration_tests:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, fast_test, build_arm_tidy]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVnZml4IHZhbGlkYXRpb24gKGludGVncmF0aW9uIHRlc3RzKQ==') }}
- name: "Bugfix validation (integration tests)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Bugfix validation (integration tests)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Bugfix validation (integration tests)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- bugfix_validation_functional_tests:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVnZml4IHZhbGlkYXRpb24gKGZ1bmN0aW9uYWwgdGVzdHMp') }}
- name: "Bugfix validation (functional tests)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Bugfix validation (functional tests)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Bugfix validation (functional tests)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- stateless_tests_amd_asan_flaky_check:
- runs-on: [self-hosted, amd-small-mem]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZmxha3kgY2hlY2sp') }}
- name: "Stateless tests (amd_asan, flaky check)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_amd_asan_old_analyzer_1_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 1/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_amd_asan_old_analyzer_2_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 2/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_amd_asan_old_analyzer_3_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 3/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_amd_asan_old_analyzer_4_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 4/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_amd_asan_old_analyzer_5_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 5/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_amd_asan_old_analyzer_6_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }}
- name: "Integration tests (amd_asan, old analyzer, 6/6)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_amd_binary_1_5:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }}
- name: "Integration tests (amd_binary, 1/5)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
- with:
- ref: ${{ env.CHECKOUT_REF }}
-
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
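+ # 'set -o pipefail' above lets the praktika exit status survive the tee pipe;
+ # 'ts' (from moreutils) timestamps each log line when it is installed, otherwise
+ # the raw output is captured to ./ci/tmp/job.log via plain tee.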
- integration_tests_amd_binary_2_5:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }}
- name: "Integration tests (amd_binary, 2/5)"
+ stateless_tests_arm_binary_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhcm1fYmluYXJ5LCBzZXF1ZW50aWFsKQ==') }}
+ name: "Stateless tests (arm_binary, sequential)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2766,43 +2134,12 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_amd_binary_3_5:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }}
- name: "Integration tests (amd_binary, 3/5)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
with:
- ref: ${{ env.CHECKOUT_REF }}
+ test_name: "Stateless tests (arm_binary, sequential)"
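+ # runner_setup and docker_setup are the composite actions added under
+ # .github/actions/ in this patch; docker_setup receives the job's display
+ # name through its test_name input.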
- name: Prepare env script
run: |
@@ -2824,16 +2161,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_binary_4_5:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }}
- name: "Integration tests (amd_binary, 4/5)"
+ bugfix_validation_integration_tests:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVnZml4IHZhbGlkYXRpb24gKGludGVncmF0aW9uIHRlc3RzKQ==') }}
+ name: "Bugfix validation (integration tests)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2842,43 +2179,12 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_pr.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
- fi
-
- integration_tests_amd_binary_5_5:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }}
- name: "Integration tests (amd_binary, 5/5)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
with:
- ref: ${{ env.CHECKOUT_REF }}
+ test_name: "Bugfix validation (integration tests)"
- name: Prepare env script
run: |
@@ -2900,16 +2206,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Bugfix validation (integration tests)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Bugfix validation (integration tests)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_arm_binary_distributed_plan_1_4:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }}
- name: "Integration tests (arm_binary, distributed plan, 1/4)"
+ bugfix_validation_functional_tests:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVnZml4IHZhbGlkYXRpb24gKGZ1bmN0aW9uYWwgdGVzdHMp') }}
+ name: "Bugfix validation (functional tests)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2918,6 +2224,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Bugfix validation (functional tests)"
+
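+ # The heredoc below snapshots the workflow config (needs.config_workflow.outputs.data)
+ # and the aggregated 'needs' statuses into JSON files that praktika reads at job
+ # start; the rm/mkdir arguments appear to repeat ./ci/tmp because the generator's
+ # artifact, input, and temp directories all resolve to the same path here.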
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2938,16 +2251,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Bugfix validation (functional tests)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Bugfix validation (functional tests)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_arm_binary_distributed_plan_2_4:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }}
- name: "Integration tests (arm_binary, distributed plan, 2/4)"
+ stateless_tests_amd_asan_flaky_check:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZmxha3kgY2hlY2sp') }}
+ name: "Stateless tests (amd_asan, flaky check)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2956,6 +2269,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, flaky check)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -2976,16 +2296,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stateless tests (amd_asan, flaky check)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_arm_binary_distributed_plan_3_4:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }}
- name: "Integration tests (arm_binary, distributed plan, 3/4)"
+ integration_tests_amd_asan_old_analyzer_1_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 1/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -2994,6 +2314,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 1/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3014,16 +2341,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 1/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_arm_binary_distributed_plan_4_4:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_binary]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }}
- name: "Integration tests (arm_binary, distributed plan, 4/4)"
+ integration_tests_amd_asan_old_analyzer_2_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 2/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3032,6 +2359,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 2/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3052,16 +2386,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 2/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_1_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }}
- name: "Integration tests (amd_tsan, 1/6)"
+ integration_tests_amd_asan_old_analyzer_3_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 3/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3070,6 +2404,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 3/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3090,16 +2431,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 3/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_2_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }}
- name: "Integration tests (amd_tsan, 2/6)"
+ integration_tests_amd_asan_old_analyzer_4_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 4/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3108,6 +2449,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 4/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3128,16 +2476,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 4/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_3_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }}
- name: "Integration tests (amd_tsan, 3/6)"
+ integration_tests_amd_asan_old_analyzer_5_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 5/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3146,6 +2494,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 5/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3166,16 +2521,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 5/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_4_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }}
- name: "Integration tests (amd_tsan, 4/6)"
+ integration_tests_amd_asan_old_analyzer_6_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }}
+ name: "Integration tests (amd_asan, old analyzer, 6/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3184,6 +2539,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 6/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3204,16 +2566,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, old analyzer, 6/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_5_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }}
- name: "Integration tests (amd_tsan, 5/6)"
+ integration_tests_amd_binary_1_5:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDEvNSk=') }}
+ name: "Integration tests (amd_binary, 1/5)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3222,6 +2584,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_binary, 1/5)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3242,16 +2611,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 1/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_tsan_6_6:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }}
- name: "Integration tests (amd_tsan, 6/6)"
+ integration_tests_amd_binary_2_5:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDIvNSk=') }}
+ name: "Integration tests (amd_binary, 2/5)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3260,6 +2629,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_binary, 2/5)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3280,16 +2656,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 2/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- integration_tests_amd_asan_flaky_check:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBmbGFreSBjaGVjayk=') }}
- name: "Integration tests (amd_asan, flaky check)"
+ integration_tests_amd_binary_3_5:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDMvNSk=') }}
+ name: "Integration tests (amd_binary, 3/5)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3298,6 +2674,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_binary, 3/5)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3318,16 +2701,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Integration tests (amd_asan, flaky check)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Integration tests (amd_asan, flaky check)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 3/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- docker_server_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
- name: "Docker server image"
+ integration_tests_amd_binary_4_5:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDQvNSk=') }}
+ name: "Integration tests (amd_binary, 4/5)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3336,6 +2719,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_binary, 4/5)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3356,16 +2746,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker server image' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Docker server image' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 4/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- docker_keeper_image:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
- name: "Docker keeper image"
+ integration_tests_amd_binary_5_5:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9iaW5hcnksIDUvNSk=') }}
+ name: "Integration tests (amd_binary, 5/5)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3374,6 +2764,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_binary, 5/5)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3394,16 +2791,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Docker keeper image' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Docker keeper image' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_binary, 5/5)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- install_packages_amd_debug:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX2RlYnVnKQ==') }}
- name: "Install packages (amd_debug)"
+ integration_tests_arm_binary_distributed_plan_1_4:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDEvNCk=') }}
+ name: "Integration tests (arm_binary, distributed plan, 1/4)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3412,6 +2809,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (arm_binary, distributed plan, 1/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3432,16 +2836,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Install packages (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Install packages (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 1/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- compatibility_check_release:
- runs-on: [self-hosted, style-checker]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }}
- name: "Compatibility check (release)"
+ integration_tests_arm_binary_distributed_plan_2_4:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDIvNCk=') }}
+ name: "Integration tests (arm_binary, distributed plan, 2/4)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3450,6 +2854,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (arm_binary, distributed plan, 2/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3470,16 +2881,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (release)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Compatibility check (release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 2/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- compatibility_check_aarch64:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }}
- name: "Compatibility check (aarch64)"
+ integration_tests_arm_binary_distributed_plan_3_4:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDMvNCk=') }}
+ name: "Integration tests (arm_binary, distributed plan, 3/4)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3488,6 +2899,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (arm_binary, distributed plan, 3/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3508,16 +2926,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Compatibility check (aarch64)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 3/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_amd_debug:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }}
- name: "Stress test (amd_debug)"
+ integration_tests_arm_binary_distributed_plan_4_4:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFybV9iaW5hcnksIGRpc3RyaWJ1dGVkIHBsYW4sIDQvNCk=') }}
+ name: "Integration tests (arm_binary, distributed plan, 4/4)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3526,6 +2944,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (arm_binary, distributed plan, 4/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3546,16 +2971,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (arm_binary, distributed plan, 4/4)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_amd_tsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }}
- name: "Stress test (amd_tsan)"
+ integration_tests_amd_tsan_1_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }}
+ name: "Integration tests (amd_tsan, 1/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3564,6 +2989,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 1/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3584,16 +3016,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 1/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_arm_asan:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_asan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }}
- name: "Stress test (arm_asan)"
+ integration_tests_amd_tsan_2_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }}
+ name: "Integration tests (amd_tsan, 2/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3602,6 +3034,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 2/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3622,16 +3061,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (arm_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (arm_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 2/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_amd_ubsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }}
- name: "Stress test (amd_ubsan)"
+ integration_tests_amd_tsan_3_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }}
+ name: "Integration tests (amd_tsan, 3/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3640,6 +3079,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 3/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3660,16 +3106,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 3/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- stress_test_amd_msan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }}
- name: "Stress test (amd_msan)"
+ integration_tests_amd_tsan_4_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }}
+ name: "Integration tests (amd_tsan, 4/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3678,6 +3124,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 4/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3698,16 +3151,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 4/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- upgrade_check_amd_asan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VXBncmFkZSBjaGVjayAoYW1kX2FzYW4p') }}
- name: "Upgrade check (amd_asan)"
+ integration_tests_amd_tsan_5_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }}
+ name: "Integration tests (amd_tsan, 5/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3716,6 +3169,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 5/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3736,16 +3196,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Upgrade check (amd_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Upgrade check (amd_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 5/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- upgrade_check_amd_tsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VXBncmFkZSBjaGVjayAoYW1kX3RzYW4p') }}
- name: "Upgrade check (amd_tsan)"
+ integration_tests_amd_tsan_6_6:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }}
+ name: "Integration tests (amd_tsan, 6/6)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3754,6 +3214,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 6/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3774,16 +3241,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Upgrade check (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Upgrade check (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_tsan, 6/6)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- upgrade_check_amd_msan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VXBncmFkZSBjaGVjayAoYW1kX21zYW4p') }}
- name: "Upgrade check (amd_msan)"
+ integration_tests_amd_asan_flaky_check:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_asan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBmbGFreSBjaGVjayk=') }}
+ name: "Integration tests (amd_asan, flaky check)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3792,6 +3259,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, flaky check)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3812,16 +3286,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Upgrade check (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, flaky check)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Upgrade check (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Integration tests (amd_asan, flaky check)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- upgrade_check_amd_debug:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'VXBncmFkZSBjaGVjayAoYW1kX2RlYnVnKQ==') }}
- name: "Upgrade check (amd_debug)"
+ docker_server_image:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
+ name: "Docker server image"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3830,6 +3304,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Docker server image"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3850,16 +3331,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Upgrade check (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Docker server image' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Upgrade check (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Docker server image' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- ast_fuzzer_amd_debug:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }}
- name: "AST fuzzer (amd_debug)"
+ docker_keeper_image:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
+ name: "Docker keeper image"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3868,6 +3349,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Docker keeper image"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3888,16 +3376,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Docker keeper image' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Docker keeper image' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- ast_fuzzer_arm_asan:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYXJtX2FzYW4p') }}
- name: "AST fuzzer (arm_asan)"
+ install_packages_amd_debug:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX2RlYnVnKQ==') }}
+ name: "Install packages (amd_debug)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3906,6 +3394,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Install packages (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3926,16 +3421,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Install packages (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'AST fuzzer (arm_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Install packages (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- ast_fuzzer_amd_tsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }}
- name: "AST fuzzer (amd_tsan)"
+ compatibility_check_release:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAocmVsZWFzZSk=') }}
+ name: "Compatibility check (release)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3944,6 +3439,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Compatibility check (release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -3964,16 +3466,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Compatibility check (release)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Compatibility check (release)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- ast_fuzzer_amd_msan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }}
- name: "AST fuzzer (amd_msan)"
+ compatibility_check_aarch64:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'Q29tcGF0aWJpbGl0eSBjaGVjayAoYWFyY2g2NCk=') }}
+ name: "Compatibility check (aarch64)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -3982,6 +3484,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Compatibility check (aarch64)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4002,16 +3511,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Compatibility check (aarch64)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Compatibility check (aarch64)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- ast_fuzzer_amd_ubsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }}
- name: "AST fuzzer (amd_ubsan)"
+ stress_test_amd_debug:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_debug, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }}
+ name: "Stress test (amd_debug)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4020,6 +3529,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4040,16 +3556,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- buzzhouse_amd_debug:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }}
- name: "BuzzHouse (amd_debug)"
+ stress_test_amd_tsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_tsan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }}
+ name: "Stress test (amd_tsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4058,6 +3574,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4078,16 +3601,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- buzzhouse_arm_asan:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhcm1fYXNhbik=') }}
- name: "BuzzHouse (arm_asan)"
+ stress_test_amd_ubsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_ubsan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }}
+ name: "Stress test (amd_ubsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4096,6 +3619,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4116,16 +3646,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'BuzzHouse (arm_asan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- buzzhouse_amd_tsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }}
- name: "BuzzHouse (amd_tsan)"
+ stress_test_amd_msan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, build_amd_msan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }}
+ name: "Stress test (amd_msan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4134,6 +3664,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4154,16 +3691,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- buzzhouse_amd_msan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }}
- name: "BuzzHouse (amd_msan)"
+ ast_fuzzer_amd_debug:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX2RlYnVnKQ==') }}
+ name: "AST fuzzer (amd_debug)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4172,6 +3709,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "AST fuzzer (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4192,16 +3736,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- buzzhouse_amd_ubsan:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_ubsan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }}
- name: "BuzzHouse (amd_ubsan)"
+ ast_fuzzer_amd_tsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3RzYW4p') }}
+ name: "AST fuzzer (amd_tsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4210,6 +3754,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "AST fuzzer (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4230,16 +3781,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_amd_release_master_head_1_3:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzMp') }}
- name: "Performance Comparison (amd_release, master_head, 1/3)"
+ ast_fuzzer_amd_msan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX21zYW4p') }}
+ name: "AST fuzzer (amd_msan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4248,6 +3799,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "AST fuzzer (amd_msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4268,16 +3826,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/3)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 1/3)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_amd_release_master_head_2_3:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzMp') }}
- name: "Performance Comparison (amd_release, master_head, 2/3)"
+ ast_fuzzer_amd_ubsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QVNUIGZ1enplciAoYW1kX3Vic2FuKQ==') }}
+ name: "AST fuzzer (amd_ubsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4286,6 +3844,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "AST fuzzer (amd_ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4306,16 +3871,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/3)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 2/3)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'AST fuzzer (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_amd_release_master_head_3_3:
- runs-on: [self-hosted, amd-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYW1kX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzMp') }}
- name: "Performance Comparison (amd_release, master_head, 3/3)"
+ buzzhouse_amd_debug:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_debug]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfZGVidWcp') }}
+ name: "BuzzHouse (amd_debug)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4324,6 +3889,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "BuzzHouse (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4344,16 +3916,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/3)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (amd_release, master_head, 3/3)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_debug)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_arm_release_master_head_1_3:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAxLzMp') }}
- name: "Performance Comparison (arm_release, master_head, 1/3)"
+ buzzhouse_amd_tsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_tsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdHNhbik=') }}
+ name: "BuzzHouse (amd_tsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4362,6 +3934,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "BuzzHouse (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4382,16 +3961,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/3)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 1/3)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_tsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_arm_release_master_head_2_3:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAyLzMp') }}
- name: "Performance Comparison (arm_release, master_head, 2/3)"
+ buzzhouse_amd_msan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_msan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfbXNhbik=') }}
+ name: "BuzzHouse (amd_msan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4400,6 +3979,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "BuzzHouse (amd_msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4420,16 +4006,16 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/3)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 2/3)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_msan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
- performance_comparison_arm_release_master_head_3_3:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_arm_release]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'UGVyZm9ybWFuY2UgQ29tcGFyaXNvbiAoYXJtX3JlbGVhc2UsIG1hc3Rlcl9oZWFkLCAzLzMp') }}
- name: "Performance Comparison (arm_release, master_head, 3/3)"
+ buzzhouse_amd_ubsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_debug_parallel, stateless_tests_arm_binary_parallel, build_amd_ubsan]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnV6ekhvdXNlIChhbWRfdWJzYW4p') }}
+ name: "BuzzHouse (amd_ubsan)"
outputs:
data: ${{ steps.run.outputs.DATA }}
steps:
@@ -4438,6 +4024,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "BuzzHouse (amd_ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4458,14 +4051,14 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/3)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "PR" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Performance Comparison (arm_release, master_head, 3/3)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'BuzzHouse (amd_ubsan)' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
finish_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, style_check, docs_check, fast_test, build_arm_tidy, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_asan, build_arm_coverage, build_arm_binary, build_amd_darwin, build_arm_darwin, build_arm_v80compat, build_amd_freebsd, build_ppc64le, build_amd_compat, build_amd_musl, build_riscv64, build_s390x, build_loongarch64, build_fuzzers, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, bugfix_validation_integration_tests, bugfix_validation_functional_tests, stateless_tests_amd_asan_flaky_check, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, integration_tests_amd_asan_flaky_check, docker_server_image, docker_keeper_image, install_packages_amd_debug, compatibility_check_release, compatibility_check_aarch64, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_amd_ubsan, stress_test_amd_msan, upgrade_check_amd_asan, upgrade_check_amd_tsan, upgrade_check_amd_msan, upgrade_check_amd_debug, ast_fuzzer_amd_debug, ast_fuzzer_arm_asan, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_arm_asan, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan, performance_comparison_amd_release_master_head_1_3, performance_comparison_amd_release_master_head_2_3, performance_comparison_amd_release_master_head_3_3, performance_comparison_arm_release_master_head_1_3, performance_comparison_arm_release_master_head_2_3, performance_comparison_arm_release_master_head_3_3]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, dockers_build_multiplatform_manifest, fast_test, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_coverage, build_arm_binary, unit_tests_asan, unit_tests_tsan, unit_tests_msan, unit_tests_ubsan, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_amd_debug_asyncinsert_s3_storage_parallel, stateless_tests_amd_debug_asyncinsert_s3_storage_sequential, stateless_tests_amd_debug_parallel, stateless_tests_amd_debug_sequential, stateless_tests_amd_tsan_parallel_1_2, stateless_tests_amd_tsan_parallel_2_2, stateless_tests_amd_tsan_sequential_1_2, stateless_tests_amd_tsan_sequential_2_2, stateless_tests_amd_msan_parallel_1_2, stateless_tests_amd_msan_parallel_2_2, stateless_tests_amd_msan_sequential_1_2, stateless_tests_amd_msan_sequential_2_2, stateless_tests_amd_ubsan_parallel, stateless_tests_amd_ubsan_sequential, stateless_tests_amd_debug_distributed_plan_s3_storage_parallel, stateless_tests_amd_debug_distributed_plan_s3_storage_sequential, stateless_tests_amd_tsan_s3_storage_parallel, stateless_tests_amd_tsan_s3_storage_sequential_1_2, stateless_tests_amd_tsan_s3_storage_sequential_2_2, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential, bugfix_validation_integration_tests, bugfix_validation_functional_tests, stateless_tests_amd_asan_flaky_check, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_binary_1_5, integration_tests_amd_binary_2_5, integration_tests_amd_binary_3_5, integration_tests_amd_binary_4_5, integration_tests_amd_binary_5_5, integration_tests_arm_binary_distributed_plan_1_4, integration_tests_arm_binary_distributed_plan_2_4, integration_tests_arm_binary_distributed_plan_3_4, integration_tests_arm_binary_distributed_plan_4_4, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, integration_tests_amd_asan_flaky_check, docker_server_image, docker_keeper_image, install_packages_amd_debug, compatibility_check_release, compatibility_check_aarch64, stress_test_amd_debug, stress_test_amd_tsan, stress_test_amd_ubsan, stress_test_amd_msan, ast_fuzzer_amd_debug, ast_fuzzer_amd_tsan, ast_fuzzer_amd_msan, ast_fuzzer_amd_ubsan, buzzhouse_amd_debug, buzzhouse_amd_tsan, buzzhouse_amd_msan, buzzhouse_amd_ubsan]
if: ${{ !cancelled() }}
name: "Finish Workflow"
outputs:
@@ -4476,6 +4069,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Finish Workflow"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -4500,3 +4100,199 @@ jobs:
else
python3 -m praktika run 'Finish Workflow' --workflow "PR" --ci |& tee ./ci/tmp/job.log
fi
+
+##########################################################################################
+##################################### ALTINITY JOBS ######################################
+##########################################################################################
+
+ GrypeScanServer:
+ needs: [config_workflow, docker_server_image]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
+ strategy:
+ fail-fast: false
+ matrix:
+ suffix: ['', '-alpine']
+ uses: ./.github/workflows/grype_scan.yml
+ secrets: inherit
+ with:
+ docker_image: altinityinfra/clickhouse-server
+ version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+ tag-suffix: ${{ matrix.suffix }}
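+ # Note: the suffix matrix scans both the default and '-alpine' server image
+ # tags. The base64 token in the if-condition is just the dependency job name
+ # ('RG9ja2VyIHNlcnZlciBpbWFnZQ==' decodes to 'Docker server image'); the scan
+ # is presumably skipped when that job's result was served from the CI cache.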
+ GrypeScanKeeper:
+ needs: [config_workflow, docker_keeper_image]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
+ uses: ./.github/workflows/grype_scan.yml
+ secrets: inherit
+ with:
+ docker_image: altinityinfra/clickhouse-keeper
+ version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+
+ RegressionTestsRelease:
+ needs: [config_workflow, build_amd_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'regression')}}
+ uses: ./.github/workflows/regression.yml
+ secrets: inherit
+ with:
+ runner_type: altinity-on-demand, altinity-regression-tester
+ commit: fc19ce3a7322a10ab791de755c950a56744a12e7
+ arch: release
+ build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ timeout_minutes: 300
+ workflow_config: ${{ needs.config_workflow.outputs.data }}
+ RegressionTestsAarch64:
+ needs: [config_workflow, build_arm_binary]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'regression') && !contains(fromJson(needs.config_workflow.outputs.data).custom_data.ci_exclude_tags, 'aarch64')}}
+ uses: ./.github/workflows/regression.yml
+ secrets: inherit
+ with:
+ runner_type: altinity-on-demand, altinity-regression-tester-aarch64
+ commit: fc19ce3a7322a10ab791de755c950a56744a12e7
+ arch: aarch64
+ build_sha: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ timeout_minutes: 300
+ workflow_config: ${{ needs.config_workflow.outputs.data }}
+
+ FinishCIReport:
+ if: ${{ !cancelled() }}
+ needs:
+ - config_workflow
+ - dockers_build_amd
+ - dockers_build_arm
+ - dockers_build_multiplatform_manifest
+ - fast_test
+ - build_amd_debug
+ - build_amd_release
+ - build_amd_asan
+ - build_amd_tsan
+ - build_amd_msan
+ - build_amd_ubsan
+ - build_amd_binary
+ - build_arm_release
+ - build_arm_coverage
+ - build_arm_binary
+ - unit_tests_asan
+ - unit_tests_tsan
+ - unit_tests_msan
+ - unit_tests_ubsan
+ - stateless_tests_amd_asan_distributed_plan_parallel_1_2
+ - stateless_tests_amd_asan_distributed_plan_parallel_2_2
+ - stateless_tests_amd_asan_distributed_plan_sequential
+ - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel
+ - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential
+ - stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel
+ - stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential
+ - stateless_tests_amd_debug_asyncinsert_s3_storage_parallel
+ - stateless_tests_amd_debug_asyncinsert_s3_storage_sequential
+ - stateless_tests_amd_debug_parallel
+ - stateless_tests_amd_debug_sequential
+ - stateless_tests_amd_tsan_parallel_1_2
+ - stateless_tests_amd_tsan_parallel_2_2
+ - stateless_tests_amd_tsan_sequential_1_2
+ - stateless_tests_amd_tsan_sequential_2_2
+ - stateless_tests_amd_msan_parallel_1_2
+ - stateless_tests_amd_msan_parallel_2_2
+ - stateless_tests_amd_msan_sequential_1_2
+ - stateless_tests_amd_msan_sequential_2_2
+ - stateless_tests_amd_ubsan_parallel
+ - stateless_tests_amd_ubsan_sequential
+ - stateless_tests_amd_debug_distributed_plan_s3_storage_parallel
+ - stateless_tests_amd_debug_distributed_plan_s3_storage_sequential
+ - stateless_tests_amd_tsan_s3_storage_parallel
+ - stateless_tests_amd_tsan_s3_storage_sequential_1_2
+ - stateless_tests_amd_tsan_s3_storage_sequential_2_2
+ - stateless_tests_arm_binary_parallel
+ - stateless_tests_arm_binary_sequential
+ - bugfix_validation_integration_tests
+ - bugfix_validation_functional_tests
+ - stateless_tests_amd_asan_flaky_check
+ - integration_tests_amd_asan_old_analyzer_1_6
+ - integration_tests_amd_asan_old_analyzer_2_6
+ - integration_tests_amd_asan_old_analyzer_3_6
+ - integration_tests_amd_asan_old_analyzer_4_6
+ - integration_tests_amd_asan_old_analyzer_5_6
+ - integration_tests_amd_asan_old_analyzer_6_6
+ - integration_tests_amd_binary_1_5
+ - integration_tests_amd_binary_2_5
+ - integration_tests_amd_binary_3_5
+ - integration_tests_amd_binary_4_5
+ - integration_tests_amd_binary_5_5
+ - integration_tests_arm_binary_distributed_plan_1_4
+ - integration_tests_arm_binary_distributed_plan_2_4
+ - integration_tests_arm_binary_distributed_plan_3_4
+ - integration_tests_arm_binary_distributed_plan_4_4
+ - integration_tests_amd_tsan_1_6
+ - integration_tests_amd_tsan_2_6
+ - integration_tests_amd_tsan_3_6
+ - integration_tests_amd_tsan_4_6
+ - integration_tests_amd_tsan_5_6
+ - integration_tests_amd_tsan_6_6
+ - integration_tests_amd_asan_flaky_check
+ - docker_server_image
+ - docker_keeper_image
+ - install_packages_amd_debug
+ - compatibility_check_release
+ - compatibility_check_aarch64
+ - stress_test_amd_debug
+ - stress_test_amd_tsan
+ - stress_test_amd_ubsan
+ - stress_test_amd_msan
+ - ast_fuzzer_amd_debug
+ - ast_fuzzer_amd_tsan
+ - ast_fuzzer_amd_msan
+ - ast_fuzzer_amd_ubsan
+ - buzzhouse_amd_debug
+ - buzzhouse_amd_tsan
+ - buzzhouse_amd_msan
+ - buzzhouse_amd_ubsan
+ - finish_workflow
+ - GrypeScanServer
+ - GrypeScanKeeper
+ - RegressionTestsRelease
+ - RegressionTestsAarch64
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ - name: Finalize workflow report
+ if: ${{ !cancelled() }}
+ uses: ./.github/actions/create_workflow_report
+ with:
+ workflow_config: ${{ needs.config_workflow.outputs.data }}
+ final: true
+
+ SourceUpload:
+ needs: [config_workflow, build_amd_release]
+ if: ${{ !failure() && !cancelled() }}
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ env:
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ VERSION: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ ref: ${{ fromJson(needs.config_workflow.outputs.data).git_ref }}
+ submodules: true
+ fetch-depth: 0
+ filter: tree:0
+ - name: Install aws cli
+ uses: unfor19/install-aws-cli-action@v1
+ with:
+ version: 2
+ arch: arm64
+ - name: Create source tar
+ run: |
+ cd .. && tar czf $RUNNER_TEMP/build_source.src.tar.gz ClickHouse/
+ - name: Upload source tar
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ S3_PATH="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/build_amd_release"
+ else
+ S3_PATH="PRs/$PR_NUMBER/$COMMIT_SHA/build_amd_release"
+ fi
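+ # Illustrative resulting keys: PRs/123/<sha>/build_amd_release for a PR build,
+ # or REFs/<branch>/<sha>/build_amd_release for a branch build (123 is a made-up PR number).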
+
+ aws s3 cp $RUNNER_TEMP/build_source.src.tar.gz s3://altinity-build-artifacts/$S3_PATH/clickhouse-$VERSION.src.tar.gz
diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml
new file mode 100644
index 000000000000..5275e2550003
--- /dev/null
+++ b/.github/workflows/regression.yml
@@ -0,0 +1,1081 @@
+name: Regression test workflow - Release
+'on':
+ workflow_call:
+ inputs:
+ runner_type:
+ description: the label of the runner to use; can be a single label or a comma-separated list.
+ required: true
+ type: string
+ commit:
+ description: commit hash of the Altinity/clickhouse-regression repository to check out.
+ required: true
+ type: string
+ arch:
+ description: arch to run the tests on.
+ required: true
+ type: string
+ timeout_minutes:
+ description: maximum number of minutes to let the workflow run before GitHub cancels it.
+ default: 210
+ type: number
+ build_sha:
+ description: commit SHA under which the build artifacts were uploaded.
+ required: true
+ type: string
+ checkout_depth:
+ description: the depth of the git shallow checkout.
+ required: false
+ type: number
+ default: 1
+ submodules:
+ description: whether submodules should be checked out.
+ required: false
+ type: boolean
+ default: false
+ additional_envs:
+ description: additional environment variables to set up for the job.
+ type: string
+ workflow_config:
+ description: workflow config for the run
+ required: true
+ type: string
+ secrets:
+ secret_envs:
+ description: if given, it is passed on to the job environments.
+ required: false
+ AWS_SECRET_ACCESS_KEY:
+ description: the secret access key to the aws param store.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws param store.
+ required: true
+ AWS_DEFAULT_REGION:
+ description: the region of the aws param store.
+ required: true
+ AWS_REPORT_KEY_ID:
+ description: aws s3 key id used for regression test reports.
+ required: true
+ AWS_REPORT_SECRET_ACCESS_KEY:
+ description: aws s3 secret access key used for regression test reports.
+ required: true
+ AWS_REPORT_REGION:
+ description: aws s3 region used for regression test reports.
+ required: true
+ DOCKER_USERNAME:
+ description: username of the docker user.
+ required: true
+ DOCKER_PASSWORD:
+ description: password to the docker user.
+ required: true
+ REGRESSION_AWS_S3_BUCKET:
+ description: aws s3 bucket used for regression tests.
+ required: true
+ REGRESSION_AWS_S3_KEY_ID:
+ description: aws s3 key id used for regression tests.
+ required: true
+ REGRESSION_AWS_S3_SECRET_ACCESS_KEY:
+ description: aws s3 secret access key used for regression tests.
+ required: true
+ REGRESSION_AWS_S3_REGION:
+ description: aws s3 region used for regression tests.
+ required: true
+ REGRESSION_GCS_KEY_ID:
+ description: gcs key id used for regression tests.
+ required: true
+ REGRESSION_GCS_KEY_SECRET:
+ description: gcs key secret used for regression tests.
+ required: true
+ REGRESSION_GCS_URI:
+ description: gcs uri used for regression tests.
+ required: true
+
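+# Example caller (a simplified sketch of RegressionTestsRelease from pull_request.yml;
+# the real caller computes build_sha conditionally for pull_request events):
+#
+#   RegressionTestsRelease:
+#     needs: [config_workflow, build_amd_binary]
+#     uses: ./.github/workflows/regression.yml
+#     secrets: inherit
+#     with:
+#       runner_type: altinity-on-demand, altinity-regression-tester
+#       commit: <clickhouse-regression commit hash>
+#       arch: release
+#       build_sha: ${{ github.sha }}
+#       workflow_config: ${{ needs.config_workflow.outputs.data }}
+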
+env:
+ # Force the stdout and stderr streams to be unbuffered
+ PYTHONUNBUFFERED: 1
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ CHECKS_DATABASE_USER: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CHECKS_DATABASE_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ args: --test-to-end
+ --no-colors
+ --local
+ --collect-service-logs
+ --output new-fails
+ --parallel 1
+ --log raw.log
+ --with-analyzer
+ artifacts: builds
+ artifact_paths: |
+ ./report.html
+ ./*.log.txt
+ ./*.log
+ ./*.html
+ ./*/_instances/*.log
+ ./*/_instances/*/logs/*.log
+ ./*/*/_instances/*/logs/*.log
+ ./*/*/_instances/*.log
+ build_sha: ${{ inputs.build_sha }}
+ pr_number: ${{ github.event.number }}
+ event_name: ${{ github.event_name }}
+ version: ${{ fromJson(inputs.workflow_config).custom_data.version.string }}
+ SKIP_LIST: ${{ join(fromJson(inputs.workflow_config).custom_data.ci_exclude_tags, '|') || '' }}
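+ # SKIP_LIST joins ci_exclude_tags with '|' (e.g. 'regression|aarch64'),
+ # presumably consumed by the regression scripts as an alternation pattern.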
+
+jobs:
+ runner_labels_setup:
+ name: Compute proper runner labels for the rest of the jobs
+ runs-on: ubuntu-latest
+ outputs:
+ runner_labels: ${{ steps.setVariables.outputs.runner_labels }}
+ steps:
+ - id: setVariables
+ name: Prepare runner_labels variables for the later steps
+ run: |
+
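+ # Worked example, assuming inputs.runner_type = "altinity-on-demand, altinity-regression-tester":
+ #   after prepend: "self-hosted, altinity-on-demand, altinity-regression-tester"
+ #   after tr:      "self-hosted,altinity-on-demand,altinity-regression-tester"
+ #   final output:  [ 'self-hosted', 'altinity-on-demand', 'altinity-regression-tester' ]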
+ # Prepend self-hosted
+ input="self-hosted, ${input}"
+
+ # Remove all whitespace
+ input="$(echo ${input} | tr -d [:space:])"
+ # Make something like a JSON array from comma-separated list
+ input="[ '${input//\,/\'\, \'}' ]"
+
+ echo "runner_labels=$input" >> ${GITHUB_OUTPUT}
+ env:
+ input: ${{ inputs.runner_type }}
+
+ Common:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'common')
+ strategy:
+ fail-fast: false
+ matrix:
+ SUITE: [aes_encryption, atomic_insert, base_58, data_types, datetime64_extended_range, disk_level_encryption, dns, engines, example, extended_precision_data_types, functions, kafka, kerberos, key_value, lightweight_delete, memory, part_moves_between_shards, selects, session_timezone, swarms, version, window_functions]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=${{ matrix.SUITE }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
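+ # get-deb-url.py is expected to locate the build artifact for this commit and
+ # write clickhouse_path into $GITHUB_ENV (hence --github-env); the run step
+ # below consumes it as --clickhouse-binary-path.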
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.SUITE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} ${{ matrix.SUITE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ AggregateFunctions:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'aggregate_functions')
+ strategy:
+ fail-fast: false
+ matrix:
+ PART: [1, 2, 3]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=aggregate_functions
+ PART=${{ matrix.PART }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.PART }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ --only "part ${{ matrix.PART }}/*"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} ${{ env.SUITE }}-${{ matrix.PART }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.PART }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+ Alter:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'alter')
+ strategy:
+ fail-fast: false
+ matrix:
+ ONLY: [replace, move]
+ include:
+ - ONLY: attach
+ PART: 1
+ - ONLY: attach
+ PART: 2
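+ # Effective matrix: replace and move (no PART), plus attach split into parts 1 and 2.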
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=alter
+ STORAGE=/${{ matrix.ONLY }}_partition
+ PART=${{ matrix.PART }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u alter/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --only "/alter/${{ matrix.ONLY }} partition/${{ matrix.PART && format('part {0}/', matrix.PART) || '' }}*"
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.ONLY }}${{ matrix.PART }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Alter ${{ matrix.ONLY }} partition ${{ matrix.PART }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: alter-${{ matrix.ONLY }}${{ matrix.PART && format('-{0}', matrix.PART) || '' }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ Benchmark:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'benchmark')
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [minio, aws_s3, gcs]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=ontime_benchmark
+ STORAGE=/${{ matrix.STORAGE }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/benchmark.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --storage ${{ matrix.STORAGE }}
+ --gcs-uri ${{ secrets.REGRESSION_GCS_URI }}
+ --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }}
+ --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }}
+ --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }}
+ --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Benchmark ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: benchmark-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ ClickHouseKeeper:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'clickhouse_keeper')
+ strategy:
+ fail-fast: false
+ matrix:
+ PART: [1, 2]
+ SSL: [ssl, no_ssl]
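+ # Cross product: parts 1 and 2, each with and without SSL (four jobs per invocation).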
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=clickhouse_keeper
+ STORAGE=/${{ matrix.SSL }}
+ PART=${{ matrix.PART }}
+ SSL=${{ matrix.SSL == 'ssl' && '--ssl' || '' }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py ${{ env.SSL }}
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.PART }}, ${{ matrix.SSL }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ --only "part ${{ matrix.PART }}/*"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Clickhouse Keeper ${{ matrix.SSL }} ${{ matrix.PART }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.PART }}-${{ inputs.arch }}-${{ matrix.SSL }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ Iceberg:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'iceberg')
+ strategy:
+ fail-fast: false
+ matrix:
+ PART: [1, 2]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ if [ ${{ matrix.PART }} -eq 1 ]; then
+ echo ICEBERG_ONLY='"/iceberg/iceberg engine/rest catalog/*" "/iceberg/s3 table function/*" "/iceberg/icebergS3 table function/*" "/iceberg/iceberg cache"' >> "$GITHUB_ENV"
+ else
+ echo ICEBERG_ONLY='"/iceberg/iceberg engine/glue catalog/*" "/iceberg/iceberg table engine/*"' >> "$GITHUB_ENV"
+ fi
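+ # Part 1 covers the rest catalog, S3 table functions and iceberg cache;
+ # part 2 covers the glue catalog and the iceberg table engine.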
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=iceberg
+ PART=${{ matrix.PART }}
+ LOCALSTACK_AUTH_TOKEN=${{ secrets.LOCALSTACK_AUTH_TOKEN }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.PART }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ --only ${{ env.ICEBERG_ONLY }}
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} ${{ env.SUITE }}-${{ matrix.PART }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.PART }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+ LDAP:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'ldap')
+ strategy:
+ fail-fast: false
+ matrix:
+ SUITE: [authentication, external_user_directory, role_mapping]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=ldap/${{ matrix.SUITE }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.SUITE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} LDAP ${{ matrix.SUITE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ldap-${{ matrix.SUITE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ Parquet:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'parquet')
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=parquet
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name=$GITHUB_JOB job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Parquet"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+ ParquetS3:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'parquet')
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [minio, aws_s3]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=parquet
+ STORAGE=${{ matrix.STORAGE }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --storage ${{ matrix.STORAGE }}
+ --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }}
+ --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Parquet ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ env.STORAGE }}-${{ inputs.arch }}-artifacts
+ path: ${{ env.artifact_paths }}
+
+
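+  # Long suites are split into numbered parts with a PART matrix; each
+  # part selects its slice of tests via the --only filter below.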
+ RBAC:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'rbac')
+ strategy:
+ fail-fast: false
+ matrix:
+ PART: [1, 2, 3]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=rbac
+ PART=${{ matrix.PART }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.PART }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ --only "/rbac/part ${{ matrix.PART }}/*"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} RBAC ${{ matrix.PART }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.PART }}-${{ inputs.arch }}-artifacts
+          path: ${{ env.artifact_paths }}
+
+  SSLServer:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'ssl_server')
+ strategy:
+ fail-fast: false
+ matrix:
+ PART: [1, 2, 3]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=ssl_server
+ PART=${{ matrix.PART }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.PART }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ --only "part ${{ matrix.PART }}/*"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+          SUITE_NAME: "Regression ${{ inputs.arch }} SSL Server ${{ matrix.PART }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.PART }}-${{ inputs.arch }}-artifacts
+          path: ${{ env.artifact_paths }}
+
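+  # The include: entry adds one extra matrix cell (minio, part 3) on top
+  # of the full STORAGE x PART cross product.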
+ S3:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 's3')
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [aws_s3, gcs, azure, minio]
+ PART: [1, 2]
+ include:
+ - STORAGE: minio
+ PART: 3
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=s3
+ PART=${{ matrix.PART }}
+ STORAGE=/${{ matrix.STORAGE }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --storage ${{ matrix.STORAGE }}
+ --gcs-uri ${{ secrets.REGRESSION_GCS_URI }}
+ --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }}
+ --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }}
+ --aws-s3-bucket ${{ secrets.REGRESSION_AWS_S3_BUCKET }}
+ --aws-s3-region ${{ secrets.REGRESSION_AWS_S3_REGION }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --azure-account-name ${{ secrets.AZURE_ACCOUNT_NAME }}
+ --azure-storage-key ${{ secrets.AZURE_STORAGE_KEY }}
+ --azure-container ${{ secrets.AZURE_CONTAINER_NAME }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }}-${{ matrix.PART }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ --only ":/try*" ":/part ${{ matrix.PART }}/*"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} S3 ${{ matrix.STORAGE }}-${{ matrix.PART }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.STORAGE }}-${{ matrix.PART }}-${{ inputs.arch }}-artifacts
+          path: ${{ env.artifact_paths }}
+
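+  # For non-local backends the format() expression below appends a
+  # --with-<storage> flag; plain 'local' runs add no extra flag.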
+ TieredStorage:
+ if: |
+ fromJson(inputs.workflow_config).custom_data.ci_regression_jobs[0] == null ||
+ contains(fromJson(inputs.workflow_config).custom_data.ci_regression_jobs, 'tiered_storage')
+ strategy:
+ fail-fast: false
+ matrix:
+ STORAGE: [local, minio, s3amazon, s3gcs]
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
+ timeout-minutes: ${{ inputs.timeout_minutes }}
+ steps:
+ - name: Checkout regression repo
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/clickhouse-regression
+ ref: ${{ inputs.commit }}
+ - name: Set envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ REPORTS_PATH=${{ runner.temp }}/reports_dir
+ SUITE=tiered_storage
+ STORAGE=/${{ matrix.STORAGE }}
+ EOF
+ - name: Setup
+ run: .github/setup.sh
+ - name: Get deb url
+ env:
+ S3_BASE_URL: https://altinity-build-artifacts.s3.amazonaws.com/
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ run: |
+ mkdir -p $REPORTS_PATH
+ cat > $REPORTS_PATH/workflow_config.json << 'EOF'
+ ${{ inputs.workflow_config }}
+ EOF
+
+ python3 .github/get-deb-url.py --github-env $GITHUB_ENV --workflow-config $REPORTS_PATH/workflow_config.json --s3-base-url $S3_BASE_URL --pr-number $PR_NUMBER --branch-name ${{ github.ref_name }} --commit-hash ${{ inputs.build_sha }} --binary
+
+ - name: Run ${{ env.SUITE }} suite
+ id: run_suite
+ run: EXITCODE=0;
+ python3
+ -u ${{ env.SUITE }}/regression.py
+ --clickhouse-binary-path ${{ env.clickhouse_path }}
+ --aws-s3-access-key ${{ secrets.REGRESSION_AWS_S3_SECRET_ACCESS_KEY }}
+ --aws-s3-key-id ${{ secrets.REGRESSION_AWS_S3_KEY_ID }}
+          --aws-s3-uri https://s3.${{ secrets.REGRESSION_AWS_S3_REGION }}.amazonaws.com/${{ secrets.REGRESSION_AWS_S3_BUCKET }}/data/
+ --gcs-key-id ${{ secrets.REGRESSION_GCS_KEY_ID }}
+ --gcs-key-secret ${{ secrets.REGRESSION_GCS_KEY_SECRET }}
+ --gcs-uri ${{ secrets.REGRESSION_GCS_URI }}
+ ${{ matrix.STORAGE != 'local' && format('--with-{0}', matrix.STORAGE) || '' }}
+ --attr project="$GITHUB_REPOSITORY" project.id="$GITHUB_REPOSITORY_ID" package="${{ env.clickhouse_path }}" version="${{ env.version }}" user.name="$GITHUB_ACTOR" repository="https://github.com/Altinity/clickhouse-regression" commit.hash="$(git rev-parse HEAD)" job.name="$GITHUB_JOB (${{ matrix.STORAGE }})" job.retry=$GITHUB_RUN_ATTEMPT job.url="$GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID" arch="$(uname -i)"
+ ${{ env.args }} || EXITCODE=$?;
+ .github/add_link_to_logs.sh;
+ exit $EXITCODE
+ - name: Set Commit Status
+ if: always()
+ run: python3 .github/set_builds_status.py
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ JOB_OUTCOME: ${{ steps.run_suite.outcome }}
+ SUITE_NAME: "Regression ${{ inputs.arch }} Tiered Storage ${{ matrix.STORAGE }}"
+ - name: Create and upload logs
+ if: always()
+ run: .github/create_and_upload_logs.sh 1
+ - name: Upload logs to regression results database
+ if: always()
+ timeout-minutes: 20
+ run: .github/upload_results_to_database.sh 1
+ - uses: actions/upload-artifact@v4
+ if: always()
+ with:
+ name: ${{ env.SUITE }}-${{ matrix.STORAGE }}-${{ inputs.arch }}-artifacts
+          path: ${{ env.artifact_paths }}
diff --git a/.github/workflows/release_branches.yml b/.github/workflows/release_branches.yml
index 9cefc82eddda..12e370e00207 100644
--- a/.github/workflows/release_branches.yml
+++ b/.github/workflows/release_branches.yml
@@ -3,13 +3,35 @@
name: ReleaseBranchCI
on:
+ workflow_dispatch:
+ inputs:
+ no_cache:
+ description: Run without cache
+ required: false
+ type: boolean
+ default: false
push:
branches: ['2[1-9].[1-9][0-9]', '2[1-9].[1-9]']
env:
# Force the stdout and stderr streams to be unbuffered
PYTHONUNBUFFERED: 1
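+  # The manual no_cache input maps onto DISABLE_CI_CACHE; push events
+  # default it to '0' (cache enabled).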
+ DISABLE_CI_CACHE: ${{ github.event.inputs.no_cache || '0' }}
CHECKOUT_REF: ""
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }}
+ AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }}
+ AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }}
+ AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/"
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+ GH_TOKEN: ${{ github.token }}
# Allow updating GH commit statuses and PR comments to post an actual job reports link
permissions: write-all
@@ -17,7 +39,7 @@ permissions: write-all
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: []
name: "Config Workflow"
outputs:
@@ -28,6 +50,26 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Config Workflow"
+
+ - name: Note report location to summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
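+          # PR runs are keyed by PR number; pushes fall back to the ref
+          # name, so reports land in separate S3 prefixes.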
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+ else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+ fi
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -54,7 +96,7 @@ jobs:
fi
dockers_build_amd:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow]
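+    # The base64 token in the cache check below is the job name
+    # ("Dockers Build (amd)" here); praktika records cache hits base64-encoded.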
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYW1kKQ==') }}
name: "Dockers Build (amd)"
@@ -66,6 +108,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (amd)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -92,7 +141,7 @@ jobs:
fi
dockers_build_arm:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VycyBCdWlsZCAoYXJtKQ==') }}
name: "Dockers Build (arm)"
@@ -104,6 +153,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (arm)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -130,7 +186,7 @@ jobs:
fi
build_amd_debug:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kZWJ1Zyk=') }}
name: "Build (amd_debug)"
@@ -142,6 +198,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -168,7 +231,7 @@ jobs:
fi
build_amd_release:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9yZWxlYXNlKQ==') }}
name: "Build (amd_release)"
@@ -180,6 +243,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -206,7 +276,7 @@ jobs:
fi
build_amd_asan:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9hc2FuKQ==') }}
name: "Build (amd_asan)"
@@ -218,6 +288,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_asan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -244,7 +321,7 @@ jobs:
fi
build_amd_tsan:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF90c2FuKQ==') }}
name: "Build (amd_tsan)"
@@ -256,6 +333,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_tsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -282,7 +366,7 @@ jobs:
fi
build_amd_msan:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9tc2FuKQ==') }}
name: "Build (amd_msan)"
@@ -294,6 +378,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -320,7 +411,7 @@ jobs:
fi
build_amd_ubsan:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF91YnNhbik=') }}
name: "Build (amd_ubsan)"
@@ -332,6 +423,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -358,7 +456,7 @@ jobs:
fi
build_arm_release:
- runs-on: [self-hosted, builder-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9yZWxlYXNlKQ==') }}
name: "Build (arm_release)"
@@ -370,43 +468,12 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_release)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Build (arm_release)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- build_arm_asan:
- runs-on: [self-hosted, builder-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9hc2FuKQ==') }}
- name: "Build (arm_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
with:
- ref: ${{ env.CHECKOUT_REF }}
+ test_name: "Build (arm_release)"
- name: Prepare env script
run: |
@@ -428,13 +495,13 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Build (arm_asan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Build (arm_release)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Build (arm_asan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Build (arm_release)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
fi
build_amd_darwin:
- runs-on: [self-hosted, builder]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFtZF9kYXJ3aW4p') }}
name: "Build (amd_darwin)"
@@ -446,6 +513,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_darwin)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -472,7 +546,7 @@ jobs:
fi
build_arm_darwin:
- runs-on: [self-hosted, builder-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
needs: [config_workflow, dockers_build_amd, dockers_build_arm]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'QnVpbGQgKGFybV9kYXJ3aW4p') }}
name: "Build (arm_darwin)"
@@ -484,6 +558,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_darwin)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -510,7 +591,7 @@ jobs:
fi
docker_server_image:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
name: "Docker server image"
@@ -522,6 +603,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Docker server image"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -548,7 +636,7 @@ jobs:
fi
docker_keeper_image:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
name: "Docker keeper image"
@@ -560,6 +648,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Docker keeper image"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -586,7 +681,7 @@ jobs:
fi
install_packages_amd_release:
- runs-on: [self-hosted, style-checker]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYW1kX3JlbGVhc2Up') }}
name: "Install packages (amd_release)"
@@ -598,6 +693,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Install packages (amd_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -624,7 +726,7 @@ jobs:
fi
install_packages_arm_release:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_release]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW5zdGFsbCBwYWNrYWdlcyAoYXJtX3JlbGVhc2Up') }}
name: "Install packages (arm_release)"
@@ -636,6 +738,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Install packages (arm_release)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -662,7 +771,7 @@ jobs:
fi
stateless_tests_amd_asan_distributed_plan_parallel_1_2:
- runs-on: [self-hosted, amd-medium-cpu]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDEvMik=') }}
name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
@@ -674,6 +783,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, parallel, 1/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -700,7 +816,7 @@ jobs:
fi
stateless_tests_amd_asan_distributed_plan_parallel_2_2:
- runs-on: [self-hosted, amd-medium-cpu]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgcGFyYWxsZWwsIDIvMik=') }}
name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
@@ -712,6 +828,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, parallel, 2/2)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -738,7 +861,7 @@ jobs:
fi
stateless_tests_amd_asan_distributed_plan_sequential:
- runs-on: [self-hosted, amd-small-mem]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RhdGVsZXNzIHRlc3RzIChhbWRfYXNhbiwgZGlzdHJpYnV0ZWQgcGxhbiwgc2VxdWVudGlhbCk=') }}
name: "Stateless tests (amd_asan, distributed plan, sequential)"
@@ -750,6 +873,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_asan, distributed plan, sequential)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -776,7 +906,7 @@ jobs:
fi
integration_tests_amd_asan_1_4:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCAxLzQp') }}
name: "Integration tests (amd_asan, 1/4)"
@@ -788,6 +918,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, 1/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -814,7 +951,7 @@ jobs:
fi
integration_tests_amd_asan_2_4:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCAyLzQp') }}
name: "Integration tests (amd_asan, 2/4)"
@@ -826,6 +963,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, 2/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -852,7 +996,7 @@ jobs:
fi
integration_tests_amd_asan_3_4:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCAzLzQp') }}
name: "Integration tests (amd_asan, 3/4)"
@@ -864,6 +1008,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, 3/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -890,7 +1041,7 @@ jobs:
fi
integration_tests_amd_asan_4_4:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCA0LzQp') }}
name: "Integration tests (amd_asan, 4/4)"
@@ -902,6 +1053,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, 4/4)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -928,7 +1086,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_1_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDEvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 1/6)"
@@ -940,6 +1098,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 1/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -966,7 +1131,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_2_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDIvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 2/6)"
@@ -978,6 +1143,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 2/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1004,7 +1176,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_3_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDMvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 3/6)"
@@ -1016,6 +1188,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 3/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1042,7 +1221,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_4_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDQvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 4/6)"
@@ -1054,6 +1233,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 4/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1080,7 +1266,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_5_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDUvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 5/6)"
@@ -1092,6 +1278,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 5/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1118,7 +1311,7 @@ jobs:
fi
integration_tests_amd_asan_old_analyzer_6_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_asan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF9hc2FuLCBvbGQgYW5hbHl6ZXIsIDYvNik=') }}
name: "Integration tests (amd_asan, old analyzer, 6/6)"
@@ -1130,6 +1323,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_asan, old analyzer, 6/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1156,7 +1356,7 @@ jobs:
fi
integration_tests_amd_tsan_1_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAxLzYp') }}
name: "Integration tests (amd_tsan, 1/6)"
@@ -1168,6 +1368,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 1/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1194,7 +1401,7 @@ jobs:
fi
integration_tests_amd_tsan_2_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAyLzYp') }}
name: "Integration tests (amd_tsan, 2/6)"
@@ -1206,6 +1413,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 2/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1232,7 +1446,7 @@ jobs:
fi
integration_tests_amd_tsan_3_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCAzLzYp') }}
name: "Integration tests (amd_tsan, 3/6)"
@@ -1244,6 +1458,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 3/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1270,7 +1491,7 @@ jobs:
fi
integration_tests_amd_tsan_4_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA0LzYp') }}
name: "Integration tests (amd_tsan, 4/6)"
@@ -1282,6 +1503,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 4/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1308,7 +1536,7 @@ jobs:
fi
integration_tests_amd_tsan_5_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA1LzYp') }}
name: "Integration tests (amd_tsan, 5/6)"
@@ -1320,6 +1548,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 5/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1346,7 +1581,7 @@ jobs:
fi
integration_tests_amd_tsan_6_6:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'SW50ZWdyYXRpb24gdGVzdHMgKGFtZF90c2FuLCA2LzYp') }}
name: "Integration tests (amd_tsan, 6/6)"
@@ -1358,6 +1593,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Integration tests (amd_tsan, 6/6)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1384,7 +1626,7 @@ jobs:
fi
stress_test_amd_debug:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9kZWJ1Zyk=') }}
name: "Stress test (amd_debug)"
@@ -1396,6 +1638,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_debug)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1422,7 +1671,7 @@ jobs:
fi
stress_test_amd_tsan:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_tsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF90c2FuKQ==') }}
name: "Stress test (amd_tsan)"
@@ -1434,43 +1683,12 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
- - name: Prepare env script
- run: |
- rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
- mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
- cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
- export PYTHONPATH=./ci:.:
- cat > ./ci/tmp/workflow_config_releasebranchci.json << 'EOF'
- ${{ needs.config_workflow.outputs.data }}
- EOF
- cat > ./ci/tmp/workflow_status.json << 'EOF'
- ${{ toJson(needs) }}
- EOF
- ENV_SETUP_SCRIPT_EOF
-
- - name: Run
- id: run
- run: |
- . ./ci/tmp/praktika_setup_env.sh
- set -o pipefail
- if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (amd_tsan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
- else
- python3 -m praktika run 'Stress test (amd_tsan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
- fi
-
- stress_test_arm_asan:
- runs-on: [self-hosted, arm-medium]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_asan]
- if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFybV9hc2FuKQ==') }}
- name: "Stress test (arm_asan)"
- outputs:
- data: ${{ steps.run.outputs.DATA }}
- steps:
- - name: Checkout code
- uses: actions/checkout@v4
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
with:
- ref: ${{ env.CHECKOUT_REF }}
+ test_name: "Stress test (amd_tsan)"
- name: Prepare env script
run: |
@@ -1492,13 +1710,13 @@ jobs:
. ./ci/tmp/praktika_setup_env.sh
set -o pipefail
if command -v ts &> /dev/null; then
- python3 -m praktika run 'Stress test (arm_asan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_tsan)' --workflow "ReleaseBranchCI" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
else
- python3 -m praktika run 'Stress test (arm_asan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
+ python3 -m praktika run 'Stress test (amd_tsan)' --workflow "ReleaseBranchCI" --ci |& tee ./ci/tmp/job.log
fi
stress_test_amd_ubsan:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_ubsan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF91YnNhbik=') }}
name: "Stress test (amd_ubsan)"
@@ -1510,6 +1728,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_ubsan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1536,7 +1761,7 @@ jobs:
fi
stress_test_amd_msan:
- runs-on: [self-hosted, amd-medium]
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_msan]
if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'U3RyZXNzIHRlc3QgKGFtZF9tc2FuKQ==') }}
name: "Stress test (amd_msan)"
@@ -1548,6 +1773,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stress test (amd_msan)"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -1574,8 +1806,8 @@ jobs:
fi
finish_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
- needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_arm_release, build_arm_asan, build_amd_darwin, build_arm_darwin, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, integration_tests_amd_asan_1_4, integration_tests_amd_asan_2_4, integration_tests_amd_asan_3_4, integration_tests_amd_asan_4_4, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, stress_test_amd_debug, stress_test_amd_tsan, stress_test_arm_asan, stress_test_amd_ubsan, stress_test_amd_msan]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_arm_release, build_amd_darwin, build_arm_darwin, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, stateless_tests_amd_asan_distributed_plan_parallel_1_2, stateless_tests_amd_asan_distributed_plan_parallel_2_2, stateless_tests_amd_asan_distributed_plan_sequential, integration_tests_amd_asan_1_4, integration_tests_amd_asan_2_4, integration_tests_amd_asan_3_4, integration_tests_amd_asan_4_4, integration_tests_amd_asan_old_analyzer_1_6, integration_tests_amd_asan_old_analyzer_2_6, integration_tests_amd_asan_old_analyzer_3_6, integration_tests_amd_asan_old_analyzer_4_6, integration_tests_amd_asan_old_analyzer_5_6, integration_tests_amd_asan_old_analyzer_6_6, integration_tests_amd_tsan_1_6, integration_tests_amd_tsan_2_6, integration_tests_amd_tsan_3_6, integration_tests_amd_tsan_4_6, integration_tests_amd_tsan_5_6, integration_tests_amd_tsan_6_6, stress_test_amd_debug, stress_test_amd_tsan, stress_test_amd_ubsan, stress_test_amd_msan]
if: ${{ !cancelled() }}
name: "Finish Workflow"
outputs:
@@ -1586,6 +1818,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Finish Workflow"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
diff --git a/.github/workflows/release_builds.yml b/.github/workflows/release_builds.yml
new file mode 100644
index 000000000000..00bfb1015b15
--- /dev/null
+++ b/.github/workflows/release_builds.yml
@@ -0,0 +1,1245 @@
+# generated by praktika
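+# Regenerate this file with praktika rather than editing it by hand;
+# the job definitions live under ci/defs/.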
+
+name: Release Builds
+on:
+ workflow_dispatch:
+ inputs:
+
+env:
+ PYTHONUNBUFFERED: 1
+ CHECKOUT_REF: ""
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ CHECKS_DATABASE_HOST: ${{ secrets.CHECKS_DATABASE_HOST }}
+ AZURE_STORAGE_KEY: ${{ secrets.AZURE_STORAGE_KEY }}
+ AZURE_ACCOUNT_NAME: ${{ secrets.AZURE_ACCOUNT_NAME }}
+ AZURE_CONTAINER_NAME: ${{ secrets.AZURE_CONTAINER_NAME }}
+ AZURE_STORAGE_ACCOUNT_URL: "https://${{ secrets.AZURE_ACCOUNT_NAME }}.blob.core.windows.net/"
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+ GH_TOKEN: ${{ github.token }}
+
+
+jobs:
+
+ config_workflow:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ needs: []
+ name: "Config Workflow"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Config Workflow"
+
+ - name: Note report location to summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+ else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+ fi
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
+ mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Config Workflow' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Config Workflow' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ dockers_build_amd:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ needs: [config_workflow]
+ name: "Dockers Build (amd)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (amd)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
+ mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Dockers Build (amd)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Dockers Build (amd)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ dockers_build_arm:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ needs: [config_workflow]
+ name: "Dockers Build (arm)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Dockers Build (arm)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
+ mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Dockers Build (arm)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Dockers Build (arm)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ build_amd_debug:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary]
+ name: "Build (amd_debug)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_debug)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
+ mkdir -p ./ci/tmp ./ci/tmp ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Build (amd_debug)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Build (amd_debug)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ build_amd_release:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm]
+ name: "Build (amd_release)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_release)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Build (amd_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Build (amd_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ build_amd_asan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary]
+ name: "Build (amd_asan)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_asan)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Build (amd_asan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Build (amd_asan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ build_amd_tsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary]
+ name: "Build (amd_tsan)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_tsan)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Build (amd_tsan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Build (amd_tsan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ build_amd_msan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary]
+ name: "Build (amd_msan)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_msan)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Build (amd_msan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Build (amd_msan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ build_amd_ubsan:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary, build_arm_binary]
+ name: "Build (amd_ubsan)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_ubsan)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Build (amd_ubsan)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Build (amd_ubsan)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ build_amd_binary:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm]
+ name: "Build (amd_binary)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (amd_binary)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Build (amd_binary)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Build (amd_binary)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ build_arm_release:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm]
+ name: "Build (arm_release)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_release)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Build (arm_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Build (arm_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ build_arm_binary:
+ runs-on: [self-hosted, altinity-on-demand, altinity-builder]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm]
+ name: "Build (arm_binary)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Build (arm_binary)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Build (arm_binary)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Build (arm_binary)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ docker_server_image:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release]
+ name: "Docker server image"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Docker server image"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Docker server image' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Docker server image' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ docker_keeper_image:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release, build_arm_release]
+ name: "Docker keeper image"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Docker keeper image"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Docker keeper image' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Docker keeper image' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ install_packages_amd_release:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_release]
+ name: "Install packages (amd_release)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Install packages (amd_release)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Install packages (amd_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Install packages (amd_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ install_packages_arm_release:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_release]
+ name: "Install packages (arm_release)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Install packages (arm_release)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Install packages (arm_release)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Install packages (arm_release)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary]
+ name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, parallel)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary]
+ name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Stateless tests (amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary]
+ name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, parallel)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_binary]
+ name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Stateless tests (amd_binary, ParallelReplicas, s3 storage, sequential)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ stateless_tests_arm_binary_parallel:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_binary]
+ name: "Stateless tests (arm_binary, parallel)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (arm_binary, parallel)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Stateless tests (arm_binary, parallel)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ stateless_tests_arm_binary_sequential:
+ runs-on: [self-hosted, altinity-on-demand, altinity-func-tester-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_arm_binary]
+ name: "Stateless tests (arm_binary, sequential)"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Stateless tests (arm_binary, sequential)"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Stateless tests (arm_binary, sequential)' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+ finish_workflow:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ needs: [config_workflow, dockers_build_amd, dockers_build_arm, build_amd_debug, build_amd_release, build_amd_asan, build_amd_tsan, build_amd_msan, build_amd_ubsan, build_amd_binary, build_arm_release, build_arm_binary, docker_server_image, docker_keeper_image, install_packages_amd_release, install_packages_arm_release, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel, stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential, stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel, stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential, stateless_tests_arm_binary_parallel, stateless_tests_arm_binary_sequential]
+ if: ${{ !cancelled() }}
+ name: "Finish Workflow"
+ outputs:
+ data: ${{ steps.run.outputs.DATA }}
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ env.CHECKOUT_REF }}
+
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Finish Workflow"
+
+ - name: Prepare env script
+ run: |
+ rm -rf ./ci/tmp
+ mkdir -p ./ci/tmp
+ cat > ./ci/tmp/praktika_setup_env.sh << 'ENV_SETUP_SCRIPT_EOF'
+ export PYTHONPATH=./ci:.:
+ cat > ./ci/tmp/workflow_inputs.json << 'EOF'
+ ${{ toJson(github.event.inputs) }}
+ EOF
+ cat > ./ci/tmp/workflow_config_release_builds.json << 'EOF'
+ ${{ needs.config_workflow.outputs.data }}
+ EOF
+ cat > ./ci/tmp/workflow_status.json << 'EOF'
+ ${{ toJson(needs) }}
+ EOF
+ ENV_SETUP_SCRIPT_EOF
+
+ - name: Run
+ id: run
+ run: |
+ . ./ci/tmp/praktika_setup_env.sh
+ set -o pipefail
+ if command -v ts &> /dev/null; then
+ python3 -m praktika run 'Finish Workflow' --workflow "Release Builds" --ci |& ts '[%Y-%m-%d %H:%M:%S]' | tee ./ci/tmp/job.log
+ else
+ python3 -m praktika run 'Finish Workflow' --workflow "Release Builds" --ci |& tee ./ci/tmp/job.log
+ fi
+
+##########################################################################################
+##################################### ALTINITY JOBS ######################################
+##########################################################################################
+
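+# The cache_success_base64 checks below compare against base64-encoded job names:
+# 'RG9ja2VyIHNlcnZlciBpbWFnZQ==' is "Docker server image" and
+# 'RG9ja2VyIGtlZXBlciBpbWFnZQ==' is "Docker keeper image". A hit means the image
+# job was restored from cache rather than rebuilt, so the scan is skipped.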
+ GrypeScanServer:
+ needs: [config_workflow, docker_server_image]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIHNlcnZlciBpbWFnZQ==') }}
+ strategy:
+ fail-fast: false
+ matrix:
+ suffix: ['', '-alpine']
+ uses: ./.github/workflows/grype_scan.yml
+ secrets: inherit
+ with:
+ docker_image: altinityinfra/clickhouse-server
+ version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+ tag-suffix: ${{ matrix.suffix }}
+ GrypeScanKeeper:
+ needs: [config_workflow, docker_keeper_image]
+ if: ${{ !failure() && !cancelled() && !contains(fromJson(needs.config_workflow.outputs.data).cache_success_base64, 'RG9ja2VyIGtlZXBlciBpbWFnZQ==') }}
+ uses: ./.github/workflows/grype_scan.yml
+ secrets: inherit
+ with:
+ docker_image: altinityinfra/clickhouse-keeper
+ version: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+
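+ # GPG-sign the release packages for each architecture via the reusable signing workflow.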
+ SignRelease:
+ needs: [config_workflow, build_amd_release]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_sign.yml
+ secrets: inherit
+ with:
+ test_name: Sign release
+ runner_type: altinity-style-checker
+ data: ${{ needs.config_workflow.outputs.data }}
+ SignAarch64:
+ needs: [config_workflow, build_arm_release]
+ if: ${{ !failure() && !cancelled() }}
+ uses: ./.github/workflows/reusable_sign.yml
+ secrets: inherit
+ with:
+ test_name: Sign aarch64
+ runner_type: altinity-style-checker-aarch64
+ data: ${{ needs.config_workflow.outputs.data }}
+
+ FinishCIReport:
+ if: ${{ !cancelled() }}
+ needs:
+ - config_workflow
+ - dockers_build_amd
+ - dockers_build_arm
+ - build_amd_debug
+ - build_amd_release
+ - build_amd_asan
+ - build_amd_tsan
+ - build_amd_msan
+ - build_amd_ubsan
+ - build_amd_binary
+ - build_arm_release
+ - build_arm_binary
+ - docker_server_image
+ - docker_keeper_image
+ - install_packages_amd_release
+ - install_packages_arm_release
+ - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_parallel
+ - stateless_tests_amd_binary_old_analyzer_s3_storage_databasereplicated_sequential
+ - stateless_tests_amd_binary_parallelreplicas_s3_storage_parallel
+ - stateless_tests_amd_binary_parallelreplicas_s3_storage_sequential
+ - stateless_tests_arm_binary_parallel
+ - stateless_tests_arm_binary_sequential
+ - finish_workflow
+ - GrypeScanServer
+ - GrypeScanKeeper
+ - SignRelease
+ - SignAarch64
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ - name: Finalize workflow report
+ if: ${{ !cancelled() }}
+ uses: ./.github/actions/create_workflow_report
+ with:
+ workflow_config: ${{ needs.config_workflow.outputs.data }}
+ final: true
+
+ SourceUpload:
+ needs: [config_workflow, build_amd_release]
+ if: ${{ !failure() && !cancelled() }}
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
+ env:
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ VERSION: ${{ fromJson(needs.config_workflow.outputs.data).custom_data.version.string }}
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ ref: ${{ fromJson(needs.config_workflow.outputs.data).git_ref }}
+ submodules: true
+ fetch-depth: 0
+ filter: tree:0
+ - name: Install aws cli
+ uses: unfor19/install-aws-cli-action@v1
+ with:
+ version: 2
+ arch: arm64
+ - name: Create source tar
+ run: |
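+ # The repo is checked out as ./ClickHouse, so archive it from the parent
+ # directory to keep paths in the tarball rooted at ClickHouse/.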
+ cd .. && tar czf $RUNNER_TEMP/build_source.src.tar.gz ClickHouse/
+ - name: Upload source tar
+ run: |
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ S3_PATH="REFs/$GITHUB_REF_NAME/$COMMIT_SHA/build_amd_release"
+ else
+ S3_PATH="PRs/$PR_NUMBER/$COMMIT_SHA/build_amd_release"
+ fi
+
+ aws s3 cp $RUNNER_TEMP/build_source.src.tar.gz s3://altinity-build-artifacts/$S3_PATH/clickhouse-$VERSION.src.tar.gz
diff --git a/.github/workflows/repo-sanity-checks.yml b/.github/workflows/repo-sanity-checks.yml
new file mode 100644
index 000000000000..ec50a056b730
--- /dev/null
+++ b/.github/workflows/repo-sanity-checks.yml
@@ -0,0 +1,150 @@
+name: Repository Sanity Checks
+
+on:
+ workflow_dispatch: # Manual trigger
+
+ workflow_call:
+
+jobs:
+ sanity-checks:
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker]
+ strategy:
+ fail-fast: false # Continue with other combinations if one fails
+ matrix:
+ include:
+ # Production packages
+ - env: prod
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/apt-repo
+ - env: prod
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/yum-repo
+ # FIPS Production packages
+ - env: prod-fips
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/fips-apt-repo
+ - env: prod-fips
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/fips-yum-repo
+ # Staging packages
+ - env: staging
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/apt-repo
+ - env: staging
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/yum-repo
+ # FIPS Staging packages
+ - env: staging-fips
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/fips-apt-repo
+ - env: staging-fips
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/fips-yum-repo
+ # Hotfix packages
+ - env: hotfix
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/hotfix-apt-repo
+ - env: hotfix
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/hotfix-yum-repo
+ # Antalya experimental packages
+ - env: antalya
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.altinity.cloud/antalya-apt-repo
+ - env: antalya
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.altinity.cloud/antalya-yum-repo
+ # Hotfix staging packages
+ - env: hotfix-staging
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/hotfix-apt-repo
+ - env: hotfix-staging
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/hotfix-yum-repo
+ # Antalya experimental staging packages
+ - env: antalya-staging
+ type: deb
+ base: ubuntu:22.04
+ repo_url: https://builds.staging.altinity.cloud/antalya-apt-repo
+ - env: antalya-staging
+ type: rpm
+ base: centos:8
+ repo_url: https://builds.staging.altinity.cloud/antalya-yum-repo
+
+ steps:
+ - name: Run sanity check
+ run: |
+ cat << 'EOF' > sanity.sh
+ #!/bin/bash
+ set -e -x
+
+ # Package installation commands based on type
+ if [ "${{ matrix.type }}" = "deb" ]; then
+ export DEBIAN_FRONTEND=noninteractive
+ apt-get update && apt-get install -y apt-transport-https ca-certificates curl gnupg2 dialog sudo
+ mkdir -p /usr/share/keyrings
+ curl -s "${REPO_URL}/pubkey.gpg" | gpg --dearmor > /usr/share/keyrings/altinity-archive-keyring.gpg
+ echo "deb [signed-by=/usr/share/keyrings/altinity-archive-keyring.gpg] ${REPO_URL} stable main" > /etc/apt/sources.list.d/altinity.list
+ apt-get update
+ apt-get install -y clickhouse-server clickhouse-client
+ else
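+ # centos:8 is EOL; repoint yum from mirror.centos.org to vault.centos.org
+ # so package installation still resolves.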
+ sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*
+ sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
+ yum install -y curl gnupg2 sudo
+ if [[ "${{ matrix.env }}" == *"staging"* ]]; then
+ curl "${REPO_URL}/altinity-staging.repo" -o /etc/yum.repos.d/altinity-staging.repo
+ else
+ curl "${REPO_URL}/altinity.repo" -o /etc/yum.repos.d/altinity.repo
+ fi
+ yum install -y clickhouse-server clickhouse-client
+ fi
+
+ # Ensure correct ownership
+ chown -R clickhouse /var/lib/clickhouse/
+ chown -R clickhouse /var/log/clickhouse-server/
+
+ # Check server version
+ server_version=$(clickhouse-server --version)
+ echo "$server_version" | grep "altinity" || FAILED_SERVER=true
+
+ # Start server and test
+ sudo -u clickhouse clickhouse-server --config-file /etc/clickhouse-server/config.xml --daemon
+ sleep 10
+ clickhouse-client -q 'SELECT 1'
+
+ # Check client version
+ client_version=$(clickhouse-client --version)
+ echo "$client_version" | grep "altinity" || FAILED_CLIENT=true
+
+ # Report results
+ if [ "$FAILED_SERVER" = true ]; then
+ echo "::error::Server check failed - Version: $server_version"
+ exit 1
+ elif [ "$FAILED_CLIENT" = true ]; then
+ echo "::error::Client check failed - Version: $client_version"
+ exit 1
+ else
+ echo "All checks passed successfully!"
+ fi
+ EOF
+
+ chmod +x sanity.sh
+ docker run --rm \
+ -v $(pwd)/sanity.sh:/sanity.sh \
+ -e REPO_URL="${{ matrix.repo_url }}" \
+ ${{ matrix.base }} \
+ /sanity.sh
diff --git a/.github/workflows/reusable_sign.yml b/.github/workflows/reusable_sign.yml
new file mode 100644
index 000000000000..7bfed2758359
--- /dev/null
+++ b/.github/workflows/reusable_sign.yml
@@ -0,0 +1,166 @@
+name: Signing workflow
+'on':
+ workflow_call:
+ inputs:
+ test_name:
+ description: the value of test type from tests/ci/ci_config.py, ends up as $CHECK_NAME ENV
+ required: true
+ type: string
+ runner_type:
+ description: the label of runner to use
+ required: true
+ type: string
+ run_command:
+ description: the command to launch the check
+ default: ""
+ required: false
+ type: string
+ checkout_depth:
+ description: the value of the git shallow checkout
+ required: false
+ type: number
+ default: 1
+ submodules:
+ description: if the submodules should be checked out
+ required: false
+ type: boolean
+ default: false
+ additional_envs:
+ description: additional ENV variables to setup the job
+ type: string
+ data:
+ description: ci data
+ type: string
+ required: true
+ working-directory:
+ description: sets custom working directory
+ type: string
+ default: "$GITHUB_WORKSPACE/tests/ci"
+ secrets:
+ secret_envs:
+ description: if given, it's passed to the environments
+ required: false
+ AWS_SECRET_ACCESS_KEY:
+ description: the access key to the aws param store.
+ required: true
+ AWS_ACCESS_KEY_ID:
+ description: the access key id to the aws param store.
+ required: true
+ GPG_BINARY_SIGNING_KEY:
+ description: gpg signing key for packages.
+ required: true
+ GPG_BINARY_SIGNING_PASSPHRASE:
+ description: gpg signing key passphrase.
+ required: true
+
+env:
+ # Force the stdout and stderr streams to be unbuffered
+ PYTHONUNBUFFERED: 1
+ CHECK_NAME: ${{inputs.test_name}}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_DEFAULT_REGION: ${{ secrets.AWS_DEFAULT_REGION }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ CLICKHOUSE_TEST_STAT_LOGIN: ${{ secrets.CLICKHOUSE_TEST_STAT_LOGIN }}
+ CLICKHOUSE_TEST_STAT_PASSWORD: ${{ secrets.CLICKHOUSE_TEST_STAT_PASSWORD }}
+ CLICKHOUSE_TEST_STAT_URL: ${{ secrets.CLICKHOUSE_TEST_STAT_URL }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+ ROBOT_TOKEN: ${{ secrets.ROBOT_TOKEN }}
+
+jobs:
+ runner_labels_setup:
+ name: Compute proper runner labels for the rest of the jobs
+ runs-on: ubuntu-latest
+ outputs:
+ runner_labels: ${{ steps.setVariables.outputs.runner_labels }}
+ steps:
+ - id: setVariables
+ name: Prepare runner_labels variables for the later steps
+ run: |
+
+ # Prepend self-hosted
+ input="self-hosted, altinity-on-demand, ${input}"
+
+ # Remove all whitespace
+ input="$(echo ${input} | tr -d '[:space:]')"
+ # Make something like a JSON array from comma-separated list
+ input="[ '${input//\,/\'\, \'}' ]"
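+ # e.g. runner_type "altinity-style-checker" becomes
+ # [ 'self-hosted', 'altinity-on-demand', 'altinity-style-checker' ]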
+
+ echo "runner_labels=$input" >> ${GITHUB_OUTPUT}
+ env:
+ input: ${{ inputs.runner_type }}
+
+ Test:
+ needs: [runner_labels_setup]
+ runs-on: ${{ fromJson(needs.runner_labels_setup.outputs.runner_labels) }}
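+ # When the check is split into batches, "-<batch>" is appended to the job name.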
+ name: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
+ env:
+ GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 && format('-{0}',matrix.batch) || '' }}
+ strategy:
+ fail-fast: false # we always wait for entire matrix
+ matrix:
+ batch: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].batches }}
+ steps:
+ - name: Check out repository code
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
+ with:
+ clear-repository: true
+ ref: ${{ fromJson(inputs.data).git_ref }}
+ submodules: ${{inputs.submodules}}
+ fetch-depth: ${{inputs.checkout_depth}}
+ filter: tree:0
+ - name: Set build envs
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ CHECK_NAME=${{ inputs.test_name }}
+ ${{inputs.additional_envs}}
+ ${{secrets.secret_envs}}
+ DOCKER_TAG<<DOCKER_JSON
+ ${{ toJson(fromJson(inputs.data).docker_data.images) }}
+ DOCKER_JSON
+ EOF
+ - name: Setup batch
+ if: ${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches > 1 }}
+ run: |
+ cat >> "$GITHUB_ENV" << 'EOF'
+ RUN_BY_HASH_NUM=${{matrix.batch}}
+ RUN_BY_HASH_TOTAL=${{ fromJson(inputs.data).jobs_data.jobs_params[inputs.test_name].num_batches }}
+ EOF
+ - name: Pre run
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --pre --job-name '${{inputs.test_name}}'
+ - name: Sign release
+ env:
+ GPG_BINARY_SIGNING_KEY: ${{ secrets.GPG_BINARY_SIGNING_KEY }}
+ GPG_BINARY_SIGNING_PASSPHRASE: ${{ secrets.GPG_BINARY_SIGNING_PASSPHRASE }}
+ run: |
+ cd "${{ inputs.working-directory }}"
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" \
+ --infile ${{ toJson(inputs.data) }} \
+ --job-name '${{inputs.test_name}}' \
+ --run \
+ --force \
+ --run-command '''python3 sign_release.py'''
+ - name: Post run
+ if: ${{ !cancelled() }}
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --post --job-name '${{inputs.test_name}}'
+ - name: Mark as done
+ if: ${{ !cancelled() }}
+ run: |
+ python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --infile ${{ toJson(inputs.data) }} --mark-success --job-name '${{inputs.test_name}}' --batch ${{matrix.batch}}
+ - name: Upload signed hashes
+ uses: actions/upload-artifact@v4
+ with:
+ name: ${{inputs.test_name}} signed-hashes
+ path: ${{ env.TEMP_PATH }}/*.gpg
+ - name: Clean
+ if: always()
+ uses: ./.github/actions/clean
diff --git a/.github/workflows/reusable_simple_job.yml b/.github/workflows/reusable_simple_job.yml
index 247569c4f527..c13b6c88027e 100644
--- a/.github/workflows/reusable_simple_job.yml
+++ b/.github/workflows/reusable_simple_job.yml
@@ -66,7 +66,7 @@ jobs:
GITHUB_JOB_OVERRIDDEN: ${{inputs.test_name}}
steps:
- name: Check out repository code
- uses: ClickHouse/checkout@v1
+ uses: Altinity/checkout@19599efdf36c4f3f30eb55d5bb388896faea69f6
with:
clear-repository: true
ref: ${{ inputs.git_ref }}
diff --git a/.github/workflows/scheduled_runs.yml b/.github/workflows/scheduled_runs.yml
new file mode 100644
index 000000000000..9069ea7685f2
--- /dev/null
+++ b/.github/workflows/scheduled_runs.yml
@@ -0,0 +1,55 @@
+name: Scheduled Altinity Stable Builds
+
+on:
+ schedule:
+ - cron: '0 0 * * 6' # Weekly run for stable versions
+ - cron: '0 0 * * *' # Daily run for antalya versions
+ # Make sure that any changes to this file are actually tested with PRs
+ pull_request:
+ types:
+ - synchronize
+ - reopened
+ - opened
+ paths:
+ - '**/scheduled_runs.yml'
+
+jobs:
+ DailyRuns:
+ strategy:
+ fail-fast: false
+ matrix:
+ branch:
+ - antalya
+ name: ${{ matrix.branch }}
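+ # github.event.schedule is empty on pull_request events, so PRs run this
+ # job too; only the weekly cron is excluded here.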
+ if: github.event.schedule != '0 0 * * 6'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Run ${{ matrix.branch }} workflow
+ run: |
+ curl -L \
+ -X POST \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.TOKEN }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/Altinity/ClickHouse/actions/workflows/release_branches.yml/dispatches \
+ -d '{"ref":"${{ matrix.branch }}"}'
+
+ WeeklyRuns:
+ strategy:
+ fail-fast: false
+ matrix:
+ branch:
+ - customizations/24.8.14
+ name: ${{ matrix.branch }}
+ if: github.event.schedule != '0 0 * * *'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Run ${{ matrix.branch }} workflow
+ run: |
+ curl -L \
+ -X POST \
+ -H "Accept: application/vnd.github+json" \
+ -H "Authorization: Bearer ${{ secrets.TOKEN }}" \
+ -H "X-GitHub-Api-Version: 2022-11-28" \
+ https://api.github.com/repos/Altinity/ClickHouse/actions/workflows/release_branches.yml/dispatches \
+ -d '{"ref":"${{ matrix.branch }}"}'
diff --git a/.github/workflows/sign_and_release.yml b/.github/workflows/sign_and_release.yml
new file mode 100644
index 000000000000..f5a48dee97f5
--- /dev/null
+++ b/.github/workflows/sign_and_release.yml
@@ -0,0 +1,567 @@
+name: Sign and Release packages
+
+on:
+ workflow_dispatch:
+ inputs:
+ workflow_url:
+ description: 'The URL to the workflow run that produced the packages'
+ required: true
+ release_environment:
+ description: 'The environment to release to. "staging" or "production"'
+ required: true
+ default: 'staging'
+ package_version:
+ description: 'The version of the package to release'
+ required: true
+ type: string
+ GPG_PASSPHRASE:
+ description: 'GPG passphrase for signing (required for production releases)'
+ required: false
+ type: string
+
+env:
+ ARTIFACT_NAME: build_report_package_release
+ AWS_REGION: us-east-1
+ SRC_BUCKET: altinity-build-artifacts
+ S3_STORAGE_BUCKET: altinity-test-reports
+
+jobs:
+ extract-package-info:
+ runs-on: [altinity-style-checker-aarch64, altinity-on-demand]
+ outputs:
+ docker_version: ${{ env.DOCKER_VERSION }}-${{ env.PACKAGE_VERSION }}
+ commit_hash: ${{ env.COMMIT_HASH }}
+ folder_time: ${{ env.FOLDER_TIME }}
+ needs_binary_processing: ${{ env.NEEDS_BINARY_PROCESSING }}
+ package_version: ${{ env.PACKAGE_VERSION }}
+ src_dir: ${{ env.SRC_DIR }}
+ test_results_src: ${{ env.TEST_RESULTS_SRC }}
+ altinity_build_feature: ${{ env.ALTINITY_BUILD_FEATURE }}
+ repo_prefix: ${{ env.REPO_PREFIX }}
+ src_url: ${{ env.SRC_URL }}
+ dest_url: ${{ env.DEST_URL }}
+ steps:
+ - name: Validate inputs
+ run: |
+ if [ -z "${{ inputs.workflow_url }}" ]; then
+ echo "Error: workflow_url is required"
+ exit 1
+ fi
+ if [ -z "${{ inputs.package_version }}" ]; then
+ echo "Error: package_version is required"
+ exit 1
+ fi
+ if [ "${{ inputs.release_environment }}" != "staging" ] && [ "${{ inputs.release_environment }}" != "production" ]; then
+ echo "Error: release_environment must be either 'staging' or 'production'"
+ exit 1
+ fi
+
+ - name: Download artifact "${{ env.ARTIFACT_NAME }}"
+ run: |
+ run_id=$(echo "${{ inputs.workflow_url }}" | grep -oE '[0-9]+$')
+
+ # Get artifact ID
+ artifact_id=$(curl -s "https://api.github.com/repos/Altinity/ClickHouse/actions/runs/$run_id/artifacts" \
+ | jq '.artifacts[] | select(.name == "'"${{ env.ARTIFACT_NAME }}"'") | .id')
+
+ # Download artifact
+ curl -L -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
+ -o "${{ env.ARTIFACT_NAME }}" \
+ "https://api.github.com/repos/Altinity/ClickHouse/actions/artifacts/$artifact_id/zip"
+
+ - name: Unzip Artifact
+ run: |
+ unzip -o "${{ env.ARTIFACT_NAME }}" -d "artifact"
+
+ - name: Extract and Parse JSON File
+ run: |
+ cd artifact
+ JSON_FILE=$(ls | grep "build_report.*package_release\.json" | head -n 1)
+ if [ -z "$JSON_FILE" ]; then
+ echo "Error: No JSON file matching the pattern was found"
+ exit 1
+ fi
+ echo "Found JSON file: ${JSON_FILE}"
+
+ # Extract client URL
+ CLIENT_URL=$(jq -r '.build_urls[] | select(test("clickhouse-client-.*-amd64.tgz$"))' "$JSON_FILE")
+ if [ -z "$CLIENT_URL" ]; then
+ echo "Error: No matching client URL found in JSON"
+ exit 1
+ fi
+ echo "Found client URL: ${CLIENT_URL}"
+ echo "CLIENT_URL=$CLIENT_URL" >> $GITHUB_ENV
+
+ - name: Extract and Validate Package Information
+ run: |
+ # Define regex patterns
+ PR_REGEX="PRs/([^/]+)/([^/]+)/([^/]+)/clickhouse-client-([^-]+)-amd64.tgz"
+ NONPR_REGEX="s3.amazonaws.com/([^/]+)/([^/]+)/([^/]+)/([^/]+)/clickhouse-client-([^-]+)-amd64.tgz"
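+ # Hypothetical PR-pattern example: .../PRs/123/<sha>/package_release/clickhouse-client-24.8.14.10000.altinitystable-amd64.tgz
+ # -> PR 123, commit <sha>, package type "package_release", version "24.8.14.10000.altinitystable"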
+
+ # Extract information based on URL pattern
+ if [[ "$CLIENT_URL" =~ $PR_REGEX ]]; then
+ echo "Matched PR pattern"
+ PR_NUMBER="${BASH_REMATCH[1]}"
+ COMMIT_HASH="${BASH_REMATCH[2]}"
+ PACKAGE_TYPE="${BASH_REMATCH[3]}"
+ PACKAGE_VERSION="${BASH_REMATCH[4]}"
+ DOCKER_VERSION="${PR_NUMBER}"
+ TEST_RESULTS_SRC="${PR_NUMBER}"
+ SRC_DIR="PRs/${PR_NUMBER}"
+ elif [[ "$CLIENT_URL" =~ $NONPR_REGEX ]]; then
+ echo "Matched non-PR pattern"
+ BRANCH="${BASH_REMATCH[2]}"
+ COMMIT_HASH="${BASH_REMATCH[3]}"
+ PACKAGE_TYPE="${BASH_REMATCH[4]}"
+ PACKAGE_VERSION="${BASH_REMATCH[5]}"
+ DOCKER_VERSION="0"
+ TEST_RESULTS_SRC="0"
+ SRC_DIR="${BRANCH}"
+ else
+ echo "Error: The client URL did not match any expected pattern"
+ exit 1
+ fi
+
+ # Verify package version
+ if [ "$PACKAGE_VERSION" != "${{ inputs.package_version }}" ]; then
+ echo "Error: Extracted package version ($PACKAGE_VERSION) does not match input package version (${{ inputs.package_version }})"
+ exit 1
+ fi
+
+ # Extract major version and determine binary processing need
+ MAJOR_VERSION=$(echo "$PACKAGE_VERSION" | cut -d. -f1)
+ NEEDS_BINARY_PROCESSING=$([ "$MAJOR_VERSION" -ge 24 ] && echo "true" || echo "false")
+
+ # Extract feature and set repo prefix
+ ALTINITY_BUILD_FEATURE=$(echo "$PACKAGE_VERSION" | rev | cut -d. -f1 | rev)
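+ # rev | cut -d. -f1 | rev keeps the last dot-separated field,
+ # e.g. a hypothetical 24.8.14.10000.altinitystable -> altinitystable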
+ case "$ALTINITY_BUILD_FEATURE" in
+ "altinityhotfix") REPO_PREFIX="hotfix-" ;;
+ "altinityfips") REPO_PREFIX="fips-" ;;
+ "altinityantalya") REPO_PREFIX="antalya-" ;;
+ "altinitystable"|"altinitytest") REPO_PREFIX="" ;;
+ *)
+ echo "Error: Build feature not supported: ${ALTINITY_BUILD_FEATURE}"
+ exit 1
+ ;;
+ esac
+
+ # Generate folder time
+ FOLDER_TIME=$(date -u +"%Y-%m-%dT%H-%M-%S.%3N")
+
+ # Set all environment variables at once
+ {
+ echo "COMMIT_HASH=${COMMIT_HASH}"
+ echo "DOCKER_VERSION=${DOCKER_VERSION}"
+ echo "FOLDER_TIME=${FOLDER_TIME}"
+ echo "NEEDS_BINARY_PROCESSING=${NEEDS_BINARY_PROCESSING}"
+ echo "PACKAGE_VERSION=${PACKAGE_VERSION}"
+ echo "SRC_DIR=${SRC_DIR}"
+ echo "TEST_RESULTS_SRC=${TEST_RESULTS_SRC}"
+ echo "ALTINITY_BUILD_FEATURE=${ALTINITY_BUILD_FEATURE}"
+ echo "REPO_PREFIX=${REPO_PREFIX}"
+ echo "SRC_URL=s3://${SRC_BUCKET}/${SRC_DIR}/${COMMIT_HASH}"
+ echo "DEST_URL=s3://${S3_STORAGE_BUCKET}/builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}"
+ } >> $GITHUB_ENV
+
+ - name: Display Extracted Information
+ run: |
+ echo "Extracted information:"
+ echo "altinity_build_feature: ${ALTINITY_BUILD_FEATURE}"
+ echo "commit_hash: ${COMMIT_HASH}"
+ echo "docker_version: ${DOCKER_VERSION}"
+ echo "folder_time: ${FOLDER_TIME}"
+ echo "needs_binary_processing: ${NEEDS_BINARY_PROCESSING}"
+ echo "package_version: ${PACKAGE_VERSION}"
+ echo "repo_prefix: ${REPO_PREFIX}"
+ echo "src_bucket: ${SRC_BUCKET}"
+ echo "src_dir: ${SRC_DIR}"
+ echo "test_results_src: ${TEST_RESULTS_SRC}"
+ echo "src_url: ${SRC_URL}"
+ echo "dest_url: ${DEST_URL}"
+
+ - name: Install aws cli
+ if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }}
+ uses: unfor19/install-aws-cli-action@v1
+ with:
+ version: 2
+ arch: arm64
+
+ - name: Process ARM binary
+ if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }}
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ run: |
+ echo "Downloading clickhouse binary..."
+ if ! aws s3 cp "${SRC_URL}/package_aarch64/clickhouse" clickhouse; then
+ echo "Failed to download clickhouse binary"
+ exit 1
+ fi
+ chmod +x clickhouse
+
+ echo "Running clickhouse binary..."
+ ./clickhouse -q'q'
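+ # Running the binary once lets a self-extracting build unpack itself in place
+ # (assumption, consistent with the non-self-extracting/ upload path below);
+ # the trivial query itself is expected to do nothing useful.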
+
+ echo "Stripping the binary..."
+ strip clickhouse -o clickhouse-stripped
+
+ echo "Uploading processed binaries..."
+ if ! aws s3 cp clickhouse "${SRC_URL}/package_aarch64/arm-bin/non-self-extracting/"; then
+ echo "Failed to upload clickhouse binary"
+ exit 1
+ fi
+ if ! aws s3 cp clickhouse-stripped "${SRC_URL}/package_aarch64/arm-bin/non-self-extracting/"; then
+ echo "Failed to upload stripped clickhouse binary"
+ exit 1
+ fi
+
+ copy-packages:
+ needs: extract-package-info
+ runs-on: [altinity-func-tester, altinity-on-demand]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ SRC_URL: ${{ needs.extract-package-info.outputs.src_url }}
+ DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }}
+ NEEDS_BINARY_PROCESSING: ${{ needs.extract-package-info.outputs.needs_binary_processing }}
+ steps:
+ - name: Install aws cli
+ uses: unfor19/install-aws-cli-action@v1
+ with:
+ version: 2
+ arch: amd64
+
+ # - name: Download signed hash artifacts
+ # run: |
+ # run_id=$(echo "${{ inputs.workflow_url }}" | grep -oE '[0-9]+$')
+ # mkdir -p signed-hashes/amd64 signed-hashes/arm64
+
+ # # Download AMD64 hashes
+ # artifact_id=$(curl -s \
+ # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
+ # -H "Accept: application/vnd.github.v3+json" \
+ # "https://api.github.com/repos/Altinity/ClickHouse/actions/runs/$run_id/artifacts?per_page=1000" \
+ # | jq -r --arg NAME "Sign release signed-hashes" '.artifacts[] | select(.name == $NAME) | .id')
+ # if [ -z "$artifact_id" ] || [ "$artifact_id" == "null" ]; then
+ # echo "Error: Could not find artifact 'Sign release signed-hashes' for run $run_id"
+ # exit 1
+ # fi
+ # if ! curl -L \
+ # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
+ # -H "Accept: application/vnd.github.v3+json" \
+ # -o "signed-hashes/amd64/hashes.zip" \
+ # "https://api.github.com/repos/Altinity/ClickHouse/actions/artifacts/$artifact_id/zip"; then
+ # echo "Error: Failed to download AMD64 hashes"
+ # exit 1
+ # fi
+ # unzip -o "signed-hashes/amd64/hashes.zip" -d signed-hashes/amd64
+
+ # # Download ARM64 hashes
+ # artifact_id=$(curl -s \
+ # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
+ # -H "Accept: application/vnd.github.v3+json" \
+ # "https://api.github.com/repos/Altinity/ClickHouse/actions/runs/$run_id/artifacts?per_page=1000" \
+ # | jq -r --arg NAME "Sign aarch64 signed-hashes" '.artifacts[] | select(.name == $NAME) | .id')
+ # if [ -z "$artifact_id" ] || [ "$artifact_id" == "null" ]; then
+ # echo "Error: Could not find artifact 'Sign aarch64 signed-hashes' for run $run_id"
+ # exit 1
+ # fi
+ # if ! curl -L \
+ # -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" \
+ # -H "Accept: application/vnd.github.v3+json" \
+ # -o "signed-hashes/arm64/hashes.zip" \
+ # "https://api.github.com/repos/Altinity/ClickHouse/actions/artifacts/$artifact_id/zip"; then
+ # echo "Error: Failed to download ARM64 hashes"
+ # exit 1
+ # fi
+ # unzip -o "signed-hashes/arm64/hashes.zip" -d signed-hashes/arm64
+
+ # - name: Download packages for verification
+ # run: |
+ # # Create temporary directories for downloaded packages
+ # mkdir -p /tmp/arm_packages /tmp/amd_packages
+
+ # # Download ARM packages
+ # echo "Downloading ARM packages for verification..."
+ # if ! aws s3 sync "${SRC_URL}/package_aarch64/" /tmp/arm_packages; then
+ # echo "Failed to download ARM packages"
+ # exit 1
+ # fi
+
+ # # Download AMD packages
+ # echo "Downloading AMD packages for verification..."
+ # if ! aws s3 sync "${SRC_URL}/package_release/" /tmp/amd_packages; then
+ # echo "Failed to download AMD packages"
+ # exit 1
+ # fi
+
+ # - name: Verify ARM packages
+ # run: |
+ # cd signed-hashes/arm64
+ # # Verify all files
+ # find /tmp/arm_packages -type f | while read -r file; do
+ # if [ -f "$file" ]; then
+ # file_name=$(basename "$file")
+ # echo "Verifying $file_name..."
+
+ # if ! gpg --verify "$file_name.sha256.gpg" 2>/dev/null; then
+ # echo "GPG verification failed for $file_name"
+ # exit 1
+ # fi
+ # if ! sha256sum -c "$file_name.sha256.gpg" 2>/dev/null; then
+ # echo "SHA256 verification failed for $file_name"
+ # exit 1
+ # fi
+ # fi
+ # done
+
+ # - name: Verify AMD packages
+ # run: |
+ # cd signed-hashes/amd64
+ # # Verify all files
+ # find /tmp/amd_packages -type f | while read -r file; do
+ # if [ -f "$file" ]; then
+ # file_name=$(basename "$file")
+ # echo "Verifying $file_name..."
+
+ # if ! gpg --verify "$file_name.sha256.gpg" 2>/dev/null; then
+ # echo "GPG verification failed for $file_name"
+ # exit 1
+ # fi
+ # if ! sha256sum -c "$file_name.sha256.gpg" 2>/dev/null; then
+ # echo "SHA256 verification failed for $file_name"
+ # exit 1
+ # fi
+ # fi
+ # done
+
+ - name: Move verified packages to destination
+ run: |
+ # Move ARM packages
+ echo "Moving verified ARM packages to destination..."
+ if ! aws s3 cp "${SRC_URL}/package_aarch64/" "${DEST_URL}/packages/ARM_PACKAGES/" --recursive; then
+ echo "Failed to move ARM packages to destination"
+ exit 1
+ fi
+
+ # Move AMD packages
+ echo "Moving verified AMD packages to destination..."
+ if ! aws s3 cp "${SRC_URL}/package_release/" "${DEST_URL}/packages/AMD_PACKAGES/" --recursive; then
+ echo "Failed to move AMD packages to destination"
+ exit 1
+ fi
+
+ # Clean up temporary directories
+ rm -rf /tmp/arm_packages /tmp/amd_packages
+
+ - name: Separate ARM binary
+ run: |
+ aws s3 mv "${DEST_URL}/packages/ARM_PACKAGES/clickhouse" "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/clickhouse"
+ aws s3 mv "${DEST_URL}/packages/ARM_PACKAGES/clickhouse-stripped" "${DEST_URL}/packages/ARM_PACKAGES/arm-bin/clickhouse-stripped"
+
+ - name: Separate AMD binary
+ run: |
+ aws s3 mv "${DEST_URL}/packages/AMD_PACKAGES/clickhouse" "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse"
+ aws s3 mv "${DEST_URL}/packages/AMD_PACKAGES/clickhouse-stripped" "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse-stripped"
+
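+      # When extract-package-info flags binary processing, a non-self-extracting
+      # variant is also published: the self-extracting binary is run once
+      # (decompressing it), stripped, and both copies are uploaded under
+      # amd-bin/non-self-extracting/.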
+ - name: Process AMD binary
+ if: ${{ env.NEEDS_BINARY_PROCESSING == 'true' }}
+ run: |
+ echo "Downloading clickhouse binary..."
+ if ! aws s3 cp "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/clickhouse" clickhouse; then
+ echo "Failed to download clickhouse binary"
+ exit 1
+ fi
+ chmod +x clickhouse
+
+ echo "Running clickhouse binary..."
+ ./clickhouse -q'q'
+
+ echo "Stripping the binary..."
+ strip clickhouse -o clickhouse-stripped
+
+ echo "Uploading processed binaries..."
+ if ! aws s3 cp clickhouse "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/non-self-extracting/"; then
+ echo "Failed to upload clickhouse binary"
+ exit 1
+ fi
+ if ! aws s3 cp clickhouse-stripped "${DEST_URL}/packages/AMD_PACKAGES/amd-bin/non-self-extracting/"; then
+ echo "Failed to upload stripped clickhouse binary"
+ exit 1
+ fi
+
+ copy-test-results:
+ needs: extract-package-info
+ runs-on: [altinity-style-checker-aarch64, altinity-on-demand]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ TEST_RESULTS_SRC: ${{ needs.extract-package-info.outputs.test_results_src }}
+ COMMIT_HASH: ${{ needs.extract-package-info.outputs.commit_hash }}
+ DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }}
+ steps:
+ - name: Install aws cli
+ uses: unfor19/install-aws-cli-action@v1
+ with:
+ version: 2
+ arch: arm64
+ - name: Copy test results to S3
+ run: |
+ # Copy test results
+ echo "Copying test results..."
+ if ! aws s3 sync "s3://${SRC_BUCKET}/${TEST_RESULTS_SRC}/${COMMIT_HASH}" \
+ "${DEST_URL}/test_results/"; then
+ echo "Failed to copy test results"
+ exit 1
+ fi
+
+ # publish-docker:
+ # needs: extract-package-info
+ # strategy:
+ # matrix:
+ # image_type: [server, keeper]
+ # variant: ['', '-alpine']
+ # uses: ./.github/workflows/docker_publish.yml
+ # with:
+ # docker_image: altinityinfra/clickhouse-${{ matrix.image_type }}:${{ needs.extract-package-info.outputs.docker_version }}${{ matrix.variant }}
+ # release_environment: ${{ inputs.release_environment }}
+ # upload_artifacts: false
+ # s3_upload_path: "${{ needs.extract-package-info.outputs.dest_url }}/docker_images/${{ matrix.image_type }}${{ matrix.variant }}/"
+ # secrets: inherit
+
+ sign-and-publish:
+ needs: [extract-package-info, copy-packages]
+ runs-on: arc-runners-clickhouse-signer
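+    # Repo endpoint, repo name, and GPG passphrase are selected per
+    # release_environment (production vs. staging) below.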
+ env:
+ GPG_PASSPHRASE: ${{ inputs.release_environment == 'production' && inputs.GPG_PASSPHRASE || secrets.GPG_PASSPHRASE }}
+ REPO_DNS_NAME: ${{ inputs.release_environment == 'production' && 'builds.altinity.cloud' || 'builds.staging.altinity.cloud' }}
+ REPO_NAME: ${{ inputs.release_environment == 'production' && 'altinity' || 'altinity-staging' }}
+ REPO_SUBTITLE: ${{ inputs.release_environment == 'production' && 'Stable Builds' || 'Staging Builds' }}
+ PACKAGE_VERSION: ${{ needs.extract-package-info.outputs.package_version }}
+ FOLDER_TIME: ${{ needs.extract-package-info.outputs.folder_time }}
+ REPO_PREFIX: ${{ needs.extract-package-info.outputs.repo_prefix }}
+ NEEDS_BINARY_PROCESSING: ${{ needs.extract-package-info.outputs.needs_binary_processing }}
+ DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }}
+ RELEASE_ENVIRONMENT: ${{ inputs.release_environment }}
+ steps:
+ - name: Install aws cli
+ uses: unfor19/install-aws-cli-action@v1
+ with:
+ version: 2
+ arch: arm64
+
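+      # This checkout provides the packaging Ansible playbooks used in the
+      # signing steps below.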
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ repository: Altinity/ClickHouse
+ ref: antalya
+ path: ClickHouse
+
+ - name: Download packages
+ run: |
+ if ! aws s3 cp "${DEST_URL}/packages/ARM_PACKAGES/" /home/runner/.cache/tmp/packages --recursive; then
+ echo "Failed to download ARM packages"
+ exit 1
+ fi
+ if ! aws s3 cp "${DEST_URL}/packages/AMD_PACKAGES/" /home/runner/.cache/tmp/packages --recursive; then
+ echo "Failed to download AMD packages"
+ exit 1
+ fi
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+
+ - name: Setup GPG
+ run: |
+          if [ -z "${GPG_PASSPHRASE}" ]
+ then
+ echo "GPG_PASSPHRASE is not set"
+ exit 1
+ fi
+
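+      # Import the private signing key from AWS Secrets Manager, then preset its
+      # passphrase into gpg-agent so the Ansible playbook can sign packages
+      # non-interactively. The passphrase arrives base64-encoded and encrypted
+      # with a key-encrypting key fetched from SSM.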
+ - name: Process GPG key
+ run: |
+ echo "Processing GPG key..."
+ if ! aws secretsmanager get-secret-value --secret-id arn:aws:secretsmanager:us-east-1:446527654354:secret:altinity_staging_gpg-Rqbe8S --query SecretString --output text | sed -e "s/^'//" -e "s/'$//" | jq -r '.altinity_staging_gpg | @base64d' | gpg --batch --import; then
+ echo "Failed to import GPG key"
+ exit 1
+ fi
+ gpg --list-secret-keys --with-keygrip
+ gpgconf --kill gpg-agent
+ gpg-agent --daemon --allow-preset-passphrase
+ if ! aws ssm get-parameter --name /gitlab-runner/key-encrypting-key --with-decryption --query Parameter.Value --output text | sudo tee /root/.key-encrypting-key >/dev/null; then
+ echo "Failed to get key encrypting key"
+ exit 1
+ fi
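+          # Derive the signing key's uid and keygrip from the imported keyring.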
+          GPG_KEY_NAME=$(gpg --list-secret-keys | grep uid | head --lines 1 | tr -s " " | cut -d " " -f 4-)
+          GPG_KEY_ID=$(gpg --list-secret-keys --with-keygrip "${GPG_KEY_NAME}" | grep Keygrip | head --lines 1 | tr -s " " | cut -d " " -f 4)
+          echo "$GPG_PASSPHRASE" | base64 -d | sudo openssl enc -d -aes-256-cbc -pbkdf2 -pass file:/root/.key-encrypting-key -in - -out - | /usr/lib/gnupg/gpg-preset-passphrase --preset "$GPG_KEY_ID"
+          # Export for the following steps, which run in separate shells.
+          echo "GPG_KEY_NAME=$GPG_KEY_NAME" >> "$GITHUB_ENV"
+          echo "GPG_KEY_ID=$GPG_KEY_ID" >> "$GITHUB_ENV"
+
+ - name: Run Ansible playbook
+ run: |
+ echo "Running Ansible playbook for signing and publishing..."
+ echo "ansible-playbook -i ClickHouse/tests/ci/release/packaging/ansible/inventory/localhost.yml -e aws_region=$AWS_REGION -e gpg_key_id=\"$GPG_KEY_ID\" -e gpg_key_name=\"$GPG_KEY_NAME\" -e local_repo_path="/home/runner/.cache/${{ inputs.release_environment }}" -e pkgver=\"${PACKAGE_VERSION}\" -e release_environment=$RELEASE_ENVIRONMENT -e repo_dns_name=$REPO_DNS_NAME -e repo_name=$REPO_NAME -e repo_prefix=\"$REPO_PREFIX\" -e repo_subtitle=\"$REPO_SUBTITLE\" -e s3_pkgs_bucket=$S3_STORAGE_BUCKET -e s3_pkgs_path=\"builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}\" -e repo_path=\"/home/runner/.cache/${{ inputs.release_environment }}\" ClickHouse/tests/ci/release/packaging/ansible/sign-and-release.yml "
+ if ! ansible-playbook -i ClickHouse/tests/ci/release/packaging/ansible/inventory/localhost.yml \
+ -e aws_region=$AWS_REGION \
+ -e gpg_key_id="$GPG_KEY_ID" \
+ -e gpg_key_name="$GPG_KEY_NAME" \
+ -e local_repo_path="/home/runner/.cache/${{ inputs.release_environment }}" \
+ -e pkgver="${PACKAGE_VERSION}" \
+ -e release_environment=$RELEASE_ENVIRONMENT \
+ -e repo_dns_name=$REPO_DNS_NAME \
+ -e repo_name=$REPO_NAME \
+ -e repo_prefix="$REPO_PREFIX" \
+ -e repo_subtitle="$REPO_SUBTITLE" \
+ -e s3_pkgs_bucket=$S3_STORAGE_BUCKET \
+ -e s3_pkgs_path="builds/stable/v${PACKAGE_VERSION}/${FOLDER_TIME}" \
+ ClickHouse/tests/ci/release/packaging/ansible/sign-and-release.yml; then
+ echo "Ansible playbook failed"
+ exit 1
+ fi
+ gpgconf --kill gpg-agent
+ ls -hal
+
+ - name: Cleanup temporary files
+ if: always()
+ run: |
+ echo "Cleaning up temporary files..."
+ rm -rf /home/runner/.cache/tmp/packages || true
+
+ repo-sanity-check:
+ needs: sign-and-publish
+ uses: Altinity/ClickHouse/.github/workflows/repo-sanity-checks.yml@antalya
+
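+  # Production releases are additionally mirrored to the long-term
+  # builds/released/ prefix.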
+ copy-to-released:
+ needs: [sign-and-publish]
+ if: ${{ inputs.release_environment == 'production' }}
+ runs-on: [altinity-style-checker-aarch64, altinity-on-demand]
+ env:
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+ PACKAGE_VERSION: ${{ needs.extract-package-info.outputs.package_version }}
+ DEST_URL: ${{ needs.extract-package-info.outputs.dest_url }}
+ steps:
+ - name: Install aws cli
+ uses: unfor19/install-aws-cli-action@v1
+ with:
+ version: 2
+ arch: arm64
+
+ - name: Copy to released directory
+ run: |
+ echo "Copying to released directory..."
+ echo "Source: ${DEST_URL}/"
+ echo "Destination: s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/"
+
+ if ! aws s3 sync "${DEST_URL}/" "s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/" --no-progress; then
+ echo "Failed to copy to released directory"
+ exit 1
+ fi
+
+ echo "Verifying copy operation..."
+ if ! aws s3 ls "s3://${S3_STORAGE_BUCKET}/builds/released/v${PACKAGE_VERSION}/" | grep -q "packages"; then
+ echo "Error: Packages directory not found in destination"
+ exit 1
+ fi
diff --git a/.github/workflows/vectorsearchstress.yml b/.github/workflows/vectorsearchstress.yml
index 38ed3f423696..e3d123cf315f 100644
--- a/.github/workflows/vectorsearchstress.yml
+++ b/.github/workflows/vectorsearchstress.yml
@@ -16,7 +16,7 @@ env:
jobs:
config_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: []
name: "Config Workflow"
outputs:
@@ -27,6 +27,26 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Config Workflow"
+
+ - name: Note report location to summary
+ env:
+ PR_NUMBER: ${{ github.event.pull_request.number || 0 }}
+ COMMIT_SHA: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+ run: |
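+          # Report artifacts are keyed by PR number for pull requests,
+          # or by branch name otherwise.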
+ if [ "$PR_NUMBER" -eq 0 ]; then
+ PREFIX="REFs/$GITHUB_REF_NAME/$COMMIT_SHA"
+ else
+ PREFIX="PRs/$PR_NUMBER/$COMMIT_SHA"
+ fi
+ REPORT_LINK=https://s3.amazonaws.com/altinity-build-artifacts/$PREFIX/$GITHUB_RUN_ID/ci_run_report.html
+ echo "Workflow Run Report: [View Report]($REPORT_LINK)" >> $GITHUB_STEP_SUMMARY
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -53,7 +73,7 @@ jobs:
fi
vector_search_stress:
- runs-on: [self-hosted, arm-small]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow]
name: "Vector Search Stress"
outputs:
@@ -64,6 +84,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Vector Search Stress"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
@@ -90,7 +117,7 @@ jobs:
fi
finish_workflow:
- runs-on: [self-hosted, style-checker-aarch64]
+ runs-on: [self-hosted, altinity-on-demand, altinity-style-checker-aarch64]
needs: [config_workflow, vector_search_stress]
if: ${{ !cancelled() }}
name: "Finish Workflow"
@@ -102,6 +129,13 @@ jobs:
with:
ref: ${{ env.CHECKOUT_REF }}
+ - name: Setup
+ uses: ./.github/actions/runner_setup
+ - name: Docker setup
+ uses: ./.github/actions/docker_setup
+ with:
+ test_name: "Finish Workflow"
+
- name: Prepare env script
run: |
rm -rf ./ci/tmp ./ci/tmp ./ci/tmp
diff --git a/ci/defs/defs.py b/ci/defs/defs.py
index d3bf1fd567b3..53bee36e16e4 100644
--- a/ci/defs/defs.py
+++ b/ci/defs/defs.py
@@ -1,38 +1,47 @@
from praktika import Artifact, Docker, Job, Secret
from praktika.utils import MetaClasses, Utils
+from settings import altinity_overrides
# i.e. "ClickHouse/ci/tmp"
TEMP_DIR = f"{Utils.cwd()}/ci/tmp" # == _Settings.TEMP_DIR != env_helper.TEMP_PATH
-SYNC = "CH Inc sync"
+SYNC = "Altinity sync"
-S3_BUCKET_NAME = "clickhouse-builds"
-S3_REPORT_BUCKET_NAME = "clickhouse-test-reports"
-S3_BUCKET_HTTP_ENDPOINT = "clickhouse-builds.s3.amazonaws.com"
-S3_REPORT_BUCKET_HTTP_ENDPOINT = "s3.amazonaws.com/clickhouse-test-reports"
+S3_BUCKET_NAME = altinity_overrides.S3_BUCKET_NAME
+S3_REPORT_BUCKET_NAME = altinity_overrides.S3_REPORT_BUCKET_NAME
+S3_BUCKET_HTTP_ENDPOINT = altinity_overrides.S3_BUCKET_HTTP_ENDPOINT
+S3_REPORT_BUCKET_HTTP_ENDPOINT = altinity_overrides.S3_REPORT_BUCKET_HTTP_ENDPOINT
class RunnerLabels:
CI_SERVICES = "ci_services"
CI_SERVICES_EBS = "ci_services_ebs"
- BUILDER_AMD = ["self-hosted", "builder"]
- BUILDER_ARM = ["self-hosted", "builder-aarch64"]
- FUNC_TESTER_AMD = ["self-hosted", "amd-medium"]
- FUNC_TESTER_ARM = ["self-hosted", "arm-medium"]
- AMD_LARGE = ["self-hosted", "amd-large"]
- ARM_LARGE = ["self-hosted", "arm-large"]
- AMD_MEDIUM = ["self-hosted", "amd-medium"]
- ARM_MEDIUM = ["self-hosted", "arm-medium"]
- AMD_MEDIUM_CPU = ["self-hosted", "amd-medium-cpu"]
- ARM_MEDIUM_CPU = ["self-hosted", "arm-medium-cpu"]
- AMD_MEDIUM_MEM = ["self-hosted", "amd-medium-mem"]
- ARM_MEDIUM_MEM = ["self-hosted", "arm-medium-mem"]
- AMD_SMALL = ["self-hosted", "amd-small"]
- ARM_SMALL = ["self-hosted", "arm-small"]
- AMD_SMALL_MEM = ["self-hosted", "amd-small-mem"]
- ARM_SMALL_MEM = ["self-hosted", "arm-small-mem"]
- STYLE_CHECK_AMD = ["self-hosted", "style-checker"]
- STYLE_CHECK_ARM = ["self-hosted", "style-checker-aarch64"]
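+    # The upstream size-specific runner pools are collapsed onto shared
+    # Altinity on-demand runner labels.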
+ BUILDER_AMD = ["self-hosted", "altinity-on-demand", "altinity-builder"]
+ BUILDER_ARM = ["self-hosted", "altinity-on-demand", "altinity-builder"]
+ FUNC_TESTER_AMD = ["self-hosted", "altinity-on-demand", "altinity-func-tester"]
+ FUNC_TESTER_ARM = [
+ "self-hosted",
+ "altinity-on-demand",
+ "altinity-func-tester-aarch64",
+ ]
+ AMD_LARGE = ["self-hosted", "altinity-on-demand", "altinity-func-tester"]
+ ARM_LARGE = ["self-hosted", "altinity-on-demand", "altinity-func-tester-aarch64"]
+ AMD_MEDIUM = ["self-hosted", "altinity-on-demand", "altinity-func-tester"]
+ ARM_MEDIUM = ["self-hosted", "altinity-on-demand", "altinity-func-tester-aarch64"]
+ AMD_MEDIUM_CPU = ["self-hosted", "altinity-on-demand", "altinity-func-tester"]
+ ARM_MEDIUM_CPU = ["self-hosted", "altinity-on-demand", "altinity-func-tester-aarch64"]
+ AMD_MEDIUM_MEM = ["self-hosted", "altinity-on-demand", "altinity-func-tester"]
+ ARM_MEDIUM_MEM = ["self-hosted", "altinity-on-demand", "altinity-func-tester-aarch64"]
+ AMD_SMALL = ["self-hosted", "altinity-on-demand", "altinity-style-checker"]
+ ARM_SMALL = ["self-hosted", "altinity-on-demand", "altinity-style-checker-aarch64"]
+ AMD_SMALL_MEM = ["self-hosted", "altinity-on-demand", "altinity-style-checker"]
+ ARM_SMALL_MEM = ["self-hosted", "altinity-on-demand", "altinity-style-checker-aarch64"]
+ STYLE_CHECK_AMD = ["self-hosted", "altinity-on-demand", "altinity-style-checker"]
+ STYLE_CHECK_ARM = [
+ "self-hosted",
+ "altinity-on-demand",
+ "altinity-style-checker-aarch64",
+ ]
class CIFiles:
@@ -40,7 +49,7 @@ class CIFiles:
UNIT_TESTS_BIN = f"{TEMP_DIR}/build/src/unit_tests_dbms"
-BASE_BRANCH = "master"
+BASE_BRANCH = altinity_overrides.MAIN_BRANCH
azure_secret = Secret.Config(
name="azure_connection_string",
@@ -49,233 +58,241 @@ class CIFiles:
SECRETS = [
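+    # In the Altinity fork, CI secrets are read from GitHub Actions secrets
+    # rather than AWS SSM.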
Secret.Config(
- name="dockerhub_robot_password",
- type=Secret.Type.AWS_SSM_VAR,
+ name=altinity_overrides.DOCKERHUB_SECRET,
+ type=Secret.Type.GH_SECRET,
),
Secret.Config(
- name="clickhouse-test-stat-url",
- type=Secret.Type.AWS_SSM_VAR,
+ name=altinity_overrides.SECRET_CI_DB_URL,
+ type=Secret.Type.GH_SECRET,
),
Secret.Config(
- name="clickhouse-test-stat-login",
- type=Secret.Type.AWS_SSM_VAR,
+ name=altinity_overrides.SECRET_CI_DB_USER,
+ type=Secret.Type.GH_SECRET,
),
Secret.Config(
- name="clickhouse-test-stat-password",
- type=Secret.Type.AWS_SSM_VAR,
- ),
- azure_secret,
+ name=altinity_overrides.SECRET_CI_DB_PASSWORD,
+ type=Secret.Type.GH_SECRET,
+ ),
+ # azure_secret,
+ # Secret.Config(
+ # name="woolenwolf_gh_app.clickhouse-app-id",
+ # type=Secret.Type.AWS_SSM_SECRET,
+ # ),
+ # Secret.Config(
+ # name="woolenwolf_gh_app.clickhouse-app-key",
+ # type=Secret.Type.AWS_SSM_SECRET,
+ # ),
Secret.Config(
- name="woolenwolf_gh_app.clickhouse-app-id",
- type=Secret.Type.AWS_SSM_SECRET,
+ name="AWS_ACCESS_KEY_ID",
+ type=Secret.Type.GH_SECRET,
),
Secret.Config(
- name="woolenwolf_gh_app.clickhouse-app-key",
- type=Secret.Type.AWS_SSM_SECRET,
+ name="AWS_SECRET_ACCESS_KEY",
+ type=Secret.Type.GH_SECRET,
),
]
DOCKERS = [
Docker.Config(
- name="clickhouse/style-test",
+ name="altinityinfra/style-test",
path="./ci/docker/style-test",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/fasttest",
+ name="altinityinfra/fasttest",
path="./ci/docker/fasttest",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/binary-builder",
+ name="altinityinfra/binary-builder",
path="./ci/docker/binary-builder",
platforms=Docker.Platforms.arm_amd,
- depends_on=["clickhouse/fasttest"],
+ depends_on=["altinityinfra/fasttest"],
),
Docker.Config(
- name="clickhouse/test-old-centos",
+ name="altinityinfra/test-old-centos",
path="./ci/docker/compatibility/centos",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/test-old-ubuntu",
+ name="altinityinfra/test-old-ubuntu",
path="./ci/docker/compatibility/ubuntu",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/stateless-test",
+ name="altinityinfra/stateless-test",
path="./ci/docker/stateless-test",
platforms=Docker.Platforms.arm_amd,
- depends_on=["clickhouse/test-base"],
+ depends_on=["altinityinfra/test-base"],
),
Docker.Config(
- name="clickhouse/cctools",
+ name="altinityinfra/cctools",
path="./ci/docker/cctools",
platforms=Docker.Platforms.arm_amd,
- depends_on=["clickhouse/fasttest"],
+ depends_on=["altinityinfra/fasttest"],
),
Docker.Config(
- name="clickhouse/test-base",
+ name="altinityinfra/test-base",
path="./ci/docker/test-base",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/stress-test",
+ name="altinityinfra/stress-test",
path="./ci/docker/stress-test",
platforms=Docker.Platforms.arm_amd,
- depends_on=["clickhouse/stateless-test"],
+ depends_on=["altinityinfra/stateless-test"],
),
Docker.Config(
- name="clickhouse/fuzzer",
+ name="altinityinfra/fuzzer",
path="./ci/docker/fuzzer",
platforms=Docker.Platforms.arm_amd,
- depends_on=["clickhouse/test-base"],
+ depends_on=["altinityinfra/test-base"],
),
Docker.Config(
- name="clickhouse/performance-comparison",
+ name="altinityinfra/performance-comparison",
path="./ci/docker/performance-comparison",
platforms=Docker.Platforms.arm_amd,
- depends_on=["clickhouse/test-base"],
+ depends_on=["altinityinfra/test-base"],
),
Docker.Config(
- name="clickhouse/keeper-jepsen-test",
+ name="altinityinfra/keeper-jepsen-test",
path="./ci/docker/keeper-jepsen-test",
platforms=Docker.Platforms.arm_amd,
- depends_on=["clickhouse/test-base"],
+ depends_on=["altinityinfra/test-base"],
),
Docker.Config(
- name="clickhouse/server-jepsen-test",
+ name="altinityinfra/server-jepsen-test",
path="./ci/docker/server-jepsen-test",
platforms=Docker.Platforms.arm_amd,
- depends_on=["clickhouse/test-base"],
+ depends_on=["altinityinfra/test-base"],
),
Docker.Config(
- name="clickhouse/integration-test",
+ name="altinityinfra/integration-test",
path="./ci/docker/integration/base",
platforms=Docker.Platforms.arm_amd,
- depends_on=["clickhouse/test-base"],
+ depends_on=["altinityinfra/test-base"],
),
Docker.Config(
- name="clickhouse/integration-tests-runner",
+ name="altinityinfra/integration-tests-runner",
path="./ci/docker/integration/runner",
platforms=Docker.Platforms.arm_amd,
- depends_on=["clickhouse/test-base"],
+ depends_on=["altinityinfra/test-base"],
),
Docker.Config(
- name="clickhouse/integration-test-with-unity-catalog",
+ name="altinityinfra/integration-test-with-unity-catalog",
path="./ci/docker/integration/clickhouse_with_unity_catalog",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/integration-test-with-hms",
+ name="altinityinfra/integration-test-with-hms",
path="./ci/docker/integration/clickhouse_with_hms_catalog",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/integration-helper",
+ name="altinityinfra/integration-helper",
path="./ci/docker/integration/helper_container",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/kerberos-kdc",
+ name="altinityinfra/kerberos-kdc",
path="./ci/docker/integration/kerberos_kdc",
platforms=[Docker.Platforms.AMD],
depends_on=[],
),
Docker.Config(
- name="clickhouse/mysql-golang-client",
+ name="altinityinfra/mysql-golang-client",
path="./ci/docker/integration/mysql_golang_client",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/mysql-java-client",
+ name="altinityinfra/mysql-java-client",
path="./ci/docker/integration/mysql_java_client",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/mysql-js-client",
+ name="altinityinfra/mysql-js-client",
path="./ci/docker/integration/mysql_js_client",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/arrowflight-server-test",
+ name="altinityinfra/arrowflight-server-test",
path="./ci/docker/integration/arrowflight",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/dotnet-client",
+ name="altinityinfra/dotnet-client",
path="./ci/docker/integration/dotnet_client",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/mysql-php-client",
+ name="altinityinfra/mysql-php-client",
path="./ci/docker/integration/mysql_php_client",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/nginx-dav",
+ name="altinityinfra/nginx-dav",
path="./ci/docker/integration/nginx_dav",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/postgresql-java-client",
+ name="altinityinfra/postgresql-java-client",
path="./ci/docker/integration/postgresql_java_client",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/python-bottle",
+ name="altinityinfra/python-bottle",
path="./ci/docker/integration/resolver",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/s3-proxy",
+ name="altinityinfra/s3-proxy",
path="./ci/docker/integration/s3_proxy",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
+ # Docker.Config(
+ # name="clickhouse/docs-builder",
+ # path="./ci/docker/docs-builder",
+ # platforms=Docker.Platforms.arm_amd,
+ # depends_on=[],
+ # ),
Docker.Config(
- name="clickhouse/docs-builder",
- path="./ci/docker/docs-builder",
- platforms=Docker.Platforms.arm_amd,
- depends_on=[],
- ),
- Docker.Config(
- name="clickhouse/install-deb-test",
+ name="altinityinfra/install-deb-test",
path="./ci/docker/install/deb",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/install-rpm-test",
+ name="altinityinfra/install-rpm-test",
path="./ci/docker/install/rpm",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/sqlancer-test",
+ name="altinityinfra/sqlancer-test",
path="./ci/docker/sqlancer-test",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
),
Docker.Config(
- name="clickhouse/mysql_dotnet_client",
+ name="altinityinfra/mysql_dotnet_client",
path="./ci/docker/integration/mysql_dotnet_client",
platforms=Docker.Platforms.arm_amd,
depends_on=[],
@@ -351,12 +368,14 @@ class ToolSet:
class ArtifactNames:
CH_AMD_DEBUG = "CH_AMD_DEBUG"
CH_AMD_RELEASE = "CH_AMD_RELEASE"
+ CH_AMD_RELEASE_STRIPPED = "CH_AMD_RELEASE_STRIPPED"
CH_AMD_ASAN = "CH_AMD_ASAN"
CH_AMD_TSAN = "CH_AMD_TSAN"
CH_AMD_MSAN = "CH_AMD_MSAN"
CH_AMD_UBSAN = "CH_AMD_UBSAN"
CH_AMD_BINARY = "CH_AMD_BINARY"
CH_ARM_RELEASE = "CH_ARM_RELEASE"
+ CH_ARM_RELEASE_STRIPPED = "CH_ARM_RELEASE_STRIPPED"
CH_ARM_ASAN = "CH_ARM_ASAN"
CH_COV_BIN = "CH_COV_BIN"
@@ -430,6 +449,16 @@ class ArtifactConfigs:
ArtifactNames.CH_LOONGARCH64,
]
)
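+    # Stripped release binaries are published as their own S3 artifacts
+    # (CH_AMD_RELEASE_STRIPPED / CH_ARM_RELEASE_STRIPPED above).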
+ clickhouse_stripped_binaries = Artifact.Config(
+ name="...",
+ type=Artifact.Type.S3,
+ path=f"{TEMP_DIR}/build/programs/self-extracting/clickhouse-stripped",
+ ).parametrize(
+ names=[
+ ArtifactNames.CH_AMD_RELEASE_STRIPPED,
+ ArtifactNames.CH_ARM_RELEASE_STRIPPED,
+ ]
+ )
clickhouse_debians = Artifact.Config(
name="*",
type=Artifact.Type.S3,
diff --git a/ci/defs/job_configs.py b/ci/defs/job_configs.py
index 8cf465351790..31246fb445d4 100644
--- a/ci/defs/job_configs.py
+++ b/ci/defs/job_configs.py
@@ -16,7 +16,7 @@
"./programs",
"./rust",
"./ci/jobs/build_clickhouse.py",
- "./ci/jobs/scripts/job_hooks/build_profile_hook.py",
+ # "./ci/jobs/scripts/job_hooks/build_profile_hook.py",
"./utils/list-licenses",
],
with_git_submodules=True,
@@ -28,7 +28,7 @@
command='python3 ./ci/jobs/functional_tests.py --options "{PARAMETER}"',
# some tests can be flaky due to very slow disks - use tmpfs for temporary ClickHouse files
# --cap-add=SYS_PTRACE and --privileged for gdb in docker
- run_in_docker=f"clickhouse/stateless-test+--memory={LIMITED_MEM}+--cap-add=SYS_PTRACE+--privileged+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/etc/clickhouse-server1:/etc/clickhouse-server1+--volume=./ci/tmp/etc/clickhouse-server2:/etc/clickhouse-server2+--volume=./ci/tmp/var/log:/var/log",
+ run_in_docker="altinityinfra/stateless-test+--cap-add=SYS_PTRACE+--privileged+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/etc/clickhouse-server1:/etc/clickhouse-server1+--volume=./ci/tmp/etc/clickhouse-server2:/etc/clickhouse-server2+--volume=./ci/tmp/var/log:/var/log+--env=AZURE_STORAGE_KEY=$AZURE_STORAGE_KEY+--env=AZURE_ACCOUNT_NAME=$AZURE_ACCOUNT_NAME+--env=AZURE_CONTAINER_NAME=$AZURE_CONTAINER_NAME+--env=AZURE_STORAGE_ACCOUNT_URL=$AZURE_STORAGE_ACCOUNT_URL",
digest_config=Job.CacheDigestConfig(
include_paths=[
"./ci/jobs/functional_tests.py",
@@ -39,15 +39,17 @@
"./tests/config",
"./tests/*.txt",
"./ci/docker/stateless-test",
+ "./tests/broken_tests.yaml",
],
),
result_name_for_cidb="Tests",
)
BINARY_DOCKER_COMMAND = (
- "clickhouse/binary-builder+--network=host+"
+ "altinityinfra/binary-builder+--network=host+"
f"--memory={Utils.physical_memory() * 95 // 100}+"
f"--memory-reservation={Utils.physical_memory() * 9 // 10}"
+ '+--env=AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID"+--env=AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY"'
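+    # AWS credentials are forwarded so the containerized build can reach S3
+    # (e.g. for the sccache compiler cache configured via COMPILER_CACHE=sccache).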
)
@@ -56,7 +58,7 @@ class JobConfigs:
name=JobNames.STYLE_CHECK,
runs_on=RunnerLabels.STYLE_CHECK_ARM,
command="python3 ./ci/jobs/check_style.py",
- run_in_docker="clickhouse/style-test",
+ run_in_docker="altinityinfra/style-test",
enable_commit_status=True,
)
fast_test = Job.Config(
@@ -64,7 +66,7 @@ class JobConfigs:
runs_on=RunnerLabels.BUILDER_AMD,
command="python3 ./ci/jobs/fast_test.py",
# --network=host required for ec2 metadata http endpoint to work
- run_in_docker="clickhouse/fasttest+--network=host+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/var/log:/var/log",
+ run_in_docker='altinityinfra/fasttest+--network=host+--volume=./ci/tmp/var/lib/clickhouse:/var/lib/clickhouse+--volume=./ci/tmp/etc/clickhouse-client:/etc/clickhouse-client+--volume=./ci/tmp/etc/clickhouse-server:/etc/clickhouse-server+--volume=./ci/tmp/var/log:/var/log+--env=AWS_ACCESS_KEY_ID="$AWS_ACCESS_KEY_ID"+--env=AWS_SECRET_ACCESS_KEY="$AWS_SECRET_ACCESS_KEY"',
digest_config=Job.CacheDigestConfig(
include_paths=[
"./ci/jobs/fast_test.py",
@@ -124,7 +126,7 @@ class JobConfigs:
digest_config=build_digest_config,
post_hooks=[
"python3 ./ci/jobs/scripts/job_hooks/build_master_head_hook.py",
- "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py",
+ # "python3 ./ci/jobs/scripts/job_hooks/build_profile_hook.py",
],
).parametrize(
Job.ParamSet(
@@ -136,6 +138,7 @@ class JobConfigs:
parameter=BuildTypes.AMD_RELEASE,
provides=[
ArtifactNames.CH_AMD_RELEASE,
+ ArtifactNames.CH_AMD_RELEASE_STRIPPED,
ArtifactNames.DEB_AMD_RELEASE,
ArtifactNames.RPM_AMD_RELEASE,
ArtifactNames.TGZ_AMD_RELEASE,
@@ -187,20 +190,22 @@ class JobConfigs:
parameter=BuildTypes.ARM_RELEASE,
provides=[
ArtifactNames.CH_ARM_RELEASE,
+ ArtifactNames.CH_ARM_RELEASE_STRIPPED,
ArtifactNames.DEB_ARM_RELEASE,
ArtifactNames.RPM_ARM_RELEASE,
ArtifactNames.TGZ_ARM_RELEASE,
],
runs_on=RunnerLabels.BUILDER_ARM,
),
- Job.ParamSet(
- parameter=BuildTypes.ARM_ASAN,
- provides=[
- ArtifactNames.CH_ARM_ASAN,
- ArtifactNames.DEB_ARM_ASAN,
- ],
- runs_on=RunnerLabels.BUILDER_ARM,
- ),
+ # NOTE (strtgbb): This build is difficult to cross-compile
+ # Job.ParamSet(
+ # parameter=BuildTypes.ARM_ASAN,
+ # provides=[
+ # ArtifactNames.CH_ARM_ASAN,
+ # ArtifactNames.DEB_ARM_ASAN,
+ # ],
+ # runs_on=RunnerLabels.BUILDER_ARM,
+ # ),
Job.ParamSet(
parameter=BuildTypes.ARM_COVERAGE,
provides=[
@@ -300,7 +305,7 @@ class JobConfigs:
).parametrize(
Job.ParamSet(
parameter="amd_debug",
- runs_on=RunnerLabels.STYLE_CHECK_AMD,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[
ArtifactNames.DEB_AMD_DEBUG,
ArtifactNames.CH_AMD_DEBUG,
@@ -321,7 +326,7 @@ class JobConfigs:
).parametrize(
Job.ParamSet(
parameter="amd_release",
- runs_on=RunnerLabels.STYLE_CHECK_AMD,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[
ArtifactNames.DEB_AMD_RELEASE,
ArtifactNames.RPM_AMD_RELEASE,
@@ -331,7 +336,7 @@ class JobConfigs:
),
Job.ParamSet(
parameter="arm_release",
- runs_on=RunnerLabels.STYLE_CHECK_ARM,
+ runs_on=RunnerLabels.FUNC_TESTER_ARM,
requires=[
ArtifactNames.DEB_ARM_RELEASE,
ArtifactNames.RPM_ARM_RELEASE,
@@ -352,7 +357,7 @@ class JobConfigs:
runs_on=RunnerLabels.FUNC_TESTER_ARM,
command="python3 ./ci/jobs/functional_tests.py --options BugfixValidation",
# some tests can be flaky due to very slow disks - use tmpfs for temporary ClickHouse files
- run_in_docker="clickhouse/stateless-test+--network=host+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse",
+ run_in_docker="altinityinfra/stateless-test+--network=host+--security-opt seccomp=unconfined+--tmpfs /tmp/clickhouse",
digest_config=Job.CacheDigestConfig(
include_paths=[
"./ci/jobs/functional_tests.py",
@@ -376,7 +381,7 @@ class JobConfigs:
],
Job.ParamSet(
parameter="amd_asan, distributed plan, sequential",
- runs_on=RunnerLabels.AMD_SMALL_MEM,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_ASAN],
),
Job.ParamSet(
@@ -386,7 +391,7 @@ class JobConfigs:
),
Job.ParamSet(
parameter="amd_binary, old analyzer, s3 storage, DatabaseReplicated, sequential",
- runs_on=RunnerLabels.AMD_SMALL,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_BINARY],
),
Job.ParamSet(
@@ -396,7 +401,7 @@ class JobConfigs:
),
Job.ParamSet(
parameter="amd_binary, ParallelReplicas, s3 storage, sequential",
- runs_on=RunnerLabels.AMD_SMALL,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_BINARY],
),
Job.ParamSet(
@@ -406,7 +411,7 @@ class JobConfigs:
),
Job.ParamSet(
parameter="amd_debug, AsyncInsert, s3 storage, sequential",
- runs_on=RunnerLabels.AMD_SMALL,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_DEBUG],
),
Job.ParamSet(
@@ -416,13 +421,13 @@ class JobConfigs:
),
Job.ParamSet(
parameter="amd_debug, sequential",
- runs_on=RunnerLabels.AMD_SMALL,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_DEBUG],
),
*[
Job.ParamSet(
parameter=f"amd_tsan, parallel, {batch}/{total_batches}",
- runs_on=RunnerLabels.AMD_LARGE,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_TSAN],
)
for total_batches in (2,)
@@ -431,7 +436,7 @@ class JobConfigs:
*[
Job.ParamSet(
parameter=f"amd_tsan, sequential, {batch}/{total_batches}",
- runs_on=RunnerLabels.AMD_SMALL,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_TSAN],
)
for total_batches in (2,)
@@ -440,7 +445,7 @@ class JobConfigs:
*[
Job.ParamSet(
parameter=f"amd_msan, parallel, {batch}/{total_batches}",
- runs_on=RunnerLabels.AMD_LARGE,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_MSAN],
)
for total_batches in (2,)
@@ -449,7 +454,7 @@ class JobConfigs:
*[
Job.ParamSet(
parameter=f"amd_msan, sequential, {batch}/{total_batches}",
- runs_on=RunnerLabels.AMD_SMALL_MEM,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_MSAN],
)
for total_batches in (2,)
@@ -462,7 +467,7 @@ class JobConfigs:
),
Job.ParamSet(
parameter="amd_ubsan, sequential",
- runs_on=RunnerLabels.AMD_SMALL_MEM,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_UBSAN],
),
Job.ParamSet(
@@ -472,7 +477,7 @@ class JobConfigs:
),
Job.ParamSet(
parameter="amd_debug, distributed plan, s3 storage, sequential",
- runs_on=RunnerLabels.AMD_SMALL,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_DEBUG],
),
Job.ParamSet(
@@ -483,7 +488,7 @@ class JobConfigs:
*[
Job.ParamSet(
parameter=f"amd_tsan, s3 storage, sequential, {batch}/{total_batches}",
- runs_on=RunnerLabels.AMD_SMALL_MEM,
+ runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_TSAN],
)
for total_batches in (2,)
@@ -496,7 +501,7 @@ class JobConfigs:
),
Job.ParamSet(
parameter="arm_binary, sequential",
- runs_on=RunnerLabels.ARM_SMALL,
+ runs_on=RunnerLabels.FUNC_TESTER_ARM,
requires=[ArtifactNames.CH_ARM_BINARY],
),
)
@@ -510,7 +515,7 @@ class JobConfigs:
),
Job.ParamSet(
parameter=f"arm_coverage, sequential",
- runs_on=RunnerLabels.ARM_SMALL_MEM,
+ runs_on=RunnerLabels.FUNC_TESTER_ARM,
requires=[ArtifactNames.CH_COV_BIN],
),
)
@@ -537,7 +542,7 @@ class JobConfigs:
name=JobNames.UNITTEST,
runs_on=[], # from parametrize()
command=f"python3 ./ci/jobs/unit_tests_job.py",
- run_in_docker="clickhouse/fasttest+--privileged",
+ run_in_docker="altinityinfra/fasttest+--privileged",
digest_config=Job.CacheDigestConfig(
include_paths=["./ci/jobs/unit_tests_job.py"],
),
@@ -591,11 +596,11 @@ class JobConfigs:
runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=["Build (amd_tsan)"],
),
- Job.ParamSet(
- parameter="arm_asan",
- runs_on=RunnerLabels.FUNC_TESTER_ARM,
- requires=["Build (arm_asan)"],
- ),
+ # Job.ParamSet(
+ # parameter="arm_asan",
+ # runs_on=RunnerLabels.FUNC_TESTER_ARM,
+ # requires=["Build (arm_asan)"],
+ # ),
Job.ParamSet(
parameter="amd_ubsan",
runs_on=RunnerLabels.FUNC_TESTER_AMD,
@@ -681,6 +686,7 @@ class JobConfigs:
"./ci/jobs/scripts/integration_tests_runner.py",
"./tests/integration/",
"./ci/docker/integration",
+ "./tests/broken_tests.yaml",
],
),
).parametrize(
@@ -704,6 +710,7 @@ class JobConfigs:
"./ci/jobs/scripts/integration_tests_runner.py",
"./tests/integration/",
"./ci/docker/integration",
+ "./tests/broken_tests.yaml",
],
),
).parametrize(
@@ -745,6 +752,7 @@ class JobConfigs:
"./ci/jobs/scripts/integration_tests_runner.py",
"./tests/integration/",
"./ci/docker/integration",
+ "./tests/broken_tests.yaml",
],
),
allow_merge_on_failure=True,
@@ -769,6 +777,7 @@ class JobConfigs:
"./ci/jobs/scripts/integration_tests_runner.py",
"./tests/integration/",
"./ci/docker/integration",
+ "./tests/broken_tests.yaml",
],
),
requires=[ArtifactNames.CH_AMD_ASAN],
@@ -815,11 +824,11 @@ class JobConfigs:
runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_DEBUG],
),
- Job.ParamSet(
- parameter="arm_asan",
- runs_on=RunnerLabels.FUNC_TESTER_ARM,
- requires=[ArtifactNames.CH_ARM_ASAN],
- ),
+ # Job.ParamSet(
+ # parameter="arm_asan",
+ # runs_on=RunnerLabels.FUNC_TESTER_ARM,
+ # requires=[ArtifactNames.CH_ARM_ASAN],
+ # ),
Job.ParamSet(
parameter="amd_tsan",
runs_on=RunnerLabels.FUNC_TESTER_AMD,
@@ -854,11 +863,11 @@ class JobConfigs:
runs_on=RunnerLabels.FUNC_TESTER_AMD,
requires=[ArtifactNames.CH_AMD_DEBUG],
),
- Job.ParamSet(
- parameter="arm_asan",
- runs_on=RunnerLabels.FUNC_TESTER_ARM,
- requires=[ArtifactNames.CH_ARM_ASAN],
- ),
+ # Job.ParamSet(
+ # parameter="arm_asan",
+ # runs_on=RunnerLabels.FUNC_TESTER_ARM,
+ # requires=[ArtifactNames.CH_ARM_ASAN],
+ # ),
Job.ParamSet(
parameter="amd_tsan",
runs_on=RunnerLabels.FUNC_TESTER_AMD,
@@ -880,7 +889,7 @@ class JobConfigs:
runs_on=["#from param"],
command='python3 ./ci/jobs/performance_tests.py --test-options "{PARAMETER}"',
# TODO: switch to stateless-test image
- run_in_docker="clickhouse/performance-comparison",
+ run_in_docker="altinityinfra/performance-comparison",
digest_config=Job.CacheDigestConfig(
include_paths=[
"./tests/performance/",
@@ -949,7 +958,7 @@ class JobConfigs:
"./ci/jobs/scripts/functional_tests/setup_log_cluster.sh",
],
),
- run_in_docker="clickhouse/stateless-test+--shm-size=16g+--network=host",
+ run_in_docker="altinityinfra/stateless-test+--shm-size=16g+--network=host",
).parametrize(
Job.ParamSet(
parameter=BuildTypes.AMD_RELEASE,
@@ -974,7 +983,7 @@ class JobConfigs:
"CHANGELOG.md",
],
),
- run_in_docker="clickhouse/docs-builder",
+ run_in_docker="altinityinfra/docs-builder",
requires=[JobNames.STYLE_CHECK, ArtifactNames.CH_ARM_BINARY],
)
docker_sever = Job.Config(
@@ -987,6 +996,8 @@ class JobConfigs:
"tests/ci/docker_images_helper.py",
"./docker/server",
"./docker/keeper",
+ ".github/grype",
+ ".github/workflows/grype_scan.yml",
],
),
requires=["Build (amd_release)", "Build (arm_release)"],
@@ -1002,6 +1013,8 @@ class JobConfigs:
"tests/ci/docker_images_helper.py",
"./docker/server",
"./docker/keeper",
+ ".github/grype",
+ ".github/workflows/grype_scan.yml",
],
),
requires=["Build (amd_release)", "Build (arm_release)"],
@@ -1014,7 +1027,7 @@ class JobConfigs:
digest_config=Job.CacheDigestConfig(
include_paths=["./ci/jobs/sqlancer_job.sh", "./ci/docker/sqlancer-test"],
),
- run_in_docker="clickhouse/sqlancer-test",
+ run_in_docker="altinityinfra/sqlancer-test",
timeout=3600,
).parametrize(
Job.ParamSet(
@@ -1033,7 +1046,7 @@ class JobConfigs:
],
),
requires=[ArtifactNames.CH_ARM_RELEASE],
- run_in_docker="clickhouse/stateless-test",
+ run_in_docker="altinityinfra/stateless-test",
timeout=10800,
)
jepsen_keeper = Job.Config(
diff --git a/ci/docker/binary-builder/Dockerfile b/ci/docker/binary-builder/Dockerfile
index 90f438c9a077..00134c7a57ff 100644
--- a/ci/docker/binary-builder/Dockerfile
+++ b/ci/docker/binary-builder/Dockerfile
@@ -1,14 +1,14 @@
-# docker build -t clickhouse/binary-builder .
+# docker build -t altinityinfra/binary-builder .
ARG FROM_TAG
-FROM clickhouse/fasttest:$FROM_TAG
+FROM altinityinfra/fasttest:$FROM_TAG
ENV CC=clang-${LLVM_VERSION}
ENV CXX=clang++-${LLVM_VERSION}
# If the cctools is updated, then first build it in the CI, then update here in a different commit
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /cctools /cctools
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /cctools /cctools
# We need OpenSSL FIPS in permissive mode for build on MasterCI
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 \
/opt/openssl-fips/openssl.cnf \
/opt/openssl-fips/fipsmodule.cnf \
/opt/openssl-fips/fips.so \
diff --git a/ci/docker/cctools/Dockerfile b/ci/docker/cctools/Dockerfile
index 3a77e61187ab..b3bfab7a805c 100644
--- a/ci/docker/cctools/Dockerfile
+++ b/ci/docker/cctools/Dockerfile
@@ -1,10 +1,10 @@
-# docker build -t clickhouse/cctools .
+# docker build -t altinityinfra/cctools .
# This is a hack to significantly reduce the build time of the clickhouse/binary-builder
# It's based on the assumption that we don't care of the cctools version so much
# It even does not depend on the clickhouse/fasttest in the `docker/images.json`
ARG FROM_TAG=latest
-FROM clickhouse/fasttest:$FROM_TAG AS builder
+FROM altinityinfra/fasttest:$FROM_TAG AS builder
ENV CC=clang-${LLVM_VERSION}
ENV CXX=clang++-${LLVM_VERSION}
diff --git a/ci/docker/compatibility/centos/Dockerfile b/ci/docker/compatibility/centos/Dockerfile
index 628609e374f6..1edb42422b1f 100644
--- a/ci/docker/compatibility/centos/Dockerfile
+++ b/ci/docker/compatibility/centos/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/test-old-centos .
+# docker build -t altinityinfra/test-old-centos .
FROM centos:5
CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \
diff --git a/ci/docker/compatibility/ubuntu/Dockerfile b/ci/docker/compatibility/ubuntu/Dockerfile
index ddd0a76bd446..0eb283ff3daf 100644
--- a/ci/docker/compatibility/ubuntu/Dockerfile
+++ b/ci/docker/compatibility/ubuntu/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/test-old-ubuntu .
+# docker build -t altinityinfra/test-old-ubuntu .
FROM ubuntu:12.04
CMD /bin/sh -c "/clickhouse server --config /config/config.xml > /var/log/clickhouse-server/stderr.log 2>&1 & \
diff --git a/ci/docker/docs-builder/Dockerfile b/ci/docker/docs-builder/Dockerfile
index 8835fe3a7070..59793ae16b7d 100644
--- a/ci/docker/docs-builder/Dockerfile
+++ b/ci/docker/docs-builder/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/docs-builder .
+# docker build -t altinityinfra/docs-builder .
FROM node:20-bookworm-slim
RUN apt-get update && \
diff --git a/ci/docker/fasttest/Dockerfile b/ci/docker/fasttest/Dockerfile
index 9ec9a6c1b352..4782a5a394d7 100644
--- a/ci/docker/fasttest/Dockerfile
+++ b/ci/docker/fasttest/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/fasttest .
+# docker build -t altinityinfra/fasttest .
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
@@ -74,7 +74,7 @@ RUN apt-get update \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
# Note, libmpfr6 is also a requirement for gdb
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /opt/gdb /opt/gdb
# Give suid to gdb to grant it attach permissions
RUN chmod u+s /opt/gdb/bin/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
diff --git a/ci/docker/fuzzer/Dockerfile b/ci/docker/fuzzer/Dockerfile
index 303dfc59fb5f..1cedc0437a03 100644
--- a/ci/docker/fuzzer/Dockerfile
+++ b/ci/docker/fuzzer/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/fuzzer .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ENV LANG=C.UTF-8
diff --git a/ci/docker/integration/arrowflight/Dockerfile b/ci/docker/integration/arrowflight/Dockerfile
index 0efaf07cad5f..6a26f07a05dd 100644
--- a/ci/docker/integration/arrowflight/Dockerfile
+++ b/ci/docker/integration/arrowflight/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/arrowflight-server-test .
+# docker build -t altinityinfra/arrowflight-server-test .
FROM python:3.9-slim
ENV PYTHONDONTWRITEBYTECODE=1
diff --git a/ci/docker/integration/base/Dockerfile b/ci/docker/integration/base/Dockerfile
index 29af698b293c..4260bf6c85dd 100644
--- a/ci/docker/integration/base/Dockerfile
+++ b/ci/docker/integration/base/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
-# docker build -t clickhouse/integration-test .
+# docker build -t altinityinfra/integration-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
SHELL ["/bin/bash", "-c"]
@@ -73,10 +73,10 @@ maxClientCnxns=80' > /opt/zookeeper/conf/zoo.cfg && \
ENV TZ=Etc/UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 \
/opt/openssl-fips/openssl.cnf \
/opt/openssl-fips/fipsmodule.cnf \
/opt/openssl-fips/fips.so \
diff --git a/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile b/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile
index 40d107d0c28b..9337e28926a2 100644
--- a/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile
+++ b/ci/docker/integration/clickhouse_with_hms_catalog/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/integration-test-with-hms .
+# docker build -t altinityinfra/integration-test-with-hms .
ARG FROM_TAG=latest
FROM openjdk:8-jre-slim AS build
diff --git a/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile b/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile
index f711d7258a9e..526b9e4f30fa 100644
--- a/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile
+++ b/ci/docker/integration/clickhouse_with_unity_catalog/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/integration-test-with-unity-catalog .
+# docker build -t altinityinfra/integration-test-with-unity-catalog .
ARG FROM_TAG=latest
FROM clickhouse/integration-test:$FROM_TAG
diff --git a/ci/docker/integration/helper_container/Dockerfile b/ci/docker/integration/helper_container/Dockerfile
index 1084d087e53b..81d658705836 100644
--- a/ci/docker/integration/helper_container/Dockerfile
+++ b/ci/docker/integration/helper_container/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/integration-helper .
+# docker build -t altinityinfra/integration-helper .
# Helper docker container to run iptables without sudo
FROM alpine:3.18
diff --git a/ci/docker/integration/kerberos_kdc/Dockerfile b/ci/docker/integration/kerberos_kdc/Dockerfile
index a203c33a3313..a7f989bf4a56 100644
--- a/ci/docker/integration/kerberos_kdc/Dockerfile
+++ b/ci/docker/integration/kerberos_kdc/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/kerberos-kdc .
+# docker build -t altinityinfra/kerberos-kdc .
FROM centos:6
RUN sed -i '/^mirrorlist/s/^/#/;/^#baseurl/{s/#//;s/mirror.centos.org\/centos\/$releasever/vault.centos.org\/6.10/}' /etc/yum.repos.d/*B*
diff --git a/ci/docker/integration/mysql_dotnet_client/Dockerfile b/ci/docker/integration/mysql_dotnet_client/Dockerfile
index 92d0c6ae585e..d1e38db65613 100644
--- a/ci/docker/integration/mysql_dotnet_client/Dockerfile
+++ b/ci/docker/integration/mysql_dotnet_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql_dotnet_client .
+# docker build -t altinityinfra/mysql_dotnet_client .
FROM ubuntu:22.04
diff --git a/ci/docker/integration/mysql_golang_client/Dockerfile b/ci/docker/integration/mysql_golang_client/Dockerfile
index 5281f786ae2d..52be68126e47 100644
--- a/ci/docker/integration/mysql_golang_client/Dockerfile
+++ b/ci/docker/integration/mysql_golang_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-golang-client .
+# docker build -t altinityinfra/mysql-golang-client .
# MySQL golang client docker container
FROM golang:1.17
diff --git a/ci/docker/integration/mysql_java_client/Dockerfile b/ci/docker/integration/mysql_java_client/Dockerfile
index 38fefac070e7..5826ee77d501 100644
--- a/ci/docker/integration/mysql_java_client/Dockerfile
+++ b/ci/docker/integration/mysql_java_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-java-client .
+# docker build -t altinityinfra/mysql-java-client .
# MySQL Java client docker container
FROM openjdk:8-jdk-alpine
diff --git a/ci/docker/integration/mysql_js_client/Dockerfile b/ci/docker/integration/mysql_js_client/Dockerfile
index 4c9df10ace1c..2b821f243234 100644
--- a/ci/docker/integration/mysql_js_client/Dockerfile
+++ b/ci/docker/integration/mysql_js_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-js-client .
+# docker build -t altinityinfra/mysql-js-client .
# MySQL JavaScript client docker container
FROM node:16.14.2
diff --git a/ci/docker/integration/mysql_php_client/Dockerfile b/ci/docker/integration/mysql_php_client/Dockerfile
index 0e11ae023e63..b060e93f70a3 100644
--- a/ci/docker/integration/mysql_php_client/Dockerfile
+++ b/ci/docker/integration/mysql_php_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/mysql-php-client .
+# docker build -t altinityinfra/mysql-php-client .
# MySQL PHP client docker container
FROM php:8-cli-alpine
diff --git a/ci/docker/integration/postgresql_java_client/Dockerfile b/ci/docker/integration/postgresql_java_client/Dockerfile
index c5583085ef37..5a7458cc1d2f 100644
--- a/ci/docker/integration/postgresql_java_client/Dockerfile
+++ b/ci/docker/integration/postgresql_java_client/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/postgresql-java-client .
+# docker build -t altinityinfra/postgresql-java-client .
# PostgreSQL Java client docker container
FROM ubuntu:18.04
diff --git a/ci/docker/integration/resolver/Dockerfile b/ci/docker/integration/resolver/Dockerfile
index 423faf835ae1..1f639bb2793d 100644
--- a/ci/docker/integration/resolver/Dockerfile
+++ b/ci/docker/integration/resolver/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/python-bottle .
+# docker build -t altinityinfra/python-bottle .
# Helper docker container to run python bottle apps
# python cgi module is dropped in 3.13 - pin to 3.12
diff --git a/ci/docker/integration/runner/Dockerfile b/ci/docker/integration/runner/Dockerfile
index 91aa33c06ab9..4b3f8b290d71 100644
--- a/ci/docker/integration/runner/Dockerfile
+++ b/ci/docker/integration/runner/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/integration-tests-runner .
+# docker build -t altinityinfra/integration-tests-runner .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
@@ -90,10 +90,10 @@ COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/
COPY misc/ /misc/
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 \
/opt/openssl-fips/openssl.cnf \
/opt/openssl-fips/fipsmodule.cnf \
/opt/openssl-fips/fips.so \
diff --git a/ci/docker/integration/runner/dockerd-entrypoint.sh b/ci/docker/integration/runner/dockerd-entrypoint.sh
index f4501d23ca48..607d1c13bdf5 100755
--- a/ci/docker/integration/runner/dockerd-entrypoint.sh
+++ b/ci/docker/integration/runner/dockerd-entrypoint.sh
@@ -8,8 +8,8 @@ echo '{
"ip-forward": true,
"log-level": "debug",
"storage-driver": "overlay2",
- "insecure-registries" : ["dockerhub-proxy.dockerhub-proxy-zone:5000"],
- "registry-mirrors" : ["http://dockerhub-proxy.dockerhub-proxy-zone:5000"]
+ "insecure-registries" : ["65.108.242.32:5000"],
+ "registry-mirrors" : ["http://65.108.242.32:5000"]
}' | dd of=/etc/docker/daemon.json 2>/dev/null
if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
diff --git a/ci/docker/integration/s3_proxy/Dockerfile b/ci/docker/integration/s3_proxy/Dockerfile
index 5858218e4e4c..df8d8f00f216 100644
--- a/ci/docker/integration/s3_proxy/Dockerfile
+++ b/ci/docker/integration/s3_proxy/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/s3-proxy .
+# docker build -t altinityinfra/s3-proxy .
FROM nginx:alpine
COPY run.sh /run.sh
diff --git a/ci/docker/keeper-jepsen-test/Dockerfile b/ci/docker/keeper-jepsen-test/Dockerfile
index 3c5d0a6ecb42..6633d81193d5 100644
--- a/ci/docker/keeper-jepsen-test/Dockerfile
+++ b/ci/docker/keeper-jepsen-test/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/keeper-jepsen-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814
diff --git a/ci/docker/libfuzzer/Dockerfile b/ci/docker/libfuzzer/Dockerfile
index 26201e81def2..0da78bee9a67 100644
--- a/ci/docker/libfuzzer/Dockerfile
+++ b/ci/docker/libfuzzer/Dockerfile
@@ -1,6 +1,6 @@
# docker build -t clickhouse/libfuzzer .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
# ARG for quick switch to a given ubuntu mirror
ARG apt_archive="http://archive.ubuntu.com"
diff --git a/ci/docker/performance-comparison/Dockerfile b/ci/docker/performance-comparison/Dockerfile
index 3a0794eab35f..0e853677c3bb 100644
--- a/ci/docker/performance-comparison/Dockerfile
+++ b/ci/docker/performance-comparison/Dockerfile
@@ -1,7 +1,7 @@
# docker build -t clickhouse/performance-comparison .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
RUN apt-get update \
&& DEBIAN_FRONTEND=noninteractive apt-get install --yes --no-install-recommends \
@@ -42,7 +42,7 @@ RUN pip3 --no-cache-dir install -r requirements.txt
COPY run.sh /
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
# aws cli to acquire secrets and params from ssm
diff --git a/ci/docker/server-jepsen-test/Dockerfile b/ci/docker/server-jepsen-test/Dockerfile
index fd70fc457020..54a4626e2892 100644
--- a/ci/docker/server-jepsen-test/Dockerfile
+++ b/ci/docker/server-jepsen-test/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/server-jepsen-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ENV DEBIAN_FRONTEND=noninteractive
ENV CLOJURE_VERSION=1.10.3.814
diff --git a/ci/docker/sqlancer-test/Dockerfile b/ci/docker/sqlancer-test/Dockerfile
index 2aa5aba9788d..3c5cea2ef7e0 100644
--- a/ci/docker/sqlancer-test/Dockerfile
+++ b/ci/docker/sqlancer-test/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/sqlancer-test .
+# docker build -t altinityinfra/sqlancer-test .
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
diff --git a/ci/docker/stateless-test/Dockerfile b/ci/docker/stateless-test/Dockerfile
index 108bf19bab6b..cf3491b826db 100644
--- a/ci/docker/stateless-test/Dockerfile
+++ b/ci/docker/stateless-test/Dockerfile
@@ -1,6 +1,6 @@
-# docker build -t clickhouse/stateless-test .
+# docker build -t altinityinfra/stateless-test .
ARG FROM_TAG=latest
-FROM clickhouse/test-base:$FROM_TAG
+FROM altinityinfra/test-base:$FROM_TAG
ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz"
@@ -111,10 +111,10 @@ ENV PYTHONPATH=".:./ci"
# A directory for cache
RUN mkdir /dev/shm/clickhouse
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 \
/opt/openssl-fips/openssl.cnf \
/opt/openssl-fips/fipsmodule.cnf \
/opt/openssl-fips/fips.so \
diff --git a/ci/docker/stress-test/Dockerfile b/ci/docker/stress-test/Dockerfile
index 866480f27a8b..66f41f3cdea5 100644
--- a/ci/docker/stress-test/Dockerfile
+++ b/ci/docker/stress-test/Dockerfile
@@ -1,7 +1,7 @@
# rebuild in #33610
# docker build -t clickhouse/stress-test .
ARG FROM_TAG=latest
-FROM clickhouse/stateless-test:$FROM_TAG
+FROM altinityinfra/stateless-test:$FROM_TAG
RUN apt-get update -y \
&& env DEBIAN_FRONTEND=noninteractive \
diff --git a/ci/docker/style-test/Dockerfile b/ci/docker/style-test/Dockerfile
index 9c83329c0db7..7bb4e502cc4b 100644
--- a/ci/docker/style-test/Dockerfile
+++ b/ci/docker/style-test/Dockerfile
@@ -1,4 +1,4 @@
-# docker build -t clickhouse/style-test .
+# docker build -t altinityinfra/style-test .
FROM ubuntu:22.04
RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
diff --git a/ci/docker/test-base/Dockerfile b/ci/docker/test-base/Dockerfile
index 6bccbe7f918f..205fdb136f06 100644
--- a/ci/docker/test-base/Dockerfile
+++ b/ci/docker/test-base/Dockerfile
@@ -1,5 +1,5 @@
# rebuild in #33610
-# docker build -t clickhouse/test-base .
+# docker build -t altinityinfra/test-base .
FROM ubuntu:22.04
# ARG for quick switch to a given ubuntu mirror
@@ -74,10 +74,10 @@ RUN apt-get update \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*
# Note, libmpfr6 is also a requirement for gdb
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad /opt/gdb /opt/gdb
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 /opt/gdb /opt/gdb
ENV PATH="/opt/gdb/bin:${PATH}"
-COPY --from=clickhouse/cctools:859fb360308eb8ac47ad \
+COPY --from=altinityinfra/cctools:7c093c1cde57d744be57 \
/opt/openssl-fips/openssl.cnf \
/opt/openssl-fips/fipsmodule.cnf \
/opt/openssl-fips/fips.so \
diff --git a/ci/jobs/build_clickhouse.py b/ci/jobs/build_clickhouse.py
index e334f383a15c..18dc461f5efd 100644
--- a/ci/jobs/build_clickhouse.py
+++ b/ci/jobs/build_clickhouse.py
@@ -20,12 +20,12 @@
BuildTypes.AMD_TSAN: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE=thread -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=1 -DENABLE_LEXER_TEST=1 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON",
BuildTypes.AMD_MSAN: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE=memory -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=1 -DENABLE_LEXER_TEST=1 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON",
BuildTypes.AMD_UBSAN: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE=undefined -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=1 -DENABLE_LEXER_TEST=1 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON",
- BuildTypes.ARM_RELEASE: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=1 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=0 -DENABLE_LEXER_TEST=0 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1",
+ BuildTypes.ARM_RELEASE: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=1 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=0 -DENABLE_LEXER_TEST=0 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON -DSPLIT_DEBUG_SYMBOLS=ON -DBUILD_STANDALONE_KEEPER=1 -DCMAKE_TOOLCHAIN_FILE={current_directory}/cmake/linux/toolchain-aarch64.cmake",
BuildTypes.ARM_ASAN: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE=address -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=1 -DENABLE_LEXER_TEST=1 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON -DCMAKE_TOOLCHAIN_FILE={current_directory}/cmake/linux/toolchain-aarch64.cmake",
- BuildTypes.ARM_COVERAGE: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=0 -DENABLE_LEXER_TEST=0 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON -DSANITIZE_COVERAGE=1",
- BuildTypes.ARM_BINARY: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=0 -DENABLE_LEXER_TEST=0 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON",
+ BuildTypes.ARM_COVERAGE: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=0 -DENABLE_LEXER_TEST=0 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_SYSCONFDIR=/etc -DCMAKE_INSTALL_LOCALSTATEDIR=/var -DCMAKE_SKIP_INSTALL_ALL_DEPENDENCY=ON -DSANITIZE_COVERAGE=1 -DCMAKE_TOOLCHAIN_FILE={current_directory}/cmake/linux/toolchain-aarch64.cmake",
+ BuildTypes.ARM_BINARY: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=0 -DENABLE_LEXER_TEST=0 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_TOOLCHAIN_FILE={current_directory}/cmake/linux/toolchain-aarch64.cmake",
BuildTypes.AMD_TIDY: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=Debug -DENABLE_THINLTO=0 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=0 -DENABLE_TESTS=1 -DENABLE_LEXER_TEST=1 -DENABLE_UTILS=1 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DENABLE_CLANG_TIDY=1 -DENABLE_EXAMPLES=1 -DENABLE_BUZZHOUSE=1",
- BuildTypes.ARM_TIDY: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=Debug -DENABLE_THINLTO=0 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=0 -DENABLE_TESTS=1 -DENABLE_LEXER_TEST=1 -DENABLE_UTILS=1 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DENABLE_CLANG_TIDY=1 -DENABLE_EXAMPLES=1 -DENABLE_BUZZHOUSE=1",
+ BuildTypes.ARM_TIDY: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=Debug -DENABLE_THINLTO=0 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=0 -DENABLE_TESTS=1 -DENABLE_LEXER_TEST=1 -DENABLE_UTILS=1 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DENABLE_CLANG_TIDY=1 -DENABLE_EXAMPLES=1 -DENABLE_BUZZHOUSE=1 -DCMAKE_TOOLCHAIN_FILE={current_directory}/cmake/linux/toolchain-aarch64.cmake",
BuildTypes.AMD_DARWIN: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=0 -DENABLE_LEXER_TEST=0 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_TOOLCHAIN_FILE={current_directory}/cmake/darwin/toolchain-x86_64.cmake -DCMAKE_AR:FILEPATH=/cctools/bin/x86_64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/x86_64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=/cctools/bin/x86_64-apple-darwin-ranlib -DLINKER_NAME=/cctools/bin/x86_64-apple-darwin-ld",
BuildTypes.ARM_DARWIN: f" cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=0 -DENABLE_LEXER_TEST=0 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_TOOLCHAIN_FILE={current_directory}/cmake/darwin/toolchain-aarch64.cmake -DCMAKE_AR:FILEPATH=/cctools/bin/aarch64-apple-darwin-ar -DCMAKE_INSTALL_NAME_TOOL=/cctools/bin/aarch64-apple-darwin-install_name_tool -DCMAKE_RANLIB:FILEPATH=/cctools/bin/aarch64-apple-darwin-ranlib -DLINKER_NAME=/cctools/bin/aarch64-apple-darwin-ld",
BuildTypes.ARM_V80COMPAT: f"cmake --debug-trycompile -DCMAKE_VERBOSE_MAKEFILE=1 -LA -DCMAKE_BUILD_TYPE=None -DENABLE_THINLTO=0 -DSANITIZE= -DENABLE_CHECK_HEAVY_BUILDS=1 -DBUILD_STRIPPED_BINARY=1 -DENABLE_CLICKHOUSE_SELF_EXTRACTING=1 -DCMAKE_C_COMPILER={ToolSet.COMPILER_C} -DCMAKE_CXX_COMPILER={ToolSet.COMPILER_CPP} -DCOMPILER_CACHE=sccache -DENABLE_BUILD_PROFILING=1 -DENABLE_TESTS=0 -DENABLE_LEXER_TEST=0 -DENABLE_UTILS=0 -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=ON -DCMAKE_TOOLCHAIN_FILE={current_directory}/cmake/linux/toolchain-aarch64.cmake -DNO_ARMV81_OR_HIGHER=1",
@@ -141,10 +141,13 @@ def do_checkout():
)
res = results[-1].is_ok()
- if info.pr_number == 0 and info.is_push_event:
- version_dict = info.get_kv_data("version")
- else:
- version_dict = CHVersion.get_current_version_as_dict()
+    # NOTE(vnemkov): always get the pre-calculated version from custom_data
+ version_dict = info.get_kv_data("version")
+
+ # if info.pr_number == 0 and info.is_push_event:
+ # version_dict = info.get_kv_data("version")
+ # else:
+ # version_dict = CHVersion.get_current_version_as_dict()
if res and JobStages.CMAKE in stages:
assert version_dict, "Failed to determine build version"
@@ -188,6 +191,8 @@ def do_checkout():
targets = "fuzzers"
elif build_type in (BuildTypes.AMD_TIDY, BuildTypes.ARM_TIDY):
targets = "-k0 all"
+ elif build_type in (BuildTypes.AMD_RELEASE, BuildTypes.ARM_RELEASE):
+ targets = "clickhouse-bundle clickhouse-stripped"
else:
targets = "clickhouse-bundle"
results.append(
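
For reference, a minimal sketch of the ninja-target dispatch this hunk extends; the string build-type keys and the standalone helper are illustrative, since the real code branches on `BuildTypes` members inline:

```python
# Hypothetical condensation of the target selection above; release builds
# now also produce the pre-stripped binary alongside the bundle.
def pick_ninja_targets(build_type: str) -> str:
    if build_type == "fuzzers":
        return "fuzzers"
    if build_type in ("amd_tidy", "arm_tidy"):
        return "-k0 all"  # keep going past errors so clang-tidy reports everything
    if build_type in ("amd_release", "arm_release"):
        return "clickhouse-bundle clickhouse-stripped"
    return "clickhouse-bundle"
```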
diff --git a/ci/jobs/clickbench.py b/ci/jobs/clickbench.py
index 97492b427f6e..714fb0bd41ba 100644
--- a/ci/jobs/clickbench.py
+++ b/ci/jobs/clickbench.py
@@ -19,7 +19,7 @@ def install():
res = ch.install_clickbench_config()
if Info().is_local_run:
return res
- return res and ch.create_log_export_config()
+ return res # and ch.create_log_export_config()
results.append(
Result.from_commands_run(name="Install ClickHouse", command=install)
@@ -33,7 +33,7 @@ def start():
res = ch.start_light()
if Info().is_local_run:
return res
- return res and ch.start_log_exports(check_start_time=stop_watch.start_time)
+ return res # and ch.start_log_exports(check_start_time=stop_watch.start_time)
results.append(
Result.from_commands_run(
diff --git a/ci/jobs/fast_test.py b/ci/jobs/fast_test.py
index edf59fee9971..860a14f6ca02 100644
--- a/ci/jobs/fast_test.py
+++ b/ci/jobs/fast_test.py
@@ -246,7 +246,9 @@ def main():
print(step_name)
res = res and CH.run_fast_test(test=args.test or "")
if res:
- results.append(FTResultsProcessor(wd=Settings.OUTPUT_DIR).run())
+ results.append(
+ FTResultsProcessor(wd=Settings.OUTPUT_DIR, test_options=["fast"]).run()
+ )
results[-1].set_timing(stopwatch=stop_watch_)
else:
results.append(
diff --git a/ci/jobs/functional_tests.py b/ci/jobs/functional_tests.py
index 84a13960318f..02d148e2bd5f 100644
--- a/ci/jobs/functional_tests.py
+++ b/ci/jobs/functional_tests.py
@@ -121,6 +121,7 @@ def run_specific_tests(tests, runs=1, extra_args=""):
"azure": " --azure-blob-storage --no-random-settings --no-random-merge-tree-settings", # azurite is slow, with randomization it can be super slow
"parallel": "--no-sequential",
"sequential": "--no-parallel",
+ "amd_tsan": " --timeout 1200", # NOTE (strtgbb): tsan is slow, increase the timeout to avoid timeout errors
}
@@ -191,10 +192,12 @@ def main():
if not info.is_local_run:
# TODO: find a way to work with Azure secret so it's ok for local tests as well, for now keep azure disabled
- os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output(
- f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value",
- verbose=True,
- )
+ # os.environ["AZURE_CONNECTION_STRING"] = Shell.get_output(
+ # f"aws ssm get-parameter --region us-east-1 --name azure_connection_string --with-decryption --output text --query Parameter.Value",
+ # verbose=True,
+ # )
+ # NOTE(strtgbb): We pass azure credentials through the docker command, not SSM.
+ pass
else:
print("Disable azure for a local run")
config_installs_args += " --no-azure"
@@ -247,12 +250,13 @@ def main():
if res and JobStages.INSTALL_CLICKHOUSE in stages:
- def configure_log_export():
- if not info.is_local_run:
- print("prepare log export config")
- return CH.create_log_export_config()
- else:
- print("skip log export config for local run")
+    # NOTE (strtgbb): Disable log export throughout this file; it depends on AWS SSM, which we don't have configured
+ # def configure_log_export():
+ # if not info.is_local_run:
+ # print("prepare log export config")
+ # return CH.create_log_export_config()
+ # else:
+ # print("skip log export config for local run")
commands = [
f"chmod +x {ch_path}/clickhouse",
@@ -298,7 +302,7 @@ def configure_log_export():
f"prof_active:true,prof_prefix:{temp_dir}/jemalloc_profiles/clickhouse.jemalloc"
)
- commands.append(configure_log_export)
+ # commands.append(configure_log_export)
results.append(
Result.from_commands_run(name="Install ClickHouse", command=commands)
@@ -367,9 +371,9 @@ def start():
tests=tests, runs=50 if is_flaky_check else 1, extra_args=runner_options
)
- if not info.is_local_run:
- CH.stop_log_exports()
- ft_res_processor = FTResultsProcessor(wd=temp_dir)
+ # if not info.is_local_run:
+ # CH.stop_log_exports()
+ ft_res_processor = FTResultsProcessor(wd=temp_dir, test_options=test_options)
results.append(ft_res_processor.run())
debug_files += ft_res_processor.debug_files
test_result = results[-1]
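
The options table above maps job-name fragments to extra test-runner flags; a sketch of how such a lookup composes arguments (the dict mirrors the diff, while `compose_runner_args` is a hypothetical helper):

```python
RUNNER_OPTIONS = {
    "azure": " --azure-blob-storage --no-random-settings --no-random-merge-tree-settings",
    "parallel": "--no-sequential",
    "sequential": "--no-parallel",
    "amd_tsan": " --timeout 1200",  # TSan builds are slow; a larger timeout avoids spurious failures
}

def compose_runner_args(job_options: str) -> str:
    # Concatenate the extra flags for every option fragment present in the job options.
    return " ".join(
        flags.strip() for option, flags in RUNNER_OPTIONS.items() if option in job_options
    )

print(compose_runner_args("amd_tsan, parallel"))  # -> --no-sequential --timeout 1200
```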
diff --git a/ci/jobs/install_check.py b/ci/jobs/install_check.py
index 3a7c1d348330..7abf1d2166fa 100644
--- a/ci/jobs/install_check.py
+++ b/ci/jobs/install_check.py
@@ -6,8 +6,8 @@
from ci.praktika.result import Result
from ci.praktika.utils import Shell, Utils
-RPM_IMAGE = "clickhouse/install-rpm-test"
-DEB_IMAGE = "clickhouse/install-deb-test"
+RPM_IMAGE = "altinityinfra/install-rpm-test"
+DEB_IMAGE = "altinityinfra/install-deb-test"
REPO_PATH = Utils.cwd()
TEMP_PATH = Path(f"{REPO_PATH}/ci/tmp/")
@@ -20,7 +20,10 @@ def prepare_test_scripts():
echo "$test_env" >> /etc/default/clickhouse
systemctl restart clickhouse-server
clickhouse-client -q 'SELECT version()'
-grep "$test_env" /proc/$(cat /var/run/clickhouse-server/clickhouse-server.pid)/environ"""
+grep "$test_env" /proc/$(cat /var/run/clickhouse-server/clickhouse-server.pid)/environ
+echo "Check Stacktrace"
+output=$(clickhouse-local --stacktrace --query="SELECT throwIf(1,'throw')" 2>&1 >/dev/null || true)
+echo "$output" | grep 'FunctionThrowIf::executeImpl'"""
initd_test = r"""#!/bin/bash
set -e
trap "bash -ex /packages/preserve_logs.sh" ERR
diff --git a/ci/jobs/integration_test_check.py b/ci/jobs/integration_test_check.py
index 824df6b4e411..2bb9c190d5e7 100644
--- a/ci/jobs/integration_test_check.py
+++ b/ci/jobs/integration_test_check.py
@@ -273,7 +273,11 @@ def main():
logging.info(
"ENV parameters for runner:\n%s",
"\n".join(
- [f"{k}={v}" for k, v in my_env.items() if k.startswith("CLICKHOUSE_")]
+ [
+ f"{k}={v}"
+ for k, v in my_env.items()
+ if k.startswith("CLICKHOUSE_") and "TEST_STAT" not in k
+ ]
),
)
diff --git a/ci/jobs/scripts/clickhouse_proc.py b/ci/jobs/scripts/clickhouse_proc.py
index 5b41ba8b5e38..f1983e8a7147 100644
--- a/ci/jobs/scripts/clickhouse_proc.py
+++ b/ci/jobs/scripts/clickhouse_proc.py
@@ -621,16 +621,17 @@ def run_fast_test(self, test=""):
return exit_code == 0
def terminate(self):
- if self.minio_proc:
- # remove the webhook so it doesn't spam with errors once we stop ClickHouse
- Shell.check(
- "/mc admin config reset clickminio logger_webhook:ch_server_webhook",
- verbose=True,
- )
- Shell.check(
- "/mc admin config reset clickminio audit_webhook:ch_audit_webhook",
- verbose=True,
- )
+        # NOTE (strtgbb): Log tables are disabled; we don't use them
+ # if self.minio_proc:
+ # # remove the webhook so it doesn't spam with errors once we stop ClickHouse
+ # Shell.check(
+ # "/mc admin config reset clickminio logger_webhook:ch_server_webhook",
+ # verbose=True,
+ # )
+ # Shell.check(
+ # "/mc admin config reset clickminio audit_webhook:ch_audit_webhook",
+ # verbose=True,
+ # )
self._flush_system_logs()
print("Terminate ClickHouse processes")
@@ -938,8 +939,8 @@ def dump_system_tables(self):
"error_log",
"query_metric_log",
"part_log",
- "minio_audit_logs",
- "minio_server_logs",
+ # "minio_audit_logs", # NOTE (strtgbb): we do not use these logs
+ # "minio_server_logs",
]
command_args = self.LOGS_SAVER_CLIENT_OPTIONS
diff --git a/ci/jobs/scripts/clickhouse_version.py b/ci/jobs/scripts/clickhouse_version.py
index f6f403fe7352..9b68d687e4a3 100644
--- a/ci/jobs/scripts/clickhouse_version.py
+++ b/ci/jobs/scripts/clickhouse_version.py
@@ -1,9 +1,20 @@
import re
+import sys
+import os
from pathlib import Path
from praktika.info import Info
from praktika.utils import Shell
+# NOTE(vnemkov): extremely hackish, but allows reusing code from version_helper and git_helper with our modifications.
+
+# allow importing other packages located in the `tests/ci` directory, such as `git_helper`
+import tests.ci
+sys.path.append(os.path.abspath(tests.ci.__path__._path[0]))
+from tests.ci.version_helper import (
+ read_versions,
+ get_version_from_repo
+)
class CHVersion:
FILE_WITH_VERSION_PATH = "./cmake/autogenerated_versions.txt"
@@ -15,42 +26,21 @@ class CHVersion:
SET(VERSION_MINOR {minor})
SET(VERSION_PATCH {patch})
SET(VERSION_GITHASH {githash})
+SET(VERSION_TWEAK {tweak})
+SET(VERSION_FLAVOUR {flavour})
SET(VERSION_DESCRIBE {describe})
SET(VERSION_STRING {string})
"""
@classmethod
def get_release_version_as_dict(cls):
- versions = {}
- for line in (
- Path(cls.FILE_WITH_VERSION_PATH).read_text(encoding="utf-8").splitlines()
- ):
- line = line.strip()
- if not line.startswith("SET("):
- continue
-
- name, value = line[4:-1].split(maxsplit=1)
- name = name.removeprefix("VERSION_").lower()
- if name in ("major", "minor", "patch"):
- value = int(value)
- versions[name] = value
-
- result = {
- "major": versions["major"],
- "minor": versions["minor"],
- "patch": versions["patch"],
- "revision": versions["revision"],
- "githash": versions["githash"],
- "describe": versions["describe"],
- "string": versions["string"],
- }
- return result
+ return read_versions()
@classmethod
def get_current_version_as_dict(cls):
- version = cls.get_release_version_as_dict()
- info = Info()
+ version_from_file = read_versions()
try:
+ version = version_from_file
tweak = int(
Shell.get_output(
f"git rev-list --count {version['githash']}..HEAD", verbose=True
@@ -58,24 +48,19 @@ def get_current_version_as_dict(cls):
)
except ValueError:
# Shallow checkout
- tweak = 1
- version_type = "testing"
- if info.pr_number == 0 and bool(
- re.match(r"^\d{2}\.\d+$", info.git_branch.removeprefix("release/"))
- ):
- if version["minor"] % 5 == 3:
- version_type = "lts"
- else:
- version_type = "stable"
- version_string = (
- f'{version["major"]}.{version["minor"]}.{version["patch"]}.{tweak}'
- )
- version_description = f"v{version_string}-{version_type}"
- version["githash"] = info.sha
- version["tweak"] = tweak
- version["describe"] = version_description
- version["string"] = version_string
- return version
+ tweak = 0
+
+ version = get_version_from_repo()
+ version.tweak += tweak
+
+    # relying on ClickHouseVersion to generate a proper `description` and `string` with the updated `tweak` value.
+ version = version.with_description(version.flavour)
+ version_dict = version.as_dict()
+
+    # preserve githash; not sure if it is going to be useful, but this mimics the original implementation
+ version_dict['githash'] = version_from_file['githash']
+
+ return version_dict
@classmethod
def get_version(cls):
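
The rewritten lookup computes the tweak as the number of commits since the release hash recorded in cmake/autogenerated_versions.txt; a minimal sketch of that computation (subprocess stands in for the Shell.get_output helper used above):

```python
import subprocess

def commits_since(githash: str) -> int:
    # VERSION_TWEAK = commits reachable from HEAD but not from the release hash.
    try:
        out = subprocess.check_output(
            ["git", "rev-list", "--count", f"{githash}..HEAD"], text=True
        )
        return int(out.strip())
    except (subprocess.CalledProcessError, ValueError):
        return 0  # shallow checkout: history is unavailable, same fallback as above
```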
diff --git a/ci/jobs/scripts/functional_tests_results.py b/ci/jobs/scripts/functional_tests_results.py
index 254991cc5ea4..eb62b5089683 100755
--- a/ci/jobs/scripts/functional_tests_results.py
+++ b/ci/jobs/scripts/functional_tests_results.py
@@ -1,6 +1,11 @@
import dataclasses
+import json
+import os
import traceback
from typing import List
+import re
+
+import yaml
from praktika.result import Result
@@ -28,6 +33,101 @@
# out.writerow(status)
+def get_broken_tests_rules() -> dict:
+ broken_tests_file_path = "tests/broken_tests.yaml"
+ if (
+ not os.path.isfile(broken_tests_file_path)
+ or os.path.getsize(broken_tests_file_path) == 0
+ ):
+ raise ValueError(
+ "There is something wrong with getting broken tests rules: "
+ f"file '{broken_tests_file_path}' is empty or does not exist."
+ )
+
+ with open(broken_tests_file_path, "r", encoding="utf-8") as broken_tests_file:
+ broken_tests = yaml.safe_load(broken_tests_file)
+
+ compiled_rules = {"exact": {}, "pattern": {}}
+
+ for test in broken_tests:
+ regex = test.get("regex") is True
+ rule = {
+ "reason": test["reason"],
+ }
+
+ if test.get("message"):
+ rule["message"] = re.compile(test["message"]) if regex else test["message"]
+
+ if test.get("not_message"):
+ rule["not_message"] = (
+ re.compile(test["not_message"]) if regex else test["not_message"]
+ )
+ if test.get("check_types"):
+ rule["check_types"] = test["check_types"]
+
+ if regex:
+ rule["regex"] = True
+ compiled_rules["pattern"][re.compile(test["name"])] = rule
+ else:
+ compiled_rules["exact"][test["name"]] = rule
+
+ print(
+ f"INFO: Compiled {len(compiled_rules['exact'])} exact rules and {len(compiled_rules['pattern'])} pattern rules"
+ )
+
+ return compiled_rules
+
+
+def test_is_known_fail(test_name, test_logs, known_broken_tests, test_options_string):
+ matching_rules = []
+
+ print(f"Checking known broken tests for failed test: {test_name}")
+ print("Potential matching rules:")
+ exact_rule = known_broken_tests["exact"].get(test_name)
+ if exact_rule:
+ print(f"{test_name} - {exact_rule}")
+ matching_rules.append(exact_rule)
+
+ for name_re, data in known_broken_tests["pattern"].items():
+ if name_re.fullmatch(test_name):
+ print(f"{name_re} - {data}")
+ matching_rules.append(data)
+
+ if not matching_rules:
+ return False
+
+ def matches_substring(substring, log, is_regex):
+ if log is None:
+ return False
+ if is_regex:
+ return bool(substring.search(log))
+ return substring in log
+
+ for rule_data in matching_rules:
+ if rule_data.get("check_types") and not any(
+ ct in test_options_string for ct in rule_data["check_types"]
+ ):
+ print(
+ f"Check types didn't match: '{rule_data['check_types']}' not in '{test_options_string}'"
+ )
+ continue # check_types didn't match → skip rule
+
+ is_regex = rule_data.get("regex", False)
+ not_message = rule_data.get("not_message")
+ if not_message and matches_substring(not_message, test_logs, is_regex):
+ print(f"Skip rule: Not message matched: '{rule_data['not_message']}'")
+ continue # not_message matched → skip rule
+ message = rule_data.get("message")
+ if message and not matches_substring(message, test_logs, is_regex):
+ print(f"Skip rule: Message didn't match: '{rule_data['message']}'")
+ continue
+
+ print(f"Test {test_name} matched rule: {rule_data}")
+ return rule_data["reason"]
+
+ return False
+
+
class FTResultsProcessor:
@dataclasses.dataclass
class Summary:
@@ -36,6 +136,7 @@ class Summary:
unknown: int
failed: int
success: int
+ broken: int
test_results: List[Result]
hung: bool = False
server_died: bool = False
@@ -43,9 +144,10 @@ class Summary:
success_finish: bool = False
test_end: bool = True
- def __init__(self, wd):
+ def __init__(self, wd, test_options):
self.tests_output_file = f"{wd}/test_result.txt"
self.debug_files = []
+ self.test_options = test_options
def _process_test_output(self):
total = 0
@@ -53,6 +155,7 @@ def _process_test_output(self):
unknown = 0
failed = 0
success = 0
+ broken = 0
hung = False
server_died = False
retries = False
@@ -60,6 +163,8 @@ def _process_test_output(self):
test_results = []
test_end = True
+ known_broken_tests = get_broken_tests_rules()
+
with open(self.tests_output_file, "r", encoding="utf-8") as test_file:
for line in test_file:
original_line = line
@@ -128,6 +233,8 @@ def _process_test_output(self):
if DATABASE_SIGN in line:
test_end = True
+ test_options_string = ", ".join(self.test_options)
+
test_results_ = []
for test in test_results:
try:
@@ -140,6 +247,22 @@ def _process_test_output(self):
info="".join(test[3])[:16384],
)
)
+
+ if test[1] == "FAIL":
+ broken_message = test_is_known_fail(
+ test[0],
+ test_results_[-1].info,
+ known_broken_tests,
+ test_options_string,
+ )
+
+ if broken_message:
+ broken += 1
+ failed -= 1
+ test_results_[-1].set_status(Result.StatusExtended.BROKEN)
+ test_results_[-1].set_label(Result.Label.BROKEN)
+ test_results_[-1].info += "\nMarked as broken: " + broken_message
+
except Exception as e:
print(f"ERROR: Failed to parse test results: {test}")
traceback.print_exc()
@@ -165,6 +288,7 @@ def _process_test_output(self):
unknown=unknown,
failed=failed,
success=success,
+ broken=broken,
test_results=test_results,
hung=hung,
server_died=server_died,
@@ -232,7 +356,7 @@ def run(self):
pass
if not info:
- info = f"Failed: {s.failed}, Passed: {s.success}, Skipped: {s.skipped}"
+ info = f"Failed: {s.failed}, Passed: {s.success}, Skipped: {s.skipped}, Broken: {s.broken}"
result = Result.create_from(
name="Tests",
diff --git a/ci/jobs/scripts/fuzzer/run-fuzzer.sh b/ci/jobs/scripts/fuzzer/run-fuzzer.sh
index e8989e227420..91bc5827d847 100755
--- a/ci/jobs/scripts/fuzzer/run-fuzzer.sh
+++ b/ci/jobs/scripts/fuzzer/run-fuzzer.sh
@@ -58,8 +58,8 @@ EOL
$PWD
EOL
-
- (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_config) || { echo "Failed to create log export config"; exit 1; }
+    # NOTE (strtgbb): Log tables are disabled; we don't use them
+ # (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_config) || { echo "Failed to create log export config"; exit 1; }
}
function filter_exists_and_template
@@ -183,7 +183,8 @@ function fuzz
echo 'Server started and responded.'
- (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_start) || { echo "Failed to start log exports"; exit 1; }
+    # NOTE (strtgbb): Log tables are disabled; we don't use them
+ # (cd $repo_dir && python3 $repo_dir/ci/jobs/scripts/clickhouse_proc.py logs_export_start) || { echo "Failed to start log exports"; exit 1; }
# Setup arguments for the fuzzer
FUZZER_OUTPUT_SQL_FILE=''
diff --git a/ci/jobs/scripts/integration_tests_runner.py b/ci/jobs/scripts/integration_tests_runner.py
index 10771657ba5d..f3ad2a8d7db6 100755
--- a/ci/jobs/scripts/integration_tests_runner.py
+++ b/ci/jobs/scripts/integration_tests_runner.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
import csv
import glob
import heapq
@@ -11,10 +13,11 @@
import signal
import subprocess
import time
+from functools import lru_cache
from collections import OrderedDict, defaultdict
from itertools import chain
from statistics import median
-from typing import Any, Dict, Final, List, Optional, Set, Tuple
+from typing import Any, Dict, Final, List, Optional, Set, Tuple, Union
import requests
import yaml # type: ignore[import-untyped]
@@ -23,20 +26,21 @@
from ci.praktika.info import Info
from ci.praktika.utils import Shell
-CLICKHOUSE_PLAY_HOST = os.environ.get("CLICKHOUSE_PLAY_HOST", "play.clickhouse.com")
-CLICKHOUSE_PLAY_USER = os.environ.get("CLICKHOUSE_PLAY_USER", "play")
-CLICKHOUSE_PLAY_PASSWORD = os.environ.get("CLICKHOUSE_PLAY_PASSWORD", "")
-CLICKHOUSE_PLAY_DB = os.environ.get("CLICKHOUSE_PLAY_DB", "default")
-CLICKHOUSE_PLAY_URL = f"https://{CLICKHOUSE_PLAY_HOST}/"
+CLICKHOUSE_PLAY_HOST = os.environ.get("CHECKS_DATABASE_HOST", "play.clickhouse.com")
+CLICKHOUSE_PLAY_USER = os.environ.get("CLICKHOUSE_TEST_STAT_LOGIN", "play")
+CLICKHOUSE_PLAY_PASSWORD = os.environ.get("CLICKHOUSE_TEST_STAT_PASSWORD", "")
+CLICKHOUSE_PLAY_DB = os.environ.get("CLICKHOUSE_PLAY_DB", "gh-data")
+CLICKHOUSE_PLAY_URL = f"https://{CLICKHOUSE_PLAY_HOST}:8443/"
+
-MAX_RETRY = 1
+MAX_RETRY = 2
NUM_WORKERS = 4
SLEEP_BETWEEN_RETRIES = 5
PARALLEL_GROUP_SIZE = 100
CLICKHOUSE_BINARY_PATH = "usr/bin/clickhouse"
-FLAKY_TRIES_COUNT = 2 # run whole pytest several times
-FLAKY_REPEAT_COUNT = 3 # runs test case in single module several times
+FLAKY_TRIES_COUNT = 3 # run whole pytest several times
+FLAKY_REPEAT_COUNT = 5 # runs test case in single module several times
MAX_TIME_SECONDS = 3600
MAX_TIME_IN_SANDBOX = 20 * 60 # 20 minutes
@@ -91,6 +95,39 @@ def filter_existing_tests(tests_to_run, repo_path):
return result
+@lru_cache(maxsize=None)
+def extract_fail_logs(log_path: str) -> dict[str, str]:
+ try:
+ with open(log_path, "r", encoding="utf-8") as log_file:
+ text = log_file.read()
+ except UnicodeDecodeError:
+ logging.warning(
+ "Failed to read log file %s as UTF-8, using errors='replace'",
+ log_path,
+ )
+ with open(log_path, "r", encoding="utf-8", errors="replace") as log_file:
+ text = log_file.read()
+
+ # Regex matches:
+ # - a line like "_____ test_something [param] _____"
+ # - captures the test name (with params)
+ # - captures everything up to the next header or end of file
+ pattern = re.compile(
+ r"_{5,}\s+([^\s].*?)\s+_{5,}(.*?)(?=_{5,}\s+[^\s].*?\s+_{5,}|\Z)",
+ re.S,
+ )
+
+ results = {}
+ for match in pattern.finditer(text):
+ test_name, body = match.groups()
+
+ # Keep only sections that include a failure or captured log
+ if "Captured log" in body or "FAILED" in body:
+ results[test_name.strip()] = body.strip()
+
+ return results
+
+
def _get_deselect_option(tests):
return " ".join([f"--deselect {t}" for t in tests])
@@ -110,7 +147,7 @@ def clear_ip_tables_and_restart_daemons():
try:
logging.info("Killing all alive docker containers")
subprocess.check_output(
- "timeout --verbose --signal=KILL 10m docker ps --quiet | xargs --no-run-if-empty docker kill",
+ "timeout --verbose --signal=KILL 3h docker ps --quiet | xargs --no-run-if-empty docker kill",
shell=True,
)
except subprocess.CalledProcessError as err:
@@ -119,7 +156,7 @@ def clear_ip_tables_and_restart_daemons():
try:
logging.info("Removing all docker containers")
subprocess.check_output(
- "timeout --verbose --signal=KILL 10m docker ps --all --quiet | xargs --no-run-if-empty docker rm --force",
+ "timeout --verbose --signal=KILL 3h docker ps --all --quiet | xargs --no-run-if-empty docker rm --force",
shell=True,
)
except subprocess.CalledProcessError as err:
@@ -320,7 +357,7 @@ def all_tests(self) -> List[str]:
out_file_full = os.path.join(self.result_path, "runner_get_all_tests.log")
report_file = "runner_get_all_tests.jsonl"
cmd = (
- f"cd {self.repo_path}/tests/integration && PYTHONPATH='../..:.' timeout --verbose --signal=KILL 2m ./runner {runner_opts} {image_cmd} -- "
+ f"cd {self.repo_path}/tests/integration && PYTHONPATH='../..:.' timeout --verbose --signal=KILL 3h ./runner {runner_opts} {image_cmd} -- "
f"--setup-plan --report-log={report_file}"
)
@@ -376,6 +413,52 @@ def _get_parallel_tests_skip_list(repo_path):
skip_list_tests = yaml.safe_load(skip_list_file)
return list(sorted(skip_list_tests))
+ @staticmethod
+ def _get_broken_tests_rules(repo_path: str) -> dict:
+ broken_tests_file_path = f"{repo_path}/tests/broken_tests.yaml"
+ if (
+ not os.path.isfile(broken_tests_file_path)
+ or os.path.getsize(broken_tests_file_path) == 0
+ ):
+ raise ValueError(
+ "There is something wrong with getting broken tests rules: "
+ f"file '{broken_tests_file_path}' is empty or does not exist."
+ )
+
+ with open(broken_tests_file_path, "r", encoding="utf-8") as broken_tests_file:
+ broken_tests = yaml.safe_load(broken_tests_file)
+
+ compiled_rules = {"exact": {}, "pattern": {}}
+
+ for test in broken_tests:
+ regex = test.get("regex") is True
+ rule = {
+ "reason": test["reason"],
+ }
+ if test.get("message"):
+ rule["message"] = re.compile(test["message"]) if regex else test["message"]
+
+ if test.get("not_message"):
+ rule["not_message"] = (
+ re.compile(test["not_message"]) if regex else test["not_message"]
+ )
+ if test.get("check_types"):
+ rule["check_types"] = test["check_types"]
+
+ if regex:
+ rule["regex"] = True
+ compiled_rules["pattern"][re.compile(test["name"])] = rule
+ else:
+ compiled_rules["exact"][test["name"]] = rule
+
+ logging.info(
+ "Compiled %s exact rules and %s pattern rules",
+ len(compiled_rules["exact"]),
+ len(compiled_rules["pattern"]),
+ )
+
+ return compiled_rules
+
@staticmethod
def group_test_by_file(tests):
result = OrderedDict() # type: OrderedDict
@@ -410,6 +493,118 @@ def _update_counters(
for test in current_counters[state]:
main_counters[state].append(test)
+ def _handle_broken_tests(
+ self,
+ counters: Dict[str, List[str]],
+ known_broken_tests: Dict[str, Dict[str, str]],
+ log_paths: Union[Dict[str, List[str]], List[str]],
+ ) -> None:
+
+ context_name = self.params["context_name"]
+
+ def get_log_paths(test_name):
+ """Could be a list of logs for all tests or a dict with test name as a key"""
+ log_paths_list = (
+ log_paths[test_name] if isinstance(log_paths, dict) else log_paths
+ )
+ return sorted(
+ [x for x in log_paths_list if x.endswith(".log")], reverse=True
+ )
+
+ broken_tests_log = os.path.join(self.result_path, "broken_tests_handler.log")
+
+ def test_is_known_fail(test_name, test_logs, debug_log_file):
+ if test_logs is None:
+ debug_log_file.write(
+ f"WARNING: Test '{test_name}' has no logs - cannot match message patterns\n"
+ )
+
+ matching_rules = []
+
+ debug_log_file.write("Potential matching rules:\n")
+ exact_rule = known_broken_tests["exact"].get(test_name)
+ if exact_rule:
+ debug_log_file.write(f"{test_name} - {exact_rule}\n")
+ matching_rules.append(exact_rule)
+
+ for name_re, data in known_broken_tests["pattern"].items():
+ if name_re.fullmatch(test_name):
+ debug_log_file.write(f"{name_re} - {data}\n")
+ matching_rules.append(data)
+
+ if not matching_rules:
+ return False
+
+ def matches_substring(substring, log, is_regex):
+ if log is None:
+ # Cannot match a message pattern if we have no logs
+ return False
+ if is_regex:
+ return bool(substring.search(log))
+ return substring in log
+
+ for rule_data in matching_rules:
+ if rule_data.get("check_types") and not any(
+ ct in context_name for ct in rule_data["check_types"]
+ ):
+ debug_log_file.write(f"Check types didn't match: '{rule_data['check_types']}' not in '{context_name}'\n")
+ continue
+
+ is_regex = rule_data.get("regex", False)
+ not_message = rule_data.get("not_message")
+ if not_message and matches_substring(not_message, test_logs, is_regex):
+ debug_log_file.write(f"Skip rule: Not message matched: '{not_message}'\n")
+ continue
+ message = rule_data.get("message")
+ if message and not matches_substring(message, test_logs, is_regex):
+ debug_log_file.write(f"Skip rule: Message didn't match: '{message}'\n")
+ continue
+
+ debug_log_file.write(f"Test {test_name} matched rule: {rule_data}\n")
+ return rule_data["reason"]
+
+ return False
+
+ with open(broken_tests_log, "a") as log_file:
+ log_file.write(
+ f"{len(known_broken_tests['exact']) + len(known_broken_tests['pattern'])} Known broken tests\n"
+ )
+ for status, tests in counters.items():
+ log_file.write(f"Total tests in {status} state: {len(tests)}\n")
+
+ for fail_status in ("ERROR", "FAILED"):
+ for failed_test in counters[fail_status].copy():
+ log_file.write(
+ f"Checking test {failed_test} (status: {fail_status})\n"
+ )
+
+ # Should only care about the most recent log file
+ log_path = get_log_paths(failed_test)[0]
+ test_logs = extract_fail_logs(log_path)
+ test_log = test_logs.get(failed_test.split("::")[-1])
+ if test_log is None:
+ # Log extraction can fail if the fail was in the teardown
+ log_file.write(
+ f"WARNING: Test '{failed_test}' has no logs among {list(test_logs.keys())}, assuming log extraction failed, proceeding with full log\n"
+ )
+ with open(log_path, "r", encoding="utf-8") as f:
+ test_log = f.read()
+
+ known_fail_reason = test_is_known_fail(
+ failed_test, test_log, log_file
+ )
+ if known_fail_reason is not False:
+ log_file.write(
+ f"Test {failed_test} is known to fail: {known_fail_reason}\n"
+ )
+ counters[fail_status].remove(failed_test)
+ counters["BROKEN"].append(failed_test)
+ else:
+ log_file.write(f"Test {failed_test} is not known to fail\n")
+
+ for status, tests in counters.items():
+ log_file.write(f"Total tests in {status} state: {len(tests)}\n")
+
def _get_runner_image_cmd(self):
image_cmd = ""
if self._can_run_with(
@@ -417,7 +612,7 @@ def _get_runner_image_cmd(self):
"--docker-image-version",
):
for img in IMAGES:
- if img == "clickhouse/integration-tests-runner":
+ if img == "altinityinfra/integration-tests-runner":
runner_version = self.get_image_version(img)
logging.info(
"Can run with custom docker image version %s", runner_version
@@ -684,6 +879,7 @@ def run_flaky_check(self, should_fail=False):
} # type: Dict
tests_times = defaultdict(float) # type: Dict
tests_log_paths = defaultdict(list)
+ known_broken_tests = self._get_broken_tests_rules(self.repo_path)
id_counter = 0
for test_to_run in tests_to_run:
tries_num = 1 if should_fail else FLAKY_TRIES_COUNT
@@ -701,6 +897,10 @@ def run_flaky_check(self, should_fail=False):
1,
FLAKY_REPEAT_COUNT,
)
+
+ # Handle broken tests on the group counters that contain test results for a single group
+ self._handle_broken_tests(group_counters, known_broken_tests, log_paths)
+
id_counter = id_counter + 1
for counter, value in group_counters.items():
logging.info(
@@ -803,11 +1003,11 @@ def get_tests_execution_time(self):
SELECT
splitByString('::', test_name)[1] AS file,
median(test_duration_ms) AS test_duration_ms
- FROM checks
+ FROM `{CLICKHOUSE_PLAY_DB}`.checks
WHERE (check_name LIKE 'Integration%')
AND (check_start_time >= ({start_time_filter} - toIntervalDay(30)))
AND (check_start_time <= ({start_time_filter} - toIntervalHour(2)))
- AND ((head_ref = 'master') AND startsWith(head_repo, 'ClickHouse/'))
+ AND (head_ref LIKE 'antalya-25.8%' OR head_ref LIKE 'releases/25.8%' OR head_ref LIKE 'rebase-cicd-v25.8%')
AND (test_name != '')
AND (test_status != 'SKIPPED')
GROUP BY test_name
@@ -826,6 +1026,11 @@ def get_tests_execution_time(self):
max_retries = 3
retry_delay_seconds = 5
+ headers = {
+ "X-ClickHouse-User": CLICKHOUSE_PLAY_USER,
+ "X-ClickHouse-Key": CLICKHOUSE_PLAY_PASSWORD,
+ }
+
for attempt in range(max_retries):
try:
logging.info(
@@ -834,7 +1039,12 @@ def get_tests_execution_time(self):
max_retries,
)
- response = requests.get(url, timeout=120)
+ response = requests.post(
+ CLICKHOUSE_PLAY_URL,
+ timeout=120,
+ headers=headers,
+ params={"query": query},
+ )
response.raise_for_status()
result_data = response.json().get("data", [])
tests_execution_times = {
@@ -1107,6 +1317,7 @@ def run_normal_check(self):
tests_times = defaultdict(float)
tests_log_paths = defaultdict(list)
items_to_run = list(grouped_tests.items())
+ known_broken_tests = self._get_broken_tests_rules(self.repo_path)
logging.info("Total test groups %s", len(items_to_run))
if self.shuffle_test_groups():
logging.info("Shuffling test groups")
@@ -1119,6 +1330,10 @@ def run_normal_check(self):
group_counters, group_test_times, log_paths = self.try_run_test_group(
"2h", group, tests, MAX_RETRY, NUM_WORKERS, 0
)
+
+ # Handle broken tests on the group counters that contain test results for a single group
+ self._handle_broken_tests(group_counters, known_broken_tests, log_paths)
+
total_tests = 0
for counter, value in group_counters.items():
logging.info(
@@ -1169,7 +1384,7 @@ def run_normal_check(self):
for c in counters[state]
]
failed_sum = len(counters["FAILED"]) + len(counters["ERROR"])
- status_text = f"fail: {failed_sum}, passed: {len(counters['PASSED'])}"
+ status_text = f"fail: {failed_sum}, passed: {len(counters['PASSED'])}, broken: {len(counters['BROKEN'])}"
if not counters or sum(len(counter) for counter in counters.values()) == 0:
status_text = "No tests found for some reason! It's a bug"
@@ -1225,7 +1440,7 @@ def run():
timeout_expired = False
-runner_subprocess = None # type:Optional[TeePopen]
+runner_subprocess = None
def handle_sigterm(signum, _frame):
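
The extract_fail_logs pattern splits a pytest log on its underscored section headers; a quick check against a synthetic log (the regex is copied from the function above, the sample text is made up):

```python
import re

pattern = re.compile(
    r"_{5,}\s+([^\s].*?)\s+_{5,}(.*?)(?=_{5,}\s+[^\s].*?\s+_{5,}|\Z)",
    re.S,
)

sample = (
    "_____ test_backup[replicated] _____\n"
    "E   AssertionError\nFAILED\n"
    "_____ test_restore _____\n"
    "passed, nothing captured\n"
)

sections = {
    name.strip(): body.strip()
    for name, body in pattern.findall(sample)
    # keep only sections that look like failures, as the function does
    if "Captured log" in body or "FAILED" in body
}
print(sections)  # {'test_backup[replicated]': 'E   AssertionError\nFAILED'}
```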
diff --git a/ci/jobs/scripts/workflow_hooks/filter_job.py b/ci/jobs/scripts/workflow_hooks/filter_job.py
index 1f35939f6189..e8921e2b378c 100644
--- a/ci/jobs/scripts/workflow_hooks/filter_job.py
+++ b/ci/jobs/scripts/workflow_hooks/filter_job.py
@@ -167,4 +167,9 @@ def should_skip_job(job_name):
return False, ""
return True, "Skipped, not labeled with 'pr-performance'"
+ ci_exclude_tags = _info_cache.get_kv_data("ci_exclude_tags") or []
+ for tag in ci_exclude_tags:
+ if tag in job_name:
+ return True, f"Skipped, job name includes excluded tag '{tag}'"
+
return False, ""
diff --git a/ci/jobs/scripts/workflow_hooks/parse_ci_tags.py b/ci/jobs/scripts/workflow_hooks/parse_ci_tags.py
new file mode 100644
index 000000000000..c28f59b552ee
--- /dev/null
+++ b/ci/jobs/scripts/workflow_hooks/parse_ci_tags.py
@@ -0,0 +1,18 @@
+import re
+
+from ci.praktika.info import Info
+
+
+def get_ci_tags(pr_body, tag_prefix):
+ pattern = rf"(- \[x\] +
- true
- true
- https://crash.clickhouse.com/
+ false
+ false
+
diff --git a/programs/server/dashboard.html b/programs/server/dashboard.html
index 727d65316a7a..f43bf4698e06 100644
--- a/programs/server/dashboard.html
+++ b/programs/server/dashboard.html
@@ -3,7 +3,7 @@
ClickHouse Dashboard
-
+