
Commit a019df8

Merge branch 'feature/aquav1.0.1' into ODSC-55771/fix_telemetry_name
2 parents: 2fdadef + 4a6bb03

File tree: 7 files changed, +38 -33 lines

.github/workflows/run-unittests-default_setup.yml

Lines changed: 6 additions & 0 deletions
@@ -4,6 +4,12 @@ on:
   workflow_dispatch:
   pull_request:
     branches: [ "main" ]
+    paths:
+      - "ads/**"
+      - "!ads/opctl/operator/**"
+      - "!ads/feature_store/**"
+      - "pyproject.toml"
+
 
 # Cancel in progress workflows on pull_requests.
 # https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value

.github/workflows/run-unittests-py38-cov-report.yml

Lines changed: 5 additions & 0 deletions
@@ -4,6 +4,11 @@ on:
   workflow_dispatch:
   pull_request:
     branches: [ "main" ]
+    paths:
+      - "ads/**"
+      - "!ads/opctl/operator/**"
+      - "!ads/feature_store/**"
+      - "pyproject.toml"
 
 # Cancel in progress workflows on pull_requests.
 # https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value

.github/workflows/run-unittests-py39-py310.yml

Lines changed: 6 additions & 0 deletions
@@ -3,6 +3,12 @@ name: "[Py3.9][Py3.10] - tests/unitary/**"
 on:
   workflow_dispatch:
   pull_request:
+    branches: [ "main" ]
+    paths:
+      - "ads/**"
+      - "!ads/opctl/operator/**"
+      - "!ads/feature_store/**"
+      - "pyproject.toml"
 
 # Cancel in progress workflows on pull_requests.
 # https://docs.github.com/en/actions/using-jobs/using-concurrency#example-using-a-fallback-value
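
All three workflow files gain the same trigger filter: unit tests now run only for pull requests against main that touch the ads package (excluding the operator and feature-store subtrees) or pyproject.toml. GitHub evaluates these include/exclude globs in order, and the last matching pattern wins. The sketch below emulates that selection logic in Python; fnmatch is only an approximation of GitHub's glob engine, and the helper name is illustrative.

# Rough emulation of GitHub's ordered include/exclude path matching.
# fnmatch is an approximation ("*" and "**" both cross "/" here), but it
# illustrates the last-match-wins semantics of the filters above.
from fnmatch import fnmatch

PATH_FILTERS = [
    "ads/**",
    "!ads/opctl/operator/**",
    "!ads/feature_store/**",
    "pyproject.toml",
]

def triggers_workflow(changed_path: str) -> bool:
    matched = False
    for pattern in PATH_FILTERS:
        negated = pattern.startswith("!")
        if fnmatch(changed_path, pattern.lstrip("!")):
            matched = not negated  # last matching pattern wins
    return matched

assert triggers_workflow("ads/aqua/evaluation.py")
assert not triggers_workflow("ads/feature_store/entity.py")
assert not triggers_workflow("docs/source/release_notes.rst")
assert triggers_workflow("pyproject.toml")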

ads/aqua/evaluation.py

Lines changed: 12 additions & 23 deletions
@@ -862,13 +862,13 @@ def get(self, eval_id) -> AquaEvaluationDetail:
         logger.info(f"Fetching evaluation: {eval_id} details ...")
 
         resource = utils.query_resource(eval_id)
-        model_provenance = self.ds_client.get_model_provenance(eval_id).data
-
         if not resource:
             raise AquaRuntimeError(
                 f"Failed to retrieve evalution {eval_id}."
                 "Please check if the OCID is correct."
             )
+        model_provenance = self.ds_client.get_model_provenance(eval_id).data
+
         jobrun_id = model_provenance.training_id
         job_run_details = self._fetch_jobrun(
             resource, use_rqs=False, jobrun_id=jobrun_id
@@ -1067,14 +1067,14 @@ def get_status(self, eval_id: str) -> dict:
         """
         eval = utils.query_resource(eval_id)
 
-        # TODO: add job_run_id as input param to skip the query below
-        model_provenance = self.ds_client.get_model_provenance(eval_id).data
-
         if not eval:
             raise AquaRuntimeError(
                 f"Failed to retrieve evalution {eval_id}."
                 "Please check if the OCID is correct."
             )
+
+        model_provenance = self.ds_client.get_model_provenance(eval_id).data
+
         jobrun_id = model_provenance.training_id
         job_run_details = self._fetch_jobrun(eval, use_rqs=False, jobrun_id=jobrun_id)
 
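The two hunks above apply the same fix: the get_model_provenance call now happens only after the resource lookup is validated, so an invalid OCID raises the readable AquaRuntimeError instead of an opaque service exception from the provenance call (which is also why the tests below drop their get_model_provenance mocks). A minimal sketch of the validate-then-fetch ordering, with hypothetical stubs standing in for the real clients:

# Sketch of the validate-then-fetch ordering used in both hunks above.
# query_resource and get_provenance are hypothetical stand-ins for
# utils.query_resource and ds_client.get_model_provenance.
def get_details(eval_id, query_resource, get_provenance):
    resource = query_resource(eval_id)
    if not resource:
        # Fail fast with a readable message; calling the provenance
        # service first would surface an opaque error instead.
        raise RuntimeError(f"Failed to retrieve evaluation {eval_id}.")
    provenance = get_provenance(eval_id)
    return resource, provenance.training_id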

@@ -1324,7 +1324,10 @@ def cancel(self, eval_id) -> dict:
             raise AquaRuntimeError(
                 f"Failed to get evaluation details for model {eval_id}"
             )
-        job_run_id = model.provenance_metadata.training_id
+
+        job_run_id = (
+            model.provenance_metadata.training_id if model.provenance_metadata else None
+        )
         if not job_run_id:
             raise AquaMissingKeyError(
                 "Model provenance is missing job run training_id key"
@@ -1387,7 +1390,7 @@ def delete(self, eval_id):
             job_id = model.custom_metadata_list.get(
                 EvaluationCustomMetadata.EVALUATION_JOB_ID.value
             ).value
-        except ValueError:
+        except Exception:
             raise AquaMissingKeyError(
                 f"Custom metadata is missing {EvaluationCustomMetadata.EVALUATION_JOB_ID.value} key"
             )
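Widening except ValueError to except Exception covers every way the lookup can fail: get may raise for a missing key, but it may also return None, in which case the .value access raises AttributeError. Either way the caller now sees one well-defined missing-key error. A hedged sketch of this error-translation pattern, using a hypothetical metadata stub:

# Sketch of broad error translation: any failure while reading required
# metadata is converted into a single missing-key error. The metadata
# object and read_required_metadata are illustrative only.
def read_required_metadata(metadata, key: str):
    try:
        # .get may raise for a missing key, or return None so that the
        # .value access raises AttributeError -- both paths land below.
        return metadata.get(key).value
    except Exception:
        raise KeyError(f"Custom metadata is missing {key} key")
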
@@ -1419,7 +1422,7 @@ def _delete_job_and_model(job, model):
         )
 
     def load_evaluation_config(self, eval_id):
-        # TODO
+        """Loads evaluation config."""
         return {
             "model_params": {
                 "max_tokens": 500,
@@ -1597,20 +1600,6 @@ def _build_resource_identifier(
             )
         return AquaResourceIdentifier()
 
-    def _get_jobrun(
-        self, model: oci.resource_search.models.ResourceSummary, mapping: dict = {}
-    ) -> Union[
-        oci.resource_search.models.ResourceSummary, oci.data_science.models.JobRun
-    ]:
-        jobrun_id = self._get_attribute_from_model_metadata(
-            model, EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID.value
-        )
-        job_run = mapping.get(jobrun_id)
-
-        if not job_run:
-            job_run = self._fetch_jobrun(model, use_rqs=True, jobrun_id=jobrun_id)
-        return job_run
-
     def _fetch_jobrun(
         self,
         resource: oci.resource_search.models.ResourceSummary,
@@ -1787,7 +1776,7 @@ def _extract_job_lifecycle_details(self, lifecycle_details: str) -> str:
         Examples
         --------
         >>> _extract_job_lifecycle_details("Job run artifact execution failed with exit code 16")
-        'The evaluation configuration is invalid due to content validation errors.'
+        'Validation errors in the evaluation config. Exit code: 16.'
 
         >>> _extract_job_lifecycle_details("Job completed successfully.")
         'Job completed successfully.'
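The updated doctest output echoes the numeric exit code ('Exit code: 16.'), which suggests the message is keyed off a code parsed from the lifecycle details. A speculative sketch of such parsing; the real implementation and its code-to-message table may differ:

# Sketch: pull the numeric exit code out of a job lifecycle message.
# extract_exit_code is illustrative, not the module's actual function.
import re

def extract_exit_code(lifecycle_details: str):
    match = re.search(r"exit code (\d+)", lifecycle_details)
    return int(match.group(1)) if match else None

assert extract_exit_code(
    "Job run artifact execution failed with exit code 16"
) == 16
assert extract_exit_code("Job completed successfully.") is None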

ads/aqua/extension/common_handler.py

Lines changed: 2 additions & 2 deletions
@@ -7,10 +7,10 @@
 from importlib import metadata
 
 from ads.aqua import ODSC_MODEL_COMPARTMENT_OCID
-from ads.aqua.exception import AquaResourceAccessError
-from ads.aqua.utils import known_realm
 from ads.aqua.decorator import handle_exceptions
+from ads.aqua.exception import AquaResourceAccessError
 from ads.aqua.extension.base_handler import AquaAPIhandler
+from ads.aqua.utils import known_realm
 
 
 class ADSVersionHandler(AquaAPIhandler):

docs/source/release_notes.rst

Lines changed: 3 additions & 2 deletions
@@ -4,9 +4,10 @@ Release Notes
 
 2.11.7
 ------
-Release date: April 8, 2024
+Release date: April 18, 2024
 
-* Fixed bugs and introduced enhancements following our recent release, which included internal adjustments for future features and updates for the Jupyter Lab 3 upgrade.
+* Fixed the bug in ``ADSDataset.show_in_notebook()``.
+* Updated langchain version.
 
 
 2.11.6

tests/unitary/with_extras/aqua/test_evaluation.py

Lines changed: 4 additions & 6 deletions
@@ -606,7 +606,6 @@ def test_get(self, mock_get_model_provenance_response, mock_get_job_run_response
     def test_get_fail(self, mock_query_resource):
         """Tests get evaluation details failed because of invalid eval id."""
         mock_query_resource.return_value = None
-        self.app.ds_client.get_model_provenance = MagicMock()
         with self.assertRaises(AquaRuntimeError) as context:
             self.app.get(TestDataset.INVALID_EVAL_ID)
 

@@ -714,10 +713,10 @@ def test_cancel_evaluation(
     @parameterized.expand(
         [
             (None, AquaRuntimeError),
-            # (
-            #     DataScienceModel(),
-            #     AquaMissingKeyError,
-            # ),
+            (
+                DataScienceModel(),
+                AquaMissingKeyError,
+            ),
         ]
     )
     @patch.object(DataScienceModel, "from_id")
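Re-enabling the commented-out case means the test now covers both failure modes: a None model raising AquaRuntimeError, and a bare DataScienceModel() with no provenance metadata raising AquaMissingKeyError, which exercises the new guard in cancel. For reference, a minimal sketch of how parameterized.expand fans tuples out into separate test invocations (class and assertions are illustrative):

# Minimal sketch of parameterized.expand: each tuple becomes its own
# test invocation. Requires the "parameterized" package.
import unittest
from parameterized import parameterized

class CancelGuardTest(unittest.TestCase):
    @parameterized.expand([
        (None, "no model found"),
        ("model-without-provenance", "missing training_id key"),
    ])
    def test_failure_modes(self, model, expected_reason):
        # One run per tuple, each asserting its own expected outcome.
        self.assertIsNotNone(expected_reason)
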
@@ -859,7 +858,6 @@ def test_get_status_when_missing_jobrun(
     def test_get_status_failed(self, mock_query_resource):
         """Tests when no correct evaluation found."""
         mock_query_resource.return_value = None
-        self.app.ds_client.get_model_provenance = MagicMock()
         with self.assertRaises(AquaRuntimeError) as context:
             self.app.get_status(TestDataset.INVALID_EVAL_ID)
 
