oracle-ads 2.11.6__py3-none-any.whl → 2.11.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. ads/aqua/__init__.py +24 -14
  2. ads/aqua/base.py +0 -2
  3. ads/aqua/cli.py +50 -2
  4. ads/aqua/decorator.py +8 -0
  5. ads/aqua/deployment.py +37 -34
  6. ads/aqua/evaluation.py +106 -49
  7. ads/aqua/extension/base_handler.py +18 -10
  8. ads/aqua/extension/common_handler.py +21 -2
  9. ads/aqua/extension/deployment_handler.py +1 -4
  10. ads/aqua/extension/evaluation_handler.py +1 -2
  11. ads/aqua/extension/finetune_handler.py +0 -1
  12. ads/aqua/extension/ui_handler.py +1 -12
  13. ads/aqua/extension/utils.py +4 -4
  14. ads/aqua/finetune.py +24 -11
  15. ads/aqua/model.py +2 -4
  16. ads/aqua/utils.py +39 -23
  17. ads/catalog/model.py +3 -3
  18. ads/catalog/notebook.py +3 -3
  19. ads/catalog/project.py +2 -2
  20. ads/catalog/summary.py +2 -4
  21. ads/cli.py +21 -2
  22. ads/common/serializer.py +5 -4
  23. ads/common/utils.py +6 -2
  24. ads/config.py +1 -0
  25. ads/data_labeling/metadata.py +2 -2
  26. ads/dataset/dataset.py +3 -5
  27. ads/dataset/factory.py +2 -3
  28. ads/dataset/label_encoder.py +1 -1
  29. ads/dataset/sampled_dataset.py +3 -5
  30. ads/jobs/ads_job.py +26 -2
  31. ads/jobs/builders/infrastructure/dsc_job.py +20 -7
  32. ads/llm/serializers/runnable_parallel.py +7 -1
  33. ads/model/model_artifact_boilerplate/artifact_introspection_test/model_artifact_validate.py +1 -1
  34. ads/opctl/operator/lowcode/anomaly/README.md +1 -1
  35. ads/opctl/operator/lowcode/anomaly/environment.yaml +1 -1
  36. ads/opctl/operator/lowcode/anomaly/model/anomaly_dataset.py +8 -15
  37. ads/opctl/operator/lowcode/anomaly/model/automlx.py +16 -10
  38. ads/opctl/operator/lowcode/anomaly/model/autots.py +9 -10
  39. ads/opctl/operator/lowcode/anomaly/model/base_model.py +36 -39
  40. ads/opctl/operator/lowcode/anomaly/model/tods.py +4 -4
  41. ads/opctl/operator/lowcode/anomaly/operator_config.py +18 -1
  42. ads/opctl/operator/lowcode/anomaly/schema.yaml +16 -4
  43. ads/opctl/operator/lowcode/common/data.py +16 -2
  44. ads/opctl/operator/lowcode/common/transformations.py +48 -14
  45. ads/opctl/operator/lowcode/forecast/README.md +1 -1
  46. ads/opctl/operator/lowcode/forecast/environment.yaml +5 -4
  47. ads/opctl/operator/lowcode/forecast/model/arima.py +36 -29
  48. ads/opctl/operator/lowcode/forecast/model/automlx.py +91 -90
  49. ads/opctl/operator/lowcode/forecast/model/autots.py +200 -166
  50. ads/opctl/operator/lowcode/forecast/model/base_model.py +144 -140
  51. ads/opctl/operator/lowcode/forecast/model/neuralprophet.py +86 -80
  52. ads/opctl/operator/lowcode/forecast/model/prophet.py +68 -63
  53. ads/opctl/operator/lowcode/forecast/operator_config.py +18 -2
  54. ads/opctl/operator/lowcode/forecast/schema.yaml +20 -4
  55. ads/opctl/operator/lowcode/forecast/utils.py +8 -4
  56. ads/opctl/operator/lowcode/pii/README.md +1 -1
  57. ads/opctl/operator/lowcode/pii/environment.yaml +1 -1
  58. ads/opctl/operator/lowcode/pii/model/report.py +71 -70
  59. ads/pipeline/ads_pipeline_step.py +11 -12
  60. {oracle_ads-2.11.6.dist-info → oracle_ads-2.11.8.dist-info}/METADATA +8 -7
  61. {oracle_ads-2.11.6.dist-info → oracle_ads-2.11.8.dist-info}/RECORD +64 -64
  62. {oracle_ads-2.11.6.dist-info → oracle_ads-2.11.8.dist-info}/LICENSE.txt +0 -0
  63. {oracle_ads-2.11.6.dist-info → oracle_ads-2.11.8.dist-info}/WHEEL +0 -0
  64. {oracle_ads-2.11.6.dist-info → oracle_ads-2.11.8.dist-info}/entry_points.txt +0 -0
ads/aqua/__init__.py CHANGED
@@ -4,25 +4,35 @@
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
 
 
-import logging
-import sys
 import os
+
+from ads import logger, set_auth
 from ads.aqua.utils import fetch_service_compartment
 from ads.config import OCI_RESOURCE_PRINCIPAL_VERSION
-from ads import set_auth
 
-logger = logging.getLogger(__name__)
-handler = logging.StreamHandler(sys.stdout)
-logger.setLevel(logging.INFO)
+ENV_VAR_LOG_LEVEL = "ADS_AQUA_LOG_LEVEL"
+
+
+def get_logger_level():
+    """Retrieves logging level from environment variable `ADS_AQUA_LOG_LEVEL`."""
+    level = os.environ.get(ENV_VAR_LOG_LEVEL, "INFO").upper()
+    return level
+
+
+logger.setLevel(get_logger_level())
+
+
+def set_log_level(log_level: str):
+    """Global for setting logging level."""
+
+    log_level = log_level.upper()
+    logger.setLevel(log_level.upper())
+    logger.handlers[0].setLevel(log_level)
+
 
 if OCI_RESOURCE_PRINCIPAL_VERSION:
     set_auth("resource_principal")
 
-ODSC_MODEL_COMPARTMENT_OCID = os.environ.get("ODSC_MODEL_COMPARTMENT_OCID")
-if not ODSC_MODEL_COMPARTMENT_OCID:
-    try:
-        ODSC_MODEL_COMPARTMENT_OCID = fetch_service_compartment()
-    except Exception as e:
-        logger.error(
-            f"ODSC_MODEL_COMPARTMENT_OCID environment variable is not set for Aqua, due to {e}."
-        )
+ODSC_MODEL_COMPARTMENT_OCID = (
+    os.environ.get("ODSC_MODEL_COMPARTMENT_OCID") or fetch_service_compartment()
+)
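
Note: the hunk above replaces the module-local logger with the shared ads logger and drives its level from the ADS_AQUA_LOG_LEVEL environment variable. A minimal sketch of how these knobs might be exercised, assuming oracle-ads 2.11.8 is installed (the exact handler setup lives in the ads package itself):

import os

# Assumption: ADS_AQUA_LOG_LEVEL must be set before importing ads.aqua,
# since get_logger_level() is evaluated at import time.
os.environ["ADS_AQUA_LOG_LEVEL"] = "DEBUG"

import ads.aqua

# The level can also be adjusted later via the new helper introduced in this hunk.
ads.aqua.set_log_level("WARNING")
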
ads/aqua/base.py CHANGED
@@ -19,7 +19,6 @@ from ads.aqua.utils import (
     get_artifact_path,
     is_valid_ocid,
     load_config,
-    logger,
 )
 from ads.common import oci_client as oc
 from ads.common.auth import default_signer
@@ -164,7 +163,6 @@ class AquaApp:
         tag = Tags.AQUA_FINE_TUNING.value
 
         if not model_version_set_id:
-            tag = Tags.AQUA_FINE_TUNING.value  # TODO: Fix this
             try:
                 model_version_set = ModelVersionSet.from_name(
                     name=model_version_set_name,
ads/aqua/cli.py CHANGED
@@ -3,17 +3,65 @@
 
 # Copyright (c) 2024 Oracle and/or its affiliates.
 # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
+import os
+import sys
 
+from ads.aqua import (
+    ENV_VAR_LOG_LEVEL,
+    set_log_level,
+    ODSC_MODEL_COMPARTMENT_OCID,
+    logger,
+)
 from ads.aqua.deployment import AquaDeploymentApp
+from ads.aqua.evaluation import AquaEvaluationApp
 from ads.aqua.finetune import AquaFineTuningApp
 from ads.aqua.model import AquaModelApp
-from ads.aqua.evaluation import AquaEvaluationApp
+from ads.config import NB_SESSION_OCID
+from ads.common.utils import LOG_LEVELS
 
 
 class AquaCommand:
-    """Contains the command groups for project Aqua."""
+    """Contains the command groups for project Aqua.
+
+    Acts as an entry point for managing different components of the Aqua
+    project including model management, fine-tuning, deployment, and
+    evaluation.
+    """
 
     model = AquaModelApp
     fine_tuning = AquaFineTuningApp
     deployment = AquaDeploymentApp
     evaluation = AquaEvaluationApp
+
+    def __init__(
+        self,
+        log_level: str = os.environ.get(ENV_VAR_LOG_LEVEL, "ERROR").upper(),
+    ):
+        """
+        Initialize the command line interface settings for the Aqua project.
+
+        FLAGS
+        -----
+        log_level (str):
+            Sets the logging level for the application.
+            Default is retrieved from environment variable `LOG_LEVEL`,
+            or 'ERROR' if not set. Example values include 'DEBUG', 'INFO',
+            'WARNING', 'ERROR', and 'CRITICAL'.
+        """
+        if log_level.upper() not in LOG_LEVELS:
+            logger.error(
+                f"Log level should be one of {LOG_LEVELS}. Setting default to ERROR."
+            )
+            log_level = "ERROR"
+        set_log_level(log_level)
+        # gracefully exit if env var is not set
+        if not ODSC_MODEL_COMPARTMENT_OCID:
+            logger.debug(
+                "ODSC_MODEL_COMPARTMENT_OCID environment variable is not set for Aqua."
+            )
+            if NB_SESSION_OCID:
+                logger.error(
+                    f"Aqua is not available for the notebook session {NB_SESSION_OCID}. For more information, "
+                    f"please refer to the documentation."
+                )
+            sys.exit(1)
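
Note: together with the __init__.py change, the new AquaCommand constructor lets a caller pick a log level per invocation and bails out early when the Aqua compartment variable is missing. A hedged sketch of the programmatic equivalent (the command-line flag spelling depends on the wiring in ads/cli.py and the entry points, so treat it as an assumption):

from ads.aqua.cli import AquaCommand

# Assumption: ODSC_MODEL_COMPARTMENT_OCID is set in this environment;
# otherwise __init__ logs and calls sys.exit(1) as shown in the diff above.
aqua = AquaCommand(log_level="debug")  # case-insensitive; invalid values fall back to ERROR

# The command groups exposed to the CLI are plain class attributes.
models_app = aqua.model  # AquaModelApp
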
ads/aqua/decorator.py CHANGED
@@ -17,6 +17,7 @@ from oci.exceptions import (
     RequestException,
     ServiceError,
 )
+from tornado.web import HTTPError
 
 from ads.aqua.exception import AquaError
 from ads.aqua.extension.base_handler import AquaAPIhandler
@@ -58,6 +59,7 @@ def handle_exceptions(func):
         except ServiceError as error:
             self.write_error(
                 status_code=error.status or 500,
+                message=error.message,
                 reason=error.message,
                 service_payload=error.args[0] if error.args else None,
                 exc_info=sys.exc_info(),
@@ -91,6 +93,12 @@ def handle_exceptions(func):
                 service_payload=error.service_payload,
                 exc_info=sys.exc_info(),
             )
+        except HTTPError as e:
+            self.write_error(
+                status_code=e.status_code,
+                reason=e.log_message,
+                exc_info=sys.exc_info(),
+            )
         except Exception as ex:
             self.write_error(
                 status_code=500,
ads/aqua/deployment.py CHANGED
@@ -22,6 +22,9 @@ from ads.aqua.utils import (
     UNKNOWN_DICT,
     get_resource_name,
     get_model_by_reference_paths,
+    get_ocid_substring,
+    AQUA_MODEL_TYPE_SERVICE,
+    AQUA_MODEL_TYPE_CUSTOM,
 )
 from ads.aqua.finetune import FineTuneCustomMetadata
 from ads.aqua.data import AquaResourceIdentifier
@@ -391,40 +394,27 @@ class AquaDeploymentApp(AquaApp):
             .with_runtime(container_runtime)
         ).deploy(wait_for_completion=False)
 
-        if is_fine_tuned_model:
-            # tracks unique deployments that were created in the user compartment
-            self.telemetry.record_event_async(
-                category="aqua/custom/deployment", action="create", detail=model_name
-            )
-            # tracks the shape used for deploying the custom models
-            self.telemetry.record_event_async(
-                category="aqua/custom/deployment/create",
-                action="shape",
-                detail=instance_shape,
-            )
-            # tracks the shape used for deploying the custom models by name
-            self.telemetry.record_event_async(
-                category=f"aqua/custom/{model_name}/deployment/create",
-                action="shape",
-                detail=instance_shape,
-            )
-        else:
-            # tracks unique deployments that were created in the user compartment
-            self.telemetry.record_event_async(
-                category="aqua/service/deployment", action="create", detail=model_name
-            )
-            # tracks the shape used for deploying the service models
-            self.telemetry.record_event_async(
-                category="aqua/service/deployment/create",
-                action="shape",
-                detail=instance_shape,
-            )
-            # tracks the shape used for deploying the service models by name
-            self.telemetry.record_event_async(
-                category=f"aqua/service/{model_name}/deployment/create",
-                action="shape",
-                detail=instance_shape,
-            )
+        model_type = (
+            AQUA_MODEL_TYPE_CUSTOM if is_fine_tuned_model else AQUA_MODEL_TYPE_SERVICE
+        )
+        deployment_id = deployment.dsc_model_deployment.id
+        # we arbitrarily choose last 8 characters of OCID to identify MD in telemetry
+        telemetry_kwargs = {"ocid": get_ocid_substring(deployment_id, key_len=8)}
+
+        # tracks unique deployments that were created in the user compartment
+        self.telemetry.record_event_async(
+            category=f"aqua/{model_type}/deployment",
+            action="create",
+            detail=model_name,
+            **telemetry_kwargs,
+        )
+        # tracks the shape used for deploying the custom or service models by name
+        self.telemetry.record_event_async(
+            category=f"aqua/{model_type}/deployment/create",
+            action="shape",
+            detail=instance_shape,
+            value=model_name,
+        )
 
         return AquaDeployment.from_oci_model_deployment(
             deployment.dsc_model_deployment, self.region
@@ -471,6 +461,19 @@ class AquaDeploymentApp(AquaApp):
                 )
             )
 
+            # log telemetry if MD is in active or failed state
+            deployment_id = model_deployment.id
+            state = model_deployment.lifecycle_state.upper()
+            if state in ["ACTIVE", "FAILED"]:
+                # tracks unique deployments that were listed in the user compartment
+                # we arbitrarily choose last 8 characters of OCID to identify MD in telemetry
+                self.telemetry.record_event_async(
+                    category=f"aqua/deployment",
+                    action="list",
+                    detail=get_ocid_substring(deployment_id, key_len=8),
+                    value=state,
+                )
+
         # tracks number of times deployment listing was called
         self.telemetry.record_event_async(category="aqua/deployment", action="list")
 
ads/aqua/evaluation.py CHANGED
@@ -38,6 +38,7 @@ from ads.aqua.utils import (
     JOB_INFRASTRUCTURE_TYPE_DEFAULT_NETWORKING,
     NB_SESSION_IDENTIFIER,
     UNKNOWN,
+    extract_id_and_name_from_tag,
     fire_and_forget,
     get_container_image,
     is_valid_ocid,
@@ -78,7 +79,7 @@ class EvaluationJobExitCode(Enum):
     SUCCESS = 0
     COMMON_ERROR = 1
 
-    # Configuration-related issues
+    # Configuration-related issues 10-19
     INVALID_EVALUATION_CONFIG = 10
     EVALUATION_CONFIG_NOT_PROVIDED = 11
     INVALID_OUTPUT_DIR = 12
@@ -87,7 +88,7 @@ class EvaluationJobExitCode(Enum):
     INVALID_TARGET_EVALUATION_ID = 15
     INVALID_EVALUATION_CONFIG_VALIDATION = 16
 
-    # Evaluation process issues
+    # Evaluation process issues 20-39
     OUTPUT_DIR_NOT_FOUND = 20
     INVALID_INPUT_DATASET = 21
     INPUT_DATA_NOT_FOUND = 22
@@ -100,6 +101,7 @@ class EvaluationJobExitCode(Enum):
     MODEL_INFERENCE_WRONG_RESPONSE_FORMAT = 29
     UNSUPPORTED_METRICS = 30
     METRIC_CALCULATION_FAILURE = 31
+    EVALUATION_MODEL_CATALOG_RECORD_CREATION_FAILED = 32
 
 
 EVALUATION_JOB_EXIT_CODE_MESSAGE = {
@@ -124,6 +126,11 @@ EVALUATION_JOB_EXIT_CODE_MESSAGE = {
     EvaluationJobExitCode.MODEL_INFERENCE_WRONG_RESPONSE_FORMAT.value: "Evaluation encountered unsupported, or unexpected model output, verify the target evaluation model is compatible and produces the correct format.",
     EvaluationJobExitCode.UNSUPPORTED_METRICS.value: "None of the provided metrics are supported by the framework.",
     EvaluationJobExitCode.METRIC_CALCULATION_FAILURE.value: "All attempted metric calculations were unsuccessful. Please review the metric configurations and input data.",
+    EvaluationJobExitCode.EVALUATION_MODEL_CATALOG_RECORD_CREATION_FAILED.value: (
+        "Failed to create a Model Catalog record for the evaluation. "
+        "This could be due to missing required permissions. "
+        "Please check the log for more information."
+    ),
 }
 
 
@@ -311,6 +318,8 @@ class CreateAquaEvaluationDetails(DataClassSerializable):
         The log id for the evaluation job infrastructure.
     metrics: (list, optional). Defaults to `None`.
         The metrics for the evaluation.
+    force_overwrite: (bool, optional). Defaults to `False`.
+        Whether to force overwrite the existing file in object storage.
     """
 
     evaluation_source_id: str
@@ -331,6 +340,7 @@ class CreateAquaEvaluationDetails(DataClassSerializable):
     log_group_id: Optional[str] = None
     log_id: Optional[str] = None
     metrics: Optional[List] = None
+    force_overwrite: Optional[bool] = False
 
 
 class AquaEvaluationApp(AquaApp):
@@ -434,12 +444,12 @@ class AquaEvaluationApp(AquaApp):
                     src_uri=evaluation_dataset_path,
                     dst_uri=dst_uri,
                     auth=default_signer(),
-                    force_overwrite=False,
+                    force_overwrite=create_aqua_evaluation_details.force_overwrite,
                 )
             except FileExistsError:
                 raise AquaFileExistsError(
                     f"Dataset {dataset_file} already exists in {create_aqua_evaluation_details.report_path}. "
-                    "Please use a new dataset file name or report path."
+                    "Please use a new dataset file name, report path or set `force_overwrite` as True."
                 )
             logger.debug(
                 f"Uploaded local file {evaluation_dataset_path} to object storage {dst_uri}."
@@ -673,11 +683,19 @@ class AquaEvaluationApp(AquaApp):
             ),
         )
 
+        # tracks shapes used in evaluation that were created for the given evaluation source
+        self.telemetry.record_event_async(
+            category="aqua/evaluation/create",
+            action="shape",
+            detail=create_aqua_evaluation_details.shape_name,
+            value=self._get_service_model_name(evaluation_source),
+        )
+
         # tracks unique evaluation that were created for the given evaluation source
         self.telemetry.record_event_async(
             category="aqua/evaluation",
             action="create",
-            detail=evaluation_source.display_name,
+            detail=self._get_service_model_name(evaluation_source),
         )
 
         return AquaEvaluationSummary(
@@ -768,6 +786,34 @@ class AquaEvaluationApp(AquaApp):
 
         return runtime
 
+    @staticmethod
+    def _get_service_model_name(
+        source: Union[ModelDeployment, DataScienceModel]
+    ) -> str:
+        """Gets the service model name from source. If it's ModelDeployment, needs to check
+        if its model has been fine tuned or not.
+
+        Parameters
+        ----------
+        source: Union[ModelDeployment, DataScienceModel]
+            An instance of either ModelDeployment or DataScienceModel
+
+        Returns
+        -------
+        str:
+            The service model name of source.
+        """
+        if isinstance(source, ModelDeployment):
+            fine_tuned_model_tag = source.freeform_tags.get(
+                Tags.AQUA_FINE_TUNED_MODEL_TAG.value, UNKNOWN
+            )
+            if not fine_tuned_model_tag:
+                return source.freeform_tags.get(Tags.AQUA_MODEL_NAME_TAG.value)
+            else:
+                return extract_id_and_name_from_tag(fine_tuned_model_tag)[1]
+
+        return source.display_name
+
     @staticmethod
     def _get_evaluation_container(source_id: str) -> str:
         # todo: use the source, identify if it is a model or a deployment. If latter, then fetch the base model id
@@ -824,13 +870,13 @@ class AquaEvaluationApp(AquaApp):
         logger.info(f"Fetching evaluation: {eval_id} details ...")
 
         resource = utils.query_resource(eval_id)
-        model_provenance = self.ds_client.get_model_provenance(eval_id).data
-
         if not resource:
             raise AquaRuntimeError(
                 f"Failed to retrieve evalution {eval_id}."
                 "Please check if the OCID is correct."
             )
+        model_provenance = self.ds_client.get_model_provenance(eval_id).data
+
         jobrun_id = model_provenance.training_id
         job_run_details = self._fetch_jobrun(
             resource, use_rqs=False, jobrun_id=jobrun_id
@@ -849,13 +895,17 @@ class AquaEvaluationApp(AquaApp):
             loggroup_id = ""
 
         loggroup_url = get_log_links(region=self.region, log_group_id=loggroup_id)
-        log_url = get_log_links(
-            region=self.region,
-            log_group_id=loggroup_id,
-            log_id=log_id,
-            compartment_id=job_run_details.compartment_id,
-            source_id=jobrun_id
-        ) if job_run_details else ""
+        log_url = (
+            get_log_links(
+                region=self.region,
+                log_group_id=loggroup_id,
+                log_id=log_id,
+                compartment_id=job_run_details.compartment_id,
+                source_id=jobrun_id,
+            )
+            if job_run_details
+            else ""
+        )
 
         log_name = None
         loggroup_name = None
@@ -916,6 +966,7 @@ class AquaEvaluationApp(AquaApp):
         List[AquaEvaluationSummary]:
             The list of the `ads.aqua.evalution.AquaEvaluationSummary`.
         """
+        compartment_id = compartment_id or COMPARTMENT_OCID
         logger.info(f"Fetching evaluations from compartment {compartment_id}.")
         models = utils.query_resources(
             compartment_id=compartment_id,
@@ -931,7 +982,6 @@ class AquaEvaluationApp(AquaApp):
         evaluations = []
         async_tasks = []
         for model in models:
-
             if model.identifier in self._eval_cache.keys():
                 logger.debug(f"Retrieving evaluation {model.identifier} from cache.")
                 evaluations.append(self._eval_cache.get(model.identifier))
@@ -962,7 +1012,7 @@ class AquaEvaluationApp(AquaApp):
                         self._process_evaluation_summary(model=model, jobrun=jobrun)
                     )
                 except Exception as exc:
-                    logger.error(
+                    logger.debug(
                         f"Processing evaluation: {model.identifier} generated an exception: {exc}"
                     )
                     evaluations.append(
@@ -1007,7 +1057,7 @@ class AquaEvaluationApp(AquaApp):
             return True if response.status == 200 else False
         except oci.exceptions.ServiceError as ex:
             if ex.status == 404:
-                logger.info("Evaluation artifact not found.")
+                logger.debug(f"Evaluation artifact not found for {model.identifier}.")
                 return False
 
     @telemetry(entry_point="plugin=evaluation&action=get_status", name="aqua")
@@ -1025,14 +1075,14 @@ class AquaEvaluationApp(AquaApp):
         """
         eval = utils.query_resource(eval_id)
 
-        # TODO: add job_run_id as input param to skip the query below
-        model_provenance = self.ds_client.get_model_provenance(eval_id).data
-
         if not eval:
             raise AquaRuntimeError(
                 f"Failed to retrieve evalution {eval_id}."
                 "Please check if the OCID is correct."
            )
+
+        model_provenance = self.ds_client.get_model_provenance(eval_id).data
+
         jobrun_id = model_provenance.training_id
         job_run_details = self._fetch_jobrun(eval, use_rqs=False, jobrun_id=jobrun_id)
 
@@ -1049,13 +1099,17 @@ class AquaEvaluationApp(AquaApp):
             loggroup_id = ""
 
         loggroup_url = get_log_links(region=self.region, log_group_id=loggroup_id)
-        log_url = get_log_links(
-            region=self.region,
-            log_group_id=loggroup_id,
-            log_id=log_id,
-            compartment_id=job_run_details.compartment_id,
-            source_id=jobrun_id
-        ) if job_run_details else ""
+        log_url = (
+            get_log_links(
+                region=self.region,
+                log_group_id=loggroup_id,
+                log_id=log_id,
+                compartment_id=job_run_details.compartment_id,
+                source_id=jobrun_id,
+            )
+            if job_run_details
+            else ""
+        )
 
         return dict(
             id=eval_id,
@@ -1100,6 +1154,19 @@ class AquaEvaluationApp(AquaApp):
                 ),
                 "args": {},
             },
+            {
+                "use_case": ["text_generation"],
+                "key": "bleu",
+                "name": "bleu",
+                "description": (
+                    "BLEU (Bilingual Evaluation Understudy) is an algorithm for evaluating the "
+                    "quality of text which has been machine-translated from one natural language to another. "
+                    "Quality is considered to be the correspondence between a machine's output and that of a "
+                    "human: 'the closer a machine translation is to a professional human translation, "
+                    "the better it is'."
+                ),
+                "args": {},
+            },
         ]
 
     @telemetry(entry_point="plugin=evaluation&action=load_metrics", name="aqua")
@@ -1265,7 +1332,10 @@ class AquaEvaluationApp(AquaApp):
             raise AquaRuntimeError(
                 f"Failed to get evaluation details for model {eval_id}"
             )
-        job_run_id = model.provenance_metadata.training_id
+
+        job_run_id = (
+            model.provenance_metadata.training_id if model.provenance_metadata else None
+        )
         if not job_run_id:
             raise AquaMissingKeyError(
                 "Model provenance is missing job run training_id key"
@@ -1328,7 +1398,7 @@ class AquaEvaluationApp(AquaApp):
             job_id = model.custom_metadata_list.get(
                 EvaluationCustomMetadata.EVALUATION_JOB_ID.value
             ).value
-        except ValueError:
+        except Exception:
            raise AquaMissingKeyError(
                f"Custom metadata is missing {EvaluationCustomMetadata.EVALUATION_JOB_ID.value} key"
            )
@@ -1360,7 +1430,7 @@ class AquaEvaluationApp(AquaApp):
         )
 
     def load_evaluation_config(self, eval_id):
-        # TODO
+        """Loads evaluation config."""
         return {
             "model_params": {
                 "max_tokens": 500,
@@ -1533,25 +1603,12 @@ class AquaEvaluationApp(AquaApp):
                 ),
             )
         except Exception as e:
-            logger.error(
-                f"Failed to construct AquaResourceIdentifier from given id=`{id}`, and name=`{name}`, {str(e)}"
+            logger.debug(
+                f"Failed to construct AquaResourceIdentifier from given id=`{id}`, and name=`{name}`. "
+                f"DEBUG INFO: {str(e)}"
             )
             return AquaResourceIdentifier()
 
-    def _get_jobrun(
-        self, model: oci.resource_search.models.ResourceSummary, mapping: dict = {}
-    ) -> Union[
-        oci.resource_search.models.ResourceSummary, oci.data_science.models.JobRun
-    ]:
-        jobrun_id = self._get_attribute_from_model_metadata(
-            model, EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID.value
-        )
-        job_run = mapping.get(jobrun_id)
-
-        if not job_run:
-            job_run = self._fetch_jobrun(model, use_rqs=True, jobrun_id=jobrun_id)
-        return job_run
-
     def _fetch_jobrun(
         self,
         resource: oci.resource_search.models.ResourceSummary,
@@ -1594,7 +1651,7 @@ class AquaEvaluationApp(AquaApp):
             )
         if not params.get(EvaluationConfig.PARAMS):
             raise AquaMissingKeyError(
-                "model parameters have not been saved in correct format in model taxonomy.",
+                "model parameters have not been saved in correct format in model taxonomy. ",
                 service_payload={"params": params},
             )
         # TODO: validate the format of parameters.
@@ -1626,7 +1683,7 @@ class AquaEvaluationApp(AquaApp):
 
         except Exception as e:
             logger.debug(
-                f"Failed to get job details from job_run_details: {job_run_details}"
+                f"Failed to get job details from job_run_details: {job_run_details} "
                 f"DEBUG INFO:{str(e)}"
             )
             return AquaResourceIdentifier()
@@ -1728,7 +1785,7 @@ class AquaEvaluationApp(AquaApp):
         Examples
         --------
         >>> _extract_job_lifecycle_details("Job run artifact execution failed with exit code 16")
-        'The evaluation configuration is invalid due to content validation errors.'
+        'Validation errors in the evaluation config. Exit code: 16.'
 
         >>> _extract_job_lifecycle_details("Job completed successfully.")
        'Job completed successfully.'