oracle-ads 2.11.14__py3-none-any.whl → 2.11.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. ads/aqua/common/utils.py +77 -20
  2. ads/aqua/constants.py +30 -17
  3. ads/aqua/evaluation/evaluation.py +118 -107
  4. ads/aqua/extension/evaluation_handler.py +4 -7
  5. ads/aqua/extension/evaluation_ws_msg_handler.py +0 -4
  6. ads/aqua/model/entities.py +6 -8
  7. ads/aqua/modeldeployment/constants.py +0 -16
  8. ads/aqua/modeldeployment/deployment.py +45 -67
  9. ads/opctl/operator/common/operator_config.py +1 -0
  10. ads/opctl/operator/lowcode/anomaly/README.md +3 -3
  11. ads/opctl/operator/lowcode/anomaly/__main__.py +5 -6
  12. ads/opctl/operator/lowcode/anomaly/const.py +8 -0
  13. ads/opctl/operator/lowcode/anomaly/model/anomaly_dataset.py +6 -2
  14. ads/opctl/operator/lowcode/anomaly/model/base_model.py +29 -20
  15. ads/opctl/operator/lowcode/anomaly/model/factory.py +41 -13
  16. ads/opctl/operator/lowcode/anomaly/model/isolationforest.py +79 -0
  17. ads/opctl/operator/lowcode/anomaly/model/oneclasssvm.py +79 -0
  18. ads/opctl/operator/lowcode/anomaly/schema.yaml +12 -2
  19. ads/opctl/operator/lowcode/anomaly/utils.py +16 -13
  20. ads/opctl/operator/lowcode/common/data.py +2 -1
  21. ads/opctl/operator/lowcode/common/transformations.py +37 -9
  22. ads/opctl/operator/lowcode/common/utils.py +32 -10
  23. ads/opctl/operator/lowcode/forecast/model/ml_forecast.py +14 -18
  24. ads/opctl/operator/lowcode/forecast/model_evaluator.py +4 -2
  25. ads/opctl/operator/lowcode/forecast/schema.yaml +9 -0
  26. ads/opctl/operator/lowcode/recommender/MLoperator +16 -0
  27. ads/opctl/operator/lowcode/recommender/README.md +206 -0
  28. ads/opctl/operator/lowcode/recommender/__init__.py +5 -0
  29. ads/opctl/operator/lowcode/recommender/__main__.py +82 -0
  30. ads/opctl/operator/lowcode/recommender/cmd.py +33 -0
  31. ads/opctl/operator/lowcode/recommender/constant.py +25 -0
  32. ads/opctl/operator/lowcode/recommender/environment.yaml +11 -0
  33. ads/opctl/operator/lowcode/recommender/model/base_model.py +198 -0
  34. ads/opctl/operator/lowcode/recommender/model/factory.py +58 -0
  35. ads/opctl/operator/lowcode/recommender/model/recommender_dataset.py +25 -0
  36. ads/opctl/operator/lowcode/recommender/model/svd.py +88 -0
  37. ads/opctl/operator/lowcode/recommender/operator_config.py +81 -0
  38. ads/opctl/operator/lowcode/recommender/schema.yaml +265 -0
  39. ads/opctl/operator/lowcode/recommender/utils.py +13 -0
  40. {oracle_ads-2.11.14.dist-info → oracle_ads-2.11.15.dist-info}/METADATA +6 -1
  41. {oracle_ads-2.11.14.dist-info → oracle_ads-2.11.15.dist-info}/RECORD +44 -28
  42. {oracle_ads-2.11.14.dist-info → oracle_ads-2.11.15.dist-info}/LICENSE.txt +0 -0
  43. {oracle_ads-2.11.14.dist-info → oracle_ads-2.11.15.dist-info}/WHEEL +0 -0
  44. {oracle_ads-2.11.14.dist-info → oracle_ads-2.11.15.dist-info}/entry_points.txt +0 -0
ads/aqua/evaluation/evaluation.py

@@ -1,5 +1,4 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*-
  # Copyright (c) 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
  import base64
@@ -47,17 +46,39 @@ from ads.aqua.common.utils import (
  upload_local_to_os,
  )
  from ads.aqua.constants import (
+ CONSOLE_LINK_RESOURCE_TYPE_MAPPING,
+ EVALUATION_REPORT,
+ EVALUATION_REPORT_JSON,
+ EVALUATION_REPORT_MD,
  JOB_INFRASTRUCTURE_TYPE_DEFAULT_NETWORKING,
+ LIFECYCLE_DETAILS_MISSING_JOBRUN,
  NB_SESSION_IDENTIFIER,
  UNKNOWN,
- CONSOLE_LINK_RESOURCE_TYPE_MAPPING,
  )
- from ads.aqua.evaluation.constants import *
- from ads.aqua.evaluation.entities import *
- from ads.aqua.evaluation.errors import *
+ from ads.aqua.evaluation.constants import (
+ EVAL_TERMINATION_STATE,
+ EvaluationConfig,
+ EvaluationCustomMetadata,
+ EvaluationMetricResult,
+ EvaluationReportJson,
+ )
+ from ads.aqua.evaluation.entities import (
+ AquaEvalMetric,
+ AquaEvalMetrics,
+ AquaEvalMetricSummary,
+ AquaEvalParams,
+ AquaEvalReport,
+ AquaEvaluationCommands,
+ AquaEvaluationDetail,
+ AquaEvaluationSummary,
+ AquaResourceIdentifier,
+ CreateAquaEvaluationDetails,
+ ModelParams,
+ )
+ from ads.aqua.evaluation.errors import EVALUATION_JOB_EXIT_CODE_MESSAGE
  from ads.common.auth import default_signer
  from ads.common.object_storage_details import ObjectStorageDetails
- from ads.common.utils import get_console_link, get_files, get_log_links, upload_to_os
+ from ads.common.utils import get_console_link, get_files, get_log_links
  from ads.config import (
  AQUA_JOB_SUBNET_ID,
  COMPARTMENT_OCID,
@@ -134,11 +155,11 @@ class AquaEvaluationApp(AquaApp):
  if not create_aqua_evaluation_details:
  try:
  create_aqua_evaluation_details = CreateAquaEvaluationDetails(**kwargs)
- except:
+ except Exception as ex:
  raise AquaValueError(
  "Invalid create evaluation parameters. Allowable parameters are: "
  f"{', '.join(list(asdict(CreateAquaEvaluationDetails).keys()))}."
- )
+ ) from ex

  if not is_valid_ocid(create_aqua_evaluation_details.evaluation_source_id):
  raise AquaValueError(
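
Several hunks in this file (and in evaluation_handler.py further down) replace bare `except:` clauses with `except Exception as ex:` and re-raise with `from ex`. A minimal sketch of the chaining pattern, using a hypothetical parsing function, shows what this buys: the original exception is kept as `__cause__` instead of being silently discarded.

    class AquaValueError(Exception):
        """Stand-in for the ADS error type used in the hunks above."""

    def parse_details(raw: dict) -> dict:
        try:
            return {"source_id": raw["evaluation_source_id"]}
        except KeyError as ex:
            # `from ex` chains the original KeyError onto the new error,
            # so the full failure context shows up in the traceback.
            raise AquaValueError("Invalid create evaluation parameters.") from ex

    # parse_details({})  # raises AquaValueError with the KeyError attached as __cause__
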
@@ -186,11 +207,11 @@ class AquaEvaluationApp(AquaApp):
  auth=default_signer(),
  force_overwrite=create_aqua_evaluation_details.force_overwrite,
  )
- except FileExistsError:
+ except FileExistsError as err:
  raise AquaFileExistsError(
  f"Dataset {dataset_file} already exists in {create_aqua_evaluation_details.report_path}. "
  "Please use a new dataset file name, report path or set `force_overwrite` as True."
- )
+ ) from err
  logger.debug(
  f"Uploaded local file {evaluation_dataset_path} to object storage {dst_uri}."
  )
@@ -210,11 +231,11 @@ class AquaEvaluationApp(AquaApp):
  report_path=create_aqua_evaluation_details.report_path,
  **create_aqua_evaluation_details.model_parameters,
  )
- except:
+ except Exception as ex:
  raise AquaValueError(
  "Invalid model parameters. Model parameters should "
  f"be a dictionary with keys: {', '.join(list(ModelParams.__annotations__.keys()))}."
- )
+ ) from ex

  target_compartment = (
  create_aqua_evaluation_details.compartment_id or COMPARTMENT_OCID
@@ -244,7 +265,7 @@ class AquaEvaluationApp(AquaApp):
  raise AquaValueError(
  f"Invalid experiment name. Please provide an experiment with `{Tags.AQUA_EVALUATION}` in tags."
  )
- except:
+ except Exception:
  logger.debug(
  f"Model version set {experiment_model_version_set_name} doesn't exist. "
  "Creating new model version set."
@@ -295,11 +316,7 @@ class AquaEvaluationApp(AquaApp):
  evaluation_model_taxonomy_metadata = ModelTaxonomyMetadata()
  evaluation_model_taxonomy_metadata[
  MetadataTaxonomyKeys.HYPERPARAMETERS
- ].value = {
- "model_params": {
- key: value for key, value in asdict(evaluation_model_parameters).items()
- }
- }
+ ].value = {"model_params": dict(asdict(evaluation_model_parameters))}

  evaluation_model = (
  DataScienceModel()
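
The hunk above collapses a redundant dict comprehension: `dataclasses.asdict` already returns a plain dict, so the nested `{key: value for ...}` copy is replaced by a single `dict(asdict(...))` shallow copy. A self-contained illustration with a hypothetical dataclass:

    from dataclasses import asdict, dataclass

    @dataclass
    class ExampleParams:  # hypothetical; stands in for the evaluation parameter dataclass
        max_tokens: int = 500
        shape: str = "VM.Standard.E4.Flex"

    params = ExampleParams()
    # asdict() already yields {"max_tokens": 500, "shape": "VM.Standard.E4.Flex"};
    # dict(...) merely takes a shallow copy of that mapping.
    assert dict(asdict(params)) == {k: v for k, v in asdict(params).items()}
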
@@ -350,14 +367,13 @@ class AquaEvaluationApp(AquaApp):
  )
  if AQUA_JOB_SUBNET_ID:
  evaluation_job.infrastructure.with_subnet_id(AQUA_JOB_SUBNET_ID)
- else:
- if NB_SESSION_IDENTIFIER in os.environ:
- # apply default subnet id for job by setting ME_STANDALONE
- # so as to avoid using the notebook session's networking when running on it
- # https://accelerated-data-science.readthedocs.io/en/latest/user_guide/jobs/infra_and_runtime.html#networking
- evaluation_job.infrastructure.with_job_infrastructure_type(
- JOB_INFRASTRUCTURE_TYPE_DEFAULT_NETWORKING
- )
+ elif NB_SESSION_IDENTIFIER in os.environ:
+ # apply default subnet id for job by setting ME_STANDALONE
+ # so as to avoid using the notebook session's networking when running on it
+ # https://accelerated-data-science.readthedocs.io/en/latest/user_guide/jobs/infra_and_runtime.html#networking
+ evaluation_job.infrastructure.with_job_infrastructure_type(
+ JOB_INFRASTRUCTURE_TYPE_DEFAULT_NETWORKING
+ )

  container_image = self._get_evaluation_container(
  create_aqua_evaluation_details.evaluation_source_id
@@ -375,9 +391,7 @@ class AquaEvaluationApp(AquaApp):
  model_parameters=create_aqua_evaluation_details.model_parameters,
  metrics=create_aqua_evaluation_details.metrics,
  )
- ).create(
- **kwargs
- ) ## TODO: decide what parameters will be needed
+ ).create(**kwargs) ## TODO: decide what parameters will be needed
  logger.debug(
  f"Successfully created evaluation job {evaluation_job.id} for {create_aqua_evaluation_details.evaluation_source_id}."
  )
@@ -479,12 +493,12 @@ class AquaEvaluationApp(AquaApp):
  region=self.region,
  ),
  ),
- tags=dict(
- aqua_evaluation=Tags.AQUA_EVALUATION,
- evaluation_job_id=evaluation_job.id,
- evaluation_source=create_aqua_evaluation_details.evaluation_source_id,
- evaluation_experiment_id=experiment_model_version_set_id,
- ),
+ tags={
+ "aqua_evaluation": Tags.AQUA_EVALUATION,
+ "evaluation_job_id": evaluation_job.id,
+ "evaluation_source": create_aqua_evaluation_details.evaluation_source_id,
+ "evaluation_experiment_id": experiment_model_version_set_id,
+ },
  parameters=AquaEvalParams(),
  )

@@ -526,7 +540,7 @@ class AquaEvaluationApp(AquaApp):

  @staticmethod
  def _get_service_model_name(
- source: Union[ModelDeployment, DataScienceModel]
+ source: Union[ModelDeployment, DataScienceModel],
  ) -> str:
  """Gets the service model name from source. If it's ModelDeployment, needs to check
  if its model has been fine tuned or not.
@@ -652,21 +666,21 @@ class AquaEvaluationApp(AquaApp):
  try:
  log = utils.query_resource(log_id, return_all=False)
  log_name = log.display_name if log else ""
- except:
+ except Exception:
  pass

  if loggroup_id:
  try:
  loggroup = utils.query_resource(loggroup_id, return_all=False)
  loggroup_name = loggroup.display_name if loggroup else ""
- except:
+ except Exception:
  pass

  try:
  introspection = json.loads(
  self._get_attribute_from_model_metadata(resource, "ArtifactTestResults")
  )
- except:
+ except Exception:
  introspection = {}

  summary = AquaEvaluationDetail(
@@ -685,19 +699,13 @@ class AquaEvaluationApp(AquaApp):
  return summary

  @telemetry(entry_point="plugin=evaluation&action=list", name="aqua")
- def list(
- self, compartment_id: str = None, project_id: str = None, **kwargs
- ) -> List[AquaEvaluationSummary]:
+ def list(self, compartment_id: str = None) -> List[AquaEvaluationSummary]:
  """List Aqua evaluations in a given compartment and under certain project.

  Parameters
  ----------
  compartment_id: (str, optional). Defaults to `None`.
  The compartment OCID.
- project_id: (str, optional). Defaults to `None`.
- The project OCID.
- kwargs
- Additional keyword arguments.

  Returns
  -------
@@ -718,7 +726,7 @@ class AquaEvaluationApp(AquaApp):
  evaluations = []
  async_tasks = []
  for model in models:
- if model.identifier in self._eval_cache.keys():
+ if model.identifier in self._eval_cache:
  logger.debug(f"Retrieving evaluation {model.identifier} from cache.")
  evaluations.append(self._eval_cache.get(model.identifier))

@@ -790,7 +798,7 @@ class AquaEvaluationApp(AquaApp):
  """Checks if the evaluation artifact exists."""
  try:
  response = self.ds_client.head_model_artifact(model_id=model.identifier)
- return True if response.status == 200 else False
+ return response.status == 200
  except oci.exceptions.ServiceError as ex:
  if ex.status == 404:
  logger.debug(f"Evaluation artifact not found for {model.identifier}.")
@@ -846,18 +854,17 @@ class AquaEvaluationApp(AquaApp):
  if job_run_details
  else ""
  )
-
- return dict(
- id=eval_id,
+ return {
+ "id": eval_id,
  **self._get_status(
  model=eval,
  jobrun=job_run_details,
  ),
- log_id=log_id,
- log_url=log_url,
- loggroup_id=loggroup_id,
- loggroup_url=loggroup_url,
- )
+ "log_id": log_id,
+ "log_url": log_url,
+ "loggroup_id": loggroup_id,
+ "loggroup_url": loggroup_url,
+ }

  def get_supported_metrics(self) -> dict:
  """Gets a list of supported metrics for evaluation."""
@@ -919,8 +926,8 @@ class AquaEvaluationApp(AquaApp):
  AquaEvalMetrics:
  An instance of AquaEvalMetrics.
  """
- if eval_id in self._metrics_cache.keys():
- logger.info(f"Returning metrics from cache.")
+ if eval_id in self._metrics_cache:
+ logger.info("Returning metrics from cache.")
  eval_metrics = self._metrics_cache.get(eval_id)
  if len(eval_metrics.report) > 0:
  return eval_metrics
@@ -934,14 +941,14 @@ class AquaEvaluationApp(AquaApp):

  files_in_artifact = get_files(temp_dir)
  md_report_content = self._read_from_artifact(
- temp_dir, files_in_artifact, utils.EVALUATION_REPORT_MD
+ temp_dir, files_in_artifact, EVALUATION_REPORT_MD
  )

  # json report not availiable for failed evaluation
  try:
  json_report = json.loads(
  self._read_from_artifact(
- temp_dir, files_in_artifact, utils.EVALUATION_REPORT_JSON
+ temp_dir, files_in_artifact, EVALUATION_REPORT_JSON
  )
  )
  except Exception as e:
@@ -1028,8 +1035,8 @@ class AquaEvaluationApp(AquaApp):
  AquaFileNotFoundError:
  When missing `report.html` in evaluation artifact.
  """
- if eval_id in self._report_cache.keys():
- logger.info(f"Returning report from cache.")
+ if eval_id in self._report_cache:
+ logger.info("Returning report from cache.")
  report = self._report_cache.get(eval_id)
  if report.content:
  return report
@@ -1040,7 +1047,7 @@ class AquaEvaluationApp(AquaApp):
  auth=self._auth,
  )
  content = self._read_from_artifact(
- temp_dir, get_files(temp_dir), utils.EVALUATION_REPORT
+ temp_dir, get_files(temp_dir), EVALUATION_REPORT
  )

  report = AquaEvalReport(
@@ -1084,7 +1091,7 @@ class AquaEvaluationApp(AquaApp):
  "Model provenance is missing job run training_id key"
  )

- status = dict(id=eval_id, status=UNKNOWN, time_accepted="")
+ status = {"id": eval_id, "lifecycle_state": UNKNOWN, "time_accepted": UNKNOWN}
  run = DataScienceJobRun.from_ocid(job_run_id)
  if run.lifecycle_state in [
  DataScienceJobRun.LIFECYCLE_STATE_ACCEPTED,
@@ -1092,11 +1099,11 @@ class AquaEvaluationApp(AquaApp):
  DataScienceJobRun.LIFECYCLE_STATE_NEEDS_ATTENTION,
  ]:
  self._cancel_job_run(run, model)
- status = dict(
- id=eval_id,
- lifecycle_state="CANCELING",
- time_accepted=datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f%z"),
- )
+ status = {
+ "id": eval_id,
+ "lifecycle_state": "CANCELING",
+ "time_accepted": datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f%z"),
+ }
  return status

  @staticmethod
@@ -1142,10 +1149,10 @@ class AquaEvaluationApp(AquaApp):
  job_id = model.custom_metadata_list.get(
  EvaluationCustomMetadata.EVALUATION_JOB_ID
  ).value
- except Exception:
+ except Exception as ex:
  raise AquaMissingKeyError(
  f"Custom metadata is missing {EvaluationCustomMetadata.EVALUATION_JOB_ID} key"
- )
+ ) from ex

  job = DataScienceJob.from_id(job_id)

@@ -1163,11 +1170,11 @@ class AquaEvaluationApp(AquaApp):
  self._eval_cache.pop(key=eval_id, default=None)
  self._deletion_cache.__setitem__(key=eval_id, value="")

- status = dict(
- id=eval_id,
- lifecycle_state=jobrun.lifecycle_state if jobrun else "DELETING",
- time_accepted=datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f%z"),
- )
+ status = {
+ "id": eval_id,
+ "lifecycle_state": jobrun.lifecycle_state if jobrun else "DELETING",
+ "time_accepted": datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f%z"),
+ }
  return status

  @staticmethod
@@ -1236,7 +1243,7 @@ class AquaEvaluationApp(AquaApp):
  model.additional_details.get(RqsAdditionalDetails.METADATA),
  target_attribute,
  )
- except:
+ except Exception:
  logger.debug(
  f"Missing `{target_attribute}` in custom metadata of the evaluation."
  f"Evaluation id: {model.identifier} "
@@ -1254,7 +1261,7 @@ class AquaEvaluationApp(AquaApp):
  def _get_source(
  self,
  evaluation: oci.resource_search.models.ResourceSummary,
- resources_mapping: dict = {},
+ resources_mapping: dict = None,
  ) -> tuple:
  """Returns ocid and name of the model has been evaluated."""
  source_id = self._get_attribute_from_model_metadata(
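
Both `_get_source` here and `_process` further down drop the mutable default `resources_mapping: dict = {}` in favour of `None`. Python evaluates default values once, at function definition, so a `{}` default is shared across every call that omits the argument. A minimal sketch of the pitfall and the fix (hypothetical function names):

    def add_result_buggy(item, results: dict = {}):
        # The same dict object is reused across calls that omit `results`.
        results[item] = True
        return results

    def add_result_fixed(item, results: dict = None):
        results = {} if results is None else results
        results[item] = True
        return results

    print(add_result_buggy("a"))  # {'a': True}
    print(add_result_buggy("b"))  # {'a': True, 'b': True}  <- state leaked between calls
    print(add_result_fixed("a"))  # {'a': True}
    print(add_result_fixed("b"))  # {'b': True}
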
@@ -1263,14 +1270,16 @@ class AquaEvaluationApp(AquaApp):
  )

  try:
- source = resources_mapping.get(source_id)
- source_name = (
- source.display_name
- if source
- else self._get_attribute_from_model_metadata(
- evaluation, EvaluationCustomMetadata.EVALUATION_SOURCE_NAME
+ source_name = None
+ if resources_mapping:
+ source = resources_mapping.get(source_id)
+ source_name = (
+ source.display_name
+ if source
+ else self._get_attribute_from_model_metadata(
+ evaluation, EvaluationCustomMetadata.EVALUATION_SOURCE_NAME
+ )
  )
- )

  # try to resolve source_name from source id
  if source_id and not source_name:
@@ -1286,13 +1295,13 @@ class AquaEvaluationApp(AquaApp):
  raise AquaRuntimeError(
  f"Not supported source type: {resource_type}"
  )
- except Exception as e:
+ except Exception:
  logger.debug(
  f"Failed to retrieve source information for evaluation {evaluation.identifier}."
  )
  source_name = ""

- return (source_id, source_name)
+ return source_id, source_name

  def _get_experiment_info(
  self, model: oci.resource_search.models.ResourceSummary
@@ -1306,7 +1315,7 @@ class AquaEvaluationApp(AquaApp):
  def _process(
  self,
  model: oci.resource_search.models.ResourceSummary,
- resources_mapping: dict = {},
+ resources_mapping: dict = None,
  ) -> dict:
  """Constructs AquaEvaluationSummary from `oci.resource_search.models.ResourceSummary`."""

@@ -1320,25 +1329,27 @@ class AquaEvaluationApp(AquaApp):
  ocid=model_id,
  region=self.region,
  )
- source_model_id, source_model_name = self._get_source(model, resources_mapping)
+ source_model_id, source_model_name = self._get_source(
+ model, resources_mapping if resources_mapping else {}
+ )
  experiment_id, experiment_name = self._get_experiment_info(model)
  parameters = self._fetch_runtime_params(model)

- return dict(
- id=model_id,
- name=model.display_name,
- console_url=console_url,
- time_created=model.time_created,
- tags=tags,
- experiment=self._build_resource_identifier(
+ return {
+ "id": model_id,
+ "name": model.display_name,
+ "console_url": console_url,
+ "time_created": model.time_created,
+ "tags": tags,
+ "experiment": self._build_resource_identifier(
  id=experiment_id,
  name=experiment_name,
  ),
- source=self._build_resource_identifier(
+ "source": self._build_resource_identifier(
  id=source_model_id, name=source_model_name
  ),
- parameters=parameters,
- )
+ "parameters": parameters,
+ }

  def _build_resource_identifier(
  self, id: str = None, name: str = None
@@ -1465,7 +1476,7 @@ class AquaEvaluationApp(AquaApp):
  job_run_status = jobrun.lifecycle_state

  if jobrun is None:
- if model.identifier in self._deletion_cache.keys():
+ if model.identifier in self._deletion_cache:
  job_run_status = JobRun.LIFECYCLE_STATE_DELETED

  elif self._get_attribute_from_model_metadata(
@@ -1484,20 +1495,20 @@ class AquaEvaluationApp(AquaApp):

  try:
  lifecycle_details = (
- utils.LIFECYCLE_DETAILS_MISSING_JOBRUN
+ LIFECYCLE_DETAILS_MISSING_JOBRUN
  if not jobrun
  else self._extract_job_lifecycle_details(jobrun.lifecycle_details)
  )
- except:
+ except Exception:
  # ResourceSummary does not have lifecycle_details attr
  lifecycle_details = ""

- return dict(
- lifecycle_state=(
+ return {
+ "lifecycle_state": (
  lifecycle_state if isinstance(lifecycle_state, str) else lifecycle_state
  ),
- lifecycle_details=lifecycle_details,
- )
+ "lifecycle_details": lifecycle_details,
+ }

  def _prefetch_resources(self, compartment_id) -> dict:
  """Fetches all AQUA resources."""
@@ -1554,7 +1565,7 @@ class AquaEvaluationApp(AquaApp):
  exit_code_message = EVALUATION_JOB_EXIT_CODE_MESSAGE.get(exit_code)
  if exit_code_message:
  message = f"{exit_code_message} Exit code: {exit_code}."
- except:
+ except Exception:
  pass

  return message

ads/aqua/extension/evaluation_handler.py

@@ -1,5 +1,4 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*-
  # Copyright (c) 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

@@ -10,8 +9,8 @@ from tornado.web import HTTPError
  from ads.aqua.common.decorator import handle_exceptions
  from ads.aqua.evaluation import AquaEvaluationApp
  from ads.aqua.evaluation.entities import CreateAquaEvaluationDetails
- from ads.aqua.extension.errors import Errors
  from ads.aqua.extension.base_handler import AquaAPIhandler
+ from ads.aqua.extension.errors import Errors
  from ads.aqua.extension.utils import validate_function_parameters
  from ads.config import COMPARTMENT_OCID

@@ -41,8 +40,8 @@ class AquaEvaluationHandler(AquaAPIhandler):
  """
  try:
  input_data = self.get_json_body()
- except Exception:
- raise HTTPError(400, Errors.INVALID_INPUT_DATA_FORMAT)
+ except Exception as ex:
+ raise HTTPError(400, Errors.INVALID_INPUT_DATA_FORMAT) from ex

  if not input_data:
  raise HTTPError(400, Errors.NO_INPUT_DATA)
@@ -77,9 +76,7 @@ class AquaEvaluationHandler(AquaAPIhandler):
  def list(self):
  """List Aqua models."""
  compartment_id = self.get_argument("compartment_id", default=COMPARTMENT_OCID)
- # project_id is no needed.
- project_id = self.get_argument("project_id", default=None)
- return self.finish(AquaEvaluationApp().list(compartment_id, project_id))
+ return self.finish(AquaEvaluationApp().list(compartment_id))

  def get_default_metrics(self):
  """Lists supported metrics for evaluation."""
ads/aqua/extension/evaluation_ws_msg_handler.py

@@ -1,13 +1,10 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*--

  # Copyright (c) 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

  from typing import List, Union

- from tornado.web import HTTPError
-
  from ads.aqua.common.decorator import handle_exceptions
  from ads.aqua.evaluation import AquaEvaluationApp
  from ads.aqua.extension.aqua_ws_msg_handler import AquaWSMsgHandler
@@ -33,7 +30,6 @@ class AquaEvaluationWSMsgHandler(AquaWSMsgHandler):

  eval_list = AquaEvaluationApp().list(
  list_eval_request.compartment_id or COMPARTMENT_OCID,
- list_eval_request.project_id,
  )
  response = ListEvaluationsResponse(
  message_id=list_eval_request.message_id,

ads/aqua/model/entities.py

@@ -1,5 +1,4 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*-
  # Copyright (c) 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

@@ -9,6 +8,7 @@ aqua.model.entities

  This module contains dataclasses for Aqua Model.
  """
+
  import re
  from dataclasses import InitVar, dataclass, field
  from typing import List, Optional
@@ -18,7 +18,7 @@ import oci
  from ads.aqua import logger
  from ads.aqua.app import CLIBuilderMixin
  from ads.aqua.common import utils
- from ads.aqua.constants import UNKNOWN_VALUE
+ from ads.aqua.constants import LIFECYCLE_DETAILS_MISSING_JOBRUN, UNKNOWN_VALUE
  from ads.aqua.data import AquaResourceIdentifier
  from ads.aqua.model.enums import FineTuningDefinedMetadata
  from ads.aqua.training.exceptions import exit_code_dict
@@ -147,14 +147,14 @@ class AquaEvalFTCommon(DataClassSerializable):
  try:
  log = utils.query_resource(log_id, return_all=False)
  log_name = log.display_name if log else ""
- except:
+ except Exception:
  pass

  if loggroup_id:
  try:
  loggroup = utils.query_resource(loggroup_id, return_all=False)
  loggroup_name = loggroup.display_name if loggroup else ""
- except:
+ except Exception:
  pass

  experiment_id, experiment_name = utils._get_experiment_info(model)
@@ -168,9 +168,7 @@ class AquaEvalFTCommon(DataClassSerializable):
  )
  self.job = utils._build_job_identifier(job_run_details=jobrun, region=region)
  self.lifecycle_details = (
- utils.LIFECYCLE_DETAILS_MISSING_JOBRUN
- if not jobrun
- else jobrun.lifecycle_details
+ LIFECYCLE_DETAILS_MISSING_JOBRUN if not jobrun else jobrun.lifecycle_details
  )


@@ -247,7 +245,7 @@ class AquaFineTuneModel(AquaModel, AquaEvalFTCommon, DataClassSerializable):
  lifecycle_details,
  )
  message = f"{exception.reason} (exit code {exit_code})"
- except:
+ except Exception:
  pass

  return message

ads/aqua/modeldeployment/constants.py

@@ -1,5 +1,4 @@
  #!/usr/bin/env python
- # -*- coding: utf-8 -*-
  # Copyright (c) 2024 Oracle and/or its affiliates.
  # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/

@@ -9,18 +8,3 @@ aqua.modeldeployment.constants

  This module contains constants used in Aqua Model Deployment.
  """
-
- VLLMInferenceRestrictedParams = {
- "--tensor-parallel-size",
- "--port",
- "--host",
- "--served-model-name",
- "--seed",
- }
- TGIInferenceRestrictedParams = {
- "--port",
- "--hostname",
- "--num-shard",
- "--sharded",
- "--trust-remote-code",
- }
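
`VLLMInferenceRestrictedParams` and `TGIInferenceRestrictedParams` are removed from this module; the file list above shows `ads/aqua/constants.py` and `ads/aqua/modeldeployment/deployment.py` changing in the same release, which suggests the definitions were consolidated elsewhere, although this diff does not show their new location. As a hedged sketch of how such a restricted-flag set is typically enforced against user-supplied container arguments (names and logic hypothetical, not the library's actual check):

    RESTRICTED_PARAMS = {"--port", "--host", "--served-model-name"}

    def validate_params(user_params: str) -> None:
        # Reject flags that the deployment service reserves for itself.
        supplied = {token for token in user_params.split() if token.startswith("--")}
        blocked = supplied & RESTRICTED_PARAMS
        if blocked:
            raise ValueError(f"Restricted parameters cannot be overridden: {sorted(blocked)}")

    validate_params("--max-model-len 4096")  # passes
    # validate_params("--port 8080")         # would raise ValueError
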