oracle-ads 2.11.9__py3-none-any.whl → 2.11.10__py3-none-any.whl

This diff compares the published contents of two publicly available package versions from a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (87)
  1. ads/aqua/__init__.py +1 -1
  2. ads/aqua/{base.py → app.py} +27 -7
  3. ads/aqua/cli.py +59 -17
  4. ads/aqua/common/__init__.py +5 -0
  5. ads/aqua/{decorator.py → common/decorator.py} +14 -8
  6. ads/aqua/common/enums.py +69 -0
  7. ads/aqua/{exception.py → common/errors.py} +28 -0
  8. ads/aqua/{utils.py → common/utils.py} +168 -77
  9. ads/aqua/config/config.py +18 -0
  10. ads/aqua/constants.py +51 -33
  11. ads/aqua/data.py +15 -26
  12. ads/aqua/evaluation/__init__.py +8 -0
  13. ads/aqua/evaluation/constants.py +53 -0
  14. ads/aqua/evaluation/entities.py +170 -0
  15. ads/aqua/evaluation/errors.py +71 -0
  16. ads/aqua/{evaluation.py → evaluation/evaluation.py} +122 -370
  17. ads/aqua/extension/__init__.py +2 -0
  18. ads/aqua/extension/aqua_ws_msg_handler.py +97 -0
  19. ads/aqua/extension/base_handler.py +0 -7
  20. ads/aqua/extension/common_handler.py +12 -6
  21. ads/aqua/extension/deployment_handler.py +70 -4
  22. ads/aqua/extension/errors.py +10 -0
  23. ads/aqua/extension/evaluation_handler.py +5 -3
  24. ads/aqua/extension/evaluation_ws_msg_handler.py +43 -0
  25. ads/aqua/extension/finetune_handler.py +41 -3
  26. ads/aqua/extension/model_handler.py +56 -4
  27. ads/aqua/extension/models/__init__.py +0 -0
  28. ads/aqua/extension/models/ws_models.py +69 -0
  29. ads/aqua/extension/ui_handler.py +65 -4
  30. ads/aqua/extension/ui_websocket_handler.py +124 -0
  31. ads/aqua/extension/utils.py +1 -1
  32. ads/aqua/finetuning/__init__.py +7 -0
  33. ads/aqua/finetuning/constants.py +17 -0
  34. ads/aqua/finetuning/entities.py +102 -0
  35. ads/aqua/{finetune.py → finetuning/finetuning.py} +162 -136
  36. ads/aqua/model/__init__.py +8 -0
  37. ads/aqua/model/constants.py +46 -0
  38. ads/aqua/model/entities.py +266 -0
  39. ads/aqua/model/enums.py +26 -0
  40. ads/aqua/{model.py → model/model.py} +401 -309
  41. ads/aqua/modeldeployment/__init__.py +8 -0
  42. ads/aqua/modeldeployment/constants.py +26 -0
  43. ads/aqua/{deployment.py → modeldeployment/deployment.py} +288 -227
  44. ads/aqua/modeldeployment/entities.py +142 -0
  45. ads/aqua/modeldeployment/inference.py +75 -0
  46. ads/aqua/ui.py +88 -8
  47. ads/cli.py +55 -7
  48. ads/common/serializer.py +2 -2
  49. ads/config.py +2 -1
  50. ads/jobs/builders/infrastructure/dsc_job.py +49 -6
  51. ads/model/datascience_model.py +1 -1
  52. ads/model/deployment/model_deployment.py +11 -0
  53. ads/model/model_metadata.py +17 -6
  54. ads/opctl/operator/lowcode/anomaly/README.md +0 -2
  55. ads/opctl/operator/lowcode/anomaly/__main__.py +3 -3
  56. ads/opctl/operator/lowcode/anomaly/environment.yaml +0 -2
  57. ads/opctl/operator/lowcode/anomaly/model/automlx.py +2 -2
  58. ads/opctl/operator/lowcode/anomaly/model/autots.py +1 -1
  59. ads/opctl/operator/lowcode/anomaly/model/base_model.py +13 -17
  60. ads/opctl/operator/lowcode/anomaly/operator_config.py +2 -0
  61. ads/opctl/operator/lowcode/anomaly/schema.yaml +1 -2
  62. ads/opctl/operator/lowcode/anomaly/utils.py +3 -2
  63. ads/opctl/operator/lowcode/common/transformations.py +2 -1
  64. ads/opctl/operator/lowcode/common/utils.py +1 -1
  65. ads/opctl/operator/lowcode/forecast/README.md +1 -3
  66. ads/opctl/operator/lowcode/forecast/__main__.py +3 -18
  67. ads/opctl/operator/lowcode/forecast/const.py +2 -0
  68. ads/opctl/operator/lowcode/forecast/environment.yaml +1 -2
  69. ads/opctl/operator/lowcode/forecast/model/arima.py +1 -0
  70. ads/opctl/operator/lowcode/forecast/model/automlx.py +7 -4
  71. ads/opctl/operator/lowcode/forecast/model/autots.py +1 -0
  72. ads/opctl/operator/lowcode/forecast/model/base_model.py +38 -22
  73. ads/opctl/operator/lowcode/forecast/model/factory.py +33 -4
  74. ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py +15 -1
  75. ads/opctl/operator/lowcode/forecast/model/ml_forecast.py +234 -0
  76. ads/opctl/operator/lowcode/forecast/model/neuralprophet.py +9 -1
  77. ads/opctl/operator/lowcode/forecast/model/prophet.py +1 -0
  78. ads/opctl/operator/lowcode/forecast/model_evaluator.py +147 -0
  79. ads/opctl/operator/lowcode/forecast/operator_config.py +2 -1
  80. ads/opctl/operator/lowcode/forecast/schema.yaml +7 -2
  81. ads/opctl/operator/lowcode/forecast/utils.py +18 -44
  82. {oracle_ads-2.11.9.dist-info → oracle_ads-2.11.10.dist-info}/METADATA +9 -12
  83. {oracle_ads-2.11.9.dist-info → oracle_ads-2.11.10.dist-info}/RECORD +86 -61
  84. ads/aqua/job.py +0 -29
  85. {oracle_ads-2.11.9.dist-info → oracle_ads-2.11.10.dist-info}/LICENSE.txt +0 -0
  86. {oracle_ads-2.11.9.dist-info → oracle_ads-2.11.10.dist-info}/WHEEL +0 -0
  87. {oracle_ads-2.11.9.dist-info → oracle_ads-2.11.10.dist-info}/entry_points.txt +0 -0
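
What follows is the hunk-by-hunk diff of ads/aqua/{evaluation.py → evaluation/evaluation.py}, the largest single change in the release. Two themes run through it: the evaluation-specific enums, dataclasses, and exit-code tables move out of the module into the new ads.aqua.evaluation subpackage (constants.py, entities.py, errors.py) and the shared ads.aqua.common package; and the tag, resource, and metadata enums are now used without the .value accessor, which suggests they were consolidated into string-backed enums in ads.aqua.common.enums. A minimal sketch of that pattern, as a hypothetical reconstruction rather than the package's actual definition:

    # Hedged sketch: a str-mixin Enum behaves as a plain string wherever the
    # diff below drops ".value". Member values are copied from the removed
    # EvaluationModelTags/EvaluationJobTags classes; the class itself is
    # illustrative, not the actual ads.aqua.common.enums.Tags definition.
    from enum import Enum

    class Tags(str, Enum):
        AQUA_EVALUATION = "aqua_evaluation"
        AQUA_EVALUATION_MODEL_ID = "evaluation_model_id"

    # Members now work directly as dict keys, in equality checks, and in
    # substring tests, so call sites no longer need Tags.AQUA_EVALUATION.value:
    freeform_tags = {Tags.AQUA_EVALUATION: Tags.AQUA_EVALUATION}
    assert Tags.AQUA_EVALUATION == "aqua_evaluation"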
@@ -8,12 +8,11 @@ import os
  import re
  import tempfile
  from concurrent.futures import ThreadPoolExecutor, as_completed
- from dataclasses import asdict, dataclass, field
+ from dataclasses import asdict
  from datetime import datetime, timedelta
- from enum import Enum
  from pathlib import Path
  from threading import Lock
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Union

  import oci
  from cachetools import TTLCache
@@ -24,29 +23,40 @@ from oci.data_science.models import (
  UpdateModelProvenanceDetails,
  )

- from ads.aqua import logger, utils
- from ads.aqua.base import AquaApp
- from ads.aqua.data import Tags
- from ads.aqua.exception import (
+ from ads.aqua import logger
+ from ads.aqua.app import AquaApp
+ from ads.aqua.common import utils
+ from ads.aqua.common.enums import (
+ DataScienceResource,
+ Resource,
+ RqsAdditionalDetails,
+ Tags,
+ )
+ from ads.aqua.common.errors import (
  AquaFileExistsError,
  AquaFileNotFoundError,
  AquaMissingKeyError,
  AquaRuntimeError,
  AquaValueError,
  )
- from ads.aqua.utils import (
- JOB_INFRASTRUCTURE_TYPE_DEFAULT_NETWORKING,
- NB_SESSION_IDENTIFIER,
- UNKNOWN,
+ from ads.aqua.common.utils import (
  extract_id_and_name_from_tag,
  fire_and_forget,
  get_container_image,
  is_valid_ocid,
  upload_local_to_os,
  )
+ from ads.aqua.constants import (
+ JOB_INFRASTRUCTURE_TYPE_DEFAULT_NETWORKING,
+ NB_SESSION_IDENTIFIER,
+ UNKNOWN,
+ CONSOLE_LINK_RESOURCE_TYPE_MAPPING,
+ )
+ from ads.aqua.evaluation.constants import *
+ from ads.aqua.evaluation.entities import *
+ from ads.aqua.evaluation.errors import *
  from ads.common.auth import default_signer
  from ads.common.object_storage_details import ObjectStorageDetails
- from ads.common.serializer import DataClassSerializable
  from ads.common.utils import get_console_link, get_files, get_log_links, upload_to_os
  from ads.config import (
  AQUA_JOB_SUBNET_ID,
@@ -69,279 +79,6 @@ from ads.model.model_metadata import (
  from ads.model.model_version_set import ModelVersionSet
  from ads.telemetry import telemetry

- EVAL_TERMINATION_STATE = [
- JobRun.LIFECYCLE_STATE_SUCCEEDED,
- JobRun.LIFECYCLE_STATE_FAILED,
- ]
-
-
- class EvaluationJobExitCode(Enum):
- SUCCESS = 0
- COMMON_ERROR = 1
-
- # Configuration-related issues 10-19
- INVALID_EVALUATION_CONFIG = 10
- EVALUATION_CONFIG_NOT_PROVIDED = 11
- INVALID_OUTPUT_DIR = 12
- INVALID_INPUT_DATASET_PATH = 13
- INVALID_EVALUATION_ID = 14
- INVALID_TARGET_EVALUATION_ID = 15
- INVALID_EVALUATION_CONFIG_VALIDATION = 16
-
- # Evaluation process issues 20-39
- OUTPUT_DIR_NOT_FOUND = 20
- INVALID_INPUT_DATASET = 21
- INPUT_DATA_NOT_FOUND = 22
- EVALUATION_ID_NOT_FOUND = 23
- EVALUATION_ALREADY_PERFORMED = 24
- EVALUATION_TARGET_NOT_FOUND = 25
- NO_SUCCESS_INFERENCE_RESULT = 26
- COMPUTE_EVALUATION_ERROR = 27
- EVALUATION_REPORT_ERROR = 28
- MODEL_INFERENCE_WRONG_RESPONSE_FORMAT = 29
- UNSUPPORTED_METRICS = 30
- METRIC_CALCULATION_FAILURE = 31
- EVALUATION_MODEL_CATALOG_RECORD_CREATION_FAILED = 32
-
-
- EVALUATION_JOB_EXIT_CODE_MESSAGE = {
- EvaluationJobExitCode.SUCCESS.value: "Success",
- EvaluationJobExitCode.COMMON_ERROR.value: "An error occurred during the evaluation, please check the log for more information.",
- EvaluationJobExitCode.INVALID_EVALUATION_CONFIG.value: "The provided evaluation configuration was not in the correct format, supported formats are YAML or JSON.",
- EvaluationJobExitCode.EVALUATION_CONFIG_NOT_PROVIDED.value: "The evaluation config was not provided.",
- EvaluationJobExitCode.INVALID_OUTPUT_DIR.value: "The specified output directory path is invalid.",
- EvaluationJobExitCode.INVALID_INPUT_DATASET_PATH.value: "Dataset path is invalid.",
- EvaluationJobExitCode.INVALID_EVALUATION_ID.value: "Evaluation ID was not found in the Model Catalog.",
- EvaluationJobExitCode.INVALID_TARGET_EVALUATION_ID.value: "Target evaluation ID was not found in the Model Deployment.",
- EvaluationJobExitCode.INVALID_EVALUATION_CONFIG_VALIDATION.value: "Validation errors in the evaluation config.",
- EvaluationJobExitCode.OUTPUT_DIR_NOT_FOUND.value: "Destination folder does not exist or cannot be used for writing, verify the folder's existence and permissions.",
- EvaluationJobExitCode.INVALID_INPUT_DATASET.value: "Input dataset is in an invalid format, ensure the dataset is in jsonl format and that includes the required columns: 'prompt', 'completion' (optional 'category').",
- EvaluationJobExitCode.INPUT_DATA_NOT_FOUND.value: "Input data file does not exist or cannot be use for reading, verify the file's existence and permissions.",
- EvaluationJobExitCode.EVALUATION_ID_NOT_FOUND.value: "Evaluation ID does not match any resource in the Model Catalog, or access may be blocked by policies.",
- EvaluationJobExitCode.EVALUATION_ALREADY_PERFORMED.value: "Evaluation already has an attached artifact, indicating that the evaluation has already been performed.",
- EvaluationJobExitCode.EVALUATION_TARGET_NOT_FOUND.value: "Target evaluation ID does not match any resources in Model Deployment.",
- EvaluationJobExitCode.NO_SUCCESS_INFERENCE_RESULT.value: "Inference process completed without producing expected outcome, verify the model parameters and config.",
- EvaluationJobExitCode.COMPUTE_EVALUATION_ERROR.value: "Evaluation process encountered an issue while calculating metrics.",
- EvaluationJobExitCode.EVALUATION_REPORT_ERROR.value: "Failed to save the evaluation report due to an error. Ensure the evaluation model is currently active and the specified path for the output report is valid and accessible. Verify these conditions and reinitiate the evaluation process.",
- EvaluationJobExitCode.MODEL_INFERENCE_WRONG_RESPONSE_FORMAT.value: "Evaluation encountered unsupported, or unexpected model output, verify the target evaluation model is compatible and produces the correct format.",
- EvaluationJobExitCode.UNSUPPORTED_METRICS.value: "None of the provided metrics are supported by the framework.",
- EvaluationJobExitCode.METRIC_CALCULATION_FAILURE.value: "All attempted metric calculations were unsuccessful. Please review the metric configurations and input data.",
- EvaluationJobExitCode.EVALUATION_MODEL_CATALOG_RECORD_CREATION_FAILED.value: (
- "Failed to create a Model Catalog record for the evaluation. "
- "This could be due to missing required permissions. "
- "Please check the log for more information."
- ),
- }
-
-
- class Resource(Enum):
- JOB = "jobs"
- MODEL = "models"
- MODEL_DEPLOYMENT = "modeldeployments"
- MODEL_VERSION_SET = "model-version-sets"
-
-
- class DataScienceResource(Enum):
- MODEL_DEPLOYMENT = "datasciencemodeldeployment"
- MODEL = "datasciencemodel"
-
-
- class EvaluationCustomMetadata(Enum):
- EVALUATION_SOURCE = "evaluation_source"
- EVALUATION_JOB_ID = "evaluation_job_id"
- EVALUATION_JOB_RUN_ID = "evaluation_job_run_id"
- EVALUATION_OUTPUT_PATH = "evaluation_output_path"
- EVALUATION_SOURCE_NAME = "evaluation_source_name"
- EVALUATION_ERROR = "aqua_evaluate_error"
-
-
- class EvaluationModelTags(Enum):
- AQUA_EVALUATION = "aqua_evaluation"
-
-
- class EvaluationJobTags(Enum):
- AQUA_EVALUATION = "aqua_evaluation"
- EVALUATION_MODEL_ID = "evaluation_model_id"
-
-
- class EvaluationUploadStatus(Enum):
- IN_PROGRESS = "IN_PROGRESS"
- COMPLETED = "COMPLETED"
-
-
- @dataclass(repr=False)
- class AquaResourceIdentifier(DataClassSerializable):
- id: str = ""
- name: str = ""
- url: str = ""
-
-
- @dataclass(repr=False)
- class AquaEvalReport(DataClassSerializable):
- evaluation_id: str = ""
- content: str = ""
-
-
- @dataclass(repr=False)
- class ModelParams(DataClassSerializable):
- max_tokens: str = ""
- top_p: str = ""
- top_k: str = ""
- temperature: str = ""
- presence_penalty: Optional[float] = 0.0
- frequency_penalty: Optional[float] = 0.0
- stop: Optional[Union[str, List[str]]] = field(default_factory=list)
-
-
- @dataclass(repr=False)
- class AquaEvalParams(ModelParams, DataClassSerializable):
- shape: str = ""
- dataset_path: str = ""
- report_path: str = ""
-
-
- @dataclass(repr=False)
- class AquaEvalMetric(DataClassSerializable):
- key: str
- name: str
- description: str = ""
-
-
- @dataclass(repr=False)
- class AquaEvalMetricSummary(DataClassSerializable):
- metric: str = ""
- score: str = ""
- grade: str = ""
-
-
- @dataclass(repr=False)
- class AquaEvalMetrics(DataClassSerializable):
- id: str
- report: str
- metric_results: List[AquaEvalMetric] = field(default_factory=list)
- metric_summary_result: List[AquaEvalMetricSummary] = field(default_factory=list)
-
-
- @dataclass(repr=False)
- class AquaEvaluationCommands(DataClassSerializable):
- evaluation_id: str
- evaluation_target_id: str
- input_data: dict
- metrics: list
- output_dir: str
- params: dict
-
-
- @dataclass(repr=False)
- class AquaEvaluationSummary(DataClassSerializable):
- """Represents a summary of Aqua evalution."""
-
- id: str
- name: str
- console_url: str
- lifecycle_state: str
- lifecycle_details: str
- time_created: str
- tags: dict
- experiment: AquaResourceIdentifier = field(default_factory=AquaResourceIdentifier)
- source: AquaResourceIdentifier = field(default_factory=AquaResourceIdentifier)
- job: AquaResourceIdentifier = field(default_factory=AquaResourceIdentifier)
- parameters: AquaEvalParams = field(default_factory=AquaEvalParams)
-
-
- @dataclass(repr=False)
- class AquaEvaluationDetail(AquaEvaluationSummary, DataClassSerializable):
- """Represents a details of Aqua evalution."""
-
- log_group: AquaResourceIdentifier = field(default_factory=AquaResourceIdentifier)
- log: AquaResourceIdentifier = field(default_factory=AquaResourceIdentifier)
- introspection: dict = field(default_factory=dict)
-
-
- class RqsAdditionalDetails:
- METADATA = "metadata"
- CREATED_BY = "createdBy"
- DESCRIPTION = "description"
- MODEL_VERSION_SET_ID = "modelVersionSetId"
- MODEL_VERSION_SET_NAME = "modelVersionSetName"
- PROJECT_ID = "projectId"
- VERSION_LABEL = "versionLabel"
-
-
- class EvaluationConfig:
- PARAMS = "model_params"
-
-
- @dataclass(repr=False)
- class CreateAquaEvaluationDetails(DataClassSerializable):
- """Dataclass to create aqua model evaluation.
-
- Fields
- ------
- evaluation_source_id: str
- The evaluation source id. Must be either model or model deployment ocid.
- evaluation_name: str
- The name for evaluation.
- dataset_path: str
- The dataset path for the evaluation. Could be either a local path from notebook session
- or an object storage path.
- report_path: str
- The report path for the evaluation. Must be an object storage path.
- model_parameters: dict
- The parameters for the evaluation.
- shape_name: str
- The shape name for the evaluation job infrastructure.
- memory_in_gbs: float
- The memory in gbs for the shape selected.
- ocpus: float
- The ocpu count for the shape selected.
- block_storage_size: int
- The storage for the evaluation job infrastructure.
- compartment_id: (str, optional). Defaults to `None`.
- The compartment id for the evaluation.
- project_id: (str, optional). Defaults to `None`.
- The project id for the evaluation.
- evaluation_description: (str, optional). Defaults to `None`.
- The description for evaluation
- experiment_id: (str, optional). Defaults to `None`.
- The evaluation model version set id. If provided,
- evaluation model will be associated with it.
- experiment_name: (str, optional). Defaults to `None`.
- The evaluation model version set name. If provided,
- the model version set with the same name will be used if exists,
- otherwise a new model version set will be created with the name.
- experiment_description: (str, optional). Defaults to `None`.
- The description for the evaluation model version set.
- log_group_id: (str, optional). Defaults to `None`.
- The log group id for the evaluation job infrastructure.
- log_id: (str, optional). Defaults to `None`.
- The log id for the evaluation job infrastructure.
- metrics: (list, optional). Defaults to `None`.
- The metrics for the evaluation.
- force_overwrite: (bool, optional). Defaults to `False`.
- Whether to force overwrite the existing file in object storage.
- """
-
- evaluation_source_id: str
- evaluation_name: str
- dataset_path: str
- report_path: str
- model_parameters: dict
- shape_name: str
- block_storage_size: int
- compartment_id: Optional[str] = None
- project_id: Optional[str] = None
- evaluation_description: Optional[str] = None
- experiment_id: Optional[str] = None
- experiment_name: Optional[str] = None
- experiment_description: Optional[str] = None
- memory_in_gbs: Optional[float] = None
- ocpus: Optional[float] = None
- log_group_id: Optional[str] = None
- log_id: Optional[str] = None
- metrics: Optional[List] = None
- force_overwrite: Optional[bool] = False
-

  class AquaEvaluationApp(AquaApp):
  """Provides a suite of APIs to interact with Aqua evaluations within the
@@ -367,6 +104,9 @@ class AquaEvaluationApp(AquaApp):
  _report_cache = TTLCache(maxsize=10, ttl=timedelta(hours=5), timer=datetime.now)
  _metrics_cache = TTLCache(maxsize=10, ttl=timedelta(hours=5), timer=datetime.now)
  _eval_cache = TTLCache(maxsize=200, ttl=timedelta(hours=10), timer=datetime.now)
+ _deletion_cache = TTLCache(
+ maxsize=10, ttl=timedelta(minutes=10), timer=datetime.now
+ )
  _cache_lock = Lock()

  @telemetry(entry_point="plugin=evaluation&action=create", name="aqua")
@@ -408,14 +148,14 @@ class AquaEvaluationApp(AquaApp):

  evaluation_source = None
  if (
- DataScienceResource.MODEL_DEPLOYMENT.value
+ DataScienceResource.MODEL_DEPLOYMENT
  in create_aqua_evaluation_details.evaluation_source_id
  ):
  evaluation_source = ModelDeployment.from_id(
  create_aqua_evaluation_details.evaluation_source_id
  )
  elif (
- DataScienceResource.MODEL.value
+ DataScienceResource.MODEL
  in create_aqua_evaluation_details.evaluation_source_id
  ):
  evaluation_source = DataScienceModel.from_id(
@@ -500,11 +240,9 @@ class AquaEvaluationApp(AquaApp):
  name=experiment_model_version_set_name,
  compartment_id=target_compartment,
  )
- if not utils._is_valid_mvs(
- model_version_set, Tags.AQUA_EVALUATION.value
- ):
+ if not utils._is_valid_mvs(model_version_set, Tags.AQUA_EVALUATION):
  raise AquaValueError(
- f"Invalid experiment name. Please provide an experiment with `{Tags.AQUA_EVALUATION.value}` in tags."
+ f"Invalid experiment name. Please provide an experiment with `{Tags.AQUA_EVALUATION}` in tags."
  )
  except:
  logger.debug(
@@ -513,7 +251,7 @@ class AquaEvaluationApp(AquaApp):
  )

  evaluation_mvs_freeform_tags = {
- Tags.AQUA_EVALUATION.value: Tags.AQUA_EVALUATION.value,
+ Tags.AQUA_EVALUATION: Tags.AQUA_EVALUATION,
  }

  model_version_set = (
@@ -534,23 +272,23 @@ class AquaEvaluationApp(AquaApp):
  experiment_model_version_set_id = model_version_set.id
  else:
  model_version_set = ModelVersionSet.from_id(experiment_model_version_set_id)
- if not utils._is_valid_mvs(model_version_set, Tags.AQUA_EVALUATION.value):
+ if not utils._is_valid_mvs(model_version_set, Tags.AQUA_EVALUATION):
  raise AquaValueError(
- f"Invalid experiment id. Please provide an experiment with `{Tags.AQUA_EVALUATION.value}` in tags."
+ f"Invalid experiment id. Please provide an experiment with `{Tags.AQUA_EVALUATION}` in tags."
  )
  experiment_model_version_set_name = model_version_set.name

  evaluation_model_custom_metadata = ModelCustomMetadata()
  evaluation_model_custom_metadata.add(
- key=EvaluationCustomMetadata.EVALUATION_SOURCE.value,
+ key=EvaluationCustomMetadata.EVALUATION_SOURCE,
  value=create_aqua_evaluation_details.evaluation_source_id,
  )
  evaluation_model_custom_metadata.add(
- key=EvaluationCustomMetadata.EVALUATION_OUTPUT_PATH.value,
+ key=EvaluationCustomMetadata.EVALUATION_OUTPUT_PATH,
  value=create_aqua_evaluation_details.report_path,
  )
  evaluation_model_custom_metadata.add(
- key=EvaluationCustomMetadata.EVALUATION_SOURCE_NAME.value,
+ key=EvaluationCustomMetadata.EVALUATION_SOURCE_NAME,
  value=evaluation_source.display_name,
  )

@@ -588,8 +326,8 @@ class AquaEvaluationApp(AquaApp):
  # TODO: validate metrics if it's provided

  evaluation_job_freeform_tags = {
- EvaluationJobTags.AQUA_EVALUATION.value: EvaluationJobTags.AQUA_EVALUATION.value,
- EvaluationJobTags.EVALUATION_MODEL_ID.value: evaluation_model.id,
+ Tags.AQUA_EVALUATION: Tags.AQUA_EVALUATION,
+ Tags.AQUA_EVALUATION_MODEL_ID: evaluation_model.id,
  }

  evaluation_job = Job(name=evaluation_model.display_name).with_infrastructure(
@@ -654,11 +392,11 @@ class AquaEvaluationApp(AquaApp):
  )

  evaluation_model_custom_metadata.add(
- key=EvaluationCustomMetadata.EVALUATION_JOB_ID.value,
+ key=EvaluationCustomMetadata.EVALUATION_JOB_ID,
  value=evaluation_job.id,
  )
  evaluation_model_custom_metadata.add(
- key=EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID.value,
+ key=EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID,
  value=evaluation_job_run.id,
  )
  updated_custom_metadata_list = [
@@ -671,7 +409,7 @@ class AquaEvaluationApp(AquaApp):
  update_model_details=UpdateModelDetails(
  custom_metadata_list=updated_custom_metadata_list,
  freeform_tags={
- EvaluationModelTags.AQUA_EVALUATION.value: EvaluationModelTags.AQUA_EVALUATION.value,
+ Tags.AQUA_EVALUATION: Tags.AQUA_EVALUATION,
  },
  ),
  )
@@ -702,7 +440,7 @@ class AquaEvaluationApp(AquaApp):
  id=evaluation_model.id,
  name=evaluation_model.display_name,
  console_url=get_console_link(
- resource=Resource.MODEL.value,
+ resource=Resource.MODEL,
  ocid=evaluation_model.id,
  region=self.region,
  ),
@@ -713,7 +451,7 @@ class AquaEvaluationApp(AquaApp):
  id=experiment_model_version_set_id,
  name=experiment_model_version_set_name,
  url=get_console_link(
- resource=Resource.MODEL_VERSION_SET.value,
+ resource=Resource.MODEL_VERSION_SET,
  ocid=experiment_model_version_set_id,
  region=self.region,
  ),
@@ -723,10 +461,10 @@ class AquaEvaluationApp(AquaApp):
  name=evaluation_source.display_name,
  url=get_console_link(
  resource=(
- Resource.MODEL_DEPLOYMENT.value
- if DataScienceResource.MODEL_DEPLOYMENT.value
+ Resource.MODEL_DEPLOYMENT
+ if DataScienceResource.MODEL_DEPLOYMENT
  in create_aqua_evaluation_details.evaluation_source_id
- else Resource.MODEL.value
+ else Resource.MODEL
  ),
  ocid=create_aqua_evaluation_details.evaluation_source_id,
  region=self.region,
@@ -736,13 +474,13 @@ class AquaEvaluationApp(AquaApp):
  id=evaluation_job.id,
  name=evaluation_job.name,
  url=get_console_link(
- resource=Resource.JOB.value,
+ resource=Resource.JOB,
  ocid=evaluation_job.id,
  region=self.region,
  ),
  ),
  tags=dict(
- aqua_evaluation=EvaluationModelTags.AQUA_EVALUATION.value,
+ aqua_evaluation=Tags.AQUA_EVALUATION,
  evaluation_job_id=evaluation_job.id,
  evaluation_source=create_aqua_evaluation_details.evaluation_source_id,
  evaluation_experiment_id=experiment_model_version_set_id,
@@ -805,10 +543,10 @@ class AquaEvaluationApp(AquaApp):
  """
  if isinstance(source, ModelDeployment):
  fine_tuned_model_tag = source.freeform_tags.get(
- Tags.AQUA_FINE_TUNED_MODEL_TAG.value, UNKNOWN
+ Tags.AQUA_FINE_TUNED_MODEL_TAG, UNKNOWN
  )
  if not fine_tuned_model_tag:
- return source.freeform_tags.get(Tags.AQUA_MODEL_NAME_TAG.value)
+ return source.freeform_tags.get(Tags.AQUA_MODEL_NAME_TAG)
  else:
  return extract_id_and_name_from_tag(fine_tuned_model_tag)[1]

@@ -971,12 +709,10 @@ class AquaEvaluationApp(AquaApp):
  models = utils.query_resources(
  compartment_id=compartment_id,
  resource_type="datasciencemodel",
- tag_list=[EvaluationModelTags.AQUA_EVALUATION.value],
+ tag_list=[Tags.AQUA_EVALUATION],
  )
  logger.info(f"Fetched {len(models)} evaluations.")

- # TODO: add filter based on project_id if needed.
-
  mapping = self._prefetch_resources(compartment_id)

  evaluations = []
@@ -988,7 +724,7 @@ class AquaEvaluationApp(AquaApp):

  else:
  jobrun_id = self._get_attribute_from_model_metadata(
- model, EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID.value
+ model, EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID
  )
  job_run = mapping.get(jobrun_id)

@@ -1197,11 +933,13 @@ class AquaEvaluationApp(AquaApp):
  )

  files_in_artifact = get_files(temp_dir)
- report_content = self._read_from_artifact(
+ md_report_content = self._read_from_artifact(
  temp_dir, files_in_artifact, utils.EVALUATION_REPORT_MD
  )
+
+ # json report not availiable for failed evaluation
  try:
- report = json.loads(
+ json_report = json.loads(
  self._read_from_artifact(
  temp_dir, files_in_artifact, utils.EVALUATION_REPORT_JSON
  )
@@ -1210,27 +948,32 @@ class AquaEvaluationApp(AquaApp):
  logger.debug(
  "Failed to load `report.json` from evaluation artifact" f"{str(e)}"
  )
- report = {}
+ json_report = {}

- # TODO: after finalizing the format of report.json, move the constant to class
  eval_metrics = AquaEvalMetrics(
  id=eval_id,
- report=base64.b64encode(report_content).decode(),
+ report=base64.b64encode(md_report_content).decode(),
  metric_results=[
  AquaEvalMetric(
- key=metric_key,
- name=metadata.get("name", utils.UNKNOWN),
- description=metadata.get("description", utils.UNKNOWN),
+ key=metadata.get(EvaluationMetricResult.SHORT_NAME, utils.UNKNOWN),
+ name=metadata.get(EvaluationMetricResult.NAME, utils.UNKNOWN),
+ description=metadata.get(
+ EvaluationMetricResult.DESCRIPTION, utils.UNKNOWN
+ ),
  )
- for metric_key, metadata in report.get("metric_results", {}).items()
+ for _, metadata in json_report.get(
+ EvaluationReportJson.METRIC_RESULT, {}
+ ).items()
  ],
  metric_summary_result=[
  AquaEvalMetricSummary(**m)
- for m in report.get("metric_summary_result", [{}])
+ for m in json_report.get(
+ EvaluationReportJson.METRIC_SUMMARY_RESULT, [{}]
+ )
  ],
  )

- if report_content:
+ if md_report_content:
  self._metrics_cache.__setitem__(key=eval_id, value=eval_metrics)

  return eval_metrics
@@ -1371,6 +1114,7 @@ class AquaEvaluationApp(AquaApp):
  @telemetry(entry_point="plugin=evaluation&action=delete", name="aqua")
  def delete(self, eval_id):
  """Deletes the job and the associated model for the given evaluation id.
+
  Parameters
  ----------
  eval_id: str
@@ -1383,9 +1127,9 @@ class AquaEvaluationApp(AquaApp):
  Raises
  ------
  AquaRuntimeError:
- if a model doesn't exist for the given eval_id
+ if a model doesn't exist for the given eval_id.
  AquaMissingKeyError:
- if training_id is missing the job run id
+ if job/jobrun id is missing.
  """

  model = DataScienceModel.from_id(eval_id)
@@ -1396,20 +1140,32 @@ class AquaEvaluationApp(AquaApp):

  try:
  job_id = model.custom_metadata_list.get(
- EvaluationCustomMetadata.EVALUATION_JOB_ID.value
+ EvaluationCustomMetadata.EVALUATION_JOB_ID
  ).value
  except Exception:
  raise AquaMissingKeyError(
- f"Custom metadata is missing {EvaluationCustomMetadata.EVALUATION_JOB_ID.value} key"
+ f"Custom metadata is missing {EvaluationCustomMetadata.EVALUATION_JOB_ID} key"
  )

  job = DataScienceJob.from_id(job_id)

  self._delete_job_and_model(job, model)

+ try:
+ jobrun_id = model.custom_metadata_list.get(
+ EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID
+ ).value
+ jobrun = utils.query_resource(jobrun_id, return_all=False)
+ except Exception:
+ logger.debug("Associated Job Run OCID is missing.")
+ jobrun = None
+
+ self._eval_cache.pop(key=eval_id, default=None)
+ self._deletion_cache.__setitem__(key=eval_id, value="")
+
  status = dict(
  id=eval_id,
- lifecycle_state="DELETING",
+ lifecycle_state=jobrun.lifecycle_state if jobrun else "DELETING",
  time_accepted=datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f%z"),
  )
  return status
@@ -1503,7 +1259,7 @@ class AquaEvaluationApp(AquaApp):
  """Returns ocid and name of the model has been evaluated."""
  source_id = self._get_attribute_from_model_metadata(
  evaluation,
- EvaluationCustomMetadata.EVALUATION_SOURCE.value,
+ EvaluationCustomMetadata.EVALUATION_SOURCE,
  )

  try:
@@ -1512,20 +1268,20 @@ class AquaEvaluationApp(AquaApp):
  source.display_name
  if source
  else self._get_attribute_from_model_metadata(
- evaluation, EvaluationCustomMetadata.EVALUATION_SOURCE_NAME.value
+ evaluation, EvaluationCustomMetadata.EVALUATION_SOURCE_NAME
  )
  )

- if not source_name:
+ # try to resolve source_name from source id
+ if source_id and not source_name:
  resource_type = utils.get_resource_type(source_id)

- # TODO: adjust resource principal mapping
- if resource_type == "datasciencemodel":
- source_name = self.ds_client.get_model(source_id).data.display_name
- elif resource_type == "datasciencemodeldeployment":
+ if resource_type.startswith("datasciencemodeldeployment"):
  source_name = self.ds_client.get_model_deployment(
  source_id
  ).data.display_name
+ elif resource_type.startswith("datasciencemodel"):
+ source_name = self.ds_client.get_model(source_id).data.display_name
  else:
  raise AquaRuntimeError(
  f"Not supported source type: {resource_type}"
@@ -1589,7 +1345,7 @@ class AquaEvaluationApp(AquaApp):
  ) -> AquaResourceIdentifier:
  """Constructs AquaResourceIdentifier based on the given ocid and display name."""
  try:
- resource_type = utils.CONSOLE_LINK_RESOURCE_TYPE_MAPPING.get(
+ resource_type = CONSOLE_LINK_RESOURCE_TYPE_MAPPING.get(
  utils.get_resource_type(id)
  )

@@ -1620,7 +1376,7 @@ class AquaEvaluationApp(AquaApp):
  """Extracts job run id from metadata, and gets related job run information."""

  jobrun_id = jobrun_id or self._get_attribute_from_model_metadata(
- resource, EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID.value
+ resource, EvaluationCustomMetadata.EVALUATION_JOB_RUN_ID
  )

  logger.info(f"Fetching associated job run: {jobrun_id}")
@@ -1654,8 +1410,6 @@ class AquaEvaluationApp(AquaApp):
  "model parameters have not been saved in correct format in model taxonomy. ",
  service_payload={"params": params},
  )
- # TODO: validate the format of parameters.
- # self._validate_params(params)

  return AquaEvalParams(**params[EvaluationConfig.PARAMS])
  except Exception as e:
@@ -1688,7 +1442,6 @@ class AquaEvaluationApp(AquaApp):
  )
  return AquaResourceIdentifier()

- # TODO: fix the logic for determine termination state
  def _get_status(
  self,
  model: oci.resource_search.models.ResourceSummary,
@@ -1697,30 +1450,33 @@ class AquaEvaluationApp(AquaApp):
  ] = None,
  ) -> dict:
  """Builds evaluation status based on the model status and job run status.
- When detect `aqua_evaluation_error` in custom metadata, the jobrun is failed.
- However, if jobrun failed before saving this meta, we need to check the existance
- of the evaluation artifact.
+ When missing jobrun information, the status will be decided based on:

- """
- # TODO: revisit for CANCELED evaluation
- job_run_status = (
- JobRun.LIFECYCLE_STATE_FAILED
- if self._get_attribute_from_model_metadata(
- model, EvaluationCustomMetadata.EVALUATION_ERROR.value
- )
- else None
- )
+ * If the evaluation just has been deleted, the jobrun status should be deleted.
+ * When detect `aqua_evaluation_error` in custom metadata, the jobrun is failed.
+ * If jobrun failed before saving this meta, we need to check the existance
+ of the evaluation artifact.

+ """
  model_status = model.lifecycle_state
- job_run_status = job_run_status or (
- jobrun.lifecycle_state
- if jobrun and not jobrun.lifecycle_state == JobRun.LIFECYCLE_STATE_DELETED
- else (
- JobRun.LIFECYCLE_STATE_SUCCEEDED
- if self._if_eval_artifact_exist(model)
- else JobRun.LIFECYCLE_STATE_FAILED
- )
- )
+ job_run_status = None
+
+ if jobrun:
+ job_run_status = jobrun.lifecycle_state
+
+ if jobrun is None:
+ if model.identifier in self._deletion_cache.keys():
+ job_run_status = JobRun.LIFECYCLE_STATE_DELETED
+
+ elif self._get_attribute_from_model_metadata(
+ model, EvaluationCustomMetadata.EVALUATION_ERROR
+ ):
+ job_run_status = JobRun.LIFECYCLE_STATE_FAILED
+
+ elif self._if_eval_artifact_exist(model):
+ job_run_status = JobRun.LIFECYCLE_STATE_SUCCEEDED
+ else:
+ job_run_status = JobRun.LIFECYCLE_STATE_FAILED

  lifecycle_state = utils.LifecycleStatus.get_status(
  evaluation_status=model_status, job_run_status=job_run_status
@@ -1738,21 +1494,17 @@ class AquaEvaluationApp(AquaApp):

  return dict(
  lifecycle_state=(
- lifecycle_state
- if isinstance(lifecycle_state, str)
- else lifecycle_state.value
+ lifecycle_state if isinstance(lifecycle_state, str) else lifecycle_state
  ),
  lifecycle_details=lifecycle_details,
  )

  def _prefetch_resources(self, compartment_id) -> dict:
  """Fetches all AQUA resources."""
- # TODO: handle cross compartment/tenency resources
- # TODO: add cache
  resources = utils.query_resources(
  compartment_id=compartment_id,
  resource_type="all",
- tag_list=[EvaluationModelTags.AQUA_EVALUATION.value, "OCI_AQUA"],
+ tag_list=[Tags.AQUA_EVALUATION, "OCI_AQUA"],
  connect_by_ampersands=False,
  return_all=False,
  )
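
That closes the diff. One behavioral change worth spelling out: delete() now records the evaluation OCID in the new _deletion_cache (a ten-minute TTLCache) and evicts the entry from _eval_cache, and _get_status() reports a DELETED job-run state while that tombstone is alive, covering the window before the deleted job run vanishes from resource queries. A minimal sketch of the tombstone pattern under those assumptions; the class and method names here are illustrative, not the package's API:

    from datetime import datetime, timedelta
    from typing import Optional

    from cachetools import TTLCache

    class DeletionTombstones:
        """Remembers recently deleted evaluations until queries catch up."""

        def __init__(self) -> None:
            # Same construction as the diff's _deletion_cache: entries expire
            # automatically ten minutes after deletion is requested.
            self._tombstones = TTLCache(
                maxsize=10, ttl=timedelta(minutes=10), timer=datetime.now
            )

        def mark_deleted(self, eval_id: str) -> None:
            self._tombstones[eval_id] = ""  # the key is the tombstone; value unused

        def job_run_status(self, eval_id: str, jobrun_state: Optional[str]) -> Optional[str]:
            if jobrun_state:  # a visible job run always wins
                return jobrun_state
            if eval_id in self._tombstones:  # just deleted, not yet purged from queries
                return "DELETED"
            return None  # caller falls back to error-metadata and artifact checks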