oracle-ads 2.11.9__py3-none-any.whl → 2.11.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (88):
  1. ads/aqua/__init__.py +1 -1
  2. ads/aqua/{base.py → app.py} +27 -7
  3. ads/aqua/cli.py +59 -17
  4. ads/aqua/common/__init__.py +5 -0
  5. ads/aqua/{decorator.py → common/decorator.py} +14 -8
  6. ads/aqua/common/enums.py +69 -0
  7. ads/aqua/{exception.py → common/errors.py} +28 -0
  8. ads/aqua/{utils.py → common/utils.py} +193 -95
  9. ads/aqua/config/config.py +18 -0
  10. ads/aqua/constants.py +51 -33
  11. ads/aqua/data.py +15 -26
  12. ads/aqua/evaluation/__init__.py +8 -0
  13. ads/aqua/evaluation/constants.py +53 -0
  14. ads/aqua/evaluation/entities.py +170 -0
  15. ads/aqua/evaluation/errors.py +71 -0
  16. ads/aqua/{evaluation.py → evaluation/evaluation.py} +122 -370
  17. ads/aqua/extension/__init__.py +2 -0
  18. ads/aqua/extension/aqua_ws_msg_handler.py +97 -0
  19. ads/aqua/extension/base_handler.py +0 -7
  20. ads/aqua/extension/common_handler.py +12 -6
  21. ads/aqua/extension/deployment_handler.py +70 -4
  22. ads/aqua/extension/errors.py +10 -0
  23. ads/aqua/extension/evaluation_handler.py +5 -3
  24. ads/aqua/extension/evaluation_ws_msg_handler.py +43 -0
  25. ads/aqua/extension/finetune_handler.py +41 -3
  26. ads/aqua/extension/model_handler.py +56 -4
  27. ads/aqua/extension/models/__init__.py +0 -0
  28. ads/aqua/extension/models/ws_models.py +69 -0
  29. ads/aqua/extension/ui_handler.py +65 -4
  30. ads/aqua/extension/ui_websocket_handler.py +124 -0
  31. ads/aqua/extension/utils.py +1 -1
  32. ads/aqua/finetuning/__init__.py +7 -0
  33. ads/aqua/finetuning/constants.py +17 -0
  34. ads/aqua/finetuning/entities.py +102 -0
  35. ads/aqua/{finetune.py → finetuning/finetuning.py} +170 -141
  36. ads/aqua/model/__init__.py +8 -0
  37. ads/aqua/model/constants.py +46 -0
  38. ads/aqua/model/entities.py +266 -0
  39. ads/aqua/model/enums.py +26 -0
  40. ads/aqua/{model.py → model/model.py} +405 -309
  41. ads/aqua/modeldeployment/__init__.py +8 -0
  42. ads/aqua/modeldeployment/constants.py +26 -0
  43. ads/aqua/{deployment.py → modeldeployment/deployment.py} +288 -227
  44. ads/aqua/modeldeployment/entities.py +142 -0
  45. ads/aqua/modeldeployment/inference.py +75 -0
  46. ads/aqua/ui.py +88 -8
  47. ads/cli.py +55 -7
  48. ads/common/decorator/threaded.py +97 -0
  49. ads/common/serializer.py +2 -2
  50. ads/config.py +5 -1
  51. ads/jobs/builders/infrastructure/dsc_job.py +49 -6
  52. ads/model/datascience_model.py +1 -1
  53. ads/model/deployment/model_deployment.py +11 -0
  54. ads/model/model_metadata.py +17 -6
  55. ads/opctl/operator/lowcode/anomaly/README.md +0 -2
  56. ads/opctl/operator/lowcode/anomaly/__main__.py +3 -3
  57. ads/opctl/operator/lowcode/anomaly/environment.yaml +0 -2
  58. ads/opctl/operator/lowcode/anomaly/model/automlx.py +2 -2
  59. ads/opctl/operator/lowcode/anomaly/model/autots.py +1 -1
  60. ads/opctl/operator/lowcode/anomaly/model/base_model.py +13 -17
  61. ads/opctl/operator/lowcode/anomaly/operator_config.py +2 -0
  62. ads/opctl/operator/lowcode/anomaly/schema.yaml +1 -2
  63. ads/opctl/operator/lowcode/anomaly/utils.py +3 -2
  64. ads/opctl/operator/lowcode/common/transformations.py +2 -1
  65. ads/opctl/operator/lowcode/common/utils.py +1 -1
  66. ads/opctl/operator/lowcode/forecast/README.md +1 -3
  67. ads/opctl/operator/lowcode/forecast/__main__.py +3 -18
  68. ads/opctl/operator/lowcode/forecast/const.py +2 -0
  69. ads/opctl/operator/lowcode/forecast/environment.yaml +1 -2
  70. ads/opctl/operator/lowcode/forecast/model/arima.py +1 -0
  71. ads/opctl/operator/lowcode/forecast/model/automlx.py +7 -4
  72. ads/opctl/operator/lowcode/forecast/model/autots.py +1 -0
  73. ads/opctl/operator/lowcode/forecast/model/base_model.py +38 -22
  74. ads/opctl/operator/lowcode/forecast/model/factory.py +33 -4
  75. ads/opctl/operator/lowcode/forecast/model/forecast_datasets.py +15 -1
  76. ads/opctl/operator/lowcode/forecast/model/ml_forecast.py +234 -0
  77. ads/opctl/operator/lowcode/forecast/model/neuralprophet.py +9 -1
  78. ads/opctl/operator/lowcode/forecast/model/prophet.py +1 -0
  79. ads/opctl/operator/lowcode/forecast/model_evaluator.py +147 -0
  80. ads/opctl/operator/lowcode/forecast/operator_config.py +2 -1
  81. ads/opctl/operator/lowcode/forecast/schema.yaml +7 -2
  82. ads/opctl/operator/lowcode/forecast/utils.py +18 -44
  83. {oracle_ads-2.11.9.dist-info → oracle_ads-2.11.11.dist-info}/METADATA +9 -12
  84. {oracle_ads-2.11.9.dist-info → oracle_ads-2.11.11.dist-info}/RECORD +87 -61
  85. ads/aqua/job.py +0 -29
  86. {oracle_ads-2.11.9.dist-info → oracle_ads-2.11.11.dist-info}/LICENSE.txt +0 -0
  87. {oracle_ads-2.11.9.dist-info → oracle_ads-2.11.11.dist-info}/WHEEL +0 -0
  88. {oracle_ads-2.11.9.dist-info → oracle_ads-2.11.11.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,170 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ # Copyright (c) 2024 Oracle and/or its affiliates.
4
+ # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
5
+
6
+ """
7
+ aqua.evaluation.entities
8
+ ~~~~~~~~~~~~~~
9
+
10
+ This module contains dataclasses for aqua evaluation.
11
+ """
12
+
13
+ from dataclasses import dataclass, field
14
+ from typing import List, Optional, Union
15
+
16
+ from ads.aqua.data import AquaResourceIdentifier
17
+ from ads.common.serializer import DataClassSerializable
18
+
19
+
20
@dataclass(repr=False)
class CreateAquaEvaluationDetails(DataClassSerializable):
    """Dataclass to create aqua model evaluation.

    Fields
    ------
    evaluation_source_id: str
        The evaluation source id. Must be either model or model deployment ocid.
    evaluation_name: str
        The name for evaluation.
    dataset_path: str
        The dataset path for the evaluation. Could be either a local path from notebook session
        or an object storage path.
    report_path: str
        The report path for the evaluation. Must be an object storage path.
    model_parameters: dict
        The parameters for the evaluation.
    shape_name: str
        The shape name for the evaluation job infrastructure.
    block_storage_size: int
        The storage for the evaluation job infrastructure.
    compartment_id: (str, optional). Defaults to `None`.
        The compartment id for the evaluation.
    project_id: (str, optional). Defaults to `None`.
        The project id for the evaluation.
    evaluation_description: (str, optional). Defaults to `None`.
        The description for evaluation.
    experiment_id: (str, optional). Defaults to `None`.
        The evaluation model version set id. If provided,
        evaluation model will be associated with it.
    experiment_name: (str, optional). Defaults to `None`.
        The evaluation model version set name. If provided,
        the model version set with the same name will be used if exists,
        otherwise a new model version set will be created with the name.
    experiment_description: (str, optional). Defaults to `None`.
        The description for the evaluation model version set.
    memory_in_gbs: (float, optional). Defaults to `None`.
        The memory in gbs for the shape selected.
    ocpus: (float, optional). Defaults to `None`.
        The ocpu count for the shape selected.
    log_group_id: (str, optional). Defaults to `None`.
        The log group id for the evaluation job infrastructure.
    log_id: (str, optional). Defaults to `None`.
        The log id for the evaluation job infrastructure.
    metrics: (list, optional). Defaults to `None`.
        The metrics for the evaluation.
    force_overwrite: (bool, optional). Defaults to `False`.
        Whether to force overwrite the existing file in object storage.
    """

    # Required fields — callers must supply all of these positionally or by name.
    evaluation_source_id: str
    evaluation_name: str
    dataset_path: str
    report_path: str
    model_parameters: dict
    shape_name: str
    block_storage_size: int
    # Optional fields — all default to None/False so the dataclass can be
    # constructed from partial payloads.
    compartment_id: Optional[str] = None
    project_id: Optional[str] = None
    evaluation_description: Optional[str] = None
    experiment_id: Optional[str] = None
    experiment_name: Optional[str] = None
    experiment_description: Optional[str] = None
    memory_in_gbs: Optional[float] = None
    ocpus: Optional[float] = None
    log_group_id: Optional[str] = None
    log_id: Optional[str] = None
    metrics: Optional[List] = None
    force_overwrite: Optional[bool] = False
89
+
90
+
91
@dataclass(repr=False)
class AquaEvalReport(DataClassSerializable):
    """Holds the report produced for a single Aqua evaluation."""

    # Identifier of the evaluation this report belongs to.
    evaluation_id: str = ""
    # Report payload as a string (serialized form — confirm encoding with callers).
    content: str = ""
95
+
96
+
97
@dataclass(repr=False)
class ModelParams(DataClassSerializable):
    """Model inference parameters used for an evaluation.

    NOTE(review): max_tokens/top_p/top_k/temperature are declared as ``str``
    with empty-string defaults — presumably a serialized/display form; confirm
    against the code that populates these before changing the types.
    """

    max_tokens: str = ""
    top_p: str = ""
    top_k: str = ""
    temperature: str = ""
    presence_penalty: Optional[float] = 0.0
    frequency_penalty: Optional[float] = 0.0
    # Stop sequence(s): a single string or a list of strings; defaults to [].
    stop: Optional[Union[str, List[str]]] = field(default_factory=list)
106
+
107
+
108
@dataclass(repr=False)
class AquaEvalParams(ModelParams, DataClassSerializable):
    """Full parameter set of an Aqua evaluation: the model inference
    parameters (inherited from ModelParams) plus job shape and storage paths."""

    shape: str = ""
    dataset_path: str = ""
    report_path: str = ""
113
+
114
+
115
@dataclass(repr=False)
class AquaEvalMetric(DataClassSerializable):
    """Describes a single metric reported for an Aqua evaluation."""

    # Machine-readable metric identifier.
    key: str
    # Human-readable metric name.
    name: str
    description: str = ""
120
+
121
+
122
@dataclass(repr=False)
class AquaEvalMetricSummary(DataClassSerializable):
    """One row of the metric summary for an Aqua evaluation.

    All values are kept as strings (presumably pre-formatted for display —
    confirm with the report-producing code).
    """

    metric: str = ""
    score: str = ""
    grade: str = ""
127
+
128
+
129
@dataclass(repr=False)
class AquaEvalMetrics(DataClassSerializable):
    """Aggregated metrics payload for an Aqua evaluation."""

    # Identifier of the evaluation the metrics belong to.
    id: str
    # Report content as a string (serialized form — confirm encoding with callers).
    report: str
    metric_results: List[AquaEvalMetric] = field(default_factory=list)
    metric_summary_result: List[AquaEvalMetricSummary] = field(default_factory=list)
135
+
136
+
137
@dataclass(repr=False)
class AquaEvaluationCommands(DataClassSerializable):
    """Command payload describing how an evaluation job run should execute.

    All fields are required; this dataclass is a plain data carrier with no
    validation of its own.
    """

    evaluation_id: str
    evaluation_target_id: str
    input_data: dict
    metrics: list
    output_dir: str
    params: dict
145
+
146
+
147
@dataclass(repr=False)
class AquaEvaluationSummary(DataClassSerializable):
    """Represents a summary of an Aqua evaluation."""

    id: str
    name: str
    console_url: str
    lifecycle_state: str
    lifecycle_details: str
    time_created: str
    tags: dict
    # Related resources; each defaults to an empty AquaResourceIdentifier
    # rather than None so attribute access is always safe.
    experiment: AquaResourceIdentifier = field(default_factory=AquaResourceIdentifier)
    source: AquaResourceIdentifier = field(default_factory=AquaResourceIdentifier)
    job: AquaResourceIdentifier = field(default_factory=AquaResourceIdentifier)
    parameters: AquaEvalParams = field(default_factory=AquaEvalParams)
162
+
163
+
164
@dataclass(repr=False)
class AquaEvaluationDetail(AquaEvaluationSummary, DataClassSerializable):
    """Represents the details of an Aqua evaluation: everything in
    AquaEvaluationSummary plus log references and introspection data."""

    log_group: AquaResourceIdentifier = field(default_factory=AquaResourceIdentifier)
    log: AquaResourceIdentifier = field(default_factory=AquaResourceIdentifier)
    introspection: dict = field(default_factory=dict)
@@ -0,0 +1,71 @@
1
+ #!/usr/bin/env python
2
+ # -*- coding: utf-8 -*-
3
+ # Copyright (c) 2024 Oracle and/or its affiliates.
4
+ # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
5
+ """
6
+ aqua.evaluation.errors
7
+ ~~~~~~~~~~~~~~
8
+
9
+ This module contains errors in Aqua Evaluation.
10
+ """
11
+
12
+ from ads.common.extended_enum import ExtendedEnumMeta
13
+
14
+
15
+ class EvaluationJobExitCode(str, metaclass=ExtendedEnumMeta):
16
+ SUCCESS = 0
17
+ COMMON_ERROR = 1
18
+
19
+ # Configuration-related issues 10-19
20
+ INVALID_EVALUATION_CONFIG = 10
21
+ EVALUATION_CONFIG_NOT_PROVIDED = 11
22
+ INVALID_OUTPUT_DIR = 12
23
+ INVALID_INPUT_DATASET_PATH = 13
24
+ INVALID_EVALUATION_ID = 14
25
+ INVALID_TARGET_EVALUATION_ID = 15
26
+ INVALID_EVALUATION_CONFIG_VALIDATION = 16
27
+
28
+ # Evaluation process issues 20-39
29
+ OUTPUT_DIR_NOT_FOUND = 20
30
+ INVALID_INPUT_DATASET = 21
31
+ INPUT_DATA_NOT_FOUND = 22
32
+ EVALUATION_ID_NOT_FOUND = 23
33
+ EVALUATION_ALREADY_PERFORMED = 24
34
+ EVALUATION_TARGET_NOT_FOUND = 25
35
+ NO_SUCCESS_INFERENCE_RESULT = 26
36
+ COMPUTE_EVALUATION_ERROR = 27
37
+ EVALUATION_REPORT_ERROR = 28
38
+ MODEL_INFERENCE_WRONG_RESPONSE_FORMAT = 29
39
+ UNSUPPORTED_METRICS = 30
40
+ METRIC_CALCULATION_FAILURE = 31
41
+ EVALUATION_MODEL_CATALOG_RECORD_CREATION_FAILED = 32
42
+
43
+
44
+ EVALUATION_JOB_EXIT_CODE_MESSAGE = {
45
+ EvaluationJobExitCode.SUCCESS: "Success",
46
+ EvaluationJobExitCode.COMMON_ERROR: "An error occurred during the evaluation, please check the log for more information.",
47
+ EvaluationJobExitCode.INVALID_EVALUATION_CONFIG: "The provided evaluation configuration was not in the correct format, supported formats are YAML or JSON.",
48
+ EvaluationJobExitCode.EVALUATION_CONFIG_NOT_PROVIDED: "The evaluation config was not provided.",
49
+ EvaluationJobExitCode.INVALID_OUTPUT_DIR: "The specified output directory path is invalid.",
50
+ EvaluationJobExitCode.INVALID_INPUT_DATASET_PATH: "Dataset path is invalid.",
51
+ EvaluationJobExitCode.INVALID_EVALUATION_ID: "Evaluation ID was not found in the Model Catalog.",
52
+ EvaluationJobExitCode.INVALID_TARGET_EVALUATION_ID: "Target evaluation ID was not found in the Model Deployment.",
53
+ EvaluationJobExitCode.INVALID_EVALUATION_CONFIG_VALIDATION: "Validation errors in the evaluation config.",
54
+ EvaluationJobExitCode.OUTPUT_DIR_NOT_FOUND: "Destination folder does not exist or cannot be used for writing, verify the folder's existence and permissions.",
55
+ EvaluationJobExitCode.INVALID_INPUT_DATASET: "Input dataset is in an invalid format, ensure the dataset is in jsonl format and that includes the required columns: 'prompt', 'completion' (optional 'category').",
56
+ EvaluationJobExitCode.INPUT_DATA_NOT_FOUND: "Input data file does not exist or cannot be use for reading, verify the file's existence and permissions.",
57
+ EvaluationJobExitCode.EVALUATION_ID_NOT_FOUND: "Evaluation ID does not match any resource in the Model Catalog, or access may be blocked by policies.",
58
+ EvaluationJobExitCode.EVALUATION_ALREADY_PERFORMED: "Evaluation already has an attached artifact, indicating that the evaluation has already been performed.",
59
+ EvaluationJobExitCode.EVALUATION_TARGET_NOT_FOUND: "Target evaluation ID does not match any resources in Model Deployment.",
60
+ EvaluationJobExitCode.NO_SUCCESS_INFERENCE_RESULT: "Inference process completed without producing expected outcome, verify the model parameters and config.",
61
+ EvaluationJobExitCode.COMPUTE_EVALUATION_ERROR: "Evaluation process encountered an issue while calculating metrics.",
62
+ EvaluationJobExitCode.EVALUATION_REPORT_ERROR: "Failed to save the evaluation report due to an error. Ensure the evaluation model is currently active and the specified path for the output report is valid and accessible. Verify these conditions and reinitiate the evaluation process.",
63
+ EvaluationJobExitCode.MODEL_INFERENCE_WRONG_RESPONSE_FORMAT: "Evaluation encountered unsupported, or unexpected model output, verify the target evaluation model is compatible and produces the correct format.",
64
+ EvaluationJobExitCode.UNSUPPORTED_METRICS: "None of the provided metrics are supported by the framework.",
65
+ EvaluationJobExitCode.METRIC_CALCULATION_FAILURE: "All attempted metric calculations were unsuccessful. Please review the metric configurations and input data.",
66
+ EvaluationJobExitCode.EVALUATION_MODEL_CATALOG_RECORD_CREATION_FAILED: (
67
+ "Failed to create a Model Catalog record for the evaluation. "
68
+ "This could be due to missing required permissions. "
69
+ "Please check the log for more information."
70
+ ),
71
+ }