azure-ai-evaluation 0.0.0b0__py3-none-any.whl → 1.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (100)
  1. azure/ai/evaluation/__init__.py +60 -0
  2. azure/ai/evaluation/_common/__init__.py +16 -0
  3. azure/ai/evaluation/_common/constants.py +65 -0
  4. azure/ai/evaluation/_common/rai_service.py +452 -0
  5. azure/ai/evaluation/_common/utils.py +87 -0
  6. azure/ai/evaluation/_constants.py +50 -0
  7. azure/ai/evaluation/_evaluate/__init__.py +3 -0
  8. azure/ai/evaluation/_evaluate/_batch_run_client/__init__.py +8 -0
  9. azure/ai/evaluation/_evaluate/_batch_run_client/batch_run_context.py +72 -0
  10. azure/ai/evaluation/_evaluate/_batch_run_client/code_client.py +150 -0
  11. azure/ai/evaluation/_evaluate/_batch_run_client/proxy_client.py +61 -0
  12. azure/ai/evaluation/_evaluate/_eval_run.py +494 -0
  13. azure/ai/evaluation/_evaluate/_evaluate.py +689 -0
  14. azure/ai/evaluation/_evaluate/_telemetry/__init__.py +174 -0
  15. azure/ai/evaluation/_evaluate/_utils.py +237 -0
  16. azure/ai/evaluation/_evaluators/__init__.py +3 -0
  17. azure/ai/evaluation/_evaluators/_bleu/__init__.py +9 -0
  18. azure/ai/evaluation/_evaluators/_bleu/_bleu.py +73 -0
  19. azure/ai/evaluation/_evaluators/_chat/__init__.py +9 -0
  20. azure/ai/evaluation/_evaluators/_chat/_chat.py +350 -0
  21. azure/ai/evaluation/_evaluators/_chat/retrieval/__init__.py +9 -0
  22. azure/ai/evaluation/_evaluators/_chat/retrieval/_retrieval.py +163 -0
  23. azure/ai/evaluation/_evaluators/_chat/retrieval/retrieval.prompty +48 -0
  24. azure/ai/evaluation/_evaluators/_coherence/__init__.py +7 -0
  25. azure/ai/evaluation/_evaluators/_coherence/_coherence.py +122 -0
  26. azure/ai/evaluation/_evaluators/_coherence/coherence.prompty +62 -0
  27. azure/ai/evaluation/_evaluators/_content_safety/__init__.py +21 -0
  28. azure/ai/evaluation/_evaluators/_content_safety/_content_safety.py +108 -0
  29. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_base.py +66 -0
  30. azure/ai/evaluation/_evaluators/_content_safety/_content_safety_chat.py +296 -0
  31. azure/ai/evaluation/_evaluators/_content_safety/_hate_unfairness.py +78 -0
  32. azure/ai/evaluation/_evaluators/_content_safety/_self_harm.py +76 -0
  33. azure/ai/evaluation/_evaluators/_content_safety/_sexual.py +76 -0
  34. azure/ai/evaluation/_evaluators/_content_safety/_violence.py +76 -0
  35. azure/ai/evaluation/_evaluators/_eci/__init__.py +0 -0
  36. azure/ai/evaluation/_evaluators/_eci/_eci.py +99 -0
  37. azure/ai/evaluation/_evaluators/_f1_score/__init__.py +9 -0
  38. azure/ai/evaluation/_evaluators/_f1_score/_f1_score.py +141 -0
  39. azure/ai/evaluation/_evaluators/_fluency/__init__.py +9 -0
  40. azure/ai/evaluation/_evaluators/_fluency/_fluency.py +122 -0
  41. azure/ai/evaluation/_evaluators/_fluency/fluency.prompty +61 -0
  42. azure/ai/evaluation/_evaluators/_gleu/__init__.py +9 -0
  43. azure/ai/evaluation/_evaluators/_gleu/_gleu.py +71 -0
  44. azure/ai/evaluation/_evaluators/_groundedness/__init__.py +9 -0
  45. azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py +123 -0
  46. azure/ai/evaluation/_evaluators/_groundedness/groundedness.prompty +54 -0
  47. azure/ai/evaluation/_evaluators/_meteor/__init__.py +9 -0
  48. azure/ai/evaluation/_evaluators/_meteor/_meteor.py +96 -0
  49. azure/ai/evaluation/_evaluators/_protected_material/__init__.py +5 -0
  50. azure/ai/evaluation/_evaluators/_protected_material/_protected_material.py +104 -0
  51. azure/ai/evaluation/_evaluators/_protected_materials/__init__.py +5 -0
  52. azure/ai/evaluation/_evaluators/_protected_materials/_protected_materials.py +104 -0
  53. azure/ai/evaluation/_evaluators/_qa/__init__.py +9 -0
  54. azure/ai/evaluation/_evaluators/_qa/_qa.py +111 -0
  55. azure/ai/evaluation/_evaluators/_relevance/__init__.py +9 -0
  56. azure/ai/evaluation/_evaluators/_relevance/_relevance.py +131 -0
  57. azure/ai/evaluation/_evaluators/_relevance/relevance.prompty +69 -0
  58. azure/ai/evaluation/_evaluators/_rouge/__init__.py +10 -0
  59. azure/ai/evaluation/_evaluators/_rouge/_rouge.py +98 -0
  60. azure/ai/evaluation/_evaluators/_similarity/__init__.py +9 -0
  61. azure/ai/evaluation/_evaluators/_similarity/_similarity.py +130 -0
  62. azure/ai/evaluation/_evaluators/_similarity/similarity.prompty +71 -0
  63. azure/ai/evaluation/_evaluators/_xpia/__init__.py +5 -0
  64. azure/ai/evaluation/_evaluators/_xpia/xpia.py +140 -0
  65. azure/ai/evaluation/_exceptions.py +107 -0
  66. azure/ai/evaluation/_http_utils.py +395 -0
  67. azure/ai/evaluation/_model_configurations.py +27 -0
  68. azure/ai/evaluation/_user_agent.py +6 -0
  69. azure/ai/evaluation/_version.py +5 -0
  70. azure/ai/evaluation/py.typed +0 -0
  71. azure/ai/evaluation/simulator/__init__.py +15 -0
  72. azure/ai/evaluation/simulator/_adversarial_scenario.py +27 -0
  73. azure/ai/evaluation/simulator/_adversarial_simulator.py +450 -0
  74. azure/ai/evaluation/simulator/_constants.py +17 -0
  75. azure/ai/evaluation/simulator/_conversation/__init__.py +315 -0
  76. azure/ai/evaluation/simulator/_conversation/_conversation.py +178 -0
  77. azure/ai/evaluation/simulator/_conversation/constants.py +30 -0
  78. azure/ai/evaluation/simulator/_direct_attack_simulator.py +252 -0
  79. azure/ai/evaluation/simulator/_helpers/__init__.py +4 -0
  80. azure/ai/evaluation/simulator/_helpers/_language_suffix_mapping.py +17 -0
  81. azure/ai/evaluation/simulator/_helpers/_simulator_data_classes.py +93 -0
  82. azure/ai/evaluation/simulator/_indirect_attack_simulator.py +207 -0
  83. azure/ai/evaluation/simulator/_model_tools/__init__.py +23 -0
  84. azure/ai/evaluation/simulator/_model_tools/_identity_manager.py +147 -0
  85. azure/ai/evaluation/simulator/_model_tools/_proxy_completion_model.py +228 -0
  86. azure/ai/evaluation/simulator/_model_tools/_rai_client.py +157 -0
  87. azure/ai/evaluation/simulator/_model_tools/_template_handler.py +157 -0
  88. azure/ai/evaluation/simulator/_model_tools/models.py +616 -0
  89. azure/ai/evaluation/simulator/_prompty/task_query_response.prompty +69 -0
  90. azure/ai/evaluation/simulator/_prompty/task_simulate.prompty +36 -0
  91. azure/ai/evaluation/simulator/_tracing.py +92 -0
  92. azure/ai/evaluation/simulator/_utils.py +111 -0
  93. azure/ai/evaluation/simulator/simulator.py +579 -0
  94. azure_ai_evaluation-1.0.0b1.dist-info/METADATA +377 -0
  95. azure_ai_evaluation-1.0.0b1.dist-info/RECORD +97 -0
  96. {azure_ai_evaluation-0.0.0b0.dist-info → azure_ai_evaluation-1.0.0b1.dist-info}/WHEEL +1 -1
  97. azure_ai_evaluation-1.0.0b1.dist-info/top_level.txt +1 -0
  98. azure_ai_evaluation-0.0.0b0.dist-info/METADATA +0 -7
  99. azure_ai_evaluation-0.0.0b0.dist-info/RECORD +0 -4
  100. azure_ai_evaluation-0.0.0b0.dist-info/top_level.txt +0 -1
azure/ai/evaluation/_evaluate/_eval_run.py
@@ -0,0 +1,494 @@
+ # ---------------------------------------------------------
+ # Copyright (c) Microsoft Corporation. All rights reserved.
+ # ---------------------------------------------------------
+ import contextlib
+ import dataclasses
+ import enum
+ import logging
+ import os
+ import posixpath
+ import time
+ import uuid
+ from typing import Any, Dict, Optional, Set
+ from urllib.parse import urlparse
+
+ from azure.core.pipeline.policies import RetryPolicy
+ from azure.core.rest import HttpResponse
+
+ from promptflow._sdk.entities import Run
+ from azure.ai.evaluation._http_utils import get_http_client
+ from azure.ai.evaluation._version import VERSION
+ from azure.ai.evaluation._exceptions import EvaluationException, ErrorBlame, ErrorCategory, ErrorTarget
+
+ LOGGER = logging.getLogger(__name__)
+
+
+ # Handle optional import. The azure libraries are only present if
+ # promptflow-azure is installed.
+ try:
+     from azure.ai.ml.entities._credentials import AccountKeyConfiguration  # pylint: disable=ungrouped-imports
+     from azure.ai.ml.entities._datastore.datastore import Datastore
+     from azure.storage.blob import BlobServiceClient
+ except (ModuleNotFoundError, ImportError):
+     # If the above modules cannot be imported, we are running in local mode,
+     # and MLClient in the constructor will be None, so we will never reach
+     # the Azure-dependent code.
+
+     # We log the import failure only at debug level because:
+     # - If the project configuration was not provided, this import is not needed.
+     # - If the project configuration was provided, the error will be raised by PFClient.
+     LOGGER.debug("promptflow.azure is not installed.")
+
+
+ @dataclasses.dataclass
+ class RunInfo:
+     """
+     A holder for run info, needed for logging.
+     """
+
+     run_id: str
+     experiment_id: str
+     run_name: str
+
+     @staticmethod
+     def generate(run_name: Optional[str]) -> "RunInfo":
+         """
+         Generate a new RunInfo instance with the run ID and experiment ID.
+
+         **Note:** This code is used when we are in a failed state and cannot get a run.
+
+         :param run_name: The name of a run.
+         :type run_name: Optional[str]
+         :return: The RunInfo instance.
+         :rtype: azure.ai.evaluation._evaluate._eval_run.RunInfo
+         """
+         return RunInfo(str(uuid.uuid4()), str(uuid.uuid4()), run_name or "")
+
+
+ class RunStatus(enum.Enum):
+     """Run states."""
+
+     NOT_STARTED = 0
+     STARTED = 1
+     BROKEN = 2
+     TERMINATED = 3
+
+
+ class EvalRun(contextlib.AbstractContextManager):  # pylint: disable=too-many-instance-attributes
+     """
+     The simple singleton run class, used for accessing the artifact store.
+
+     :param run_name: The name of the run.
+     :type run_name: Optional[str]
+     :param tracking_uri: Tracking URI for this run; required to make calls.
+     :type tracking_uri: str
+     :param subscription_id: The subscription ID used to track the run.
+     :type subscription_id: str
+     :param group_name: The resource group used to track the run.
+     :type group_name: str
+     :param workspace_name: The name of the workspace/project used to track the run.
+     :type workspace_name: str
+     :param ml_client: The ML client used for authentication into Azure.
+     :type ml_client: azure.ai.ml.MLClient
+     :param promptflow_run: The promptflow run used by the evaluation.
+     :type promptflow_run: Optional[promptflow._sdk.entities.Run]
+     """
+
+     _MAX_RETRIES = 5
+     _BACKOFF_FACTOR = 2
+     _TIMEOUT = 5
+     _SCOPE = "https://management.azure.com/.default"
+
+     EVALUATION_ARTIFACT = "instance_results.jsonl"
+     EVALUATION_ARTIFACT_DUMMY_RUN = "eval_results.jsonl"
+
+     def __init__(
+         self,
+         run_name: Optional[str],
+         tracking_uri: str,
+         subscription_id: str,
+         group_name: str,
+         workspace_name: str,
+         ml_client: "MLClient",
+         promptflow_run: Optional[Run] = None,
+     ) -> None:
+         self._tracking_uri: str = tracking_uri
+         self._subscription_id: str = subscription_id
+         self._resource_group_name: str = group_name
+         self._workspace_name: str = workspace_name
+         self._ml_client: Any = ml_client
+         self._is_promptflow_run: bool = promptflow_run is not None
+         self._run_name = run_name
+         self._promptflow_run = promptflow_run
+         self._status = RunStatus.NOT_STARTED
+         self._url_base = None
+         self.info = None
+
+     @property
+     def status(self) -> RunStatus:
+         """
+         Return the run status.
+
+         :return: The status of the run.
+         :rtype: azure.ai.evaluation._evaluate._eval_run.RunStatus
+         """
+         return self._status
+
+     def _get_scope(self) -> str:
+         """
+         Return the scope information for the workspace.
+
+         :return: The scope information for the workspace.
+         :rtype: str
+         """
+         return (
+             "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.MachineLearningServices/workspaces/{}"
+         ).format(
+             self._subscription_id,
+             self._resource_group_name,
+             self._workspace_name,
+         )
+
+     def _start_run(self) -> None:
+         """
+         Start the run, or, if it is not applicable (for example, if tracking is not enabled), mark it as started.
+         """
+         self._check_state_and_log("start run", {v for v in RunStatus if v != RunStatus.NOT_STARTED}, True)
+         self._status = RunStatus.STARTED
+         if self._tracking_uri is None:
+             LOGGER.warning(
+                 "A tracking_uri was not provided. The results will be saved locally, but will not be logged to Azure."
+             )
+             self._url_base = None
+             self._status = RunStatus.BROKEN
+             self.info = RunInfo.generate(self._run_name)
+         else:
+             self._url_base = urlparse(self._tracking_uri).netloc
+             if self._promptflow_run is not None:
+                 self.info = RunInfo(
+                     self._promptflow_run.name, self._promptflow_run._experiment_name, self._promptflow_run.name
+                 )
+             else:
+                 url = f"https://{self._url_base}/mlflow/v2.0{self._get_scope()}/api/2.0/mlflow/runs/create"
+                 body = {
+                     "experiment_id": "0",
+                     "user_id": "azure-ai-evaluation",
+                     "start_time": int(time.time() * 1000),
+                     "tags": [{"key": "mlflow.user", "value": "azure-ai-evaluation"}],
+                 }
+                 if self._run_name:
+                     body["run_name"] = self._run_name
+                 response = self.request_with_retry(url=url, method="POST", json_dict=body)
+                 if response.status_code != 200:
+                     self.info = RunInfo.generate(self._run_name)
+                     LOGGER.warning(
+                         f"The run failed to start: {response.status_code}: {response.text()}. "
+                         "The results will be saved locally, but will not be logged to Azure."
+                     )
+                     self._status = RunStatus.BROKEN
+                 else:
+                     parsed_response = response.json()
+                     self.info = RunInfo(
+                         run_id=parsed_response["run"]["info"]["run_id"],
+                         experiment_id=parsed_response["run"]["info"]["experiment_id"],
+                         run_name=parsed_response["run"]["info"]["run_name"],
+                     )
+                     self._status = RunStatus.STARTED
+
+     def _end_run(self, reason: str) -> None:
+         """
+         Terminate the run.
+
+         :param reason: Reason for run termination. Possible values are "FINISHED", "FAILED", and "KILLED".
+         :type reason: str
+         :raises EvaluationException: Raised if the run is not in ("FINISHED", "FAILED", "KILLED")
+         """
+         if not self._check_state_and_log(
+             "stop run", {RunStatus.BROKEN, RunStatus.NOT_STARTED, RunStatus.TERMINATED}, False
+         ):
+             return
+         if self._is_promptflow_run:
+             # This run is already finished; we just add artifacts/metrics to it.
+             self._status = RunStatus.TERMINATED
+             return
+         if reason not in ("FINISHED", "FAILED", "KILLED"):
+             raise EvaluationException(
+                 message=f"Incorrect terminal status {reason}. Valid statuses are 'FINISHED', 'FAILED' and 'KILLED'.",
+                 internal_message="Incorrect terminal status. Valid statuses are 'FINISHED', 'FAILED' and 'KILLED'.",
+                 target=ErrorTarget.EVAL_RUN,
+                 category=ErrorCategory.FAILED_EXECUTION,
+                 blame=ErrorBlame.UNKNOWN,
+             )
+         url = f"https://{self._url_base}/mlflow/v2.0{self._get_scope()}/api/2.0/mlflow/runs/update"
+         body = {
+             "run_uuid": self.info.run_id,
+             "status": reason,
+             "end_time": int(time.time() * 1000),
+             "run_id": self.info.run_id,
+         }
+         response = self.request_with_retry(url=url, method="POST", json_dict=body)
+         if response.status_code != 200:
+             LOGGER.warning("Unable to terminate the run.")
+         self._status = RunStatus.TERMINATED
+
+     def __enter__(self):
+         """The context manager enter call.
+
+         :return: The instance of the class.
+         :rtype: azure.ai.evaluation._evaluate._eval_run.EvalRun
+         """
+         self._start_run()
+         return self
+
+     def __exit__(self, exc_type, exc_value, exc_tb):
+         """The context manager exit call."""
+         self._end_run("FINISHED")
+
+     def get_run_history_uri(self) -> str:
+         """
+         Get the run history service URI.
+
+         :return: The run history service URI.
+         :rtype: str
+         """
+         return (
+             f"https://{self._url_base}"
+             "/history/v1.0"
+             f"{self._get_scope()}"
+             f"/experimentids/{self.info.experiment_id}/runs/{self.info.run_id}"
+         )
+
+     def get_artifacts_uri(self) -> str:
+         """
+         Get the URI to upload the artifacts to.
+
+         :return: The URI to upload the artifacts to.
+         :rtype: str
+         """
+         return self.get_run_history_uri() + "/artifacts/batch/metadata"
+
+     def get_metrics_url(self) -> str:
+         """
+         Return the URL needed to track the mlflow metrics.
+
+         :return: The URL needed to track the mlflow metrics.
+         :rtype: str
+         """
+         return f"https://{self._url_base}/mlflow/v2.0{self._get_scope()}/api/2.0/mlflow/runs/log-metric"
+
+     def _get_token(self):
+         # We have to use a lazy import because promptflow.azure
+         # is an optional dependency.
+         from promptflow.azure._utils._token_cache import ArmTokenCache  # pylint: disable=import-error,no-name-in-module
+
+         return ArmTokenCache().get_token(self._ml_client._credential)
+
+     def request_with_retry(
+         self, url: str, method: str, json_dict: Dict[str, Any], headers: Optional[Dict[str, str]] = None
+     ) -> HttpResponse:
+         """
+         Send the request with retries.
+
+         :param url: The URL to send the request to.
+         :type url: str
+         :param method: The request method to be used.
+         :type method: str
+         :param json_dict: The JSON dictionary (not serialized) to be sent.
+         :type json_dict: Dict[str, Any]
+         :param headers: The headers to be sent with the request.
+         :type headers: Optional[Dict[str, str]]
+         :return: The response.
+         :rtype: HttpResponse
+         """
+         if headers is None:
+             headers = {}
+         headers["User-Agent"] = f"promptflow/{VERSION}"
+         headers["Authorization"] = f"Bearer {self._get_token()}"
+
+         session = get_http_client().with_policies(
+             retry_policy=RetryPolicy(
+                 retry_total=EvalRun._MAX_RETRIES,
+                 retry_connect=EvalRun._MAX_RETRIES,
+                 retry_read=EvalRun._MAX_RETRIES,
+                 retry_status=EvalRun._MAX_RETRIES,
+                 retry_on_status_codes=(408, 429, 500, 502, 503, 504),
+                 retry_backoff_factor=EvalRun._BACKOFF_FACTOR,
+             )
+         )
+         return session.request(method, url, headers=headers, json=json_dict, timeout=EvalRun._TIMEOUT)
+
+     def _log_warning(self, failed_op: str, response: HttpResponse) -> None:
+         """
+         Log the error if the request was not successful.
+
+         :param failed_op: The user-friendly message for the failed operation.
+         :type failed_op: str
+         :param response: The response.
+         :type response: HttpResponse
+         """
+         LOGGER.warning(
+             f"Unable to {failed_op}, "
+             f"the request failed with status code {response.status_code}, "
+             f"{response.text()=}."
+         )
+
+     def _check_state_and_log(self, action: str, bad_states: Set[RunStatus], should_raise: bool) -> bool:
+         """
+         Check that the run is in the correct state and log a warning if it is not.
+
+         :param action: The action which caused this check. For example, if it is "log artifact",
+             the log message will start with "Unable to log artifact."
+         :type action: str
+         :param bad_states: The states considered invalid for the given action.
+         :type bad_states: Set[RunStatus]
+         :param should_raise: Whether to raise an error if a bad state has been encountered.
+         :type should_raise: bool
+         :raises: ~azure.ai.evaluation._exceptions.EvaluationException if should_raise is True
+             and an invalid state was encountered.
+         :return: Whether or not the run is in the correct state.
+         :rtype: bool
+         """
+         if self._status in bad_states:
+             msg = f"Unable to {action} due to Run status={self._status}."
+             if should_raise:
+                 raise EvaluationException(
+                     message=msg,
+                     internal_message=msg,
+                     target=ErrorTarget.EVAL_RUN,
+                     category=ErrorCategory.FAILED_EXECUTION,
+                     blame=ErrorBlame.UNKNOWN,
+                 )
+             LOGGER.warning(msg)
+             return False
+         return True
+
+     def log_artifact(self, artifact_folder: str, artifact_name: str = EVALUATION_ARTIFACT) -> None:
+         """
+         The local implementation of mlflow-like artifact logging.
+
+         **Note:** In the current implementation we do not use a thread pool executor
+         as azureml-mlflow does; instead we upload the files in a loop, as we do not
+         expect to upload many artifacts.
+
+         :param artifact_folder: The folder with artifacts to be uploaded.
+         :type artifact_folder: str
+         :param artifact_name: The name of the artifact to be uploaded. Defaults to
+             azure.ai.evaluation._evaluate._eval_run.EvalRun.EVALUATION_ARTIFACT.
+         :type artifact_name: str
+         """
+         if not self._check_state_and_log("log artifact", {RunStatus.BROKEN, RunStatus.NOT_STARTED}, False):
+             return
+         # Check if the artifact directory is empty or does not exist.
+         if not os.path.isdir(artifact_folder):
+             LOGGER.warning("The path to the artifact is either not a directory or does not exist.")
+             return
+         if not os.listdir(artifact_folder):
+             LOGGER.warning("The path to the artifact is empty.")
+             return
+         if not os.path.isfile(os.path.join(artifact_folder, artifact_name)):
+             LOGGER.warning("The run results file was not found, skipping artifacts upload.")
+             return
+         # First we will list the files and the appropriate remote paths for them.
+         root_upload_path = posixpath.join("promptflow", "PromptFlowArtifacts", self.info.run_name)
+         remote_paths = {"paths": []}
+         local_paths = []
+         # Go over the artifact folder and upload all artifacts.
+         for root, _, filenames in os.walk(artifact_folder):
+             upload_path = root_upload_path
+             if root != artifact_folder:
+                 rel_path = os.path.relpath(root, artifact_folder)
+                 if rel_path != ".":
+                     upload_path = posixpath.join(root_upload_path, rel_path)
+             for f in filenames:
+                 remote_file_path = posixpath.join(upload_path, f)
+                 remote_paths["paths"].append({"path": remote_file_path})
+                 local_file_path = os.path.join(root, f)
+                 local_paths.append(local_file_path)
+
+         # We will write the artifacts to the workspaceblobstore.
+         datastore = self._ml_client.datastores.get_default(include_secrets=True)
+         account_url = f"{datastore.account_name}.blob.{datastore.endpoint}"
+         svc_client = BlobServiceClient(account_url=account_url, credential=self._get_datastore_credential(datastore))
+         for local, remote in zip(local_paths, remote_paths["paths"]):
+             blob_client = svc_client.get_blob_client(container=datastore.container_name, blob=remote["path"])
+             with open(local, "rb") as fp:
+                 blob_client.upload_blob(fp, overwrite=True)
+
+         # To show the artifact in the UI we need to register it. If this is a promptflow run,
+         # we are rewriting an already registered artifact and need to skip this step.
+         if self._is_promptflow_run:
+             return
+         url = (
+             f"https://{self._url_base}/artifact/v2.0/subscriptions/{self._subscription_id}"
+             f"/resourceGroups/{self._resource_group_name}/providers/"
+             f"Microsoft.MachineLearningServices/workspaces/{self._workspace_name}/artifacts/register"
+         )
+
+         response = self.request_with_retry(
+             url=url,
+             method="POST",
+             json_dict={
+                 "origin": "ExperimentRun",
+                 "container": f"dcid.{self.info.run_id}",
+                 "path": artifact_name,
+                 "dataPath": {
+                     "dataStoreName": datastore.name,
+                     "relativePath": posixpath.join(root_upload_path, artifact_name),
+                 },
+             },
+         )
+         if response.status_code != 200:
+             self._log_warning("register artifact", response)
+
+     def _get_datastore_credential(self, datastore: "Datastore"):
+         # Reference the logic in azure.ai.ml._artifact._artifact_utilities
+         # https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ml/azure-ai-ml/azure/ai/ml/_artifacts/_artifact_utilities.py#L103
+         credential = datastore.credentials
+         if isinstance(credential, AccountKeyConfiguration):
+             return credential.account_key
+         if hasattr(credential, "sas_token"):
+             return credential.sas_token
+         return self._ml_client.datastores._credential
+
+     def log_metric(self, key: str, value: float) -> None:
+         """
+         Log the metric to Azure, similar to how it is done by mlflow.
+
+         :param key: The metric name to be logged.
+         :type key: str
+         :param value: The value to be logged.
+         :type value: float
+         """
+         if not self._check_state_and_log("log metric", {RunStatus.BROKEN, RunStatus.NOT_STARTED}, False):
+             return
+         body = {
+             "run_uuid": self.info.run_id,
+             "key": key,
+             "value": value,
+             "timestamp": int(time.time() * 1000),
+             "step": 0,
+             "run_id": self.info.run_id,
+         }
+         response = self.request_with_retry(
+             url=self.get_metrics_url(),
+             method="POST",
+             json_dict=body,
+         )
+         if response.status_code != 200:
+             self._log_warning("save metrics", response)
+
+     def write_properties_to_run_history(self, properties: Dict[str, Any]) -> None:
+         """
+         Write properties to the RunHistory service.
+
+         :param properties: The properties to be written to run history.
+         :type properties: dict
+         """
+         if not self._check_state_and_log("write properties", {RunStatus.BROKEN, RunStatus.NOT_STARTED}, False):
+             return
+         # Update the host to run history and request the PATCH API.
+         response = self.request_with_retry(
+             url=self.get_run_history_uri(),
+             method="PATCH",
+             json_dict={"runId": self.info.run_id, "properties": properties},
+         )
+         if response.status_code != 200:
+             LOGGER.error("Failed to write properties '%s' to run history: %s", properties, response.text())
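
Taken together, EvalRun is meant to be used as a context manager: entering it starts (or adopts) an MLflow run, and exiting it ends the run with the "FINISHED" status. The following is a minimal usage sketch, not part of the diff above: it assumes azure-ai-ml is installed and that the workspace's MLflow tracking URI can be read from the Workspace entity; the workspace identifiers and folder paths are placeholders, and EvalRun is a private module, so this interface may change without notice.

from azure.ai.ml import MLClient
from azure.identity import DefaultAzureCredential

from azure.ai.evaluation._evaluate._eval_run import EvalRun

# Hypothetical workspace coordinates; replace with real values.
subscription_id = "<subscription-id>"
resource_group = "<resource-group>"
workspace_name = "<workspace-name>"

ml_client = MLClient(
    credential=DefaultAzureCredential(),
    subscription_id=subscription_id,
    resource_group_name=resource_group,
    workspace_name=workspace_name,
)

# The workspace's MLflow tracking URI. If None were passed instead, the run
# would go into the BROKEN state and results would only be kept locally.
tracking_uri = ml_client.workspaces.get(workspace_name).mlflow_tracking_uri

# __enter__ calls _start_run(); __exit__ calls _end_run("FINISHED").
with EvalRun(
    run_name="my-eval-run",
    tracking_uri=tracking_uri,
    subscription_id=subscription_id,
    group_name=resource_group,
    workspace_name=workspace_name,
    ml_client=ml_client,
) as run:
    run.log_metric("f1_score", 0.87)
    # The folder must contain the run results file, instance_results.jsonl
    # (EvalRun.EVALUATION_ARTIFACT), or the upload is skipped with a warning.
    run.log_artifact("./evaluation_output")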