beekeeper-monitors-watsonx 1.1.0.post1__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,16 +2,18 @@ import json
 import logging
 import os
 import uuid
-from typing import Dict, List, Literal, Union
+import warnings
+from typing import Dict, List, Union
 
 import certifi
 from beekeeper.core.monitors import PromptMonitor
 from beekeeper.core.monitors.types import PayloadRecord
+from beekeeper.core.prompts import PromptTemplate
 from beekeeper.core.prompts.utils import extract_template_vars
 from beekeeper.monitors.watsonx.supporting_classes.credentials import (
     CloudPakforDataCredentials,
 )
-from beekeeper.monitors.watsonx.supporting_classes.enums import Region
+from beekeeper.monitors.watsonx.supporting_classes.enums import Region, TaskType
 from beekeeper.monitors.watsonx.utils.data_utils import validate_and_filter_dict
 from beekeeper.monitors.watsonx.utils.instrumentation import suppress_output
 from deprecated import deprecated
@@ -73,6 +75,8 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
 
     Example:
         ```python
+        from beekeeper.monitors.watsonx.supporting_classes.enums import Region
+
         from beekeeper.monitors.watsonx import (
             WatsonxExternalPromptMonitor,
             CloudPakforDataCredentials,
@@ -80,7 +84,7 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
 
         # watsonx.governance (IBM Cloud)
         wxgov_client = WatsonxExternalPromptMonitor(
-            api_key="API_KEY", space_id="SPACE_ID"
+            api_key="API_KEY", space_id="SPACE_ID", region=Region.US_SOUTH
         )
 
         # watsonx.governance (CP4D)
@@ -198,6 +202,40 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
 
         return created_detached_pta.to_dict()["asset_id"]
 
+    def _delete_detached_prompt(self, detached_pta_id: str) -> None:
+        from ibm_aigov_facts_client import (  # type: ignore
+            AIGovFactsClient,
+            CloudPakforDataConfig,
+        )
+
+        try:
+            if hasattr(self, "_fact_cpd_creds") and self._fact_cpd_creds:
+                cpd_creds = CloudPakforDataConfig(**self._fact_cpd_creds)
+
+                aigov_client = AIGovFactsClient(
+                    container_id=self._container_id,
+                    container_type=self._container_type,
+                    cloud_pak_for_data_configs=cpd_creds,
+                    disable_tracing=True,
+                )
+
+            else:
+                aigov_client = AIGovFactsClient(
+                    api_key=self._api_key,
+                    container_id=self._container_id,
+                    container_type=self._container_type,
+                    disable_tracing=True,
+                    region=self.region.factsheet,
+                )
+
+        except Exception as e:
+            logging.error(
+                f"Error connecting to IBM watsonx.governance (factsheets): {e}",
+            )
+            raise
+
+        suppress_output(aigov_client.assets.delete_prompt_asset, detached_pta_id)
+
     def _create_deployment_pta(self, asset_id: str, name: str, model_id: str) -> str:
         from ibm_watsonx_ai import APIClient, Credentials  # type: ignore
 
@@ -235,6 +273,30 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
 
         return wml_client.deployments.get_uid(created_deployment)
 
+    def _delete_deployment_pta(self, deployment_id: str):
+        from ibm_watsonx_ai import APIClient, Credentials  # type: ignore
+
+        try:
+            if hasattr(self, "_wml_cpd_creds") and self._wml_cpd_creds:
+                creds = Credentials(**self._wml_cpd_creds)
+
+                wml_client = APIClient(creds)
+                wml_client.set.default_space(self.space_id)
+
+            else:
+                creds = Credentials(
+                    url=self.region.watsonxai,
+                    api_key=self._api_key,
+                )
+                wml_client = APIClient(creds)
+                wml_client.set.default_space(self.space_id)
+
+        except Exception as e:
+            logging.error(f"Error connecting to IBM watsonx.ai Runtime: {e}")
+            raise
+
+        suppress_output(wml_client.deployments.delete, deployment_id)
+
     @deprecated(
         reason="'add_prompt_observer()' is deprecated and will be removed in a future version. Use 'create_prompt_monitor()' instead.",
         version="1.0.5",
@@ -244,13 +306,7 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
         self,
         name: str,
         model_id: str,
-        task_id: Literal[
-            "extraction",
-            "generation",
-            "question_answering",
-            "retrieval_augmented_generation",
-            "summarization",
-        ],
+        task_id: Union[TaskType, str],
         detached_model_provider: str,
         description: str = "",
         model_parameters: Dict = None,
@@ -291,13 +347,7 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
         self,
         name: str,
         model_id: str,
-        task_id: Literal[
-            "extraction",
-            "generation",
-            "question_answering",
-            "retrieval_augmented_generation",
-            "summarization",
-        ],
+        task_id: Union[TaskType, str],
         detached_model_provider: str,
         description: str = "",
         model_parameters: Dict = None,
@@ -333,13 +383,7 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
         self,
         name: str,
         model_id: str,
-        task_id: Literal[
-            "extraction",
-            "generation",
-            "question_answering",
-            "retrieval_augmented_generation",
-            "summarization",
-        ],
+        task_id: Union[TaskType, str],
         detached_model_provider: str,
         description: str = "",
         model_parameters: Dict = None,
@@ -347,9 +391,10 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
         detached_model_url: str = None,
         detached_prompt_url: str = None,
         detached_prompt_additional_info: Dict = None,
+        prompt_template: Union[PromptTemplate, str] = None,
         prompt_variables: List[str] = None,
         locale: str = "en",
-        input_text: str = None,
+        input_text: str = None,  # DEPRECATED
         context_fields: List[str] = None,
         question_field: str = None,
     ) -> Dict:
@@ -359,7 +404,7 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
         Args:
             name (str): The name of the External Prompt Template Asset.
             model_id (str): The ID of the model associated with the prompt.
-            task_id (str): The task identifier.
+            task_id (TaskType): The task identifier.
             detached_model_provider (str): The external model provider.
             description (str, optional): A description of the External Prompt Template Asset.
             model_parameters (Dict, optional): Model parameters and their respective values.
@@ -367,9 +412,9 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
             detached_model_url (str, optional): The URL of the external model.
             detached_prompt_url (str, optional): The URL of the external prompt.
             detached_prompt_additional_info (Dict, optional): Additional information related to the external prompt.
+            prompt_template (PromptTemplate, optional): The prompt template.
             prompt_variables (List[str], optional): Values for the prompt variables.
             locale (str, optional): Locale code for the input/output language. eg. "en", "pt", "es".
-            input_text (str, optional): The input text for the prompt.
             context_fields (List[str], optional): A list of fields that will provide context to the prompt.
                 Applicable only for "retrieval_augmented_generation" task type.
             question_field (str, optional): The field containing the question to be answered.
@@ -377,20 +422,39 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
 
         Example:
             ```python
+            from beekeeper.monitors.watsonx.supporting_classes.enums import TaskType
+
             wxgov_client.create_prompt_monitor(
                 name="Detached prompt (model AWS Anthropic)",
                 model_id="anthropic.claude-v2",
-                task_id="retrieval_augmented_generation",
+                task_id=TaskType.RETRIEVAL_AUGMENTED_GENERATION,
                 detached_model_provider="AWS Bedrock",
                 detached_model_name="Anthropic Claude 2.0",
                 detached_model_url="https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-claude.html",
-                prompt_variables=["context1", "context2", "input_query"],
-                input_text="Prompt text to be given",
-                context_fields=["context1", "context2"],
+                prompt_template="You are a helpful AI assistant that provides clear and accurate answers. {context}. Question: {input_query}.",
+                prompt_variables=["context", "input_query"],
+                context_fields=["context"],
                 question_field="input_query",
             )
             ```
         """
+        task_id = TaskType.from_value(task_id).value
+        rollback_stack = []
+
+        # DEPRECATION NOTICE
+        if input_text is not None:
+            warnings.warn(
+                "DEPRECATION NOTICE: `input_text` is deprecated and will be removed in a future release. "
+                "Use `prompt_template` instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+        if prompt_template is None:
+            prompt_template = input_text
+        # END DEPRECATION NOTICE
+        prompt_template = PromptTemplate.from_value(prompt_template)
+
         if (not (self.project_id or self.space_id)) or (
             self.project_id and self.space_id
         ):
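For callers migrating off `input_text`, here is a minimal sketch of the new call shape, assembled from the examples in this diff; the credential values are placeholders and `wxgov_client` is assumed to be a working client:

```python
from beekeeper.monitors.watsonx import WatsonxExternalPromptMonitor
from beekeeper.monitors.watsonx.supporting_classes.enums import Region, TaskType

# Placeholder credentials; real values are required for the call to succeed.
wxgov_client = WatsonxExternalPromptMonitor(
    api_key="API_KEY", space_id="SPACE_ID", region=Region.US_SOUTH
)

# `prompt_template` supersedes `input_text`; passing `input_text` still works
# for now but emits a DeprecationWarning and is mapped onto `prompt_template`.
wxgov_client.create_prompt_monitor(
    name="Detached prompt (model AWS Anthropic)",
    model_id="anthropic.claude-v2",
    task_id=TaskType.RETRIEVAL_AUGMENTED_GENERATION,
    detached_model_provider="AWS Bedrock",
    prompt_template="You are a helpful AI assistant. {context}. Question: {input_query}.",
    prompt_variables=["context", "input_query"],
    context_fields=["context"],
    question_field="input_query",
)
```

Note that `task_id` accepts either a `TaskType` member or its string value; the method normalizes it through `TaskType.from_value`.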
@@ -399,7 +463,7 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
                 "Both were provided: 'project_id' and 'space_id' cannot be set at the same time."
             )
 
-        if task_id == "retrieval_augmented_generation":
+        if task_id == TaskType.RETRIEVAL_AUGMENTED_GENERATION.value:
             if not context_fields or not question_field:
                 raise ValueError(
                     "For 'retrieval_augmented_generation' task, requires non-empty 'context_fields' and 'question_field'."
@@ -413,7 +477,9 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
         prompt_metadata.pop("locale", None)
 
         # Update name of keys to aigov_facts api
-        prompt_metadata["input"] = prompt_metadata.pop("input_text", None)
+        prompt_metadata["input"] = getattr(
+            prompt_metadata.pop("prompt_template", None), "template", None
+        )
         prompt_metadata["model_provider"] = prompt_metadata.pop(
             "detached_model_provider",
             None,
@@ -487,11 +553,14 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
             prompt_details,
             detached_asset_details,
         )
+        rollback_stack.append(lambda: self._delete_detached_prompt(detached_pta_id))
+
         deployment_id = None
         if self._container_type == "space":
             deployment_id = suppress_output(
                 self._create_deployment_pta, detached_pta_id, name, model_id
             )
+            rollback_stack.append(lambda: self._delete_deployment_pta(deployment_id))
 
         monitors = {
             "generative_ai_quality": {
@@ -552,10 +621,17 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
 
         generative_ai_monitor_details = generative_ai_monitor_details.result._to_dict()
 
+        wos_status = generative_ai_monitor_details.get("status", {})
+        if wos_status.get("state") == "ERROR":
+            for rollback_step in reversed(rollback_stack):
+                rollback_step()
+            raise Exception(wos_status.get("failure"))
+
+
         return {
             "detached_prompt_template_asset_id": detached_pta_id,
             "deployment_id": deployment_id,
-            "subscription_id": generative_ai_monitor_details["subscription_id"],
+            "subscription_id": generative_ai_monitor_details.get("subscription_id", None),
         }
 
     def store_payload_records(
@@ -671,8 +747,9 @@ class WatsonxExternalPromptMonitor(PromptMonitor):
         """
         Stores records to the feedback logging system.
 
-        Note:
-            Feedback data for external prompt **must include** the model output named `generated_text`.
+        Info:
+            - Feedback data for external prompt **must include** the model output named `generated_text`.
+            - For prompt monitors created using Beekeeper, the label field is `reference_output`.
 
         Args:
             request_records (List[Dict]): A list of records to be logged, where each record is represented as a dictionary.
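A hedged illustration of a single feedback record satisfying the note above; apart from `generated_text` and `reference_output`, the field names mirror the prompt variables from the earlier example and all values are made up:

```python
# `generated_text` carries the model output; `reference_output` is the label
# field for prompt monitors created through Beekeeper. The remaining keys are
# the prompt variables of the monitored template and are illustrative only.
feedback_record = {
    "context": "Beekeeper is a toolkit for monitoring LLM prompts.",
    "input_query": "What is Beekeeper?",
    "generated_text": "Beekeeper is a toolkit for monitoring LLM prompts.",
    "reference_output": "A toolkit for monitoring LLM prompts.",
}
```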
@@ -808,13 +885,17 @@ class WatsonxPromptMonitor(PromptMonitor):
 
     Example:
         ```python
+        from beekeeper.monitors.watsonx.supporting_classes.enums import Region
+
        from beekeeper.monitors.watsonx import (
            WatsonxPromptMonitor,
            CloudPakforDataCredentials,
        )
 
        # watsonx.governance (IBM Cloud)
-        wxgov_client = WatsonxPromptMonitor(api_key="API_KEY", space_id="SPACE_ID")
+        wxgov_client = WatsonxPromptMonitor(
+            api_key="API_KEY", space_id="SPACE_ID", region=Region.US_SOUTH
+        )
 
        # watsonx.governance (CP4D)
        cpd_creds = CloudPakforDataCredentials(
@@ -927,6 +1008,40 @@ class WatsonxPromptMonitor(PromptMonitor):
 
         return created_pta.to_dict()["asset_id"]
 
+    def _delete_prompt(self, pta_id: str) -> None:
+        from ibm_aigov_facts_client import (  # type: ignore
+            AIGovFactsClient,
+            CloudPakforDataConfig,
+        )
+
+        try:
+            if hasattr(self, "_fact_cpd_creds") and self._fact_cpd_creds:
+                cpd_creds = CloudPakforDataConfig(**self._fact_cpd_creds)
+
+                aigov_client = AIGovFactsClient(
+                    container_id=self._container_id,
+                    container_type=self._container_type,
+                    cloud_pak_for_data_configs=cpd_creds,
+                    disable_tracing=True,
+                )
+
+            else:
+                aigov_client = AIGovFactsClient(
+                    api_key=self._api_key,
+                    container_id=self._container_id,
+                    container_type=self._container_type,
+                    disable_tracing=True,
+                    region=self.region.factsheet,
+                )
+
+        except Exception as e:
+            logging.error(
+                f"Error connecting to IBM watsonx.governance (factsheets): {e}",
+            )
+            raise
+
+        suppress_output(aigov_client.assets.delete_prompt_asset, pta_id)
+
     def _create_deployment_pta(self, asset_id: str, name: str, model_id: str) -> str:
         from ibm_watsonx_ai import APIClient, Credentials  # type: ignore
 
@@ -965,6 +1080,30 @@ class WatsonxPromptMonitor(PromptMonitor):
 
         return wml_client.deployments.get_uid(created_deployment)
 
+    def _delete_deployment_pta(self, deployment_id: str):
+        from ibm_watsonx_ai import APIClient, Credentials  # type: ignore
+
+        try:
+            if hasattr(self, "_wml_cpd_creds") and self._wml_cpd_creds:
+                creds = Credentials(**self._wml_cpd_creds)
+
+                wml_client = APIClient(creds)
+                wml_client.set.default_space(self.space_id)
+
+            else:
+                creds = Credentials(
+                    url=self.region.watsonxai,
+                    api_key=self._api_key,
+                )
+                wml_client = APIClient(creds)
+                wml_client.set.default_space(self.space_id)
+
+        except Exception as e:
+            logging.error(f"Error connecting to IBM watsonx.ai Runtime: {e}")
+            raise
+
+        suppress_output(wml_client.deployments.delete, deployment_id)
+
     @deprecated(
         reason="'add_prompt_observer()' is deprecated and will be removed in a future version. Use 'create_prompt_monitor()' instead.",
         version="1.0.5",
@@ -974,13 +1113,7 @@ class WatsonxPromptMonitor(PromptMonitor):
         self,
         name: str,
         model_id: str,
-        task_id: Literal[
-            "extraction",
-            "generation",
-            "question_answering",
-            "retrieval_augmented_generation",
-            "summarization",
-        ],
+        task_id: Union[TaskType, str],
         description: str = "",
         model_parameters: Dict = None,
         prompt_variables: List[str] = None,
@@ -1011,13 +1144,7 @@ class WatsonxPromptMonitor(PromptMonitor):
         self,
         name: str,
         model_id: str,
-        task_id: Literal[
-            "extraction",
-            "generation",
-            "question_answering",
-            "retrieval_augmented_generation",
-            "summarization",
-        ],
+        task_id: Union[TaskType, str],
         description: str = "",
         model_parameters: Dict = None,
         prompt_variables: List[str] = None,
@@ -1043,18 +1170,13 @@ class WatsonxPromptMonitor(PromptMonitor):
         self,
         name: str,
         model_id: str,
-        task_id: Literal[
-            "extraction",
-            "generation",
-            "question_answering",
-            "retrieval_augmented_generation",
-            "summarization",
-        ],
+        task_id: Union[TaskType, str],
         description: str = "",
         model_parameters: Dict = None,
+        prompt_template: Union[PromptTemplate, str] = None,
         prompt_variables: List[str] = None,
         locale: str = "en",
-        input_text: str = None,
+        input_text: str = None,  # DEPRECATED
         context_fields: List[str] = None,
         question_field: str = None,
     ) -> Dict:
@@ -1064,12 +1186,12 @@ class WatsonxPromptMonitor(PromptMonitor):
         Args:
             name (str): The name of the Prompt Template Asset.
             model_id (str): The ID of the model associated with the prompt.
-            task_id (str): The task identifier.
+            task_id (TaskType): The task identifier.
             description (str, optional): A description of the Prompt Template Asset.
             model_parameters (Dict, optional): A dictionary of model parameters and their respective values.
+            prompt_template (PromptTemplate, optional): The prompt template.
             prompt_variables (List[str], optional): A list of values for prompt input variables.
             locale (str, optional): Locale code for the input/output language. eg. "en", "pt", "es".
-            input_text (str, optional): The input text for the prompt.
             context_fields (List[str], optional): A list of fields that will provide context to the prompt.
                 Applicable only for the `retrieval_augmented_generation` task type.
             question_field (str, optional): The field containing the question to be answered.
@@ -1077,17 +1199,36 @@ class WatsonxPromptMonitor(PromptMonitor):
 
         Example:
             ```python
+            from beekeeper.monitors.watsonx.supporting_classes.enums import TaskType
+
             wxgov_client.create_prompt_monitor(
                 name="IBM prompt template",
                 model_id="ibm/granite-3-2b-instruct",
-                task_id="retrieval_augmented_generation",
-                prompt_variables=["context1", "context2", "input_query"],
-                input_text="Prompt text to be given",
-                context_fields=["context1", "context2"],
+                task_id=TaskType.RETRIEVAL_AUGMENTED_GENERATION,
+                prompt_template="You are a helpful AI assistant that provides clear and accurate answers. {context}. Question: {input_query}.",
+                prompt_variables=["context", "input_query"],
+                context_fields=["context"],
                 question_field="input_query",
             )
             ```
         """
+        task_id = TaskType.from_value(task_id).value
+        rollback_stack = []
+
+        # DEPRECATION NOTICE
+        if input_text is not None:
+            warnings.warn(
+                "DEPRECATION NOTICE: `input_text` is deprecated and will be removed in a future release. "
+                "Use `prompt_template` instead.",
+                DeprecationWarning,
+                stacklevel=2,
+            )
+
+        if prompt_template is None:
+            prompt_template = input_text
+        # END DEPRECATION NOTICE
+        prompt_template = PromptTemplate.from_value(prompt_template)
+
         if (not (self.project_id or self.space_id)) or (
             self.project_id and self.space_id
         ):
@@ -1096,7 +1237,7 @@ class WatsonxPromptMonitor(PromptMonitor):
                 "Both were provided: 'project_id' and 'space_id' cannot be set at the same time."
             )
 
-        if task_id == "retrieval_augmented_generation":
+        if task_id == TaskType.RETRIEVAL_AUGMENTED_GENERATION.value:
             if not context_fields or not question_field:
                 raise ValueError(
                     "For 'retrieval_augmented_generation' task, requires non-empty 'context_fields' and 'question_field'."
@@ -1110,7 +1251,9 @@ class WatsonxPromptMonitor(PromptMonitor):
         prompt_metadata.pop("locale", None)
 
         # Update name of keys to aigov_facts api
-        prompt_metadata["input"] = prompt_metadata.pop("input_text", None)
+        prompt_metadata["input"] = getattr(
+            prompt_metadata.pop("prompt_template", None), "template", None
+        )
 
         # Update list of vars to dict
         prompt_metadata["prompt_variables"] = Dict.fromkeys(
@@ -1165,11 +1308,14 @@ class WatsonxPromptMonitor(PromptMonitor):
         pta_id = suppress_output(
             self._create_prompt_template, prompt_details, asset_details
         )
+        rollback_stack.append(lambda: self._delete_detached_prompt(pta_id))
+
         deployment_id = None
         if self._container_type == "space":
             deployment_id = suppress_output(
                 self._create_deployment_pta, pta_id, name, model_id
             )
+            rollback_stack.append(lambda: self._delete_deployment_pta(deployment_id))
 
         monitors = {
             "generative_ai_quality": {
@@ -1230,10 +1376,16 @@ class WatsonxPromptMonitor(PromptMonitor):
 
         generative_ai_monitor_details = generative_ai_monitor_details._to_dict()
 
+        wos_status = generative_ai_monitor_details.get("status", {})
+        if wos_status.get("state") == "ERROR":
+            for rollback_step in reversed(rollback_stack):
+                rollback_step()
+            raise Exception(wos_status.get("failure"))
+
         return {
             "prompt_template_asset_id": pta_id,
             "deployment_id": deployment_id,
-            "subscription_id": generative_ai_monitor_details["subscription_id"],
+            "subscription_id": generative_ai_monitor_details.get("subscription_id", None),
         }
 
     def store_payload_records(
@@ -1350,6 +1502,9 @@ class WatsonxPromptMonitor(PromptMonitor):
         """
         Stores records to the feedback logging system.
 
+        Info:
+            - For prompt monitors created using Beekeeper, the label field is `reference_output`.
+
         Args:
             request_records (List[Dict]): A list of records to be logged, where each record is represented as a dictionary.
             subscription_id (str, optional): The subscription ID associated with the records being logged.
@@ -29,13 +29,17 @@ class WatsonxCustomMetricsManager:
 
     Example:
         ```python
+        from beekeeper.monitors.watsonx.supporting_classes.enums import Region
+
        from beekeeper.monitors.watsonx import (
            WatsonxCustomMetricsManager,
            CloudPakforDataCredentials,
        )
 
        # watsonx.governance (IBM Cloud)
-        wxgov_client = WatsonxCustomMetricsManager(api_key="API_KEY")
+        wxgov_client = WatsonxCustomMetricsManager(
+            api_key="API_KEY", region=Region.US_SOUTH
+        )
 
        # watsonx.governance (CP4D)
        cpd_creds = CloudPakforDataCredentials(
@@ -484,7 +488,7 @@ class WatsonxCustomMetricsManager:
 
         Example:
             ```python
-            wxgov_client.put_metrics(
+            wxgov_client.store_metric_data(
                 monitor_instance_id="01966801-f9ee-7248-a706-41de00a8a998",
                 run_id="RUN_ID",
                 request_records={"context_quality": 0.914, "sensitivity": 0.85},
@@ -49,7 +49,7 @@ class Region(str, Enum):
         return _REGION_DATA[self.value]["factsheet"]
 
     @classmethod
-    def from_value(cls, value):
+    def from_value(cls, value: str) -> "Region":
         if value is None:
             return cls.US_SOUTH
 
@@ -58,7 +58,7 @@ class Region(str, Enum):
 
         if isinstance(value, str):
             try:
-                return cls(value)
+                return cls(value.lower())
             except ValueError:
                 raise ValueError(
                     "Invalid value for parameter 'region'. Received: '{}'. Valid values are: {}.".format(
@@ -69,3 +69,45 @@
         raise TypeError(
             f"Invalid type for parameter 'region'. Expected str or Region, but received {type(value).__name__}."
         )
+
+
+class TaskType(Enum):
+    """
+    Supported IBM watsonx.governance tasks.
+
+    Attributes:
+        QUESTION_ANSWERING (str): "question_answering"
+        SUMMARIZATION (str): "summarization"
+        RETRIEVAL_AUGMENTED_GENERATION (str): "retrieval_augmented_generation"
+        CLASSIFICATION (str): "classification"
+        GENERATION (str): "generation"
+        CODE (str): "code"
+        EXTRACTION (str): "extraction"
+    """
+
+    QUESTION_ANSWERING = "question_answering"
+    SUMMARIZATION = "summarization"
+    RETRIEVAL_AUGMENTED_GENERATION = "retrieval_augmented_generation"
+    CLASSIFICATION = "classification"
+    GENERATION = "generation"
+    CODE = "code"
+    EXTRACTION = "extraction"
+
+    @classmethod
+    def from_value(cls, value: str) -> "TaskType":
+        if isinstance(value, cls):
+            return value
+
+        if isinstance(value, str):
+            try:
+                return cls(value.lower())
+            except ValueError:
+                raise ValueError(
+                    "Invalid value for parameter 'task_id'. Received: '{}'. Valid values are: {}.".format(
+                        value, [item.value for item in TaskType]
+                    )
+                )
+
+        raise TypeError(
+            f"Invalid type for parameter 'task_id'. Expected str or TaskType, but received {type(value).__name__}."
+        )
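A minimal sketch of how `TaskType.from_value` behaves, based on the implementation above (import path as used elsewhere in this diff):

```python
from beekeeper.monitors.watsonx.supporting_classes.enums import TaskType

# Enum members pass through unchanged; strings are matched case-insensitively.
assert TaskType.from_value(TaskType.SUMMARIZATION) is TaskType.SUMMARIZATION
assert TaskType.from_value("Retrieval_Augmented_Generation") is TaskType.RETRIEVAL_AUGMENTED_GENERATION

# Unknown strings raise ValueError listing the valid task IDs; other types raise TypeError.
try:
    TaskType.from_value("translation")
except ValueError as err:
    print(err)
```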
@@ -1,11 +1,11 @@
 Metadata-Version: 2.4
 Name: beekeeper-monitors-watsonx
-Version: 1.1.0.post1
+Version: 1.1.2
 Summary: beekeeper monitors watsonx extension
 Author-email: Leonardo Furnielis <leonardofurnielis@outlook.com>
 License: Apache-2.0
 Requires-Python: <4.0,>=3.10
-Requires-Dist: beekeeper-core<2.0.0,>=1.0.5
+Requires-Dist: beekeeper-core<2.0.0,>=1.0.6
 Requires-Dist: certifi<2026.0.0,>=2025.4.26
 Requires-Dist: ibm-aigov-facts-client<1.0.97,>=1.0.96
 Requires-Dist: ibm-watson-openscale<3.1.0,>=3.0.49
@@ -1,12 +1,12 @@
 beekeeper/monitors/watsonx/__init__.py,sha256=iJv6D68IT00ZC40TNSVYtqyFTen9sWoDqUtxvVVJjOE,789
-beekeeper/monitors/watsonx/base.py,sha256=jtgUGIWKWGQDsdEbeHm4iRmXp2-aErj97IKH0P3_Ayw,55775
-beekeeper/monitors/watsonx/custom_metric.py,sha256=QCkNDNLK_IehZIdXyczDM7RuDdyvCJN2IaTc-xsm-Lk,23827
+beekeeper/monitors/watsonx/base.py,sha256=gx_iduLDy1TmMSM-jw9OyUNKltxV8xUeMz-POPhjT6k,62174
+beekeeper/monitors/watsonx/custom_metric.py,sha256=XdJ26lb0y_P1nirJE_krpKONuqJjP3sWFApoG-DR6hk,23959
 beekeeper/monitors/watsonx/supporting_classes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 beekeeper/monitors/watsonx/supporting_classes/credentials.py,sha256=x4rYoOFvx0pWDhFZfuy6fM0rj7JCivaSYn_jYFXlV8U,5190
-beekeeper/monitors/watsonx/supporting_classes/enums.py,sha256=7HkSrjU7D8pFPCRdYk_1oE27r_sZ_nIL6cuShIdtmR8,1895
+beekeeper/monitors/watsonx/supporting_classes/enums.py,sha256=Kp8_-YYQ2Z2ha9_pYyF7F7DaZOVFJoV4YSoCqoEgbOk,3274
 beekeeper/monitors/watsonx/supporting_classes/metric.py,sha256=iERXRi0iBFHITFaLtonoV97nNUCPXbieD7Z1wX6bvX8,3370
 beekeeper/monitors/watsonx/utils/data_utils.py,sha256=qBLYtHGY0MJ0JJ8BpFDT2YIjA3QOYJQNemLvpA3DMz0,1252
 beekeeper/monitors/watsonx/utils/instrumentation.py,sha256=ztgR1kZ9h-JvASzRA47AYHdc-Isv33EQum9XBLg-dQk,525
-beekeeper_monitors_watsonx-1.1.0.post1.dist-info/METADATA,sha256=CeEXAqhnYUi0qbbhx6EfkB3-tlkAbFNLMde-t0Bp6L4,722
-beekeeper_monitors_watsonx-1.1.0.post1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-beekeeper_monitors_watsonx-1.1.0.post1.dist-info/RECORD,,
+beekeeper_monitors_watsonx-1.1.2.dist-info/METADATA,sha256=9NZNF7aRduO-j9Up_BfNbgIRWrBb67zbcGnTvylHk_E,716
+beekeeper_monitors_watsonx-1.1.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+beekeeper_monitors_watsonx-1.1.2.dist-info/RECORD,,