datarobot-moderations 11.2.10__py3-none-any.whl → 11.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,259 @@
1
+ # ---------------------------------------------------------------------------------
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
+ #
5
+ # DataRobot, Inc. Confidential.
6
+ # This is proprietary source code of DataRobot, Inc. and its affiliates.
7
+ #
8
+ # This file and its contents are subject to DataRobot Tool and Utility Agreement.
9
+ # For details, see
10
+ # https://www.datarobot.com/wp-content/uploads/2021/07/DataRobot-Tool-and-Utility-Agreement.pdf.
11
+ # ---------------------------------------------------------------------------------
12
+ from abc import ABC
13
+
14
+ from datarobot.enums import CustomMetricAggregationType
15
+ from datarobot.enums import CustomMetricDirectionality
16
+
17
+ from datarobot_dome.constants import AGENT_GOAL_ACCURACY_COLUMN_NAME
18
+ from datarobot_dome.constants import COST_COLUMN_NAME
19
+ from datarobot_dome.constants import CUSTOM_METRIC_DESCRIPTION_SUFFIX
20
+ from datarobot_dome.constants import DEFAULT_PROMPT_COLUMN_NAME
21
+ from datarobot_dome.constants import DEFAULT_RESPONSE_COLUMN_NAME
22
+ from datarobot_dome.constants import FAITHFULLNESS_COLUMN_NAME
23
+ from datarobot_dome.constants import GUIDELINE_ADHERENCE_COLUMN_NAME
24
+ from datarobot_dome.constants import NEMO_GUARD_COLUMN_NAME
25
+ from datarobot_dome.constants import ROUGE_1_COLUMN_NAME
26
+ from datarobot_dome.constants import SPAN_PREFIX
27
+ from datarobot_dome.constants import TASK_ADHERENCE_SCORE_COLUMN_NAME
28
+ from datarobot_dome.constants import TOKEN_COUNT_COLUMN_NAME
29
+ from datarobot_dome.constants import GuardAction
30
+ from datarobot_dome.constants import GuardStage
31
+ from datarobot_dome.constants import GuardType
32
+ from datarobot_dome.constants import NemoEvaluatorType
33
+ from datarobot_dome.constants import OOTBType
34
+
35
+
36
+ def get_metric_column_name(
37
+ guard_type: GuardType,
38
+ ootb_type: OOTBType | None,
39
+ stage: GuardStage,
40
+ model_guard_target_name: str | None = None,
41
+ metric_name: str | None = None,
42
+ nemo_evaluator_type: str | None = None,
43
+ ) -> str:
44
+ """Gets the metric column name. Note that this function gets used in buzok code. If you update
45
+ it, please also update the moderation library in the buzok worker image.
46
+ """
47
+ if guard_type == GuardType.MODEL:
48
+ if model_guard_target_name is None:
49
+ raise ValueError(
50
+ "For the model guard type, a valid model_guard_target_name has to be provided."
51
+ )
52
+ metric_result_key = Guard.get_stage_str(stage) + "_" + model_guard_target_name
53
+ elif guard_type == GuardType.OOTB:
54
+ if ootb_type is None:
55
+ raise ValueError("For the OOTB type, a valid OOTB guard type has to be provided.")
56
+ elif ootb_type == OOTBType.TOKEN_COUNT:
57
+ metric_result_key = Guard.get_stage_str(stage) + "_" + TOKEN_COUNT_COLUMN_NAME
58
+ elif ootb_type == OOTBType.ROUGE_1:
59
+ metric_result_key = Guard.get_stage_str(stage) + "_" + ROUGE_1_COLUMN_NAME
60
+ elif ootb_type == OOTBType.FAITHFULNESS:
61
+ metric_result_key = Guard.get_stage_str(stage) + "_" + FAITHFULLNESS_COLUMN_NAME
62
+ elif ootb_type == OOTBType.AGENT_GOAL_ACCURACY:
63
+ metric_result_key = AGENT_GOAL_ACCURACY_COLUMN_NAME
64
+ elif ootb_type == OOTBType.CUSTOM_METRIC:
65
+ if metric_name is None:
66
+ raise ValueError(
67
+ "For the custom metric type, a valid metric_name has to be provided."
68
+ )
69
+ metric_result_key = Guard.get_stage_str(stage) + "_" + metric_name
70
+ elif ootb_type == OOTBType.COST:
71
+ metric_result_key = COST_COLUMN_NAME
72
+ elif ootb_type == OOTBType.TASK_ADHERENCE:
73
+ metric_result_key = TASK_ADHERENCE_SCORE_COLUMN_NAME
74
+ elif ootb_type == OOTBType.GUIDELINE_ADHERENCE:
75
+ metric_result_key = GUIDELINE_ADHERENCE_COLUMN_NAME
76
+ else:
77
+ raise ValueError("The provided OOTB type is not implemented.")
78
+ elif guard_type == GuardType.NEMO_GUARDRAILS:
79
+ metric_result_key = Guard.get_stage_str(stage) + "_" + NEMO_GUARD_COLUMN_NAME
80
+ elif guard_type == GuardType.NEMO_EVALUATOR:
81
+ if nemo_evaluator_type == NemoEvaluatorType.LLM_JUDGE:
82
+ metric_result_key = f"{Guard.get_stage_str(stage)}_nemo_{nemo_evaluator_type}"
83
+ elif nemo_evaluator_type in NemoEvaluatorType.ALL:
84
+ metric_result_key = f"nemo_{nemo_evaluator_type}"
85
+ else:
86
+ raise ValueError("The provided NeMo Evaluator type is not implemented.")
87
+ else:
88
+ raise ValueError("The provided guard type is not implemented.")
89
+ return metric_result_key
90
+
91
+
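For orientation, a minimal sketch of how the returned column names are composed. The import path of this module and the concrete column-name values are assumptions; the real values come from the `*_COLUMN_NAME` constants in `datarobot_dome.constants`.

```python
from datarobot_dome.constants import GuardStage, GuardType, OOTBType
from datarobot_dome.guard.base import get_metric_column_name  # module path assumed

# OOTB token-count guard on the prompt stage:
# "Prompts_" + TOKEN_COUNT_COLUMN_NAME
get_metric_column_name(GuardType.OOTB, OOTBType.TOKEN_COUNT, GuardStage.PROMPT)

# Model guard on the response stage, keyed by the guard model's target name:
# "Responses_toxicity_score" for a hypothetical target named "toxicity_score"
get_metric_column_name(
    GuardType.MODEL,
    None,
    GuardStage.RESPONSE,
    model_guard_target_name="toxicity_score",
)
```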
92
+ class GuardIntervention:
93
+ def __init__(self, intervention_config: dict) -> None:
94
+ self.action = intervention_config["action"]
95
+ self.message = intervention_config.get("message")
96
+ self.threshold = None
97
+ self.comparator = None
98
+ if (
99
+ "conditions" in intervention_config
100
+ and intervention_config["conditions"] is not None
101
+ and len(intervention_config["conditions"]) > 0
102
+ ):
103
+ self.threshold = intervention_config["conditions"][0].get("comparand")
104
+ self.comparator = intervention_config["conditions"][0].get("comparator")
105
+
106
+
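As an illustration, `GuardIntervention` expects an intervention config shaped roughly like the one below; the `action` and `comparator` string values and the import path are hypothetical, since only the `GuardAction` enum is visible here.

```python
from datarobot_dome.guard.base import GuardIntervention  # module path assumed

intervention_config = {
    "action": "block",  # expected to be a GuardAction value
    "message": "This prompt was blocked by the moderation guard.",
    "conditions": [
        # Only the first condition is read: comparand -> threshold, comparator -> comparator.
        {"comparand": 0.8, "comparator": "greaterThan"},
    ],
}

intervention = GuardIntervention(intervention_config)
assert intervention.threshold == 0.8
assert intervention.comparator == "greaterThan"
```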
107
+ class Guard(ABC):
108
+ def __init__(self, config: dict, stage=None):
109
+ self._name = config["name"]
110
+ self._description = config.get("description")
111
+ self._type = config["type"]
112
+ self._stage = stage if stage else config["stage"]
113
+ self._pipeline = None
114
+ self.intervention = None
115
+ self._deployment_id = config.get("deployment_id")
116
+ self._dr_cm = None
117
+ self._faas_url = config.get("faas_url")
118
+ self._copy_citations = config["copy_citations"]
119
+ self.is_agentic = config.get("is_agentic", False)
120
+ self.metric_column_name = get_metric_column_name(
121
+ config["type"],
122
+ config.get("ootb_type"),
123
+ self._stage,
124
+ config.get("model_info", {}).get("target_name"),
125
+ config["name"],
126
+ config.get("nemo_evaluator_type"),
127
+ )
128
+ if config.get("intervention"):
129
+ self.intervention = GuardIntervention(config["intervention"])
130
+
131
+ @property
132
+ def name(self) -> str:
133
+ return self._name
134
+
135
+ @property
136
+ def description(self) -> str:
137
+ return self._description
138
+
139
+ @property
140
+ def type(self) -> GuardType:
141
+ return self._type
142
+
143
+ @property
144
+ def stage(self) -> GuardStage:
145
+ return self._stage
146
+
147
+ @property
148
+ def faas_url(self) -> str:
149
+ return self._faas_url
150
+
151
+ @property
152
+ def copy_citations(self) -> bool:
153
+ return self._copy_citations
154
+
155
+ def set_pipeline(self, pipeline):
156
+ self._pipeline = pipeline
157
+
158
+ @property
159
+ def llm_type(self):
160
+ return self._llm_type
161
+
162
+ @staticmethod
163
+ def get_stage_str(stage):
164
+ return "Prompts" if stage == GuardStage.PROMPT else "Responses"
165
+
166
+ def get_input_column_name(self, stage) -> str:
167
+ match stage:
168
+ case GuardStage.PROMPT:
169
+ return DEFAULT_PROMPT_COLUMN_NAME
170
+ case GuardStage.RESPONSE:
171
+ return DEFAULT_RESPONSE_COLUMN_NAME
172
+ case _:
173
+ raise ValueError(f"Stage ({stage}) is not supported.")
174
+
175
+ def has_latency_custom_metric(self) -> bool:
176
+ """Determines if latency metric is tracked for this guard type. Default is True."""
177
+ return True
178
+
179
+ def get_latency_custom_metric_name(self):
180
+ return f"{self.name} Guard Latency"
181
+
182
+ def get_latency_custom_metric(self):
183
+ return {
184
+ "name": self.get_latency_custom_metric_name(),
185
+ "directionality": CustomMetricDirectionality.LOWER_IS_BETTER,
186
+ "units": "seconds",
187
+ "type": CustomMetricAggregationType.AVERAGE,
188
+ "baselineValue": 0,
189
+ "isModelSpecific": True,
190
+ "timeStep": "hour",
191
+ "description": (
192
+ f"{self.get_latency_custom_metric_name()}. {CUSTOM_METRIC_DESCRIPTION_SUFFIX}"
193
+ ),
194
+ }
195
+
196
+ def has_average_score_custom_metric(self) -> bool:
197
+ """Determines if an average score metric is tracked for this guard type. Default is True."""
198
+ return True
199
+
200
+ def get_average_score_custom_metric_name(self, stage):
201
+ return f"{self.name} Guard Average Score for {self.get_stage_str(stage)}"
202
+
203
+ def get_average_score_metric(self, stage):
204
+ return {
205
+ "name": self.get_average_score_custom_metric_name(stage),
206
+ "directionality": CustomMetricDirectionality.LOWER_IS_BETTER,
207
+ "units": "probability",
208
+ "type": CustomMetricAggregationType.AVERAGE,
209
+ "baselineValue": 0,
210
+ "isModelSpecific": True,
211
+ "timeStep": "hour",
212
+ "description": (
213
+ f"{self.get_average_score_custom_metric_name(stage)}. "
214
+ f" {CUSTOM_METRIC_DESCRIPTION_SUFFIX}"
215
+ ),
216
+ }
217
+
218
+ def get_guard_enforced_custom_metric_name(self, stage, moderation_method):
219
+ if moderation_method == GuardAction.REPLACE:
220
+ return f"{self.name} Guard replaced {self.get_stage_str(stage)}"
221
+ return f"{self.name} Guard {moderation_method}ed {self.get_stage_str(stage)}"
222
+
223
+ def get_enforced_custom_metric(self, stage, moderation_method):
224
+ return {
225
+ "name": self.get_guard_enforced_custom_metric_name(stage, moderation_method),
226
+ "directionality": CustomMetricDirectionality.LOWER_IS_BETTER,
227
+ "units": "count",
228
+ "type": CustomMetricAggregationType.SUM,
229
+ "baselineValue": 0,
230
+ "isModelSpecific": True,
231
+ "timeStep": "hour",
232
+ "description": (
233
+ f"Number of {self.get_stage_str(stage)} {moderation_method}ed by the "
234
+ f"{self.name} guard. {CUSTOM_METRIC_DESCRIPTION_SUFFIX}"
235
+ ),
236
+ }
237
+
238
+ def get_intervention_action(self):
239
+ if not self.intervention:
240
+ return GuardAction.NONE
241
+ return self.intervention.action
242
+
243
+ def get_comparand(self):
244
+ return self.intervention.threshold
245
+
246
+ def get_enforced_span_attribute_name(self, stage):
247
+ intervention_action = self.get_intervention_action()
248
+ if intervention_action in [GuardAction.BLOCK, GuardAction.REPORT]:
249
+ return f"{SPAN_PREFIX}.{stage.lower()}.{intervention_action}ed"
250
+ elif intervention_action == GuardAction.REPLACE:
251
+ return f"{SPAN_PREFIX}.{stage.lower()}.replaced"
252
+ else:
253
+ raise NotImplementedError
254
+
255
+ def get_span_column_name(self, _):
256
+ raise NotImplementedError
257
+
258
+ def get_span_attribute_name(self, _):
259
+ raise NotImplementedError
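To make the custom-metric helpers concrete: for a hypothetical guard named "Toxicity", `get_latency_custom_metric()` returns a payload of the following shape. The fields are copied from the method above; the description suffix is whatever `CUSTOM_METRIC_DESCRIPTION_SUFFIX` resolves to.

```python
from datarobot.enums import CustomMetricAggregationType, CustomMetricDirectionality
from datarobot_dome.constants import CUSTOM_METRIC_DESCRIPTION_SUFFIX

# What get_latency_custom_metric() returns for a hypothetical guard named "Toxicity":
latency_metric = {
    "name": "Toxicity Guard Latency",
    "directionality": CustomMetricDirectionality.LOWER_IS_BETTER,
    "units": "seconds",
    "type": CustomMetricAggregationType.AVERAGE,
    "baselineValue": 0,
    "isModelSpecific": True,
    "timeStep": "hour",
    "description": f"Toxicity Guard Latency. {CUSTOM_METRIC_DESCRIPTION_SUFFIX}",
}
```

The average-score and enforced-count payloads have the same shape, differing only in the metric name, the units ("probability" and "count"), and the aggregation type (AVERAGE versus SUM).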
@@ -1,6 +1,6 @@
1
1
  # ---------------------------------------------------------------------------------
2
- # Copyright (c) 2025 DataRobot, Inc. and its affiliates. All rights reserved.
3
- # Last updated 2025.
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
4
  #
5
5
  # DataRobot, Inc. Confidential.
6
6
  # This is proprietary source code of DataRobot, Inc. and its affiliates.
@@ -94,6 +94,8 @@ class GuardLLMMixin:
94
94
  return f"{secret_env_var_name_prefix}{OOTBType.AGENT_GOAL_ACCURACY}_{llm_type_str}"
95
95
  elif config["ootb_type"] == OOTBType.TASK_ADHERENCE:
96
96
  return f"{secret_env_var_name_prefix}{OOTBType.TASK_ADHERENCE}_{llm_type_str}"
97
+ elif config["ootb_type"] == OOTBType.GUIDELINE_ADHERENCE:
98
+ return f"{secret_env_var_name_prefix}{OOTBType.GUIDELINE_ADHERENCE}_{llm_type_str}"
97
99
  else:
98
100
  raise Exception("Invalid guard config for building env var name")
99
101
  else:
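The added branch follows the existing naming convention for the guard's secret environment variable; as a rough sketch (the prefix and the stringified enum and LLM-type values below are placeholders, not the real constants):

```python
# Placeholder values only; the real prefix and enum string values come from the library.
secret_env_var_name_prefix = "MODERATION_"
ootb_type = "guideline_adherence"   # stands in for OOTBType.GUIDELINE_ADHERENCE
llm_type_str = "azure_openai"       # stands in for the configured LLM type

env_var_name = f"{secret_env_var_name_prefix}{ootb_type}_{llm_type_str}"
# -> "MODERATION_guideline_adherence_azure_openai"
```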
@@ -0,0 +1,84 @@
1
+ # ---------------------------------------------------------------------------------
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
+ #
5
+ # DataRobot, Inc. Confidential.
6
+ # This is proprietary source code of DataRobot, Inc. and its affiliates.
7
+ #
8
+ # This file and its contents are subject to DataRobot Tool and Utility Agreement.
9
+ # For details, see
10
+ # https://www.datarobot.com/wp-content/uploads/2021/07/DataRobot-Tool-and-Utility-Agreement.pdf.
11
+ # ---------------------------------------------------------------------------------
12
+ import datarobot as dr
13
+
14
+ from datarobot_dome.constants import SPAN_PREFIX
15
+
16
+ from .base import Guard
17
+
18
+
19
+ class GuardModelInfo:
20
+ def __init__(self, model_config: dict):
21
+ self._model_id = model_config.get("model_id")
22
+ self._target_name = model_config["target_name"]
23
+ self._target_type = model_config["target_type"]
24
+ self._class_names = model_config.get("class_names", [])
25
+ self._input_column_name = model_config["input_column_name"]
26
+ self._replacement_text_column_name = model_config.get("replacement_text_column_name")
27
+
28
+ @property
29
+ def model_id(self) -> str:
30
+ return self._model_id
31
+
32
+ @property
33
+ def target_name(self) -> str:
34
+ return self._target_name
35
+
36
+ @property
37
+ def target_type(self) -> str:
38
+ return self._target_type
39
+
40
+ @property
41
+ def class_names(self) -> list[str]:
42
+ return self._class_names
43
+
44
+ @property
45
+ def input_column_name(self) -> str:
46
+ return self._input_column_name
47
+
48
+ @property
49
+ def replacement_text_column_name(self) -> str:
50
+ return self._replacement_text_column_name
51
+
52
+
53
+ class ModelGuard(Guard):
54
+ def __init__(self, config: dict, stage=None):
55
+ super().__init__(config, stage)
56
+ self._deployment_id = config["deployment_id"]
57
+ self._model_info = GuardModelInfo(config["model_info"])
58
+ # dr.Client is set in the Pipeline init. Let's query the deployment
59
+ # to get the prediction server information
60
+ self.deployment = dr.Deployment.get(self._deployment_id)
61
+
62
+ @property
63
+ def deployment_id(self) -> str:
64
+ return self._deployment_id
65
+
66
+ @property
67
+ def model_info(self):
68
+ return self._model_info
69
+
70
+ def get_input_column_name(self, stage) -> str:
71
+ return self._model_info.input_column_name
72
+
73
+ def get_span_column_name(self, _):
74
+ if self.model_info is None:
75
+ raise NotImplementedError("Missing model_info for model guard")
76
+ # Typically, the 0th token of the underscore-split target name is the column name
77
+ return self._model_info.target_name.split("_")[0]
78
+
79
+ def get_span_attribute_name(self, stage):
80
+ return f"{SPAN_PREFIX}.{stage.lower()}.{self.get_span_column_name(stage)}"
81
+
82
+ def has_average_score_custom_metric(self) -> bool:
83
+ """A couple ModelGuard types do not have an average score metric"""
84
+ return self.model_info.target_type not in ["Multiclass", "TextGeneration"]
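A short sketch of the span-naming helpers above, using a hypothetical guard model whose target is named `toxicity_score` (the module path in the import is assumed):

```python
from datarobot_dome.guard.model import GuardModelInfo  # module path assumed

info = GuardModelInfo(
    {
        "target_name": "toxicity_score",   # hypothetical target
        "target_type": "Binary",
        "input_column_name": "text",
    }
)

# ModelGuard.get_span_column_name() keeps the token before the first underscore:
info.target_name.split("_")[0]  # -> "toxicity"
# get_span_attribute_name() then prefixes it, e.g. f"{SPAN_PREFIX}.prompt.toxicity"
# (assuming GuardStage.PROMPT lowercases to "prompt").
```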
@@ -0,0 +1,329 @@
1
+ # ---------------------------------------------------------------------------------
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
+ #
5
+ # DataRobot, Inc. Confidential.
6
+ # This is proprietary source code of DataRobot, Inc. and its affiliates.
7
+ #
8
+ # This file and its contents are subject to DataRobot Tool and Utility Agreement.
9
+ # For details, see
10
+ # https://www.datarobot.com/wp-content/uploads/2021/07/DataRobot-Tool-and-Utility-Agreement.pdf.
11
+ # ---------------------------------------------------------------------------------
12
+ from functools import cached_property
13
+ from typing import Optional
14
+
15
+ import requests
16
+ from nemo_microservices import AsyncNeMoMicroservices
17
+ from nemo_microservices.types import EvaluationConfigParam
18
+ from nemo_microservices.types import EvaluationTargetParam
19
+ from nemo_microservices.types import LiveEvaluation
20
+ from nemo_microservices.types import MetricConfigParam
21
+ from nemo_microservices.types import TaskConfigParam
22
+
23
+ from datarobot_dome.constants import GuardLLMType
24
+ from datarobot_dome.guard_helpers import get_datarobot_endpoint_and_token
25
+
26
+ from .base import Guard
27
+
28
+
29
+ class NeMoEvaluatorGuard(Guard):
30
+ def __init__(self, config: dict, stage=None):
31
+ super().__init__(config, stage)
32
+ self.nemo_evaluator_type = config["nemo_evaluator_type"]
33
+ self._llm_type = config["llm_type"]
34
+ self.llm_deployment_id = config.get("deployment_id")
35
+ self.llm_gateway_model_id = config.get("llm_gateway_model_id")
36
+
37
+ @cached_property
38
+ def _client(self) -> AsyncNeMoMicroservices:
39
+ """
40
+ Using localhost for development purposes only.
41
+ It will be replaced with the URL of a deployed NeMo Evaluator instance later in the PBMP.
42
+ """
43
+ return AsyncNeMoMicroservices(base_url="http://localhost:8080")
44
+
45
+ @staticmethod
46
+ def _get_default_model_id(deployment_id: str) -> str:
47
+ """Get id of first model as default from a deployment that responds to /models endpoint."""
48
+ datarobot_endpoint, datarobot_api_token = get_datarobot_endpoint_and_token()
49
+ response = requests.get(
50
+ f"{datarobot_endpoint}/deployments/{deployment_id}/directAccess/models",
51
+ headers={"Authorization": f"Bearer {datarobot_api_token}"},
52
+ )
53
+ if response.status_code != 200:
54
+ raise ValueError(f"Unable to query for default model for deployment {deployment_id}")
55
+ try:
56
+ return response.json()["data"][0]["id"]
57
+ except (requests.JSONDecodeError, KeyError, IndexError, TypeError):
58
+ raise ValueError(f"Unable to select default model for deployment {deployment_id}")
59
+
60
+ @cached_property
61
+ def llm_judge_api_endpoint(self) -> dict:
62
+ """LLM Judge API endpoint, to be passed to NeMo evaluator."""
63
+ datarobot_endpoint, datarobot_api_token = get_datarobot_endpoint_and_token()
64
+ if self.llm_type == GuardLLMType.DATAROBOT:
65
+ url = f"{datarobot_endpoint}/deployments/{self.llm_deployment_id}/chat/completions"
66
+ model_id = self._get_default_model_id(self.llm_deployment_id)
67
+ elif self.llm_type == GuardLLMType.LLM_GATEWAY:
68
+ url = f"{datarobot_endpoint}/genai/llmgw/chat/completions"
69
+ model_id = self.llm_gateway_model_id
70
+ else:
71
+ raise ValueError(
72
+ f"LLM type {self.llm_type} is not supported by NeMo Evaluator based guards."
73
+ )
74
+ return {"url": url, "api_key": datarobot_api_token, "model_id": model_id}
75
+
76
+ def has_average_score_custom_metric(self) -> bool:
77
+ return False
78
+
79
+ async def evaluate(
80
+ self,
81
+ *,
82
+ prompt: Optional[str],
83
+ response: Optional[str],
84
+ retrieved_contexts: Optional[list[str]],
85
+ ) -> float:
86
+ raise NotImplementedError
87
+
88
+ def _extract_score(self, evaluation: LiveEvaluation) -> int | float:
89
+ task = evaluation.result.tasks[self.nemo_evaluator_type]
90
+ metric = task.metrics[self.nemo_evaluator_type]
91
+ score = metric.scores[self.nemo_evaluator_type]
92
+ return score.value
93
+
94
+
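For orientation, the judge endpoint dict built by `llm_judge_api_endpoint` resolves along these lines; the endpoint host, deployment ID, token, and gateway model ID below are placeholders:

```python
# GuardLLMType.DATAROBOT, with llm_deployment_id = "<deployment-id>":
datarobot_judge = {
    "url": "<datarobot-endpoint>/deployments/<deployment-id>/chat/completions",
    "api_key": "<datarobot-api-token>",
    "model_id": "<first model id from the deployment's directAccess /models endpoint>",
}

# GuardLLMType.LLM_GATEWAY, with llm_gateway_model_id = "<gateway-model-id>":
gateway_judge = {
    "url": "<datarobot-endpoint>/genai/llmgw/chat/completions",
    "api_key": "<datarobot-api-token>",
    "model_id": "<gateway-model-id>",
}
```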
95
+ class NeMoLLMJudgeGuard(NeMoEvaluatorGuard):
96
+ def __init__(self, config: dict, stage=None):
97
+ super().__init__(config, stage)
98
+ self.nemo_llm_judge_config = config.get("nemo_llm_judge_config", {})
99
+
100
+ async def evaluate(self, *, prompt: str, response: str, **kwargs) -> float:
101
+ system_prompt = self.nemo_llm_judge_config["system_prompt"]
102
+ user_prompt = self.nemo_llm_judge_config["user_prompt"]
103
+ score_parsing_regex = self.nemo_llm_judge_config["score_parsing_regex"]
104
+
105
+ config = EvaluationConfigParam(
106
+ type="custom",
107
+ tasks={
108
+ self.nemo_evaluator_type: TaskConfigParam(
109
+ type="data",
110
+ metrics={
111
+ self.nemo_evaluator_type: MetricConfigParam(
112
+ type="llm-judge",
113
+ params={
114
+ "model": {"api_endpoint": self.llm_judge_api_endpoint},
115
+ "template": {
116
+ "messages": [
117
+ {"role": "system", "content": system_prompt},
118
+ {"role": "user", "content": user_prompt},
119
+ ]
120
+ },
121
+ "scores": {
122
+ self.nemo_evaluator_type: {
123
+ "type": "int",
124
+ "parser": {"type": "regex", "pattern": score_parsing_regex},
125
+ }
126
+ },
127
+ },
128
+ )
129
+ },
130
+ )
131
+ },
132
+ )
133
+ target = EvaluationTargetParam(
134
+ type="rows", rows=[{"promptText": prompt, "responseText": response}]
135
+ )
136
+ evaluation = await self._client.evaluation.live(config=config, target=target)
137
+ return self._extract_score(evaluation)
138
+
139
+
140
+ class NeMoContextRelevanceGuard(NeMoEvaluatorGuard):
141
+ async def evaluate(self, *, prompt: str, retrieved_contexts: list[str], **kwargs) -> float:
142
+ config = EvaluationConfigParam(
143
+ type="custom",
144
+ tasks={
145
+ self.nemo_evaluator_type: TaskConfigParam(
146
+ type="data",
147
+ metrics={
148
+ self.nemo_evaluator_type: MetricConfigParam(
149
+ type=self.nemo_evaluator_type,
150
+ params={
151
+ "judge": {"model": {"api_endpoint": self.llm_judge_api_endpoint}},
152
+ },
153
+ )
154
+ },
155
+ )
156
+ },
157
+ )
158
+ target = EvaluationTargetParam(
159
+ type="rows",
160
+ rows=[{"user_input": prompt, "retrieved_contexts": retrieved_contexts}],
161
+ )
162
+ evaluation = await self._client.evaluation.live(config=config, target=target)
163
+ return self._extract_score(evaluation)
164
+
165
+
166
+ class NeMoResponseGroundednessGuard(NeMoEvaluatorGuard):
167
+ async def evaluate(self, *, response: str, retrieved_contexts: list[str], **kwargs) -> float:
168
+ config = EvaluationConfigParam(
169
+ type="custom",
170
+ tasks={
171
+ self.nemo_evaluator_type: TaskConfigParam(
172
+ type="data",
173
+ metrics={
174
+ self.nemo_evaluator_type: MetricConfigParam(
175
+ type=self.nemo_evaluator_type,
176
+ params={
177
+ "judge": {"model": {"api_endpoint": self.llm_judge_api_endpoint}},
178
+ },
179
+ )
180
+ },
181
+ )
182
+ },
183
+ )
184
+ target = EvaluationTargetParam(
185
+ type="rows",
186
+ rows=[{"response": response, "retrieved_contexts": retrieved_contexts}],
187
+ )
188
+ evaluation = await self._client.evaluation.live(config=config, target=target)
189
+ return self._extract_score(evaluation)
190
+
191
+
192
+ class NeMoTopicAdherenceGuard(NeMoEvaluatorGuard):
193
+ def __init__(self, config: dict, stage=None):
194
+ super().__init__(config, stage)
195
+ self.nemo_topic_adherence_config = config["nemo_topic_adherence_config"]
196
+
197
+ async def evaluate(self, *, prompt: str, response: str, **kwargs) -> float:
198
+ config = EvaluationConfigParam(
199
+ type="custom",
200
+ tasks={
201
+ self.nemo_evaluator_type: TaskConfigParam(
202
+ type="data",
203
+ metrics={
204
+ self.nemo_evaluator_type: MetricConfigParam(
205
+ type=self.nemo_evaluator_type,
206
+ params={
207
+ "judge": {"model": {"api_endpoint": self.llm_judge_api_endpoint}},
208
+ "metric_mode": self.nemo_topic_adherence_config["metric_mode"],
209
+ },
210
+ )
211
+ },
212
+ )
213
+ },
214
+ )
215
+ target = EvaluationTargetParam(
216
+ type="rows",
217
+ rows=[
218
+ {
219
+ "user_input": [
220
+ {"content": prompt, "type": "human"},
221
+ {"content": response, "type": "ai"},
222
+ ],
223
+ "reference_topics": self.nemo_topic_adherence_config["reference_topics"],
224
+ }
225
+ ],
226
+ )
227
+ evaluation = await self._client.evaluation.live(config=config, target=target)
228
+ return self._extract_score(evaluation)
229
+
230
+
231
+ class NeMoAgentGoalAccuracyGuard(NeMoEvaluatorGuard):
232
+ async def evaluate(self, *, prompt: str, response: str, **kwargs) -> float:
233
+ config = EvaluationConfigParam(
234
+ type="custom",
235
+ tasks={
236
+ self.nemo_evaluator_type: TaskConfigParam(
237
+ type="data",
238
+ metrics={
239
+ self.nemo_evaluator_type: MetricConfigParam(
240
+ type=self.nemo_evaluator_type,
241
+ params={
242
+ "judge": {"model": {"api_endpoint": self.llm_judge_api_endpoint}},
243
+ "use_reference": False,
244
+ },
245
+ )
246
+ },
247
+ )
248
+ },
249
+ )
250
+ target = EvaluationTargetParam(
251
+ type="rows",
252
+ rows=[
253
+ {
254
+ "user_input": [
255
+ {"content": prompt, "type": "human"},
256
+ {"content": response, "type": "ai"},
257
+ ],
258
+ }
259
+ ],
260
+ )
261
+ evaluation = await self._client.evaluation.live(config=config, target=target)
262
+ return self._extract_score(evaluation)
263
+
264
+
265
+ class NeMoResponseRelevancyGuard(NeMoEvaluatorGuard):
266
+ def __init__(self, config: dict, stage=None):
267
+ super().__init__(config, stage)
268
+ self.response_relevancy_config = config["nemo_response_relevancy_config"]
269
+
270
+ @cached_property
271
+ def embedding_judge_api_endpoint(self) -> dict:
272
+ """Embedding judge API endpoint, to be passed to NeMo evaluator."""
273
+ datarobot_endpoint, datarobot_api_token = get_datarobot_endpoint_and_token()
274
+ deployment_id = self.response_relevancy_config["embedding_deployment_id"]
275
+ url = f"{datarobot_endpoint}/deployments/{deployment_id}/directAccess/nim/v1/"
276
+ return {"url": url, "api_key": datarobot_api_token, "model_id": ""}
277
+
278
+ async def evaluate(
279
+ self, *, prompt: str, response: str, retrieved_context: Optional[list[str]]
280
+ ) -> float:
281
+ config = EvaluationConfigParam(
282
+ type="custom",
283
+ tasks={
284
+ self.nemo_evaluator_type: TaskConfigParam(
285
+ type="data",
286
+ metrics={
287
+ self.nemo_evaluator_type: MetricConfigParam(
288
+ type=self.nemo_evaluator_type,
289
+ params={
290
+ "judge": {"model": {"api_endpoint": self.llm_judge_api_endpoint}},
291
+ "judge_embeddings": {
292
+ "model": {"api_endpoint": self.embedding_judge_api_endpoint},
293
+ },
294
+ },
295
+ )
296
+ },
297
+ )
298
+ },
299
+ )
300
+ row = {"user_input": prompt, "response": response}
301
+ if retrieved_context:
302
+ row["retrieved_contexts"] = retrieved_context
303
+ target = EvaluationTargetParam(type="rows", rows=[row])
304
+ evaluation = await self._client.evaluation.live(config=config, target=target)
305
+ return self._extract_score(evaluation)
306
+
307
+
308
+ class NeMoFaithfulnessGuard(NeMoEvaluatorGuard):
309
+ async def evaluate(self, *, prompt: str, response: str, retrieved_contexts: list[str]) -> float:
310
+ config = EvaluationConfigParam(
311
+ type="custom",
312
+ tasks={
313
+ self.nemo_evaluator_type: TaskConfigParam(
314
+ type="data",
315
+ metrics={
316
+ self.nemo_evaluator_type: MetricConfigParam(
317
+ type=self.nemo_evaluator_type,
318
+ params={
319
+ "judge": {"model": {"api_endpoint": self.llm_judge_api_endpoint}},
320
+ },
321
+ )
322
+ },
323
+ )
324
+ },
325
+ )
326
+ row = {"user_input": prompt, "response": response, "retrieved_contexts": retrieved_contexts}
327
+ target = EvaluationTargetParam(type="rows", rows=[row])
328
+ evaluation = await self._client.evaluation.live(config=config, target=target)
329
+ return self._extract_score(evaluation)
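Putting the pieces together, a caller of one of these guards would look roughly like the sketch below. The config keys mirror those read by the constructors above, but every concrete value (guard name, evaluator type string, gateway model ID) is invented, the module path is assumed, and the call only succeeds if a NeMo Evaluator instance is actually reachable at the base URL used by `_client`.

```python
import asyncio

from datarobot_dome.constants import GuardLLMType, GuardStage, GuardType
from datarobot_dome.guard.nemo_evaluator import NeMoFaithfulnessGuard  # module path assumed

guard = NeMoFaithfulnessGuard(
    {
        "name": "Faithfulness",
        "type": GuardType.NEMO_EVALUATOR,
        "stage": GuardStage.RESPONSE,
        "copy_citations": False,
        "nemo_evaluator_type": "faithfulness",  # hypothetical NemoEvaluatorType value
        "llm_type": GuardLLMType.LLM_GATEWAY,
        "llm_gateway_model_id": "<gateway-model-id>",
    }
)

score = asyncio.run(
    guard.evaluate(
        prompt="What is the refund window?",
        response="Refunds are accepted within 30 days of purchase.",
        retrieved_contexts=["Our policy allows refunds within 30 days of purchase."],
    )
)
```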