datarobot-moderations 11.2.10__py3-none-any.whl → 11.2.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,148 @@
1
+ # ---------------------------------------------------------------------------------
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
+ #
5
+ # DataRobot, Inc. Confidential.
6
+ # This is proprietary source code of DataRobot, Inc. and its affiliates.
7
+ #
8
+ # This file and its contents are subject to DataRobot Tool and Utility Agreement.
9
+ # For details, see
10
+ # https://www.datarobot.com/wp-content/uploads/2021/07/DataRobot-Tool-and-Utility-Agreement.pdf.
11
+ # ---------------------------------------------------------------------------------
12
+ import logging
13
+ import os
14
+
15
+ import datarobot as dr
16
+ from nemoguardrails import LLMRails
17
+ from nemoguardrails import RailsConfig
18
+
19
+ from datarobot_dome.constants import NEMO_GUARDRAILS_DIR
20
+ from datarobot_dome.constants import GuardLLMType
21
+ from datarobot_dome.constants import GuardOperatorType
22
+ from datarobot_dome.guard_helpers import DEFAULT_OPEN_AI_API_VERSION
23
+ from datarobot_dome.guard_helpers import get_azure_openai_client
24
+ from datarobot_dome.guard_helpers import get_chat_nvidia_llm
25
+ from datarobot_dome.guard_helpers import get_datarobot_endpoint_and_token
26
+ from datarobot_dome.guard_helpers import get_llm_gateway_client
27
+ from datarobot_dome.guard_helpers import use_llm_gateway_inference
28
+ from datarobot_dome.guards.base import Guard
29
+ from datarobot_dome.guards.guard_llm_mixin import GuardLLMMixin
30
+
31
# Comparison target for NeMo interventions: the colang rails evaluate
# `bot should intervene` to the string "TRUE", which the intervention
# threshold defaults to below.
NEMO_THRESHOLD = "TRUE"
32
+
33
+
34
class NeMoGuard(Guard, GuardLLMMixin):
    """Guard backed by NVIDIA NeMo Guardrails.

    Builds an ``LLMRails`` instance from the rails configuration shipped under
    ``<model_dir>/nemo_guardrails/<stage>`` and wires it to an LLM client
    selected by the configured ``llm_type``.

    Args:
        config: Guard configuration dict (``llm_type``, ``openai_api_base``,
            ``openai_deployment_id``, ``deployment_id``, ...).
        stage: Guard stage (prompt/response); also selects the rails config
            subdirectory.
        model_dir: Directory containing the ``nemo_guardrails`` folder.
            NOTE(review): the default is evaluated once at import time
            (``os.getcwd()`` of the importing process) — confirm intended.

    Raises:
        ValueError: missing required credentials/URLs, or an unknown LLM type.
        NotImplementedError: LLM types not yet supported by NeMoGuard.
    """

    def __init__(self, config: dict, stage=None, model_dir: str = os.getcwd()):
        super().__init__(config, stage)
        # NeMo guard only takes a boolean as threshold and equal to as comparator.
        # Threshold bool == TRUE is defined in the colang file as the output of
        # `bot should intervene`
        if self.intervention:
            if not self.intervention.threshold:
                self.intervention.threshold = NEMO_THRESHOLD
            if not self.intervention.comparator:
                self.intervention.comparator = GuardOperatorType.EQUALS

        # Default LLM Type for NeMo is set to OpenAI
        self._llm_type = config.get("llm_type", GuardLLMType.OPENAI)
        self.openai_api_base = config.get("openai_api_base")
        self.openai_deployment_id = config.get("openai_deployment_id")
        llm_id = None
        credentials = None
        use_llm_gateway = use_llm_gateway_inference(self._llm_type)
        try:
            self.openai_api_key = self.get_openai_api_key(config, self._llm_type)
            # NIM can derive its token from the DataRobot endpoint below;
            # every other LLM type needs an explicit key here.
            if self._llm_type != GuardLLMType.NIM and self.openai_api_key is None:
                raise ValueError("OpenAI API key is required for NeMo Guardrails")

            if self.llm_type == GuardLLMType.OPENAI:
                credentials = {
                    "credential_type": "openai",
                    "api_key": self.openai_api_key,
                }
                os.environ["OPENAI_API_KEY"] = self.openai_api_key
                # llm=None — presumably LLMRails builds its own OpenAI client
                # from the env var set above; confirm against nemoguardrails docs.
                llm = None
            elif self.llm_type == GuardLLMType.AZURE_OPENAI:
                if self.openai_api_base is None:
                    raise ValueError("Azure OpenAI API base url is required for LLM Guard")
                if self.openai_deployment_id is None:
                    raise ValueError("Azure OpenAI deployment ID is required for LLM Guard")
                credentials = {
                    "credential_type": "azure_openai",
                    "api_base": self.openai_api_base,
                    "api_version": DEFAULT_OPEN_AI_API_VERSION,
                    "api_key": self.openai_api_key,
                }
                azure_openai_client = get_azure_openai_client(
                    openai_api_key=self.openai_api_key,
                    openai_api_base=self.openai_api_base,
                    openai_deployment_id=self.openai_deployment_id,
                )
                llm = azure_openai_client
            elif self.llm_type == GuardLLMType.GOOGLE:
                # llm_id = config["google_model"]
                raise NotImplementedError
            elif self.llm_type == GuardLLMType.AMAZON:
                # llm_id = config["aws_model"]
                raise NotImplementedError
            elif self.llm_type == GuardLLMType.DATAROBOT:
                raise NotImplementedError
            elif self.llm_type == GuardLLMType.LLM_GATEWAY:
                raise NotImplementedError
            elif self.llm_type == GuardLLMType.NIM:
                if config.get("deployment_id") is None:
                    # Legacy path: caller supplied the NIM endpoint directly.
                    if self.openai_api_base is None:
                        raise ValueError("NIM DataRobot deployment id is required for NIM LLM Type")
                    else:
                        logging.warning(
                            "Using 'openai_api_base' is being deprecated and will be removed "
                            "in the next release. Please configure NIM DataRobot deployment "
                            "using deployment_id"
                        )
                    if self.openai_api_key is None:
                        raise ValueError("OpenAI API key is required for NeMo Guardrails")
                else:
                    # Preferred path: resolve endpoint + token from the
                    # DataRobot deployment.
                    self.deployment = dr.Deployment.get(self._deployment_id)
                    datarobot_endpoint, self.openai_api_key = get_datarobot_endpoint_and_token()
                    self.openai_api_base = (
                        f"{datarobot_endpoint}/deployments/{str(self._deployment_id)}"
                    )
                llm = get_chat_nvidia_llm(
                    api_key=self.openai_api_key,
                    base_url=self.openai_api_base,
                )
            else:
                raise ValueError(f"Invalid LLMType: {self.llm_type}")

        except Exception as e:
            # no valid user credentials provided, raise if not using LLM Gateway
            credentials = None
            if not use_llm_gateway:
                raise e

        if use_llm_gateway:
            # Currently only OPENAI and AZURE_OPENAI are supported by NeMoGuard
            # For Bedrock and Vertex the model in the config is actually the LLM ID
            # For OpenAI we use the default model defined in get_llm_gateway_client
            # For Azure we use the deployment ID
            llm = get_llm_gateway_client(
                llm_id=llm_id,
                openai_deployment_id=self.openai_deployment_id,
                credentials=credentials,
            )

        # Use guard stage to determine whether to read from prompt/response subdirectory
        # for nemo configurations. "nemo_guardrails" folder is at same level of custom.py
        # So, the config path becomes model_dir + "nemo_guardrails"
        nemo_config_path = os.path.join(model_dir, NEMO_GUARDRAILS_DIR)
        self.nemo_rails_config_path = os.path.join(nemo_config_path, self.stage)
        nemo_rails_config = RailsConfig.from_path(config_path=self.nemo_rails_config_path)
        self._nemo_llm_rails = LLMRails(nemo_rails_config, llm=llm)

    def has_average_score_custom_metric(self) -> bool:
        """No average score metrics for NemoGuard's"""
        return False

    @property
    def nemo_llm_rails(self):
        # The LLMRails instance built in __init__.
        return self._nemo_llm_rails
@@ -0,0 +1,209 @@
1
+ # ---------------------------------------------------------------------------------
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
+ #
5
+ # DataRobot, Inc. Confidential.
6
+ # This is proprietary source code of DataRobot, Inc. and its affiliates.
7
+ #
8
+ # This file and its contents are subject to DataRobot Tool and Utility Agreement.
9
+ # For details, see
10
+ # https://www.datarobot.com/wp-content/uploads/2021/07/DataRobot-Tool-and-Utility-Agreement.pdf.
11
+ # ---------------------------------------------------------------------------------
12
+ from datarobot.enums import CustomMetricAggregationType
13
+ from datarobot.enums import CustomMetricDirectionality
14
+ from deepeval.metrics import TaskCompletionMetric
15
+ from llama_index.core import Settings
16
+ from llama_index.core.evaluation import FaithfulnessEvaluator
17
+ from llama_index.core.evaluation import GuidelineEvaluator
18
+ from ragas.llms import LangchainLLMWrapper
19
+ from ragas.llms import LlamaIndexLLMWrapper
20
+ from ragas.metrics import AgentGoalAccuracyWithoutReference
21
+
22
+ from datarobot_dome.constants import AGENT_GOAL_ACCURACY_COLUMN_NAME
23
+ from datarobot_dome.constants import COST_COLUMN_NAME
24
+ from datarobot_dome.constants import CUSTOM_METRIC_DESCRIPTION_SUFFIX
25
+ from datarobot_dome.constants import FAITHFULLNESS_COLUMN_NAME
26
+ from datarobot_dome.constants import GUIDELINE_ADHERENCE_COLUMN_NAME
27
+ from datarobot_dome.constants import ROUGE_1_COLUMN_NAME
28
+ from datarobot_dome.constants import SPAN_PREFIX
29
+ from datarobot_dome.constants import TASK_ADHERENCE_SCORE_COLUMN_NAME
30
+ from datarobot_dome.constants import TOKEN_COUNT_COLUMN_NAME
31
+ from datarobot_dome.constants import GuardLLMType
32
+ from datarobot_dome.constants import GuardStage
33
+ from datarobot_dome.constants import OOTBType
34
+ from datarobot_dome.guard_helpers import ModerationDeepEvalLLM
35
+
36
+ from .base import Guard
37
+ from .guard_llm_mixin import GuardLLMMixin
38
+
39
+
40
class OOTBGuard(Guard):
    """Base class for out-of-the-box (OOTB) moderation guards."""

    def __init__(self, config: dict, stage=None):
        super().__init__(config, stage)
        # The OOTB flavor (token count, ROUGE-1, custom metric, ...) drives
        # the span/metric naming below.
        self._ootb_type = config["ootb_type"]

    @property
    def ootb_type(self):
        """The configured OOTB guard flavor."""
        return self._ootb_type

    def has_latency_custom_metric(self):
        """Latency is tracked for every OOTB guard except token-count guards."""
        return self._ootb_type != OOTBType.TOKEN_COUNT

    def get_span_column_name(self, _):
        """Return the column name used for this guard in tracing spans."""
        flavor = self._ootb_type
        if flavor == OOTBType.TOKEN_COUNT:
            return TOKEN_COUNT_COLUMN_NAME
        if flavor == OOTBType.ROUGE_1:
            return ROUGE_1_COLUMN_NAME
        if flavor == OOTBType.CUSTOM_METRIC:
            return self.name
        raise NotImplementedError(f"No span attribute name defined for {self._ootb_type} guard")

    def get_span_attribute_name(self, stage):
        """Return the fully qualified span attribute name for ``stage``."""
        column = self.get_span_column_name(stage)
        return f"{SPAN_PREFIX}.{stage.lower()}.{column}"
65
+
66
+
67
class OOTBCostMetric(OOTBGuard):
    """OOTB guard that aggregates LLM usage cost as a custom metric."""

    def __init__(self, config, stage):
        super().__init__(config, stage)
        # The cost is calculated based on the usage metrics returned by the
        # completion object, so it can be evaluated only at response stage
        self._stage = GuardStage.RESPONSE
        cost_config = config["additional_guard_config"]["cost"]
        self.currency = cost_config["currency"]
        self.input_price = cost_config["input_price"]
        self.input_unit = cost_config["input_unit"]
        self.output_price = cost_config["output_price"]
        self.output_unit = cost_config["output_unit"]
        # Pre-compute the price/unit ratios used for cost accumulation.
        self.input_multiplier = self.input_price / self.input_unit
        self.output_multiplier = self.output_price / self.output_unit

    def get_average_score_custom_metric_name(self, _):
        """Display name of the aggregated cost custom metric."""
        return f"Total cost in {self.currency}"

    def get_average_score_metric(self, _):
        """Return the custom-metric definition payload for the cost metric."""
        metric_name = self.get_average_score_custom_metric_name(_)
        return {
            "name": metric_name,
            "directionality": CustomMetricDirectionality.LOWER_IS_BETTER,
            "units": "value",
            "type": CustomMetricAggregationType.SUM,
            "baselineValue": 0,
            "isModelSpecific": True,
            "timeStep": "hour",
            "description": (
                f"{metric_name}. "
                f" {CUSTOM_METRIC_DESCRIPTION_SUFFIX}"
            ),
        }

    def get_span_column_name(self, _):
        """Span column name, qualified by the configured currency."""
        return f"{COST_COLUMN_NAME}.{self.currency.lower()}"

    def get_span_attribute_name(self, _):
        """Span attribute name; always uses the response stage."""
        column = self.get_span_column_name(_)
        return f"{SPAN_PREFIX}.{self._stage.lower()}.{column}"
105
+
106
+
107
class OOTBFaithfulnessGuard(OOTBGuard, GuardLLMMixin):
    """OOTB guard scoring response faithfulness via llama_index's
    ``FaithfulnessEvaluator``. Response stage only.
    """

    def __init__(self, config: dict, stage=None):
        super().__init__(config, stage)

        if self.stage == GuardStage.PROMPT:
            raise Exception("Faithfulness cannot be configured for the Prompt stage")

        # Default LLM Type for Faithfulness is set to Azure OpenAI
        self._llm_type = config.get("llm_type", GuardLLMType.AZURE_OPENAI)
        # NOTE: mutates llama_index's process-global Settings — this affects
        # any other llama_index usage in the same process.
        Settings.llm = self.get_llm(config, self._llm_type)
        Settings.embed_model = None
        self._evaluator = FaithfulnessEvaluator()

    @property
    def faithfulness_evaluator(self):
        # The llama_index FaithfulnessEvaluator built in __init__.
        return self._evaluator

    def get_span_column_name(self, _):
        # Fixed column name regardless of stage.
        return FAITHFULLNESS_COLUMN_NAME

    def get_span_attribute_name(self, _):
        return f"{SPAN_PREFIX}.{self._stage.lower()}.{self.get_span_column_name(_)}"
129
+
130
+
131
class OOTBAgentGoalAccuracyGuard(OOTBGuard, GuardLLMMixin):
    """OOTB guard scoring agent goal accuracy via ragas'
    ``AgentGoalAccuracyWithoutReference`` metric. Response stage only.
    """

    def __init__(self, config: dict, stage=None):
        super().__init__(config, stage)

        if self.stage == GuardStage.PROMPT:
            raise Exception("Agent Goal Accuracy guard cannot be configured for the Prompt stage")

        # Default LLM Type for Agent Goal Accuracy is set to Azure OpenAI
        self._llm_type = config.get("llm_type", GuardLLMType.AZURE_OPENAI)
        llm = self.get_llm(config, self._llm_type)
        # Azure OpenAI clients are wrapped as LangChain LLMs, everything else
        # as llama_index LLMs. NOTE(review): grounded only in the wrapper
        # choice here — confirm get_llm's actual return types per llm_type.
        if self._llm_type == GuardLLMType.AZURE_OPENAI:
            evaluator_llm = LangchainLLMWrapper(llm)
        else:
            evaluator_llm = LlamaIndexLLMWrapper(llm)
        self.scorer = AgentGoalAccuracyWithoutReference(llm=evaluator_llm)

    @property
    def accuracy_scorer(self):
        # The configured ragas goal-accuracy scorer.
        return self.scorer

    def get_span_column_name(self, _):
        # Fixed column name regardless of stage.
        return AGENT_GOAL_ACCURACY_COLUMN_NAME

    def get_span_attribute_name(self, _):
        return f"{SPAN_PREFIX}.{self._stage.lower()}.{self.get_span_column_name(_)}"
156
+
157
+
158
class OOTBTaskAdherenceGuard(OOTBGuard, GuardLLMMixin):
    """OOTB guard scoring task adherence via deepeval's
    ``TaskCompletionMetric``. Response stage only.
    """

    def __init__(self, config: dict, stage=None):
        super().__init__(config, stage)

        if self.stage == GuardStage.PROMPT:
            # Bug fix: the message previously said "Agent Goal Accuracy guard"
            # (copy-paste from OOTBAgentGoalAccuracyGuard).
            raise Exception("Task Adherence guard cannot be configured for the Prompt stage")

        # Default LLM Type for Task Adherence is set to Azure OpenAI
        self._llm_type = config.get("llm_type", GuardLLMType.AZURE_OPENAI)
        llm = self.get_llm(config, self._llm_type)
        # Adapt the moderation LLM client to deepeval's model interface.
        deepeval_llm = ModerationDeepEvalLLM(llm)
        self.scorer = TaskCompletionMetric(model=deepeval_llm, include_reason=True)

    @property
    def task_adherence_scorer(self):
        # The configured deepeval TaskCompletionMetric scorer.
        return self.scorer

    def get_span_column_name(self, _):
        # Fixed column name regardless of stage.
        return TASK_ADHERENCE_SCORE_COLUMN_NAME

    def get_span_attribute_name(self, _):
        return f"{SPAN_PREFIX}.{self._stage.lower()}.{self.get_span_column_name(_)}"
180
+
181
+
182
class OOTBAgentGuidelineAdherence(OOTBGuard, GuardLLMMixin):
    """OOTB guard checking guideline adherence via llama_index's
    ``GuidelineEvaluator``. Response stage only.
    """

    def __init__(self, config: dict, stage=None):
        super().__init__(config, stage)

        if self.stage == GuardStage.PROMPT:
            raise Exception(
                "Agent Guideline Adherence guard cannot be configured for the Prompt stage"
            )

        additional_config = config.get("additional_guard_config", {})
        self.guideline = additional_config.get("agent_guideline")
        # Reject a missing or empty guideline up front.
        if not self.guideline:
            raise Exception("Agent Guideline Adherence requires at least one guideline.")

        # Default LLM Type for Guideline Adherence is set to Azure OpenAI
        self._llm_type = config.get("llm_type", GuardLLMType.AZURE_OPENAI)
        judge_llm = self.get_llm(config, self._llm_type)

        self.scorer = GuidelineEvaluator(llm=judge_llm, guidelines=self.guideline)

    @property
    def guideline_adherence_scorer(self):
        """The configured llama_index GuidelineEvaluator."""
        return self.scorer

    def get_span_column_name(self, _):
        """Fixed span column name regardless of stage."""
        return GUIDELINE_ADHERENCE_COLUMN_NAME

    def get_span_attribute_name(self, _):
        return f"{SPAN_PREFIX}.{self._stage.lower()}.{self.get_span_column_name(_)}"
@@ -0,0 +1,234 @@
1
+ # ---------------------------------------------------------------------------------
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
+ #
5
+ # DataRobot, Inc. Confidential.
6
+ # This is proprietary source code of DataRobot, Inc. and its affiliates.
7
+ #
8
+ # This file and its contents are subject to DataRobot Tool and Utility Agreement.
9
+ # For details, see
10
+ # https://www.datarobot.com/wp-content/uploads/2021/07/DataRobot-Tool-and-Utility-Agreement.pdf.
11
+ # ---------------------------------------------------------------------------------
12
+ import trafaret as t
13
+
14
+ from datarobot_dome.constants import DEFAULT_GUARD_PREDICTION_TIMEOUT_IN_SEC
15
+ from datarobot_dome.constants import AwsModel
16
+ from datarobot_dome.constants import CostCurrency
17
+ from datarobot_dome.constants import GoogleModel
18
+ from datarobot_dome.constants import GuardAction
19
+ from datarobot_dome.constants import GuardLLMType
20
+ from datarobot_dome.constants import GuardModelTargetType
21
+ from datarobot_dome.constants import GuardOperatorType
22
+ from datarobot_dome.constants import GuardStage
23
+ from datarobot_dome.constants import GuardTimeoutAction
24
+ from datarobot_dome.constants import GuardType
25
+ from datarobot_dome.constants import NemoEvaluatorType
26
+ from datarobot_dome.constants import OOTBType
27
+
28
# Maximum field lengths enforced by the trafaret schemas below.
MAX_GUARD_NAME_LENGTH = 255
MAX_COLUMN_NAME_LENGTH = 255
MAX_GUARD_COLUMN_NAME_LENGTH = 255
MAX_GUARD_MESSAGE_LENGTH = 4096
MAX_GUARD_DESCRIPTION_LENGTH = 4096
# Length of a MongoDB-style hex ObjectId string.
OBJECT_ID_LENGTH = 24
MAX_REGEX_LENGTH = 255
MAX_URL_LENGTH = 255
MAX_TOKEN_LENGTH = 255
MAX_GUIDELINE_LENGTH = 4096
MAX_REFERENCE_TOPIC_LENGTH = 4096
39
+
40
# Schema for the cost custom-metric configuration (OOTBCostMetric):
# per-unit input/output prices in the given currency.
cost_metric_trafaret = t.Dict(
    {
        t.Key("currency", to_name="currency", optional=True, default=CostCurrency.USD): t.Enum(
            *CostCurrency.ALL
        ),
        t.Key("input_price", to_name="input_price", optional=False): t.Float(),
        t.Key("input_unit", to_name="input_unit", optional=False): t.Int(),
        t.Key("output_price", to_name="output_price", optional=False): t.Float(),
        t.Key("output_unit", to_name="output_unit", optional=False): t.Int(),
    }
)
51
+
52
+
53
# Schema for deployed-model guard configuration: input/target column wiring
# plus the guard model's target type. Extra keys are passed through.
model_info_trafaret = t.Dict(
    {
        t.Key("class_names", to_name="class_names", optional=True): t.List(
            t.String(max_length=MAX_COLUMN_NAME_LENGTH)
        ),
        t.Key("model_id", to_name="model_id", optional=True): t.String(max_length=OBJECT_ID_LENGTH),
        t.Key("input_column_name", to_name="input_column_name", optional=False): t.String(
            max_length=MAX_COLUMN_NAME_LENGTH
        ),
        t.Key("target_name", to_name="target_name", optional=False): t.String(
            max_length=MAX_COLUMN_NAME_LENGTH
        ),
        t.Key(
            "replacement_text_column_name", to_name="replacement_text_column_name", optional=True
        ): t.Or(t.String(allow_blank=True, max_length=MAX_COLUMN_NAME_LENGTH), t.Null),
        t.Key("target_type", to_name="target_type", optional=False): t.Enum(
            *GuardModelTargetType.ALL
        ),
    },
    allow_extra=["*"],
)
74
+
75
+
76
# Schema for the NeMo LLM-as-judge evaluator: prompts plus a regex used to
# extract the numeric score from the judge's reply.
nemo_llm_judge_config_trafaret = t.Dict(
    {
        t.Key("system_prompt", optional=False): t.String(),
        t.Key("user_prompt", optional=False): t.String(),
        t.Key("score_parsing_regex", optional=False): t.String(max_length=MAX_REGEX_LENGTH),
    },
)

# Schema for the NeMo topic-adherence evaluator: scoring mode and the list of
# reference topics to check against.
nemo_topic_adherence_config_trafaret = t.Dict(
    {
        t.Key("metric_mode", optional=False): t.Enum("f1", "recall", "precision"),
        t.Key("reference_topics", optional=False): t.List(
            t.String(max_length=MAX_REFERENCE_TOPIC_LENGTH)
        ),
    },
)

# Schema for the NeMo response-relevancy evaluator: the DataRobot deployment
# providing embeddings.
nemo_response_relevancy_config_trafaret = t.Dict(
    {
        t.Key("embedding_deployment_id", optional=False): t.String(max_length=OBJECT_ID_LENGTH),
    },
)
98
+
99
+
100
# Schema for a single intervention condition: a comparand (scalar or list)
# and a comparison operator. Extra keys are passed through.
model_guard_intervention_trafaret = t.Dict(
    {
        t.Key("comparand", to_name="comparand", optional=False): t.Or(
            t.String(max_length=MAX_GUARD_NAME_LENGTH),
            t.Float(),
            t.Bool(),
            t.List(t.String(max_length=MAX_GUARD_NAME_LENGTH)),
            t.List(t.Float()),
        ),
        t.Key("comparator", to_name="comparator", optional=False): t.Enum(*GuardOperatorType.ALL),
    },
    allow_extra=["*"],
)
113
+
114
+
115
# Schema for a guard's intervention: the action to take, an optional user
# message, and at most one trigger condition.
guard_intervention_trafaret = t.Dict(
    {
        t.Key("action", to_name="action", optional=False): t.Enum(*GuardAction.ALL),
        t.Key("message", to_name="message", optional=True): t.String(
            max_length=MAX_GUARD_MESSAGE_LENGTH, allow_blank=True
        ),
        # A list capped at one condition (or null) — single-condition semantics.
        t.Key("conditions", to_name="conditions", optional=True): t.Or(
            t.List(
                model_guard_intervention_trafaret,
                max_length=1,
                min_length=0,
            ),
            t.Null,
        ),
        t.Key("send_notification", to_name="send_notification", optional=True): t.Bool(),
    },
    allow_extra=["*"],
)
133
+
134
# Schema for guard-specific extras: cost pricing, tool-call config (free-form),
# and the guideline text for the guideline-adherence guard.
additional_guard_config_trafaret = t.Dict(
    {
        t.Key("cost", to_name="cost", optional=True): t.Or(cost_metric_trafaret, t.Null),
        t.Key("tool_call", to_name="tool_call", optional=True): t.Or(t.Any(), t.Null),
        t.Key("agent_guideline", to_name="agent_guideline", optional=True): t.String(
            max_length=MAX_GUIDELINE_LENGTH, allow_blank=True
        ),
    }
)
143
+
144
+
145
# Schema for a single guard definition: identity, stage(s), guard/LLM type,
# per-type sub-configs, intervention, and provider credentials/endpoints.
# Extra keys are passed through.
guard_trafaret = t.Dict(
    {
        t.Key("name", to_name="name", optional=False): t.String(max_length=MAX_GUARD_NAME_LENGTH),
        t.Key("description", to_name="description", optional=True): t.String(
            max_length=MAX_GUARD_DESCRIPTION_LENGTH
        ),
        # A guard may apply to one stage or a list of stages.
        t.Key("stage", to_name="stage", optional=False): t.Or(
            t.List(t.Enum(*GuardStage.ALL)), t.Enum(*GuardStage.ALL)
        ),
        t.Key("type", to_name="type", optional=False): t.Enum(*GuardType.ALL),
        t.Key("ootb_type", to_name="ootb_type", optional=True): t.Enum(*OOTBType.ALL),
        t.Key("nemo_evaluator_type", to_name="nemo_evaluator_type", optional=True): t.Enum(
            *NemoEvaluatorType.ALL
        ),
        t.Key("llm_type", to_name="llm_type", optional=True): t.Enum(*GuardLLMType.ALL),
        t.Key("deployment_id", to_name="deployment_id", optional=True): t.Or(
            t.String(max_length=OBJECT_ID_LENGTH), t.Null
        ),
        t.Key("llm_gateway_model_id", to_name="llm_gateway_model_id", optional=True): t.Or(
            t.String, t.Null
        ),
        t.Key("model_info", to_name="model_info", optional=True): model_info_trafaret,
        t.Key("nemo_llm_judge_config", optional=True): nemo_llm_judge_config_trafaret,
        t.Key("nemo_topic_adherence_config", optional=True): nemo_topic_adherence_config_trafaret,
        t.Key(
            "nemo_response_relevancy_config", optional=True
        ): nemo_response_relevancy_config_trafaret,
        t.Key("intervention", to_name="intervention", optional=True): t.Or(
            guard_intervention_trafaret, t.Null
        ),
        t.Key("openai_api_key", to_name="openai_api_key", optional=True): t.Or(
            t.String(max_length=MAX_TOKEN_LENGTH), t.Null
        ),
        # NOTE(review): Azure deployment names are not ObjectIds — confirm the
        # 24-character cap on openai_deployment_id is intentional.
        t.Key("openai_deployment_id", to_name="openai_deployment_id", optional=True): t.Or(
            t.String(max_length=OBJECT_ID_LENGTH), t.Null
        ),
        t.Key("openai_api_base", to_name="openai_api_base", optional=True): t.Or(
            t.String(max_length=MAX_URL_LENGTH), t.Null
        ),
        t.Key("google_region", to_name="google_region", optional=True): t.Or(t.String, t.Null),
        t.Key("google_model", to_name="google_model", optional=True): t.Or(
            t.Enum(*GoogleModel.ALL), t.Null
        ),
        t.Key("aws_region", to_name="aws_region", optional=True): t.Or(t.String, t.Null),
        t.Key("aws_model", to_name="aws_model", optional=True): t.Or(t.Enum(*AwsModel.ALL), t.Null),
        t.Key("faas_url", optional=True): t.Or(t.String(max_length=MAX_URL_LENGTH), t.Null),
        t.Key("copy_citations", optional=True, default=False): t.Bool(),
        t.Key("is_agentic", to_name="is_agentic", optional=True, default=False): t.Bool(),
        t.Key(
            "additional_guard_config",
            to_name="additional_guard_config",
            optional=True,
            default=None,
        ): t.Or(additional_guard_config_trafaret, t.Null),
    },
    allow_extra=["*"],
)
202
+
203
+
204
# Top-level moderation configuration schema: timeout behavior, the
# extra-model-output toggle, and the list of guards. Extra keys pass through.
moderation_config_trafaret = t.Dict(
    {
        t.Key(
            "timeout_sec",
            to_name="timeout_sec",
            optional=True,
            default=DEFAULT_GUARD_PREDICTION_TIMEOUT_IN_SEC,
        ): t.Int(gt=1),
        t.Key(
            "timeout_action",
            to_name="timeout_action",
            optional=True,
            default=GuardTimeoutAction.SCORE,
        ): t.Enum(*GuardTimeoutAction.ALL),
        # Why default is True?
        # We manually tested it and sending extra output with OpenAI completion object under
        # "datarobot_moderations" field seems to be working by default, "EVEN WITH" OpenAI client
        # It will always work with the API response (because it will simply be treated as extra data
        # in the json response). So, most of the times it is going to work. In future, if the
        # OpenAI client couldn't recognize extra data - we can simply disable this flag, so that
        # it won't break the client and user flow
        t.Key(
            "enable_extra_model_output_for_chat",
            to_name="enable_extra_model_output_for_chat",
            optional=True,
            default=True,
        ): t.Bool(),
        t.Key("guards", to_name="guards", optional=False): t.List(guard_trafaret),
    },
    allow_extra=["*"],
)
datarobot_dome/llm.py CHANGED
@@ -1,6 +1,6 @@
1
1
  # ---------------------------------------------------------------------------------
2
- # Copyright (c) 2025 DataRobot, Inc. and its affiliates. All rights reserved.
3
- # Last updated 2025.
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
4
  #
5
5
  # DataRobot, Inc. Confidential.
6
6
  # This is proprietary source code of DataRobot, Inc. and its affiliates.
@@ -1,6 +1,6 @@
1
1
  # ---------------------------------------------------------------------------------
2
- # Copyright (c) 2025 DataRobot, Inc. and its affiliates. All rights reserved.
3
- # Last updated 2025.
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
4
  #
5
5
  # DataRobot, Inc. Confidential.
6
6
  # This is proprietary source code of DataRobot, Inc. and its affiliates.
@@ -1,6 +1,6 @@
1
1
  # ---------------------------------------------------------------------------------
2
- # Copyright (c) 2025 DataRobot, Inc. and its affiliates. All rights reserved.
3
- # Last updated 2025.
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
4
  #
5
5
  # DataRobot, Inc. Confidential.
6
6
  # This is proprietary source code of DataRobot, Inc. and its affiliates.
@@ -1,6 +1,6 @@
1
1
  # ---------------------------------------------------------------------------------
2
- # Copyright (c) 2025 DataRobot, Inc. and its affiliates. All rights reserved.
3
- # Last updated 2025.
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
4
  #
5
5
  # DataRobot, Inc. Confidential.
6
6
  # This is proprietary source code of DataRobot, Inc. and its affiliates.
@@ -1,6 +1,6 @@
1
1
  # ---------------------------------------------------------------------------------
2
- # Copyright (c) 2025 DataRobot, Inc. and its affiliates. All rights reserved.
3
- # Last updated 2025.
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
4
  #
5
5
  # DataRobot, Inc. Confidential.
6
6
  # This is proprietary source code of DataRobot, Inc. and its affiliates.
@@ -1,6 +1,6 @@
1
1
  # ---------------------------------------------------------------------------------
2
- # Copyright (c) 2025 DataRobot, Inc. and its affiliates. All rights reserved.
3
- # Last updated 2025.
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
4
  #
5
5
  # DataRobot, Inc. Confidential.
6
6
  # This is proprietary source code of DataRobot, Inc. and its affiliates.
@@ -1,6 +1,6 @@
1
1
  # ---------------------------------------------------------------------------------
2
- # Copyright (c) 2025 DataRobot, Inc. and its affiliates. All rights reserved.
3
- # Last updated 2025.
2
+ # Copyright (c) 2026 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2026.
4
4
  #
5
5
  # DataRobot, Inc. Confidential.
6
6
  # This is proprietary source code of DataRobot, Inc. and its affiliates.
@@ -24,9 +24,9 @@ from datarobot_dome.constants import LOGGER_NAME_PREFIX
24
24
  from datarobot_dome.constants import GuardAction
25
25
  from datarobot_dome.constants import GuardOperatorType
26
26
  from datarobot_dome.constants import GuardStage
27
- from datarobot_dome.guard import GuardFactory
28
- from datarobot_dome.guard import moderation_config_trafaret
27
+ from datarobot_dome.guard_factory import GuardFactory
29
28
  from datarobot_dome.guard_helpers import get_rouge_1_scorer
29
+ from datarobot_dome.guards.validation import moderation_config_trafaret
30
30
  from datarobot_dome.pipeline.pipeline import Pipeline
31
31
 
32
32
  CUSTOM_METRICS_BULK_UPLOAD_API_PREFIX = "deployments"