datarobot-moderations 11.1.15__py3-none-any.whl → 11.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -41,6 +41,7 @@ RETRY_COUNT = 10
41
41
  MODERATION_CONFIG_FILE_NAME = "moderation_config.yaml"
42
42
  DATAROBOT_SERVERLESS_PLATFORM = "datarobotServerless"
43
43
 
44
+ RUNTIME_PARAMETER_PREFIX = "MLOPS_RUNTIME_PARAM_"
44
45
  SECRET_DEFINITION_PREFIX = "MLOPS_RUNTIME_PARAM_MODERATION"
45
46
  OPENAI_SECRET_DEFINITION_SUFFIX = "OPENAI_API_KEY"
46
47
  GOOGLE_SERVICE_ACCOUNT_SECRET_DEFINITION_SUFFIX = "GOOGLE_SERVICE_ACCOUNT"
@@ -62,8 +63,8 @@ PROMPT_VECTOR_ATTR = "prompt_vector"
62
63
  DATAROBOT_CONFIGURED_ON_PREM_ST_SAAS_URL = "http://datarobot-nginx/api/v2"
63
64
  DATAROBOT_ACTUAL_ON_PREM_ST_SAAS_URL = "http://datarobot-prediction-server:80/predApi/v1.0"
64
65
 
65
-
66
- DISABLE_MODERATION_RUNTIME_PARAM_NAME = "MLOPS_RUNTIME_PARAM_DISABLE_MODERATION"
66
+ DISABLE_MODERATION_RUNTIME_PARAM_NAME = "DISABLE_MODERATION"
67
+ ENABLE_LLM_GATEWAY_INFERENCE_RUNTIME_PARAM_NAME = "ENABLE_LLM_GATEWAY_INFERENCE"
67
68
 
68
69
  LLM_CONTEXT_COLUMN_NAME = "_LLM_CONTEXT"
69
70
  PROMPT_TOKEN_COUNT_COLUMN_NAME_FROM_USAGE = "prompt_token_count_from_usage"
@@ -242,6 +243,11 @@ class AwsModel:
242
243
  ANTHROPIC_CLAUDE_3_HAIKU = "anthropic-claude-3-haiku"
243
244
  ANTHROPIC_CLAUDE_3_SONNET = "anthropic-claude-3-sonnet"
244
245
  ANTHROPIC_CLAUDE_3_OPUS = "anthropic-claude-3-opus"
246
+ ANTHROPIC_CLAUDE_3_5_SONNET_V1 = "anthropic-claude-3-sonnet-v1"
247
+ ANTHROPIC_CLAUDE_3_5_SONNET_V2 = "anthropic-claude-3-sonnet-v2"
248
+ AMAZON_NOVA_LITE = "amazon-nova-lite"
249
+ AMAZON_NOVA_MICRO = "amazon-nova-micro"
250
+ AMAZON_NOVA_PRO = "amazon-nova-pro"
245
251
 
246
252
  ALL = [
247
253
  TITAN,
@@ -249,6 +255,11 @@ class AwsModel:
249
255
  ANTHROPIC_CLAUDE_3_HAIKU,
250
256
  ANTHROPIC_CLAUDE_3_SONNET,
251
257
  ANTHROPIC_CLAUDE_3_OPUS,
258
+ ANTHROPIC_CLAUDE_3_5_SONNET_V1,
259
+ ANTHROPIC_CLAUDE_3_5_SONNET_V2,
260
+ AMAZON_NOVA_LITE,
261
+ AMAZON_NOVA_MICRO,
262
+ AMAZON_NOVA_PRO,
252
263
  ]
253
264
 
254
265
 
@@ -266,6 +277,11 @@ class AwsModelVersion:
266
277
  ANTHROPIC_CLAUDE_3_HAIKU = "anthropic.claude-3-haiku-20240307-v1:0"
267
278
  ANTHROPIC_CLAUDE_3_SONNET = "anthropic.claude-3-sonnet-20240229-v1:0"
268
279
  ANTHROPIC_CLAUDE_3_OPUS = "anthropic.claude-3-opus-20240229-v1:0"
280
+ ANTHROPIC_CLAUDE_3_5_SONNET_V1 = "anthropic.claude-3-5-sonnet-20240620-v1:0"
281
+ ANTHROPIC_CLAUDE_3_5_SONNET_V2 = "anthropic.claude-3-5-sonnet-20241022-v2:0"
282
+ AMAZON_NOVA_LITE = "amazon.nova-lite-v1:0"
283
+ AMAZON_NOVA_MICRO = "amazon.nova-micro-v1:0"
284
+ AMAZON_NOVA_PRO = "amazon.nova-pro-v1:0"
269
285
 
270
286
  ALL = [
271
287
  TITAN,
@@ -273,6 +289,11 @@ class AwsModelVersion:
273
289
  ANTHROPIC_CLAUDE_3_HAIKU,
274
290
  ANTHROPIC_CLAUDE_3_SONNET,
275
291
  ANTHROPIC_CLAUDE_3_OPUS,
292
+ ANTHROPIC_CLAUDE_3_5_SONNET_V1,
293
+ ANTHROPIC_CLAUDE_3_5_SONNET_V2,
294
+ AMAZON_NOVA_LITE,
295
+ AMAZON_NOVA_MICRO,
296
+ AMAZON_NOVA_PRO,
276
297
  ]
277
298
 
278
299
 
@@ -288,6 +309,11 @@ AWS_MODEL_TO_AWS_MODEL_VERSION_MAP = {
288
309
  AwsModel.ANTHROPIC_CLAUDE_3_HAIKU: AwsModelVersion.ANTHROPIC_CLAUDE_3_HAIKU,
289
310
  AwsModel.ANTHROPIC_CLAUDE_3_SONNET: AwsModelVersion.ANTHROPIC_CLAUDE_3_SONNET,
290
311
  AwsModel.ANTHROPIC_CLAUDE_3_OPUS: AwsModelVersion.ANTHROPIC_CLAUDE_3_OPUS,
312
+ AwsModel.ANTHROPIC_CLAUDE_3_5_SONNET_V1: AwsModelVersion.ANTHROPIC_CLAUDE_3_5_SONNET_V1,
313
+ AwsModel.ANTHROPIC_CLAUDE_3_5_SONNET_V2: AwsModelVersion.ANTHROPIC_CLAUDE_3_5_SONNET_V2,
314
+ AwsModel.AMAZON_NOVA_LITE: AwsModelVersion.AMAZON_NOVA_LITE,
315
+ AwsModel.AMAZON_NOVA_MICRO: AwsModelVersion.AMAZON_NOVA_MICRO,
316
+ AwsModel.AMAZON_NOVA_PRO: AwsModelVersion.AMAZON_NOVA_PRO,
291
317
  }
292
318
 
293
319
 
@@ -55,6 +55,7 @@ from datarobot_dome.constants import TargetType
55
55
  from datarobot_dome.guard_executor import AsyncGuardExecutor
56
56
  from datarobot_dome.pipeline.llm_pipeline import LLMPipeline
57
57
  from datarobot_dome.pipeline.vdb_pipeline import VDBPipeline
58
+ from datarobot_dome.runtime import get_runtime_parameter_value_bool
58
59
  from datarobot_dome.streaming import ModerationIterator
59
60
  from datarobot_dome.streaming import StreamingContextBuilder
60
61
 
@@ -915,13 +916,11 @@ def init(model_dir: str = os.getcwd()):
915
916
  pipeline: A Guard pipeline object required to enforce moderations while
916
917
  scoring on user data
917
918
  """
918
- disable_moderation_runtime_value = json.loads(
919
- os.environ.get(DISABLE_MODERATION_RUNTIME_PARAM_NAME, "{}")
919
+ disable_moderation_runtime_value = get_runtime_parameter_value_bool(
920
+ param_name=DISABLE_MODERATION_RUNTIME_PARAM_NAME,
921
+ default_value=False,
920
922
  )
921
- if (
922
- "payload" in disable_moderation_runtime_value
923
- and disable_moderation_runtime_value["payload"]
924
- ):
923
+ if disable_moderation_runtime_value:
925
924
  _logger.warning("Moderation is disabled via runtime parameter on the model")
926
925
  return None
927
926
 
@@ -999,6 +998,8 @@ class VdbModerationPipeline(ModerationPipeline):
999
998
  def moderation_pipeline_factory(
1000
999
  target_type: str, model_dir: str = os.getcwd()
1001
1000
  ) -> Optional[ModerationPipeline]:
1001
+ # Disable ragas tracking while loading the module.
1002
+ os.environ["RAGAS_DO_NOT_TRACK"] = "true"
1002
1003
  if target_type in TargetType.guards():
1003
1004
  pipeline = init(model_dir=model_dir)
1004
1005
  if pipeline:
datarobot_dome/guard.py CHANGED
@@ -49,11 +49,13 @@ from datarobot_dome.constants import GuardStage
49
49
  from datarobot_dome.constants import GuardTimeoutAction
50
50
  from datarobot_dome.constants import GuardType
51
51
  from datarobot_dome.constants import OOTBType
52
+ from datarobot_dome.guard_helpers import DEFAULT_OPEN_AI_API_VERSION
52
53
  from datarobot_dome.guard_helpers import ModerationDeepEvalLLM
53
54
  from datarobot_dome.guard_helpers import get_azure_openai_client
54
55
  from datarobot_dome.guard_helpers import get_chat_nvidia_llm
55
56
  from datarobot_dome.guard_helpers import get_datarobot_endpoint_and_token
56
- from datarobot_dome.guard_helpers import try_to_fallback_to_llm_gateway
57
+ from datarobot_dome.guard_helpers import get_llm_gateway_client
58
+ from datarobot_dome.guard_helpers import use_llm_gateway_inference
57
59
  from datarobot_dome.guards.guard_llm_mixin import GuardLLMMixin
58
60
 
59
61
  MAX_GUARD_NAME_LENGTH = 255
@@ -142,6 +144,7 @@ guard_intervention_trafaret = t.Dict(
142
144
  additional_guard_config_trafaret = t.Dict(
143
145
  {
144
146
  t.Key("cost", to_name="cost", optional=True): t.Or(cost_metric_trafaret, t.Null),
147
+ t.Key("tool_call", to_name="tool_call", optional=True): t.Or(t.Any(), t.Null),
145
148
  }
146
149
  )
147
150
 
@@ -484,12 +487,18 @@ class NeMoGuard(Guard, GuardLLMMixin):
484
487
  self.openai_api_base = config.get("openai_api_base")
485
488
  self.openai_deployment_id = config.get("openai_deployment_id")
486
489
  llm_id = None
490
+ credentials = None
491
+ use_llm_gateway = use_llm_gateway_inference(self._llm_type)
487
492
  try:
488
493
  self.openai_api_key = self.get_openai_api_key(config, self._llm_type)
489
494
  if self._llm_type != GuardLLMType.NIM and self.openai_api_key is None:
490
495
  raise ValueError("OpenAI API key is required for NeMo Guardrails")
491
496
 
492
497
  if self.llm_type == GuardLLMType.OPENAI:
498
+ credentials = {
499
+ "credential_type": "openai",
500
+ "api_key": self.openai_api_key,
501
+ }
493
502
  os.environ["OPENAI_API_KEY"] = self.openai_api_key
494
503
  llm = None
495
504
  elif self.llm_type == GuardLLMType.AZURE_OPENAI:
@@ -497,6 +506,12 @@ class NeMoGuard(Guard, GuardLLMMixin):
497
506
  raise ValueError("Azure OpenAI API base url is required for LLM Guard")
498
507
  if self.openai_deployment_id is None:
499
508
  raise ValueError("Azure OpenAI deployment ID is required for LLM Guard")
509
+ credentials = {
510
+ "credential_type": "azure_openai",
511
+ "api_base": self.openai_api_base,
512
+ "api_version": DEFAULT_OPEN_AI_API_VERSION,
513
+ "api_key": self.openai_api_key,
514
+ }
500
515
  azure_openai_client = get_azure_openai_client(
501
516
  openai_api_key=self.openai_api_key,
502
517
  openai_api_base=self.openai_api_base,
@@ -537,15 +552,20 @@ class NeMoGuard(Guard, GuardLLMMixin):
537
552
  raise ValueError(f"Invalid LLMType: {self.llm_type}")
538
553
 
539
554
  except Exception as e:
540
- llm = try_to_fallback_to_llm_gateway(
541
- # Currently only OPENAI and AZURE_OPENAI are supported by NeMoGuard
542
- # For Bedrock and Vertex the model in the config is actually the LLM ID
543
- # For OpenAI we use the default model defined in get_llm_gateway_client
544
- # For Azure we use the deployment ID
555
+ # no valid user credentials provided, raise if not using LLM Gateway
556
+ credentials = None
557
+ if not use_llm_gateway:
558
+ raise e
559
+
560
+ if use_llm_gateway:
561
+ # Currently only OPENAI and AZURE_OPENAI are supported by NeMoGuard
562
+ # For Bedrock and Vertex the model in the config is actually the LLM ID
563
+ # For OpenAI we use the default model defined in get_llm_gateway_client
564
+ # For Azure we use the deployment ID
565
+ llm = get_llm_gateway_client(
545
566
  llm_id=llm_id,
546
567
  openai_deployment_id=self.openai_deployment_id,
547
- llm_type=self.llm_type,
548
- e=e,
568
+ credentials=credentials,
549
569
  )
550
570
 
551
571
  # Use guard stage to determine whether to read from prompt/response subdirectory
@@ -34,6 +34,7 @@ from ragas.metrics import AgentGoalAccuracyWithoutReference
34
34
  from rouge_score import rouge_scorer
35
35
 
36
36
  from datarobot_dome.constants import AWS_MODEL_TO_AWS_MODEL_VERSION_MAP
37
+ from datarobot_dome.constants import ENABLE_LLM_GATEWAY_INFERENCE_RUNTIME_PARAM_NAME
37
38
  from datarobot_dome.constants import GOOGLE_MODEL_TO_GOOGLE_MODEL_VERSION_MAP
38
39
  from datarobot_dome.constants import LOGGER_NAME_PREFIX
39
40
  from datarobot_dome.constants import PROMPT_TOKEN_COUNT_COLUMN_NAME_FROM_USAGE
@@ -42,12 +43,13 @@ from datarobot_dome.constants import AwsModel
42
43
  from datarobot_dome.constants import GoogleModel
43
44
  from datarobot_dome.constants import GuardLLMType
44
45
  from datarobot_dome.llm import DataRobotLLM
46
+ from datarobot_dome.runtime import get_runtime_parameter_value_bool
45
47
 
46
48
  # Ideally, we want to return confidence score between 0.0 and 100.0,
47
49
  # but for ROUGE-1 guard, UI allows the user to configure value between
48
50
  # 0 and 1, so making scaling factor 1.
49
51
  SCALING_FACTOR = 1
50
- DEFAULT_OPEN_AI_API_VERSION = "2023-03-15-preview"
52
+ DEFAULT_OPEN_AI_API_VERSION = "2024-10-21"
51
53
 
52
54
  _logger = logging.getLogger(LOGGER_NAME_PREFIX + ".guard_helpers")
53
55
 
@@ -195,8 +197,10 @@ def get_llm_gateway_client(
195
197
  model: str | None = None,
196
198
  llm_id: str | None = None,
197
199
  openai_deployment_id: str | None = None,
200
+ credentials: dict | None = None,
198
201
  ) -> ChatOpenAI:
199
202
  """The LLM gateway client enables chat completions with DR provided credentials and metering.
203
+ User provided credentials are optional and passed to the completion request as json string.
200
204
 
201
205
  Providing model is always required due to openai's chat api.
202
206
  llm_id and deployment_id override model if provided.
@@ -208,7 +212,8 @@ def get_llm_gateway_client(
208
212
  model=model or "azure/gpt-4o",
209
213
  api_key=datarobot_api_token,
210
214
  base_url=f"{datarobot_endpoint}/genai/llmgw",
211
- max_retries=0, # retries are handled by the LLM Gateway
215
+ # retries are handled by the LLM Gateway
216
+ max_retries=0,
212
217
  default_headers={
213
218
  # used for metering
214
219
  "Client-Id": "moderations",
@@ -217,28 +222,34 @@ def get_llm_gateway_client(
217
222
  # optional model overrides
218
223
  "deployment_id": openai_deployment_id,
219
224
  "llm_id": llm_id,
225
+ # optional user provided credentials
226
+ "credential_json": json.dumps(credentials) if credentials else None,
220
227
  },
221
228
  )
222
229
  return client
223
230
 
224
231
 
225
- def try_to_fallback_to_llm_gateway(
226
- llm_id: str | None,
227
- openai_deployment_id: str | None,
228
- llm_type: GuardLLMType,
229
- e: Exception,
230
- ) -> ChatOpenAI:
231
- # USE the LLM gateway if its runtime parameter is available and enabled
232
- # DO NOT USE the gateway if user provided credentials are specified
233
- # which is the case if no exception was raised trying to create the LLM
234
- # DATAROBOT and NIM LLM types are not supported by the gateway
235
- if not json.loads(os.environ.get("ENABLE_LLM_GATEWAY_INFERENCE", "false")) or llm_type in [
236
- GuardLLMType.DATAROBOT,
237
- GuardLLMType.NIM,
238
- ]:
239
- raise e
240
- llm = get_llm_gateway_client(llm_id=llm_id, openai_deployment_id=openai_deployment_id)
241
- return llm
232
def use_llm_gateway_inference(llm_type: str):
    """
    Decide whether inference for the given LLM type should go through the
    LLM Gateway.

    The gateway is opt-in: it must be explicitly enabled through the
    ``ENABLE_LLM_GATEWAY_INFERENCE`` model runtime parameter, and it does
    not serve the `DATAROBOT` and `NIM` LLM types.

    Parameters
    ----------
    llm_type
        The type of the LLM used in the guard.

    Returns
    -------
    True if LLM Gateway should be used, False otherwise.
    """
    # Disabled by default; the runtime parameter has to switch it on.
    gateway_enabled = get_runtime_parameter_value_bool(
        param_name=ENABLE_LLM_GATEWAY_INFERENCE_RUNTIME_PARAM_NAME,
        default_value=False,
    )
    # These LLM types are not supported by the gateway.
    supported_type = llm_type not in (GuardLLMType.DATAROBOT, GuardLLMType.NIM)
    return gateway_enabled and supported_type
242
253
 
243
254
 
244
255
  def get_azure_openai_client(
@@ -22,11 +22,13 @@ from datarobot_dome.constants import SECRET_DEFINITION_PREFIX
22
22
  from datarobot_dome.constants import GuardLLMType
23
23
  from datarobot_dome.constants import GuardType
24
24
  from datarobot_dome.constants import OOTBType
25
+ from datarobot_dome.guard_helpers import DEFAULT_OPEN_AI_API_VERSION
25
26
  from datarobot_dome.guard_helpers import get_azure_openai_client
26
27
  from datarobot_dome.guard_helpers import get_bedrock_client
27
28
  from datarobot_dome.guard_helpers import get_datarobot_llm
29
+ from datarobot_dome.guard_helpers import get_llm_gateway_client
28
30
  from datarobot_dome.guard_helpers import get_vertex_client
29
- from datarobot_dome.guard_helpers import try_to_fallback_to_llm_gateway
31
+ from datarobot_dome.guard_helpers import use_llm_gateway_inference
30
32
 
31
33
  basic_credential_trafaret = t.Dict(
32
34
  {
@@ -156,6 +158,8 @@ class GuardLLMMixin:
156
158
  openai_api_base = config.get("openai_api_base")
157
159
  openai_deployment_id = config.get("openai_deployment_id")
158
160
  llm_id = None
161
+ credentials = None
162
+ use_llm_gateway = use_llm_gateway_inference(llm_type)
159
163
  try:
160
164
  if llm_type in [GuardLLMType.OPENAI, GuardLLMType.AZURE_OPENAI]:
161
165
  openai_api_key = self.get_openai_api_key(config, llm_type)
@@ -163,6 +167,10 @@ class GuardLLMMixin:
163
167
  raise ValueError("OpenAI API key is required for Faithfulness guard")
164
168
 
165
169
  if llm_type == GuardLLMType.OPENAI:
170
+ credentials = {
171
+ "credential_type": "openai",
172
+ "api_key": openai_api_key,
173
+ }
166
174
  os.environ["OPENAI_API_KEY"] = openai_api_key
167
175
  llm = "default"
168
176
  elif llm_type == GuardLLMType.AZURE_OPENAI:
@@ -170,6 +178,12 @@ class GuardLLMMixin:
170
178
  raise ValueError("OpenAI API base url is required for LLM Guard")
171
179
  if openai_deployment_id is None:
172
180
  raise ValueError("OpenAI deployment ID is required for LLM Guard")
181
+ credentials = {
182
+ "credential_type": "azure_openai",
183
+ "api_key": openai_api_key,
184
+ "api_base": openai_api_base,
185
+ "api_version": DEFAULT_OPEN_AI_API_VERSION,
186
+ }
173
187
  azure_openai_client = get_azure_openai_client(
174
188
  openai_api_key=openai_api_key,
175
189
  openai_api_base=openai_api_base,
@@ -182,9 +196,15 @@ class GuardLLMMixin:
182
196
  raise ValueError("Google model is required for LLM Guard")
183
197
  if config.get("google_region") is None:
184
198
  raise ValueError("Google region is required for LLM Guard")
199
+ service_account_info = self.get_google_service_account(config)
200
+ credentials = {
201
+ "credential_type": "google_vertex_ai",
202
+ "region": config["google_region"],
203
+ "service_account_info": service_account_info,
204
+ }
185
205
  llm = get_vertex_client(
186
206
  google_model=llm_id,
187
- google_service_account=self.get_google_service_account(config),
207
+ google_service_account=service_account_info,
188
208
  google_region=config["google_region"],
189
209
  )
190
210
  elif llm_type == GuardLLMType.AMAZON:
@@ -194,6 +214,13 @@ class GuardLLMMixin:
194
214
  if config.get("aws_region") is None:
195
215
  raise ValueError("AWS region is required for LLM Guard")
196
216
  credential_config = self.get_aws_account(config)
217
+ credentials = {
218
+ "credential_type": "amazon_bedrock",
219
+ "access_key_id": credential_config["aws_access_key_id"],
220
+ "secret_access_key": credential_config["aws_secret_access_key"],
221
+ "session_token": credential_config["aws_session_token"],
222
+ "region": config["aws_region"],
223
+ }
197
224
  llm = get_bedrock_client(
198
225
  aws_model=llm_id,
199
226
  aws_access_key_id=credential_config["aws_access_key_id"],
@@ -219,14 +246,19 @@ class GuardLLMMixin:
219
246
  raise ValueError(f"Invalid LLMType: {llm_type}")
220
247
 
221
248
  except Exception as e:
222
- llm = try_to_fallback_to_llm_gateway(
223
- # For Bedrock and Vertex the model in the config is actually the LLM ID
224
- # For OpenAI we use the default model defined in get_llm_gateway_client
225
- # For Azure we use the deployment ID
249
+ # no valid user credentials provided, raise if not using LLM Gateway
250
+ credentials = None
251
+ if not use_llm_gateway:
252
+ raise e
253
+
254
+ if use_llm_gateway:
255
+ # For Bedrock and Vertex the model in the config is actually the LLM ID
256
+ # For OpenAI we use the default model defined in get_llm_gateway_client
257
+ # For Azure we use the deployment ID
258
+ llm = get_llm_gateway_client(
226
259
  llm_id=llm_id,
227
260
  openai_deployment_id=openai_deployment_id,
228
- llm_type=llm_type,
229
- e=e,
261
+ credentials=credentials,
230
262
  )
231
263
 
232
264
  return llm
@@ -0,0 +1,42 @@
1
+ # ---------------------------------------------------------------------------------
2
+ # Copyright (c) 2025 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2025.
4
+ #
5
+ # DataRobot, Inc. Confidential.
6
+ # This is proprietary source code of DataRobot, Inc. and its affiliates.
7
+ #
8
+ # This file and its contents are subject to DataRobot Tool and Utility Agreement.
9
+ # For details, see
10
+ # https://www.datarobot.com/wp-content/uploads/2021/07/DataRobot-Tool-and-Utility-Agreement.pdf.
11
+ # ---------------------------------------------------------------------------------
12
+ import json
13
+ import os
14
+
15
+ from datarobot_dome.constants import RUNTIME_PARAMETER_PREFIX
16
+
17
+
18
def get_runtime_parameter_value_bool(param_name: str, default_value: bool) -> bool:
    """
    Retrieve the value of a boolean-typed model runtime parameter with the specified name.

    Runtime parameters are injected as environment variables named
    ``{RUNTIME_PARAMETER_PREFIX}{param_name}`` whose value is a JSON object
    carrying the actual value under the ``"payload"`` key.

    Parameters
    ----------
    param_name
        The name of the model runtime parameter to retrieve (without the env variable prefix).
    default_value
        The default value to return if the model runtime parameter is undefined,
        malformed, or underspecified.

    Returns
    -------
    The parsed runtime parameter value, or ``default_value`` when it cannot be parsed.
    """
    env_var_name = f"{RUNTIME_PARAMETER_PREFIX}{param_name}"
    try:
        param_body = json.loads(os.environ.get(env_var_name, "{}"))
    except json.JSONDecodeError:
        # A malformed runtime parameter must not prevent model start-up;
        # fall back to the default instead of crashing during init.
        return default_value

    # The parameter must be a JSON object with the value under "payload".
    # Anything else (empty object, scalar, list, missing key) is
    # underspecified — e.g. a JSON list containing "payload" would pass a
    # plain membership check but fail on subscripting.
    if not isinstance(param_body, dict) or "payload" not in param_body:
        return default_value

    return bool(param_body["payload"])
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: datarobot-moderations
3
- Version: 11.1.15
3
+ Version: 11.1.17
4
4
  Summary: DataRobot Monitoring and Moderation framework
5
5
  License: DataRobot Tool and Utility Agreement
6
6
  Author: DataRobot
@@ -1,13 +1,13 @@
1
1
  datarobot_dome/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
2
2
  datarobot_dome/async_http_client.py,sha256=wkB4irwvnchNGzO1bk2C_HWM-GOSB3AUn5TXKl-X0ZI,9649
3
3
  datarobot_dome/chat_helper.py,sha256=BzvtUyZSZxzOqq-5a2wQKhHhr2kMlcP1MFrHaDAeD_o,9671
4
- datarobot_dome/constants.py,sha256=mnSa8rUAha4XlsS2lwPmFCkH2RzfSL_MMkErsWHqIbA,9040
5
- datarobot_dome/drum_integration.py,sha256=nULpLYVMiS5vihfNUyuq-nvZpgXrQibQbVu2UMAscu8,42102
6
- datarobot_dome/guard.py,sha256=7T0a1gsWqVmVvEf4SLkVBi8lIRYl8PeMB7TnQGszWtc,32371
4
+ datarobot_dome/constants.py,sha256=SfZTkfW3rc7oeG2zmYiYxIkUT5dwkay1HXCusZod_-Q,10412
5
+ datarobot_dome/drum_integration.py,sha256=7gXi29t1Huhio68frMksrkWXfMq08V1Q49MmIRjzgMA,42227
6
+ datarobot_dome/guard.py,sha256=1INYx17n9ToiB5bzI-jIReUUuqkK_ucxpOx4jQLts6g,33264
7
7
  datarobot_dome/guard_executor.py,sha256=AOI8MZeZETHMoFgBePe0wa2vE9d2975MYQnEDHLZL7s,35462
8
- datarobot_dome/guard_helpers.py,sha256=YHhSUSuvxAgDdWPXiwYiHtrl-6ZlObE9n6CjYPQNSuA,16375
8
+ datarobot_dome/guard_helpers.py,sha256=7jXxRRpO41tumZFZPumpM78qD-hQ-z5yjMtIA8s9_xM,16762
9
9
  datarobot_dome/guards/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
10
- datarobot_dome/guards/guard_llm_mixin.py,sha256=ON-zuVL3xhQmXv0rFkalWrW_Q67Wwya2IQerHO8WkKU,10694
10
+ datarobot_dome/guards/guard_llm_mixin.py,sha256=VovlpNZjWIGamF4SSvLF5lzOFyApH5IoOiB_qtCmRg0,12216
11
11
  datarobot_dome/llm.py,sha256=L02OvTrflmD34-FrfXebfF-zzKTeuin7fpne1Cl5psg,5719
12
12
  datarobot_dome/metrics/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
13
13
  datarobot_dome/metrics/citation_metrics.py,sha256=q0hTMWuk6wy_jqk2UjFPON3kU94HN3W2vxr9giJ8O8E,3544
@@ -17,7 +17,8 @@ datarobot_dome/pipeline/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-Nzl
17
17
  datarobot_dome/pipeline/llm_pipeline.py,sha256=fOp_OJnQMDUJH-LKv12kEqli-EqfHjAiSTFqtxzMkhM,19942
18
18
  datarobot_dome/pipeline/pipeline.py,sha256=_pZ_4K2LMnfYCYj_ur9EwJzo3T-pbO6lFYz1O-_3uQ4,16491
19
19
  datarobot_dome/pipeline/vdb_pipeline.py,sha256=WTOGn1qe_ZvEcdlvHgeXxl2xTqp7GjfL13c6S-FmAfM,5146
20
+ datarobot_dome/runtime.py,sha256=FD8wXOweqoQVzbZMh-mucL66xT2kGxPsJUGAcJBgwxw,1468
20
21
  datarobot_dome/streaming.py,sha256=6nYvh6SoxPRLfO6GGdEoHsQuyLP9oX1lDMe8IeGo4lw,17801
21
- datarobot_moderations-11.1.15.dist-info/METADATA,sha256=zHt26VnmHpn-0cL-egKPqdcTvKPTittBNtVHLVylbHo,4827
22
- datarobot_moderations-11.1.15.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
23
- datarobot_moderations-11.1.15.dist-info/RECORD,,
22
+ datarobot_moderations-11.1.17.dist-info/METADATA,sha256=EZg5IXeC1CXbfEsc4gl2cXd2suSIjj0svvBgGVi4fGQ,4827
23
+ datarobot_moderations-11.1.17.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
24
+ datarobot_moderations-11.1.17.dist-info/RECORD,,