datarobot-moderations 11.2.1__py3-none-any.whl → 11.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -667,7 +667,7 @@ def __get_otel_values(guards_list, stage, result_df):
     for guard in guards_list:
         if not guard.has_average_score_custom_metric():
             continue
-        guard_metric_column_name = guard.get_metric_column_name(stage)
+        guard_metric_column_name = guard.metric_column_name
         if guard_metric_column_name not in result_df.columns:
            _logger.warning(f"Missing column: {guard_metric_column_name} in result_df")
            continue
datarobot_dome/guard.py CHANGED
@@ -23,6 +23,7 @@ from llama_index.core.evaluation import FaithfulnessEvaluator
 from nemoguardrails import LLMRails
 from nemoguardrails import RailsConfig
 from ragas.llms import LangchainLLMWrapper
+from ragas.llms import LlamaIndexLLMWrapper
 from ragas.metrics import AgentGoalAccuracyWithoutReference

 from datarobot_dome.constants import AGENT_GOAL_ACCURACY_COLUMN_NAME
@@ -230,6 +231,52 @@ moderation_config_trafaret = t.Dict(
 )


+def get_metric_column_name(
+    guard_type: GuardType,
+    ootb_type: OOTBType | None,
+    stage: GuardStage,
+    model_guard_target_name: str | None = None,
+    metric_name: str | None = None,
+) -> str:
+    """Gets the metric column name. Note that this function gets used in buzok code. If you update
+    it, please also update the moderation library in the buzok worker image.
+    """
+    if guard_type == GuardType.MODEL:
+        if model_guard_target_name is None:
+            raise ValueError(
+                "For the model guard type, a valid model_guard_target_name has to be provided."
+            )
+        metric_result_key = Guard.get_stage_str(stage) + "_" + model_guard_target_name
+    elif guard_type == GuardType.OOTB:
+        if ootb_type is None:
+            raise ValueError("For the OOTB type, a valid OOTB guard type has to be provided.")
+        elif ootb_type == OOTBType.TOKEN_COUNT:
+            metric_result_key = Guard.get_stage_str(stage) + "_" + TOKEN_COUNT_COLUMN_NAME
+        elif ootb_type == OOTBType.ROUGE_1:
+            metric_result_key = Guard.get_stage_str(stage) + "_" + ROUGE_1_COLUMN_NAME
+        elif ootb_type == OOTBType.FAITHFULNESS:
+            metric_result_key = Guard.get_stage_str(stage) + "_" + FAITHFULLNESS_COLUMN_NAME
+        elif ootb_type == OOTBType.AGENT_GOAL_ACCURACY:
+            metric_result_key = AGENT_GOAL_ACCURACY_COLUMN_NAME
+        elif ootb_type == OOTBType.CUSTOM_METRIC:
+            if metric_name is None:
+                raise ValueError(
+                    "For the custom metric type, a valid metric_name has to be provided."
+                )
+            metric_result_key = Guard.get_stage_str(stage) + "_" + metric_name
+        elif ootb_type == OOTBType.COST:
+            metric_result_key = COST_COLUMN_NAME
+        elif ootb_type == OOTBType.TASK_ADHERENCE:
+            metric_result_key = TASK_ADHERENCE_SCORE_COLUMN_NAME
+        else:
+            raise ValueError("The provided OOTB type is not implemented.")
+    elif guard_type == GuardType.NEMO_GUARDRAILS:
+        metric_result_key = Guard.get_stage_str(stage) + "_" + NEMO_GUARD_COLUMN_NAME
+    else:
+        raise ValueError("The provided guard type is not implemented.")
+    return metric_result_key
+
+
 class Guard(ABC):
     def __init__(self, config: dict, stage=None):
         self._name = config["name"]
@@ -244,6 +291,13 @@ class Guard(ABC):
         self._faas_url = config.get("faas_url")
         self._copy_citations = config["copy_citations"]
         self.is_agentic = config.get("is_agentic", False)
+        self.metric_column_name = get_metric_column_name(
+            config["type"],
+            config.get("ootb_type"),
+            self._stage,
+            config.get("model_info", {}).get("target_name"),
+            config["name"],
+        )

         if config.get("intervention"):
             self.intervention = GuardIntervention(config["intervention"])
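
The two hunks above replace the per-class `get_metric_column_name` overrides (deleted in the hunks that follow) with a single module-level helper, which `Guard.__init__` now calls once to precompute `self.metric_column_name`. A minimal usage sketch, not part of the diff; the import paths for the enums are assumptions (guard.py imports other names from `datarobot_dome.constants`, so the enums plausibly live there):

    # Hedged sketch: direct use of the new helper. Import paths are assumptions.
    from datarobot_dome.constants import GuardStage, GuardType, OOTBType
    from datarobot_dome.guard import get_metric_column_name

    # Before 11.2.3 every call site asked the guard object:
    #     metric_column_name = guard.get_metric_column_name(stage)
    # Now the name is precomputed in Guard.__init__ and read as an attribute:
    #     metric_column_name = guard.metric_column_name

    # The standalone helper can also be called without a Guard instance,
    # which is why it can be shared with the buzok worker per its docstring:
    column = get_metric_column_name(
        guard_type=GuardType.OOTB,
        ootb_type=OOTBType.TOKEN_COUNT,
        stage=GuardStage.RESPONSE,
    )
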
@@ -448,11 +502,6 @@ class ModelGuard(Guard):
     def model_info(self):
         return self._model_info

-    def get_metric_column_name(self, stage):
-        if self.model_info is None:
-            raise NotImplementedError("Missing model_info for model guard")
-        return self.get_stage_str(stage) + "_" + self._model_info.target_name
-
     def get_span_column_name(self, _):
         if self.model_info is None:
             raise NotImplementedError("Missing model_info for model guard")
@@ -580,9 +629,6 @@ class NeMoGuard(Guard, GuardLLMMixin):
         """No average score metrics for NemoGuard's"""
         return False

-    def get_metric_column_name(self, stage):
-        return self.get_stage_str(stage) + "_" + NEMO_GUARD_COLUMN_NAME
-
     @property
     def nemo_llm_rails(self):
         return self._nemo_llm_rails
@@ -601,16 +647,6 @@ class OOTBGuard(Guard):
         """Latency is not tracked for token counts guards"""
         return self._ootb_type != OOTBType.TOKEN_COUNT

-    def get_metric_column_name(self, stage):
-        if self._ootb_type == OOTBType.TOKEN_COUNT:
-            return self.get_stage_str(stage) + "_" + TOKEN_COUNT_COLUMN_NAME
-        elif self._ootb_type == OOTBType.ROUGE_1:
-            return self.get_stage_str(stage) + "_" + ROUGE_1_COLUMN_NAME
-        elif self._ootb_type == OOTBType.CUSTOM_METRIC:
-            return self.get_stage_str(stage) + "_" + self.name
-        else:
-            raise NotImplementedError(f"No metric column name defined for {self._ootb_type} guard")
-
     def get_span_column_name(self, _):
         if self._ootb_type == OOTBType.TOKEN_COUNT:
             return TOKEN_COUNT_COLUMN_NAME
@@ -640,9 +676,6 @@ class OOTBCostMetric(OOTBGuard):
         self.output_unit = cost_config["output_unit"]
         self.output_multiplier = self.output_price / self.output_unit

-    def get_metric_column_name(self, _):
-        return COST_COLUMN_NAME
-
     def get_average_score_custom_metric_name(self, _):
         return f"Total cost in {self.currency}"

@@ -681,9 +714,6 @@ class FaithfulnessGuard(OOTBGuard, GuardLLMMixin):
         Settings.embed_model = None
         self._evaluator = FaithfulnessEvaluator()

-    def get_metric_column_name(self, stage):
-        return self.get_stage_str(stage) + "_" + FAITHFULLNESS_COLUMN_NAME
-
     @property
     def faithfulness_evaluator(self):
         return self._evaluator
@@ -705,12 +735,12 @@ class AgentGoalAccuracyGuard(OOTBGuard, GuardLLMMixin):
         # Default LLM Type for Agent Goal Accuracy is set to Azure OpenAI
         self._llm_type = config.get("llm_type", GuardLLMType.AZURE_OPENAI)
         llm = self.get_llm(config, self._llm_type)
-        evaluator_llm = LangchainLLMWrapper(llm)
+        if self._llm_type == GuardLLMType.AZURE_OPENAI:
+            evaluator_llm = LangchainLLMWrapper(llm)
+        else:
+            evaluator_llm = LlamaIndexLLMWrapper(llm)
         self.scorer = AgentGoalAccuracyWithoutReference(llm=evaluator_llm)

-    def get_metric_column_name(self, _):
-        return AGENT_GOAL_ACCURACY_COLUMN_NAME
-
     @property
     def accuracy_scorer(self):
         return self.scorer
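
ragas needs an adapter matched to the evaluator LLM's framework: `LangchainLLMWrapper` for LangChain chat models, `LlamaIndexLLMWrapper` for LlamaIndex LLMs. Since `get_llm` now returns a LlamaIndex `OpenAI()` object for non-Azure OpenAI guards (see the guard_llm_mixin hunk further down), only the Azure path still goes through LangChain. A hedged sketch of the selection in isolation; `wrap_for_ragas` is a hypothetical name:

    # Sketch only; mirrors the branch above. Both wrapper classes exist in ragas.llms.
    from ragas.llms import LangchainLLMWrapper, LlamaIndexLLMWrapper

    def wrap_for_ragas(llm, uses_langchain: bool):
        # Pick the ragas adapter that matches the LLM object's framework;
        # handing a LlamaIndex LLM to LangchainLLMWrapper would fail at scoring time.
        return LangchainLLMWrapper(llm) if uses_langchain else LlamaIndexLLMWrapper(llm)
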
@@ -735,9 +765,6 @@ class TaskAdherenceGuard(OOTBGuard, GuardLLMMixin):
         deepeval_llm = ModerationDeepEvalLLM(llm)
         self.scorer = TaskCompletionMetric(model=deepeval_llm, include_reason=True)

-    def get_metric_column_name(self, _):
-        return TASK_ADHERENCE_SCORE_COLUMN_NAME
-
     @property
     def task_adherence_scorer(self):
         return self.scorer
@@ -113,7 +113,7 @@ class AsyncGuardExecutor:
         if guard.has_latency_custom_metric():
             self.pipeline.report_guard_latency(guard, latency)
         if guard.has_average_score_custom_metric():
-            metric_column_name = guard.get_metric_column_name(stage)
+            metric_column_name = guard.metric_column_name
             if metric_column_name in df.columns:
                 span.set_attribute(
                     guard.get_span_attribute_name(stage),
@@ -222,7 +222,7 @@ class AsyncGuardExecutor:
             # and "Response_toxicity_toxic_PREDICTION", if toxicity is configured for both
             # prompts and responses
             copy_df.rename(
-                columns={metric_column: guard.get_metric_column_name(stage)},
+                columns={metric_column: guard.metric_column_name},
                 inplace=True,
             )
         except Exception as ex:
@@ -359,7 +359,7 @@ class AsyncGuardExecutor:
         else:
             prompt_column_name = self.pipeline.get_input_column(GuardStage.PROMPT)
             response_column_name = self.pipeline.get_input_column(GuardStage.RESPONSE)
-        metric_column_name = guard.get_metric_column_name(stage)
+        metric_column_name = guard.metric_column_name

         try:
             copy_df[metric_column_name] = copy_df.apply(
@@ -407,7 +407,7 @@ class AsyncGuardExecutor:
         else:
             prompt_column_name = self.pipeline.get_input_column(GuardStage.PROMPT)
             response_column_name = self.pipeline.get_input_column(GuardStage.RESPONSE)
-        metric_column_name = guard.get_metric_column_name(stage)
+        metric_column_name = guard.metric_column_name

         try:
             copy_df[metric_column_name] = copy_df.apply(
@@ -454,7 +454,7 @@ class AsyncGuardExecutor:
         else:
             prompt_column_name = self.pipeline.get_input_column(GuardStage.PROMPT)
             response_column_name = self.pipeline.get_input_column(GuardStage.RESPONSE)
-        metric_column_name = guard.get_metric_column_name(stage)
+        metric_column_name = guard.metric_column_name

         try:
             copy_df[metric_column_name] = copy_df.apply(
@@ -500,7 +500,7 @@ class AsyncGuardExecutor:
             intervene = False
         else:
             input_column = self.pipeline.get_input_column(stage)
-        metric_column_name = guard.get_metric_column_name(stage)
+        metric_column_name = guard.metric_column_name
         copy_df[metric_column_name] = copy_df.apply(
             lambda x: get_rouge_1_score(
                 scorer=self.pipeline.rouge_scorer,
@@ -517,7 +517,7 @@ class AsyncGuardExecutor:

         prompt_column_name = self.pipeline.get_input_column(GuardStage.PROMPT)
         response_column_name = self.pipeline.get_input_column(GuardStage.RESPONSE)
-        metric_column_name = guard.get_metric_column_name(stage)
+        metric_column_name = guard.metric_column_name
         if (
             PROMPT_TOKEN_COUNT_COLUMN_NAME_FROM_USAGE not in copy_df.columns
             or RESPONSE_TOKEN_COUNT_COLUMN_NAME_FROM_USAGE not in copy_df.columns
@@ -539,7 +539,7 @@ class AsyncGuardExecutor:
         if not isinstance(guard, OOTBGuard):
             raise ValueError(f"Guard object should be of type OOTBGuard, got: {type(guard)}")
         input_column = self.pipeline.get_input_column(stage)
-        metric_column_name = guard.get_metric_column_name(stage)
+        metric_column_name = guard.metric_column_name
         intervene = self._should_intervene(guard)

         if guard.ootb_type == OOTBType.TOKEN_COUNT:
@@ -594,7 +594,7 @@ class AsyncGuardExecutor:
             raise ValueError(f"Guard object should be of type NeMoGuard, got: {type(guard)}")

         input_column = self.pipeline.get_input_column(stage)
-        metric_column_name = guard.get_metric_column_name(stage)
+        metric_column_name = guard.metric_column_name
         intervene = self._should_intervene(guard)

         try:
@@ -19,6 +19,7 @@ import tiktoken
 from deepeval.metrics import TaskCompletionMetric
 from deepeval.models import DeepEvalBaseLLM
 from deepeval.test_case import LLMTestCase
+from langchain_core.language_models import BaseLanguageModel
 from langchain_nvidia_ai_endpoints import ChatNVIDIA
 from langchain_nvidia_ai_endpoints import Model
 from langchain_nvidia_ai_endpoints import register_model
@@ -441,11 +442,21 @@ class ModerationDeepEvalLLM(DeepEvalBaseLLM):
         return self.llm

     def generate(self, prompt: str) -> str:
-        return self.llm.invoke(prompt).content
+        if isinstance(self.llm, BaseLanguageModel):
+            # Langchain LLM
+            return self.llm.invoke(prompt).content
+        else:
+            # LlamaIndex LLM
+            return self.llm.complete(prompt)

     async def a_generate(self, prompt: str) -> str:
-        res = await self.llm.ainvoke(prompt)
-        return res.content
+        if isinstance(self.llm, BaseLanguageModel):
+            # Langchain LLM
+            res = await self.llm.ainvoke(prompt)
+            return res.content
+        else:
+            res = await self.llm.acomplete(prompt)
+            return res.text

     def get_model_name(self):
         return "DeepEval LLM for Moderation"
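
`ModerationDeepEvalLLM` now accepts either framework's LLM and dispatches on type: LangChain models implement `invoke`/`ainvoke` and return messages carrying a `.content` attribute, while LlamaIndex LLMs implement `complete`/`acomplete` and return a `CompletionResponse` whose text lives in `.text` (note that the sync branch above returns the response object itself, relying on its string conversion). A self-contained sketch of the same dispatch; `generate_text` is a hypothetical name:

    # Hedged sketch of the isinstance-based dispatch used above.
    from langchain_core.language_models import BaseLanguageModel

    def generate_text(llm, prompt: str) -> str:
        if isinstance(llm, BaseLanguageModel):
            return llm.invoke(prompt).content  # LangChain: message -> .content
        return llm.complete(prompt).text       # LlamaIndex: CompletionResponse -> .text
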
@@ -14,6 +14,7 @@ import os

 import datarobot as dr
 import trafaret as t
+from llama_index.llms.openai import OpenAI

 from datarobot_dome.constants import AWS_ACCOUNT_SECRET_DEFINITION_SUFFIX
 from datarobot_dome.constants import GOOGLE_SERVICE_ACCOUNT_SECRET_DEFINITION_SUFFIX
@@ -172,7 +173,7 @@ class GuardLLMMixin:
                 "api_key": openai_api_key,
             }
             os.environ["OPENAI_API_KEY"] = openai_api_key
-            llm = "default"
+            llm = OpenAI()
         elif llm_type == GuardLLMType.AZURE_OPENAI:
             if openai_api_base is None:
                 raise ValueError("OpenAI API base url is required for LLM Guard")
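
With `OPENAI_API_KEY` exported just above, this branch can hand back a concrete LlamaIndex `OpenAI` client (which reads the key from the environment) instead of the `"default"` resolver string, so downstream consumers such as `LlamaIndexLLMWrapper` and `ModerationDeepEvalLLM` receive an object with `complete`/`acomplete`. A hedged usage sketch with a placeholder key:

    # Sketch only: the llama_index OpenAI client picks up OPENAI_API_KEY itself.
    import os
    from llama_index.llms.openai import OpenAI

    os.environ["OPENAI_API_KEY"] = "sk-placeholder"  # set from guard config in practice
    llm = OpenAI()
    # llm.complete("ping").text would return the completion string.
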
@@ -229,13 +230,6 @@ class GuardLLMMixin:
                 aws_region=config["aws_region"],
             )
         elif llm_type == GuardLLMType.DATAROBOT:
-            if config["type"] == GuardType.OOTB and config["ootb_type"] in [
-                OOTBType.AGENT_GOAL_ACCURACY,
-                OOTBType.TASK_ADHERENCE,
-            ]:
-                # DataRobot LLM does not implement generate / agenerate yet
-                # so can't support it for Agent Goal Accuracy
-                raise NotImplementedError
             if config.get("deployment_id") is None:
                 raise ValueError("Deployment ID is required for LLM Guard")
             deployment = dr.Deployment.get(config["deployment_id"])
@@ -304,7 +304,7 @@ class LLMPipeline(Pipeline):
         metric_list = [
             (
                 guard.get_average_score_custom_metric_name(guard.stage),
-                guard.get_metric_column_name(guard.stage),
+                guard.metric_column_name,
             )
         ]
         if intervention_action:
@@ -323,7 +323,7 @@ class ModerationIterator:
                 self.aggregated_metrics_df[column_name] = metrics_df[column_name]
             elif guard.type == GuardType.OOTB:
                 if guard.ootb_type == OOTBType.TOKEN_COUNT:
-                    column_name = guard.get_metric_column_name(GuardStage.RESPONSE)
+                    column_name = guard.metric_column_name
                     self.aggregated_metrics_df[column_name] += metrics_df[column_name]
                 else:
                     # Faithfulness, ROUGE-1 can't run on chunks so no merging
@@ -368,7 +368,7 @@ class ModerationIterator:
         postscore_df = postscore_df_chunk.copy(deep=True)
         for guard in self.pipeline.get_postscore_guards():
             if not self._guard_can_work_on_chunk(guard):
-                metric_column_name = guard.get_metric_column_name(GuardStage.RESPONSE)
+                metric_column_name = guard.metric_column_name
                 if metric_column_name in postscore_df_assembled.columns:
                     postscore_df[metric_column_name] = postscore_df_assembled[metric_column_name]
             if guard.has_latency_custom_metric():
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: datarobot-moderations
-Version: 11.2.1
+Version: 11.2.3
 Summary: DataRobot Monitoring and Moderation framework
 License: DataRobot Tool and Utility Agreement
 Author: DataRobot
@@ -15,7 +15,7 @@ Requires-Dist: aiohttp (>=3.9.5)
 Requires-Dist: backoff (>=2.2.1)
 Requires-Dist: datarobot (>=3.6.0)
 Requires-Dist: datarobot-predict (>=1.9.6)
-Requires-Dist: deepeval (==2.7.9)
+Requires-Dist: deepeval (>=3.3.5)
 Requires-Dist: langchain (>=0.1.12)
 Requires-Dist: langchain-nvidia-ai-endpoints (>=0.3.9)
 Requires-Dist: langchain-openai (>=0.1.7)
@@ -2,23 +2,23 @@ datarobot_dome/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,58
 datarobot_dome/async_http_client.py,sha256=wkB4irwvnchNGzO1bk2C_HWM-GOSB3AUn5TXKl-X0ZI,9649
 datarobot_dome/chat_helper.py,sha256=BzvtUyZSZxzOqq-5a2wQKhHhr2kMlcP1MFrHaDAeD_o,9671
 datarobot_dome/constants.py,sha256=vM2_JkXbn4dkWARCqxNfLriSo0E05LDXVrwNktptpuc,10416
-datarobot_dome/drum_integration.py,sha256=HresblJwlCk_sRnWReWQWeZMg5rYzKTA2hjmy1Rcn6U,40553
-datarobot_dome/guard.py,sha256=1INYx17n9ToiB5bzI-jIReUUuqkK_ucxpOx4jQLts6g,33264
-datarobot_dome/guard_executor.py,sha256=AOI8MZeZETHMoFgBePe0wa2vE9d2975MYQnEDHLZL7s,35462
-datarobot_dome/guard_helpers.py,sha256=ajxm-w7MS7eN5DMMO-jbbzjcOYMZ-LvhO53n2NI5_Fk,16773
+datarobot_dome/drum_integration.py,sha256=BnhAP-D4AaEeh4ferZ-qXnORuWQzYzw9qKAZUTZZnJU,40542
+datarobot_dome/guard.py,sha256=xJds9hcbUaS-KD5nC1mn0GiPdBrileFUu6BuTAjDNuY,34668
+datarobot_dome/guard_executor.py,sha256=ox5_jOHcqMaxaaagIYJJHhCwEI7Wg-rUEiu5rutsfVU,35363
+datarobot_dome/guard_helpers.py,sha256=jfu8JTWCcxu4WD1MKxeP1n53DeebY3SSuP-t5sWyV1U,17187
 datarobot_dome/guards/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
-datarobot_dome/guards/guard_llm_mixin.py,sha256=VovlpNZjWIGamF4SSvLF5lzOFyApH5IoOiB_qtCmRg0,12216
+datarobot_dome/guards/guard_llm_mixin.py,sha256=OIjOHeIAwJiM8BQOfqj1fY2jy-jJfc_CNToYrzG_6xk,11871
 datarobot_dome/llm.py,sha256=L02OvTrflmD34-FrfXebfF-zzKTeuin7fpne1Cl5psg,5719
 datarobot_dome/metrics/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
 datarobot_dome/metrics/citation_metrics.py,sha256=l2mnV1gz7nQeJ_yfaS4dcP3DFWf0p5QIBnKQ6shLnw4,4652
 datarobot_dome/metrics/factory.py,sha256=7caa8paI9LuFXDgguXdC4on28V7IwwIsKJT2Z-Aps8A,2187
 datarobot_dome/metrics/metric_scorer.py,sha256=uJ_IJRw7ZFHueg8xjsaXbt0ypO7JiydZ0WapCp96yng,2540
 datarobot_dome/pipeline/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
-datarobot_dome/pipeline/llm_pipeline.py,sha256=g7PAiLOMADr2DQFrtg2NrUj4u_tcvnoiJXrBR8xWsmY,18789
+datarobot_dome/pipeline/llm_pipeline.py,sha256=DMZ4gu88MiSSEQtshDyHOzT3R2Seuf8UqZ7A36QHj3M,18772
 datarobot_dome/pipeline/pipeline.py,sha256=7UmvrZtNxTGewpgM4cf2oThHPoJSarEU1Dyp7xEsASU,17401
 datarobot_dome/pipeline/vdb_pipeline.py,sha256=q3c_Z-hGUqhH6j6n8VpS3wZiBIkWgpRDsBnyJyZhiw4,9855
 datarobot_dome/runtime.py,sha256=FD8wXOweqoQVzbZMh-mucL66xT2kGxPsJUGAcJBgwxw,1468
-datarobot_dome/streaming.py,sha256=6nYvh6SoxPRLfO6GGdEoHsQuyLP9oX1lDMe8IeGo4lw,17801
-datarobot_moderations-11.2.1.dist-info/METADATA,sha256=fEyM5I3z0qS9dT2ofZ3J8UAN_ybm2SBF_46mifEHIpA,4742
-datarobot_moderations-11.2.1.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-datarobot_moderations-11.2.1.dist-info/RECORD,,
+datarobot_dome/streaming.py,sha256=DkvKEH0yN0aPEWMTAjMFJB3Kx4iLGdjUMQU1pAplbeg,17751
+datarobot_moderations-11.2.3.dist-info/METADATA,sha256=dzpTYxhAXg-NEm8Rrko8U8qvbQncQoGw93a9ZhWV3jo,4742
+datarobot_moderations-11.2.3.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+datarobot_moderations-11.2.3.dist-info/RECORD,,