datarobot-moderations 11.1.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,395 @@
1
+ # ---------------------------------------------------------------------------------
2
+ # Copyright (c) 2025 DataRobot, Inc. and its affiliates. All rights reserved.
3
+ # Last updated 2025.
4
+ #
5
+ # DataRobot, Inc. Confidential.
6
+ # This is proprietary source code of DataRobot, Inc. and its affiliates.
7
+ #
8
+ # This file and its contents are subject to DataRobot Tool and Utility Agreement.
9
+ # For details, see
10
+ # https://www.datarobot.com/wp-content/uploads/2021/07/DataRobot-Tool-and-Utility-Agreement.pdf.
11
+ # ---------------------------------------------------------------------------------
12
+ import logging
13
+ import time
14
+ import uuid
15
+ from collections.abc import Iterable
16
+
17
+ import pandas as pd
18
+ from openai.types.chat import ChatCompletionChunk
19
+ from openai.types.chat.chat_completion import ChatCompletion
20
+ from openai.types.chat.chat_completion_chunk import Choice
21
+ from openai.types.chat.chat_completion_chunk import ChoiceDelta
22
+
23
+ from datarobot_dome.chat_helper import add_citations_to_df
24
+ from datarobot_dome.chat_helper import build_moderations_attribute_for_completion
25
+ from datarobot_dome.chat_helper import calculate_token_counts_and_confidence_score
26
+ from datarobot_dome.chat_helper import get_response_message_and_finish_reason
27
+ from datarobot_dome.chat_helper import remove_unnecessary_columns
28
+ from datarobot_dome.chat_helper import run_postscore_guards
29
+ from datarobot_dome.constants import CHAT_COMPLETION_CHUNK_OBJECT
30
+ from datarobot_dome.constants import CITATIONS_ATTR
31
+ from datarobot_dome.constants import DATAROBOT_MODERATIONS_ATTR
32
+ from datarobot_dome.constants import LOGGER_NAME_PREFIX
33
+ from datarobot_dome.constants import MODERATION_MODEL_NAME
34
+ from datarobot_dome.constants import GuardAction
35
+ from datarobot_dome.constants import GuardStage
36
+ from datarobot_dome.constants import GuardType
37
+ from datarobot_dome.constants import OOTBType
38
+ from datarobot_dome.guard import Guard
39
+
40
+
41
+ class StreamingContext:
42
+ """Object maintains the context for later streaming requests"""
43
+
44
+ def __init__(self):
45
+ self.pipeline = None
46
+ self.prompt = None
47
+ self.association_id = None
48
+ self.prescore_df = None
49
+ self.prescore_latency = None
50
+ self.input_df = None
51
+
52
+
53
+ class StreamingContextBuilder:
54
+ def __init__(self):
55
+ self.streaming_context = StreamingContext()
56
+
57
+ def set_pipeline(self, pipeline):
58
+ self.streaming_context.pipeline = pipeline
59
+ return self
60
+
61
+ def set_prompt(self, prompt):
62
+ self.streaming_context.prompt = prompt
63
+ return self
64
+
65
+ def set_association_id(self, association_id):
66
+ self.streaming_context.association_id = association_id
67
+ return self
68
+
69
+ def set_prescore_df(self, prescore_df):
70
+ self.streaming_context.prescore_df = prescore_df
71
+ return self
72
+
73
+ def set_prescore_latency(self, prescore_latency):
74
+ self.streaming_context.prescore_latency = prescore_latency
75
+ return self
76
+
77
+ def set_input_df(self, input_df):
78
+ self.streaming_context.input_df = input_df
79
+ return self
80
+
81
+ def build(self):
82
+ return self.streaming_context
83
+
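+ # Illustrative flow (placeholder variable values, not part of the library):
+ # a StreamingContext is assembled with the builder above and then handed to
+ # the ModerationIterator below together with the LLM completion, e.g.
+ #
+ #     streaming_context = (
+ #         StreamingContextBuilder()
+ #         .set_pipeline(pipeline)
+ #         .set_prompt(prompt)
+ #         .set_association_id(association_id)
+ #         .set_prescore_df(prescore_df)
+ #         .set_prescore_latency(prescore_latency)
+ #         .set_input_df(input_df)
+ #         .build()
+ #     )
+ #     moderated_stream = ModerationIterator(streaming_context, completion)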
84
+
85
+ class ModerationIterator:
86
+ def __init__(self, streaming_context, completion):
87
+ self.logger = logging.getLogger(LOGGER_NAME_PREFIX + "." + self.__class__.__name__)
88
+ self.input_df = streaming_context.input_df
89
+ self.original_completion = completion
90
+ self.pipeline = streaming_context.pipeline
91
+ self.prescore_df = streaming_context.prescore_df
92
+ self.latency_so_far = streaming_context.prescore_latency
93
+ self.datarobot_moderations = None
94
+ self.chat_completion = self._build_streaming_chat_completion()
95
+ # Dequeue first chunk
96
+ self.chunk = next(self.chat_completion)
97
+ self.assembled_response = []
98
+ self.postscore_latency = 0
99
+ self.postscore_df_assembled = None
100
+ self.aggregated_metrics_df = None
101
+
102
+ # List of postscore guards that can work on chunks
103
+ self.postscore_guards_applied_to_chunks = []
104
+
105
+ # Collect the guards that don't need citations or the fully assembled response
106
+ for guard in self.pipeline.get_postscore_guards():
107
+ if self._guard_can_work_on_chunk(guard):
108
+ self.postscore_guards_applied_to_chunks.append(guard)
109
+
110
+ self.first_chunk = True
111
+ self.last_chunk = False
112
+
113
+ def _set_prescore_moderations_info(self, chunk):
114
+ """
115
+ Return prescore moderations information with the first chunk, so that the user
116
+ has access to this information quickly.
117
+ """
118
+ moderations = build_moderations_attribute_for_completion(self.pipeline, self.prescore_df)
119
+ setattr(chunk, DATAROBOT_MODERATIONS_ATTR, moderations)
120
+ self.first_chunk = False
121
+
122
+ def _guard_can_work_on_chunk(self, guard):
123
+ if guard.type == GuardType.OOTB and guard.ootb_type in [
124
+ OOTBType.ROUGE_1,
125
+ OOTBType.FAITHFULNESS,
126
+ ]:
127
+ return False
128
+ if guard.type == GuardType.NEMO_GUARDRAILS:
129
+ return False
130
+ return True
131
+
132
+ @staticmethod
133
+ def create_chat_completion_chunk(content, finish_reason=None, role=None, citations=None):
134
+ chunk = ChatCompletionChunk(
135
+ id=str(uuid.uuid4()),
136
+ choices=[
137
+ Choice(
138
+ delta=ChoiceDelta(content=content, role=role),
139
+ finish_reason=finish_reason,
140
+ index=0,
141
+ )
142
+ ],
143
+ created=int(time.time()),
144
+ model=MODERATION_MODEL_NAME,
145
+ object=CHAT_COMPLETION_CHUNK_OBJECT,
146
+ )
147
+ if citations:
148
+ setattr(chunk, CITATIONS_ATTR, citations)
149
+ return chunk
150
+
151
+ def _build_streaming_chat_completion(self):
152
+ if isinstance(self.original_completion, ChatCompletion):
153
+
154
+ def generator():
155
+ yield self.create_chat_completion_chunk("", role="assistant")
156
+ yield self.create_chat_completion_chunk(
157
+ self.original_completion.choices[0].message.content
158
+ )
159
+
160
+ citations = None
161
+ if hasattr(self.original_completion, CITATIONS_ATTR):
162
+ citations = self.original_completion.citations
163
+ yield self.create_chat_completion_chunk(
164
+ None,
165
+ finish_reason=self.original_completion.choices[0].finish_reason,
166
+ citations=citations,
167
+ )
168
+
169
+ return generator()
170
+
171
+ elif isinstance(self.original_completion, Iterable):
172
+ return self.original_completion
173
+
174
+ raise Exception(f"Unhandled completion type: {type(self.original_completion)}")
175
+
176
+ def __iter__(self):
177
+ return self
178
+
179
+ def __next__(self):
180
+ """
181
+ The main iterator for the streaming response.
182
+
183
+ It returns prescore guard information with the first chunk; each subsequent chunk
184
+ carries information from the postscore guards that can run on individual chunks;
185
+ and the last chunk additionally carries postscore guard information as well as
186
+ faithfulness / ROUGE guard information (if configured) computed on the assembled
187
+ response.
188
+ """
189
+ if self.last_chunk:
190
+ raise StopIteration
191
+
192
+ return_chunk = self.chunk
193
+ try:
194
+ self.chunk = next(self.chat_completion)
195
+ except StopIteration:
196
+ self.last_chunk = True
197
+
198
+ if len(self.pipeline.get_postscore_guards()) == 0:
199
+ # No postscore guards, relay the stream we get
200
+ if self.first_chunk:
201
+ self._set_prescore_moderations_info(return_chunk)
202
+ if self.last_chunk:
203
+ self.pipeline.report_custom_metrics(self.prescore_df)
204
+ return return_chunk
205
+
206
+ chunk_content = return_chunk.choices[0].delta.content
207
+ if not self.last_chunk:
208
+ if not chunk_content:
209
+ if self.first_chunk:
210
+ self._set_prescore_moderations_info(return_chunk)
211
+ return return_chunk
212
+ else:
213
+ self.assembled_response.append(chunk_content)
214
+ postscore_df = self._run_postscore_guards_on_chunk(return_chunk)
215
+ self._merge_metrics(postscore_df)
216
+ if return_chunk.choices[0].finish_reason == "content_filter":
217
+ # If moderation blocks the chunk, mark it as the last chunk; the library will
218
+ # not return any further chunks.
219
+ self.last_chunk = True
220
+ else:
221
+ if chunk_content:
222
+ # A typical OpenAI stream sets content to None in its last chunk, so we rarely
223
+ # need to run postscore guards on the last chunk itself. But if the last chunk
224
+ # does carry content, the library runs the postscore guards on that chunk and
225
+ # then again on the assembled response, which means extra latency for the last
226
+ # chunk.
227
+ #
228
+ # We explored making these runs concurrent using asyncio or threads, but that
229
+ # complicates the overall structure.
230
+ #
231
+ # Because this is not the typical case, we don't want to over-optimize it for now.
232
+ self.assembled_response.append(chunk_content)
233
+ postscore_df_chunk = self._run_postscore_guards_on_chunk(return_chunk)
234
+ else:
235
+ postscore_df_chunk = None
236
+
237
+ citations = None
238
+ if getattr(return_chunk, CITATIONS_ATTR, None):
239
+ citations = return_chunk.citations
240
+ postscore_df_assembled = self._run_postscore_guards_on_assembled_response(citations)
241
+ if postscore_df_chunk is not None:
242
+ if return_chunk.choices[0].finish_reason == "content_filter":
243
+ self._merge_metrics(postscore_df_chunk)
244
+ postscore_df = self._merge_assembled(postscore_df_chunk, postscore_df_assembled)
245
+ self.aggregated_metrics_df = postscore_df
246
+ else:
247
+ self.aggregated_metrics_df = postscore_df_assembled
248
+ postscore_df = postscore_df_assembled
249
+ else:
250
+ self.aggregated_metrics_df = postscore_df_assembled
251
+ postscore_df = postscore_df_assembled
252
+
253
+ if self.first_chunk:
254
+ moderations_df = postscore_df.merge(self.prescore_df, on=list(self.input_df.columns))
255
+ self.first_chunk = False
256
+ else:
257
+ moderations_df = postscore_df
258
+ moderations = build_moderations_attribute_for_completion(self.pipeline, moderations_df)
259
+ setattr(return_chunk, DATAROBOT_MODERATIONS_ATTR, moderations)
260
+ if self.last_chunk:
261
+ self._aggregate_guard_latencies()
262
+ self._report_metrics()
263
+ return return_chunk
264
+
265
+ def _run_postscore_guards_on_chunk(self, return_chunk):
266
+ chunk_content = return_chunk.choices[0].delta.content
267
+ response_column_name = self.pipeline.get_input_column(GuardStage.RESPONSE)
268
+
269
+ predictions_df = self.input_df.copy(deep=True)
270
+ predictions_df[response_column_name] = [chunk_content]
271
+
272
+ # Run postscore guards on the chunk content. Note that we only run the guards
273
+ # that don't need citations or the whole response (e.g. NeMo needs the latter).
274
+ postscore_df_chunk, postscore_latency_chunk = run_postscore_guards(
275
+ self.pipeline, predictions_df, postscore_guards=self.postscore_guards_applied_to_chunks
276
+ )
277
+ self.postscore_latency += postscore_latency_chunk
278
+
279
+ final_response_message, final_finish_reason = get_response_message_and_finish_reason(
280
+ self.pipeline, postscore_df_chunk, streaming=True
281
+ )
282
+
283
+ postscore_df_chunk = remove_unnecessary_columns(self.pipeline, postscore_df_chunk)
284
+ return_chunk.choices[0].delta.content = final_response_message
285
+ return_chunk.choices[0].finish_reason = final_finish_reason
286
+
287
+ return postscore_df_chunk
288
+
289
+ def _run_postscore_guards_on_assembled_response(self, citations):
290
+ if len(self.assembled_response) == 0:
291
+ response_column_name = self.pipeline.get_input_column(GuardStage.RESPONSE)
292
+ blocked_completion_column_name = f"blocked_{response_column_name}"
293
+ return pd.DataFrame({blocked_completion_column_name: [False]})
294
+
295
+ predictions_df = self.input_df.copy(deep=True)
296
+ response_column_name = self.pipeline.get_input_column(GuardStage.RESPONSE)
297
+ predictions_df[response_column_name] = "".join(self.assembled_response)
298
+ predictions_df = add_citations_to_df(citations, predictions_df)
299
+ postscore_df_assembled, postscore_latency_assembled = run_postscore_guards(
300
+ self.pipeline, predictions_df
301
+ )
302
+ self.postscore_latency += postscore_latency_assembled
303
+
304
+ calculate_token_counts_and_confidence_score(self.pipeline, postscore_df_assembled)
305
+ postscore_df_assembled["datarobot_latency"] = self.latency_so_far + self.postscore_latency
306
+
307
+ postscore_df_assembled = remove_unnecessary_columns(self.pipeline, postscore_df_assembled)
308
+ return postscore_df_assembled
309
+
310
+ def _merge_metrics(self, metrics_df):
311
+ if self.aggregated_metrics_df is None:
312
+ self.aggregated_metrics_df = metrics_df.copy(deep=True)
313
+ return
314
+
315
+ response_column_name = self.pipeline.get_input_column(GuardStage.RESPONSE)
316
+ for guard in self.postscore_guards_applied_to_chunks:
317
+ if guard.type == GuardType.MODEL:
318
+ metric_name = guard.model_info.target_name
319
+ column_name = Guard.get_stage_str(GuardStage.RESPONSE) + "_" + metric_name
320
+ # The metric value for the current chunk will be used for reporting. If the
321
+ # prompt was blocked because the metric was higher than the threshold, that
322
+ # value should show up in the tracing table and metrics.
323
+ self.aggregated_metrics_df[column_name] = metrics_df[column_name]
324
+ elif guard.type == GuardType.OOTB:
325
+ if guard.ootb_type == OOTBType.TOKEN_COUNT:
326
+ column_name = guard.get_metric_column_name(GuardStage.RESPONSE)
327
+ self.aggregated_metrics_df[column_name] += metrics_df[column_name]
328
+ else:
329
+ # Faithfulness, ROUGE-1 can't run on chunks so no merging
330
+ pass
331
+ elif guard.type == GuardType.NEMO_GUARDRAILS:
332
+ # No average score metric for NeMo
333
+ pass
334
+
335
+ if guard.has_latency_custom_metric():
336
+ latency_column_name = f"{guard.name}_latency"
337
+ # Each chunk incurs latency - so just sum it up.
338
+ self.aggregated_metrics_df[latency_column_name] += metrics_df[latency_column_name]
339
+
340
+ if guard.intervention:
341
+ enforced_column_name = self.pipeline.get_enforced_column_name(
342
+ guard, GuardStage.RESPONSE
343
+ )
344
+ # For the enforcement column, it's simply a logical OR of the enforced values
345
+ # of the previous chunks and the current chunk.
346
+ self.aggregated_metrics_df[enforced_column_name] += metrics_df[enforced_column_name]
347
+ action_column_name = f"action_{response_column_name}"
348
+ self.aggregated_metrics_df[action_column_name] = metrics_df[action_column_name]
349
+
350
+ for column_name in GuardAction.possible_column_names(response_column_name):
351
+ self.aggregated_metrics_df[column_name] += metrics_df[column_name]
352
+
353
+ def _aggregate_guard_latencies(self):
354
+ for guard in self.postscore_guards_applied_to_chunks:
355
+ if guard.has_latency_custom_metric():
356
+ # Aggregate latencies
357
+ latency_column_name = f"{guard.name}_latency"
358
+ self.pipeline.report_guard_latency(
359
+ guard, self.aggregated_metrics_df.loc[0, latency_column_name]
360
+ )
361
+
362
+ def _merge_assembled(self, postscore_df_chunk, postscore_df_assembled):
363
+ """
364
+ Merge metric values from the guards that cannot be run on individual chunks.
365
+ :param postscore_df_assembled: metrics computed on the assembled response
366
+ :return: merged postscore metrics dataframe
367
+ """
368
+ postscore_df = postscore_df_chunk.copy(deep=True)
369
+ for guard in self.pipeline.get_postscore_guards():
370
+ if not self._guard_can_work_on_chunk(guard):
371
+ metric_column_name = guard.get_metric_column_name(GuardStage.RESPONSE)
372
+ if metric_column_name in postscore_df_assembled.columns:
373
+ postscore_df[metric_column_name] = postscore_df_assembled[metric_column_name]
374
+ if guard.has_latency_custom_metric():
375
+ latency_column_name = f"{guard.name}_latency"
376
+ postscore_df[latency_column_name] = postscore_df_assembled[latency_column_name]
377
+ if guard.intervention:
378
+ enforced_column_name = self.pipeline.get_enforced_column_name(
379
+ guard, GuardStage.RESPONSE
380
+ )
381
+ postscore_df[enforced_column_name] = postscore_df_assembled[
382
+ enforced_column_name
383
+ ]
384
+
385
+ for key in ["datarobot_token_count", "datarobot_confidence_score", "datarobot_latency"]:
386
+ if key in postscore_df_assembled.columns:
387
+ postscore_df[key] = postscore_df_assembled[key]
388
+ return postscore_df
389
+
390
+ def _report_metrics(self):
391
+ self.pipeline.report_stage_latency(self.postscore_latency, GuardStage.RESPONSE)
392
+ result_df = self.aggregated_metrics_df.merge(
393
+ self.prescore_df, on=list(self.input_df.columns)
394
+ )
395
+ self.pipeline.report_custom_metrics(result_df)
@@ -0,0 +1,113 @@
1
+ Metadata-Version: 2.3
2
+ Name: datarobot-moderations
3
+ Version: 11.1.12
4
+ Summary: DataRobot Monitoring and Moderation framework
5
+ License: DataRobot Tool and Utility Agreement
6
+ Author: DataRobot
7
+ Author-email: support@datarobot.com
8
+ Requires-Python: >=3.11.5,<3.13
9
+ Classifier: License :: Other/Proprietary License
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Programming Language :: Python :: 3.12
12
+ Requires-Dist: aiohttp (>=3.9.5)
13
+ Requires-Dist: asyncio (>=3.4.3)
14
+ Requires-Dist: backoff (>=2.2.1)
15
+ Requires-Dist: datarobot (>=3.6.0)
16
+ Requires-Dist: datarobot-predict (>=1.9.6)
17
+ Requires-Dist: deepeval (>=3.0.0)
18
+ Requires-Dist: langchain (>=0.1.12)
19
+ Requires-Dist: langchain-nvidia-ai-endpoints (>=0.3.9)
20
+ Requires-Dist: langchain-openai (>=0.1.7)
21
+ Requires-Dist: llama-index (>=0.12.9)
22
+ Requires-Dist: llama-index-embeddings-azure-openai (>=0.1.6)
23
+ Requires-Dist: llama-index-llms-bedrock-converse (>=0.1.6)
24
+ Requires-Dist: llama-index-llms-fireworks (>=0.1.5)
25
+ Requires-Dist: llama-index-llms-langchain (>=0.1.3)
26
+ Requires-Dist: llama-index-llms-vertex (>=0.1.5)
27
+ Requires-Dist: nemoguardrails (>=0.9.0)
28
+ Requires-Dist: nest-asyncio (>=1.6.0)
29
+ Requires-Dist: numpy (>=1.25.0)
30
+ Requires-Dist: openai (>=1.14.3)
31
+ Requires-Dist: pandas (>=2.0.3)
32
+ Requires-Dist: ragas (>=0.2.15)
33
+ Requires-Dist: rouge-score (>=0.1.2)
34
+ Requires-Dist: tiktoken (>=0.5.1)
35
+ Requires-Dist: trafaret (>=2.1.1)
36
+ Description-Content-Type: text/markdown
37
+
38
+ # DataRobot Moderations library
39
+
40
+ This library enforces intervention on prompt and response texts according to the
41
+ guard configuration set by the user.
42
+
43
+ The library accepts the guard configuration in YAML format along with the input
44
+ prompts, and outputs a dataframe with details such as:
45
+ - whether the prompt should be blocked
46
+ - whether the completion should be blocked
47
+ - metric values obtained from the model guards
48
+ - whether the prompt or response was modified as per the modifier guard configuration
49
+
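+ For example, for a response column named `completion`, the output dataframe may
+ contain columns along the following lines (illustrative names; the exact columns
+ depend on the configured guards):
+
+ ```
+ completion                   # the (possibly moderated) response text
+ blocked_completion           # whether the completion was blocked
+ action_completion            # the guard action applied to the completion
+ datarobot_token_count        # token count computed for the completion
+ datarobot_confidence_score   # confidence score for the completion
+ datarobot_latency            # moderation latency
+ ```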
50
+
51
+ ## Architecture
52
+
53
+ The library is architected to wrap around the typical LLM prediction method.
54
+ It first runs the pre-score guards, which evaluate the prompts and enforce
55
+ moderation if necessary. Prompts that were not moderated by the library are
56
+ forwarded to the actual LLM to get their respective completions. The library then
57
+ evaluates these completions using post-score guards and enforces intervention on them.
58
+
59
+ ![](pics/img.png)
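+
+ In pseudocode, the flow looks roughly like this (a minimal sketch with hypothetical
+ helper names and column names, not the library's actual API):
+
+ ```
+ import pandas as pd
+
+ def moderated_score(input_df: pd.DataFrame, llm_predict,
+                     run_prescore_guards, run_postscore_guards) -> pd.DataFrame:
+     # 1. Pre-score guards evaluate the prompts and may block some of them.
+     prescore_df = run_prescore_guards(input_df)
+     allowed_df = input_df[~prescore_df["blocked_prompt"]]
+
+     # 2. Only prompts that were not moderated are forwarded to the actual LLM.
+     completions_df = llm_predict(allowed_df)
+
+     # 3. Post-score guards evaluate the completions and may block or modify them.
+     postscore_df = run_postscore_guards(completions_df)
+
+     # 4. The caller gets prompt-level and response-level guard results together.
+     return prescore_df.merge(postscore_df, how="left", on=list(input_df.columns))
+ ```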
60
+
61
+ ## How to build it?
62
+
63
+ The repository uses `poetry` to manage the build process and a wheel can be built using:
64
+ ```
65
+ make clean
66
+ make
67
+ ```
68
+
69
+ ## How to use it?
70
+
71
+ A generated or downloaded wheel file can be installed with pip, which will also
72
+ pull in its dependencies.
73
+ ```
74
+ pip3 install datarobot-moderations
75
+ ```
76
+
77
+ ### With [DRUM](https://github.com/datarobot/datarobot-user-models)
78
+ As described above, the library wraps DRUM's `score` method with pre-score and
79
+ post-score guards. With DRUM, the user simply runs their custom model using
80
+ `drum score` and gets the moderation library features.
81
+
82
+ ```
83
+ pip3 install datarobot-drum
84
+ drum score --verbose --logging-level info --code-dir ./ --input ./input.csv --target-type textgeneration --runtime-params-file values.yaml
85
+ ```
86
+ Please refer to the DRUM documentation on [how to define a custom inference model](https://github.com/datarobot/datarobot-user-models?tab=readme-ov-file#custom-inference-models-reference-),
87
+ which walks you through everything from assembling a custom inference model to [testing it locally](https://github.com/datarobot/datarobot-user-models/blob/master/DEFINE-INFERENCE-MODEL.md#test_inference_model_drum) using `drum score`.
88
+
89
+ ### Standalone use
90
+
91
+ However, the moderation library is not tightly coupled with DRUM, and we are
92
+ actively working towards using this library in non-DRUM use cases. [run.py](./run.py)
93
+ is an example of how to use this library in a standalone way. This example uses the
94
+ Azure OpenAI service to get LLM completions.
95
+
96
+ ```
97
+ export AZURE_OPENAI_API_KEY=<your-azure-openai-api-key>
98
+ python run.py --config ./moderation_config.yaml --input ./input.csv --azure-openai-api-base <azure-openai-base-url> --score
99
+ ```
100
+
101
+ This will output the response dataframe with information indicating which prompts
102
+ and responses were blocked or reported, why they were blocked or reported, etc.
103
+
104
+ [run.py](./run.py) also has an example of how to use this library to moderate the chat
105
+ interface. It also uses the Azure OpenAI service to get chat completions:
106
+
107
+ ```
108
+ export AZURE_OPENAI_API_KEY=<your-azure-openai-api-key>
109
+ python run.py --config ./moderation_config.yaml --input ./input_chat.csv --azure-openai-api-base <azure-openai-base-url> --chat
110
+ ```
111
+
112
+ It will output the conversation with the LLM line by line.
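+
+ When the streaming chat interface is used, the library wraps the completion in a
+ `ModerationIterator` (see `datarobot_dome/streaming.py`). The sketch below is
+ illustrative only; it assumes the moderation attribute on each chunk is named
+ `datarobot_moderations` (corresponding to `DATAROBOT_MODERATIONS_ATTR` in the code):
+
+ ```
+ for chunk in moderated_stream:  # moderated_stream is a ModerationIterator
+     delta = chunk.choices[0].delta.content or ""
+     print(delta, end="", flush=True)
+     moderations = getattr(chunk, "datarobot_moderations", None)
+     if moderations is not None:
+         # The first chunk carries pre-score guard results; the last chunk also
+         # carries post-score results computed on the fully assembled response.
+         pass
+ ```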
113
+
@@ -0,0 +1,23 @@
1
+ datarobot_dome/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
2
+ datarobot_dome/async_http_client.py,sha256=iJV6e0HZDyvIZjGkZmJEQx1NkbYRu-szNn-BixniXww,9654
3
+ datarobot_dome/chat_helper.py,sha256=BzvtUyZSZxzOqq-5a2wQKhHhr2kMlcP1MFrHaDAeD_o,9671
4
+ datarobot_dome/constants.py,sha256=fYBgkAQBRnwXyJIO1BBfb0Fe12Sg74gtOQ4WpQQwBXc,9059
5
+ datarobot_dome/drum_integration.py,sha256=LzcoRNWofLLCFktJ1tY7IZ3C0adMrlGM1WVqCOfuthM,40691
6
+ datarobot_dome/guard.py,sha256=afcJSSo509aHHvM6nm-QTKzQjuWE7VzgpihenDaAf3w,29921
7
+ datarobot_dome/guard_executor.py,sha256=CMa2IIP6Uhu2lN1DDvWhVky40BnE37AdKJkifE4vVNU,34063
8
+ datarobot_dome/guard_helpers.py,sha256=wPWFuCTGmD-dxFx8CcTG0UkahW2ouHrzuQykVlg5rzw,16435
9
+ datarobot_dome/guards/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
10
+ datarobot_dome/guards/guard_llm_mixin.py,sha256=ON-zuVL3xhQmXv0rFkalWrW_Q67Wwya2IQerHO8WkKU,10694
11
+ datarobot_dome/llm.py,sha256=L02OvTrflmD34-FrfXebfF-zzKTeuin7fpne1Cl5psg,5719
12
+ datarobot_dome/metrics/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
13
+ datarobot_dome/metrics/citation_metrics.py,sha256=q0hTMWuk6wy_jqk2UjFPON3kU94HN3W2vxr9giJ8O8E,3544
14
+ datarobot_dome/metrics/factory.py,sha256=7caa8paI9LuFXDgguXdC4on28V7IwwIsKJT2Z-Aps8A,2187
15
+ datarobot_dome/metrics/metric_scorer.py,sha256=mGxW3NNP93LpbpOiX3MeYyd0YEEjTPE8WVYMGS4SWoY,2516
16
+ datarobot_dome/pipeline/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
17
+ datarobot_dome/pipeline/llm_pipeline.py,sha256=fOp_OJnQMDUJH-LKv12kEqli-EqfHjAiSTFqtxzMkhM,19942
18
+ datarobot_dome/pipeline/pipeline.py,sha256=_pZ_4K2LMnfYCYj_ur9EwJzo3T-pbO6lFYz1O-_3uQ4,16491
19
+ datarobot_dome/pipeline/vdb_pipeline.py,sha256=WTOGn1qe_ZvEcdlvHgeXxl2xTqp7GjfL13c6S-FmAfM,5146
20
+ datarobot_dome/streaming.py,sha256=6nYvh6SoxPRLfO6GGdEoHsQuyLP9oX1lDMe8IeGo4lw,17801
21
+ datarobot_moderations-11.1.12.dist-info/METADATA,sha256=k6c3fCxxtxF6MUWboup1UNhwJbbzZk95dXe-v-4NkvA,4639
22
+ datarobot_moderations-11.1.12.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
23
+ datarobot_moderations-11.1.12.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: poetry-core 2.1.3
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any