datarobot-moderations 11.2.4__py3-none-any.whl → 11.2.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- datarobot_dome/constants.py +1 -1
- datarobot_dome/drum_integration.py +27 -20
- datarobot_dome/pipeline/llm_pipeline.py +45 -5
- datarobot_dome/pipeline/pipeline.py +53 -22
- datarobot_dome/pipeline/vdb_pipeline.py +1 -1
- {datarobot_moderations-11.2.4.dist-info → datarobot_moderations-11.2.6.dist-info}/METADATA +2 -2
- {datarobot_moderations-11.2.4.dist-info → datarobot_moderations-11.2.6.dist-info}/RECORD +8 -8
- {datarobot_moderations-11.2.4.dist-info → datarobot_moderations-11.2.6.dist-info}/WHEEL +0 -0
datarobot_dome/constants.py
CHANGED
@@ -74,8 +74,8 @@ PROMPT_TOKEN_COUNT_COLUMN_NAME_FROM_USAGE = "prompt_token_count_from_usage"
 RESPONSE_TOKEN_COUNT_COLUMN_NAME_FROM_USAGE = "response_token_count_from_usage"

 SPAN_PREFIX = "datarobot.guard"
-DATAROBOT_EXTRA_BODY_PREFIX = "datarobot_"
 DATAROBOT_ASSOCIATION_ID_FIELD_NAME = "datarobot_association_id"
+DATAROBOT_METRICS_DICT_FIELD_NAME = "datarobot_metrics"


 class TargetType(str, Enum):
datarobot_dome/drum_integration.py
CHANGED
@@ -42,7 +42,7 @@ from datarobot_dome.constants import AGENTIC_PIPELINE_INTERACTIONS_ATTR
 from datarobot_dome.constants import CHAT_COMPLETION_OBJECT
 from datarobot_dome.constants import CITATIONS_ATTR
 from datarobot_dome.constants import DATAROBOT_ASSOCIATION_ID_FIELD_NAME
-from datarobot_dome.constants import
+from datarobot_dome.constants import DATAROBOT_METRICS_DICT_FIELD_NAME
 from datarobot_dome.constants import DATAROBOT_MODERATIONS_ATTR
 from datarobot_dome.constants import DISABLE_MODERATION_RUNTIME_PARAM_NAME
 from datarobot_dome.constants import LLM_BLUEPRINT_ID_ATTR
@@ -721,23 +721,24 @@ def report_otel_evaluation_set_metric(pipeline, result_df):

 def filter_extra_body(
     completion_create_params: CompletionCreateParams,
-) -> tuple[CompletionCreateParams,
+) -> tuple[CompletionCreateParams, dict]:
     """
     completion_create_params is a typed dict of a few standard fields,
     and arbitrary fields from extra_body.
-
-
+    If "datarobot_metrics" is in extra_body, process it here.
+    Save its value only if it is a dict as expected.
     :param completion_create_params: the chat completion params from OpenAI client via DRUM
-    :return: filtered completion_create_params
+    :return: filtered completion_create_params; dict of {name: value} for "datarobot_" fields
     """
-    datarobot_extra_body_params =
-
-
-    ]
-    for name in our_param_names:
+    datarobot_extra_body_params = {}
+    name = DATAROBOT_METRICS_DICT_FIELD_NAME
+    if name in completion_create_params:
         value = completion_create_params[name]
-
-
+        _logger.debug("found DataRobot metrics in extra_body: %s", f"{name}={value}")
+        if isinstance(value, dict):
+            datarobot_extra_body_params = copy.deepcopy(value)
+        else:
+            _logger.warning("DataRobot metrics in extra_body is not a dict: %s", f"{name}={value}")
         completion_create_params.pop(name, None)
     return completion_create_params, datarobot_extra_body_params

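For orientation, the new filter_extra_body() path is driven by what a caller places in extra_body of a chat completion request. A minimal sketch, assuming the standard OpenAI Python client pointed at a DRUM-served deployment (the endpoint, model name, and metric names below are hypothetical):

    # Hypothetical client call; only the extra_body keys matter to moderation.
    from openai import OpenAI

    client = OpenAI(base_url="https://example.datarobot.com/deployment/", api_key="<api-token>")
    completion = client.chat.completions.create(
        model="my-deployed-llm",
        messages=[{"role": "user", "content": "Hello"}],
        extra_body={
            # consumed by filter_association_id() in guard_chat_wrapper
            "datarobot_association_id": "chat-000123",
            # consumed by filter_extra_body(); must be a dict, otherwise it is dropped with a warning
            "datarobot_metrics": {"user_rating": 4, "app_latency_ms": 132.5},
        },
    )

With such a request, filter_extra_body() pops "datarobot_metrics" from the params and returns {"user_rating": 4, "app_latency_ms": 132.5} as the extra-body metrics dict.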
@@ -770,9 +771,11 @@ def guard_chat_wrapper(
     # if association ID was included in extra_body, extract field name and value
     completion_create_params, eb_assoc_id_value = filter_association_id(completion_create_params)

-    #
-
+    # extract any fields mentioned in "datarobot_metrics" to send as custom metrics later
+    completion_create_params, chat_extra_body_params = filter_extra_body(completion_create_params)

+    # define all pipeline-based and guard-based custom metrics (but not those from extra_body)
+    # note: this is usually partially done at pipeline init; see delayed_custom_metric_creation
     pipeline.get_new_metrics_payload()

     # the chat request is not a dataframe, but we'll build a DF internally for moderation.
@@ -795,6 +798,10 @@ def guard_chat_wrapper(
     if association_id:
         data[association_id_column_name] = [association_id]

+    # report any metrics from extra_body. They are not tied to a prompt or response phase.
+    _logger.debug("Report extra_body params as custom metrics")
+    pipeline.report_custom_metrics_from_extra_body(association_id, chat_extra_body_params)
+
     # ==================================================================
     # Step 1: Prescore Guards processing
     #
@@ -804,11 +811,6 @@ def guard_chat_wrapper(
     _logger.debug(filtered_df)
     _logger.debug(f"Pre Score Guard Latency: {prescore_latency} sec")

-    # todo future: add extra_body parameters to custom metrics reporting
-    # _logger.debug("Add extra_body params as custom metrics")
-    # for param in chat_extra_body_params:
-    #     _logger.debug(f"Future: add extra_body param: {param}")
-
     blocked_prompt_column_name = f"blocked_{prompt_column_name}"
     if prescore_df.loc[0, blocked_prompt_column_name]:
         pipeline.report_custom_metrics(prescore_df)
@@ -974,6 +976,8 @@ class ModerationPipeline:
     Base class to simplify interactions with DRUM.
     This class is not used outside of testing;
     moderation_pipeline_factory() will select the LLM or VDB subclass instead.
+    Also: Pipeline and ModerationPipeline are separate classes (not in the same hierarchy).
+    However, LlmModerationPipeline includes LLMPipeline by composition.
     """

     def score(self, input_df: pd.DataFrame, model, drum_score_fn, **kwargs):
@@ -1010,7 +1014,10 @@ class LlmModerationPipeline(ModerationPipeline):
         association_id=None,
         **kwargs,
     ):
-        """
+        """
+        Calls the standard guard chat function.
+        See PythonModelAdapter.chat() in DRUM, which calls chat() here.
+        """
         return guard_chat_wrapper(
             completion_create_params,
             model,
datarobot_dome/pipeline/llm_pipeline.py
CHANGED
@@ -197,17 +197,17 @@ class LLMPipeline(Pipeline):

         if guard.has_average_score_custom_metric():
             metric_def = self._get_average_score_metric_definition(guard)
-            self.
+            self.add_custom_metric_definition(metric_def, True)

         if guard.has_latency_custom_metric():
             metric_def = guard.get_latency_custom_metric()
-            self.
+            self.add_custom_metric_definition(metric_def, False)

         if intervention_action:
             # Enforced metric for all kinds of guards, as long as they have intervention
             # action defined - even for token count
             metric_def = guard.get_enforced_custom_metric(guard_stage, intervention_action)
-            self.
+            self.add_custom_metric_definition(metric_def, True)

     def _add_default_custom_metrics(self):
         """Default custom metrics"""
@@ -219,14 +219,14 @@ class LLMPipeline(Pipeline):
             postscore_guard_latency_custom_metric,
             score_latency,
         ]:
-            self.
+            self.add_custom_metric_definition(metric_def, False)

         # These metrics report with an association-id
         for metric_def in [
             get_blocked_custom_metric(GuardStage.PROMPT),
             get_blocked_custom_metric(GuardStage.RESPONSE),
         ]:
-            self.
+            self.add_custom_metric_definition(metric_def, True)

     def _add_guard_to_pipeline(self, guard):
         if guard.stage == GuardStage.PROMPT:
@@ -380,6 +380,46 @@ class LLMPipeline(Pipeline):
         buckets = self._add_guard_specific_custom_metrics(row, self.get_postscore_guards())
         payload["buckets"].extend(buckets)

+    def report_custom_metrics_from_extra_body(
+        self, association_id: str, extra_params: dict
+    ) -> None:
+        """
+        Add any key-value pairs extracted from extra_body as custom metrics.
+        :param association_id: Association ID of the chat request
+        :param extra_params: a dict of {"name": value} for all extra_body parameters found
+        """
+        # If no association ID is defined for deployment, custom metrics will not be processed
+        if self._association_id_column_name is None:
+            return
+        if not extra_params:
+            return  # nothing to send
+        payload = {"buckets": []}
+        for name, value in extra_params.items():
+            if name in self.custom_metric_map:
+                # In case of name collision:
+                # the extra_body metric will _not_ override the other moderation metric
+                self._logger.warning(
+                    "extra_body custom metric name is already in use in moderation; "
+                    f"will not be sent: {name}"
+                )
+                continue
+            if name not in self.custom_metric_names_to_ids:
+                self._logger.warning(f"extra_body custom metric ID not in map: {name}")
+                continue
+            metric_id = self.custom_metric_names_to_ids.get(name)
+            if not metric_id:
+                # this should not be possible, as the name/id information
+                # is taken directly from DataRobot API
+                self._logger.warning(f"extra_body custom metric has missing ID: {name}")
+                continue
+            payload["buckets"].append(
+                self.custom_metric_individual_payload(
+                    metric_id=metric_id, value=value, association_id=association_id
+                )
+            )
+        self._logger.debug(f"Sending custom metrics payload from extra_body: {payload}")
+        self.upload_custom_metrics(payload)
+
     def report_custom_metrics(self, result_df):
         if self.delayed_custom_metric_creation:
             # Flag is not set yet, so no point reporting custom metrics
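A minimal usage sketch of the new method (the association ID and metric names are illustrative; it assumes the deployment already defines custom metrics with these names):

    # pipeline is an LLMPipeline; the dict normally comes from filter_extra_body()
    pipeline.report_custom_metrics_from_extra_body(
        association_id="chat-000123",
        extra_params={"user_rating": 4, "app_latency_ms": 132.5},
    )
    # Names that collide with moderation-owned metrics, or that are unknown to the
    # deployment, are skipped with a warning instead of being uploaded.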
datarobot_dome/pipeline/pipeline.py
CHANGED
@@ -41,7 +41,6 @@ class Pipeline:

     def __init__(self, async_http_timeout_sec=DEFAULT_GUARD_PREDICTION_TIMEOUT_IN_SEC):
         self._logger = logging.getLogger(LOGGER_NAME_PREFIX + "." + self.__class__.__name__)
-        self.custom_metric = {}
         self._deployment = None
         self._association_id_column_name = None
         self._datarobot_url = None
@@ -54,6 +53,7 @@ class Pipeline:
         self._custom_metrics_bulk_upload_url = None
         self.aggregate_custom_metric = None
         self.custom_metric_map = dict()
+        self.custom_metric_names_to_ids = dict()
         self.delayed_custom_metric_creation = False
         self.upload_custom_metrics_tasks = set()

@@ -77,6 +77,13 @@ class Pipeline:
             self._logger.warning(f"Missing DataRobot API Token, {self.common_message}")
             return

+        self._deployment_id = os.environ.get("MLOPS_DEPLOYMENT_ID", None)
+        if self._deployment_id is None:
+            self._logger.warning(
+                f"DataRobot deployment id not exported (MLOPS_DEPLOYMENT_ID), {self.common_message}"
+            )
+            return
+
         # This is regular / default DataRobot Client
         self.dr_client = dr.Client(endpoint=self._datarobot_url, token=self._datarobot_api_token)
         self._headers = {
@@ -91,7 +98,6 @@ class Pipeline:
         moderations for
         :return:
         """
-        self._deployment_id = os.environ.get("MLOPS_DEPLOYMENT_ID", None)
         if self._deployment_id is None:
             self._logger.warning(f'Custom Model workshop "test" mode?, {self.common_message}')
             return
@@ -172,13 +178,17 @@
         self.create_custom_metrics()
         self.delayed_custom_metric_creation = False

-    def
+    def add_custom_metric_definition(
         self, metric_definition: dict[str, Any], requires_association_id: bool, **kwargs
     ) -> None:
         """
         Adds an entry to the `custom_metric_map`.
+        Only 2 functions should write to this map:
+        * this function -- links the custom metric definition to its name
+        * create_custom_metrics() -- queries DR for the object ID and links it to the name

         NOTE: the kwargs allow implementations to add their own specialized values.
+        Currently only VDBPipeline calls this with kwargs.
         """
         name = metric_definition["name"]
         self.custom_metric_map[name] = {
@@ -187,12 +197,41 @@
             **kwargs,
         }

+    def lookup_custom_metric_ids(self):
+        """
+        The deployment's list of custom metrics is known when the pipeline is created.
+        The complete set of guard metrics is also known at that time.
+        However, the extra_body metrics needed are not known until guard_chat_wrapper
+        parses extra_body.
+        For that reason, read and cache all the metric name/id pairs at pipeline create time.
+        Side effect: updates self.custom_metric_names_to_ids
+        """
+        if self.custom_metric_names_to_ids:
+            self._logger.debug("lookup_custom_metric_ids(): IDs were already read; skipping")
+            return
+
+        # Manually paginate; the dmm list_custom_metrics does not implement pagination
+        custom_metrics_list = []
+        offset, limit = 0, 50
+        while True:
+            response_list = self.dr_client.get(
+                f"deployments/{self._deployment_id}/customMetrics/?offset={offset}&limit={limit}"
+            ).json()
+            custom_metrics_list.extend(response_list["data"])
+            offset += response_list["count"]
+            if response_list["next"] is None:
+                break
+
+        self.custom_metric_names_to_ids = {m["name"]: m["id"] for m in custom_metrics_list}
+
     def create_custom_metrics(self):
         """
         Creates all the custom-metrics in the DR app for an active deployment.

         Updates the `custom_metric_map` with id's to insure the appropriate data
         is put in place for reporting.
+
+        Every custom metric we want to use must already exist by name in the map.
         """
         cleanup_metrics_list = list()
         for index, (metric_name, custom_metric) in enumerate(self.custom_metric_map.items()):
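For reference, lookup_custom_metric_ids() only relies on the "data", "count", and "next" keys of each page, plus the "name" and "id" of each entry. An illustrative final page (IDs and names hypothetical; real entries carry additional fields):

    # Shape assumed by the pagination loop above; values are illustrative only.
    page = {
        "count": 2,    # items returned in this page
        "next": None,  # None on the last page, so the loop breaks
        "data": [
            {"id": "66b0c1d2e3f4a5b6c7d8e9f0", "name": "user_rating"},
            {"id": "66b0c1d2e3f4a5b6c7d8e9f1", "name": "app_latency_ms"},
        ],
    }
    # custom_metric_names_to_ids then maps "user_rating" / "app_latency_ms" to their IDs.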
@@ -255,26 +294,18 @@
         # Now query all the metrics and get their custom metric ids. Specifically,
         # required in case a metric is duplicated, in which case, we don't have its
         # id in the loop above
-
-
-        #
-
-
-
-
-
-
-
-
-            if response_list["next"] is None:
-                break
-
-        for metric in custom_metrics_list:
-            metric_name = metric["name"]
-            if metric_name not in self.custom_metric_map:
-                self._logger.error(f"Metric '{metric_name}' exists at DR but not in moderation")
+        self.lookup_custom_metric_ids()
+
+        # assign IDs to the "metric by name" maps so we can upload by ID later
+        for metric_name, metric_id in self.custom_metric_names_to_ids.items():
+            if metric_name in self.custom_metric_map:
+                self.custom_metric_map[metric_name]["id"] = metric_id
+            else:
+                self._logger.warning(
+                    f"Metric '{metric_name}' exists at DR but not in moderation; "
+                    "no moderation value will be reported for it"
+                )
                 continue
-            self.custom_metric_map[metric_name]["id"] = metric["id"]

         # These are the metrics we couldn't create - so, don't track them
         for metric_name in cleanup_metrics_list:
datarobot_dome/pipeline/vdb_pipeline.py
CHANGED
@@ -70,7 +70,7 @@ class VDBPipeline(Pipeline):

         # Metric list so far does not need association id for reporting
         for metric_def, per_row, score_type in metric_list:
-            self.
+            self.add_custom_metric_definition(metric_def, per_row, scorer_type=score_type)

     def create_scorers(self):
         """
{datarobot_moderations-11.2.4.dist-info → datarobot_moderations-11.2.6.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: datarobot-moderations
-Version: 11.2.4
+Version: 11.2.6
 Summary: DataRobot Monitoring and Moderation framework
 License: DataRobot Tool and Utility Agreement
 Author: DataRobot
@@ -19,7 +19,7 @@ Requires-Dist: deepeval (>=3.3.5)
 Requires-Dist: langchain (>=0.1.12)
 Requires-Dist: langchain-nvidia-ai-endpoints (>=0.3.9)
 Requires-Dist: langchain-openai (>=0.1.7)
-Requires-Dist: llama-index (>=0.
+Requires-Dist: llama-index (>=0.13.0)
 Requires-Dist: llama-index-embeddings-azure-openai (>=0.1.6)
 Requires-Dist: llama-index-llms-bedrock-converse (>=0.1.6)
 Requires-Dist: llama-index-llms-langchain (>=0.1.3)
{datarobot_moderations-11.2.4.dist-info → datarobot_moderations-11.2.6.dist-info}/RECORD
CHANGED
@@ -1,8 +1,8 @@
 datarobot_dome/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
 datarobot_dome/async_http_client.py,sha256=cQFoSI2ovt0Kyk4XWQPXod5PAfA-ZPkjLYVWQZhDGDE,9809
 datarobot_dome/chat_helper.py,sha256=BzvtUyZSZxzOqq-5a2wQKhHhr2kMlcP1MFrHaDAeD_o,9671
-datarobot_dome/constants.py,sha256=
-datarobot_dome/drum_integration.py,sha256=
+datarobot_dome/constants.py,sha256=jvgpHa3Wh_nZVZmfU-6ab8FHnKNW3KxOPYIIEb_oS6U,10662
+datarobot_dome/drum_integration.py,sha256=NWTYJWGPM_z8RQYXteEOWQkCVBXzJ5XRZEWJYeUdHf8,45656
 datarobot_dome/guard.py,sha256=xJds9hcbUaS-KD5nC1mn0GiPdBrileFUu6BuTAjDNuY,34668
 datarobot_dome/guard_executor.py,sha256=ox5_jOHcqMaxaaagIYJJHhCwEI7Wg-rUEiu5rutsfVU,35363
 datarobot_dome/guard_helpers.py,sha256=jfu8JTWCcxu4WD1MKxeP1n53DeebY3SSuP-t5sWyV1U,17187
@@ -14,11 +14,11 @@ datarobot_dome/metrics/citation_metrics.py,sha256=l2mnV1gz7nQeJ_yfaS4dcP3DFWf0p5
 datarobot_dome/metrics/factory.py,sha256=7caa8paI9LuFXDgguXdC4on28V7IwwIsKJT2Z-Aps8A,2187
 datarobot_dome/metrics/metric_scorer.py,sha256=uJ_IJRw7ZFHueg8xjsaXbt0ypO7JiydZ0WapCp96yng,2540
 datarobot_dome/pipeline/__init__.py,sha256=B5Rx8_CNCNsOpxBbRj27XOXCfRZmvmrAR-NzlzIKnDw,583
-datarobot_dome/pipeline/llm_pipeline.py,sha256=
-datarobot_dome/pipeline/pipeline.py,sha256=
-datarobot_dome/pipeline/vdb_pipeline.py,sha256=
+datarobot_dome/pipeline/llm_pipeline.py,sha256=PRJ7t5Bc8S2ZbD0yK8ztdTD1hPC7Yo8IvhFlWEkMkmU,20810
+datarobot_dome/pipeline/pipeline.py,sha256=m8m_QtoQNwJ5U-bbJGdtlmAbdI-UpobcMCuRz-U_H_Y,19052
+datarobot_dome/pipeline/vdb_pipeline.py,sha256=zt5d_41oJjdT8qOtvpgz-l5uvImwKE9f6pQsAU_TdR4,9866
 datarobot_dome/runtime.py,sha256=FD8wXOweqoQVzbZMh-mucL66xT2kGxPsJUGAcJBgwxw,1468
 datarobot_dome/streaming.py,sha256=DkvKEH0yN0aPEWMTAjMFJB3Kx4iLGdjUMQU1pAplbeg,17751
-datarobot_moderations-11.2.
-datarobot_moderations-11.2.
-datarobot_moderations-11.2.
+datarobot_moderations-11.2.6.dist-info/METADATA,sha256=U20ej2pA1_79GwOl4f7qShgB9Q4F_zb-phsOBTITOF0,4741
+datarobot_moderations-11.2.6.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+datarobot_moderations-11.2.6.dist-info/RECORD,,
{datarobot_moderations-11.2.4.dist-info → datarobot_moderations-11.2.6.dist-info}/WHEEL
File without changes