datarobot-genai 0.1.75__py3-none-any.whl → 0.2.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- datarobot_genai/core/agents/base.py +2 -1
- datarobot_genai/core/chat/responses.py +32 -4
- datarobot_genai/drmcp/core/dr_mcp_server.py +34 -0
- datarobot_genai/drmcp/core/dynamic_prompts/dr_lib.py +22 -80
- datarobot_genai/drmcp/core/dynamic_prompts/register.py +4 -5
- datarobot_genai/drmcp/core/mcp_instance.py +41 -2
- datarobot_genai/drmcp/core/routes.py +4 -1
- datarobot_genai/drmcp/test_utils/mcp_utils_ete.py +29 -0
- datarobot_genai/langgraph/agent.py +143 -42
- {datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/METADATA +4 -2
- {datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/RECORD +15 -15
- {datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/WHEEL +0 -0
- {datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/entry_points.txt +0 -0
- {datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/licenses/AUTHORS +0 -0
- {datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/licenses/LICENSE +0 -0

datarobot_genai/core/agents/base.py

@@ -23,6 +23,7 @@ from typing import TypedDict
 from typing import TypeVar
 from typing import cast
 
+from ag_ui.core import Event
 from openai.types.chat import CompletionCreateParams
 from ragas import MultiTurnSample
 
@@ -167,7 +168,7 @@ class UsageMetrics(TypedDict):
 
 # Canonical return type for DRUM-compatible invoke implementations
 InvokeReturn = (
-    AsyncGenerator[tuple[str, MultiTurnSample | None, UsageMetrics], None]
+    AsyncGenerator[tuple[str | Event, MultiTurnSample | None, UsageMetrics], None]
    | tuple[str, MultiTurnSample | None, UsageMetrics]
 )
 
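The widened InvokeReturn means a DRUM-compatible streaming invoke may now interleave ag_ui Event objects with plain text deltas. A minimal sketch of a conforming generator (the function name and message id are illustrative, not part of the package):

    from ag_ui.core import EventType, TextMessageContentEvent

    async def invoke_stream():  # hypothetical invoke implementation
        usage = {"total_tokens": 0, "prompt_tokens": 0, "completion_tokens": 0}
        # Plain text deltas are still valid...
        yield "Hello, ", None, usage
        # ...and structured ag_ui events may now be interleaved.
        event = TextMessageContentEvent(
            type=EventType.TEXT_MESSAGE_CONTENT, message_id="msg-1", delta="world"
        )
        yield event, None, usage
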
datarobot_genai/core/chat/responses.py

@@ -27,6 +27,10 @@ from concurrent.futures import ThreadPoolExecutor
 from typing import Any
 from typing import TypeVar
 
+from ag_ui.core import BaseEvent
+from ag_ui.core import Event
+from ag_ui.core import TextMessageChunkEvent
+from ag_ui.core import TextMessageContentEvent
 from openai.types import CompletionUsage
 from openai.types.chat import ChatCompletion
 from openai.types.chat import ChatCompletionChunk
@@ -45,6 +49,7 @@ class CustomModelChatResponse(ChatCompletion):
 
 class CustomModelStreamingResponse(ChatCompletionChunk):
     pipeline_interactions: str | None = None
+    event: Event | None = None
 
 
 def to_custom_model_chat_response(
@@ -88,7 +93,7 @@ def to_custom_model_streaming_response(
     thread_pool_executor: ThreadPoolExecutor,
     event_loop: AbstractEventLoop,
     streaming_response_generator: AsyncGenerator[
-        tuple[str, MultiTurnSample | None, dict[str, int]], None
+        tuple[str | Event, MultiTurnSample | None, dict[str, int]], None
     ],
     model: str | object | None,
 ) -> Iterator[CustomModelStreamingResponse]:
@@ -110,7 +115,7 @@ def to_custom_model_streaming_response(
     while True:
         try:
             (
-                response_text,
+                response_text_or_event,
                 pipeline_interactions,
                 usage_metrics,
             ) = thread_pool_executor.submit(
@@ -119,10 +124,10 @@ def to_custom_model_streaming_response(
             last_pipeline_interactions = pipeline_interactions
             last_usage_metrics = usage_metrics
 
-            if response_text:
+            if isinstance(response_text_or_event, str) and response_text_or_event:
                 choice = ChunkChoice(
                     index=0,
-                    delta=ChoiceDelta(role="assistant", content=response_text),
+                    delta=ChoiceDelta(role="assistant", content=response_text_or_event),
                     finish_reason=None,
                 )
                 yield CustomModelStreamingResponse(
@@ -135,6 +140,29 @@ def to_custom_model_streaming_response(
                     if usage_metrics
                     else None,
                 )
+            elif isinstance(response_text_or_event, BaseEvent):
+                content = ""
+                if isinstance(
+                    response_text_or_event, (TextMessageContentEvent, TextMessageChunkEvent)
+                ):
+                    content = response_text_or_event.delta or content
+                choice = ChunkChoice(
+                    index=0,
+                    delta=ChoiceDelta(role="assistant", content=content),
+                    finish_reason=None,
+                )
+
+                yield CustomModelStreamingResponse(
+                    id=completion_id,
+                    object="chat.completion.chunk",
+                    created=created,
+                    model=model,
+                    choices=[choice],
+                    usage=CompletionUsage.model_validate(required_usage_metrics | usage_metrics)
+                    if usage_metrics
+                    else None,
+                    event=response_text_or_event,
+                )
         except StopAsyncIteration:
             break
     event_loop.run_until_complete(streaming_response_generator.aclose())
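With the new event field on CustomModelStreamingResponse, a consumer can recover both payload kinds from one stream: text-bearing events are mirrored into delta.content, and every other event rides along on an empty-content chunk. A rough consumer sketch (handle_event is a hypothetical callback):

    def consume(chunks):  # chunks: Iterator[CustomModelStreamingResponse]
        for chunk in chunks:
            if chunk.event is not None:
                # Structured ag_ui event (tool calls, message boundaries, ...)
                handle_event(chunk.event)
            elif chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="")
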
datarobot_genai/drmcp/core/dr_mcp_server.py

@@ -115,6 +115,9 @@ class DataRobotMCPServer:
         self._mcp = mcp
         self._mcp_transport = transport
 
+        # Configure MCP server capabilities
+        self._configure_mcp_capabilities()
+
         # Initialize telemetry
         initialize_telemetry(mcp)
 
@@ -163,6 +166,37 @@ class DataRobotMCPServer:
         if transport == "streamable-http":
             register_routes(self._mcp)
 
+    def _configure_mcp_capabilities(self) -> None:
+        """Configure MCP capabilities that FastMCP doesn't expose directly.
+
+        See: https://github.com/modelcontextprotocol/python-sdk/issues/1126
+        """
+        server = self._mcp._mcp_server
+
+        # Declare prompts_changed capability (capabilities.prompts.listChanged: true)
+        server.notification_options.prompts_changed = True
+
+        # Declare experimental capabilities (experimental.dynamic_prompts: true)
+        server.experimental_capabilities = {"dynamic_prompts": {"enabled": True}}
+
+        # Patch to include experimental_capabilities (FastMCP doesn't expose this)
+        original = server.create_initialization_options
+
+        def patched(
+            notification_options: Any = None,
+            experimental_capabilities: dict[str, dict[str, Any]] | None = None,
+            **kwargs: Any,
+        ) -> Any:
+            if experimental_capabilities is None:
+                experimental_capabilities = getattr(server, "experimental_capabilities", None)
+            return original(
+                notification_options=notification_options,
+                experimental_capabilities=experimental_capabilities,
+                **kwargs,
+            )
+
+        server.create_initialization_options = patched
+
     def run(self, show_banner: bool = False) -> None:
         """Run the DataRobot MCP server synchronously."""
         try:
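A client can verify that the patched initialization actually advertises these capabilities; a rough sketch using the mcp client session (transport setup elided, attribute names as in the mcp python SDK at the time of writing):

    async def check_capabilities(session):  # session: an mcp.ClientSession
        init_result = await session.initialize()
        caps = init_result.capabilities
        # prompts.listChanged comes from notification_options.prompts_changed
        assert caps.prompts is not None and caps.prompts.listChanged
        # experimental capabilities come from the patched initialization options
        assert (caps.experimental or {}).get("dynamic_prompts", {}).get("enabled")
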
datarobot_genai/drmcp/core/dynamic_prompts/dr_lib.py

@@ -12,78 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 from collections import defaultdict
-from dataclasses import dataclass
 
 import datarobot as dr
 
 from datarobot_genai.drmcp.core.clients import get_api_client
 
-# Needed SDK version (3.10.0) is not published yet. We'll reimplement simplified version of it.
-# get_datarobot_prompt_templates = dr.genai.PromptTemplate.list()
-# DrPrompt = dr.genai.PromptTemplate
-# DrPromptVersion = dr.genai.PromptTemplateVersion
-# DrVariable = dr.genai.Variable
 
-
-@dataclass
-class DrVariable:
-    name: str
-    description: str
-
-
-@dataclass
-class DrPromptVersion:
-    id: str
-    prompt_template_id: str
-    version: int
-    prompt_text: str
-    variables: list[DrVariable]
-
-    @classmethod
-    def from_dict(cls, d: dict) -> "DrPromptVersion":
-        variables = [
-            DrVariable(name=v["name"], description=v["description"]) for v in d["variables"]
-        ]
-        return cls(
-            id=d["id"],
-            prompt_template_id=d["promptTemplateId"],
-            version=d["version"],
-            prompt_text=d["promptText"],
-            variables=variables,
-        )
-
-
-@dataclass
-class DrPrompt:
-    id: str
-    name: str
-    description: str
-
-    def get_latest_version(self) -> DrPromptVersion | None:
-        all_prompt_template_versions = get_datarobot_prompt_template_versions([self.id])
-        prompt_template_versions = all_prompt_template_versions.get(self.id)
-
-        if not prompt_template_versions:
-            return None
-        latest_version = max(prompt_template_versions, key=lambda v: v.version)
-        return latest_version
-
-    @classmethod
-    def from_dict(cls, d: dict) -> "DrPrompt":
-        return cls(id=d["id"], name=d["name"], description=d["description"])
-
-
-def get_datarobot_prompt_templates() -> list[DrPrompt]:
-    prompt_templates_data = dr.utils.pagination.unpaginate(
-        initial_url="genai/promptTemplates/", initial_params={}, client=get_api_client()
-    )
-
-    return [DrPrompt.from_dict(prompt_template) for prompt_template in prompt_templates_data]
+def get_datarobot_prompt_templates() -> list[dr.genai.PromptTemplate]:
+    try:
+        return dr.genai.PromptTemplate.list()
+    except Exception:
+        return []
 
 
 def get_datarobot_prompt_template_versions(
     prompt_template_ids: list[str],
-) -> dict[str, list[DrPromptVersion]]:
+) -> dict[str, list[dr.genai.PromptTemplateVersion]]:
+    # Still missing in SDK
     prompt_template_versions_data = dr.utils.pagination.unpaginate(
         initial_url="genai/promptTemplates/versions/",
         initial_params={
@@ -94,35 +39,32 @@ def get_datarobot_prompt_template_versions(
     prompt_template_versions = defaultdict(list)
     for prompt_template_version in prompt_template_versions_data:
         prompt_template_versions[prompt_template_version["promptTemplateId"]].append(
-            DrPromptVersion.from_dict(prompt_template_version)
+            dr.genai.PromptTemplateVersion(
+                id=prompt_template_version["id"],
+                prompt_template_id=prompt_template_version["promptTemplateId"],
+                prompt_text=prompt_template_version["promptText"],
+                commit_comment=prompt_template_version["commitComment"],
+                version=prompt_template_version["version"],
+                variables=prompt_template_version["variables"],
+                creation_date=prompt_template_version["creationDate"],
+                creation_user_id=prompt_template_version["creationUserId"],
+                user_name=prompt_template_version["userName"],
+            )
         )
     return prompt_template_versions
 
 
-def get_datarobot_prompt_template(prompt_template_id: str) -> DrPrompt | None:
-    api_client = get_api_client()
+def get_datarobot_prompt_template(prompt_template_id: str) -> dr.genai.PromptTemplate | None:
     try:
-        prompt_template_response = api_client.get(
-            f"genai/promptTemplates/{prompt_template_id}/", join_endpoint=True
-        )
-        prompt_template_json = prompt_template_response.json()
+        return dr.genai.PromptTemplate.get(prompt_template_id)
     except Exception:
         return None
 
-    return DrPrompt.from_dict(prompt_template_json)
-
 
 def get_datarobot_prompt_template_version(
     prompt_template_id: str, prompt_template_version_id: str
-) -> DrPromptVersion | None:
-    api_client = get_api_client()
+) -> dr.genai.PromptTemplateVersion | None:
     try:
-        prompt_template_version_response = api_client.get(
-            f"genai/promptTemplates/{prompt_template_id}/versions/{prompt_template_version_id}/",
-            join_endpoint=True,
-        )
-        prompt_template_version_json = prompt_template_version_response.json()
+        return dr.genai.PromptTemplateVersion.get(prompt_template_id, prompt_template_version_id)
     except Exception:
         return None
-
-    return DrPromptVersion.from_dict(prompt_template_version_json)
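With the hand-rolled DrPrompt/DrPromptVersion dataclasses removed, callers work directly with SDK objects, which is why the METADATA diff below raises the floor to datarobot>=3.10.0. Illustrative usage, assuming the attribute names match the constructor keywords shown above:

    for template in get_datarobot_prompt_templates():
        versions = get_datarobot_prompt_template_versions([template.id]).get(template.id, [])
        if versions:
            latest = max(versions, key=lambda v: v.version)
            print(template.name, latest.version, latest.prompt_text[:40])
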
datarobot_genai/drmcp/core/dynamic_prompts/register.py

@@ -18,15 +18,13 @@ from collections.abc import Callable
 from inspect import Parameter
 from inspect import Signature
 
+import datarobot as dr
 from fastmcp.prompts.prompt import Prompt
 from pydantic import Field
 
 from datarobot_genai.drmcp.core.exceptions import DynamicPromptRegistrationError
 from datarobot_genai.drmcp.core.mcp_instance import register_prompt
 
-from .dr_lib import DrPrompt
-from .dr_lib import DrPromptVersion
-from .dr_lib import DrVariable
 from .dr_lib import get_datarobot_prompt_template_versions
 from .dr_lib import get_datarobot_prompt_templates
 
@@ -57,7 +55,8 @@ async def register_prompts_from_datarobot_prompt_management() -> None:
 
 
 async def register_prompt_from_datarobot_prompt_management(
-    prompt_template: DrPrompt,
+    prompt_template: dr.genai.PromptTemplate,
+    prompt_template_version: dr.genai.PromptTemplateVersion | None = None,
 ) -> Prompt:
     """Register a single prompt.
 
@@ -173,7 +172,7 @@ def to_valid_mcp_prompt_name(s: str) -> str:
 
 
 def make_prompt_function(
-    name: str, description: str, prompt_text: str, variables: list[DrVariable]
+    name: str, description: str, prompt_text: str, variables: list[dr.genai.Variable]
 ) -> Callable:
     params = []
     for v in variables:
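make_prompt_function builds a callable whose signature is generated from the template's variables, so FastMCP can expose each variable as a named prompt argument. A condensed, self-contained sketch of that dynamic-signature technique (simplified relative to the real implementation):

    from inspect import Parameter, Signature

    def make_function(name: str, variables: list[str]):
        # One keyword-only parameter per template variable, annotated as str
        params = [
            Parameter(v, kind=Parameter.KEYWORD_ONLY, annotation=str) for v in variables
        ]

        def fn(**kwargs):
            return kwargs  # the real code renders prompt_text with these values

        fn.__name__ = name
        fn.__signature__ = Signature(params)  # inspect.signature(fn) now reports params
        return fn

    greet = make_function("greet", ["audience", "tone"])
    print(greet.__signature__)  # (*, audience: str, tone: str)
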
datarobot_genai/drmcp/core/mcp_instance.py

@@ -22,6 +22,7 @@ from fastmcp import Context
 from fastmcp import FastMCP
 from fastmcp.exceptions import NotFoundError
 from fastmcp.prompts.prompt import Prompt
+from fastmcp.server.dependencies import get_context
 from fastmcp.tools import FunctionTool
 from fastmcp.tools import Tool
 from fastmcp.utilities.types import NotSet
@@ -91,6 +92,34 @@ class TaggedFastMCP(FastMCP):
         self._deployments_map: dict[str, str] = {}
         self._prompts_map: dict[str, tuple[str, str]] = {}
 
+    async def notify_prompts_changed(self) -> None:
+        """
+        Notify connected clients that the prompt list has changed.
+
+        This method attempts to send a prompts/list_changed notification to inform
+        clients that they should refresh their prompt list.
+
+        Note: In stateless HTTP mode (default for this server), notifications may not
+        reach clients since each request is independent. This method still logs the
+        change for auditing purposes and will work if the server is configured for
+        stateful connections.
+
+        See: https://github.com/modelcontextprotocol/python-sdk/issues/710
+        """
+        logger.info("Prompt list changed - attempting to notify connected clients")
+
+        # Try to use FastMCP's built-in notification mechanism if in an MCP context
+        try:
+            context = get_context()
+            context._queue_prompt_list_changed()
+            logger.debug("Queued prompts_changed notification via MCP context")
+        except RuntimeError:
+            # No active MCP context - this is expected when called from REST API
+            logger.debug(
+                "No active MCP context for notification. "
+                "In stateless mode, clients will see changes on next request."
+            )
+
     @overload
     def tool(
         self,
@@ -286,6 +315,9 @@ class TaggedFastMCP(FastMCP):
                 f"already mapped to {existing_prompt_template_version_id}. "
                 f"Updating to version id = {prompt_template_version_id} and name = {prompt_name}"
            )
+            await self.remove_prompt_mapping(
+                prompt_template_id, existing_prompt_template_version_id
+            )
 
         self._prompts_map[prompt_template_id] = (prompt_template_version_id, prompt_name)
 
@@ -308,7 +340,7 @@ class TaggedFastMCP(FastMCP):
                 f"skipping removal."
             )
         else:
-            prompts_d = await
+            prompts_d = await self.get_prompts()
             for prompt in prompts_d.values():
                 if (
                     prompt.meta is not None
@@ -319,6 +351,9 @@ class TaggedFastMCP(FastMCP):
                     prompt.disable()
 
             self._prompts_map.pop(prompt_template_id, None)
+
+            # Notify clients that the prompt list has changed
+            await self.notify_prompts_changed()
         else:
             logger.debug(
                 f"Do not found prompt template with id = {prompt_template_id} in registry, "
@@ -526,17 +561,21 @@ async def register_prompt(
     )
 
     # Register the prompt
-    registered_prompt = mcp.add_prompt(prompt)
     if prompt_template:
         prompt_template_id, prompt_template_version_id = prompt_template
         await mcp.set_prompt_mapping(
             prompt_template_id, prompt_template_version_id, prompt_name_no_duplicate
         )
 
+    registered_prompt = mcp.add_prompt(prompt)
+
     # Verify prompt is registered
     prompts = await mcp.get_prompts()
     if not any(prompt.name == prompt_name_no_duplicate for prompt in prompts.values()):
         raise RuntimeError(f"Prompt {prompt_name_no_duplicate} was not registered successfully")
     logger.info(f"Registered prompts: {len(prompts)}")
 
+    # Notify clients that the prompt list has changed
+    await mcp.notify_prompts_changed()
+
     return registered_prompt
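As the docstring notes, the server typically runs in stateless HTTP mode, so the list_changed notification may never reach a client; the practical client-side pattern is simply to re-list prompts after a mutation. A minimal sketch:

    async def current_prompt_names(session):  # session: an initialized mcp.ClientSession
        # In stateless mode, re-listing is how clients observe registry changes.
        result = await session.list_prompts()
        return [prompt.name for prompt in result.prompts]
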
datarobot_genai/drmcp/core/routes.py

@@ -428,7 +428,10 @@ def register_routes(mcp: TaggedFastMCP) -> None:
         """Refresh prompt templates."""
         try:
             await refresh_registered_prompt_template()
-            return JSONResponse(
+            return JSONResponse(
+                status_code=HTTPStatus.OK,
+                content={"message": "Prompts refreshed successfully"},
+            )
         except Exception as e:
             return JSONResponse(
                 status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
datarobot_genai/drmcp/test_utils/mcp_utils_ete.py

@@ -16,6 +16,8 @@ import os
 from collections.abc import AsyncGenerator
 from contextlib import asynccontextmanager
 
+import aiohttp
+from aiohttp import ClientSession as HttpClientSession
 from mcp import ClientSession
 from mcp.client.streamable_http import streamablehttp_client
 
@@ -29,6 +31,11 @@ def get_dr_mcp_server_url() -> str | None:
     return os.environ.get("DR_MCP_SERVER_URL")
 
 
+def get_dr_mcp_server_http_url() -> str | None:
+    """Get DataRobot MCP server http URL."""
+    return os.environ.get("DR_MCP_SERVER_HTTP_URL")
+
+
 def get_openai_llm_client_config() -> dict[str, str]:
     """Get OpenAI LLM client configuration."""
     openai_api_key = os.environ.get("OPENAI_API_KEY")
@@ -94,3 +101,25 @@ async def ete_test_mcp_session(
         yield session
     except asyncio.TimeoutError:
         raise TimeoutError(f"Check if the MCP server is running at {get_dr_mcp_server_url()}")
+
+
+@asynccontextmanager
+async def ete_test_http_session(
+    additional_headers: dict[str, str] | None = None,
+) -> AsyncGenerator[HttpClientSession, None]:
+    """Create an HTTP session for each test that can connect to MCP custom http routes.
+
+    Parameters
+    ----------
+    additional_headers : dict[str, str], optional
+        Additional headers to include in the HTTP session (e.g., auth headers for testing).
+    """
+    headers = get_headers()
+    if additional_headers:
+        headers.update(additional_headers)
+
+    async with ete_test_mcp_session(additional_headers=additional_headers):
+        async with aiohttp.ClientSession(
+            base_url=get_dr_mcp_server_http_url(), headers=headers
+        ) as client:
+            yield client
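An end-to-end test can then exercise the server's custom HTTP routes directly; a sketch (assumes pytest-asyncio, and the route path is illustrative):

    import pytest

    @pytest.mark.asyncio
    async def test_custom_route():
        async with ete_test_http_session() as client:
            # base_url comes from DR_MCP_SERVER_HTTP_URL
            resp = await client.get("/api/health")  # hypothetical route
            assert resp.status == 200
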
datarobot_genai/langgraph/agent.py

@@ -17,6 +17,15 @@ from collections.abc import AsyncGenerator
 from typing import Any
 from typing import cast
 
+from ag_ui.core import Event
+from ag_ui.core import EventType
+from ag_ui.core import TextMessageContentEvent
+from ag_ui.core import TextMessageEndEvent
+from ag_ui.core import TextMessageStartEvent
+from ag_ui.core import ToolCallArgsEvent
+from ag_ui.core import ToolCallEndEvent
+from ag_ui.core import ToolCallResultEvent
+from ag_ui.core import ToolCallStartEvent
 from langchain.tools import BaseTool
 from langchain_core.messages import AIMessageChunk
 from langchain_core.messages import ToolMessage
@@ -158,43 +167,7 @@ class LangGraphAgent(BaseAgent[BaseTool], abc.ABC):
         # The main difference is returning a generator for streaming or a final response for sync.
         if is_streaming(completion_create_params):
             # Streaming response: yield each message as it is generated
-            async def stream_generator() -> AsyncGenerator[
-                tuple[str, MultiTurnSample | None, UsageMetrics], None
-            ]:
-                # Iterate over the graph stream. For message events, yield the content.
-                # For update events, accumulate the usage metrics.
-                events = []
-                async for _, mode, event in graph_stream:
-                    if mode == "messages":
-                        message_event: tuple[AIMessageChunk, dict[str, Any]] = event  # type: ignore[assignment]
-                        llm_token, _ = message_event
-                        yield (
-                            str(llm_token.content),
-                            None,
-                            usage_metrics,
-                        )
-                    elif mode == "updates":
-                        update_event: dict[str, Any] = event  # type: ignore[assignment]
-                        events.append(update_event)
-                        current_node = next(iter(update_event))
-                        node_data = update_event[current_node]
-                        current_usage = node_data.get("usage", {}) if node_data is not None else {}
-                        if current_usage:
-                            usage_metrics["total_tokens"] += current_usage.get("total_tokens", 0)
-                            usage_metrics["prompt_tokens"] += current_usage.get("prompt_tokens", 0)
-                            usage_metrics["completion_tokens"] += current_usage.get(
-                                "completion_tokens", 0
-                            )
-                    else:
-                        raise ValueError(f"Invalid mode: {mode}")
-
-                # Create a list of events from the event listener
-                pipeline_interactions = self.create_pipeline_interactions_from_events(events)
-
-                # yield the final response indicating completion
-                yield "", pipeline_interactions, usage_metrics
-
-            return stream_generator()
+            return self._stream_generator(graph_stream, usage_metrics)
         else:
             # Synchronous response: collect all events and return the final message
             events: list[dict[str, Any]] = [
@@ -203,6 +176,16 @@ class LangGraphAgent(BaseAgent[BaseTool], abc.ABC):
                 if mode == "updates"
             ]
 
+            # Accumulate the usage metrics from the updates
+            for update in events:
+                current_node = next(iter(update))
+                node_data = update[current_node]
+                current_usage = node_data.get("usage", {}) if node_data is not None else {}
+                if current_usage:
+                    usage_metrics["total_tokens"] += current_usage.get("total_tokens", 0)
+                    usage_metrics["prompt_tokens"] += current_usage.get("prompt_tokens", 0)
+                    usage_metrics["completion_tokens"] += current_usage.get("completion_tokens", 0)
+
             pipeline_interactions = self.create_pipeline_interactions_from_events(events)
 
             # Extract the final event from the graph stream as the synchronous response
@@ -214,14 +197,132 @@ class LangGraphAgent(BaseAgent[BaseTool], abc.ABC):
                 if node_data is not None and "messages" in node_data
                 else ""
             )
-            current_usage = node_data.get("usage", {}) if node_data is not None else {}
-            if current_usage:
-                usage_metrics["total_tokens"] += current_usage.get("total_tokens", 0)
-                usage_metrics["prompt_tokens"] += current_usage.get("prompt_tokens", 0)
-                usage_metrics["completion_tokens"] += current_usage.get("completion_tokens", 0)
 
             return response_text, pipeline_interactions, usage_metrics
 
+    async def _stream_generator(
+        self, graph_stream: AsyncGenerator[tuple[Any, str, Any], None], usage_metrics: UsageMetrics
+    ) -> AsyncGenerator[tuple[str | Event, MultiTurnSample | None, UsageMetrics], None]:
+        # Iterate over the graph stream. For message events, yield the content.
+        # For update events, accumulate the usage metrics.
+        events = []
+        current_message_id = None
+        tool_call_id = ""
+        async for _, mode, event in graph_stream:
+            if mode == "messages":
+                message_event: tuple[AIMessageChunk | ToolMessage, dict[str, Any]] = event  # type: ignore[assignment]
+                message = message_event[0]
+                if isinstance(message, ToolMessage):
+                    yield (
+                        ToolCallEndEvent(
+                            type=EventType.TOOL_CALL_END, tool_call_id=message.tool_call_id
+                        ),
+                        None,
+                        usage_metrics,
+                    )
+                    yield (
+                        ToolCallResultEvent(
+                            type=EventType.TOOL_CALL_RESULT,
+                            message_id=message.id,
+                            tool_call_id=message.tool_call_id,
+                            content=message.content,
+                            role="tool",
+                        ),
+                        None,
+                        usage_metrics,
+                    )
+                    tool_call_id = ""
+                elif isinstance(message, AIMessageChunk):
+                    if message.tool_call_chunks:
+                        # This is a tool call message
+                        for tool_call_chunk in message.tool_call_chunks:
+                            if name := tool_call_chunk.get("name"):
+                                # Its a tool call start message
+                                tool_call_id = tool_call_chunk["id"]
+                                yield (
+                                    ToolCallStartEvent(
+                                        type=EventType.TOOL_CALL_START,
+                                        tool_call_id=tool_call_id,
+                                        tool_call_name=name,
+                                        parent_message_id=message.id,
+                                    ),
+                                    None,
+                                    usage_metrics,
+                                )
+                            elif args := tool_call_chunk.get("args"):
+                                # Its a tool call args message
+                                yield (
+                                    ToolCallArgsEvent(
+                                        type=EventType.TOOL_CALL_ARGS,
+                                        # Its empty when the tool chunk is not a start message
+                                        # So we use the tool call id from a previous start message
+                                        tool_call_id=tool_call_id,
+                                        delta=args,
+                                    ),
+                                    None,
+                                    usage_metrics,
+                                )
+                    elif message.content:
+                        # Its a text message
+                        # Handle the start and end of the text message
+                        if message.id != current_message_id:
+                            if current_message_id:
+                                yield (
+                                    TextMessageEndEvent(
+                                        type=EventType.TEXT_MESSAGE_END,
+                                        message_id=current_message_id,
+                                    ),
+                                    None,
+                                    usage_metrics,
+                                )
+                            current_message_id = message.id
+                            yield (
+                                TextMessageStartEvent(
+                                    type=EventType.TEXT_MESSAGE_START,
+                                    message_id=message.id,
+                                    role="assistant",
+                                ),
+                                None,
+                                usage_metrics,
+                            )
+                        yield (
+                            TextMessageContentEvent(
+                                type=EventType.TEXT_MESSAGE_CONTENT,
+                                message_id=message.id,
+                                delta=message.content,
+                            ),
+                            None,
+                            usage_metrics,
+                        )
+                    else:
+                        raise ValueError(f"Invalid message event: {message_event}")
+            elif mode == "updates":
+                update_event: dict[str, Any] = event  # type: ignore[assignment]
+                events.append(update_event)
+                current_node = next(iter(update_event))
+                node_data = update_event[current_node]
+                current_usage = node_data.get("usage", {}) if node_data is not None else {}
+                if current_usage:
+                    usage_metrics["total_tokens"] += current_usage.get("total_tokens", 0)
+                    usage_metrics["prompt_tokens"] += current_usage.get("prompt_tokens", 0)
+                    usage_metrics["completion_tokens"] += current_usage.get("completion_tokens", 0)
+                if current_message_id:
+                    yield (
+                        TextMessageEndEvent(
+                            type=EventType.TEXT_MESSAGE_END,
+                            message_id=current_message_id,
+                        ),
+                        None,
+                        usage_metrics,
+                    )
+                    current_message_id = None
+
+        # Create a list of events from the event listener
+        pipeline_interactions = self.create_pipeline_interactions_from_events(events)
+
+        # yield the final response indicating completion
+        yield "", pipeline_interactions, usage_metrics
+
     @classmethod
     def create_pipeline_interactions_from_events(
         cls,
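Downstream consumers of the streaming agent now receive a typed event sequence rather than bare text. A rough consumption sketch over the generator returned by _stream_generator (the printing logic is illustrative):

    from ag_ui.core import TextMessageContentEvent, ToolCallStartEvent

    async def print_stream(stream):
        # stream yields tuple[str | Event, MultiTurnSample | None, UsageMetrics]
        usage = {}
        async for item, _interactions, usage in stream:
            if isinstance(item, TextMessageContentEvent):
                print(item.delta, end="")
            elif isinstance(item, ToolCallStartEvent):
                print(f"\n[tool call: {item.tool_call_name}]")
            elif isinstance(item, str) and item:
                print(item, end="")
        # The generator always ends with ("", pipeline_interactions, usage_metrics)
        print(f"\ntotal tokens: {usage.get('total_tokens', 0)}")
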
{datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: datarobot-genai
-Version: 0.1.75
+Version: 0.2.3
 Summary: Generic helpers for GenAI
 Project-URL: Homepage, https://github.com/datarobot-oss/datarobot-genai
 Author: DataRobot, Inc.
@@ -8,14 +8,16 @@ License: Apache-2.0
 License-File: AUTHORS
 License-File: LICENSE
 Requires-Python: <3.13,>=3.10
+Requires-Dist: ag-ui-protocol<0.2.0,>=0.1.9
 Requires-Dist: datarobot-drum<2.0.0,>=1.17.5
 Requires-Dist: datarobot-predict<2.0.0,>=1.13.2
-Requires-Dist: datarobot<4.0.0,>=3.
+Requires-Dist: datarobot<4.0.0,>=3.10.0
 Requires-Dist: openai<2.0.0,>=1.76.2
 Requires-Dist: opentelemetry-instrumentation-aiohttp-client<1.0.0,>=0.43b0
 Requires-Dist: opentelemetry-instrumentation-httpx<1.0.0,>=0.43b0
 Requires-Dist: opentelemetry-instrumentation-openai<1.0.0,>=0.40.5
 Requires-Dist: opentelemetry-instrumentation-requests<1.0.0,>=0.43b0
+Requires-Dist: opentelemetry-instrumentation-threading<1.0.0,>=0.43b0
 Requires-Dist: pandas<3.0.0,>=2.2.3
 Requires-Dist: pyjwt<3.0.0,>=2.10.1
 Requires-Dist: pypdf<7.0.0,>=6.1.3
{datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/RECORD

@@ -4,11 +4,11 @@ datarobot_genai/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hS
 datarobot_genai/core/custom_model.py,sha256=9rSJhKZ1Fo8BpVVLnRMUj3QV2AYUr5ADHEs0aW6s2tY,5813
 datarobot_genai/core/telemetry_agent.py,sha256=CxvoyResG3jXQ7ucU26NXCzWjWQyua-5qSYvVxpZJQg,5343
 datarobot_genai/core/agents/__init__.py,sha256=mTG_QVV5aoOWOgVA3KEq7KQLJllyxtG2ZQoq9wiUNYo,1542
-datarobot_genai/core/agents/base.py,sha256=
+datarobot_genai/core/agents/base.py,sha256=63JRi0r7L94p6QHpor4JBFu_g5nxaQkqb_I4dsfrDc0,6722
 datarobot_genai/core/chat/__init__.py,sha256=kAxp4Dc-6HIM_cdBl-3IxwzJQr13UYYQ2Zc-hMwz2F8,638
 datarobot_genai/core/chat/auth.py,sha256=6qITKTHFtESsBc2NsA6cvJf78pPUrcA5XV3Vxlhb5us,5457
 datarobot_genai/core/chat/client.py,sha256=fk8MebXa8_R33VK0_DrXCS0Fgw3wFvPEvsuubC27c3s,6639
-datarobot_genai/core/chat/responses.py,sha256=
+datarobot_genai/core/chat/responses.py,sha256=vGxTA433f2AxGVlijV6O4EghyNPJCDmEqpAK2oWnsIs,10583
 datarobot_genai/core/cli/__init__.py,sha256=B93Yb6VavoZpatrh8ltCL6YglIfR5FHgytXbO9UuxBw,733
 datarobot_genai/core/cli/agent_environment.py,sha256=BJzQoiDvZF5gW4mFE71U0yeg-l72C--kxiE-fv6W194,1662
 datarobot_genai/core/cli/agent_kernel.py,sha256=3XX58DQ6XPpWB_tn5m3iGb3XTfhZf5X3W9tc6ADieU4,7790
@@ -31,13 +31,13 @@ datarobot_genai/drmcp/core/config.py,sha256=D7bSi40Yc5J71_JxmpfppG83snbIJW9iz1J7
 datarobot_genai/drmcp/core/config_utils.py,sha256=U-aieWw7MyP03cGDFIp97JH99ZUfr3vD9uuTzBzxn7w,6428
 datarobot_genai/drmcp/core/constants.py,sha256=lUwoW_PTrbaBGqRJifKqCn3EoFacoEgdO-CpoFVrUoU,739
 datarobot_genai/drmcp/core/credentials.py,sha256=PYEUDNMVw1BoMzZKLkPVTypNkVevEPtmk3scKnE-zYg,6706
-datarobot_genai/drmcp/core/dr_mcp_server.py,sha256=
+datarobot_genai/drmcp/core/dr_mcp_server.py,sha256=peAq0TL4ZL0P6XJjwCEWNfi0OVPPSQMSRQyKFH6yjtY,14228
 datarobot_genai/drmcp/core/dr_mcp_server_logo.py,sha256=hib-nfR1SNTW6CnpFsFCkL9H_OMwa4YYyinV7VNOuLk,4708
 datarobot_genai/drmcp/core/exceptions.py,sha256=eqsGI-lxybgvWL5w4BFhbm3XzH1eU5tetwjnhJxelpc,905
 datarobot_genai/drmcp/core/logging.py,sha256=Y_hig4eBWiXGaVV7B_3wBcaYVRNH4ydptbEQhrP9-mY,3414
-datarobot_genai/drmcp/core/mcp_instance.py,sha256=
+datarobot_genai/drmcp/core/mcp_instance.py,sha256=hArS-BIdsIdRyRA21a4_ILgqqzmuRxZts-Ewgtf1H60,20917
 datarobot_genai/drmcp/core/mcp_server_tools.py,sha256=odNZKozfx0VV38SLZHw9lY0C0JM_JnRI06W3BBXnyE4,4278
-datarobot_genai/drmcp/core/routes.py,sha256=
+datarobot_genai/drmcp/core/routes.py,sha256=dqE2M0UzAyyN9vQjlyTjYW4rpju3LT039po5weuO__I,17936
 datarobot_genai/drmcp/core/routes_utils.py,sha256=vSseXWlplMSnRgoJgtP_rHxWSAVYcx_tpTv4lyTpQoc,944
 datarobot_genai/drmcp/core/server_life_cycle.py,sha256=WKGJWGxalvqxupzJ2y67Kklc_9PgpZT0uyjlv_sr5wc,3419
 datarobot_genai/drmcp/core/telemetry.py,sha256=NEkSTC1w6uQgtukLHI-sWvR4EMgInysgATcvfQ5CplM,15378
@@ -45,8 +45,8 @@ datarobot_genai/drmcp/core/tool_filter.py,sha256=tLOcG50QBvS48cOVHM6OqoODYiiS6Ke
 datarobot_genai/drmcp/core/utils.py,sha256=dSjrayWVcnC5GxQcvOIOSHaoEymPIVtG_s2ZBMlmSOw,4336
 datarobot_genai/drmcp/core/dynamic_prompts/__init__.py,sha256=y4yapzp3KnFMzSR6HlNDS4uSuyNT7I1iPBvaCLsS0sU,577
 datarobot_genai/drmcp/core/dynamic_prompts/controllers.py,sha256=AGJlKqgHRO0Kd7Gl-Ulw9KYBgzjTTFXWBvOUF-SuKUI,5454
-datarobot_genai/drmcp/core/dynamic_prompts/dr_lib.py,sha256=
-datarobot_genai/drmcp/core/dynamic_prompts/register.py,sha256=
+datarobot_genai/drmcp/core/dynamic_prompts/dr_lib.py,sha256=4j33AKmq7kQX_EE2_RWAbP8-K5KPVEvpUginTWn_MHs,2701
+datarobot_genai/drmcp/core/dynamic_prompts/register.py,sha256=2c-vBaTfu3mq_8tSFfDAzG5hG06uS9CghIC1sJxHRNw,7173
 datarobot_genai/drmcp/core/dynamic_prompts/utils.py,sha256=BZ3792AgfvYlwL0_J0MzQfGecyEA5_OKUMynEZYzCds,1136
 datarobot_genai/drmcp/core/dynamic_tools/__init__.py,sha256=0kq9vMkF7EBsS6lkEdiLibmUrghTQqosHbZ5k-V9a5g,578
 datarobot_genai/drmcp/core/dynamic_tools/register.py,sha256=3M5-F0mhUYTZJWmFDmqzsj3QAd7ut7b0kPv-JZyaTzg,9204
@@ -67,7 +67,7 @@ datarobot_genai/drmcp/core/memory_management/manager.py,sha256=gmc_SQs12YQFMWl2U
 datarobot_genai/drmcp/core/memory_management/memory_tools.py,sha256=AxzpwOlldmhhDfKZcAxaGs7Xih2SCe0XbQuXX5nQczI,6397
 datarobot_genai/drmcp/test_utils/__init__.py,sha256=y4yapzp3KnFMzSR6HlNDS4uSuyNT7I1iPBvaCLsS0sU,577
 datarobot_genai/drmcp/test_utils/integration_mcp_server.py,sha256=MdoR7r3m9uT7crodyhY69yhkrM7Thpe__BBD9lB_2oA,3328
-datarobot_genai/drmcp/test_utils/mcp_utils_ete.py,sha256=
+datarobot_genai/drmcp/test_utils/mcp_utils_ete.py,sha256=rgZkPF26YCHX2FGppWE4v22l_NQ3kLSPSUimO0tD4nM,4402
 datarobot_genai/drmcp/test_utils/mcp_utils_integration.py,sha256=0sU29Khal0CelnHBDInyTRiuPKrFFbTbIomOoUbyMhs,3271
 datarobot_genai/drmcp/test_utils/openai_llm_mcp_client.py,sha256=Va3_5c2ToZyfIsEjK2ef5d3z-FA5SE51voikvjKPt8Q,8837
 datarobot_genai/drmcp/test_utils/tool_base_ete.py,sha256=-mKHBkGkyOKQCVS2LHFhSnRofIqJBbeAPRkwizBDtTg,6104
@@ -83,7 +83,7 @@ datarobot_genai/drmcp/tools/predictive/predict_realtime.py,sha256=t7f28y_ealZoA6
 datarobot_genai/drmcp/tools/predictive/project.py,sha256=KaMDAvJY4s12j_4ybA7-KcCS1yMOj-KPIKNBgCSE2iM,2536
 datarobot_genai/drmcp/tools/predictive/training.py,sha256=kxeDVLqUh9ajDk8wK7CZRRydDK8UNuTVZCB3huUihF8,23660
 datarobot_genai/langgraph/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-datarobot_genai/langgraph/agent.py,sha256=
+datarobot_genai/langgraph/agent.py,sha256=DRnywmS9KDywyChtuIZZwNKbJs8BpC259EG_kxYbiQ8,15828
 datarobot_genai/langgraph/mcp.py,sha256=iA2_j46mZAaNaL7ntXT-LW6C-NMJkzr3VfKDDfe7mh8,2851
 datarobot_genai/llama_index/__init__.py,sha256=JEMkLQLuP8n14kNE3bZ2j08NdajnkJMfYjDQYqj7C0c,407
 datarobot_genai/llama_index/agent.py,sha256=V6ZsD9GcBDJS-RJo1tJtIHhyW69_78gM6_fOHFV-Piw,1829
@@ -93,9 +93,9 @@ datarobot_genai/nat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSu
 datarobot_genai/nat/agent.py,sha256=siBLDWAff2-JwZ8Q3iNpM_e4_IoSwG9IvY0hyEjNenw,10292
 datarobot_genai/nat/datarobot_llm_clients.py,sha256=STzAZ4OF8U-Y_cUTywxmKBGVotwsnbGP6vTojnu6q0g,9921
 datarobot_genai/nat/datarobot_llm_providers.py,sha256=aDoQcTeGI-odqydPXEX9OGGNFbzAtpqzTvHHEkmJuEQ,4963
-datarobot_genai-0.1.75.dist-info/METADATA,sha256=
-datarobot_genai-0.1.75.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-datarobot_genai-0.1.75.dist-info/entry_points.txt,sha256=CZhmZcSyt_RBltgLN_b9xasJD6J5SaDc_z7K0wuOY9Y,150
-datarobot_genai-0.1.75.dist-info/licenses/AUTHORS,sha256=isJGUXdjq1U7XZ_B_9AH8Qf0u4eX0XyQifJZ_Sxm4sA,80
-datarobot_genai-0.1.75.dist-info/licenses/LICENSE,sha256=U2_VkLIktQoa60Nf6Tbt7E4RMlfhFSjWjcJJfVC-YCE,11341
-datarobot_genai-0.1.75.dist-info/RECORD,,
+datarobot_genai-0.2.3.dist-info/METADATA,sha256=TVfYbmNgyNUZ0hv9lg-p3lrcJAGJUgmaGU6AVK6qxSE,6012
+datarobot_genai-0.2.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+datarobot_genai-0.2.3.dist-info/entry_points.txt,sha256=CZhmZcSyt_RBltgLN_b9xasJD6J5SaDc_z7K0wuOY9Y,150
+datarobot_genai-0.2.3.dist-info/licenses/AUTHORS,sha256=isJGUXdjq1U7XZ_B_9AH8Qf0u4eX0XyQifJZ_Sxm4sA,80
+datarobot_genai-0.2.3.dist-info/licenses/LICENSE,sha256=U2_VkLIktQoa60Nf6Tbt7E4RMlfhFSjWjcJJfVC-YCE,11341
+datarobot_genai-0.2.3.dist-info/RECORD,,

{datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/WHEEL
File without changes

{datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/entry_points.txt
File without changes

{datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/licenses/AUTHORS
File without changes

{datarobot_genai-0.1.75.dist-info → datarobot_genai-0.2.3.dist-info}/licenses/LICENSE
File without changes