datarobot-genai 0.2.37__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. datarobot_genai/core/agents/__init__.py +1 -1
  2. datarobot_genai/core/agents/base.py +5 -2
  3. datarobot_genai/core/chat/responses.py +6 -1
  4. datarobot_genai/core/utils/auth.py +188 -31
  5. datarobot_genai/crewai/__init__.py +1 -4
  6. datarobot_genai/crewai/agent.py +150 -17
  7. datarobot_genai/crewai/events.py +11 -4
  8. datarobot_genai/drmcp/__init__.py +4 -2
  9. datarobot_genai/drmcp/core/config.py +21 -1
  10. datarobot_genai/drmcp/core/mcp_instance.py +5 -49
  11. datarobot_genai/drmcp/core/routes.py +108 -13
  12. datarobot_genai/drmcp/core/tool_config.py +16 -0
  13. datarobot_genai/drmcp/core/utils.py +110 -0
  14. datarobot_genai/drmcp/test_utils/tool_base_ete.py +41 -26
  15. datarobot_genai/drmcp/tools/clients/gdrive.py +2 -0
  16. datarobot_genai/drmcp/tools/clients/microsoft_graph.py +141 -0
  17. datarobot_genai/drmcp/tools/clients/perplexity.py +173 -0
  18. datarobot_genai/drmcp/tools/clients/tavily.py +199 -0
  19. datarobot_genai/drmcp/tools/confluence/tools.py +43 -94
  20. datarobot_genai/drmcp/tools/gdrive/tools.py +44 -133
  21. datarobot_genai/drmcp/tools/jira/tools.py +19 -41
  22. datarobot_genai/drmcp/tools/microsoft_graph/tools.py +201 -32
  23. datarobot_genai/drmcp/tools/perplexity/__init__.py +0 -0
  24. datarobot_genai/drmcp/tools/perplexity/tools.py +117 -0
  25. datarobot_genai/drmcp/tools/predictive/data.py +1 -9
  26. datarobot_genai/drmcp/tools/predictive/deployment.py +0 -8
  27. datarobot_genai/drmcp/tools/predictive/deployment_info.py +91 -117
  28. datarobot_genai/drmcp/tools/predictive/model.py +0 -21
  29. datarobot_genai/drmcp/tools/predictive/predict_realtime.py +3 -0
  30. datarobot_genai/drmcp/tools/predictive/project.py +3 -19
  31. datarobot_genai/drmcp/tools/predictive/training.py +1 -19
  32. datarobot_genai/drmcp/tools/tavily/__init__.py +13 -0
  33. datarobot_genai/drmcp/tools/tavily/tools.py +141 -0
  34. datarobot_genai/langgraph/agent.py +10 -2
  35. datarobot_genai/llama_index/__init__.py +1 -1
  36. datarobot_genai/llama_index/agent.py +284 -5
  37. datarobot_genai/nat/agent.py +17 -6
  38. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/METADATA +3 -1
  39. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/RECORD +43 -40
  40. datarobot_genai/crewai/base.py +0 -159
  41. datarobot_genai/drmcp/core/tool_filter.py +0 -117
  42. datarobot_genai/llama_index/base.py +0 -299
  43. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/WHEEL +0 -0
  44. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/entry_points.txt +0 -0
  45. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/licenses/AUTHORS +0 -0
  46. {datarobot_genai-0.2.37.dist-info → datarobot_genai-0.3.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,299 +0,0 @@
1
- # Copyright 2025 DataRobot, Inc. and its affiliates.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
- """
16
- Base class for LlamaIndex-based agents.
17
-
18
- Provides a standard ``invoke`` that runs an AgentWorkflow, collects events,
19
- and converts them into pipeline interactions. Subclasses provide the workflow
20
- and response extraction logic.
21
- """
22
-
23
- from __future__ import annotations
24
-
25
- import abc
26
- import inspect
27
- from collections.abc import AsyncGenerator
28
- from typing import Any
29
-
30
- from llama_index.core.tools import BaseTool
31
- from openai.types.chat import CompletionCreateParams
32
- from ragas import MultiTurnSample
33
-
34
- from datarobot_genai.core.agents.base import BaseAgent
35
- from datarobot_genai.core.agents.base import InvokeReturn
36
- from datarobot_genai.core.agents.base import UsageMetrics
37
- from datarobot_genai.core.agents.base import default_usage_metrics
38
- from datarobot_genai.core.agents.base import extract_user_prompt_content
39
- from datarobot_genai.core.agents.base import is_streaming
40
-
41
- from .agent import create_pipeline_interactions_from_events
42
- from .mcp import load_mcp_tools
43
-
44
-
45
- class LlamaIndexAgent(BaseAgent[BaseTool], abc.ABC):
46
- """Abstract base agent for LlamaIndex workflows."""
47
-
48
- def __init__(self, *args: Any, **kwargs: Any) -> None:
49
- super().__init__(*args, **kwargs)
50
- self._mcp_tools: list[Any] = []
51
-
52
- def set_mcp_tools(self, tools: list[Any]) -> None:
53
- """Set MCP tools for this agent."""
54
- self._mcp_tools = tools
55
-
56
- @property
57
- def mcp_tools(self) -> list[Any]:
58
- """Return the list of MCP tools available to this agent.
59
-
60
- Subclasses can use this to wire tools into LlamaIndex agents during
61
- workflow construction inside ``build_workflow``.
62
- """
63
- return self._mcp_tools
64
-
65
- @abc.abstractmethod
66
- def build_workflow(self) -> Any:
67
- """Return an AgentWorkflow instance ready to run."""
68
- raise NotImplementedError
69
-
70
- @abc.abstractmethod
71
- def extract_response_text(self, result_state: Any, events: list[Any]) -> str:
72
- """Extract final response text from workflow state and/or events."""
73
- raise NotImplementedError
74
-
75
- def make_input_message(self, completion_create_params: CompletionCreateParams) -> str:
76
- """Create an input string for the workflow from the user prompt."""
77
- user_prompt_content = extract_user_prompt_content(completion_create_params)
78
- return str(user_prompt_content)
79
-
80
- async def invoke(self, completion_create_params: CompletionCreateParams) -> InvokeReturn:
81
- """Run the LlamaIndex workflow with the provided completion parameters."""
82
- input_message = self.make_input_message(completion_create_params)
83
-
84
- # Load MCP tools (if configured) asynchronously before building workflow
85
- mcp_tools = await load_mcp_tools(
86
- authorization_context=self._authorization_context,
87
- forwarded_headers=self.forwarded_headers,
88
- )
89
- self.set_mcp_tools(mcp_tools)
90
-
91
- # Preserve prior template startup print for CLI parity
92
- try:
93
- print(
94
- "Running agent with user prompt:",
95
- extract_user_prompt_content(completion_create_params),
96
- flush=True,
97
- )
98
- except Exception:
99
- # Printing is best-effort; proceed regardless
100
- pass
101
-
102
- workflow = self.build_workflow()
103
- handler = workflow.run(user_msg=input_message)
104
-
105
- usage_metrics: UsageMetrics = default_usage_metrics()
106
-
107
- # Streaming parity with LangGraph: yield incremental deltas during event processing
108
- if is_streaming(completion_create_params):
109
-
110
- async def _gen() -> AsyncGenerator[tuple[str, MultiTurnSample | None, UsageMetrics]]:
111
- events: list[Any] = []
112
- current_agent_name: str | None = None
113
- async for event in handler.stream_events():
114
- events.append(event)
115
- # Best-effort extraction of incremental text from LlamaIndex events
116
- delta: str | None = None
117
- # Agent switch banner if available on event
118
- try:
119
- if hasattr(event, "current_agent_name"):
120
- new_agent = getattr(event, "current_agent_name")
121
- if (
122
- isinstance(new_agent, str)
123
- and new_agent
124
- and new_agent != current_agent_name
125
- ):
126
- current_agent_name = new_agent
127
- # Print banner for agent switch (do not emit as streamed content)
128
- print("\n" + "=" * 50, flush=True)
129
- print(f"🤖 Agent: {current_agent_name}", flush=True)
130
- print("=" * 50 + "\n", flush=True)
131
- except Exception:
132
- pass
133
-
134
- try:
135
- if hasattr(event, "delta") and isinstance(getattr(event, "delta"), str):
136
- delta = getattr(event, "delta")
137
- # Some event types may carry incremental text under "text" or similar
138
- elif hasattr(event, "text") and isinstance(getattr(event, "text"), str):
139
- delta = getattr(event, "text")
140
- except Exception:
141
- # Ignore malformed events and continue
142
- delta = None
143
-
144
- if delta:
145
- # Yield token/content delta with current (accumulated) usage metrics
146
- yield delta, None, usage_metrics
147
-
148
- # Best-effort debug/event messages printed to CLI (do not stream as content)
149
- try:
150
- event_type = type(event).__name__
151
- if event_type == "AgentInput" and hasattr(event, "input"):
152
- print("📥 Input:", getattr(event, "input"), flush=True)
153
- elif event_type == "AgentOutput":
154
- # Output content
155
- resp = getattr(event, "response", None)
156
- if (
157
- resp is not None
158
- and hasattr(resp, "content")
159
- and getattr(resp, "content")
160
- ):
161
- print("📤 Output:", getattr(resp, "content"), flush=True)
162
- # Planned tool calls
163
- tcalls = getattr(event, "tool_calls", None)
164
- if isinstance(tcalls, list) and tcalls:
165
- names = []
166
- for c in tcalls:
167
- try:
168
- nm = getattr(c, "tool_name", None) or (
169
- c.get("tool_name") if isinstance(c, dict) else None
170
- )
171
- if nm:
172
- names.append(str(nm))
173
- except Exception:
174
- pass
175
- if names:
176
- print("🛠️ Planning to use tools:", names, flush=True)
177
- elif event_type == "ToolCallResult":
178
- tname = getattr(event, "tool_name", None)
179
- tkwargs = getattr(event, "tool_kwargs", None)
180
- tout = getattr(event, "tool_output", None)
181
- print(f"🔧 Tool Result ({tname}):", flush=True)
182
- print(f" Arguments: {tkwargs}", flush=True)
183
- print(f" Output: {tout}", flush=True)
184
- elif event_type == "ToolCall":
185
- tname = getattr(event, "tool_name", None)
186
- tkwargs = getattr(event, "tool_kwargs", None)
187
- print(f"🔨 Calling Tool: {tname}", flush=True)
188
- print(f" With arguments: {tkwargs}", flush=True)
189
- except Exception:
190
- # Ignore best-effort debug rendering errors
191
- pass
192
-
193
- # After streaming completes, build final interactions and finish chunk
194
- # Extract state from workflow context (supports sync/async get or attribute)
195
- state = None
196
- ctx = getattr(handler, "ctx", None)
197
- try:
198
- if ctx is not None:
199
- get = getattr(ctx, "get", None)
200
- if callable(get):
201
- result = get("state")
202
- state = await result if inspect.isawaitable(result) else result
203
- elif hasattr(ctx, "state"):
204
- state = getattr(ctx, "state")
205
- except (AttributeError, TypeError):
206
- state = None
207
-
208
- # Run subclass-defined response extraction (not streamed) for completeness
209
- _ = self.extract_response_text(state, events)
210
-
211
- pipeline_interactions = create_pipeline_interactions_from_events(events)
212
- # Final empty chunk indicates end of stream, carrying interactions and usage
213
- yield "", pipeline_interactions, usage_metrics
214
-
215
- return _gen()
216
-
217
- # Non-streaming path: run to completion, emit debug prints, then return final response
218
- events: list[Any] = []
219
- current_agent_name: str | None = None
220
- async for event in handler.stream_events():
221
- events.append(event)
222
-
223
- # Replicate prior template CLI prints for non-streaming mode
224
- try:
225
- if hasattr(event, "current_agent_name"):
226
- new_agent = getattr(event, "current_agent_name")
227
- if isinstance(new_agent, str) and new_agent and new_agent != current_agent_name:
228
- current_agent_name = new_agent
229
- print(f"\n{'=' * 50}", flush=True)
230
- print(f"🤖 Agent: {current_agent_name}", flush=True)
231
- print(f"{'=' * 50}\n", flush=True)
232
- except Exception:
233
- pass
234
-
235
- try:
236
- if hasattr(event, "delta") and isinstance(getattr(event, "delta"), str):
237
- print(getattr(event, "delta"), end="", flush=True)
238
- elif hasattr(event, "text") and isinstance(getattr(event, "text"), str):
239
- print(getattr(event, "text"), end="", flush=True)
240
- else:
241
- event_type = type(event).__name__
242
- if event_type == "AgentInput" and hasattr(event, "input"):
243
- print("📥 Input:", getattr(event, "input"), flush=True)
244
- elif event_type == "AgentOutput":
245
- resp = getattr(event, "response", None)
246
- if (
247
- resp is not None
248
- and hasattr(resp, "content")
249
- and getattr(resp, "content")
250
- ):
251
- print("📤 Output:", getattr(resp, "content"), flush=True)
252
- tcalls = getattr(event, "tool_calls", None)
253
- if isinstance(tcalls, list) and tcalls:
254
- names: list[str] = []
255
- for c in tcalls:
256
- try:
257
- nm = getattr(c, "tool_name", None) or (
258
- c.get("tool_name") if isinstance(c, dict) else None
259
- )
260
- if nm:
261
- names.append(str(nm))
262
- except Exception:
263
- pass
264
- if names:
265
- print("🛠️ Planning to use tools:", names, flush=True)
266
- elif event_type == "ToolCallResult":
267
- tname = getattr(event, "tool_name", None)
268
- tkwargs = getattr(event, "tool_kwargs", None)
269
- tout = getattr(event, "tool_output", None)
270
- print(f"🔧 Tool Result ({tname}):", flush=True)
271
- print(f" Arguments: {tkwargs}", flush=True)
272
- print(f" Output: {tout}", flush=True)
273
- elif event_type == "ToolCall":
274
- tname = getattr(event, "tool_name", None)
275
- tkwargs = getattr(event, "tool_kwargs", None)
276
- print(f"🔨 Calling Tool: {tname}", flush=True)
277
- print(f" With arguments: {tkwargs}", flush=True)
278
- except Exception:
279
- # Best-effort debug printing; continue on errors
280
- pass
281
-
282
- # Extract state from workflow context (supports sync/async get or attribute)
283
- state = None
284
- ctx = getattr(handler, "ctx", None)
285
- try:
286
- if ctx is not None:
287
- get = getattr(ctx, "get", None)
288
- if callable(get):
289
- result = get("state")
290
- state = await result if inspect.isawaitable(result) else result
291
- elif hasattr(ctx, "state"):
292
- state = getattr(ctx, "state")
293
- except (AttributeError, TypeError):
294
- state = None
295
- response_text = self.extract_response_text(state, events)
296
-
297
- pipeline_interactions = create_pipeline_interactions_from_events(events)
298
-
299
- return response_text, pipeline_interactions, usage_metrics