rakam-systems-agent 0.1.1rc7 (py3-none-any.whl)

This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
@@ -0,0 +1,35 @@
+ """
+ AI Agents Module
+
+ Provides flexible agent implementations with support for:
+ - Async/sync operations
+ - Tool integration
+ - Pydantic AI compatibility
+ - Streaming responses
+ - Multi-provider LLM gateway
+ """
+
+ from .components import (
+     BaseAgent,
+     LLMGateway,
+     LLMRequest,
+     LLMResponse,
+     OpenAIGateway,
+     MistralGateway,
+     LLMGatewayFactory,
+     get_llm_gateway,
+ )
+
+ # __version__ = "0.1.0"
+
+ __all__ = [
+     "BaseAgent",
+     "LLMGateway",
+     "LLMRequest",
+     "LLMResponse",
+     "OpenAIGateway",
+     "MistralGateway",
+     "LLMGatewayFactory",
+     "get_llm_gateway",
+ ]
+
@@ -0,0 +1,26 @@
+ from .base_agent import BaseAgent
+ from .chat_history import JSONChatHistory, SQLChatHistory, PostgresChatHistory
+ from .llm_gateway import (
+     LLMGateway,
+     LLMRequest,
+     LLMResponse,
+     OpenAIGateway,
+     MistralGateway,
+     LLMGatewayFactory,
+     get_llm_gateway,
+ )
+
+ __all__ = [
+     "BaseAgent",
+     "JSONChatHistory",
+     "SQLChatHistory",
+     "PostgresChatHistory",
+     "LLMGateway",
+     "LLMRequest",
+     "LLMResponse",
+     "OpenAIGateway",
+     "MistralGateway",
+     "LLMGatewayFactory",
+     "get_llm_gateway",
+ ]
+
@@ -0,0 +1,358 @@
+ from __future__ import annotations
+ from typing import Any, AsyncIterator, Callable, Iterator, List, Optional, Type, Union
+ from rakam_systems_core.ai_core.interfaces.agent import AgentComponent, AgentInput, AgentOutput, ModelSettings
+ from rakam_systems_core.ai_core.interfaces.tool import ToolComponent
+ from rakam_systems_core.ai_core.tracking import track_method, TrackingMixin
+
+ try:
+     from rakam_systems_core.ai_core.interfaces.tool_registry import ToolRegistry, ToolMode
+     from rakam_systems_core.ai_core.interfaces.tool_invoker import ToolInvoker
+     TOOL_SYSTEM_AVAILABLE = True
+ except ImportError:
+     ToolRegistry = None  # type: ignore
+     ToolMode = None  # type: ignore
+     ToolInvoker = None  # type: ignore
+     TOOL_SYSTEM_AVAILABLE = False
+
+ try:
+     from pydantic_ai import Agent as PydanticAgent
+     from pydantic_ai import Tool as PydanticTool
+     from pydantic_ai.settings import ModelSettings as PydanticModelSettings
+     PYDANTIC_AI_AVAILABLE = True
+ except ImportError:
+     PYDANTIC_AI_AVAILABLE = False
+     PydanticAgent = None  # type: ignore
+     PydanticTool = None  # type: ignore
+     PydanticModelSettings = None  # type: ignore
+
+
+ # Type alias for dynamic system prompt functions
+ # Can be: () -> str, (ctx) -> str, async () -> str, or async (ctx) -> str
+ DynamicSystemPromptFunc = Callable[..., Union[str, Any]]
+
+
+ class BaseAgent(TrackingMixin, AgentComponent):
+     """Base agent implementation using Pydantic AI.
+
+     This is the core agent implementation in our system, powered by Pydantic AI.
+     It supports both traditional tool lists and the new ToolRegistry/ToolInvoker system.
+     When using a ToolRegistry, tools will be automatically loaded from the registry.
+
+     Features:
+     - Configuration-based initialization
+     - Input/output tracking
+     - Tool registry integration
+     - Streaming support
+     """
+
+     def __init__(
+         self,
+         name: str = "base_agent",
+         config: Optional[dict] = None,
+         model: Optional[str] = None,
+         deps_type: Optional[Type[Any]] = None,
+         output_type: Optional[Type[Any]] = None,
+         system_prompt: Optional[str] = None,
+         tools: Optional[List[ToolComponent]] = None,
+         tool_registry: Optional[Any] = None,  # ToolRegistry
+         tool_invoker: Optional[Any] = None,  # ToolInvoker
+         enable_tracking: bool = False,
+         tracking_output_dir: str = "./agent_tracking",
+     ) -> None:
+         if not PYDANTIC_AI_AVAILABLE:
+             raise ImportError(
+                 "pydantic_ai is not installed. Please install it with: pip install pydantic_ai"
+             )
+
+         # Call super().__init__() which will handle both mixins properly
+         super().__init__(
+             name=name,
+             config=config,
+             model=model,
+             deps_type=deps_type,
+             output_type=output_type,
+             system_prompt=system_prompt,
+             tools=tools,
+             enable_tracking=enable_tracking,
+             tracking_output_dir=tracking_output_dir,
+         )
+
+         # Optional new tool system support
+         self.tool_registry = tool_registry
+         self.tool_invoker = tool_invoker
+
+         # If registry is provided but no invoker, create one
+         if tool_registry is not None and tool_invoker is None and TOOL_SYSTEM_AVAILABLE:
+             self.tool_invoker = ToolInvoker(tool_registry)
+
+         # Get tools from registry if provided, otherwise use tools list
+         tools_to_use = self._get_tools_for_agent(tools, tool_registry)
+
+         # Build kwargs for PydanticAgent, only including output_type if specified
+         # (pydantic-ai defaults to str when not provided, but None causes issues)
+         agent_kwargs = {
+             "model": self.model,
+             "deps_type": self.deps_type,
+             "system_prompt": self.system_prompt,
+             "tools": self._convert_tools_to_pydantic(tools_to_use),
+         }
+         if self.output_type is not None:
+             agent_kwargs["output_type"] = self.output_type
+
+         # Initialize Pydantic AI agent
+         self._pydantic_agent = PydanticAgent(**agent_kwargs)
+
+         # Store registered dynamic system prompt functions
+         self._dynamic_system_prompts: List[DynamicSystemPromptFunc] = []
+
+     def dynamic_system_prompt(
+         self,
+         func: Optional[DynamicSystemPromptFunc] = None
+     ) -> Union[DynamicSystemPromptFunc, Callable[[DynamicSystemPromptFunc], DynamicSystemPromptFunc]]:
+         """Register a dynamic system prompt function.
+
+         This method can be used as a decorator to register functions that dynamically
+         generate parts of the system prompt. The registered functions are passed directly
+         to the underlying Pydantic AI agent.
+
+         The decorated function can take an optional RunContext parameter and should return
+         a string. It can be sync or async.
+
+         Usage:
+             ```python
+             from datetime import date
+             from pydantic_ai import RunContext
+
+             agent = BaseAgent(
+                 name="my_agent",
+                 model="openai:gpt-4o",
+                 deps_type=str,
+                 system_prompt="Base system prompt."
+             )
+
+             @agent.dynamic_system_prompt
+             def add_user_name(ctx: RunContext[str]) -> str:
+                 return f"The user's name is {ctx.deps}."
+
+             @agent.dynamic_system_prompt
+             def add_date() -> str:
+                 return f"The date is {date.today()}."
+
+             # Now run the agent with deps
+             result = await agent.arun("What is the date?", deps="Frank")
+             ```
+
+         Args:
+             func: The function to register. If None, returns a decorator.
+
+         Returns:
+             The registered function (unchanged), or a decorator if func is None.
+         """
+         def decorator(f: DynamicSystemPromptFunc) -> DynamicSystemPromptFunc:
+             # Register with the underlying Pydantic AI agent
+             self._pydantic_agent.system_prompt(f)
+             # Also keep track of it locally
+             self._dynamic_system_prompts.append(f)
+             return f
+
+         if func is not None:
+             # Used as @agent.dynamic_system_prompt without parentheses
+             return decorator(func)
+         else:
+             # Used as @agent.dynamic_system_prompt() with parentheses
+             return decorator
+
+     def add_dynamic_system_prompt(self, func: DynamicSystemPromptFunc) -> DynamicSystemPromptFunc:
+         """Add a dynamic system prompt function (non-decorator version).
+
+         This is a convenience method for adding dynamic system prompts without
+         using the decorator syntax.
+
+         Usage:
+             ```python
+             def get_user_context(ctx: RunContext[str]) -> str:
+                 return f"User context: {ctx.deps}"
+
+             agent.add_dynamic_system_prompt(get_user_context)
+             ```
+
+         Args:
+             func: The function to register.
+
+         Returns:
+             The registered function (unchanged).
+         """
+         return self.dynamic_system_prompt(func)
+
+     def _get_tools_for_agent(
+         self,
+         tools: Optional[List[ToolComponent]],
+         tool_registry: Optional[Any]
+     ) -> List[ToolComponent]:
+         """Get tools from registry or use provided tools list."""
+         if tools is not None:
+             # Use explicitly provided tools
+             return tools
+
+         if tool_registry is not None:
+             # Load direct tools from registry (MCP tools can't be used directly with agents)
+             try:
+                 if TOOL_SYSTEM_AVAILABLE and ToolMode is not None:
+                     direct_tools = tool_registry.get_tools_by_mode(
+                         ToolMode.DIRECT)
+                     result_tools = []
+                     for metadata in direct_tools:
+                         if metadata.tool_instance is not None:
+                             result_tools.append(metadata.tool_instance)
+                     return result_tools
+             except (ImportError, AttributeError):
+                 pass
+
+         # No tools available
+         return []
+
+     def _convert_tools_to_pydantic(self, tools: List[ToolComponent]) -> List[Any]:
+         """Convert ToolComponent format to Pydantic AI Tool format."""
+         if not PYDANTIC_AI_AVAILABLE:
+             return []
+
+         pydantic_tools = []
+         for tool in tools:
+             # ToolComponent now has all the attributes needed for Pydantic AI
+             pydantic_tool = PydanticTool.from_schema(
+                 function=tool.function,
+                 name=tool.name,
+                 description=tool.description,
+                 json_schema=tool.json_schema,
+                 takes_ctx=tool.takes_ctx,
+             )
+             pydantic_tools.append(pydantic_tool)
+
+         return pydantic_tools
+
+     def _convert_model_settings(self, model_settings: Optional[ModelSettings]) -> Optional[PydanticModelSettings]:
+         """Convert our ModelSettings to Pydantic AI ModelSettings."""
+         if model_settings is None or not PYDANTIC_AI_AVAILABLE:
+             return None
+
+         kwargs = {}
+
+         # Only set parallel_tool_calls if agent has tools
+         if self.tools:
+             kwargs['parallel_tool_calls'] = model_settings.parallel_tool_calls
+
+         if model_settings.temperature is not None:
+             kwargs['temperature'] = model_settings.temperature
+
+         if model_settings.max_tokens is not None:
+             kwargs['max_tokens'] = model_settings.max_tokens
+
+         kwargs.update(model_settings.extra_settings)
+
+         return PydanticModelSettings(**kwargs)
+
+     def _normalize_input(self, input_data: Union[str, AgentInput]) -> AgentInput:
+         """Convert string or AgentInput to AgentInput."""
+         if isinstance(input_data, str):
+             return AgentInput(input_text=input_data)
+         return input_data
+
+     def infer(
+         self,
+         input_data: AgentInput,
+         deps: Optional[Any] = None,
+         model_settings: Optional[ModelSettings] = None
+     ) -> AgentOutput:
+         """Synchronous inference - not supported by Pydantic AI."""
+         raise NotImplementedError(
+             "BaseAgent only supports async operations. Use ainfer() or arun() instead."
+         )
+
+     @track_method()
+     async def ainfer(
+         self,
+         input_data: AgentInput,
+         deps: Optional[Any] = None,
+         model_settings: Optional[ModelSettings] = None
+     ) -> AgentOutput:
+         """Async inference using Pydantic AI."""
+         pydantic_settings = self._convert_model_settings(model_settings)
+
+         # Run the Pydantic AI agent
+         result = await self._pydantic_agent.run(
+             input_data.input_text,
+             deps=deps,
+             model_settings=pydantic_settings,
+         )
+
+         # Get the raw output from Pydantic AI
+         raw_output = result.output if hasattr(
+             result, 'output') else result.data
+
+         # Convert result to our AgentOutput format
+         # If output_type is used, raw_output will be the structured object
+         if self.output_type is not None:
+             output_text = str(raw_output)
+             structured_output = raw_output
+         else:
+             output_text = raw_output if isinstance(
+                 raw_output, str) else str(raw_output)
+             structured_output = None
+
+         metadata = {
+             'usage': result.usage() if hasattr(result, 'usage') else None,
+             'messages': result.all_messages() if hasattr(result, 'all_messages') else None,
+         }
+
+         return AgentOutput(output_text=output_text, metadata=metadata, output=structured_output)
+
+     def run(
+         self,
+         input_data: Union[str, AgentInput],
+         deps: Optional[Any] = None,
+         model_settings: Optional[ModelSettings] = None
+     ) -> AgentOutput:
+         """Synchronous run - not supported by Pydantic AI."""
+         raise NotImplementedError(
+             "BaseAgent only supports async operations. Use arun() instead."
+         )
+
+     @track_method()
+     async def arun(
+         self,
+         input_data: Union[str, AgentInput],
+         deps: Optional[Any] = None,
+         model_settings: Optional[ModelSettings] = None
+     ) -> AgentOutput:
+         normalized_input = self._normalize_input(input_data)
+         return await self.ainfer(normalized_input, deps=deps, model_settings=model_settings)
+
+     def stream(
+         self,
+         input_data: Union[str, AgentInput],
+         deps: Optional[Any] = None,
+         model_settings: Optional[ModelSettings] = None
+     ) -> Iterator[str]:
+         """Synchronous streaming - not supported by Pydantic AI."""
+         raise NotImplementedError(
+             "BaseAgent only supports async operations. Use astream() instead."
+         )
+
+     async def astream(
+         self,
+         input_data: Union[str, AgentInput],
+         deps: Optional[Any] = None,
+         model_settings: Optional[ModelSettings] = None
+     ) -> AsyncIterator[str]:
+         """Async streaming using Pydantic AI."""
+         normalized_input = self._normalize_input(input_data)
+         pydantic_settings = self._convert_model_settings(model_settings)
+
+         # Stream from the Pydantic AI agent
+         async with self._pydantic_agent.run_stream(
+             normalized_input.input_text,
+             deps=deps,
+             model_settings=pydantic_settings,
+         ) as result:
+             async for chunk in result.stream():
+                 yield chunk
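
The docstrings in base_agent.py above already sketch the intended call pattern: construct a `BaseAgent`, optionally register dynamic system prompts, then use the async entry points (`arun`, `astream`); the sync variants deliberately raise `NotImplementedError`. The following is a minimal end-to-end sketch assembled from those docstrings. The top-level import path, the `openai:gpt-4o` model string, and the availability of pydantic_ai plus an OpenAI API key are assumptions, not something this diff confirms.

```python
# Minimal usage sketch based on the BaseAgent docstrings above.
# Assumptions (not confirmed by this diff): the class is importable as
# `rakam_systems_agent.BaseAgent`, pydantic_ai is installed, and credentials
# for the "openai:gpt-4o" model string are configured.
import asyncio

from pydantic_ai import RunContext

from rakam_systems_agent import BaseAgent  # import path is an assumption


async def main() -> None:
    agent = BaseAgent(
        name="demo_agent",
        model="openai:gpt-4o",
        deps_type=str,
        system_prompt="Base system prompt.",
    )

    # Dynamic system prompts are forwarded to the underlying Pydantic AI agent.
    @agent.dynamic_system_prompt
    def add_user_name(ctx: RunContext[str]) -> str:
        return f"The user's name is {ctx.deps}."

    # Only the async entry points are implemented; run()/infer()/stream() raise.
    result = await agent.arun("What is the user's name?", deps="Frank")
    print(result.output_text)

    # Token-by-token streaming via astream().
    async for chunk in agent.astream("Greet the user.", deps="Frank"):
        print(chunk, end="", flush=True)


asyncio.run(main())
```
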
@@ -0,0 +1,10 @@
+ """Chat History Components.
+
+ This module provides implementations for chat history management.
+ """
+
+ from .json_chat_history import JSONChatHistory
+ from .sql_chat_history import SQLChatHistory
+ from .postgres_chat_history import PostgresChatHistory
+
+ __all__ = ["JSONChatHistory", "SQLChatHistory", "PostgresChatHistory"]
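
Tying back to `_convert_model_settings` in base_agent.py above: per-call generation parameters travel in a `ModelSettings` object and are mapped field by field into Pydantic AI's own settings (`temperature` and `max_tokens` only when not None, `parallel_tool_calls` only when the agent has tools, and anything in `extra_settings` merged last). A hedged sketch follows; only those attribute names are confirmed by the code, while the constructor keywords and the `top_p` extra are assumptions for illustration.

```python
# Sketch of passing per-call settings, based on the fields read by
# _convert_model_settings(). The ModelSettings constructor keywords are an
# assumption; only the attribute names (temperature, max_tokens,
# parallel_tool_calls, extra_settings) appear in the code above.
from rakam_systems_core.ai_core.interfaces.agent import ModelSettings

settings = ModelSettings(
    temperature=0.2,           # forwarded only when not None
    max_tokens=512,            # forwarded only when not None
    parallel_tool_calls=True,  # forwarded only if the agent has tools
    extra_settings={"top_p": 0.9},  # merged last via kwargs.update(...)
)

# Inside an async context, assuming `agent` is the BaseAgent from the sketch above:
# result = await agent.arun("Summarize the report.", model_settings=settings)
```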