rakam-systems-agent 0.1.1rc7__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rakam_systems_agent-0.1.1rc7/.gitignore +46 -0
- rakam_systems_agent-0.1.1rc7/.python-version +1 -0
- rakam_systems_agent-0.1.1rc7/PKG-INFO +367 -0
- rakam_systems_agent-0.1.1rc7/README.md +333 -0
- rakam_systems_agent-0.1.1rc7/main.py +6 -0
- rakam_systems_agent-0.1.1rc7/pyproject.toml +64 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/__init__.py +35 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/__init__.py +26 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/base_agent.py +358 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/chat_history/__init__.py +10 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/chat_history/json_chat_history.py +372 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/chat_history/postgres_chat_history.py +668 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/chat_history/sql_chat_history.py +446 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/llm_gateway/README.md +505 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/llm_gateway/__init__.py +16 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/llm_gateway/gateway_factory.py +313 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/llm_gateway/mistral_gateway.py +287 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/llm_gateway/openai_gateway.py +295 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/tools/LLM_GATEWAY_TOOLS_README.md +533 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/tools/__init__.py +46 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/tools/example_tools.py +431 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/tools/llm_gateway_tools.py +605 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/components/tools/search_tool.py +14 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/server/README.md +375 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/server/__init__.py +12 -0
- rakam_systems_agent-0.1.1rc7/src/rakam_systems_agent/server/mcp_server_agent.py +127 -0
rakam_systems_agent-0.1.1rc7/.gitignore
@@ -0,0 +1,46 @@
# Python specific
*.pyc
*.pyo
*.pyd
__pycache__/
.pytest_cache/
*.so

# Environments
.env
*.env
*.venv*
venv/
*venv/
ENV/
env/
env.bak/
venv.bak/

# VS Code
.vscode/
.vscode/*

# PyCharm
.idea/
.idea/*

# OS specific
.DS_Store
Thumbs.db

#data
data/
dist/
logs/

# Build artifacts
*.egg-info/

# tracking data
agent_tracking/

# docs
docs/

temp_path/
rakam_systems_agent-0.1.1rc7/.python-version
@@ -0,0 +1 @@
3.11
rakam_systems_agent-0.1.1rc7/PKG-INFO
@@ -0,0 +1,367 @@
Metadata-Version: 2.4
Name: rakam-systems-agent
Version: 0.1.1rc7
Summary: AI Agents framework with Pydantic AI support and LLM Gateway integration
Project-URL: Homepage, https://github.com/Rakam-AI/rakam_systems-inhouse
Project-URL: Documentation, https://github.com/Rakam-AI/rakam_systems-inhouse
Project-URL: Repository, https://github.com/Rakam-AI/rakam_systems-inhouse
Author-email: Mohamed Hilel <mohammedjassemhlel@gmail.com>, Peng Zheng <pengzheng990630@outlook.com>
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Python: >=3.10
Requires-Dist: psycopg2-binary
Requires-Dist: pydantic-ai<2.0.0,>=1.11.0
Requires-Dist: pydantic>=2.11.5
Requires-Dist: python-dotenv>=1.0.0
Requires-Dist: pyyaml>=6.0
Requires-Dist: rakam-system-core
Provides-Extra: all
Requires-Dist: rakam-system-agent[llm-providers]; extra == 'all'
Provides-Extra: dev
Requires-Dist: black>=23.0.0; extra == 'dev'
Requires-Dist: pytest-asyncio>=0.21.0; extra == 'dev'
Requires-Dist: pytest>=7.0.0; extra == 'dev'
Requires-Dist: ruff>=0.1.0; extra == 'dev'
Provides-Extra: llm-providers
Requires-Dist: mistralai<2.0.0,>=1.9.0; extra == 'llm-providers'
Requires-Dist: openai<3.0.0,>=1.37.0; extra == 'llm-providers'
Requires-Dist: tiktoken; extra == 'llm-providers'
Description-Content-Type: text/markdown

# Rakam System Agent

The agent package of Rakam Systems, providing AI agent implementations powered by Pydantic AI.

## Overview

`rakam-systems-agent` provides flexible AI agents with tool integration, chat history, and LLM gateway abstractions. This package depends on `rakam-system-core`.

## Features

- **Configuration-First Design**: Change agent behavior without code changes, just by updating YAML files
- **Async/Sync Support**: Full support for both synchronous and asynchronous agent operations
- **Tool Integration**: Easy tool definition and integration using the `Tool.from_schema` pattern
- **Model Settings**: Control model behavior, including parallel tool calls, temperature, and max tokens
- **Pydantic AI Powered**: Built on top of the Pydantic AI library
- **Streaming Support**: Both sync and async streaming interfaces
- **Chat History**: Multiple backends (JSON, SQLite, PostgreSQL)
- **LLM Gateway**: Unified interface for OpenAI and Mistral AI

### 🎯 Configuration Convenience

The agent package supports comprehensive YAML configuration, allowing you to:

- **Switch LLM models** without changing code (GPT-4 → GPT-4o-mini → Claude)
- **Tune parameters** instantly (temperature, max tokens, parallel tools)
- **Modify prompts** without redeployment
- **Add/remove tools** by editing config files
- **Enable/disable tracking** via configuration
- **Use different configs** for different environments

**Example**: Switch from GPT-4o to GPT-4o-mini by changing one line in your YAML file: no code changes, no redeployment needed.
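
The exact schema of these config files lives in the package's own loaders, so the snippet below is an illustration only: the YAML keys (`agent`, `model`, `settings`, ...) are hypothetical, and it simply shows why a model swap is a one-line edit whose values can be fed into the `BaseAgent` constructor used in the Quick Start.

```python
# Illustration only: these YAML keys are hypothetical, not the package's
# official config schema. PyYAML is a declared dependency of the package.
import yaml

CONFIG = """
agent:
  name: weather_agent
  model: openai:gpt-4o-mini        # was openai:gpt-4o - the only line that changes
  system_prompt: You are a helpful weather assistant.
  settings:
    temperature: 0.2
    parallel_tool_calls: true
"""

cfg = yaml.safe_load(CONFIG)["agent"]
# The loaded values map straight onto the constructor shown in Quick Start, e.g.:
# BaseAgent(name=cfg["name"], model=cfg["model"], system_prompt=cfg["system_prompt"])
```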

## Installation

```bash
# Requires the core package
pip install -e ./rakam-system-core

# Install the agent package
pip install -e ./rakam-system-agent
```

## Quick Start

### Using BaseAgent (Pydantic AI-powered)

```python
import asyncio
from rakam_systems_agent import BaseAgent
from rakam_systems_core.ai_core.interfaces import ModelSettings
from rakam_systems_core.ai_core.interfaces.tool import ToolComponent as Tool

# Define a tool function
async def get_weather(city: str) -> dict:
    """Get weather information for a city"""
    # Your implementation here
    return {"city": city, "temperature": 72, "condition": "sunny"}

# Create an agent with tools
agent = BaseAgent(
    name="weather_agent",
    model="openai:gpt-4o",
    system_prompt="You are a helpful weather assistant.",
    tools=[
        Tool.from_schema(
            function=get_weather,
            name='get_weather',
            description='Get weather information for a city',
            json_schema={
                'type': 'object',
                'properties': {
                    'city': {'type': 'string', 'description': 'The city name'},
                },
                'required': ['city'],
                'additionalProperties': False,
            },
            takes_ctx=False,
        ),
    ],
)

# Run the agent
async def main():
    result = await agent.arun(
        "What's the weather in San Francisco?",
        model_settings=ModelSettings(parallel_tool_calls=True),
    )
    print(result.output_text)

asyncio.run(main())
```
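
The agent can also be driven synchronously through `run()`, whose signature is listed in the API Reference below; a minimal sketch reusing the `agent` and `ModelSettings` from the snippet above:

```python
# Synchronous call; assumes run() mirrors arun() as documented in the API
# Reference, and that the result exposes output_text like the async example.
result = agent.run(
    "What's the weather in San Francisco?",
    model_settings=ModelSettings(parallel_tool_calls=True),
)
print(result.output_text)
```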

## Core Components

### AgentComponent

The base abstract class for all agents. Provides:

- `run()` / `arun()`: Execute the agent synchronously or asynchronously
- `stream()` / `astream()`: Stream responses
- Support for tools, model settings, and dependencies

### BaseAgent

The core agent implementation powered by Pydantic AI. This is the primary agent class in our system. Features:

- Direct integration with Pydantic AI's Agent
- Full support for parallel tool calls
- Automatic conversion between our interfaces and Pydantic AI's
- Support for both traditional tool lists and the ToolRegistry/ToolInvoker system

### Tool

Wrapper for tool functions, compatible with Pydantic AI's `Tool.from_schema` pattern:

```python
Tool.from_schema(
    function=my_function,
    name='my_function',
    description='What this function does',
    json_schema={
        'type': 'object',
        'properties': {...},
        'required': [...],
    },
    takes_ctx=False,
)
```

### ModelSettings

Configure model behavior:

```python
ModelSettings(
    parallel_tool_calls=True,  # Enable parallel tool execution
    temperature=0.7,           # Control randomness
    max_tokens=1000,           # Limit response length
)
```

## Advanced Usage

### Parallel vs Sequential Tool Calls

Control whether tools are called in parallel or sequentially:

```python
# Parallel (faster for independent tools)
result = await agent.arun(
    "Get weather for NYC and LA",
    model_settings=ModelSettings(parallel_tool_calls=True),
)

# Sequential (for dependent operations)
result = await agent.arun(
    "Get weather for NYC and LA",
    model_settings=ModelSettings(parallel_tool_calls=False),
)
```

### Using Dependencies

Pass context/dependencies to your agent:

```python
class Deps:
    def __init__(self, user_id: str):
        self.user_id = user_id

agent = BaseAgent(
    deps_type=Deps,
    # ...
)

result = await agent.arun(
    "Process this",
    deps=Deps(user_id="123"),
)
```
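
To let a tool read those dependencies, pass `takes_ctx=True` to `Tool.from_schema`. The sketch below assumes the Pydantic AI convention of a run-context object with a `.deps` attribute being injected as the first argument; the names here are illustrative, so check `base_agent.py` for the exact contract.

```python
# Hedged sketch: assumes that with takes_ctx=True the wrapped function receives
# a run context whose .deps attribute holds the Deps instance passed to arun().
async def whoami(ctx, greeting: str) -> dict:
    """Greet the calling user identified by the injected dependencies."""
    return {"message": f"{greeting}, user {ctx.deps.user_id}"}

whoami_tool = Tool.from_schema(
    function=whoami,
    name='whoami',
    description='Greet the current user',
    json_schema={
        'type': 'object',
        'properties': {
            'greeting': {'type': 'string', 'description': 'Greeting word to use'},
        },
        'required': ['greeting'],
    },
    takes_ctx=True,  # the context argument is injected, not exposed to the LLM
)
```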

### Streaming Responses

```python
async for chunk in agent.astream("Tell me a story"):
    print(chunk, end='', flush=True)
```

## API Reference

### AgentComponent

```python
class AgentComponent(BaseComponent):
    def __init__(
        self,
        name: str,
        config: Optional[Dict[str, Any]] = None,
        model: Optional[str] = None,
        deps_type: Optional[Type[Any]] = None,
        system_prompt: Optional[str] = None,
        tools: Optional[List[Any]] = None,
    )

    def run(
        self,
        input_data: Union[str, AgentInput],
        deps: Optional[Any] = None,
        model_settings: Optional[ModelSettings] = None
    ) -> AgentOutput

    async def arun(
        self,
        input_data: Union[str, AgentInput],
        deps: Optional[Any] = None,
        model_settings: Optional[ModelSettings] = None
    ) -> AgentOutput
```

### Tool

```python
class Tool:
    @classmethod
    def from_schema(
        cls,
        function: Callable[..., Any],
        name: str,
        description: str,
        json_schema: Dict[str, Any],
        takes_ctx: bool = False,
    ) -> "Tool"
```

### ModelSettings

```python
class ModelSettings:
    def __init__(
        self,
        parallel_tool_calls: bool = True,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        **kwargs: Any
    )
```

## Examples

See the `examples/ai_agents_examples/` directory in the main repository for complete examples demonstrating:

- Multiple tool definitions
- Parallel vs sequential tool calls
- Performance comparisons
- Complex multi-tool workflows
- Chat history integration
- RAG systems

## Package Structure

```
rakam-systems-agent/
├── src/rakam_systems_agent/
│   ├── components/
│   │   ├── base_agent.py       # BaseAgent (Pydantic AI-powered)
│   │   ├── llm_gateway/        # LLM provider gateways
│   │   ├── chat_history/       # Chat history backends
│   │   ├── tools/              # Built-in tools
│   │   └── __init__.py         # Exports
│   └── server/                 # MCP server
└── pyproject.toml
```

## Best Practices

1. **Use async when possible**: Async operations are more efficient, especially with tools
2. **Enable parallel tool calls**: For independent operations, parallel execution is much faster
3. **Provide clear tool descriptions**: Better descriptions help the LLM use tools correctly
4. **Use type hints**: JSON schemas should match your function signatures
5. **Handle errors gracefully**: Tools should catch and return meaningful errors (see the sketch below)
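
A minimal sketch of the last point, using a purely hypothetical lookup tool: failures are caught inside the tool and returned as data, so the model receives a meaningful message instead of an aborted run.

```python
# Hypothetical example tool: traps its own errors and returns them as data.
PRICE_TABLE = {"AAPL": 227.5, "MSFT": 415.0}

async def get_stock_price(symbol: str) -> dict:
    """Return a cached price for a ticker symbol, or a structured error."""
    try:
        return {"symbol": symbol, "price": PRICE_TABLE[symbol.upper()]}
    except KeyError:
        return {"symbol": symbol, "error": f"unknown symbol '{symbol}'"}
    except Exception as exc:
        return {"symbol": symbol, "error": f"lookup failed: {exc}"}
```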

## Migration Guide

### Using BaseAgent

`BaseAgent` is the core Pydantic AI-powered agent implementation in this framework.

```python
from rakam_systems_agent import BaseAgent

agent = BaseAgent(
    name="agent",
    model="openai:gpt-4o",
    system_prompt="You are a helpful assistant."
)
```

## Troubleshooting

### ImportError: pydantic_ai not installed

Install Pydantic AI:

```bash
pip install pydantic-ai
```

### Tool not being called

Check that:

1. The tool description is clear and relevant
2. The JSON schema matches the function signature
3. The system prompt doesn't contradict tool usage

### Performance issues

- Enable `parallel_tool_calls=True` for independent operations
- Use async functions for I/O-bound operations
- Consider caching tool results when appropriate (a minimal sketch follows)
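
For the caching point, a generic memoization helper (not part of this package) is one option for async tools whose inputs repeat:

```python
import asyncio
from functools import wraps

def async_cache(fn):
    """Memoize an async tool on its arguments (generic helper, not a package API)."""
    cache: dict = {}
    lock = asyncio.Lock()

    @wraps(fn)
    async def wrapper(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        async with lock:
            if key not in cache:
                cache[key] = await fn(*args, **kwargs)
            return cache[key]

    return wrapper

@async_cache
async def get_weather(city: str) -> dict:
    # The expensive lookup now runs only once per distinct city.
    return {"city": city, "temperature": 72, "condition": "sunny"}
```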

## Contributing

When adding new agent types:

1. Inherit from `BaseAgent`
2. Implement `ainfer()` for async or `infer()` for sync
3. Add tests in `tests/`
4. Update this README with examples

## License

See main project LICENSE file.