rakam-systems-agent 0.1.1rc7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rakam_systems_agent/__init__.py +35 -0
- rakam_systems_agent/components/__init__.py +26 -0
- rakam_systems_agent/components/base_agent.py +358 -0
- rakam_systems_agent/components/chat_history/__init__.py +10 -0
- rakam_systems_agent/components/chat_history/json_chat_history.py +372 -0
- rakam_systems_agent/components/chat_history/postgres_chat_history.py +668 -0
- rakam_systems_agent/components/chat_history/sql_chat_history.py +446 -0
- rakam_systems_agent/components/llm_gateway/README.md +505 -0
- rakam_systems_agent/components/llm_gateway/__init__.py +16 -0
- rakam_systems_agent/components/llm_gateway/gateway_factory.py +313 -0
- rakam_systems_agent/components/llm_gateway/mistral_gateway.py +287 -0
- rakam_systems_agent/components/llm_gateway/openai_gateway.py +295 -0
- rakam_systems_agent/components/tools/LLM_GATEWAY_TOOLS_README.md +533 -0
- rakam_systems_agent/components/tools/__init__.py +46 -0
- rakam_systems_agent/components/tools/example_tools.py +431 -0
- rakam_systems_agent/components/tools/llm_gateway_tools.py +605 -0
- rakam_systems_agent/components/tools/search_tool.py +14 -0
- rakam_systems_agent/server/README.md +375 -0
- rakam_systems_agent/server/__init__.py +12 -0
- rakam_systems_agent/server/mcp_server_agent.py +127 -0
- rakam_systems_agent-0.1.1rc7.dist-info/METADATA +367 -0
- rakam_systems_agent-0.1.1rc7.dist-info/RECORD +23 -0
- rakam_systems_agent-0.1.1rc7.dist-info/WHEEL +4 -0
@@ -0,0 +1,505 @@

# LLM Gateway System

A centralized, provider-agnostic LLM gateway for managing interactions with multiple LLM providers through a standardized interface.

## Overview

The LLM Gateway system provides:

- ✅ **Multi-Provider Support**: OpenAI and Mistral (extensible to others)
- ✅ **Standardized Interface**: Consistent API across all providers
- ✅ **Configuration-Driven**: Select models via config files without code changes
- ✅ **Structured Outputs**: Type-safe responses using Pydantic schemas
- ✅ **Streaming Support**: Real-time response streaming
- ✅ **Token Counting**: Built-in token usage tracking
- ✅ **Factory Pattern**: Easy provider routing and model selection

## Architecture

```
┌─────────────────────────────────────────────────────┐
│                  LLMGatewayFactory                  │
│         (Provider routing & configuration)         │
└───────────────┬────────────────────┬────────────────┘
                │                    │
    ┌───────────▼────────┐   ┌───────▼──────────┐
    │   OpenAIGateway    │   │  MistralGateway  │
    └───────────┬────────┘   └───────┬──────────┘
                │                    │
                └────────┬───────────┘
                         │
                ┌────────▼────────┐
                │   LLMGateway    │
                │  (Base Class)   │
                └─────────────────┘
```

## Quick Start

### Basic Usage

```python
from rakam_systems_agent.components.llm_gateway import get_llm_gateway, LLMRequest

# Create a gateway using model string
gateway = get_llm_gateway(model="openai:gpt-4o", temperature=0.7)

# Make a request
request = LLMRequest(
    system_prompt="You are a helpful assistant.",
    user_prompt="What is AI?",
    temperature=0.7,
)

response = gateway.generate(request)
print(response.content)
print(f"Tokens used: {response.usage}")
```

### Structured Output

```python
from pydantic import BaseModel, Field
from rakam_systems_agent.components.llm_gateway import OpenAIGateway, LLMRequest

class Book(BaseModel):
    title: str = Field(description="Book title")
    author: str = Field(description="Author name")
    year: int = Field(description="Publication year")

gateway = OpenAIGateway(model="gpt-4o")
request = LLMRequest(
    system_prompt="You are a librarian.",
    user_prompt="Tell me about '1984' by George Orwell.",
)

book = gateway.generate_structured(request, Book)
print(f"{book.title} by {book.author} ({book.year})")
```

### Streaming Responses

```python
from rakam_systems_agent.components.llm_gateway import get_llm_gateway, LLMRequest

gateway = get_llm_gateway(model="openai:gpt-4o")
request = LLMRequest(
    user_prompt="Write a short story about AI.",
    temperature=0.8,
)

for chunk in gateway.stream(request):
    print(chunk, end="", flush=True)
```

## Configuration-Driven Usage

### YAML Configuration

```yaml
# config.yaml
llm_gateways:
  default:
    provider: "openai"
    model: "gpt-4o"
    temperature: 0.7
    max_tokens: 2000

  creative:
    provider: "openai"
    model: "gpt-4o"
    temperature: 0.9
    max_tokens: 3000

  analytical:
    provider: "mistral"
    model: "mistral-large-latest"
    temperature: 0.3
```

### Load from Configuration

```python
import yaml  # requires PyYAML

from rakam_systems_agent.components.llm_gateway import LLMGatewayFactory

# Load the YAML config shown above
with open("config.yaml") as f:
    config = yaml.safe_load(f)

# Create gateways from config
default_gateway = LLMGatewayFactory.create_gateway_from_config(
    config["llm_gateways"]["default"]
)

creative_gateway = LLMGatewayFactory.create_gateway_from_config(
    config["llm_gateways"]["creative"]
)
```

## Factory Patterns

### Pattern 1: Model String

```python
from rakam_systems_agent.components.llm_gateway import LLMGatewayFactory

# Provider:model format
gateway = LLMGatewayFactory.create_gateway("openai:gpt-4o")

# Auto-detect provider (defaults to OpenAI)
gateway = LLMGatewayFactory.create_gateway("gpt-4o")

# Mistral
gateway = LLMGatewayFactory.create_gateway("mistral:mistral-large-latest")
```

### Pattern 2: Configuration Dictionary

```python
config = {
    "provider": "openai",
    "model": "gpt-4o",
    "temperature": 0.7,
    "base_url": "https://api.openai.com/v1",  # optional
}

gateway = LLMGatewayFactory.create_gateway_from_config(config)
```

### Pattern 3: Environment Variables

```python
# Set environment variables first:
#   DEFAULT_LLM_MODEL=openai:gpt-4o
#   DEFAULT_LLM_TEMPERATURE=0.7

gateway = LLMGatewayFactory.get_default_gateway()
```

### Pattern 4: Direct Instantiation

```python
from rakam_systems_agent.components.llm_gateway import OpenAIGateway, MistralGateway

openai_gateway = OpenAIGateway(
    model="gpt-4o",
    default_temperature=0.7,
    api_key="your-key",  # or use OPENAI_API_KEY env var
)

mistral_gateway = MistralGateway(
    model="mistral-large-latest",
    default_temperature=0.7,
    api_key="your-key",  # or use MISTRAL_API_KEY env var
)
```

## Provider Details

### OpenAI

**Supported Models:**
- `gpt-4o`
- `gpt-4o-mini`
- `gpt-4-turbo`
- `gpt-4`
- `gpt-3.5-turbo`

**Features:**
- Native structured output support via `response_format`
- Accurate token counting with `tiktoken`
- Streaming support
- Custom base URL support

**Configuration:**
```python
gateway = OpenAIGateway(
    model="gpt-4o",
    default_temperature=0.7,
    api_key="sk-...",  # or OPENAI_API_KEY env var
    base_url="https://api.openai.com/v1",  # optional
    organization="org-...",  # optional
)
```

### Mistral

**Supported Models:**
- `mistral-large-latest`
- `mistral-medium-latest`
- `mistral-small-latest`
- `open-mistral-7b`
- `open-mixtral-8x7b`

**Features:**
- JSON mode for structured outputs
- Approximate token counting
- Streaming support

**Configuration:**
```python
gateway = MistralGateway(
    model="mistral-large-latest",
    default_temperature=0.7,
    api_key="...",  # or MISTRAL_API_KEY env var
)
```

## Standardized Request/Response

### LLMRequest

```python
class LLMRequest(BaseModel):
    system_prompt: Optional[str] = None
    user_prompt: str
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None
    response_format: Optional[str] = None
    json_schema: Optional[Type[BaseModel]] = None
    extra_params: Dict[str, Any] = {}
```
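
As a sketch of how the optional fields combine: the schema below is illustrative, and the `seed` pass-through is an assumption about what your provider accepts via `extra_params`.

```python
from pydantic import BaseModel

from rakam_systems_agent.components.llm_gateway import LLMRequest

class Sentiment(BaseModel):
    label: str
    score: float

request = LLMRequest(
    system_prompt="Classify the sentiment of the user's text.",
    user_prompt="I love this library!",
    temperature=0.0,
    json_schema=Sentiment,        # Pydantic schema for structured output
    extra_params={"seed": 42},    # provider-specific extras (assumed supported)
)
```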

### LLMResponse

```python
class LLMResponse(BaseModel):
    content: str
    parsed_content: Optional[Any] = None
    usage: Optional[Dict[str, Any]] = None  # token counts
    model: Optional[str] = None
    finish_reason: Optional[str] = None
    metadata: Dict[str, Any] = {}
```
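
A minimal sketch of reading these fields back, assuming the `gateway` and `request` from the Quick Start:

```python
response = gateway.generate(request)

print(response.content)                  # raw text returned by the provider
if response.parsed_content is not None:  # populated for structured-output requests
    print(response.parsed_content)
if response.usage:
    print(response.usage.get("total_tokens"))
print(response.model, response.finish_reason)
```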

## Advanced Features

### Token Counting

```python
gateway = get_llm_gateway(model="openai:gpt-4o")

text = "Hello, world!"
token_count = gateway.count_tokens(text)
print(f"Tokens: {token_count}")
```

### Custom Provider Registration

```python
from rakam_systems_agent.components.llm_gateway import LLMGatewayFactory, LLMGateway

class CustomGateway(LLMGateway):
    # Implement the required methods (see Contributing below)
    pass

LLMGatewayFactory.register_provider(
    provider_name="custom",
    gateway_class=CustomGateway,
    default_model="custom-model-v1",
)

# Now you can use it
gateway = LLMGatewayFactory.create_gateway("custom:custom-model-v1")
```

### List Available Providers

```python
providers = LLMGatewayFactory.list_providers()
print(f"Available providers: {providers}")
# Output: ['openai', 'mistral']

for provider in providers:
    default_model = LLMGatewayFactory.get_default_model(provider)
    print(f"{provider}: {default_model}")
```

## Error Handling

```python
from rakam_systems_agent.components.llm_gateway import LLMGatewayFactory, OpenAIGateway

try:
    gateway = LLMGatewayFactory.create_gateway("invalid:model")
except ValueError as e:
    print(f"Error: {e}")
    # Error: Unknown provider 'invalid'

try:
    gateway = OpenAIGateway(model="gpt-4o")  # raises if OPENAI_API_KEY is not set
except ValueError as e:
    print(f"Error: {e}")
    # Error: OpenAI API key must be provided
```

## Environment Variables

The gateway system respects the following environment variables:

| Variable | Description | Default |
|----------|-------------|---------|
| `OPENAI_API_KEY` | OpenAI API key | Required for OpenAI |
| `MISTRAL_API_KEY` | Mistral API key | Required for Mistral |
| `DEFAULT_LLM_MODEL` | Default model string | `openai:gpt-4o` |
| `DEFAULT_LLM_PROVIDER` | Default provider | `openai` |
| `DEFAULT_LLM_TEMPERATURE` | Default temperature | `0.7` |
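
For example, switching the process-wide default to Mistral might look like this; a sketch that assumes the variables are read at gateway-creation time:

```python
import os

from rakam_systems_agent.components.llm_gateway import LLMGatewayFactory

# Assumption: defaults are resolved when the gateway is created
os.environ["DEFAULT_LLM_MODEL"] = "mistral:mistral-small-latest"
os.environ["DEFAULT_LLM_TEMPERATURE"] = "0.3"

gateway = LLMGatewayFactory.get_default_gateway()
```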

## Best Practices

### 1. Use Configuration Files

Store gateway configurations in YAML files for easy management:

```yaml
llm_gateways:
  production:
    provider: "openai"
    model: "gpt-4o"
    temperature: 0.7

  development:
    provider: "openai"
    model: "gpt-4o-mini"
    temperature: 0.7
```

### 2. Environment-Based API Keys

Never hardcode API keys. Use environment variables:

```bash
export OPENAI_API_KEY="sk-..."
export MISTRAL_API_KEY="..."
```

### 3. Handle Provider Differences

While the interface is standardized, be aware of provider-specific behaviors:

```python
# OpenAI has native structured output
openai_result = openai_gateway.generate_structured(request, Schema)

# Mistral uses JSON mode (may require schema in prompt)
mistral_result = mistral_gateway.generate_structured(request, Schema)
```

### 4. Use Appropriate Models

Choose models based on your use case (see the sketch after this list):

- **Fast, cost-effective**: `gpt-4o-mini`, `mistral-small-latest`
- **High quality**: `gpt-4o`, `mistral-large-latest`
- **Creative tasks**: Higher temperature (0.8-0.9)
- **Analytical tasks**: Lower temperature (0.2-0.4)
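
One way to encode these guidelines is a small helper that maps task types to gateways; the `TASK_MODELS` table and `gateway_for` helper below are hypothetical:

```python
from rakam_systems_agent.components.llm_gateway import get_llm_gateway

# Hypothetical mapping following the guidance above
TASK_MODELS = {
    "draft":    ("openai:gpt-4o-mini", 0.7),            # fast, cost-effective
    "creative": ("openai:gpt-4o", 0.9),                 # high quality, exploratory
    "analysis": ("mistral:mistral-large-latest", 0.3),  # precise, low temperature
}

def gateway_for(task: str):
    model, temperature = TASK_MODELS[task]
    return get_llm_gateway(model=model, temperature=temperature)
```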

### 5. Monitor Token Usage

Always check token usage to manage costs:

```python
response = gateway.generate(request)
if response.usage:
    print(f"Tokens: {response.usage['total_tokens']}")
    # Illustrative flat rate; actual pricing varies by model and provider
    print(f"Cost estimate: ${response.usage['total_tokens'] * 0.00001}")
```

## Integration with Agents

The LLM Gateway can be used alongside the BaseAgent system:

```python
from rakam_systems_agent import BaseAgent
from rakam_systems_agent.components.llm_gateway import get_llm_gateway

# Create a gateway for custom LLM calls
gateway = get_llm_gateway(model="openai:gpt-4o")

# Use BaseAgent for the agent framework (uses Pydantic AI internally)
agent = BaseAgent(
    name="my_agent",
    model="openai:gpt-4o",  # Pydantic AI model string
    system_prompt="You are a helpful assistant.",
)

# Gateway for direct LLM calls, agent for tool-using conversations
```

## Examples

See the complete examples in:
- `examples/llm_gateway_example.py` - Comprehensive usage examples
- `examples/configs/llm_gateway_config.yaml` - Configuration examples

Run examples:
```bash
cd rakam_systems
python -m examples.llm_gateway_example
```

## API Reference

### LLMGateway (Base Class)

**Methods:**
- `generate(request: LLMRequest) -> LLMResponse`
- `generate_structured(request: LLMRequest, schema: Type[T]) -> T`
- `stream(request: LLMRequest) -> Iterator[str]`
- `count_tokens(text: str, model: Optional[str]) -> int`

### LLMGatewayFactory

**Methods:**
- `create_gateway(model_string: str, **kwargs) -> LLMGateway`
- `create_gateway_from_config(config: Dict) -> LLMGateway`
- `get_default_gateway() -> LLMGateway`
- `register_provider(name: str, gateway_class: Type, default_model: str)`
- `list_providers() -> List[str]`
- `get_default_model(provider: str) -> Optional[str]`

### Convenience Functions

- `get_llm_gateway(model: str, **kwargs) -> LLMGateway`

## Dependencies

- `openai>=1.0.0` - OpenAI Python SDK
- `mistralai>=0.1.0` - Mistral Python SDK
- `tiktoken>=0.5.0` - Token counting for OpenAI
- `pydantic>=2.0.0` - Data validation and structured outputs
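
These are pulled in automatically when installing the distribution shown on this page:

```bash
pip install rakam-systems-agent==0.1.1rc7
```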

## License

See LICENSE file in the rakam_systems package root.

## Contributing

To add a new provider:

1. Create a new gateway class inheriting from `LLMGateway`
2. Implement all required abstract methods
3. Register with `LLMGatewayFactory.register_provider()`
4. Add tests and documentation

Example:

```python
class NewProviderGateway(LLMGateway):
    def generate(self, request: LLMRequest) -> LLMResponse:
        # Implementation
        pass

    def generate_structured(self, request: LLMRequest, schema: Type[T]) -> T:
        # Implementation
        pass

    def stream(self, request: LLMRequest) -> Iterator[str]:
        # Implementation
        pass

    def count_tokens(self, text: str, model: Optional[str]) -> int:
        # Implementation
        pass
```

## Support

For issues, questions, or contributions, please refer to the main rakam_systems repository.

@@ -0,0 +1,16 @@

"""LLM Gateway components for standardized multi-provider LLM interactions."""

from rakam_systems_core.ai_core.interfaces.llm_gateway import LLMGateway, LLMRequest, LLMResponse
from .openai_gateway import OpenAIGateway
from .mistral_gateway import MistralGateway
from .gateway_factory import LLMGatewayFactory, get_llm_gateway

__all__ = [
    "LLMGateway",
    "LLMRequest",
    "LLMResponse",
    "OpenAIGateway",
    "MistralGateway",
    "LLMGatewayFactory",
    "get_llm_gateway",
]