django-agent-runtime 0.3.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- django_agent_runtime/__init__.py +25 -0
- django_agent_runtime/admin.py +155 -0
- django_agent_runtime/api/__init__.py +26 -0
- django_agent_runtime/api/permissions.py +109 -0
- django_agent_runtime/api/serializers.py +114 -0
- django_agent_runtime/api/views.py +472 -0
- django_agent_runtime/apps.py +26 -0
- django_agent_runtime/conf.py +241 -0
- django_agent_runtime/examples/__init__.py +10 -0
- django_agent_runtime/examples/langgraph_adapter.py +164 -0
- django_agent_runtime/examples/langgraph_tools.py +179 -0
- django_agent_runtime/examples/simple_chat.py +69 -0
- django_agent_runtime/examples/tool_agent.py +157 -0
- django_agent_runtime/management/__init__.py +2 -0
- django_agent_runtime/management/commands/__init__.py +2 -0
- django_agent_runtime/management/commands/runagent.py +419 -0
- django_agent_runtime/migrations/0001_initial.py +117 -0
- django_agent_runtime/migrations/0002_persistence_models.py +129 -0
- django_agent_runtime/migrations/0003_persistenceconversation_active_branch_id_and_more.py +212 -0
- django_agent_runtime/migrations/0004_add_anonymous_session_id.py +18 -0
- django_agent_runtime/migrations/__init__.py +2 -0
- django_agent_runtime/models/__init__.py +54 -0
- django_agent_runtime/models/base.py +450 -0
- django_agent_runtime/models/concrete.py +146 -0
- django_agent_runtime/persistence/__init__.py +60 -0
- django_agent_runtime/persistence/helpers.py +148 -0
- django_agent_runtime/persistence/models.py +506 -0
- django_agent_runtime/persistence/stores.py +1191 -0
- django_agent_runtime/runtime/__init__.py +23 -0
- django_agent_runtime/runtime/events/__init__.py +65 -0
- django_agent_runtime/runtime/events/base.py +135 -0
- django_agent_runtime/runtime/events/db.py +129 -0
- django_agent_runtime/runtime/events/redis.py +228 -0
- django_agent_runtime/runtime/events/sync.py +140 -0
- django_agent_runtime/runtime/interfaces.py +475 -0
- django_agent_runtime/runtime/llm/__init__.py +91 -0
- django_agent_runtime/runtime/llm/anthropic.py +249 -0
- django_agent_runtime/runtime/llm/litellm_adapter.py +173 -0
- django_agent_runtime/runtime/llm/openai.py +230 -0
- django_agent_runtime/runtime/queue/__init__.py +75 -0
- django_agent_runtime/runtime/queue/base.py +158 -0
- django_agent_runtime/runtime/queue/postgres.py +248 -0
- django_agent_runtime/runtime/queue/redis_streams.py +336 -0
- django_agent_runtime/runtime/queue/sync.py +277 -0
- django_agent_runtime/runtime/registry.py +186 -0
- django_agent_runtime/runtime/runner.py +540 -0
- django_agent_runtime/runtime/tracing/__init__.py +48 -0
- django_agent_runtime/runtime/tracing/langfuse.py +117 -0
- django_agent_runtime/runtime/tracing/noop.py +36 -0
- django_agent_runtime/urls.py +39 -0
- django_agent_runtime-0.3.6.dist-info/METADATA +723 -0
- django_agent_runtime-0.3.6.dist-info/RECORD +55 -0
- django_agent_runtime-0.3.6.dist-info/WHEEL +5 -0
- django_agent_runtime-0.3.6.dist-info/licenses/LICENSE +22 -0
- django_agent_runtime-0.3.6.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,140 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Synchronous event bus implementations.
|
|
3
|
+
|
|
4
|
+
These are for use in sync contexts like management commands, Celery tasks,
|
|
5
|
+
and traditional Django views.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from abc import ABC, abstractmethod
|
|
9
|
+
from typing import Iterator, Optional
|
|
10
|
+
from uuid import UUID
|
|
11
|
+
|
|
12
|
+
from django.db.models import Max
|
|
13
|
+
|
|
14
|
+
from django_agent_runtime.models import AgentEvent, AgentRun
|
|
15
|
+
from django_agent_runtime.runtime.events.base import Event
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class SyncEventBus(ABC):
    """
    Blocking event-bus contract for synchronous code paths.

    Intended for places where no event loop is running: management
    commands, Celery tasks, and classic (non-async) Django views.
    """

    @abstractmethod
    def publish(self, event: Event) -> None:
        """Persist/broadcast a single event."""
        ...

    @abstractmethod
    def get_events(
        self,
        run_id: UUID,
        from_seq: int = 0,
        to_seq: Optional[int] = None,
    ) -> list[Event]:
        """Return the stored events for *run_id* within the given sequence range."""
        ...

    @abstractmethod
    def get_next_seq(self, run_id: UUID) -> int:
        """Return the sequence number the next event for *run_id* should use."""
        ...

    def close(self) -> None:
        """Release any held connections; the default implementation holds none."""
        pass
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class SyncDatabaseEventBus(SyncEventBus):
    """
    Synchronous database-backed event bus implementation.

    All events are persisted to the AgentEvent table; readers poll that
    table by sequence number.
    """

    def publish(self, event: Event) -> None:
        """Persist a single event row for ``event.run_id``."""
        AgentEvent.objects.create(
            run_id=event.run_id,
            seq=event.seq,
            event_type=event.event_type,
            payload=event.payload,
        )

    def get_events(
        self,
        run_id: UUID,
        from_seq: int = 0,
        to_seq: Optional[int] = None,
    ) -> list[Event]:
        """
        Return stored events for ``run_id``, ordered by sequence number.

        Args:
            run_id: Run to read events for.
            from_seq: Inclusive lower bound on ``seq``.
            to_seq: Optional inclusive upper bound on ``seq``.
        """
        queryset = AgentEvent.objects.filter(
            run_id=run_id,
            seq__gte=from_seq,
        )

        if to_seq is not None:
            queryset = queryset.filter(seq__lte=to_seq)

        return [
            Event(
                run_id=e.run_id,
                seq=e.seq,
                event_type=e.event_type,
                payload=e.payload,
                timestamp=e.timestamp,
            )
            for e in queryset.order_by("seq")
        ]

    def get_next_seq(self, run_id: UUID) -> int:
        """Return the next unused sequence number (0 for a run with no events)."""
        result = AgentEvent.objects.filter(run_id=run_id).aggregate(max_seq=Max("seq"))
        max_seq = result["max_seq"]
        # Note: can't use `max_seq or -1` because 0 is falsy!
        if max_seq is None:
            return 0
        return max_seq + 1

    def is_run_complete(self, run_id: UUID) -> bool:
        """Return True when the run is terminal (or missing, so pollers stop)."""
        try:
            run = AgentRun.objects.get(id=run_id)
            return run.is_terminal
        except AgentRun.DoesNotExist:
            # A missing run can never produce more events; treat as complete.
            return True

    def poll_events(
        self,
        run_id: UUID,
        from_seq: int = 0,
    ) -> Iterator[Event]:
        """
        Poll for events (blocking iterator).

        This is a simple polling implementation for sync contexts.
        For real-time streaming, use the async version with WebSockets.

        Yields events in sequence order until the run reaches a terminal
        state, then drains any events published between the last fetch and
        the terminal transition before stopping.
        """
        import time

        current_seq = from_seq
        poll_interval = 0.5

        while True:
            # Get new events
            for event in self.get_events(run_id, from_seq=current_seq):
                yield event
                current_seq = event.seq + 1

            # Check if run is complete
            if self.is_run_complete(run_id):
                # Bug fix: events may have been published after the fetch
                # above but before the run turned terminal. Drain once more
                # so callers never miss the final events of a run.
                for event in self.get_events(run_id, from_seq=current_seq):
                    yield event
                    current_seq = event.seq + 1
                break

            # Poll interval
            time.sleep(poll_interval)
|
|
140
|
+
|
|
@@ -0,0 +1,475 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Core interfaces for the Django Agent Runtime.
|
|
3
|
+
|
|
4
|
+
These interfaces are the stable public API. Everything else can change.
|
|
5
|
+
Agent frameworks (LangGraph, CrewAI, custom) adapt to these interfaces.
|
|
6
|
+
|
|
7
|
+
SEMVER PROTECTED - Breaking changes require major version bump.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from abc import ABC, abstractmethod
|
|
11
|
+
from dataclasses import dataclass, field
|
|
12
|
+
from datetime import datetime
|
|
13
|
+
from enum import Enum
|
|
14
|
+
from typing import Any, Callable, Optional, Protocol, TypedDict
|
|
15
|
+
from uuid import UUID
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class EventVisibility(str, Enum):
    """
    Who gets to see an event in the UI.

    Each member's value is the wire string stored alongside the event.
    """

    INTERNAL = "internal"  # runtime-only traffic (checkpoints, heartbeats); never rendered
    DEBUG = "debug"  # surfaced only when debug mode is on (tool calls, tool results)
    USER = "user"  # always rendered to end users (assistant messages, errors)
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class EventType(str, Enum):
    """
    Canonical event types an agent runtime emits through.

    Every framework adapter must route its output through these types;
    values are the dotted wire strings, grouped by category.
    """

    # --- run lifecycle ---
    RUN_STARTED = "run.started"
    RUN_HEARTBEAT = "run.heartbeat"
    RUN_SUCCEEDED = "run.succeeded"
    RUN_FAILED = "run.failed"
    RUN_CANCELLED = "run.cancelled"
    RUN_TIMED_OUT = "run.timed_out"

    # --- assistant messages ---
    ASSISTANT_DELTA = "assistant.delta"  # optional token-by-token streaming
    ASSISTANT_MESSAGE = "assistant.message"  # one complete message

    # --- tool invocation ---
    TOOL_CALL = "tool.call"
    TOOL_RESULT = "tool.result"

    # --- state persistence ---
    STATE_CHECKPOINT = "state.checkpoint"

    # --- user-visible runtime errors (distinct from the terminal run.failed) ---
    ERROR = "error"
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class Message(TypedDict, total=False):
|
|
61
|
+
"""
|
|
62
|
+
Framework-neutral message format.
|
|
63
|
+
|
|
64
|
+
Compatible with OpenAI, Anthropic, and other providers.
|
|
65
|
+
"""
|
|
66
|
+
|
|
67
|
+
role: str # "system" | "user" | "assistant" | "tool"
|
|
68
|
+
content: str | dict | list # String or structured content
|
|
69
|
+
name: Optional[str] # For tool messages
|
|
70
|
+
tool_call_id: Optional[str] # For tool results
|
|
71
|
+
tool_calls: Optional[list] # For assistant tool calls
|
|
72
|
+
metadata: dict # Additional metadata
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
@dataclass
class RunResult:
    """
    Outcome handed back to the runner when an agent finishes.

    Attributes:
        final_output: Structured result payload.
        final_messages: Messages produced during the run.
        usage: Token counts, costs, and similar accounting.
        artifacts: Produced files, images, and other outputs.
    """

    final_output: dict = field(default_factory=dict)
    final_messages: list[Message] = field(default_factory=list)
    usage: dict = field(default_factory=dict)
    artifacts: dict = field(default_factory=dict)
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
@dataclass
class ErrorInfo:
    """Structured description of why a run failed."""

    type: str  # exception class name
    message: str  # human-readable error text
    stack: str = ""  # formatted traceback, when available
    retriable: bool = True  # whether the runner may retry this run
    details: dict = field(default_factory=dict)  # extra structured context
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
class RunContext(Protocol):
    """
    Execution context handed to an agent runtime.

    The runner supplies the concrete implementation; framework adapters
    use it to read input, emit events, checkpoint state, and observe
    cancellation.
    """

    @property
    def run_id(self) -> UUID:
        """UUID of the run being executed."""
        ...

    @property
    def conversation_id(self) -> Optional[UUID]:
        """Owning conversation's UUID, or None for a standalone run."""
        ...

    @property
    def input_messages(self) -> list[Message]:
        """Messages provided as input to this run."""
        ...

    @property
    def params(self) -> dict:
        """Extra parameters supplied for this run."""
        ...

    @property
    def metadata(self) -> dict:
        """Run metadata (e.g., channel_id, user context)."""
        ...

    @property
    def tool_registry(self) -> "ToolRegistry":
        """Tools this agent is allowed to call."""
        ...

    async def emit(self, event_type: EventType | str, payload: dict) -> None:
        """
        Publish an event on the run's event bus.

        Args:
            event_type: Event type (prefer the EventType enum).
            payload: Event payload data.
        """
        ...

    async def emit_user_message(self, content: str) -> None:
        """
        Publish an assistant message that is always shown to the user.

        Convenience wrapper for emitting assistant messages.

        Args:
            content: The message text to display.
        """
        ...

    async def emit_error(self, error: str, details: dict = None) -> None:
        """
        Publish a user-visible runtime error.

        Distinct from run.failed (the final failure event): this surfaces
        a problem to the user while the run may continue.

        Args:
            error: The error message.
            details: Optional additional error details.
        """
        ...

    async def checkpoint(self, state: dict) -> None:
        """
        Persist a recovery checkpoint.

        Args:
            state: Serializable state snapshot to save.
        """
        ...

    async def get_state(self) -> Optional[dict]:
        """
        Load the most recent checkpoint.

        Returns:
            The last saved state, or None when nothing was checkpointed.
        """
        ...

    def cancelled(self) -> bool:
        """
        Report whether cancellation has been requested.

        Runtimes should poll this between steps and stop promptly.
        """
        ...
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
class AgentRuntime(ABC):
    """
    Base class all agent runtime implementations extend.

    Every runtime declares a unique ``key``; queued runs are routed to
    the runtime whose key matches.
    """

    @property
    @abstractmethod
    def key(self) -> str:
        """
        Stable, unique identifier for this runtime.

        Used to route runs to the correct runtime.
        """
        ...

    @abstractmethod
    async def run(self, ctx: RunContext) -> RunResult:
        """
        Execute a single agent run.

        Args:
            ctx: Runtime context exposing input, tools, and event emission.

        Returns:
            RunResult carrying the final output and messages.

        Raises:
            Exception: On unrecoverable errors (caught by the runner).
        """
        ...

    async def cancel(self, ctx: RunContext) -> None:
        """
        React to a cancellation request while the run is still active.

        The default is a no-op; override for custom cleanup.
        """
        pass

    async def on_error(self, ctx: RunContext, error: Exception) -> Optional[ErrorInfo]:
        """
        Classify an error raised during execution.

        Override to customize handling; the returned ErrorInfo controls
        retry behavior. The default marks every error as retriable.
        """
        info = ErrorInfo(
            type=type(error).__name__,
            message=str(error),
            retriable=True,
        )
        return info
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
@dataclass
class ToolDefinition:
    """Metadata and handler for one tool an agent may invoke."""

    name: str  # unique tool name
    description: str  # shown to the model
    parameters: dict  # JSON Schema describing the arguments
    handler: Callable  # async def handler(**kwargs) -> Any
    has_side_effects: bool = False  # mutates external state when True
    requires_confirmation: bool = False  # needs user approval before running
    metadata: dict = field(default_factory=dict)  # extra tool info
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
class ToolRegistry:
    """
    Per-agent allow-list of callable tools.

    Tools are allow-listed per agent_key for security: an agent may only
    invoke tools explicitly registered here.
    """

    def __init__(self):
        self._tools: dict[str, ToolDefinition] = {}

    def register(self, tool: ToolDefinition) -> None:
        """Add (or replace) a tool under its name."""
        self._tools[tool.name] = tool

    def get(self, name: str) -> Optional[ToolDefinition]:
        """Look up a tool by name, returning None when absent."""
        return self._tools.get(name)

    def list_tools(self) -> list[ToolDefinition]:
        """Return every registered tool."""
        return list(self._tools.values())

    def to_openai_format(self) -> list[dict]:
        """Render the registry as OpenAI function-calling tool specs."""
        specs = []
        for tool in self._tools.values():
            specs.append(
                {
                    "type": "function",
                    "function": {
                        "name": tool.name,
                        "description": tool.description,
                        "parameters": tool.parameters,
                    },
                }
            )
        return specs

    def get_tool_definitions(self) -> list[dict]:
        """Alias for to_openai_format() for backwards compatibility."""
        return self.to_openai_format()

    async def execute(self, name: str, arguments: dict) -> Any:
        """
        Invoke a registered tool.

        Args:
            name: Tool name
            arguments: Keyword arguments passed to the tool handler

        Returns:
            Whatever the tool handler returns

        Raises:
            KeyError: If tool not found
        """
        tool = self._tools.get(name)
        if tool is None:
            raise KeyError(f"Tool not found: {name}")
        return await tool.handler(**arguments)
|
|
327
|
+
|
|
328
|
+
|
|
329
|
+
class LLMClient(ABC):
    """
    Abstract LLM client interface.

    This abstraction keeps the runtime model-agnostic; concrete
    implementations include OpenAIClient, AnthropicClient, and
    LiteLLMClient.
    """

    @abstractmethod
    async def generate(
        self,
        messages: list[Message],
        *,
        model: Optional[str] = None,
        stream: bool = False,
        tools: Optional[list[dict]] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        **kwargs,
    ) -> "LLMResponse":
        """
        Generate a completion from the LLM.

        Args:
            messages: Conversation messages.
            model: Model identifier (provider default when omitted).
            stream: Whether to stream the response.
            tools: Tool definitions in OpenAI format.
            temperature: Sampling temperature.
            max_tokens: Maximum tokens to generate.
            **kwargs: Provider-specific parameters.

        Returns:
            LLMResponse with the message and usage info.
        """
        ...

    @abstractmethod
    async def stream(
        self,
        messages: list[Message],
        *,
        model: Optional[str] = None,
        tools: Optional[list[dict]] = None,
        **kwargs,
    ):
        """
        Stream a completion from the LLM.

        Yields:
            LLMStreamChunk objects carrying incremental deltas.
        """
        ...
|
|
382
|
+
|
|
383
|
+
|
|
384
|
+
@dataclass
class LLMResponse:
    """
    Completed generation returned by an LLM client.

    Attributes:
        message: The assistant message (provider-neutral dict).
        usage: Token accounting (prompt_tokens, completion_tokens, etc.).
        model: Identifier of the model that produced the response.
        finish_reason: Provider-reported stop reason.
        raw_response: Original provider response object, if retained.
    """

    message: Message
    usage: dict = field(default_factory=dict)  # prompt_tokens, completion_tokens, etc.
    model: str = ""
    finish_reason: str = ""
    raw_response: Optional[Any] = None

    @property
    def tool_calls(self) -> Optional[list]:
        """Tool calls from the message wrapped for attribute access, or None."""
        if isinstance(self.message, dict):
            calls = self.message.get("tool_calls")
            if calls:
                # Convert to objects with name, arguments, id attributes
                return [ToolCall(tc) for tc in calls]
        return None

    @property
    def content(self) -> str:
        """Message content; never None."""
        if isinstance(self.message, dict):
            content = self.message.get("content")
            # Bug fix: providers may set "content" to an explicit None (e.g.
            # on pure tool-call turns); `.get("content", "")` leaked that
            # None through a property annotated `-> str`. Normalize to "".
            if content is not None:
                return content
        return ""
|
|
410
|
+
|
|
411
|
+
|
|
412
|
+
class ToolCall:
    """Attribute-style view over a provider tool-call dict."""

    def __init__(self, data: dict):
        self._data = data

    @property
    def id(self) -> str:
        """Provider-assigned call id, or "" when absent."""
        return self._data.get("id", "")

    @property
    def name(self) -> str:
        """Name of the function being called, or "" when absent."""
        return self._data.get("function", {}).get("name", "")

    @property
    def arguments(self) -> dict:
        """Call arguments, JSON-decoded when the provider sent a string."""
        raw = self._data.get("function", {}).get("arguments", "{}")
        if not isinstance(raw, str):
            return raw
        import json

        try:
            return json.loads(raw)
        except json.JSONDecodeError:
            # Malformed provider JSON degrades to empty arguments.
            return {}
|
|
438
|
+
|
|
439
|
+
|
|
440
|
+
@dataclass
class LLMStreamChunk:
    """One incremental piece of a streaming LLM response."""

    delta: str = ""  # text appended by this chunk
    tool_calls: Optional[list] = None  # incremental tool-call data, if any
    finish_reason: Optional[str] = None  # set on the terminal chunk
    usage: Optional[dict] = None  # usage totals, typically on the final chunk
|
|
448
|
+
|
|
449
|
+
|
|
450
|
+
class TraceSink(ABC):
    """
    Observability hook for run traces.

    Concrete sinks include NoopTraceSink, LangfuseTraceSink, and
    OpenTelemetrySink.
    """

    @abstractmethod
    def start_run(self, run_id: UUID, metadata: dict) -> None:
        """Open a trace for the given run."""
        ...

    @abstractmethod
    def log_event(self, run_id: UUID, event_type: str, payload: dict) -> None:
        """Record one event inside an open trace."""
        ...

    @abstractmethod
    def end_run(self, run_id: UUID, outcome: str, metadata: Optional[dict] = None) -> None:
        """Close the trace for a run with its outcome."""
        ...

    def flush(self) -> None:
        """Flush buffered traces; the default implementation buffers nothing."""
        pass
|
|
475
|
+
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LLM client implementations.
|
|
3
|
+
|
|
4
|
+
Provides:
|
|
5
|
+
- LLMClient: Abstract interface (from interfaces.py)
|
|
6
|
+
- OpenAIClient: OpenAI API client
|
|
7
|
+
- AnthropicClient: Anthropic API client
|
|
8
|
+
- LiteLLMClient: LiteLLM adapter (optional)
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from django_agent_runtime.runtime.interfaces import LLMClient, LLMResponse, LLMStreamChunk
|
|
12
|
+
|
|
13
|
+
# Public names re-exported by this package for stable import paths.
__all__ = [
    "LLMClient",
    "LLMResponse",
    "LLMStreamChunk",
    "get_llm_client",
    "OpenAIConfigurationError",
    "AnthropicConfigurationError",
]
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class OpenAIConfigurationError(Exception):
    """Raised when the OpenAI provider is selected without a configured API key."""
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class AnthropicConfigurationError(Exception):
    """Raised when the Anthropic provider is selected without a configured API key."""
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
def get_llm_client(provider: str = None, **kwargs) -> LLMClient:
    """
    Build an LLM client for the configured (or explicitly given) provider.

    Args:
        provider: "openai", "anthropic", "litellm", etc. Falls back to
            MODEL_PROVIDER from the DJANGO_AGENT_RUNTIME settings.
        **kwargs: Provider-specific configuration (e.g., api_key, default_model)

    Returns:
        LLMClient instance

    Raises:
        OpenAIConfigurationError: If OpenAI is selected but API key is not configured
        AnthropicConfigurationError: If Anthropic is selected but API key is not configured
        ValueError: If an unknown provider is specified

    Example:
        # Using Django settings (recommended)
        # In settings.py:
        # DJANGO_AGENT_RUNTIME = {
        #     'MODEL_PROVIDER': 'openai',
        #     'OPENAI_API_KEY': 'sk-...',
        # }
        llm = get_llm_client()

        # Or with explicit API key
        llm = get_llm_client(api_key='sk-...')

        # Or with a different provider
        llm = get_llm_client(provider='anthropic', api_key='sk-ant-...')
    """
    # Imported lazily so this module loads without Django settings configured.
    from django_agent_runtime.conf import runtime_settings

    settings = runtime_settings()
    selected = provider or settings.MODEL_PROVIDER

    if selected == "openai":
        from django_agent_runtime.runtime.llm.openai import OpenAIClient

        return OpenAIClient(**kwargs)

    if selected == "anthropic":
        from django_agent_runtime.runtime.llm.anthropic import AnthropicClient

        return AnthropicClient(**kwargs)

    if selected == "litellm":
        # LiteLLM is optional; require the explicit opt-in flag.
        if not settings.LITELLM_ENABLED:
            raise ValueError("LiteLLM is not enabled in settings")
        from django_agent_runtime.runtime.llm.litellm_adapter import LiteLLMClient

        return LiteLLMClient(**kwargs)

    raise ValueError(
        f"Unknown LLM provider: {selected}\n\n"
        f"Supported providers: 'openai', 'anthropic', 'litellm'\n"
        f"Set MODEL_PROVIDER in your DJANGO_AGENT_RUNTIME settings."
    )
|