django-agent-runtime 0.3.6 (django_agent_runtime-0.3.6-py3-none-any.whl)
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- django_agent_runtime/__init__.py +25 -0
- django_agent_runtime/admin.py +155 -0
- django_agent_runtime/api/__init__.py +26 -0
- django_agent_runtime/api/permissions.py +109 -0
- django_agent_runtime/api/serializers.py +114 -0
- django_agent_runtime/api/views.py +472 -0
- django_agent_runtime/apps.py +26 -0
- django_agent_runtime/conf.py +241 -0
- django_agent_runtime/examples/__init__.py +10 -0
- django_agent_runtime/examples/langgraph_adapter.py +164 -0
- django_agent_runtime/examples/langgraph_tools.py +179 -0
- django_agent_runtime/examples/simple_chat.py +69 -0
- django_agent_runtime/examples/tool_agent.py +157 -0
- django_agent_runtime/management/__init__.py +2 -0
- django_agent_runtime/management/commands/__init__.py +2 -0
- django_agent_runtime/management/commands/runagent.py +419 -0
- django_agent_runtime/migrations/0001_initial.py +117 -0
- django_agent_runtime/migrations/0002_persistence_models.py +129 -0
- django_agent_runtime/migrations/0003_persistenceconversation_active_branch_id_and_more.py +212 -0
- django_agent_runtime/migrations/0004_add_anonymous_session_id.py +18 -0
- django_agent_runtime/migrations/__init__.py +2 -0
- django_agent_runtime/models/__init__.py +54 -0
- django_agent_runtime/models/base.py +450 -0
- django_agent_runtime/models/concrete.py +146 -0
- django_agent_runtime/persistence/__init__.py +60 -0
- django_agent_runtime/persistence/helpers.py +148 -0
- django_agent_runtime/persistence/models.py +506 -0
- django_agent_runtime/persistence/stores.py +1191 -0
- django_agent_runtime/runtime/__init__.py +23 -0
- django_agent_runtime/runtime/events/__init__.py +65 -0
- django_agent_runtime/runtime/events/base.py +135 -0
- django_agent_runtime/runtime/events/db.py +129 -0
- django_agent_runtime/runtime/events/redis.py +228 -0
- django_agent_runtime/runtime/events/sync.py +140 -0
- django_agent_runtime/runtime/interfaces.py +475 -0
- django_agent_runtime/runtime/llm/__init__.py +91 -0
- django_agent_runtime/runtime/llm/anthropic.py +249 -0
- django_agent_runtime/runtime/llm/litellm_adapter.py +173 -0
- django_agent_runtime/runtime/llm/openai.py +230 -0
- django_agent_runtime/runtime/queue/__init__.py +75 -0
- django_agent_runtime/runtime/queue/base.py +158 -0
- django_agent_runtime/runtime/queue/postgres.py +248 -0
- django_agent_runtime/runtime/queue/redis_streams.py +336 -0
- django_agent_runtime/runtime/queue/sync.py +277 -0
- django_agent_runtime/runtime/registry.py +186 -0
- django_agent_runtime/runtime/runner.py +540 -0
- django_agent_runtime/runtime/tracing/__init__.py +48 -0
- django_agent_runtime/runtime/tracing/langfuse.py +117 -0
- django_agent_runtime/runtime/tracing/noop.py +36 -0
- django_agent_runtime/urls.py +39 -0
- django_agent_runtime-0.3.6.dist-info/METADATA +723 -0
- django_agent_runtime-0.3.6.dist-info/RECORD +55 -0
- django_agent_runtime-0.3.6.dist-info/WHEEL +5 -0
- django_agent_runtime-0.3.6.dist-info/licenses/LICENSE +22 -0
- django_agent_runtime-0.3.6.dist-info/top_level.txt +1 -0
django_agent_runtime/conf.py
@@ -0,0 +1,241 @@
+"""
+Configuration management for django_agent_runtime.
+
+All settings are namespaced under DJANGO_AGENT_RUNTIME in Django settings.
+This module provides defaults and validation.
+"""
+
+import os
+from dataclasses import dataclass, field
+from typing import Any, Callable, Optional
+
+from django.conf import settings
+
+
+@dataclass
+class AgentRuntimeSettings:
+    """
+    Settings for the Django Agent Runtime.
+
+    All settings can be overridden via DJANGO_AGENT_RUNTIME dict in Django settings.
+    """
+
+    # Queue configuration
+    QUEUE_BACKEND: str = "postgres"  # "postgres" | "redis_streams"
+    EVENT_BUS_BACKEND: str = "db"  # "redis" | "db"
+    REDIS_URL: Optional[str] = None
+
+    # Lease and timeout configuration
+    LEASE_TTL_SECONDS: int = 30
+    RUN_TIMEOUT_SECONDS: int = 900  # 15 minutes
+    STEP_TIMEOUT_SECONDS: int = 120  # 2 minutes per LLM/tool call
+    HEARTBEAT_INTERVAL_SECONDS: int = 10
+
+    # Retry configuration
+    DEFAULT_MAX_ATTEMPTS: int = 3
+    RETRY_BACKOFF_BASE: float = 2.0
+    RETRY_BACKOFF_MAX: int = 300  # 5 minutes max backoff
+
+    # Concurrency
+    DEFAULT_PROCESSES: int = 1
+    DEFAULT_CONCURRENCY: int = 10  # async tasks per process
+
+    # Streaming
+    ENABLE_SSE: bool = True
+    ENABLE_CHANNELS: bool = False  # Django Channels (optional)
+    SSE_KEEPALIVE_SECONDS: int = 15
+
+    # Event persistence
+    PERSIST_TOKEN_DELTAS: bool = False  # Token deltas go to Redis only by default
+    EVENT_TTL_SECONDS: int = 3600 * 6  # 6 hours in Redis
+
+    # LLM configuration
+    MODEL_PROVIDER: str = "openai"  # "openai" | "anthropic" | "litellm" | ...
+    LITELLM_ENABLED: bool = False
+    DEFAULT_MODEL: str = "gpt-4o"
+
+    # API Keys - can be set here or via environment variables
+    # Priority: 1) Explicit setting here, 2) Environment variable
+    OPENAI_API_KEY: Optional[str] = None
+    ANTHROPIC_API_KEY: Optional[str] = None
+
+    # Tracing/observability
+    LANGFUSE_ENABLED: bool = False
+    LANGFUSE_PUBLIC_KEY: Optional[str] = None
+    LANGFUSE_SECRET_KEY: Optional[str] = None
+    LANGFUSE_HOST: Optional[str] = None
+
+    # Plugin discovery
+    RUNTIME_REGISTRY: list = field(default_factory=list)  # Dotted paths to register functions
+
+    # Authorization hooks (dotted paths to callables)
+    AUTHZ_HOOK: Optional[str] = None  # (user, action, run) -> bool
+    QUOTA_HOOK: Optional[str] = None  # (user, agent_key) -> bool
+
+    # Completion callback hook (dotted path to callable)
+    # Called when a run completes successfully: (run_id: str, output: dict) -> None
+    RUN_COMPLETED_HOOK: Optional[str] = None
+
+    # Model customization (for swappable models pattern)
+    RUN_MODEL: Optional[str] = None  # e.g., "myapp.MyAgentRun"
+    CONVERSATION_MODEL: Optional[str] = None
+
+    # Anonymous session model (optional)
+    # Set to your model path, e.g., "accounts.AnonymousSession"
+    # Model must have: token field, is_expired property
+    ANONYMOUS_SESSION_MODEL: Optional[str] = None
+
+    # Event visibility configuration
+    # Controls which events are shown to users in the UI
+    # Levels: "internal" (never shown), "debug" (shown in debug mode), "user" (always shown)
+    EVENT_VISIBILITY: dict = field(default_factory=lambda: {
+        # Lifecycle events
+        "run.started": "internal",
+        "run.heartbeat": "internal",
+        "run.succeeded": "internal",
+        "run.failed": "user",  # Always show errors
+        "run.cancelled": "user",
+        "run.timed_out": "user",
+        # Message events
+        "assistant.delta": "user",  # Token streaming
+        "assistant.message": "user",  # Complete messages
+        # Tool events
+        "tool.call": "debug",
+        "tool.result": "debug",
+        # State events
+        "state.checkpoint": "internal",
+        # Error events
+        "error": "user",  # Runtime errors always shown
+    })
+
+    # When True, 'debug' visibility events become visible to UI
+    DEBUG_MODE: bool = False
+
+    def __post_init__(self):
+        """Validate settings after initialization."""
+        valid_queue_backends = {"postgres", "redis_streams"}
+        if self.QUEUE_BACKEND not in valid_queue_backends:
+            raise ValueError(
+                f"QUEUE_BACKEND must be one of {valid_queue_backends}, got {self.QUEUE_BACKEND}"
+            )
+
+        valid_event_backends = {"redis", "db"}
+        if self.EVENT_BUS_BACKEND not in valid_event_backends:
+            raise ValueError(
+                f"EVENT_BUS_BACKEND must be one of {valid_event_backends}, got {self.EVENT_BUS_BACKEND}"
+            )
+
+        if self.QUEUE_BACKEND == "redis_streams" and not self.REDIS_URL:
+            raise ValueError("REDIS_URL is required when using redis_streams queue backend")
+
+        if self.EVENT_BUS_BACKEND == "redis" and not self.REDIS_URL:
+            raise ValueError("REDIS_URL is required when using redis event bus backend")
+
+    def get_openai_api_key(self) -> Optional[str]:
+        """
+        Get OpenAI API key with fallback to environment variable.
+
+        Priority:
+        1. OPENAI_API_KEY in DJANGO_AGENT_RUNTIME settings
+        2. OPENAI_API_KEY environment variable
+
+        Returns:
+            API key string or None if not configured.
+        """
+        if self.OPENAI_API_KEY:
+            return self.OPENAI_API_KEY
+        return os.environ.get("OPENAI_API_KEY")
+
+    def get_anthropic_api_key(self) -> Optional[str]:
+        """
+        Get Anthropic API key with fallback to environment variable.
+
+        Priority:
+        1. ANTHROPIC_API_KEY in DJANGO_AGENT_RUNTIME settings
+        2. ANTHROPIC_API_KEY environment variable
+
+        Returns:
+            API key string or None if not configured.
+        """
+        if self.ANTHROPIC_API_KEY:
+            return self.ANTHROPIC_API_KEY
+        return os.environ.get("ANTHROPIC_API_KEY")
+
+
+def get_settings() -> AgentRuntimeSettings:
+    """
+    Get the agent runtime settings, merging defaults with user overrides.
+
+    Returns:
+        AgentRuntimeSettings instance with all configuration.
+    """
+    user_settings = getattr(settings, "DJANGO_AGENT_RUNTIME", {})
+
+    # Build settings from defaults + overrides
+    return AgentRuntimeSettings(**user_settings)
+
+
+def get_hook(hook_path: Optional[str]) -> Optional[Callable]:
+    """
+    Import and return a hook function from a dotted path.
+
+    Args:
+        hook_path: Dotted path like "myapp.hooks.check_auth"
+
+    Returns:
+        The callable, or None if hook_path is None.
+    """
+    if not hook_path:
+        return None
+
+    from django.utils.module_loading import import_string
+
+    return import_string(hook_path)
+
+
+# Singleton instance (lazy-loaded)
+_settings_instance: Optional[AgentRuntimeSettings] = None
+
+
+def runtime_settings() -> AgentRuntimeSettings:
+    """Get the cached settings instance."""
+    global _settings_instance
+    if _settings_instance is None:
+        _settings_instance = get_settings()
+    return _settings_instance
+
+
+def reset_settings():
+    """Reset cached settings (useful for testing)."""
+    global _settings_instance
+    _settings_instance = None
+
+
+def get_event_visibility(event_type: str) -> tuple[str, bool]:
+    """
+    Get the visibility level and ui_visible flag for an event type.
+
+    Args:
+        event_type: The event type string (e.g., "run.started", "assistant.message")
+
+    Returns:
+        Tuple of (visibility_level, ui_visible)
+        - visibility_level: "internal", "debug", or "user"
+        - ui_visible: True if the event should be shown in UI
+    """
+    settings = runtime_settings()
+    visibility_map = settings.EVENT_VISIBILITY
+    debug_mode = settings.DEBUG_MODE
+
+    # Get visibility level from config, default to "user" for unknown events
+    visibility_level = visibility_map.get(event_type, "user")
+
+    # Determine if visible in UI
+    if visibility_level == "internal":
+        ui_visible = False
+    elif visibility_level == "debug":
+        ui_visible = debug_mode
+    else:  # "user"
+        ui_visible = True
+
+    return visibility_level, ui_visible
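The module above is the whole configuration surface: defaults live on the dataclass, overrides come from the DJANGO_AGENT_RUNTIME dict, and __post_init__ validates backend combinations. A minimal sketch of an override (key names are taken from AgentRuntimeSettings above; the Redis URL is a placeholder):

# settings.py -- hedged example; any key not listed keeps its dataclass default.
DJANGO_AGENT_RUNTIME = {
    "QUEUE_BACKEND": "redis_streams",          # requires REDIS_URL per __post_init__
    "EVENT_BUS_BACKEND": "redis",
    "REDIS_URL": "redis://localhost:6379/0",   # placeholder URL
    "DEBUG_MODE": True,                        # makes "debug"-level events UI-visible
}

# Anywhere in application code:
from django_agent_runtime.conf import runtime_settings, get_event_visibility

cfg = runtime_settings()                             # cached singleton
level, visible = get_event_visibility("tool.call")   # ("debug", True) with DEBUG_MODE on

Because runtime_settings() caches a singleton, tests that change DJANGO_AGENT_RUNTIME should call reset_settings() to force a re-read.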
django_agent_runtime/examples/langgraph_adapter.py
@@ -0,0 +1,164 @@
+"""
+LangGraph adapter for django_agent_runtime.
+
+This example shows how to integrate LangGraph agents with the runtime.
+LangGraph provides a powerful graph-based approach to building agents
+with state management, branching, and cycles.
+
+Requirements:
+    pip install langgraph langchain-openai
+
+Usage:
+    1. Add to RUNTIME_REGISTRY in settings:
+       'RUNTIME_REGISTRY': ['django_agent_runtime.examples.langgraph_adapter:register']
+
+    2. Create a run with agent_key="langgraph-agent"
+
+Example LangGraph agent structure:
+    - StateGraph with nodes for different agent steps
+    - Conditional edges for routing
+    - Checkpointing for state persistence
+"""
+
+from typing import Any, TypedDict, Annotated, Sequence
+import operator
+
+from django_agent_runtime.runtime.interfaces import (
+    AgentRuntime,
+    RunContext,
+    RunResult,
+    EventType,
+)
+from django_agent_runtime.runtime.registry import register_runtime
+
+
+class AgentState(TypedDict):
+    """State for the LangGraph agent."""
+    messages: Annotated[Sequence[dict], operator.add]
+    next_step: str
+    iteration: int
+
+
+class LangGraphRuntime(AgentRuntime):
+    """
+    Runtime adapter for LangGraph agents.
+
+    This adapter:
+    - Wraps a LangGraph StateGraph
+    - Emits events for each node execution
+    - Supports checkpointing via RunContext
+    - Handles cancellation between steps
+    """
+
+    MAX_ITERATIONS = 20
+
+    @property
+    def key(self) -> str:
+        return "langgraph-agent"
+
+    async def run(self, ctx: RunContext) -> RunResult:
+        """Execute the LangGraph agent."""
+        try:
+            from langgraph.graph import StateGraph, END
+            from langchain_openai import ChatOpenAI
+        except ImportError:
+            raise ImportError(
+                "LangGraph integration requires: pip install langgraph langchain-openai"
+            )
+
+        # Build the graph
+        graph = self._build_graph()
+        app = graph.compile()
+
+        # Initialize state
+        state: AgentState = {
+            "messages": ctx.input_messages,
+            "next_step": "agent",
+            "iteration": 0,
+        }
+
+        # Run the graph
+        total_usage = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
+
+        async for event in app.astream(state):
+            # Check for cancellation
+            if ctx.cancelled():
+                await ctx.emit(EventType.RUN_CANCELLED, {"reason": "User requested"})
+                return RunResult()
+
+            # Emit step event
+            for node_name, node_output in event.items():
+                await ctx.emit(EventType.STEP_COMPLETED, {
+                    "node": node_name,
+                    "output": node_output,
+                })
+
+                # Checkpoint after each step
+                await ctx.checkpoint({
+                    "node": node_name,
+                    "state": node_output,
+                })
+
+                # Update state
+                if isinstance(node_output, dict):
+                    state.update(node_output)
+
+        # Extract final response
+        final_messages = state.get("messages", [])
+        final_output = {}
+
+        if final_messages:
+            last_message = final_messages[-1]
+            if isinstance(last_message, dict):
+                final_output = {"response": last_message.get("content", "")}
+                await ctx.emit(EventType.ASSISTANT_MESSAGE, last_message)
+
+        return RunResult(
+            final_output=final_output,
+            final_messages=final_messages,
+            usage=total_usage,
+        )
+
+    def _build_graph(self):
+        """Build the LangGraph StateGraph."""
+        from langgraph.graph import StateGraph, END
+        from langchain_openai import ChatOpenAI
+
+        # Create the LLM
+        llm = ChatOpenAI(model="gpt-4o", temperature=0)
+
+        # Define nodes
+        async def agent_node(state: AgentState) -> dict:
+            """Main agent node - calls the LLM."""
+            messages = state["messages"]
+            response = await llm.ainvoke(messages)
+
+            return {
+                "messages": [{"role": "assistant", "content": response.content}],
+                "next_step": "end",
+                "iteration": state["iteration"] + 1,
+            }
+
+        async def should_continue(state: AgentState) -> str:
+            """Determine if we should continue or end."""
+            if state["iteration"] >= self.MAX_ITERATIONS:
+                return "end"
+            return state.get("next_step", "end")
+
+        # Build graph
+        graph = StateGraph(AgentState)
+        graph.add_node("agent", agent_node)
+        graph.set_entry_point("agent")
+        graph.add_conditional_edges(
+            "agent",
+            should_continue,
+            {"end": END, "agent": "agent"},
+        )
+
+        return graph
+
+
+def register():
+    """Register the LangGraph runtime."""
+    register_runtime(LangGraphRuntime())
+
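Outside the worker, the adapter's graph can be exercised on its own. A minimal sketch of a driver (hypothetical, not part of the package): it bypasses RunContext, calls the private _build_graph helper purely for illustration, and assumes a configured Django project plus an OPENAI_API_KEY in the environment for langchain-openai.

import asyncio
from django_agent_runtime.examples.langgraph_adapter import LangGraphRuntime

async def main():
    runtime = LangGraphRuntime()
    app = runtime._build_graph().compile()  # same StateGraph that run() compiles
    state = {
        "messages": [{"role": "user", "content": "Hello!"}],
        "next_step": "agent",
        "iteration": 0,
    }
    async for step in app.astream(state):   # one item per executed node
        print(step)

asyncio.run(main())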
django_agent_runtime/examples/langgraph_tools.py
@@ -0,0 +1,179 @@
+"""
+LangGraph agent with tools example.
+
+This demonstrates a more complete LangGraph agent that:
+- Uses tools for external actions
+- Has a ReAct-style reasoning loop
+- Emits detailed events for UI streaming
+
+Requirements:
+    pip install langgraph langchain-openai langchain-core
+
+Usage:
+    1. Add to RUNTIME_REGISTRY in settings:
+       'RUNTIME_REGISTRY': ['django_agent_runtime.examples.langgraph_tools:register']
+
+    2. Create a run with agent_key="langgraph-tools-agent"
+"""
+
+from typing import Any, TypedDict, Annotated, Sequence, Literal
+import operator
+import json
+
+from django_agent_runtime.runtime.interfaces import (
+    AgentRuntime,
+    RunContext,
+    RunResult,
+    EventType,
+)
+from django_agent_runtime.runtime.registry import register_runtime
+
+
+class ToolsAgentState(TypedDict):
+    """State for the tools agent."""
+    messages: Annotated[Sequence[dict], operator.add]
+    tool_calls: list[dict]
+    iteration: int
+
+
+class LangGraphToolsRuntime(AgentRuntime):
+    """
+    LangGraph agent with tool calling capabilities.
+
+    Implements a ReAct-style loop:
+    1. Agent decides what to do
+    2. If tool call needed, execute tool
+    3. Feed result back to agent
+    4. Repeat until done
+    """
+
+    MAX_ITERATIONS = 10
+
+    @property
+    def key(self) -> str:
+        return "langgraph-tools-agent"
+
+    async def run(self, ctx: RunContext) -> RunResult:
+        """Execute the tools agent."""
+        try:
+            from langgraph.graph import StateGraph, END
+            from langchain_openai import ChatOpenAI
+            from langchain_core.messages import HumanMessage, AIMessage, ToolMessage
+        except ImportError:
+            raise ImportError(
+                "LangGraph tools integration requires: "
+                "pip install langgraph langchain-openai langchain-core"
+            )
+
+        # Define tools
+        tools = self._get_tools()
+
+        # Create LLM with tools
+        llm = ChatOpenAI(model="gpt-4o", temperature=0).bind_tools(tools)
+
+        # Build and compile graph
+        graph = self._build_graph(llm, tools, ctx)
+        app = graph.compile()
+
+        # Initialize state
+        state: ToolsAgentState = {
+            "messages": ctx.input_messages,
+            "tool_calls": [],
+            "iteration": 0,
+        }
+
+        # Run the graph
+        final_state = None
+        async for event in app.astream(state):
+            if ctx.cancelled():
+                return RunResult()
+
+            for node_name, node_output in event.items():
+                final_state = node_output
+                await ctx.checkpoint({"node": node_name, "iteration": state["iteration"]})
+
+        # Extract result
+        messages = final_state.get("messages", []) if final_state else []
+        final_content = ""
+        if messages:
+            last = messages[-1]
+            final_content = last.get("content", "") if isinstance(last, dict) else str(last)
+
+        await ctx.emit(EventType.ASSISTANT_MESSAGE, {
+            "role": "assistant",
+            "content": final_content,
+        })
+
+        return RunResult(
+            final_output={"response": final_content},
+            final_messages=messages,
+        )
+
+    def _get_tools(self) -> list:
+        """Define available tools."""
+        from langchain_core.tools import tool
+
+        @tool
+        def search(query: str) -> str:
+            """Search for information."""
+            # Mock search - replace with real implementation
+            return f"Search results for: {query}"
+
+        @tool
+        def calculate(expression: str) -> str:
+            """Evaluate a math expression."""
+            try:
+                result = eval(expression, {"__builtins__": {}}, {})
+                return str(result)
+            except Exception as e:
+                return f"Error: {e}"
+
+        return [search, calculate]
+
+    def _build_graph(self, llm, tools, ctx: RunContext):
+        """Build the ReAct-style graph."""
+        from langgraph.graph import StateGraph, END
+        from langchain_core.messages import ToolMessage
+
+        tool_map = {t.name: t for t in tools}
+
+        async def agent_node(state: ToolsAgentState) -> dict:
+            """Call the LLM."""
+            response = await llm.ainvoke(state["messages"])
+            tool_calls = getattr(response, "tool_calls", [])
+
+            return {
+                "messages": [response],
+                "tool_calls": tool_calls,
+                "iteration": state["iteration"] + 1,
+            }
+
+        async def tool_node(state: ToolsAgentState) -> dict:
+            """Execute tool calls."""
+            results = []
+            for tc in state["tool_calls"]:
+                tool = tool_map.get(tc["name"])
+                if tool:
+                    result = await tool.ainvoke(tc["args"])
+                    results.append(ToolMessage(content=str(result), tool_call_id=tc["id"]))
+            return {"messages": results, "tool_calls": []}
+
+        def should_continue(state: ToolsAgentState) -> Literal["tools", "end"]:
+            if state["iteration"] >= self.MAX_ITERATIONS:
+                return "end"
+            return "tools" if state["tool_calls"] else "end"
+
+        graph = StateGraph(ToolsAgentState)
+        graph.add_node("agent", agent_node)
+        graph.add_node("tools", tool_node)
+        graph.set_entry_point("agent")
+        graph.add_conditional_edges("agent", should_continue, {"tools": "tools", "end": END})
+        graph.add_edge("tools", "agent")
+
+        return graph
+
+
+def register():
    """Register the LangGraph tools runtime."""
+    register_runtime(LangGraphToolsRuntime())
+
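The calculate tool above evaluates expressions with eval under empty builtins, which narrows but does not eliminate the attack surface. A hedged alternative sketch (safe_calculate is a hypothetical helper, not part of the package) that walks the AST and permits only numeric literals and arithmetic operators:

import ast
import operator as op

# Whitelisted binary/unary operators for arithmetic expressions.
_OPS = {
    ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
    ast.Div: op.truediv, ast.Pow: op.pow, ast.Mod: op.mod,
    ast.USub: op.neg, ast.UAdd: op.pos,
}

def safe_calculate(expression: str) -> str:
    """Evaluate +, -, *, /, **, % over numeric literals only."""
    def _eval(node: ast.AST):
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
            return _OPS[type(node.op)](_eval(node.operand))
        raise ValueError("Unsupported expression")
    try:
        return str(_eval(ast.parse(expression, mode="eval").body))
    except Exception as e:  # mirror the original tool's error contract
        return f"Error: {e}"

Note this still allows expensive exponentiation; a production tool would also cap operand sizes.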
django_agent_runtime/examples/simple_chat.py
@@ -0,0 +1,69 @@
+"""
+Simple chat agent example.
+
+This demonstrates a basic agent that:
+- Takes user messages
+- Calls an LLM
+- Returns the response
+
+Usage:
+    1. Add to RUNTIME_REGISTRY in settings:
+       'RUNTIME_REGISTRY': ['django_agent_runtime.examples.simple_chat:register']
+
+    2. Create a run with agent_key="simple-chat"
+"""
+
+from django_agent_runtime.runtime.interfaces import (
+    AgentRuntime,
+    RunContext,
+    RunResult,
+    EventType,
+)
+from django_agent_runtime.runtime.registry import register_runtime
+from django_agent_runtime.runtime.llm import get_llm_client
+
+
+class SimpleChatRuntime(AgentRuntime):
+    """
+    A simple chat agent that forwards messages to an LLM.
+
+    This is the most basic agent - no tools, no state, just chat.
+    """
+
+    @property
+    def key(self) -> str:
+        return "simple-chat"
+
+    async def run(self, ctx: RunContext) -> RunResult:
+        """Execute the chat agent."""
+        # Get LLM client
+        llm = get_llm_client()
+
+        # Check for cancellation
+        if ctx.cancelled():
+            return RunResult()
+
+        # Call LLM
+        response = await llm.generate(
+            messages=ctx.input_messages,
+            **ctx.params,
+        )
+
+        # Emit the assistant message
+        await ctx.emit(EventType.ASSISTANT_MESSAGE, {
+            "content": response.message.get("content", ""),
+            "role": "assistant",
+        })
+
+        # Return result
+        return RunResult(
+            final_output={"response": response.message.get("content", "")},
+            final_messages=[response.message],
+            usage=response.usage,
+        )
+
+
+def register():
+    """Register the simple chat runtime."""
+    register_runtime(SimpleChatRuntime())
+
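Putting the pieces together, a hedged wiring sketch: register the example agent in Django settings, then start a worker with the runagent management command listed in the file tree above (its exact flags are not shown in this diff).

# settings.py -- hypothetical project settings; keys mirror AgentRuntimeSettings in conf.py.
DJANGO_AGENT_RUNTIME = {
    "RUNTIME_REGISTRY": [
        "django_agent_runtime.examples.simple_chat:register",
    ],
    "MODEL_PROVIDER": "openai",
    "DEFAULT_MODEL": "gpt-4o",
}

With a worker running (python manage.py runagent), a run created with agent_key="simple-chat" should be picked up from the queue, and its assistant.message event is UI-visible under the default EVENT_VISIBILITY map in conf.py.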