fastapi-fullstack 0.1.7__py3-none-any.whl → 0.1.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (71)
  1. {fastapi_fullstack-0.1.7.dist-info → fastapi_fullstack-0.1.15.dist-info}/METADATA +9 -2
  2. {fastapi_fullstack-0.1.7.dist-info → fastapi_fullstack-0.1.15.dist-info}/RECORD +71 -55
  3. fastapi_gen/__init__.py +6 -1
  4. fastapi_gen/cli.py +9 -0
  5. fastapi_gen/config.py +154 -2
  6. fastapi_gen/generator.py +34 -14
  7. fastapi_gen/prompts.py +172 -31
  8. fastapi_gen/template/VARIABLES.md +33 -4
  9. fastapi_gen/template/cookiecutter.json +10 -0
  10. fastapi_gen/template/hooks/post_gen_project.py +87 -2
  11. fastapi_gen/template/{{cookiecutter.project_slug}}/.env.prod.example +9 -0
  12. fastapi_gen/template/{{cookiecutter.project_slug}}/.gitlab-ci.yml +178 -0
  13. fastapi_gen/template/{{cookiecutter.project_slug}}/CLAUDE.md +3 -0
  14. fastapi_gen/template/{{cookiecutter.project_slug}}/README.md +334 -0
  15. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/.env.example +32 -0
  16. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/alembic/env.py +10 -1
  17. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/admin.py +1 -1
  18. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/__init__.py +31 -0
  19. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/crewai_assistant.py +563 -0
  20. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/deepagents_assistant.py +526 -0
  21. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/langchain_assistant.py +4 -3
  22. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/langgraph_assistant.py +371 -0
  23. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/api/routes/v1/agent.py +1472 -0
  24. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/api/routes/v1/oauth.py +3 -7
  25. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/commands/cleanup.py +2 -2
  26. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/commands/seed.py +7 -2
  27. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/core/config.py +44 -7
  28. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/__init__.py +7 -0
  29. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/base.py +42 -0
  30. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/models/conversation.py +262 -1
  31. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/models/item.py +76 -1
  32. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/models/session.py +118 -1
  33. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/models/user.py +158 -1
  34. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/models/webhook.py +185 -3
  35. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/main.py +29 -2
  36. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/repositories/base.py +6 -0
  37. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/repositories/session.py +4 -4
  38. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/services/conversation.py +9 -9
  39. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/services/session.py +6 -6
  40. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/services/webhook.py +7 -7
  41. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/worker/__init__.py +1 -1
  42. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/worker/arq_app.py +165 -0
  43. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/worker/tasks/__init__.py +10 -1
  44. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/pyproject.toml +40 -0
  45. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/tests/api/test_metrics.py +53 -0
  46. fastapi_gen/template/{{cookiecutter.project_slug}}/backend/tests/test_agents.py +2 -0
  47. fastapi_gen/template/{{cookiecutter.project_slug}}/docker-compose.dev.yml +6 -0
  48. fastapi_gen/template/{{cookiecutter.project_slug}}/docker-compose.prod.yml +100 -0
  49. fastapi_gen/template/{{cookiecutter.project_slug}}/docker-compose.yml +39 -0
  50. fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/.env.example +5 -0
  51. fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/components/chat/chat-container.tsx +28 -1
  52. fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/components/chat/index.ts +1 -0
  53. fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/components/chat/message-item.tsx +22 -4
  54. fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/components/chat/message-list.tsx +23 -3
  55. fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/components/chat/tool-approval-dialog.tsx +138 -0
  56. fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/hooks/use-chat.ts +242 -18
  57. fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/hooks/use-local-chat.ts +242 -17
  58. fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/lib/constants.ts +1 -1
  59. fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/types/chat.ts +57 -1
  60. fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/configmap.yaml +63 -0
  61. fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/deployment.yaml +242 -0
  62. fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/ingress.yaml +44 -0
  63. fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/kustomization.yaml +28 -0
  64. fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/namespace.yaml +12 -0
  65. fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/secret.yaml +59 -0
  66. fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/service.yaml +23 -0
  67. fastapi_gen/template/{{cookiecutter.project_slug}}/nginx/nginx.conf +225 -0
  68. fastapi_gen/template/{{cookiecutter.project_slug}}/nginx/ssl/.gitkeep +18 -0
  69. {fastapi_fullstack-0.1.7.dist-info → fastapi_fullstack-0.1.15.dist-info}/WHEEL +0 -0
  70. {fastapi_fullstack-0.1.7.dist-info → fastapi_fullstack-0.1.15.dist-info}/entry_points.txt +0 -0
  71. {fastapi_fullstack-0.1.7.dist-info → fastapi_fullstack-0.1.15.dist-info}/licenses/LICENSE +0 -0
fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/deepagents_assistant.py (new file)
@@ -0,0 +1,526 @@
+ {%- if cookiecutter.enable_ai_agent and cookiecutter.use_deepagents %}
+ """DeepAgents implementation with middleware stacking and human-in-the-loop.
+
+ DeepAgents is a framework for building agentic coding assistants.
+ It uses LangGraph under the hood and comes with built-in tools for:
+ - File operations: ls, read_file, write_file, edit_file, glob, grep
+ - Task management: write_todos, task (subagent spawning)
+ - Shell execution: execute (when sandbox backend is enabled)
+
+ Human-in-the-loop (HITL) support:
+ - Configure tools requiring approval via DEEPAGENTS_INTERRUPT_TOOLS
+ - Allowed decisions: approve, edit, reject
+ - Interrupts are returned via stream/run and can be resumed with decisions
+
+ Configuration via settings:
+ - DEEPAGENTS_SKILLS_PATHS: Comma-separated skill paths
+ - DEEPAGENTS_ENABLE_FILESYSTEM: Enable file tools (default: True)
+ - DEEPAGENTS_ENABLE_EXECUTE: Enable shell execution (default: False)
+ - DEEPAGENTS_ENABLE_TODOS: Enable todo list tool (default: True)
+ - DEEPAGENTS_ENABLE_SUBAGENTS: Enable subagent spawning (default: True)
+ - DEEPAGENTS_INTERRUPT_TOOLS: Tools requiring human approval
+ - DEEPAGENTS_ALLOWED_DECISIONS: Allowed decisions (approve,edit,reject)
+ """
+
+ import logging
+ from typing import Annotated, Any, TypedDict
+
+ from deepagents import create_deep_agent
+ from deepagents.backends import StateBackend
+ from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage
+ from langgraph.checkpoint.memory import MemorySaver
+ from langgraph.graph.message import add_messages
+ from langgraph.types import Command, interrupt
+ {%- if cookiecutter.use_openai %}
+ from langchain_openai import ChatOpenAI
+ {%- endif %}
+ {%- if cookiecutter.use_anthropic %}
+ from langchain_anthropic import ChatAnthropic
+ {%- endif %}
+
+ from app.agents.prompts import DEFAULT_SYSTEM_PROMPT
+ from app.core.config import settings
+
+ logger = logging.getLogger(__name__)
+
+
+ class AgentContext(TypedDict, total=False):
+     """Runtime context for the agent.
+
+     Passed via config parameter to the graph.
+     """
+
+     user_id: str | None
+     user_name: str | None
+     metadata: dict[str, Any]
+
+
+ class AgentState(TypedDict):
+     """State for the DeepAgents agent.
+
+     This is what flows through the agent graph.
+     The messages field uses add_messages reducer to properly
+     append new messages to the conversation history.
+     """
+
+     messages: Annotated[list[BaseMessage], add_messages]
+
+
+ class InterruptData(TypedDict):
+     """Data structure for human-in-the-loop interrupts."""
+
+     action_requests: list[dict[str, Any]]  # List of tool calls pending approval
+     review_configs: list[dict[str, Any]]  # Config for each tool (allowed_decisions)
+
+
+ class Decision(TypedDict, total=False):
+     """Human decision for a tool call."""
+
+     type: str  # "approve", "edit", or "reject"
+     edited_action: dict[str, Any] | None  # For "edit" type: modified tool call
+
+
+ def _parse_skills_paths() -> list[str] | None:
+     """Parse skills paths from settings.
+
+     Returns:
+         List of skill paths or None if not configured.
+     """
+     if not settings.DEEPAGENTS_SKILLS_PATHS:
+         return None
+
+     paths = [p.strip() for p in settings.DEEPAGENTS_SKILLS_PATHS.split(",") if p.strip()]
+     return paths if paths else None
+
+
+ def _parse_interrupt_config() -> dict[str, bool | dict[str, list[str]]] | None:
+     """Parse interrupt_on configuration from settings.
+
+     Returns:
+         Dict mapping tool names to interrupt configs, or None if not configured.
+     """
+     if not settings.DEEPAGENTS_INTERRUPT_TOOLS:
+         return None
+
+     tools = [t.strip() for t in settings.DEEPAGENTS_INTERRUPT_TOOLS.split(",") if t.strip()]
+     if not tools:
+         return None
+
+     # Parse allowed decisions
+     allowed = [d.strip() for d in settings.DEEPAGENTS_ALLOWED_DECISIONS.split(",") if d.strip()]
+     if not allowed:
+         allowed = ["approve", "edit", "reject"]
+
+     # Build interrupt_on config
+     interrupt_on: dict[str, bool | dict[str, list[str]]] = {}
+
+     # Built-in DeepAgents tools
+     builtin_tools = [
+         "ls", "read_file", "write_file", "edit_file", "glob", "grep",
+         "execute", "write_todos", "task"
+     ]
+
+     if "all" in tools:
+         # Interrupt all tools
+         for tool_name in builtin_tools:
+             interrupt_on[tool_name] = {"allowed_decisions": allowed}
+     else:
+         for tool_name in tools:
+             interrupt_on[tool_name] = {"allowed_decisions": allowed}
+
+     return interrupt_on if interrupt_on else None
+
+
+ class DeepAgentsAssistant:
+     """Wrapper for DeepAgents with run() and stream() methods.
+
+     DeepAgents creates a LangGraph-based agent with built-in tools for
+     filesystem operations, task management, and code execution.
+
+     Uses StateBackend (in-memory) for file state management.
+     Skills can be configured via DEEPAGENTS_SKILLS_PATHS setting.
+     Human-in-the-loop via DEEPAGENTS_INTERRUPT_TOOLS setting.
+     """
+
+     def __init__(
+         self,
+         model_name: str | None = None,
+         temperature: float | None = None,
+         system_prompt: str | None = None,
+         skills: list[str] | None = None,
+         interrupt_on: dict[str, bool | dict[str, list[str]]] | None = None,
+     ):
+         """Initialize DeepAgentsAssistant.
+
+         Args:
+             model_name: LLM model name (default from settings.AI_MODEL)
+             temperature: LLM temperature (default from settings.AI_TEMPERATURE)
+             system_prompt: System prompt (default from DEFAULT_SYSTEM_PROMPT)
+             skills: List of skill paths (default from settings.DEEPAGENTS_SKILLS_PATHS)
+             interrupt_on: Dict of tool names to interrupt configs (default from settings)
+         """
+         self.model_name = model_name or settings.AI_MODEL
+         self.temperature = temperature or settings.AI_TEMPERATURE
+         self.system_prompt = system_prompt or DEFAULT_SYSTEM_PROMPT
+         self.skills = skills if skills is not None else _parse_skills_paths()
+         self.interrupt_on = interrupt_on if interrupt_on is not None else _parse_interrupt_config()
+         self._graph = None
+         self._checkpointer = MemorySaver()
+
+     def _create_model(self):
+         """Create the LLM model for DeepAgents."""
+         {%- if cookiecutter.use_openai %}
+         return ChatOpenAI(
+             model=self.model_name,
+             temperature=self.temperature,
+             api_key=settings.OPENAI_API_KEY,
+             streaming=True,
+         )
+         {%- endif %}
+         {%- if cookiecutter.use_anthropic %}
+         return ChatAnthropic(
+             model=self.model_name,
+             temperature=self.temperature,
+             api_key=settings.ANTHROPIC_API_KEY,
+             streaming=True,
+         )
+         {%- endif %}
+
+     @property
+     def graph(self):
+         """Get or create the compiled graph instance.
+
+         The agent is created with:
+         - StateBackend: In-memory file state management
+         - TodoListMiddleware: For task tracking (if enabled)
+         - FilesystemMiddleware: For file operations (if enabled)
+         - SubAgentMiddleware: For spawning subagents (if enabled)
+         - Skills: Loaded from configured paths (if any)
+         - interrupt_on: Human-in-the-loop config (if any)
+         """
+         if self._graph is None:
+             model = self._create_model()
+
+             # Create agent with StateBackend (in-memory)
+             self._graph = create_deep_agent(
+                 model=model,
+                 system_prompt=self.system_prompt,
+                 checkpointer=self._checkpointer,
+                 backend=lambda rt: StateBackend(rt),
+                 skills=self.skills,
+                 interrupt_on=self.interrupt_on,
+             )
+
+             logger.info(
+                 f"DeepAgents initialized with model={self.model_name}, "
+                 f"skills={self.skills}, "
+                 f"interrupt_on={list(self.interrupt_on.keys()) if self.interrupt_on else None}, "
+                 f"filesystem={settings.DEEPAGENTS_ENABLE_FILESYSTEM}, "
+                 f"execute={settings.DEEPAGENTS_ENABLE_EXECUTE}"
+             )
+
+         return self._graph
+
+     @staticmethod
+     def _convert_history(
+         history: list[dict[str, str]] | None,
+     ) -> list[HumanMessage | AIMessage | SystemMessage]:
+         """Convert conversation history to LangChain message format."""
+         messages: list[HumanMessage | AIMessage | SystemMessage] = []
+
+         for msg in history or []:
+             if msg["role"] == "user":
+                 messages.append(HumanMessage(content=msg["content"]))
+             elif msg["role"] == "assistant":
+                 messages.append(AIMessage(content=msg["content"]))
+             elif msg["role"] == "system":
+                 messages.append(SystemMessage(content=msg["content"]))
+
+         return messages
+
+     @staticmethod
+     def extract_interrupt(result: dict[str, Any]) -> InterruptData | None:
+         """Extract interrupt data from agent result if present.
+
+         Args:
+             result: The result from agent.invoke() or final state from stream.
+
+         Returns:
+             InterruptData if interrupted, None otherwise.
+         """
+         if not result.get("__interrupt__"):
+             return None
+
+         interrupt_value = result["__interrupt__"][0].value
+         return InterruptData(
+             action_requests=interrupt_value.get("action_requests", []),
+             review_configs=interrupt_value.get("review_configs", []),
+         )
+
+     async def run(
+         self,
+         user_input: str,
+         history: list[dict[str, str]] | None = None,
+         context: AgentContext | None = None,
+         thread_id: str = "default",
+         files: dict[str, str] | None = None,
+     ) -> tuple[str, list[Any], AgentContext, InterruptData | None]:
+         """Run agent and return the output along with tool call events.
+
+         Args:
+             user_input: User's message.
+             history: Conversation history as list of {"role": "...", "content": "..."}.
+             context: Optional runtime context with user info.
+             thread_id: Thread ID for conversation continuity.
+             files: Optional dict of {path: content} to provide to StateBackend.
+
+         Returns:
+             Tuple of (output_text, tool_events, context, interrupt_data).
+             interrupt_data is None if not interrupted, otherwise contains pending approvals.
+         """
+         messages = self._convert_history(history)
+         messages.append(HumanMessage(content=user_input))
+
+         agent_context: AgentContext = context if context is not None else {}
+
+         logger.info(f"Running DeepAgents with user input: {user_input[:100]}...")
+
+         config = {
+             "configurable": {
+                 "thread_id": thread_id,
+                 **agent_context,
+             }
+         }
+
+         # Prepare input with optional files for StateBackend
+         input_data: dict[str, Any] = {"messages": messages}
+         if files:
+             input_data["files"] = files
+
+         result = await self.graph.ainvoke(input_data, config=config)
+
+         # Check for interrupt
+         interrupt_data = self.extract_interrupt(result)
+         if interrupt_data:
+             logger.info(f"Agent interrupted with {len(interrupt_data['action_requests'])} pending approvals")
+             return "", [], agent_context, interrupt_data
+
+         # Extract the final response and tool events
+         output = ""
+         tool_events: list[Any] = []
+
+         for message in result.get("messages", []):
+             if isinstance(message, AIMessage):
+                 if message.content:
+                     output = message.content if isinstance(message.content, str) else str(message.content)
+                 if hasattr(message, "tool_calls") and message.tool_calls:
+                     tool_events.extend(message.tool_calls)
+
+         logger.info(f"DeepAgents run complete. Output length: {len(output)} chars")
+
+         return output, tool_events, agent_context, None
+
+     async def resume(
+         self,
+         decisions: list[Decision],
+         thread_id: str = "default",
+         context: AgentContext | None = None,
+     ) -> tuple[str, list[Any], AgentContext, InterruptData | None]:
+         """Resume agent execution after human-in-the-loop interrupt.
+
+         Args:
+             decisions: List of decisions for each pending tool call.
+             thread_id: Thread ID (must match the interrupted session).
+             context: Optional runtime context.
+
+         Returns:
+             Tuple of (output_text, tool_events, context, interrupt_data).
+         """
+         agent_context: AgentContext = context if context is not None else {}
+
+         config = {
+             "configurable": {
+                 "thread_id": thread_id,
+                 **agent_context,
+             }
+         }
+
+         logger.info(f"Resuming DeepAgents with {len(decisions)} decisions")
+
+         # Resume with Command
+         result = await self.graph.ainvoke(
+             Command(resume={"decisions": decisions}),
+             config=config
+         )
+
+         # Check for another interrupt
+         interrupt_data = self.extract_interrupt(result)
+         if interrupt_data:
+             logger.info(f"Agent interrupted again with {len(interrupt_data['action_requests'])} pending approvals")
+             return "", [], agent_context, interrupt_data
+
+         # Extract the final response and tool events
+         output = ""
+         tool_events: list[Any] = []
+
+         for message in result.get("messages", []):
+             if isinstance(message, AIMessage):
+                 if message.content:
+                     output = message.content if isinstance(message.content, str) else str(message.content)
+                 if hasattr(message, "tool_calls") and message.tool_calls:
+                     tool_events.extend(message.tool_calls)
+
+         logger.info(f"DeepAgents resume complete. Output length: {len(output)} chars")
+
+         return output, tool_events, agent_context, None
+
+     async def stream(
+         self,
+         user_input: str,
+         history: list[dict[str, str]] | None = None,
+         context: AgentContext | None = None,
+         thread_id: str = "default",
+         files: dict[str, str] | None = None,
+     ):
+         """Stream agent execution with message and state update streaming.
+
+         Args:
+             user_input: User's message.
+             history: Conversation history.
+             context: Optional runtime context.
+             thread_id: Thread ID for conversation continuity.
+             files: Optional dict of {path: content} to provide to StateBackend.
+
+         Yields:
+             Tuples of (stream_mode, data) for streaming responses.
+             - stream_mode="messages": (chunk, metadata) for LLM tokens
+             - stream_mode="updates": state updates after each node
+             - stream_mode="interrupt": InterruptData when human approval needed
+         """
+         messages = self._convert_history(history)
+         messages.append(HumanMessage(content=user_input))
+
+         agent_context: AgentContext = context if context is not None else {}
+
+         config = {
+             "configurable": {
+                 "thread_id": thread_id,
+                 **agent_context,
+             }
+         }
+
+         # Prepare input with optional files for StateBackend
+         input_data: dict[str, Any] = {"messages": messages}
+         if files:
+             input_data["files"] = files
+
+         logger.info(f"Starting DeepAgents stream for user input: {user_input[:100]}...")
+
+         final_state: dict[str, Any] = {}
+
+         async for stream_mode, data in self.graph.astream(
+             input_data,
+             config=config,
+             stream_mode=["messages", "updates"],
+         ):
+             final_state = data if stream_mode == "updates" else final_state
+             yield stream_mode, data
+
+         # Check for interrupt after stream completes
+         # Get the final state to check for interrupts
+         state = await self.graph.aget_state(config)
+         if state.next:  # If there's a next step, we're likely interrupted
+             # Fetch the actual interrupt data
+             result = await self.graph.ainvoke(input_data, config=config)
+             interrupt_data = self.extract_interrupt(result)
+             if interrupt_data:
+                 yield "interrupt", interrupt_data
+
+     async def stream_resume(
+         self,
+         decisions: list[Decision],
+         thread_id: str = "default",
+         context: AgentContext | None = None,
+     ):
+         """Stream agent execution after resuming from interrupt.
+
+         Args:
+             decisions: List of decisions for each pending tool call.
+             thread_id: Thread ID (must match the interrupted session).
+             context: Optional runtime context.
+
+         Yields:
+             Tuples of (stream_mode, data) for streaming responses.
+         """
+         agent_context: AgentContext = context if context is not None else {}
+
+         config = {
+             "configurable": {
+                 "thread_id": thread_id,
+                 **agent_context,
+             }
+         }
+
+         logger.info(f"Streaming resume with {len(decisions)} decisions")
+
+         async for stream_mode, data in self.graph.astream(
+             Command(resume={"decisions": decisions}),
+             config=config,
+             stream_mode=["messages", "updates"],
+         ):
+             yield stream_mode, data
+
+         # Check for another interrupt
+         state = await self.graph.aget_state(config)
+         if state.next:
+             result = await self.graph.ainvoke(
+                 Command(resume={"decisions": decisions}),
+                 config=config
+             )
+             interrupt_data = self.extract_interrupt(result)
+             if interrupt_data:
+                 yield "interrupt", interrupt_data
+
+
+ def get_agent(
+     skills: list[str] | None = None,
+     interrupt_on: dict[str, bool | dict[str, list[str]]] | None = None,
+ ) -> DeepAgentsAssistant:
+     """Factory function to create a DeepAgentsAssistant.
+
+     Args:
+         skills: Optional list of skill paths to override settings.
+         interrupt_on: Optional interrupt config to override settings.
+
+     Returns:
+         Configured DeepAgentsAssistant instance.
+     """
+     return DeepAgentsAssistant(skills=skills, interrupt_on=interrupt_on)
+
+
+ async def run_agent(
+     user_input: str,
+     history: list[dict[str, str]],
+     context: AgentContext | None = None,
+     thread_id: str = "default",
+     files: dict[str, str] | None = None,
+ ) -> tuple[str, list[Any], AgentContext, InterruptData | None]:
+     """Run agent and return the output along with tool call events.
+
+     This is a convenience function for backwards compatibility.
+
+     Args:
+         user_input: User's message.
+         history: Conversation history.
+         context: Optional runtime context.
+         thread_id: Thread ID for conversation continuity.
+         files: Optional dict of {path: content} to provide to StateBackend.
+
+     Returns:
+         Tuple of (output_text, tool_events, context, interrupt_data).
+     """
+     agent = get_agent()
+     return await agent.run(user_input, history, context, thread_id, files)
+ {%- else %}
+ """DeepAgents Assistant agent - not configured."""
+ {%- endif %}
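For orientation, the human-in-the-loop cycle added above can be driven roughly as follows. This is a minimal sketch against the API defined in this file; the thread id, the prompt, and the auto-approval policy are illustrative, and it assumes a project generated with enable_ai_agent and use_deepagents enabled.

    import asyncio

    from app.agents.deepagents_assistant import Decision, get_agent


    async def main() -> None:
        # Interrupt config mirrors what _parse_interrupt_config() builds from
        # DEEPAGENTS_INTERRUPT_TOOLS / DEEPAGENTS_ALLOWED_DECISIONS.
        agent = get_agent(
            interrupt_on={"write_file": {"allowed_decisions": ["approve", "reject"]}}
        )

        output, tool_events, ctx, interrupt_data = await agent.run(
            "Create notes.txt with a short summary",  # illustrative prompt
            history=[],
            thread_id="demo-thread",
        )

        # Loop until the agent finishes without pausing for approval.
        while interrupt_data is not None:
            # Approve every pending tool call; a real caller would surface
            # interrupt_data["action_requests"] to a human first.
            decisions: list[Decision] = [
                {"type": "approve"} for _ in interrupt_data["action_requests"]
            ]
            output, tool_events, ctx, interrupt_data = await agent.resume(
                decisions, thread_id="demo-thread"
            )

        print(output)


    asyncio.run(main())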
fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/langchain_assistant.py
@@ -163,7 +163,7 @@ class LangChainAssistant:
  
          return output, tool_events, agent_context
  
-     def stream(
+     async def stream(
          self,
          user_input: str,
          history: list[dict[str, str]] | None = None,
@@ -186,11 +186,12 @@ class LangChainAssistant:
  
          agent_context: AgentContext = context if context is not None else {}
  
-         yield from self.agent.stream(
+         async for event in self.agent.astream(
              {"messages": messages},
              stream_mode=["messages", "updates"],
              config={"configurable": agent_context} if agent_context else None,
-         )
+         ):
+             yield event
  
  
  def get_agent() -> LangChainAssistant:
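The change above is a sync-to-async conversion: stream() now wraps agent.astream() rather than delegating to agent.stream() with yield from, so callers must consume it with async for. A minimal consumer sketch follows, assuming the (stream_mode, data) tuple shape LangGraph emits when stream_mode=["messages", "updates"] is passed; the generator name and token extraction are illustrative, not part of the template.

    from app.agents.langchain_assistant import get_agent


    async def stream_reply(user_input: str):
        # Hypothetical async generator a route handler might wrap in a
        # streaming response; before this change a plain `for` sufficed.
        agent = get_agent()
        async for stream_mode, data in agent.stream(user_input, history=[]):
            if stream_mode == "messages":
                chunk, _metadata = data  # token chunk plus node metadata
                yield getattr(chunk, "content", "") or ""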