fastapi-fullstack 0.1.7__py3-none-any.whl → 0.1.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fastapi_fullstack-0.1.7.dist-info → fastapi_fullstack-0.1.15.dist-info}/METADATA +9 -2
- {fastapi_fullstack-0.1.7.dist-info → fastapi_fullstack-0.1.15.dist-info}/RECORD +71 -55
- fastapi_gen/__init__.py +6 -1
- fastapi_gen/cli.py +9 -0
- fastapi_gen/config.py +154 -2
- fastapi_gen/generator.py +34 -14
- fastapi_gen/prompts.py +172 -31
- fastapi_gen/template/VARIABLES.md +33 -4
- fastapi_gen/template/cookiecutter.json +10 -0
- fastapi_gen/template/hooks/post_gen_project.py +87 -2
- fastapi_gen/template/{{cookiecutter.project_slug}}/.env.prod.example +9 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/.gitlab-ci.yml +178 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/CLAUDE.md +3 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/README.md +334 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/.env.example +32 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/alembic/env.py +10 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/admin.py +1 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/__init__.py +31 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/crewai_assistant.py +563 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/deepagents_assistant.py +526 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/langchain_assistant.py +4 -3
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/langgraph_assistant.py +371 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/api/routes/v1/agent.py +1472 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/api/routes/v1/oauth.py +3 -7
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/commands/cleanup.py +2 -2
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/commands/seed.py +7 -2
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/core/config.py +44 -7
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/__init__.py +7 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/base.py +42 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/models/conversation.py +262 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/models/item.py +76 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/models/session.py +118 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/models/user.py +158 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/db/models/webhook.py +185 -3
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/main.py +29 -2
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/repositories/base.py +6 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/repositories/session.py +4 -4
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/services/conversation.py +9 -9
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/services/session.py +6 -6
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/services/webhook.py +7 -7
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/worker/__init__.py +1 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/worker/arq_app.py +165 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/worker/tasks/__init__.py +10 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/pyproject.toml +40 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/tests/api/test_metrics.py +53 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/backend/tests/test_agents.py +2 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/docker-compose.dev.yml +6 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/docker-compose.prod.yml +100 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/docker-compose.yml +39 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/.env.example +5 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/components/chat/chat-container.tsx +28 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/components/chat/index.ts +1 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/components/chat/message-item.tsx +22 -4
- fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/components/chat/message-list.tsx +23 -3
- fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/components/chat/tool-approval-dialog.tsx +138 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/hooks/use-chat.ts +242 -18
- fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/hooks/use-local-chat.ts +242 -17
- fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/lib/constants.ts +1 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/frontend/src/types/chat.ts +57 -1
- fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/configmap.yaml +63 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/deployment.yaml +242 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/ingress.yaml +44 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/kustomization.yaml +28 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/namespace.yaml +12 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/secret.yaml +59 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/kubernetes/service.yaml +23 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/nginx/nginx.conf +225 -0
- fastapi_gen/template/{{cookiecutter.project_slug}}/nginx/ssl/.gitkeep +18 -0
- {fastapi_fullstack-0.1.7.dist-info → fastapi_fullstack-0.1.15.dist-info}/WHEEL +0 -0
- {fastapi_fullstack-0.1.7.dist-info → fastapi_fullstack-0.1.15.dist-info}/entry_points.txt +0 -0
- {fastapi_fullstack-0.1.7.dist-info → fastapi_fullstack-0.1.15.dist-info}/licenses/LICENSE +0 -0
fastapi_gen/template/{{cookiecutter.project_slug}}/backend/app/agents/langgraph_assistant.py (new file, +371 lines):

```diff
@@ -0,0 +1,371 @@
+{%- if cookiecutter.enable_ai_agent and cookiecutter.use_langgraph %}
+"""LangGraph ReAct Agent implementation.
+
+A simple ReAct (Reasoning + Acting) agent built with LangGraph.
+Uses a graph-based architecture with conditional edges for tool execution.
+"""
+
+import logging
+from typing import Annotated, Any, Literal, TypedDict
+
+from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage
+from langchain_core.tools import tool
+from langgraph.checkpoint.memory import MemorySaver
+from langgraph.graph import END, START, StateGraph
+from langgraph.graph.message import add_messages
+{%- if cookiecutter.use_openai %}
+from langchain_openai import ChatOpenAI
+{%- endif %}
+{%- if cookiecutter.use_anthropic %}
+from langchain_anthropic import ChatAnthropic
+{%- endif %}
+
+from app.agents.prompts import DEFAULT_SYSTEM_PROMPT
+from app.agents.tools import get_current_datetime
+from app.core.config import settings
+
+logger = logging.getLogger(__name__)
+
+
+class AgentContext(TypedDict, total=False):
+    """Runtime context for the agent.
+
+    Passed via config parameter to the graph.
+    """
+
+    user_id: str | None
+    user_name: str | None
+    metadata: dict[str, Any]
+
+
+class AgentState(TypedDict):
+    """State for the LangGraph agent.
+
+    This is what flows through the agent graph.
+    The messages field uses add_messages reducer to properly
+    append new messages to the conversation history.
+    """
+
+    messages: Annotated[list[BaseMessage], add_messages]
```
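The `add_messages` annotation is what makes each node's `{"messages": [...]}` return value an append to the running history rather than a replacement. A minimal sketch of the reducer's behavior, calling it directly (the message values here are invented for illustration):

```python
from langchain_core.messages import AIMessage, HumanMessage
from langgraph.graph.message import add_messages

existing = [HumanMessage(content="Hi", id="1")]
update = [AIMessage(content="Hello!", id="2")]

# The reducer appends (merging by message id), so prior history is preserved.
merged = add_messages(existing, update)
print([m.content for m in merged])  # ['Hi', 'Hello!']
```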
```diff
+
+
+@tool
+def current_datetime() -> str:
+    """Get the current date and time.
+
+    Use this tool when you need to know the current date or time.
+    """
+    return get_current_datetime()
+
+
+# List of all available tools
+ALL_TOOLS = [current_datetime]
+
+# Create a dictionary for quick tool lookup by name
+TOOLS_BY_NAME = {t.name: t for t in ALL_TOOLS}
```
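The template ships a single built-in tool; a generated project would add its own by following the same `@tool` pattern and extending the registry. A sketch (the `word_count` tool is hypothetical, not part of the template):

```python
from langchain_core.tools import tool


@tool
def word_count(text: str) -> str:
    """Count the words in a piece of text."""
    # The docstring doubles as the tool description the model sees.
    return f"{len(text.split())} words"


# Hypothetical extension of the registry defined above;
# current_datetime is the template's built-in tool.
ALL_TOOLS = [current_datetime, word_count]
TOOLS_BY_NAME = {t.name: t for t in ALL_TOOLS}
```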
```diff
+
+
+class LangGraphAssistant:
+    """ReAct agent wrapper using LangGraph.
+
+    Implements a graph-based agent with:
+    - An agent node that processes messages and decides actions
+    - A tools node that executes tool calls
+    - Conditional edges that loop back for tool execution or end
+
+    The ReAct pattern:
+    1. Agent receives input and reasons about it
+    2. If tool calls are needed, execute them
+    3. Tool results are added to messages
+    4. Agent reasons again with new information
+    5. Repeat until agent provides final response
+    """
+
+    def __init__(
+        self,
+        model_name: str | None = None,
+        temperature: float | None = None,
+        system_prompt: str | None = None,
+    ):
+        self.model_name = model_name or settings.AI_MODEL
+        self.temperature = temperature or settings.AI_TEMPERATURE
+        self.system_prompt = system_prompt or DEFAULT_SYSTEM_PROMPT
+        self._graph = None
+        self._checkpointer = MemorySaver()
+
+    def _create_model(self):
+        """Create the LLM model with tools bound."""
+        {%- if cookiecutter.use_openai %}
+        model = ChatOpenAI(
+            model=self.model_name,
+            temperature=self.temperature,
+            api_key=settings.OPENAI_API_KEY,
+            streaming=True,
+        )
+        {%- endif %}
+        {%- if cookiecutter.use_anthropic %}
+        model = ChatAnthropic(
+            model=self.model_name,
+            temperature=self.temperature,
+            api_key=settings.ANTHROPIC_API_KEY,
+            streaming=True,
+        )
+        {%- endif %}
+
+        return model.bind_tools(ALL_TOOLS)
+
+    def _agent_node(self, state: AgentState) -> dict[str, list[BaseMessage]]:
+        """Agent node that processes messages and decides whether to call tools.
+
+        This is the main reasoning node in the ReAct pattern.
+        """
+        model = self._create_model()
+
+        # Prepend system message to the conversation
+        messages = [SystemMessage(content=self.system_prompt), *state["messages"]]
+
+        response = model.invoke(messages)
+
+        logger.info(
+            f"Agent processed message - Tool calls: {len(response.tool_calls) if hasattr(response, 'tool_calls') else 0}"
+        )
+
+        return {"messages": [response]}
+
+    def _tools_node(self, state: AgentState) -> dict[str, list[ToolMessage]]:
+        """Tools node that executes tool calls from the agent.
+
+        Processes each tool call and returns results as ToolMessages.
+        """
+        messages = state["messages"]
+        last_message = messages[-1]
+
+        tool_results = []
+
+        if hasattr(last_message, "tool_calls") and last_message.tool_calls:
+            for tool_call in last_message.tool_calls:
+                tool_name = tool_call["name"]
+                tool_args = tool_call["args"]
+                tool_id = tool_call["id"]
+
+                logger.info(f"Executing tool: {tool_name} with args: {tool_args}")
+
+                try:
+                    tool_fn = TOOLS_BY_NAME.get(tool_name)
+                    if tool_fn:
+                        result = tool_fn.invoke(tool_args)
+                        tool_results.append(
+                            ToolMessage(
+                                content=str(result),
+                                tool_call_id=tool_id,
+                                name=tool_name,
+                            )
+                        )
+                        logger.info(f"Tool {tool_name} completed successfully")
+                    else:
+                        error_msg = f"Unknown tool: {tool_name}"
+                        logger.error(error_msg)
+                        tool_results.append(
+                            ToolMessage(
+                                content=error_msg,
+                                tool_call_id=tool_id,
+                                name=tool_name,
+                            )
+                        )
+                except Exception as e:
+                    error_msg = f"Error executing {tool_name}: {str(e)}"
+                    logger.error(error_msg, exc_info=True)
+                    tool_results.append(
+                        ToolMessage(
+                            content=error_msg,
+                            tool_call_id=tool_id,
+                            name=tool_name,
+                        )
+                    )
+
+        return {"messages": tool_results}
+
+    def _should_continue(self, state: AgentState) -> Literal["tools", "__end__"]:
+        """Conditional edge that decides whether to continue to tools or end.
+
+        Returns:
+            - "tools" if the agent made tool calls (needs to execute tools)
+            - "__end__" if the agent provided a final response (no tool calls)
+        """
+        messages = state["messages"]
+        last_message = messages[-1]
+
+        if hasattr(last_message, "tool_calls") and last_message.tool_calls:
+            logger.info(f"Continuing to tools - {len(last_message.tool_calls)} tool(s) to execute")
+            return "tools"
+
+        logger.info("No tool calls - ending conversation")
+        return "__end__"
+
+    def _build_graph(self) -> StateGraph:
+        """Build and compile the LangGraph state graph."""
+        workflow = StateGraph(AgentState)
+
+        # Add nodes
+        workflow.add_node("agent", self._agent_node)
+        workflow.add_node("tools", self._tools_node)
+
+        # Add edges
+        workflow.add_edge(START, "agent")
+        workflow.add_conditional_edges(
+            "agent",
+            self._should_continue,
+            {"tools": "tools", "__end__": END},
+        )
+        workflow.add_edge("tools", "agent")
+
+        return workflow.compile(checkpointer=self._checkpointer)
```
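The agent → conditional edge → tools → agent wiring above is the same loop that LangGraph's prebuilt helper produces; hand-rolling it, as the template does, keeps the per-node logging and the custom error handling in `_tools_node`. For comparison, a rough equivalent using `langgraph.prebuilt.create_react_agent` (a sketch, assuming an OpenAI chat model; the helper binds the tools to the model itself):

```python
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

model = ChatOpenAI(model="gpt-4o-mini", streaming=True)  # illustrative model name

# Builds the same agent/tools loop with MemorySaver-backed checkpointing,
# reusing the ALL_TOOLS registry defined earlier in the file.
graph = create_react_agent(model, ALL_TOOLS, checkpointer=MemorySaver())

# Invoked like the hand-built graph:
# await graph.ainvoke({"messages": [...]}, config={"configurable": {"thread_id": "t1"}})
```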
```diff
+
+    @property
+    def graph(self):
+        """Get or create the compiled graph instance."""
+        if self._graph is None:
+            self._graph = self._build_graph()
+        return self._graph
+
+    @staticmethod
+    def _convert_history(
+        history: list[dict[str, str]] | None,
+    ) -> list[HumanMessage | AIMessage | SystemMessage]:
+        """Convert conversation history to LangChain message format."""
+        messages: list[HumanMessage | AIMessage | SystemMessage] = []
+
+        for msg in history or []:
+            if msg["role"] == "user":
+                messages.append(HumanMessage(content=msg["content"]))
+            elif msg["role"] == "assistant":
+                messages.append(AIMessage(content=msg["content"]))
+            elif msg["role"] == "system":
+                messages.append(SystemMessage(content=msg["content"]))
+
+        return messages
+
+    async def run(
+        self,
+        user_input: str,
+        history: list[dict[str, str]] | None = None,
+        context: AgentContext | None = None,
+        thread_id: str = "default",
+    ) -> tuple[str, list[Any], AgentContext]:
+        """Run agent and return the output along with tool call events.
+
+        Args:
+            user_input: User's message.
+            history: Conversation history as list of {"role": "...", "content": "..."}.
+            context: Optional runtime context with user info.
+            thread_id: Thread ID for conversation continuity.
+
+        Returns:
+            Tuple of (output_text, tool_events, context).
+        """
+        messages = self._convert_history(history)
+        messages.append(HumanMessage(content=user_input))
+
+        agent_context: AgentContext = context if context is not None else {}
+
+        logger.info(f"Running agent with user input: {user_input[:100]}...")
+
+        config = {
+            "configurable": {
+                "thread_id": thread_id,
+                **agent_context,
+            }
+        }
+
+        result = await self.graph.ainvoke({"messages": messages}, config=config)
+
+        # Extract the final response and tool events
+        output = ""
+        tool_events: list[Any] = []
+
+        for message in result.get("messages", []):
+            if isinstance(message, AIMessage):
+                if message.content:
+                    output = message.content if isinstance(message.content, str) else str(message.content)
+                if hasattr(message, "tool_calls") and message.tool_calls:
+                    tool_events.extend(message.tool_calls)
+
+        logger.info(f"Agent run complete. Output length: {len(output)} chars")
+
+        return output, tool_events, agent_context
+
+    async def stream(
+        self,
+        user_input: str,
+        history: list[dict[str, str]] | None = None,
+        context: AgentContext | None = None,
+        thread_id: str = "default",
+    ):
+        """Stream agent execution with message and state update streaming.
+
+        Args:
+            user_input: User's message.
+            history: Conversation history.
+            context: Optional runtime context.
+            thread_id: Thread ID for conversation continuity.
+
+        Yields:
+            Tuples of (stream_mode, data) for streaming responses.
+            - stream_mode="messages": (chunk, metadata) for LLM tokens
+            - stream_mode="updates": state updates after each node
+        """
+        messages = self._convert_history(history)
+        messages.append(HumanMessage(content=user_input))
+
+        agent_context: AgentContext = context if context is not None else {}
+
+        config = {
+            "configurable": {
+                "thread_id": thread_id,
+                **agent_context,
+            }
+        }
+
+        logger.info(f"Starting stream for user input: {user_input[:100]}...")
+
+        async for stream_mode, data in self.graph.astream(
+            {"messages": messages},
+            config=config,
+            stream_mode=["messages", "updates"],
+        ):
+            yield stream_mode, data
+
+
+def get_agent() -> LangGraphAssistant:
+    """Factory function to create a LangGraphAssistant.
+
+    Returns:
+        Configured LangGraphAssistant instance.
+    """
+    return LangGraphAssistant()
```
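Taken together, `run` returns the final text plus the raw tool-call events, and `stream` yields `(stream_mode, data)` pairs; a consumer in the generated project might use them roughly like this (a sketch with invented conversation values):

```python
import asyncio


async def demo() -> None:
    agent = get_agent()

    # One-shot call; reusing the thread_id on a later call resumes the checkpointed state.
    output, tool_events, ctx = await agent.run(
        "What time is it?",
        history=[{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}],
        context={"user_id": "42"},
        thread_id="conversation-42",
    )
    print(output, [event["name"] for event in tool_events])

    # Streaming call: "messages" items carry (chunk, metadata) token pairs,
    # "updates" items carry per-node state updates.
    async for mode, data in agent.stream("And the date?", thread_id="conversation-42"):
        if mode == "messages":
            chunk, _metadata = data
            print(getattr(chunk, "content", ""), end="", flush=True)


asyncio.run(demo())
```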
```diff
+
+
+async def run_agent(
+    user_input: str,
+    history: list[dict[str, str]],
+    context: AgentContext | None = None,
+    thread_id: str = "default",
+) -> tuple[str, list[Any], AgentContext]:
+    """Run agent and return the output along with tool call events.
+
+    This is a convenience function for backwards compatibility.
+
+    Args:
+        user_input: User's message.
+        history: Conversation history.
+        context: Optional runtime context.
+        thread_id: Thread ID for conversation continuity.
+
+    Returns:
+        Tuple of (output_text, tool_events, context).
+    """
+    agent = get_agent()
+    return await agent.run(user_input, history, context, thread_id)
+{%- else %}
+"""LangGraph Assistant agent - not configured."""
+{%- endif %}
```