botrun-flow-lang 5.12.263-py3-none-any.whl → 6.2.21-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- botrun_flow_lang/api/auth_api.py +39 -39
- botrun_flow_lang/api/auth_utils.py +183 -183
- botrun_flow_lang/api/botrun_back_api.py +65 -65
- botrun_flow_lang/api/flow_api.py +3 -3
- botrun_flow_lang/api/hatch_api.py +508 -508
- botrun_flow_lang/api/langgraph_api.py +816 -811
- botrun_flow_lang/api/langgraph_constants.py +11 -0
- botrun_flow_lang/api/line_bot_api.py +1484 -1484
- botrun_flow_lang/api/model_api.py +300 -300
- botrun_flow_lang/api/rate_limit_api.py +32 -32
- botrun_flow_lang/api/routes.py +79 -79
- botrun_flow_lang/api/search_api.py +53 -53
- botrun_flow_lang/api/storage_api.py +395 -395
- botrun_flow_lang/api/subsidy_api.py +290 -290
- botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
- botrun_flow_lang/api/user_setting_api.py +70 -70
- botrun_flow_lang/api/version_api.py +31 -31
- botrun_flow_lang/api/youtube_api.py +26 -26
- botrun_flow_lang/constants.py +13 -13
- botrun_flow_lang/langgraph_agents/agents/agent_runner.py +178 -178
- botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
- botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gemini_subsidy_graph.py +460 -460
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +730 -723
- botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
- botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
- botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
- botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
- botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
- botrun_flow_lang/langgraph_agents/agents/util/img_util.py +336 -294
- botrun_flow_lang/langgraph_agents/agents/util/local_files.py +419 -419
- botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
- botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
- botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +562 -486
- botrun_flow_lang/langgraph_agents/agents/util/pdf_cache.py +250 -250
- botrun_flow_lang/langgraph_agents/agents/util/pdf_processor.py +204 -204
- botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
- botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
- botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
- botrun_flow_lang/langgraph_agents/agents/util/usage_metadata.py +34 -0
- botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
- botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
- botrun_flow_lang/llm_agent/llm_agent.py +19 -19
- botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
- botrun_flow_lang/log/.gitignore +2 -2
- botrun_flow_lang/main.py +61 -61
- botrun_flow_lang/main_fast.py +51 -51
- botrun_flow_lang/mcp_server/__init__.py +10 -10
- botrun_flow_lang/mcp_server/default_mcp.py +854 -744
- botrun_flow_lang/models/nodes/utils.py +205 -205
- botrun_flow_lang/models/token_usage.py +34 -34
- botrun_flow_lang/requirements.txt +21 -21
- botrun_flow_lang/services/base/firestore_base.py +30 -30
- botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
- botrun_flow_lang/services/hatch/hatch_fs_store.py +419 -419
- botrun_flow_lang/services/storage/storage_cs_store.py +206 -206
- botrun_flow_lang/services/storage/storage_factory.py +12 -12
- botrun_flow_lang/services/storage/storage_store.py +65 -65
- botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
- botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
- botrun_flow_lang/static/docs/tools/index.html +926 -926
- botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
- botrun_flow_lang/tests/api_stress_test.py +357 -357
- botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
- botrun_flow_lang/tests/test_botrun_app.py +46 -46
- botrun_flow_lang/tests/test_html_util.py +31 -31
- botrun_flow_lang/tests/test_img_analyzer.py +190 -190
- botrun_flow_lang/tests/test_img_util.py +39 -39
- botrun_flow_lang/tests/test_local_files.py +114 -114
- botrun_flow_lang/tests/test_mermaid_util.py +103 -103
- botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
- botrun_flow_lang/tests/test_plotly_util.py +151 -151
- botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
- botrun_flow_lang/tools/generate_docs.py +133 -133
- botrun_flow_lang/tools/templates/tools.html +153 -153
- botrun_flow_lang/utils/__init__.py +7 -7
- botrun_flow_lang/utils/botrun_logger.py +344 -344
- botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
- botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
- botrun_flow_lang/utils/google_drive_utils.py +654 -654
- botrun_flow_lang/utils/langchain_utils.py +324 -324
- botrun_flow_lang/utils/yaml_utils.py +9 -9
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/METADATA +6 -6
- botrun_flow_lang-6.2.21.dist-info/RECORD +104 -0
- botrun_flow_lang-5.12.263.dist-info/RECORD +0 -102
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/WHEEL +0 -0
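Among the changed files, `botrun_flow_lang/api/langgraph_api.py` (+816 -811) is reproduced in full below. For readers who want to regenerate a per-file diff like this one locally, here is a minimal sketch; it assumes both wheels have already been fetched (for example with `pip download botrun-flow-lang==5.12.263 --no-deps` and the same for `6.2.21`) and relies only on the standard library, since a wheel is just a zip archive. The exact `.whl` filenames are assumptions based on standard wheel naming.

```python
# Minimal sketch: diff one file between two published wheels of this package.
# Assumes the wheels were downloaded beforehand, e.g.:
#   pip download botrun-flow-lang==5.12.263 --no-deps
#   pip download botrun-flow-lang==6.2.21 --no-deps
# The .whl filenames below are assumptions based on standard wheel naming.
import difflib
import zipfile

OLD_WHL = "botrun_flow_lang-5.12.263-py3-none-any.whl"
NEW_WHL = "botrun_flow_lang-6.2.21-py3-none-any.whl"
TARGET = "botrun_flow_lang/api/langgraph_api.py"  # +816 -811 in the list above


def read_member(whl_path: str, member: str) -> list[str]:
    """Read one file out of a wheel (a zip archive) as a list of lines."""
    with zipfile.ZipFile(whl_path) as whl:
        return whl.read(member).decode("utf-8").splitlines(keepends=True)


old_lines = read_member(OLD_WHL, TARGET)
new_lines = read_member(NEW_WHL, TARGET)
for line in difflib.unified_diff(
    old_lines,
    new_lines,
    fromfile=f"5.12.263/{TARGET}",
    tofile=f"6.2.21/{TARGET}",
):
    print(line, end="")
```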
botrun_flow_lang/api/langgraph_api.py
@@ -1,811 +1,816 @@
+import logging
+import uuid
+import json
+import random
+import time
+import re
+
+from fastapi import APIRouter, HTTPException
+
+from pydantic import BaseModel
+
+from typing import Dict, Any, List, Optional
+
+from fastapi.responses import StreamingResponse
+
+from botrun_flow_lang.constants import ERR_GRAPH_RECURSION_ERROR, LANG_EN, LANG_ZH_TW
+
+from botrun_flow_lang.langgraph_agents.agents.agent_runner import (
+    agent_runner,
+    langgraph_runner,
+)
+
+from botrun_flow_lang.langgraph_agents.agents.gov_researcher.gov_researcher_graph import (
+    GovResearcherGraph,
+    get_content_for_gov_researcher,
+)
+from botrun_flow_lang.langgraph_agents.agents.gov_researcher.gov_researcher_2_graph import (
+    TAIWAN_SUBSIDY_SUPERVISOR_PROMPT,
+    create_taiwan_subsidy_agent_graph,
+    taiwan_subsidy_agent_graph,
+)
+from botrun_flow_lang.langgraph_agents.agents.gov_researcher.gemini_subsidy_graph import (
+    TAIWAN_SUBSIDY_SUPERVISOR_PROMPT as GEMINI_SUBSIDY_PROMPT,
+    create_gemini_subsidy_agent_graph,
+    gemini_subsidy_agent_graph,
+)
+from botrun_flow_lang.langgraph_agents.agents.langgraph_react_agent import (
+    create_react_agent_graph,
+    get_react_agent_model_name,
+)
+
+from botrun_flow_lang.langgraph_agents.cache.langgraph_botrun_cache import (
+    get_botrun_cache,
+)
+
+from botrun_flow_lang.models.token_usage import TokenUsage
+
+from botrun_flow_lang.utils.botrun_logger import (
+    get_session_botrun_logger,
+    default_logger,
+)
+
+# Init these only when they are needed; otherwise loading takes time.
+# This block was moved back out here so that langgraph can run locally.
+from langgraph.errors import GraphRecursionError
+import anthropic  # Keep relevant imports if needed for error handling here
+
+# ==========
+
+from botrun_flow_lang.langgraph_agents.agents.search_agent_graph import (
+    SearchAgentGraph,
+    # graph as search_agent_graph,
+)
+
+from botrun_flow_lang.utils.langchain_utils import (
+    extract_token_usage_from_state,
+    langgraph_event_to_json,
+    litellm_msgs_to_langchain_msgs,
+)
+
+
+router = APIRouter(prefix="/langgraph")
+
+
+class LangGraphRequest(BaseModel):
+    graph_name: str
+    # todo: LangGraph should receive a thread_id, but with the current Cloud Run architecture the thread_id is not always readable (auto scale)
+    thread_id: Optional[str] = None
+    user_input: Optional[str] = None
+    messages: List[Dict[str, Any]] = []
+    config: Optional[Dict[str, Any]] = None
+    stream: bool = False
+    # Whether LangGraph needs to resume from a checkpoint
+    need_resume: bool = False
+    session_id: Optional[str] = None
+
+
+class LangGraphResponse(BaseModel):
+    """
+    @param content: used by the evaluation pipeline to assess the result
+    @param state: the graph's final state, available if extra information is needed
+    @param token_usage: Token usage information for the entire graph execution
+    """
+
+    id: str
+    object: str = "chat.completion"
+    created: int
+    model: str
+    content: Optional[str] = None
+    state: Optional[Dict[str, Any]] = None
+    token_usage: Optional[TokenUsage] = None
+
+
+class SupportedGraphsResponse(BaseModel):
+    """Response model for listing supported graphs"""
+
+    graphs: List[str]
+
+
+class GraphSchemaRequest(BaseModel):
+    """Request model for getting graph schema"""
+
+    graph_name: str
+
+
+# Imported from the constants module so that external modules can read these
+# constants without triggering heavy imports
+from botrun_flow_lang.api.langgraph_constants import (
+    LANGGRAPH_REACT_AGENT,
+    GOV_SUBSIDY_AGENT,
+    PERPLEXITY_SEARCH_AGENT,
+)
+
+# Constants used only inside this file
+CUSTOM_WEB_RESEARCH_AGENT = "custom_web_research_agent"
+DEEP_RESEARCH_AGENT = "deep_research_agent"
+# GOV_RESEARCHER_AGENT = "gov_researcher_agent"
+GEMINI_SUBSIDY_AGENT = "gemini_subsidy_agent"
+
+
+SUPPORTED_GRAPH_NAMES = [
+    # PERPLEXITY_SEARCH_AGENT,
+    LANGGRAPH_REACT_AGENT,
+    GOV_SUBSIDY_AGENT,
+    GEMINI_SUBSIDY_AGENT,
+    # GOV_RESEARCHER_AGENT,
+]
+SUPPORTED_GRAPH = {
+    GOV_SUBSIDY_AGENT: taiwan_subsidy_agent_graph,
+    GEMINI_SUBSIDY_AGENT: gemini_subsidy_agent_graph,
+}
+
+
+def contains_chinese_chars(text: str) -> bool:
+    """Check if the given text contains any Chinese characters."""
+    if not text:
+        return False
+    # This pattern matches Chinese characters (both simplified and traditional)
+    pattern = re.compile(
+        r"[\u4e00-\u9fff\u3400-\u4dbf\U00020000-\U0002a6df\U0002a700-\U0002ebef]"
+    )
+    return bool(pattern.search(text))
+
+
+async def get_cached_or_create_react_graph(
+    botrun_id: Optional[str],  # Key parameter - can be None/empty
+    config: Optional[Dict[str, Any]] = None,
+    messages: Optional[List[Dict]] = None,
+    user_input: Optional[str] = None,
+    logger: logging.Logger = default_logger,
+) -> Any:
+    """
+    Get cached graph for LANGGRAPH_REACT_AGENT or create new one.
+    Handles botrun_id-based caching with parameter validation.
+
+    Args:
+        botrun_id: The botrun ID for cache lookup. If None/empty, skips caching.
+        config: Configuration dictionary
+        messages: List of message dictionaries
+        user_input: User input string
+        logger: Logger instance
+
+    Returns:
+        Graph instance (cached or newly created)
+    """
+    config = config or {}
+
+    # Extract parameters for hash calculation (moved from get_graph)
+    system_prompt = config.get("system_prompt", "")
+    if messages:
+        for message in messages:
+            if message.get("role") == "system":
+                system_prompt = message.get("content", "")
+
+    botrun_flow_lang_url = config.get("botrun_flow_lang_url", "")
+    user_id = config.get("user_id", "")
+    model_name = config.get("model_name", "")
+
+    # Determine language (moved from get_graph)
+    has_chinese = contains_chinese_chars(system_prompt)
+    if not has_chinese and user_input:
+        has_chinese = contains_chinese_chars(user_input)
+    lang = LANG_ZH_TW if has_chinese else LANG_EN
+
+    mcp_config = config.get("mcp_config")
+
+    # CRITICAL: Check if botrun_id is provided and not empty
+    if not botrun_id:
+        # If botrun_id is None or empty, skip caching entirely
+        logger.info("No botrun_id provided, creating new graph without caching")
+        graph = await create_react_agent_graph(
+            system_prompt=system_prompt,
+            botrun_flow_lang_url=botrun_flow_lang_url,
+            user_id=user_id,
+            model_name=model_name,
+            lang=lang,
+            mcp_config=mcp_config,
+        )
+        return graph
+
+    # If botrun_id is provided, use caching logic
+    cache = get_botrun_cache()
+    params_hash = cache.get_params_hash(
+        system_prompt, botrun_flow_lang_url, user_id, model_name, lang, mcp_config
+    )
+
+    # Try to get cached graph
+    cached_graph = cache.get_cached_graph(botrun_id, params_hash)
+    if cached_graph:
+        logger.info(f"Using cached graph for botrun_id: {botrun_id}")
+        return cached_graph
+
+    # Create new graph (same logic as in get_graph)
+    logger.info(f"Creating new graph for botrun_id: {botrun_id}")
+    graph = await create_react_agent_graph(
+        system_prompt=system_prompt,
+        botrun_flow_lang_url=botrun_flow_lang_url,
+        user_id=user_id,
+        model_name=model_name,
+        lang=lang,
+        mcp_config=mcp_config,
+    )
+
+    # Cache the new graph
+    cache.cache_graph(botrun_id, params_hash, graph)
+
+    return graph
+
+
+async def get_graph(
+    graph_name: str,
+    config: Optional[Dict[str, Any]] = None,
+    stream: bool = False,
+    messages: Optional[List[Dict]] = [],
+    user_input: Optional[str] = None,
+):
+    if (
+        graph_name not in SUPPORTED_GRAPH_NAMES
+        and graph_name not in SUPPORTED_GRAPH.keys()
+    ):
+        raise ValueError(f"Unsupported graph from get_graph: {graph_name}")
+    if graph_name == PERPLEXITY_SEARCH_AGENT:
+        graph = SearchAgentGraph().graph
+        graph_config = {
+            "search_prompt": config.get("search_prompt", ""),
+            "model_name": config.get("model_name", "sonar-reasoning-pro"),
+            "related_prompt": config.get("related_question_prompt", ""),
+            "search_vendor": config.get("search_vendor", "perplexity"),
+            "domain_filter": config.get("domain_filter", []),
+            "user_prompt_prefix": config.get("user_prompt_prefix", ""),
+            "stream": stream,
+        }
+    elif graph_name == GOV_SUBSIDY_AGENT:
+        graph = create_taiwan_subsidy_agent_graph(
+            config.get("prompt_template", TAIWAN_SUBSIDY_SUPERVISOR_PROMPT)
+        )
+        graph_config = {
+            "prompt_template": config.get("prompt_template", ""),
+            "legal_extraction_prompt": config.get("legal_extraction_prompt", ""),
+            "faq_extraction_prompt": config.get("faq_extraction_prompt", ""),
+            "calculation_analysis_prompt": config.get(
+                "calculation_analysis_prompt", ""
+            ),
+        }
+    elif graph_name == GEMINI_SUBSIDY_AGENT:
+        graph = create_gemini_subsidy_agent_graph(
+            config.get("prompt_template", GEMINI_SUBSIDY_PROMPT)
+        )
+        graph_config = {
+            "prompt_template": config.get("prompt_template", ""),
+        }
+    else:
+        raise ValueError(f"Unsupported graph type: {graph_name}")
+    return graph, graph_config
+
+
+def get_init_state(
+    graph_name: str,
+    user_input: str,
+    config: Optional[Dict[str, Any]] = None,
+    messages: Optional[List[Dict]] = [],
+    enable_prompt_caching: bool = False,
+):
+    if graph_name == PERPLEXITY_SEARCH_AGENT:
+        if len(messages) > 0:
+            return {"messages": litellm_msgs_to_langchain_msgs(messages)}
+        if config.get("user_prompt_prefix", ""):
+            return {
+                "messages": [
+                    {
+                        "role": "user",
+                        "content": config.get("user_prompt_prefix", "")
+                        + "\n\n"
+                        + user_input,
+                    }
+                ]
+            }
+
+        return {"messages": [user_input]}
+    elif graph_name == CUSTOM_WEB_RESEARCH_AGENT:
+        if len(messages) > 0:
+            return {
+                "messages": litellm_msgs_to_langchain_msgs(messages),
+                "model": config.get("model", "anthropic"),
+            }
+        return {
+            "messages": [user_input],
+            "model": config.get("model", "anthropic"),
+        }
+    elif graph_name == LANGGRAPH_REACT_AGENT:
+        if len(messages) > 0:
+            new_messages = []
+            for message in messages:
+                if message.get("role") != "system":
+                    new_messages.append(message)
+
+            return {
+                "messages": litellm_msgs_to_langchain_msgs(
+                    new_messages, enable_prompt_caching
+                )
+            }
+        else:
+            return {
+                "messages": [user_input],
+            }
+    elif graph_name == DEEP_RESEARCH_AGENT:
+        if len(messages) > 0:
+            return {
+                "messages": litellm_msgs_to_langchain_msgs(messages),
+                "topic": user_input,
+            }
+        return {
+            "messages": [user_input],
+            "topic": user_input,
+        }
+    else:
+        if len(messages) > 0:
+            return {"messages": litellm_msgs_to_langchain_msgs(messages)}
+        return {"messages": [user_input]}
+
+
+def get_content(graph_name: str, state: Dict[str, Any]):
+    if graph_name == PERPLEXITY_SEARCH_AGENT:
+        return state["messages"][-3].content
+    elif graph_name == CUSTOM_WEB_RESEARCH_AGENT:
+        content = state["answer"].get("markdown", "")
+        content = content.replace("\\n", "\n")
+        if state["answer"].get("references", []):
+            references = "\n\n參考資料:\n"
+            for reference in state["answer"]["references"]:
+                references += f"- [{reference['title']}]({reference['url']})\n"
+            content += references
+        return content
+    elif graph_name == DEEP_RESEARCH_AGENT:
+        sections = state["sections"]
+        sections_str = "\n\n".join(
+            f"章節: {section.name}\n"
+            f"描述: {section.description}\n"
+            f"需要研究: {'是' if section.research else '否'}\n"
+            for section in sections
+        )
+        sections_str = "預計報告結構:\n\n" + sections_str
+        return sections_str
+    # elif graph_name == GOV_RESEARCHER_AGENT:
+    #     return get_content_for_gov_researcher(state)
+    elif graph_name == GOV_SUBSIDY_AGENT or graph_name == GEMINI_SUBSIDY_AGENT:
+        messages = state["messages"]
+        # Find the last AI message
+        for msg in reversed(messages):
+            if msg.type == "ai":
+                if isinstance(msg.content, list):
+                    return msg.content[0].get("text", "")
+                else:
+                    return msg.content
+        return ""  # If no AI message found
+    else:
+        messages = state["messages"]
+        # Find the last human message
+        last_human_idx = -1
+        for i, msg in enumerate(messages):
+            if msg.type == "human":
+                last_human_idx = i
+
+        # Combine all AI messages after the last human message
+        ai_contents = ""
+        for msg in messages[last_human_idx + 1 :]:
+            if msg.type == "ai":
+                if isinstance(msg.content, list):
+                    ai_contents += msg.content[0].get("text", "")
+                else:
+                    ai_contents += msg.content
+
+        return ai_contents
+
+
+async def process_langgraph_request(
+    request: LangGraphRequest,
+    retry: bool = False,  # Keep retry logic for non-streaming path if needed
+    logger: logging.Logger = default_logger,
+) -> Any:  # Return type can be LangGraphResponse or StreamingResponse
+    """Core logic for handling a LangGraph request."""
+    # --- Streaming Case ---
+    if request.stream:
+        logger.info(f"Processing STREAM request for graph: {request.graph_name}")
+        # Use the new wrapper generator that handles resource management
+        return StreamingResponse(
+            managed_langgraph_stream_wrapper(request, logger),
+            media_type="text/event-stream",
+        )
+
+    # --- Non-Streaming Case ---
+    logger.info(f"Processing NON-STREAM request for graph: {request.graph_name}")
+    try:
+        config = request.config or {}
+        mcp_config = config.get("mcp_config")
+        user_id = config.get("user_id")
+
+        # Get botrun_id from config
+        botrun_id = config.get("botrun_id")  # Can be None/empty
+
+        # --- Graph and State Initialization (OUTSIDE of MCP client context) ---
+        # Cache logic for LANGGRAPH_REACT_AGENT only
+        if request.graph_name == LANGGRAPH_REACT_AGENT:
+            graph = await get_cached_or_create_react_graph(
+                botrun_id=botrun_id,  # Pass botrun_id (can be None)
+                config=request.config,
+                messages=request.messages,
+                user_input=request.user_input,
+                logger=logger,
+            )
+            graph_config = request.config
+        else:
+            # Existing logic for other graph types (calls modified get_graph)
+            graph, graph_config = await get_graph(
+                request.graph_name,
+                request.config,
+                False,  # stream=False
+                request.messages,
+                request.user_input,
+            )
+
+        # Determine model name for init_state caching logic if needed
+        # user_input_model_name = request.config.get("model_name", "")
+        # enable_caching = get_react_agent_model_name(user_input_model_name).startswith("claude-")
+
+        init_state = get_init_state(
+            request.graph_name,
+            request.user_input,
+            request.config,
+            request.messages,
+            False,  # enable_prompt_caching=enable_caching
+        )
+
+        thread_id = request.thread_id if request.thread_id else str(uuid.uuid4())
+        logger.info(f"Running non-stream with thread_id: {thread_id}")
+
+        # --- Run the agent (no MCP client needed during execution) ---
+        logger.info("Executing agent_runner for non-stream request...")
+        async for _ in agent_runner(
+            thread_id,
+            init_state,
+            graph,
+            request.need_resume,
+            extra_config=graph_config,
+        ):
+            pass  # Just consume the events
+
+        logger.info(
+            f"agent_runner completed for thread_id: {thread_id}. Fetching final state."
+        )
+
+        # --- Get Final State and Prepare Response (OUTSIDE of MCP client context) ---
+        config_for_state = {"configurable": {"thread_id": thread_id}}
+        state = await graph.aget_state(config_for_state)
+
+        try:
+            state_values_json = langgraph_event_to_json(state.values)
+            logger.info(
+                f"Final state fetched for {thread_id}: {state_values_json[:500]}..."
+            )  # Log truncated state
+        except Exception as e_log:
+            logger.error(f"Error serializing final state for logging: {e_log}")
+            logger.info(
+                f"Final state keys for {thread_id}: {list(state.values.keys())}"
+            )
+
+        content = get_content(request.graph_name, state.values)
+
+        model_name_config = (
+            request.config.get("model_name", "") if request.config else ""
+        )
+        final_model_name = model_name_config  # Default to config model name
+        if request.graph_name == LANGGRAPH_REACT_AGENT:
+            final_model_name = get_react_agent_model_name(model_name_config)
+            token_usage = extract_token_usage_from_state(state.values, final_model_name)
+        else:
+            token_usage = TokenUsage(
+                total_input_tokens=0,
+                total_output_tokens=0,
+                total_tokens=0,
+                nodes=[],
+            )
+
+        return LangGraphResponse(
+            id=thread_id,
+            created=int(time.time()),
+            model=request.graph_name,  # Or final_model_name? Check requirements
+            content=content,
+            state=state.values,  # Consider serializing state here if needed client-side
+            token_usage=token_usage,
+        )
+
+    except anthropic.RateLimitError as e:
+        if retry:
+            logger.error(
+                "Retry failed with Anthropic RateLimitError (non-stream)", exc_info=True
+            )
+            raise HTTPException(
+                status_code=429, detail=f"Rate limit exceeded after retry: {e}"
+            )  # 429 is more appropriate
+
+        logger.warning(
+            f"Anthropic RateLimitError occurred (non-stream): {e}. Retrying..."
+        )
+        retry_delay = random.randint(7, 20)
+        time.sleep(
+            retry_delay
+        )  # Note: time.sleep blocks async. Consider asyncio.sleep(retry_delay) if this becomes an issue.
+        logger.info(f"Retrying non-stream request after {retry_delay}s delay...")
+        return await process_langgraph_request(
+            request, retry=True, logger=logger
+        )  # Recursive call for retry
+
+    except GraphRecursionError as e:
+        logger.error(f"GraphRecursionError (non-stream): {e}", exc_info=True)
+        raise HTTPException(
+            status_code=500, detail=f"Graph execution exceeded maximum depth: {e}"
+        )
+
+    except Exception as e:
+        import traceback
+
+        tb_str = traceback.format_exc()
+        logger.error(
+            f"Unhandled exception in process_langgraph_request (non-stream): {e}",
+            exc_info=True,
+        )
+        raise HTTPException(
+            status_code=500, detail=f"Internal Server Error during graph execution: {e}"
+        )
+
+
+@router.post("/invoke")
+async def invoke(request: LangGraphRequest):
+    """
+    Run the specified LangGraph; supports streaming and non-streaming modes.
+
+    Args:
+        request: request carrying graph_name and the input data
+
+    Returns:
+        Streaming mode: StreamingResponse
+        Non-streaming mode: LangGraphResponse
+    """
+    session_id = request.session_id
+    user_id = request.config.get("user_id", "")
+
+    # *** Create a session-specific BotrunLogger for this specific request ***
+    # This ensures Cloud Logging and session/user context
+    logger = get_session_botrun_logger(session_id=session_id, user_id=user_id)
+
+    logger.info(
+        "invoke LangGraph API",
+        request=request.model_dump(),
+    )
+
+    # Pass the request-specific BotrunLogger down
+    return await process_langgraph_request(request, logger=logger)
+
+
+# NEW: Wrapper generator for managing resources during streaming
+async def managed_langgraph_stream_wrapper(
+    request: LangGraphRequest, logger: logging.Logger
+):
+    """
+    Manages AsyncExitStack and MCPClient lifecycle for streaming responses.
+    Initializes graph and then yields events from langgraph_stream_response_generator.
+    """
+    try:
+        config = request.config or {}
+        mcp_config = config.get("mcp_config")
+        user_id = config.get("user_id")
+        print(f"mcp_config: {mcp_config}, user_id: {user_id}")
+
+        # Get botrun_id from config
+        botrun_id = config.get("botrun_id")  # Can be None/empty
+
+        # --- Graph and State Initialization (OUTSIDE of MCP client context) ---
+        logger.info("Getting graph and initial state for stream...")
+        # Cache logic for LANGGRAPH_REACT_AGENT only
+        if request.graph_name == LANGGRAPH_REACT_AGENT:
+            graph = await get_cached_or_create_react_graph(
+                botrun_id=botrun_id,  # Pass botrun_id (can be None)
+                config=request.config,
+                messages=request.messages,
+                user_input=request.user_input,
+                logger=logger,
+            )
+            graph_config = request.config
+        else:
+            # Existing logic for other graph types (calls modified get_graph)
+            graph, graph_config = await get_graph(
+                request.graph_name,
+                request.config,
+                request.stream,  # Pass stream=True
+                request.messages,
+                request.user_input,
+            )
+
+        # Determine model name for init_state caching logic if needed
+        # user_input_model_name = request.config.get("model_name", "")
+        # enable_caching = get_react_agent_model_name(user_input_model_name).startswith("claude-")  # Example
+
+        init_state = get_init_state(
+            request.graph_name,
+            request.user_input,
+            request.config,
+            request.messages,
+            False,  # enable_prompt_caching=enable_caching  # Pass caching flag if used
+        )
+
+        thread_id = request.thread_id if request.thread_id else str(uuid.uuid4())
+        logger.info(f"Streaming with thread_id: {thread_id}")
+
+        # --- Yield from the actual stream response generator ---
+        async for event in langgraph_stream_response_generator(
+            thread_id,
+            init_state,
+            graph,
+            request.need_resume,
+            logger,
+            graph_config,
+        ):
+            yield event  # Yield the formatted event string
+
+    except anthropic.RateLimitError as e:
+        # Handle rate limit errors specifically for streaming if needed
+        # Note: Retry logic might be complex to implement correctly within a generator.
+        # Consider if retry should happen at a higher level or if yielding an error is sufficient.
+        logger.error(
+            f"Anthropic RateLimitError during stream setup/execution: {e}",
+            exc_info=True,
+        )
+        error_payload = json.dumps(
+            {"error": f"Rate Limit Error: {e}", "retry_possible": False}
+        )  # Indicate no auto-retry here
+        yield f"data: {error_payload}\n\n"
+        yield "data: [DONE]\n\n"  # Ensure stream terminates correctly
+
+    except GraphRecursionError as e:
+        # Handle recursion errors specifically (can happen during graph execution)
+        logger.error(
+            f"GraphRecursionError during stream: {e} for thread_id: {thread_id}",
+            error=str(e),
+            exc_info=True,
+        )
+        try:
+            error_msg = json.dumps(
+                {"error": ERR_GRAPH_RECURSION_ERROR, "detail": str(e)}
+            )
+            yield f"data: {error_msg}\n\n"
+        except Exception as inner_e:
+            logger.error(
+                f"Error serializing GraphRecursionError for stream: {inner_e}",
+                exc_info=True,
+            )
+            yield f"data: {json.dumps({'error': ERR_GRAPH_RECURSION_ERROR})}\n\n"
+        yield "data: [DONE]\n\n"  # Ensure stream terminates correctly
+
+    except Exception as e:
+        # Catch-all for other errors during setup or streaming
+        import traceback
+
+        tb_str = traceback.format_exc()
+        logger.error(
+            f"Unhandled exception in managed_langgraph_stream_wrapper: {e}",
+            exc_info=True,
+            traceback=tb_str,
+        )
+        error_payload = json.dumps({"error": f"Streaming Error: {e}", "detail": tb_str})
+        yield f"data: {error_payload}\n\n"
+        yield "data: [DONE]\n\n"  # Ensure stream terminates correctly
+
+
+# RENAMED: Original langgraph_stream_response, now focused on generation
+async def langgraph_stream_response_generator(
+    thread_id: str,
+    init_state: Dict,
+    graph: Any,  # Receives the already configured graph
+    need_resume: bool = False,
+    logger: logging.Logger = default_logger,
+    extra_config: Optional[Dict] = None,
+):
+    """
+    Generates LangGraph stream events using langgraph_runner.
+    Handles formatting ('data: ...') and '[DONE]' signal.
+    Exception handling specific to langgraph_runner execution.
+    """
+    try:
+        logger.info(
+            "Starting langgraph_runner iteration",
+            thread_id=thread_id,
+            need_resume=need_resume,
+        )
+
+        final_event = None
+        first_event = True  # To potentially log first event differently if needed
+        async for event in langgraph_runner(
+            thread_id, init_state, graph, need_resume, extra_config
+        ):
+            final_event = event  # Keep track of the last event
+            event_json_str = langgraph_event_to_json(event)  # Serialize event safely
+            if first_event:
+                # Optional: Different logging for the very first event chunk
+                logger.info(
+                    f"First stream event for {thread_id}: {event_json_str[:200]}..."
+                )  # Log truncated first event
+                first_event = False
+
+            # print statement for local debugging if needed
+            # from datetime import datetime
+            # print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), event_json_str)
+
+            yield f"data: {event_json_str}\n\n"
+
+        # Log details about the final event if needed
+        if final_event:
+            logger.info(
+                "Finished langgraph_runner iteration",
+                thread_id=thread_id,
+                final_event_type=final_event.get("event"),
+                # final_event_data=langgraph_event_to_json(final_event)  # Log full final event if useful
+            )
+        else:
+            logger.warning(
+                "langgraph_runner finished without yielding any events",
+                thread_id=thread_id,
+            )
+
+        yield "data: [DONE]\n\n"  # Signal end of stream
+
+    # Error handling remains here as these errors occur during langgraph_runner
+    except GraphRecursionError as e:
+        logger.error(
+            f"GraphRecursionError in stream generator: {e} for thread_id: {thread_id}",
+            error=str(e),
+            exc_info=True,
+        )
+        try:
+            error_msg = json.dumps(
+                {"error": ERR_GRAPH_RECURSION_ERROR, "detail": str(e)}
+            )
+            yield f"data: {error_msg}\n\n"
+        except Exception as inner_e:
+            logger.error(
+                f"Error serializing GraphRecursionError msg: {inner_e}", exc_info=True
+            )
+            yield f"data: {json.dumps({'error': ERR_GRAPH_RECURSION_ERROR})}\n\n"
+        # Ensure [DONE] is sent even after handled error to terminate client side
+        yield "data: [DONE]\n\n"
+
+    except Exception as e:
+        # Catch errors specifically from langgraph_runner or event processing
+        import traceback
+
+        tb_str = traceback.format_exc()
+        logger.error(
+            f"Exception in stream generator: {e} for thread_id: {thread_id}",
+            error=str(e),
+            exc_info=True,
+            traceback=tb_str,
+        )
+        error_response = {"error": f"Stream Generation Error: {e}", "detail": tb_str}
+        yield f"data: {json.dumps(error_response)}\n\n"
+        # Ensure [DONE] is sent even after handled error
+        yield "data: [DONE]\n\n"
+
+
+@router.get("/list", response_model=SupportedGraphsResponse)
+async def list_supported_graphs():
+    """
+    List all supported LangGraph names
+
+    Returns:
+        A list containing all supported graph names
+    """
+    return SupportedGraphsResponse(graphs=list(SUPPORTED_GRAPH.keys()))
+
+
+@router.post("/schema", response_model=dict)
+async def get_graph_schema(request: GraphSchemaRequest):
+    """
+    Get the schema of the specified graph
+    """
+    if request.graph_name not in SUPPORTED_GRAPH:
+        raise HTTPException(status_code=404, detail="Graph not found")
+    return SUPPORTED_GRAPH[request.graph_name].get_context_jsonschema()
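To make the new API surface concrete, here is a hedged client sketch against the endpoints defined in the file above. The route paths (`/langgraph/list`, `/langgraph/invoke`), the `LangGraphRequest` fields, and the `data: ...` / `data: [DONE]` SSE framing all come from the code itself; the base URL and the `httpx` dependency are assumptions about a particular deployment, and the available graph names are fetched from `/langgraph/list` rather than assumed (the constants themselves live in `langgraph_constants.py`, which is not shown in this diff).

```python
# Hedged client sketch for the /langgraph router above. Assumptions: the
# service is reachable at BASE_URL and `httpx` is installed; everything else
# (paths, payload fields, SSE framing) is taken from the source shown above.
import json

import httpx

BASE_URL = "http://localhost:8080"  # assumed deployment URL


def list_graphs() -> list[str]:
    # GET /langgraph/list -> SupportedGraphsResponse, i.e. {"graphs": [...]}
    resp = httpx.get(f"{BASE_URL}/langgraph/list")
    resp.raise_for_status()
    return resp.json()["graphs"]


def invoke_stream(graph_name: str, user_input: str) -> None:
    # POST /langgraph/invoke with stream=True returns text/event-stream chunks
    # framed as "data: <json>\n\n" and terminated by "data: [DONE]\n\n".
    payload = {
        "graph_name": graph_name,
        "user_input": user_input,
        "stream": True,
        # invoke() reads config["user_id"] unconditionally, so always send config
        "config": {"user_id": "demo-user"},
    }
    with httpx.stream(
        "POST", f"{BASE_URL}/langgraph/invoke", json=payload, timeout=None
    ) as resp:
        resp.raise_for_status()
        for line in resp.iter_lines():
            if not line.startswith("data: "):
                continue  # skip blank separator lines between SSE events
            data = line[len("data: "):]
            if data == "[DONE]":
                break  # the server sends this even after a handled error
            event = json.loads(data)
            if "error" in event:
                raise RuntimeError(event["error"])
            print(event)  # one serialized langgraph event per chunk


if __name__ == "__main__":
    graphs = list_graphs()  # e.g. the agents registered in SUPPORTED_GRAPH
    invoke_stream(graphs[0], "hello")
```

One detail worth noting from the source: `invoke()` calls `request.config.get("user_id", "")` without a None check, so a request that omits `config` entirely will fail before the graph runs; clients should always send a `config` object, even an otherwise empty one.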