botrun-flow-lang 5.12.263__py3-none-any.whl → 6.2.21__py3-none-any.whl
This diff shows the content of publicly released package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
- botrun_flow_lang/api/auth_api.py +39 -39
- botrun_flow_lang/api/auth_utils.py +183 -183
- botrun_flow_lang/api/botrun_back_api.py +65 -65
- botrun_flow_lang/api/flow_api.py +3 -3
- botrun_flow_lang/api/hatch_api.py +508 -508
- botrun_flow_lang/api/langgraph_api.py +816 -811
- botrun_flow_lang/api/langgraph_constants.py +11 -0
- botrun_flow_lang/api/line_bot_api.py +1484 -1484
- botrun_flow_lang/api/model_api.py +300 -300
- botrun_flow_lang/api/rate_limit_api.py +32 -32
- botrun_flow_lang/api/routes.py +79 -79
- botrun_flow_lang/api/search_api.py +53 -53
- botrun_flow_lang/api/storage_api.py +395 -395
- botrun_flow_lang/api/subsidy_api.py +290 -290
- botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
- botrun_flow_lang/api/user_setting_api.py +70 -70
- botrun_flow_lang/api/version_api.py +31 -31
- botrun_flow_lang/api/youtube_api.py +26 -26
- botrun_flow_lang/constants.py +13 -13
- botrun_flow_lang/langgraph_agents/agents/agent_runner.py +178 -178
- botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
- botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gemini_subsidy_graph.py +460 -460
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +730 -723
- botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
- botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
- botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
- botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
- botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
- botrun_flow_lang/langgraph_agents/agents/util/img_util.py +336 -294
- botrun_flow_lang/langgraph_agents/agents/util/local_files.py +419 -419
- botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
- botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
- botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +562 -486
- botrun_flow_lang/langgraph_agents/agents/util/pdf_cache.py +250 -250
- botrun_flow_lang/langgraph_agents/agents/util/pdf_processor.py +204 -204
- botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
- botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
- botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
- botrun_flow_lang/langgraph_agents/agents/util/usage_metadata.py +34 -0
- botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
- botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
- botrun_flow_lang/llm_agent/llm_agent.py +19 -19
- botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
- botrun_flow_lang/log/.gitignore +2 -2
- botrun_flow_lang/main.py +61 -61
- botrun_flow_lang/main_fast.py +51 -51
- botrun_flow_lang/mcp_server/__init__.py +10 -10
- botrun_flow_lang/mcp_server/default_mcp.py +854 -744
- botrun_flow_lang/models/nodes/utils.py +205 -205
- botrun_flow_lang/models/token_usage.py +34 -34
- botrun_flow_lang/requirements.txt +21 -21
- botrun_flow_lang/services/base/firestore_base.py +30 -30
- botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
- botrun_flow_lang/services/hatch/hatch_fs_store.py +419 -419
- botrun_flow_lang/services/storage/storage_cs_store.py +206 -206
- botrun_flow_lang/services/storage/storage_factory.py +12 -12
- botrun_flow_lang/services/storage/storage_store.py +65 -65
- botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
- botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
- botrun_flow_lang/static/docs/tools/index.html +926 -926
- botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
- botrun_flow_lang/tests/api_stress_test.py +357 -357
- botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
- botrun_flow_lang/tests/test_botrun_app.py +46 -46
- botrun_flow_lang/tests/test_html_util.py +31 -31
- botrun_flow_lang/tests/test_img_analyzer.py +190 -190
- botrun_flow_lang/tests/test_img_util.py +39 -39
- botrun_flow_lang/tests/test_local_files.py +114 -114
- botrun_flow_lang/tests/test_mermaid_util.py +103 -103
- botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
- botrun_flow_lang/tests/test_plotly_util.py +151 -151
- botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
- botrun_flow_lang/tools/generate_docs.py +133 -133
- botrun_flow_lang/tools/templates/tools.html +153 -153
- botrun_flow_lang/utils/__init__.py +7 -7
- botrun_flow_lang/utils/botrun_logger.py +344 -344
- botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
- botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
- botrun_flow_lang/utils/google_drive_utils.py +654 -654
- botrun_flow_lang/utils/langchain_utils.py +324 -324
- botrun_flow_lang/utils/yaml_utils.py +9 -9
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/METADATA +6 -6
- botrun_flow_lang-6.2.21.dist-info/RECORD +104 -0
- botrun_flow_lang-5.12.263.dist-info/RECORD +0 -102
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/WHEEL +0 -0
@@ -1,723 +1,730 @@
-import os
-import asyncio
-import json
-from datetime import datetime
-from typing import ClassVar, Dict, List, Optional, Any
-
-from langchain_core.messages import SystemMessage
-
-from botrun_flow_lang.constants import LANG_EN, LANG_ZH_TW
-
-from langgraph.checkpoint.memory import MemorySaver
-from langchain_core.runnables import RunnableConfig
-
-from langchain_core.tools import BaseTool
-
-from langchain_core.tools import tool
-
-from botrun_flow_lang.utils.botrun_logger import get_default_botrun_logger
-
-# All tools now provided by MCP server - no local tool imports needed
-
-from botrun_flow_lang.langgraph_agents.agents.checkpointer.firestore_checkpointer import (
-    AsyncFirestoreCheckpointer,
-)
-
-from langgraph.prebuilt import create_react_agent
-
-from dotenv import load_dotenv
-
-import copy  # used to deep-copy schemas so the original objects are not modified by accident
-
-# Removed DALL-E and rate limiting imports - tools now provided by MCP server
-
-# =========
-# 📋 STAGE 4 REFACTORING COMPLETED (MCP Integration)
-#
-# This file has been refactored to integrate with MCP (Model Context Protocol):
-#
-# ✅ REMOVED (~600 lines):
-# - Language-specific system prompts (zh_tw_system_prompt, en_system_prompt)
-# - Local tool definitions: scrape, chat_with_pdf, chat_with_imgs, generate_image,
-#   generate_tmp_public_url, create_html_page, compare_date_time
-# - Complex conditional logic (if botrun_flow_lang_url and user_id)
-# - Rate limiting exception and related imports
-# - Unused utility imports
-#
-# ✅ SIMPLIFIED:
-# - Direct system_prompt usage (no concatenation)
-# - Streamlined tools list (only language-specific tools)
-# - Clean MCP integration via mcp_config parameter
-# - Maintained backward compatibility for all parameters
-#
-# 🎯 RESULT:
-# - Reduced complexity while maintaining full functionality
-# - All tools available via MCP server at /mcp/default/mcp/
-# - Ready for Phase 2: language-specific tools migration
-# =========
-
-# Init only when actually used, otherwise loading takes time
-# Moved back to module level so langgraph can run locally
-from langchain_google_genai import ChatGoogleGenerativeAI
-
-# =========
-# Import only when actually used, otherwise loading takes time
-# Moved back to module level because LangGraph runs locally
-from botrun_flow_lang.langgraph_agents.agents.util.model_utils import (
-    RotatingChatAnthropic,
-)
-
-# =========
-# Init only when actually used, otherwise loading takes time
-# Moved back to module level because LangGraph runs locally
-from langchain_openai import ChatOpenAI
-
-# =========
-# Init only when actually used, otherwise loading takes time
-# Moved back to module level because LangGraph runs locally
-from langchain_anthropic import ChatAnthropic
-
-# =========
-
-# Assumes MultiServerMCPClient and StructuredTool have been imported correctly
-from
-from langchain_mcp_adapters.client import MultiServerMCPClient
-
-# ========
-# for Vertex AI
-from google.oauth2 import service_account
[removed lines 89-723 of the previous version did not survive extraction; only stray fragments remained]
+import os
+import asyncio
+import json
+from datetime import datetime
+from typing import ClassVar, Dict, List, Optional, Any
+
+from langchain_core.messages import SystemMessage
+
+from botrun_flow_lang.constants import LANG_EN, LANG_ZH_TW
+
+from langgraph.checkpoint.memory import MemorySaver
+from langchain_core.runnables import RunnableConfig
+
+from langchain_core.tools import BaseTool
+
+from langchain_core.tools import tool
+
+from botrun_flow_lang.utils.botrun_logger import get_default_botrun_logger
+
+# All tools now provided by MCP server - no local tool imports needed
+
+from botrun_flow_lang.langgraph_agents.agents.checkpointer.firestore_checkpointer import (
+    AsyncFirestoreCheckpointer,
+)
+
+from langgraph.prebuilt import create_react_agent
+
+from dotenv import load_dotenv
+
+import copy  # used to deep-copy schemas so the original objects are not modified by accident
+
+# Removed DALL-E and rate limiting imports - tools now provided by MCP server
+
+# =========
+# 📋 STAGE 4 REFACTORING COMPLETED (MCP Integration)
+#
+# This file has been refactored to integrate with MCP (Model Context Protocol):
+#
+# ✅ REMOVED (~600 lines):
+# - Language-specific system prompts (zh_tw_system_prompt, en_system_prompt)
+# - Local tool definitions: scrape, chat_with_pdf, chat_with_imgs, generate_image,
+#   generate_tmp_public_url, create_html_page, compare_date_time
+# - Complex conditional logic (if botrun_flow_lang_url and user_id)
+# - Rate limiting exception and related imports
+# - Unused utility imports
+#
+# ✅ SIMPLIFIED:
+# - Direct system_prompt usage (no concatenation)
+# - Streamlined tools list (only language-specific tools)
+# - Clean MCP integration via mcp_config parameter
+# - Maintained backward compatibility for all parameters
+#
+# 🎯 RESULT:
+# - Reduced complexity while maintaining full functionality
+# - All tools available via MCP server at /mcp/default/mcp/
+# - Ready for Phase 2: language-specific tools migration
+# =========
+
+# Init only when actually used, otherwise loading takes time
+# Moved back to module level so langgraph can run locally
+from langchain_google_genai import ChatGoogleGenerativeAI
+
+# =========
+# Import only when actually used, otherwise loading takes time
+# Moved back to module level because LangGraph runs locally
+from botrun_flow_lang.langgraph_agents.agents.util.model_utils import (
+    RotatingChatAnthropic,
+)
+
+# =========
+# Init only when actually used, otherwise loading takes time
+# Moved back to module level because LangGraph runs locally
+from langchain_openai import ChatOpenAI
+
+# =========
+# Init only when actually used, otherwise loading takes time
+# Moved back to module level because LangGraph runs locally
+from langchain_anthropic import ChatAnthropic
+
+# =========
+
+# Assumes MultiServerMCPClient and StructuredTool have been imported correctly
+from langchain_core.tools import StructuredTool
+from langchain_mcp_adapters.client import MultiServerMCPClient
+
+# ========
+# for Vertex AI
+from google.oauth2 import service_account
+# Heavy imports switched to lazy loading to avoid loading google-cloud-aiplatform at startup (about 26 seconds)
+# ChatVertexAI has been migrated to ChatGoogleGenerativeAI(vertexai=True)
+# ChatAnthropicVertex is imported only when needed (see inside the get_react_agent_model function)
+
+load_dotenv()
+
+# logger = default_logger
+logger = get_default_botrun_logger()
+
+
+# Removed BotrunRateLimitException - rate limiting now handled by MCP server
+
+
+# Load Anthropic API keys from environment
+# anthropic_api_keys_str = os.getenv("ANTHROPIC_API_KEYS", "")
+# anthropic_api_keys = [
+#     key.strip() for key in anthropic_api_keys_str.split(",") if key.strip()
+# ]
+
+# Initialize the model with key rotation if multiple keys are available
+# if anthropic_api_keys:
+#     model = RotatingChatAnthropic(
+#         model_name="claude-3-7-sonnet-latest",
+#         keys=anthropic_api_keys,
+#         temperature=0,
+#         max_tokens=8192,
+#     )
+# Create an AWS Session
+# session = boto3.Session(
+#     aws_access_key_id="",
+#     aws_secret_access_key="",
+#     region_name="us-west-2",
+# )
+
+
+# # Use that Session to initialize the Bedrock client
+# bedrock_runtime = session.client(
+#     service_name="bedrock-runtime",
+# )
+# model = ChatBedrockConverse(
+#     model_id="us.anthropic.claude-3-7-sonnet-20250219-v1:0",
+#     client=bedrock_runtime,
+#     temperature=0,
+#     max_tokens=8192,
+# )
+# else:
+# Fallback to traditional initialization if no keys are specified
+def get_react_agent_model_name(model_name: str = ""):
+    final_model_name = model_name
+    if final_model_name == "":
+        final_model_name = "claude-sonnet-4-5-20250929"
+    logger.info(f"final_model_name: {final_model_name}")
+    return final_model_name
+
+
+ANTHROPIC_MAX_TOKENS = 64000
+GEMINI_MAX_TOKENS = 32000
+TAIDE_MAX_TOKENS = 8192
+
+
+def get_react_agent_model(model_name: str = ""):
+    final_model_name = get_react_agent_model_name(model_name).strip()
+
+    # Handle models with the taide/ prefix
+    if final_model_name.startswith("taide/"):
+        taide_api_key = os.getenv("TAIDE_API_KEY", "")
+        taide_base_url = os.getenv("TAIDE_BASE_URL", "")
+
+        if not taide_api_key or not taide_base_url:
+            raise ValueError(
+                f"Model name starts with 'taide/' but TAIDE_API_KEY or TAIDE_BASE_URL not set. "
+                f"Both environment variables are required for: {final_model_name}"
+            )
+
+        # Get the model name after the taide/ prefix
+        taide_model_name = final_model_name[len("taide/"):]
+
+        if not taide_model_name:
+            raise ValueError(
+                f"Invalid taide model format: {final_model_name}. "
+                "Expected format: taide/<model_name>"
+            )
+
+        model = ChatOpenAI(
+            openai_api_key=taide_api_key,
+            openai_api_base=taide_base_url,
+            model_name=taide_model_name,
+            temperature=0,
+            max_tokens=TAIDE_MAX_TOKENS,
+        )
+        logger.info(f"model ChatOpenAI (TAIDE) {taide_model_name} @ {taide_base_url}")
+        return model
+
+    # Handle models with the vertex-ai/ prefix
+    if final_model_name.startswith("vertex-ai/"):
+        vertex_project = os.getenv("VERTEX_AI_LANGCHAIN_PROJECT", "")
+
+        # If VERTEX_AI_LANGCHAIN_PROJECT is not set, do not process the vertex-ai/ prefix
+        if not vertex_project:
+            logger.warning(
+                f"Model name starts with 'vertex-ai/' but VERTEX_AI_LANGCHAIN_PROJECT not set. "
+                f"Skipping vertex-ai/ processing for {final_model_name}"
+            )
+            # Strip the vertex-ai/ prefix and continue
+            final_model_name = final_model_name[len("vertex-ai/"):]
+            # Strip the region part (if present)
+            if "/" in final_model_name:
+                parts = final_model_name.split("/", 1)
+                if len(parts) == 2:
+                    final_model_name = parts[1]
+        else:
+            # Parse the vertex-ai/region/model_name format
+            parts = final_model_name.split("/")
+
+            if len(parts) != 3:
+                raise ValueError(
+                    f"Invalid vertexai model format: {final_model_name}. "
+                    "Expected format: vertex-ai/<region>/<model_name>"
+                )
+
+            vertex_region = parts[1]
+            vertex_model_name = parts[2]
+
+            if not vertex_region or not vertex_model_name:
+                raise ValueError(
+                    f"Missing region or model_name in: {final_model_name}. "
+                    "Both region and model_name are required."
+                )
+
+            # Get credentials
+            vertex_sa_path = os.getenv(
+                "VERTEX_AI_LANGCHAIN_GOOGLE_APPLICATION_CREDENTIALS", ""
+            )
+
+            credentials = None
+            if vertex_sa_path and os.path.exists(vertex_sa_path):
+                SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
+                credentials = service_account.Credentials.from_service_account_file(
+                    vertex_sa_path, scopes=SCOPES
+                )
+                logger.info(f"Using Vertex AI service account from {vertex_sa_path}")
+            else:
+                logger.warning(
+                    "VERTEX_AI_LANGCHAIN_GOOGLE_APPLICATION_CREDENTIALS not set. Using ADC."
+                )
+
+            # Determine the model type and create the matching instance
+            if vertex_model_name.startswith("gemini-"):
+                # Gemini family: gemini-2.5-pro, gemini-2.5-flash, gemini-pro
+                # Use ChatGoogleGenerativeAI with vertexai=True to avoid loading the heavy langchain_google_vertexai
+                model = ChatGoogleGenerativeAI(
+                    model=vertex_model_name,
+                    vertexai=True,
+                    location=vertex_region,
+                    project=vertex_project,
+                    credentials=credentials,
+                    temperature=0,
+                    max_tokens=GEMINI_MAX_TOKENS,
+                )
+                logger.info(
+                    f"model ChatGoogleGenerativeAI(vertexai=True) {vertex_model_name} @ {vertex_region} (project: {vertex_project})"
+                )
+
+            elif "claude" in vertex_model_name.lower() or vertex_model_name.startswith("maison/"):
+                # Anthropic Claude (model garden)
+                # Lazily import ChatAnthropicVertex so langchain_google_vertexai is only loaded when needed
+                from langchain_google_vertexai.model_garden import ChatAnthropicVertex
+                model = ChatAnthropicVertex(
+                    model=vertex_model_name,
+                    location=vertex_region,
+                    project=vertex_project,
+                    credentials=credentials,
+                    temperature=0,
+                    max_tokens=ANTHROPIC_MAX_TOKENS,
+                )
+                logger.info(
+                    f"model ChatAnthropicVertex {vertex_model_name} @ {vertex_region} (project: {vertex_project})"
+                )
+
+            else:
+                raise ValueError(
+                    f"Unsupported Vertex AI model: {vertex_model_name}. "
+                    "Supported types: gemini-*, claude*, maison/*"
+                )
+
+            return model
+
+    if final_model_name.startswith("gemini-"):
+        model = ChatGoogleGenerativeAI(
+            model=final_model_name, temperature=0, max_tokens=GEMINI_MAX_TOKENS
+        )
+        logger.info(f"model ChatGoogleGenerativeAI {final_model_name}")
+    elif final_model_name.startswith("claude-"):
+        # use_vertex_ai = os.getenv("USE_VERTEX_AI", "false").lower() in ("true", "1", "yes")
+        vertex_project = os.getenv("VERTEX_AI_LANGCHAIN_PROJECT", "")
+        vertex_location = os.getenv("VERTEX_AI_LANGCHAIN_LOCATION", "")
+        vertex_model = os.getenv("VERTEX_AI_LANGCHAIN_MODEL", "")
+        vertex_sa_path = os.getenv(
+            "VERTEX_AI_LANGCHAIN_GOOGLE_APPLICATION_CREDENTIALS", ""
+        )
+
+        if vertex_location and vertex_model and vertex_sa_path and vertex_project:
+            # Read the settings from environment variables
+
+            # Validate the service account
+            credentials = None
+            if vertex_sa_path and os.path.exists(vertex_sa_path):
+                # Add the scopes Vertex AI requires
+                SCOPES = [
+                    "https://www.googleapis.com/auth/cloud-platform",
+                ]
+                credentials = service_account.Credentials.from_service_account_file(
+                    vertex_sa_path, scopes=SCOPES
+                )
+                logger.info(f"Using Vertex AI service account from {vertex_sa_path}")
+            else:
+                logger.warning(
+                    "VERTEX_AI_GOOGLE_APPLICATION_CREDENTIALS not set or file not found. Using ADC if available."
+                )
+
+            # Initialize ChatAnthropicVertex
+            # Lazy import: only load langchain_google_vertexai when needed
+            from langchain_google_vertexai.model_garden import ChatAnthropicVertex
+            model = ChatAnthropicVertex(
+                project=vertex_project,
+                model=vertex_model,
+                location=vertex_location,
+                credentials=credentials,
+                temperature=0,
+                max_tokens=ANTHROPIC_MAX_TOKENS,
+            )
+            logger.info(
+                f"model ChatAnthropicVertex {vertex_project} @ {vertex_model} @ {vertex_location}"
+            )
+
+        else:
+            anthropic_api_keys_str = os.getenv("ANTHROPIC_API_KEYS", "")
+            anthropic_api_keys = [
+                key.strip() for key in anthropic_api_keys_str.split(",") if key.strip()
+            ]
+            if anthropic_api_keys:
+
+                model = RotatingChatAnthropic(
+                    model_name=final_model_name,
+                    keys=anthropic_api_keys,
+                    temperature=0,
+                    max_tokens=ANTHROPIC_MAX_TOKENS,
+                )
+                logger.info(f"model RotatingChatAnthropic {final_model_name}")
+            elif os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL"):
+
+                openrouter_model_name = "anthropic/claude-sonnet-4.5"
+                # openrouter_model_name = "openai/o4-mini-high"
+                # openrouter_model_name = "openai/gpt-4.1"
+                model = ChatOpenAI(
+                    openai_api_key=os.getenv("OPENROUTER_API_KEY"),
+                    openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
+                    model_name=openrouter_model_name,
+                    temperature=0,
+                    max_tokens=ANTHROPIC_MAX_TOKENS,
+                    model_kwargs={
+                        # "headers": {
+                        #     "HTTP-Referer": getenv("YOUR_SITE_URL"),
+                        #     "X-Title": getenv("YOUR_SITE_NAME"),
+                        # }
+                    },
+                )
+                logger.info(f"model OpenRouter {openrouter_model_name}")
+            else:
+
+                model = ChatAnthropic(
+                    model=final_model_name,
+                    temperature=0,
+                    max_tokens=ANTHROPIC_MAX_TOKENS,
+                    # model_kwargs={
+                    #     "extra_headers": {
+                    #         "anthropic-beta": "token-efficient-tools-2025-02-19",
+                    #         "anthropic-beta": "output-128k-2025-02-19",
+                    #     }
+                    # },
+                )
+                logger.info(f"model ChatAnthropic {final_model_name}")
+
+    else:
+        raise ValueError(f"Unknown model name prefix: {final_model_name}")
+
+    return model
+
+
+# model = ChatOpenAI(model="gpt-4o", temperature=0)
+# model = ChatGoogleGenerativeAI(model="gemini-2.0-pro-exp-02-05", temperature=0)
+# model = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
+
+
+# For this tutorial we will use custom tool that returns pre-defined values for weather in two cities (NYC & SF)
+
+
+# Removed scrape and compare_date_time tools - now provided by MCP server
+
+
+# Removed chat_with_pdf tool - now provided by MCP server
+
+
+# Removed generate_image tool - now provided by MCP server
+
+
+# Removed chat_with_imgs tool - now provided by MCP server
+
+
+# Removed generate_tmp_public_url tool - now provided by MCP server
+
+
+def format_dates(dt):
+    """
+    Format a datetime into both the Western (Gregorian) and the ROC (Minguo) calendar
+    Western format: yyyy-mm-dd hh:mm:ss
+    ROC format: (yyyy-1911)-mm-dd hh:mm:ss
+    """
+    western_date = dt.strftime("%Y-%m-%d %H:%M:%S")
+    taiwan_year = dt.year - 1911
+    taiwan_date = f"{taiwan_year}-{dt.strftime('%m-%d %H:%M:%S')}"
+
+    return {"western_date": western_date, "taiwan_date": taiwan_date}
+
+
+# Removed create_html_page tool - now provided by MCP server
+
+
+# DICT_VAR = {}
+
+# Define the graph
+
+# Removed language-specific system prompts - now using user-provided system_prompt directly
+
+
+def transform_anthropic_incompatible_schema(
+    schema_dict: dict,
+) -> tuple[dict, bool, str]:
+    """
+    Transform top-level schema structures that may be incompatible with Anthropic.
+
+    Args:
+        schema_dict: the original schema dict.
+
+    Returns:
+        tuple: (transformed schema dict, whether a transformation was applied, note appended to the description)
+    """
+    if not isinstance(schema_dict, dict):
+        return schema_dict, False, ""
+
+    keys_to_check = ["anyOf", "allOf", "oneOf"]
+    problematic_key = None
+    for key in keys_to_check:
+        if key in schema_dict:
+            problematic_key = key
+            break
+
+    if problematic_key:
+        print(f" 發現頂層 '{problematic_key}',進行轉換...")
+        transformed = True
+        new_schema = {"type": "object", "properties": {}, "required": []}
+        description_notes = f"\n[開發者註記:此工具參數原使用 '{problematic_key}' 結構,已轉換。請依賴參數描述判斷必要輸入。]"
+
+        # 1. Merge properties
+        # First add the top-level properties (if present)
+        if "properties" in schema_dict:
+            new_schema["properties"].update(copy.deepcopy(schema_dict["properties"]))
+        # Then merge the properties from inside the problematic_key entries
+        for sub_schema in schema_dict.get(problematic_key, []):
+            if isinstance(sub_schema, dict) and "properties" in sub_schema:
+                # Note: if different sub_schemas define the same property, the later one overwrites the earlier
+                new_schema["properties"].update(copy.deepcopy(sub_schema["properties"]))
+
+        # 2. Handle required
+        top_level_required = set(schema_dict.get("required", []))
+
+        if problematic_key == "allOf":
+            # allOf: merge all required fields
+            combined_required = top_level_required
+            for sub_schema in schema_dict.get(problematic_key, []):
+                if isinstance(sub_schema, dict) and "required" in sub_schema:
+                    combined_required.update(sub_schema["required"])
+            # Keep only the required fields that actually exist in the merged properties
+            new_schema["required"] = sorted(
+                [req for req in combined_required if req in new_schema["properties"]]
+            )
+            description_notes += " 所有相關參數均需考慮。]"  # brief hint
+        elif problematic_key in ["anyOf", "oneOf"]:
+            # anyOf/oneOf: keep only the top-level required and note the optionality in the description
+            new_schema["required"] = sorted(
+                [req for req in top_level_required if req in new_schema["properties"]]
+            )
+            # Try to produce a more specific hint (if the sub_schema structure is simple)
+            options = []
+            for sub_schema in schema_dict.get(problematic_key, []):
+                if isinstance(sub_schema, dict) and "required" in sub_schema:
+                    options.append(f"提供 '{', '.join(sub_schema['required'])}'")
+            if options:
+                description_notes += (
+                    f" 通常需要滿足以下條件之一:{'; 或 '.join(options)}。]"
+                )
+            else:
+                description_notes += " 請注意參數間的選擇關係。]"
+
+        print(
+            f" 轉換後 schema: {json.dumps(new_schema, indent=2, ensure_ascii=False)}"
+        )
+        return new_schema, transformed, description_notes
+    else:
+        return schema_dict, False, ""
+
+
+# --- Schema conversion helper (extracted from _get_mcp_tools_async) ---
+def _process_mcp_tools_for_anthropic(langchain_tools: List[Any]) -> List[Any]:
+    """Process the MCP tool list: convert incompatible schemas and log the results."""
+    if not langchain_tools:
+        logger.info("[_process_mcp_tools_for_anthropic] 警告 - 未找到任何工具。")
+        return []
+
+    logger.info(
+        f"[_process_mcp_tools_for_anthropic] --- 開始處理 {len(langchain_tools)} 個原始 MCP 工具 ---"
+    )
+
+    processed_tools = []
+    for mcp_tool in langchain_tools:
+        # Only handle StructuredTool or similar tools that have an args_schema
+        if not hasattr(mcp_tool, "args_schema") or not mcp_tool.args_schema:
+            logger.debug(
+                f"[_process_mcp_tools_for_anthropic] 工具 '{mcp_tool.name}' 沒有 args_schema,直接加入。"
+            )
+            processed_tools.append(mcp_tool)
+            continue
+
+        original_schema_dict = {}
+        try:
+            # Try to obtain the schema dict (this differs between Pydantic versions)
+            if hasattr(mcp_tool.args_schema, "model_json_schema"):  # Pydantic V2
+                original_schema_dict = mcp_tool.args_schema.model_json_schema()
+            elif hasattr(mcp_tool.args_schema, "schema"):  # Pydantic V1
+                original_schema_dict = mcp_tool.args_schema.schema()
+            elif isinstance(mcp_tool.args_schema, dict):  # already a dict?
+                original_schema_dict = mcp_tool.args_schema
+            else:
+                logger.warning(
+                    f"[_process_mcp_tools_for_anthropic] 無法獲取工具 '{mcp_tool.name}' 的 schema 字典 ({type(mcp_tool.args_schema)}),跳過轉換。"
+                )
+                processed_tools.append(mcp_tool)
+                continue
+
+            # Run the conversion check
+            logger.debug(
+                f"[_process_mcp_tools_for_anthropic] 檢查工具 '{mcp_tool.name}' 的 schema..."
+            )
+            new_schema_dict, transformed, desc_notes = (
+                transform_anthropic_incompatible_schema(
+                    copy.deepcopy(original_schema_dict)  # operate on a deep copy
+                )
+            )
+
+            if transformed:
+                mcp_tool.description += desc_notes
+                logger.info(
+                    f"[_process_mcp_tools_for_anthropic] 工具 '{mcp_tool.name}' 的描述已更新。"
+                )
+                if isinstance(mcp_tool.args_schema, dict):
+                    logger.debug(
+                        f"[_process_mcp_tools_for_anthropic] args_schema 是字典,直接替換工具 '{mcp_tool.name}' 的 schema。"
+                    )
+                    mcp_tool.args_schema = new_schema_dict
+                else:
+                    # If args_schema is a Pydantic model, modifying it directly may be ineffective or difficult
+                    # Attaching the transformed dict is a fallback, but Langchain/LangGraph may not use it directly
+                    # Ideally the tools returned by get_tools would have a modifiable args_schema,
+                    # or the transformed schema would be used when the tool is created.
+                    # If it cannot be modified directly, attaching an attribute marks it, but the call site may need to handle it.
+                    logger.warning(
+                        f"[_process_mcp_tools_for_anthropic] args_schema 不是字典 ({type(mcp_tool.args_schema)}),僅添加 _transformed_args_schema_dict 屬性到工具 '{mcp_tool.name}'。這可能不足以解決根本問題。"
+                    )
+                    setattr(mcp_tool, "_transformed_args_schema_dict", new_schema_dict)
+            processed_tools.append(mcp_tool)
+
+        except Exception as e_schema:
+            logger.error(
+                f"[_process_mcp_tools_for_anthropic] 處理工具 '{mcp_tool.name}' schema 時發生錯誤: {e_schema}",
+                exc_info=True,
+            )
+            processed_tools.append(mcp_tool)  # keep the original tool
+
+    logger.info(
+        f"[_process_mcp_tools_for_anthropic] --- 完成工具處理,返回 {len(processed_tools)} 個工具 ---"
+    )
+    return processed_tools
+
+
+async def create_react_agent_graph(
+    system_prompt: str = "",
+    botrun_flow_lang_url: str = "",
+    user_id: str = "",
+    model_name: str = "",
+    lang: str = LANG_EN,
+    mcp_config: Optional[Dict[str, Any]] = None,  # <--- receives a config, not a client instance
+):
+    """
+    Create a react agent graph with simplified architecture.
+
+    This function now creates a fully MCP-integrated agent with:
+    - Direct system prompt usage (no language-specific prompt concatenation)
+    - Zero local tools - all functionality provided by MCP server
+    - Complete MCP server integration for all tools (web search, scraping, PDF/image analysis, time/date, visualizations, etc.)
+    - Removed all complex conditional logic and local tool definitions
+
+    Args:
+        system_prompt: The system prompt to use for the agent (used directly, no concatenation)
+        botrun_flow_lang_url: URL for botrun flow lang service (reserved for future use)
+        user_id: User identifier (reserved for future use)
+        model_name: AI model name to use (defaults to claude-sonnet-4-5-20250929)
+        lang: Language code affecting language-specific tools (e.g., "en", "zh-TW")
+        mcp_config: MCP servers configuration dict providing tools like scrape, chat_with_pdf, etc.
+
+    Returns:
+        A LangGraph react agent configured with simplified architecture
+
+    Note:
+        - Local MCP tools (scrape, chat_with_pdf, etc.) have been removed
+        - compare_date_time tool has been completely removed
+        - All advanced tools are now provided via MCP server configuration
+        - Language-specific prompts have been removed for simplification
+    """
+
+    # Complete MCP migration - all tools are now provided by MCP server
+    # No local tools remain - all functionality accessed via mcp_config
+    tools = [
+        # ✅ ALL MIGRATED TO MCP: scrape, chat_with_pdf, chat_with_imgs, generate_image,
+        # generate_tmp_public_url, create_html_page, create_plotly_chart,
+        # create_mermaid_diagram, current_date_time, web_search
+        # ❌ REMOVED: compare_date_time (completely eliminated)
+    ]
+
+    mcp_tools = []
+    if mcp_config:
+        logger.info("偵測到 MCP 配置,直接創建 MCP 工具...")
+        try:
+            # Create the MCP client and fetch the tools directly, without a context manager
+
+            client = MultiServerMCPClient(mcp_config)
+            raw_mcp_tools = await client.get_tools()
+            print("raw_mcp_tools============>", raw_mcp_tools)
+
+            if raw_mcp_tools:
+                logger.info(f"從 MCP 配置獲取了 {len(raw_mcp_tools)} 個原始工具。")
+                # Process the schemas (using the extracted helper function)
+                mcp_tools = _process_mcp_tools_for_anthropic(raw_mcp_tools)
+                if mcp_tools:
+                    tools.extend(mcp_tools)
+                    logger.info(f"已加入 {len(mcp_tools)} 個處理後的 MCP 工具。")
+                    logger.debug(
+                        f"加入的 MCP 工具名稱: {[tool.name for tool in mcp_tools]}"
+                    )
+                else:
+                    logger.warning("MCP 工具處理後列表為空。")
+            else:
+                logger.info("MCP Client 返回了空的工具列表。")
+
+            # Note: we do not close the client here because the tools may need it to run
+            # The client is cleaned up automatically after the graph finishes running
+            logger.info("MCP client 和工具創建完成,client 將保持活動狀態")
+
+        except Exception as e_get:
+            import traceback
+
+            traceback.print_exc()
+            logger.error(f"從 MCP 配置獲取或處理工具時發生錯誤: {e_get}", exc_info=True)
+            # Even if this fails, we may still want to continue (without MCP tools)
+    else:
+        logger.info("未提供 MCP 配置,跳過 MCP 工具。")
+
+    # Simplified: use user-provided system_prompt directly (no language-specific prompts)
+    new_system_prompt = system_prompt
+    if botrun_flow_lang_url and user_id:
+        new_system_prompt = (
+            f"""IMPORTANT: Any URL returned by tools MUST be included in your response as a markdown link [text](URL).
+Please use the standard [text](URL) format to present links, ensuring the link text remains plain and unformatted.
+Example:
+User: "Create a new page for our project documentation"
+Tool returns: {{"page_url": "https://notion.so/workspace/abc123"}}
+Assistant: "I've created the new page for your project documentation. You can access it here: [Project Documentation](https://notion.so/workspace/abc123)"
+"""
+            + system_prompt
+            + f"""\n\n
+- If the tool needs parameter like botrun_flow_lang_url or user_id, please use the following:
+botrun_flow_lang_url: {botrun_flow_lang_url}
+user_id: {user_id}
+"""
+        )
+    system_message = SystemMessage(
+        content=[
+            {
+                "text": new_system_prompt,
+                "type": "text",
+                "cache_control": {"type": "ephemeral"},
+            }
+        ]
+    )
+
+    # For now this follows https://docs.anthropic.com/en/docs/build-with-claude/tool-use/token-efficient-tool-use
+    # but that block runs into:
+    # File "/Users/seba/Projects/botrun_flow_lang/.venv/lib/python3.11/site-packages/langgraph/prebuilt/tool_node.py", line 218, in __init__
+    #     tool_ = create_tool(tool_)
+    #             ^^^^^^^^^^^^^^^^^^
+    # File "/Users/seba/Projects/botrun_flow_lang/.venv/lib/python3.11/site-packages/langchain_core/tools/convert.py", line 334, in tool
+    #     raise ValueError(msg)
+    # ValueError: The first argument must be a string or a callable with a __name__ for tool decorator. Got <class 'dict'>
+    # So that block is disabled for now; it was based on https://python.langchain.com/docs/integrations/chat/anthropic/#tools
+    # It may be brought back in the future
+    # if get_react_agent_model_name(model_name).startswith("claude-"):
+    #     new_tools = []
+    #     for tool in tools:
+    #         new_tool = convert_to_anthropic_tool(tool)
+    #         new_tool["cache_control"] = {"type": "ephemeral"}
+    #         new_tools.append(new_tool)
+    #     tools = new_tools
+
+    env_name = os.getenv("ENV_NAME", "botrun-flow-lang-dev")
+    result = create_react_agent(
+        get_react_agent_model(model_name),
+        tools=tools,
+        prompt=system_message,
+        checkpointer=MemorySaver(),  # the Firestore checkpointer is not needed when running inside botrun_back
+        # checkpointer=AsyncFirestoreCheckpointer(env_name=env_name),
+    )
+    return result
+
+
+# Default graph instance with empty prompt
+# if True:
+#     react_agent_graph = create_react_agent_graph()
+# For LangGraph Studio testing: un-comment the following to test
+# react_agent_graph = create_react_agent_graph(
+#     system_prompt="",
+#     botrun_flow_lang_url="https://botrun-flow-lang-fastapi-dev-36186877499.asia-east1.run.app",
+#     user_id="sebastian.hsu@gmail.com",
+#     )
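
For readers who want to try the refactored agent, the sketch below shows one way the new async create_react_agent_graph might be called with an mcp_config. It is a minimal illustration, not the package's documented usage: the server name "default", the endpoint URL, and the "streamable_http" transport are assumptions based on the "/mcp/default/mcp/" comment in the file and on the MultiServerMCPClient configuration format of langchain-mcp-adapters, which may differ by version.

import asyncio

from botrun_flow_lang.langgraph_agents.agents.langgraph_react_agent import (
    create_react_agent_graph,
)

# Hypothetical MCP server entry; adjust the URL and transport to your deployment.
mcp_config = {
    "default": {
        "transport": "streamable_http",
        "url": "https://your-botrun-flow-lang-host/mcp/default/mcp/",  # assumed endpoint
    }
}

async def main():
    # The graph is compiled with a MemorySaver checkpointer, so a thread_id is required.
    graph = await create_react_agent_graph(
        system_prompt="You are a helpful assistant.",
        model_name="claude-sonnet-4-5-20250929",
        mcp_config=mcp_config,
    )
    result = await graph.ainvoke(
        {"messages": [("user", "What time is it in Taipei?")]},
        config={"configurable": {"thread_id": "demo-thread"}},
    )
    print(result["messages"][-1].content)

asyncio.run(main())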