botrun-flow-lang 5.12.263__py3-none-any.whl → 5.12.264__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (87)
  1. botrun_flow_lang/api/auth_api.py +39 -39
  2. botrun_flow_lang/api/auth_utils.py +183 -183
  3. botrun_flow_lang/api/botrun_back_api.py +65 -65
  4. botrun_flow_lang/api/flow_api.py +3 -3
  5. botrun_flow_lang/api/hatch_api.py +508 -508
  6. botrun_flow_lang/api/langgraph_api.py +811 -811
  7. botrun_flow_lang/api/line_bot_api.py +1484 -1484
  8. botrun_flow_lang/api/model_api.py +300 -300
  9. botrun_flow_lang/api/rate_limit_api.py +32 -32
  10. botrun_flow_lang/api/routes.py +79 -79
  11. botrun_flow_lang/api/search_api.py +53 -53
  12. botrun_flow_lang/api/storage_api.py +395 -395
  13. botrun_flow_lang/api/subsidy_api.py +290 -290
  14. botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
  15. botrun_flow_lang/api/user_setting_api.py +70 -70
  16. botrun_flow_lang/api/version_api.py +31 -31
  17. botrun_flow_lang/api/youtube_api.py +26 -26
  18. botrun_flow_lang/constants.py +13 -13
  19. botrun_flow_lang/langgraph_agents/agents/agent_runner.py +178 -178
  20. botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
  21. botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
  22. botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
  23. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gemini_subsidy_graph.py +460 -460
  24. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
  25. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
  26. botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +723 -723
  27. botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
  28. botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
  29. botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
  30. botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
  31. botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
  32. botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
  33. botrun_flow_lang/langgraph_agents/agents/util/local_files.py +419 -419
  34. botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
  35. botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
  36. botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +486 -486
  37. botrun_flow_lang/langgraph_agents/agents/util/pdf_cache.py +250 -250
  38. botrun_flow_lang/langgraph_agents/agents/util/pdf_processor.py +204 -204
  39. botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
  40. botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
  41. botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
  42. botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
  43. botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
  44. botrun_flow_lang/llm_agent/llm_agent.py +19 -19
  45. botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
  46. botrun_flow_lang/log/.gitignore +2 -2
  47. botrun_flow_lang/main.py +61 -61
  48. botrun_flow_lang/main_fast.py +51 -51
  49. botrun_flow_lang/mcp_server/__init__.py +10 -10
  50. botrun_flow_lang/mcp_server/default_mcp.py +744 -744
  51. botrun_flow_lang/models/nodes/utils.py +205 -205
  52. botrun_flow_lang/models/token_usage.py +34 -34
  53. botrun_flow_lang/requirements.txt +21 -21
  54. botrun_flow_lang/services/base/firestore_base.py +30 -30
  55. botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
  56. botrun_flow_lang/services/hatch/hatch_fs_store.py +419 -419
  57. botrun_flow_lang/services/storage/storage_cs_store.py +206 -206
  58. botrun_flow_lang/services/storage/storage_factory.py +12 -12
  59. botrun_flow_lang/services/storage/storage_store.py +65 -65
  60. botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
  61. botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
  62. botrun_flow_lang/static/docs/tools/index.html +926 -926
  63. botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
  64. botrun_flow_lang/tests/api_stress_test.py +357 -357
  65. botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
  66. botrun_flow_lang/tests/test_botrun_app.py +46 -46
  67. botrun_flow_lang/tests/test_html_util.py +31 -31
  68. botrun_flow_lang/tests/test_img_analyzer.py +190 -190
  69. botrun_flow_lang/tests/test_img_util.py +39 -39
  70. botrun_flow_lang/tests/test_local_files.py +114 -114
  71. botrun_flow_lang/tests/test_mermaid_util.py +103 -103
  72. botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
  73. botrun_flow_lang/tests/test_plotly_util.py +151 -151
  74. botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
  75. botrun_flow_lang/tools/generate_docs.py +133 -133
  76. botrun_flow_lang/tools/templates/tools.html +153 -153
  77. botrun_flow_lang/utils/__init__.py +7 -7
  78. botrun_flow_lang/utils/botrun_logger.py +344 -344
  79. botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
  80. botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
  81. botrun_flow_lang/utils/google_drive_utils.py +654 -654
  82. botrun_flow_lang/utils/langchain_utils.py +324 -324
  83. botrun_flow_lang/utils/yaml_utils.py +9 -9
  84. {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-5.12.264.dist-info}/METADATA +1 -1
  85. botrun_flow_lang-5.12.264.dist-info/RECORD +102 -0
  86. botrun_flow_lang-5.12.263.dist-info/RECORD +0 -102
  87. {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-5.12.264.dist-info}/WHEEL +0 -0
botrun_flow_lang/llm_agent/llm_agent_util.py CHANGED
@@ -1,83 +1,83 @@
- import os
- import re
- from typing import List
- from dotenv import load_dotenv
-
- from botrun_flow_lang.llm_agent.llm_agent import LlmAgent
-
- load_dotenv()
-
- DEFAULT_APPEND_SYSTEM_PROMPT = os.getenv("DEFAULT_APPEND_SYSTEM_PROMPT", "")
-
- AGENT_TEMPLATE = """
- 你會遵守以下 tag <你需要遵守的原則> 內的原則,來回應使用者的輸入,使用者的輸入使用 tag <使用者的輸入> 標籤表示。
- <你需要遵守的原則>
- {rules}
- </你需要遵守的原則>
-
- <使用者的輸入>
- {context}
- </使用者的輸入>
- """
-
-
- def get_agents(xml_system_prompt: str) -> List[LlmAgent]:
-     # If the prompt is not in XML format, return an empty list
-     # if not xml_system_prompt.strip().startswith("<agents>"):
-     #     return []
-
-     agent_prompts = []
-     # Use a regular expression to find all <agent> tags and their contents
-     agent_patterns = re.findall(r"<agent>(.*?)</agent>", xml_system_prompt, re.DOTALL)
-
-     for agent_content in agent_patterns:
-         # Extract name, model, print-output (if present)
-         name = re.search(r"<name>(.*?)</name>", agent_content)
-         name = name.group(1) if name else ""
-
-         model = re.search(r"<model>(.*?)</model>", agent_content)
-         model = model.group(1) if model else os.getenv("MULTI_AGENT_DEFAULT_MODEL", "")
-
-         print_output = re.search(r"<print-output>(.*?)</print-output>", agent_content)
-         print_output = print_output.group(1).lower() == "true" if print_output else True
-
-         print_plotly = re.search(r"<print-plotly>(.*?)</print-plotly>", agent_content)
-         print_plotly = print_plotly.group(1).lower() == "true" if print_plotly else True
-
-         gen_image = re.search(r"<gen-image>(.*?)</gen-image>", agent_content)
-         gen_image = gen_image.group(1).lower() == "true" if gen_image else False
-
-         include_in_history = re.search(
-             r"<include-in-history>(.*?)</include-in-history>", agent_content
-         )
-         include_in_history = (
-             include_in_history.group(1).lower() == "true"
-             if include_in_history
-             else True
-         )
-
-         max_system_prompt_length = re.search(
-             r"<max-system-prompt-length>(.*?)</max-system-prompt-length>", agent_content
-         )
-         max_system_prompt_length = (
-             int(max_system_prompt_length.group(1)) if max_system_prompt_length else None
-         )
-
-         # The whole <agent> block (including the <agent> tags themselves) is used as the system_prompt
-         system_prompt = f"<agent>{agent_content}</agent>"
-         if DEFAULT_APPEND_SYSTEM_PROMPT:
-             system_prompt = f"{system_prompt} \n\n{DEFAULT_APPEND_SYSTEM_PROMPT}"
-
-         agent_prompt = LlmAgent(
-             name=name,
-             model=model,
-             system_prompt=system_prompt,
-             print_output=print_output,
-             print_plotly=print_plotly,
-             gen_image=gen_image,
-             include_in_history=include_in_history,
-             max_system_prompt_length=max_system_prompt_length,
-         )
-         agent_prompts.append(agent_prompt)
-
-     return agent_prompts
+ [83 lines re-added; they are identical to the 83 lines removed above, so the rewrite appears to affect only whitespace or line endings]
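For orientation, a minimal usage sketch of the parser in this hunk (which, by its +83/-83 line count, corresponds to botrun_flow_lang/llm_agent/llm_agent_util.py in the file list above). The XML below and its tag values are illustrative only and not taken from the package:

from botrun_flow_lang.llm_agent.llm_agent_util import get_agents

# Hypothetical multi-agent system prompt; the tag names mirror the regexes in get_agents(),
# the concrete values are made up for illustration.
sample_prompt = """
<agents>
  <agent>
    <name>researcher</name>
    <model>example-model-id</model>
    <print-output>false</print-output>
  </agent>
  <agent>
    <name>writer</name>
    <include-in-history>false</include-in-history>
    <max-system-prompt-length>4000</max-system-prompt-length>
  </agent>
</agents>
"""

agents = get_agents(sample_prompt)
print(len(agents))  # 2: one LlmAgent per <agent> block
# Each agent keeps its whole <agent>...</agent> block as system_prompt; tags that are absent
# fall back to the defaults handled above: model comes from MULTI_AGENT_DEFAULT_MODEL,
# print_output / print_plotly / include_in_history default to True, gen_image to False,
# and max_system_prompt_length to None.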
botrun_flow_lang/log/.gitignore CHANGED
@@ -1,2 +1,2 @@
- *.log
- temp.json
+ *.log
+ temp.json
botrun_flow_lang/main.py CHANGED
@@ -1,61 +1,61 @@
- import contextlib
- from fastapi import FastAPI
- from fastapi.staticfiles import StaticFiles
-
- from botrun_flow_lang.api.routes import router
- from botrun_flow_lang.mcp_server import mcp
-
- from fastapi.middleware.cors import CORSMiddleware
- from fastapi.responses import FileResponse
- from pathlib import Path
-
-
- # Create a lifespan manager for MCP server
- @contextlib.asynccontextmanager
- async def lifespan(app: FastAPI):
-     async with contextlib.AsyncExitStack() as stack:
-         await stack.enter_async_context(mcp.session_manager.run())
-         yield
-
-
- app = FastAPI(lifespan=lifespan)
-
- app.include_router(router)
- app.add_middleware(
-     CORSMiddleware,
-     allow_origins=["*"],  # allow all origins
-     allow_credentials=True,
-     allow_methods=["*"],  # allow all methods
-     allow_headers=["*"],  # allow all headers
- )
-
- # Absolute path of the project root directory
- project_root = Path(__file__).parent
-
-
- @app.get("/docs/tools")  # note: trailing slash removed here
- @app.get("/docs/tools/")
- async def get_docs():
-     return FileResponse(project_root / "static/docs/tools/index.html")
-
-
- # Mount the static files directory
- app.mount(
-     "/docs/tools",
-     StaticFiles(directory=str(project_root / "static/docs/tools")),
-     name="tool_docs",
- )
-
- # Mount MCP server
- app.mount("/mcp/default", mcp.streamable_http_app())
-
-
- @app.get("/heartbeat")
- async def heartbeat():
-     return {"status": "ok"}
-
-
- if __name__ == "__main__":
-     import uvicorn
-
-     uvicorn.run(app, host="0.0.0.0", port=8000)
+ [61 lines re-added; they are identical to the 61 lines removed above, so the rewrite appears to affect only whitespace or line endings]
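A quick smoke check against a locally running instance of this app; a hedged sketch only, assuming the server was started via the __main__ block above (port 8000) and that httpx is available in the environment:

import httpx

# /heartbeat is defined directly on the app and requires no authentication.
resp = httpx.get("http://localhost:8000/heartbeat", timeout=5)
assert resp.status_code == 200
assert resp.json() == {"status": "ok"}

# The MCP server is mounted under /mcp/default via streamable_http_app(); exercising it
# requires an MCP client session rather than a plain GET, so it is not probed here.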
botrun_flow_lang/main_fast.py CHANGED
@@ -1,51 +1,51 @@
- import logging
- from fastapi import FastAPI
- from fastapi.middleware.cors import CORSMiddleware
- import uvicorn
- from uvicorn.config import LOGGING_CONFIG
-
- # Configure custom logging with timestamp
- LOGGING_CONFIG["formatters"]["default"][
-     "fmt"
- ] = "%(asctime)s [%(name)s] %(levelprefix)s %(message)s"
- LOGGING_CONFIG["formatters"]["access"][
-     "fmt"
- ] = '%(asctime)s [%(name)s] %(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s'
-
-
- app = FastAPI(
-     title="Botrun Flow Language Fast API",
-     description="A lightweight FastAPI server",
-     version="0.1.0",
- )
-
-
- # Add custom logging middleware
-
- # Add CORS middleware
- app.add_middleware(
-     CORSMiddleware,
-     allow_origins=["*"],
-     allow_credentials=True,
-     allow_methods=["*"],
-     allow_headers=["*"],
- )
-
-
- @app.get("/fast_hello")
- async def fast_hello():
-     return {
-         "status": "success",
-         "message": "Hello from fast API!",
-         "service": "botrun-flow-lang-fastapi-fast",
-     }
-
-
- @app.get("/health")
- async def health_check():
-     return {"status": "healthy"}
-
-
- # Add this if you want to run the app directly with python
- if __name__ == "__main__":
-     uvicorn.run("main_fast:app", host="0.0.0.0", port=8081, log_config=LOGGING_CONFIG)
+ [51 lines re-added; they are identical to the 51 lines removed above, so the rewrite appears to affect only whitespace or line endings]
botrun_flow_lang/mcp_server/__init__.py CHANGED
@@ -1,10 +1,10 @@
- """
- MCP Server module for BotrunFlowLang
-
- This module provides MCP (Model Context Protocol) server implementation
- that exposes tools for LangGraph agents.
- """
-
- from .default_mcp import mcp
-
- __all__ = ["mcp"]
+ """
+ MCP Server module for BotrunFlowLang
+
+ This module provides MCP (Model Context Protocol) server implementation
+ that exposes tools for LangGraph agents.
+ """
+
+ from .default_mcp import mcp
+
+ __all__ = ["mcp"]