botrun-flow-lang 5.9.301__py3-none-any.whl → 5.10.82__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. botrun_flow_lang/api/auth_api.py +39 -39
  2. botrun_flow_lang/api/auth_utils.py +183 -183
  3. botrun_flow_lang/api/botrun_back_api.py +65 -65
  4. botrun_flow_lang/api/flow_api.py +3 -3
  5. botrun_flow_lang/api/hatch_api.py +481 -481
  6. botrun_flow_lang/api/langgraph_api.py +796 -796
  7. botrun_flow_lang/api/line_bot_api.py +1357 -1357
  8. botrun_flow_lang/api/model_api.py +300 -300
  9. botrun_flow_lang/api/rate_limit_api.py +32 -32
  10. botrun_flow_lang/api/routes.py +79 -79
  11. botrun_flow_lang/api/search_api.py +53 -53
  12. botrun_flow_lang/api/storage_api.py +316 -316
  13. botrun_flow_lang/api/subsidy_api.py +290 -290
  14. botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
  15. botrun_flow_lang/api/user_setting_api.py +70 -70
  16. botrun_flow_lang/api/version_api.py +31 -31
  17. botrun_flow_lang/api/youtube_api.py +26 -26
  18. botrun_flow_lang/constants.py +13 -13
  19. botrun_flow_lang/langgraph_agents/agents/agent_runner.py +174 -174
  20. botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
  21. botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
  22. botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
  23. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
  24. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
  25. botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +548 -542
  26. botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
  27. botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
  28. botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
  29. botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
  30. botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
  31. botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
  32. botrun_flow_lang/langgraph_agents/agents/util/local_files.py +345 -345
  33. botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
  34. botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
  35. botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +160 -160
  36. botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
  37. botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
  38. botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
  39. botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
  40. botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
  41. botrun_flow_lang/llm_agent/llm_agent.py +19 -19
  42. botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
  43. botrun_flow_lang/log/.gitignore +2 -2
  44. botrun_flow_lang/main.py +61 -61
  45. botrun_flow_lang/main_fast.py +51 -51
  46. botrun_flow_lang/mcp_server/__init__.py +10 -10
  47. botrun_flow_lang/mcp_server/default_mcp.py +711 -711
  48. botrun_flow_lang/models/nodes/utils.py +205 -205
  49. botrun_flow_lang/models/token_usage.py +34 -34
  50. botrun_flow_lang/requirements.txt +21 -21
  51. botrun_flow_lang/services/base/firestore_base.py +30 -30
  52. botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
  53. botrun_flow_lang/services/hatch/hatch_fs_store.py +372 -372
  54. botrun_flow_lang/services/storage/storage_cs_store.py +202 -202
  55. botrun_flow_lang/services/storage/storage_factory.py +12 -12
  56. botrun_flow_lang/services/storage/storage_store.py +65 -65
  57. botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
  58. botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
  59. botrun_flow_lang/static/docs/tools/index.html +926 -926
  60. botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
  61. botrun_flow_lang/tests/api_stress_test.py +357 -357
  62. botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
  63. botrun_flow_lang/tests/test_botrun_app.py +46 -46
  64. botrun_flow_lang/tests/test_html_util.py +31 -31
  65. botrun_flow_lang/tests/test_img_analyzer.py +190 -190
  66. botrun_flow_lang/tests/test_img_util.py +39 -39
  67. botrun_flow_lang/tests/test_local_files.py +114 -114
  68. botrun_flow_lang/tests/test_mermaid_util.py +103 -103
  69. botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
  70. botrun_flow_lang/tests/test_plotly_util.py +151 -151
  71. botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
  72. botrun_flow_lang/tools/generate_docs.py +133 -133
  73. botrun_flow_lang/tools/templates/tools.html +153 -153
  74. botrun_flow_lang/utils/__init__.py +7 -7
  75. botrun_flow_lang/utils/botrun_logger.py +344 -344
  76. botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
  77. botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
  78. botrun_flow_lang/utils/google_drive_utils.py +654 -654
  79. botrun_flow_lang/utils/langchain_utils.py +324 -324
  80. botrun_flow_lang/utils/yaml_utils.py +9 -9
  81. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/METADATA +2 -2
  82. botrun_flow_lang-5.10.82.dist-info/RECORD +99 -0
  83. botrun_flow_lang-5.9.301.dist-info/RECORD +0 -99
  84. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/WHEEL +0 -0
botrun_flow_lang/llm_agent/llm_agent_util.py CHANGED
@@ -1,83 +1,83 @@
-import os
-import re
-from typing import List
-from dotenv import load_dotenv
-
-from botrun_flow_lang.llm_agent.llm_agent import LlmAgent
-
-load_dotenv()
-
-DEFAULT_APPEND_SYSTEM_PROMPT = os.getenv("DEFAULT_APPEND_SYSTEM_PROMPT", "")
-
-AGENT_TEMPLATE = """
-你會遵守以下 tag <你需要遵守的原則> 內的原則,來回應使用者的輸入,使用者的輸入使用 tag <使用者的輸入> 標籤表示。
-<你需要遵守的原則>
-{rules}
-</你需要遵守的原則>
-
-<使用者的輸入>
-{context}
-</使用者的輸入>
-"""
-
-
-def get_agents(xml_system_prompt: str) -> List[LlmAgent]:
-    # If the input is not in XML format, return an empty list
-    # if not xml_system_prompt.strip().startswith("<agents>"):
-    #     return []
-
-    agent_prompts = []
-    # Use a regular expression to find all <agent> tags and their contents
-    agent_patterns = re.findall(r"<agent>(.*?)</agent>", xml_system_prompt, re.DOTALL)
-
-    for agent_content in agent_patterns:
-        # Extract name, model, print-output (if present)
-        name = re.search(r"<name>(.*?)</name>", agent_content)
-        name = name.group(1) if name else ""
-
-        model = re.search(r"<model>(.*?)</model>", agent_content)
-        model = model.group(1) if model else os.getenv("MULTI_AGENT_DEFAULT_MODEL", "")
-
-        print_output = re.search(r"<print-output>(.*?)</print-output>", agent_content)
-        print_output = print_output.group(1).lower() == "true" if print_output else True
-
-        print_plotly = re.search(r"<print-plotly>(.*?)</print-plotly>", agent_content)
-        print_plotly = print_plotly.group(1).lower() == "true" if print_plotly else True
-
-        gen_image = re.search(r"<gen-image>(.*?)</gen-image>", agent_content)
-        gen_image = gen_image.group(1).lower() == "true" if gen_image else False
-
-        include_in_history = re.search(
-            r"<include-in-history>(.*?)</include-in-history>", agent_content
-        )
-        include_in_history = (
-            include_in_history.group(1).lower() == "true"
-            if include_in_history
-            else True
-        )
-
-        max_system_prompt_length = re.search(
-            r"<max-system-prompt-length>(.*?)</max-system-prompt-length>", agent_content
-        )
-        max_system_prompt_length = (
-            int(max_system_prompt_length.group(1)) if max_system_prompt_length else None
-        )
-
-        # Use the entire <agent> tag content (including the <agent> tags themselves) as the system_prompt
-        system_prompt = f"<agent>{agent_content}</agent>"
-        if DEFAULT_APPEND_SYSTEM_PROMPT:
-            system_prompt = f"{system_prompt} \n\n{DEFAULT_APPEND_SYSTEM_PROMPT}"
-
-        agent_prompt = LlmAgent(
-            name=name,
-            model=model,
-            system_prompt=system_prompt,
-            print_output=print_output,
-            print_plotly=print_plotly,
-            gen_image=gen_image,
-            include_in_history=include_in_history,
-            max_system_prompt_length=max_system_prompt_length,
-        )
-        agent_prompts.append(agent_prompt)
-
-    return agent_prompts
+import os
+import re
+from typing import List
+from dotenv import load_dotenv
+
+from botrun_flow_lang.llm_agent.llm_agent import LlmAgent
+
+load_dotenv()
+
+DEFAULT_APPEND_SYSTEM_PROMPT = os.getenv("DEFAULT_APPEND_SYSTEM_PROMPT", "")
+
+AGENT_TEMPLATE = """
+你會遵守以下 tag <你需要遵守的原則> 內的原則,來回應使用者的輸入,使用者的輸入使用 tag <使用者的輸入> 標籤表示。
+<你需要遵守的原則>
+{rules}
+</你需要遵守的原則>
+
+<使用者的輸入>
+{context}
+</使用者的輸入>
+"""
+
+
+def get_agents(xml_system_prompt: str) -> List[LlmAgent]:
+    # If the input is not in XML format, return an empty list
+    # if not xml_system_prompt.strip().startswith("<agents>"):
+    #     return []
+
+    agent_prompts = []
+    # Use a regular expression to find all <agent> tags and their contents
+    agent_patterns = re.findall(r"<agent>(.*?)</agent>", xml_system_prompt, re.DOTALL)
+
+    for agent_content in agent_patterns:
+        # Extract name, model, print-output (if present)
+        name = re.search(r"<name>(.*?)</name>", agent_content)
+        name = name.group(1) if name else ""
+
+        model = re.search(r"<model>(.*?)</model>", agent_content)
+        model = model.group(1) if model else os.getenv("MULTI_AGENT_DEFAULT_MODEL", "")
+
+        print_output = re.search(r"<print-output>(.*?)</print-output>", agent_content)
+        print_output = print_output.group(1).lower() == "true" if print_output else True
+
+        print_plotly = re.search(r"<print-plotly>(.*?)</print-plotly>", agent_content)
+        print_plotly = print_plotly.group(1).lower() == "true" if print_plotly else True
+
+        gen_image = re.search(r"<gen-image>(.*?)</gen-image>", agent_content)
+        gen_image = gen_image.group(1).lower() == "true" if gen_image else False
+
+        include_in_history = re.search(
+            r"<include-in-history>(.*?)</include-in-history>", agent_content
+        )
+        include_in_history = (
+            include_in_history.group(1).lower() == "true"
+            if include_in_history
+            else True
+        )
+
+        max_system_prompt_length = re.search(
+            r"<max-system-prompt-length>(.*?)</max-system-prompt-length>", agent_content
+        )
+        max_system_prompt_length = (
+            int(max_system_prompt_length.group(1)) if max_system_prompt_length else None
+        )
+
+        # Use the entire <agent> tag content (including the <agent> tags themselves) as the system_prompt
+        system_prompt = f"<agent>{agent_content}</agent>"
+        if DEFAULT_APPEND_SYSTEM_PROMPT:
+            system_prompt = f"{system_prompt} \n\n{DEFAULT_APPEND_SYSTEM_PROMPT}"
+
+        agent_prompt = LlmAgent(
+            name=name,
+            model=model,
+            system_prompt=system_prompt,
+            print_output=print_output,
+            print_plotly=print_plotly,
+            gen_image=gen_image,
+            include_in_history=include_in_history,
+            max_system_prompt_length=max_system_prompt_length,
+        )
+        agent_prompts.append(agent_prompt)
+
+    return agent_prompts
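
Note: the file above parses an XML-style multi-agent system prompt into LlmAgent objects. Below is a minimal usage sketch; the tag layout mirrors the regular expressions in get_agents, but the concrete tag values, the surrounding <agents> wrapper, and the assumption that LlmAgent exposes its constructor arguments as attributes are illustrative only, not taken from the package.

# Hypothetical usage sketch of get_agents(); sample agent name and model id are made up.
from botrun_flow_lang.llm_agent.llm_agent_util import get_agents

sample_system_prompt = """
<agents>
  <agent>
    <name>summarizer</name>
    <model>some-default-model</model>
    <print-output>true</print-output>
    <include-in-history>false</include-in-history>
    Summarize the user's input in three bullet points.
  </agent>
</agents>
"""

agents = get_agents(sample_system_prompt)
for agent in agents:
    # Assumes LlmAgent keeps these constructor arguments as attributes.
    print(agent.name, agent.model, agent.include_in_history)
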
botrun_flow_lang/log/.gitignore CHANGED
@@ -1,2 +1,2 @@
-*.log
-temp.json
+*.log
+temp.json
botrun_flow_lang/main.py CHANGED
@@ -1,61 +1,61 @@
-import contextlib
-from fastapi import FastAPI
-from fastapi.staticfiles import StaticFiles
-
-from botrun_flow_lang.api.routes import router
-from botrun_flow_lang.mcp_server import mcp
-
-from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import FileResponse
-from pathlib import Path
-
-
-# Create a lifespan manager for MCP server
-@contextlib.asynccontextmanager
-async def lifespan(app: FastAPI):
-    async with contextlib.AsyncExitStack() as stack:
-        await stack.enter_async_context(mcp.session_manager.run())
-        yield
-
-
-app = FastAPI(lifespan=lifespan)
-
-app.include_router(router)
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],  # allow all origins
-    allow_credentials=True,
-    allow_methods=["*"],  # allow all methods
-    allow_headers=["*"],  # allow all headers
-)
-
-# Get the absolute path of the project root directory
-project_root = Path(__file__).parent
-
-
-@app.get("/docs/tools")  # Note: the trailing slash is removed
-@app.get("/docs/tools/")
-async def get_docs():
-    return FileResponse(project_root / "static/docs/tools/index.html")
-
-
-# Mount the static files directory
-app.mount(
-    "/docs/tools",
-    StaticFiles(directory=str(project_root / "static/docs/tools")),
-    name="tool_docs",
-)
-
-# Mount MCP server
-app.mount("/mcp/default", mcp.streamable_http_app())
-
-
-@app.get("/heartbeat")
-async def heartbeat():
-    return {"status": "ok"}
-
-
-if __name__ == "__main__":
-    import uvicorn
-
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+import contextlib
+from fastapi import FastAPI
+from fastapi.staticfiles import StaticFiles
+
+from botrun_flow_lang.api.routes import router
+from botrun_flow_lang.mcp_server import mcp
+
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import FileResponse
+from pathlib import Path
+
+
+# Create a lifespan manager for MCP server
+@contextlib.asynccontextmanager
+async def lifespan(app: FastAPI):
+    async with contextlib.AsyncExitStack() as stack:
+        await stack.enter_async_context(mcp.session_manager.run())
+        yield
+
+
+app = FastAPI(lifespan=lifespan)
+
+app.include_router(router)
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],  # allow all origins
+    allow_credentials=True,
+    allow_methods=["*"],  # allow all methods
+    allow_headers=["*"],  # allow all headers
+)
+
+# Get the absolute path of the project root directory
+project_root = Path(__file__).parent
+
+
+@app.get("/docs/tools")  # Note: the trailing slash is removed
+@app.get("/docs/tools/")
+async def get_docs():
+    return FileResponse(project_root / "static/docs/tools/index.html")
+
+
+# Mount the static files directory
+app.mount(
+    "/docs/tools",
+    StaticFiles(directory=str(project_root / "static/docs/tools")),
+    name="tool_docs",
+)
+
+# Mount MCP server
+app.mount("/mcp/default", mcp.streamable_http_app())
+
+
+@app.get("/heartbeat")
+async def heartbeat():
+    return {"status": "ok"}
+
+
+if __name__ == "__main__":
+    import uvicorn
+
+    uvicorn.run(app, host="0.0.0.0", port=8000)
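
Note: given the routes defined above, a locally running instance can be smoke-tested with the standard library alone. The host and port below match the uvicorn.run() call in this file; how the server is actually launched in deployment is an assumption.

# Minimal smoke test against a locally running botrun_flow_lang.main instance
# (assumes the __main__ block above, or an equivalent uvicorn command, is running).
import json
import urllib.request

with urllib.request.urlopen("http://localhost:8000/heartbeat") as resp:
    print(json.load(resp))  # expected: {"status": "ok"}
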
botrun_flow_lang/main_fast.py CHANGED
@@ -1,51 +1,51 @@
-import logging
-from fastapi import FastAPI
-from fastapi.middleware.cors import CORSMiddleware
-import uvicorn
-from uvicorn.config import LOGGING_CONFIG
-
-# Configure custom logging with timestamp
-LOGGING_CONFIG["formatters"]["default"][
-    "fmt"
-] = "%(asctime)s [%(name)s] %(levelprefix)s %(message)s"
-LOGGING_CONFIG["formatters"]["access"][
-    "fmt"
-] = '%(asctime)s [%(name)s] %(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s'
-
-
-app = FastAPI(
-    title="Botrun Flow Language Fast API",
-    description="A lightweight FastAPI server",
-    version="0.1.0",
-)
-
-
-# Add custom logging middleware
-
-# Add CORS middleware
-app.add_middleware(
-    CORSMiddleware,
-    allow_origins=["*"],
-    allow_credentials=True,
-    allow_methods=["*"],
-    allow_headers=["*"],
-)
-
-
-@app.get("/fast_hello")
-async def fast_hello():
-    return {
-        "status": "success",
-        "message": "Hello from fast API!",
-        "service": "botrun-flow-lang-fastapi-fast",
-    }
-
-
-@app.get("/health")
-async def health_check():
-    return {"status": "healthy"}
-
-
-# Add this if you want to run the app directly with python
-if __name__ == "__main__":
-    uvicorn.run("main_fast:app", host="0.0.0.0", port=8081, log_config=LOGGING_CONFIG)
+import logging
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+import uvicorn
+from uvicorn.config import LOGGING_CONFIG
+
+# Configure custom logging with timestamp
+LOGGING_CONFIG["formatters"]["default"][
+    "fmt"
+] = "%(asctime)s [%(name)s] %(levelprefix)s %(message)s"
+LOGGING_CONFIG["formatters"]["access"][
+    "fmt"
+] = '%(asctime)s [%(name)s] %(levelprefix)s %(client_addr)s - "%(request_line)s" %(status_code)s'
+
+
+app = FastAPI(
+    title="Botrun Flow Language Fast API",
+    description="A lightweight FastAPI server",
+    version="0.1.0",
+)
+
+
+# Add custom logging middleware
+
+# Add CORS middleware
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+
+@app.get("/fast_hello")
+async def fast_hello():
+    return {
+        "status": "success",
+        "message": "Hello from fast API!",
+        "service": "botrun-flow-lang-fastapi-fast",
+    }
+
+
+@app.get("/health")
+async def health_check():
+    return {"status": "healthy"}
+
+
+# Add this if you want to run the app directly with python
+if __name__ == "__main__":
+    uvicorn.run("main_fast:app", host="0.0.0.0", port=8081, log_config=LOGGING_CONFIG)
botrun_flow_lang/mcp_server/__init__.py CHANGED
@@ -1,10 +1,10 @@
-"""
-MCP Server module for BotrunFlowLang
-
-This module provides MCP (Model Context Protocol) server implementation
-that exposes tools for LangGraph agents.
-"""
-
-from .default_mcp import mcp
-
-__all__ = ["mcp"]
+"""
+MCP Server module for BotrunFlowLang
+
+This module provides MCP (Model Context Protocol) server implementation
+that exposes tools for LangGraph agents.
+"""
+
+from .default_mcp import mcp
+
+__all__ = ["mcp"]