botrun-flow-lang 5.9.301__py3-none-any.whl → 5.10.82__py3-none-any.whl

This diff compares publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only and reflects only the changes between those published versions.
Files changed (84)
  1. botrun_flow_lang/api/auth_api.py +39 -39
  2. botrun_flow_lang/api/auth_utils.py +183 -183
  3. botrun_flow_lang/api/botrun_back_api.py +65 -65
  4. botrun_flow_lang/api/flow_api.py +3 -3
  5. botrun_flow_lang/api/hatch_api.py +481 -481
  6. botrun_flow_lang/api/langgraph_api.py +796 -796
  7. botrun_flow_lang/api/line_bot_api.py +1357 -1357
  8. botrun_flow_lang/api/model_api.py +300 -300
  9. botrun_flow_lang/api/rate_limit_api.py +32 -32
  10. botrun_flow_lang/api/routes.py +79 -79
  11. botrun_flow_lang/api/search_api.py +53 -53
  12. botrun_flow_lang/api/storage_api.py +316 -316
  13. botrun_flow_lang/api/subsidy_api.py +290 -290
  14. botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
  15. botrun_flow_lang/api/user_setting_api.py +70 -70
  16. botrun_flow_lang/api/version_api.py +31 -31
  17. botrun_flow_lang/api/youtube_api.py +26 -26
  18. botrun_flow_lang/constants.py +13 -13
  19. botrun_flow_lang/langgraph_agents/agents/agent_runner.py +174 -174
  20. botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
  21. botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
  22. botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
  23. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
  24. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
  25. botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +548 -542
  26. botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
  27. botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
  28. botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
  29. botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
  30. botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
  31. botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
  32. botrun_flow_lang/langgraph_agents/agents/util/local_files.py +345 -345
  33. botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
  34. botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
  35. botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +160 -160
  36. botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
  37. botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
  38. botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
  39. botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
  40. botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
  41. botrun_flow_lang/llm_agent/llm_agent.py +19 -19
  42. botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
  43. botrun_flow_lang/log/.gitignore +2 -2
  44. botrun_flow_lang/main.py +61 -61
  45. botrun_flow_lang/main_fast.py +51 -51
  46. botrun_flow_lang/mcp_server/__init__.py +10 -10
  47. botrun_flow_lang/mcp_server/default_mcp.py +711 -711
  48. botrun_flow_lang/models/nodes/utils.py +205 -205
  49. botrun_flow_lang/models/token_usage.py +34 -34
  50. botrun_flow_lang/requirements.txt +21 -21
  51. botrun_flow_lang/services/base/firestore_base.py +30 -30
  52. botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
  53. botrun_flow_lang/services/hatch/hatch_fs_store.py +372 -372
  54. botrun_flow_lang/services/storage/storage_cs_store.py +202 -202
  55. botrun_flow_lang/services/storage/storage_factory.py +12 -12
  56. botrun_flow_lang/services/storage/storage_store.py +65 -65
  57. botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
  58. botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
  59. botrun_flow_lang/static/docs/tools/index.html +926 -926
  60. botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
  61. botrun_flow_lang/tests/api_stress_test.py +357 -357
  62. botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
  63. botrun_flow_lang/tests/test_botrun_app.py +46 -46
  64. botrun_flow_lang/tests/test_html_util.py +31 -31
  65. botrun_flow_lang/tests/test_img_analyzer.py +190 -190
  66. botrun_flow_lang/tests/test_img_util.py +39 -39
  67. botrun_flow_lang/tests/test_local_files.py +114 -114
  68. botrun_flow_lang/tests/test_mermaid_util.py +103 -103
  69. botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
  70. botrun_flow_lang/tests/test_plotly_util.py +151 -151
  71. botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
  72. botrun_flow_lang/tools/generate_docs.py +133 -133
  73. botrun_flow_lang/tools/templates/tools.html +153 -153
  74. botrun_flow_lang/utils/__init__.py +7 -7
  75. botrun_flow_lang/utils/botrun_logger.py +344 -344
  76. botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
  77. botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
  78. botrun_flow_lang/utils/google_drive_utils.py +654 -654
  79. botrun_flow_lang/utils/langchain_utils.py +324 -324
  80. botrun_flow_lang/utils/yaml_utils.py +9 -9
  81. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/METADATA +2 -2
  82. botrun_flow_lang-5.10.82.dist-info/RECORD +99 -0
  83. botrun_flow_lang-5.9.301.dist-info/RECORD +0 -99
  84. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/WHEEL +0 -0
botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py
@@ -1,86 +1,86 @@
- import os
- from tempfile import NamedTemporaryFile
- from typing import Dict, Any, Optional
- from .local_files import upload_and_get_tmp_public_url, upload_html_and_get_public_url
-
-
- async def generate_mermaid_files(
-     mermaid_data: str,
-     botrun_flow_lang_url: str,
-     user_id: str,
-     title: Optional[str] = None,
- ) -> str:
-     """
-     Generate mermaid HTML file from mermaid definition and upload it to GCS.
-
-     Args:
-         mermaid_data: Mermaid diagram definition string
-         botrun_flow_lang_url: URL for the botrun flow lang API
-         user_id: User ID for file upload
-         title: Optional title for the diagram
-
-     Returns:
-         str: URL for the HTML file or error message starting with "Error: "
-     """
-     try:
-         # Create HTML content with mermaid
-         html_content = f"""
-         <!DOCTYPE html>
-         <html>
-         <head>
-             <meta charset="utf-8">
-             <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
-             <title>{title if title else 'Mermaid Diagram'}</title>
-             <style>
-                 body {{
-                     font-family: "Microsoft JhengHei", "微軟正黑體", "Heiti TC", "黑體-繁", sans-serif;
-                 }}
-                 .mermaid {{
-                     margin: 20px;
-                 }}
-             </style>
-             <script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
-             <script>
-                 mermaid.initialize({{
-                     startOnLoad: true,
-                     theme: 'default',
-                     themeVariables: {{
-                         fontFamily: '"Microsoft JhengHei", "微軟正黑體", "Heiti TC", "黑體-繁", sans-serif'
-                     }}
-                 }});
-             </script>
-         </head>
-         <body>
-             <h1>{title if title else 'Mermaid Diagram'}</h1>
-             <div class="mermaid">
-             {mermaid_data}
-             </div>
-         </body>
-         </html>
-         """
-
-         # Create temporary file
-         with NamedTemporaryFile(
-             suffix=".html", mode="w", encoding="utf-8", delete=False
-         ) as html_temp:
-             try:
-                 # Save HTML content
-                 html_temp.write(html_content)
-                 html_temp.flush()
-
-                 # Upload file to GCS
-                 html_url = await upload_html_and_get_public_url(
-                     html_temp.name, botrun_flow_lang_url, user_id
-                 )
-
-                 # Clean up temporary file
-                 os.unlink(html_temp.name)
-
-                 return html_url
-             except Exception as e:
-                 # Clean up temporary file in case of error
-                 os.unlink(html_temp.name)
-                 return f"Error: {str(e)}"
-
-     except Exception as e:
-         return f"Error: {str(e)}"
+ import os
+ from tempfile import NamedTemporaryFile
+ from typing import Dict, Any, Optional
+ from .local_files import upload_and_get_tmp_public_url, upload_html_and_get_public_url
+
+
+ async def generate_mermaid_files(
+     mermaid_data: str,
+     botrun_flow_lang_url: str,
+     user_id: str,
+     title: Optional[str] = None,
+ ) -> str:
+     """
+     Generate mermaid HTML file from mermaid definition and upload it to GCS.
+
+     Args:
+         mermaid_data: Mermaid diagram definition string
+         botrun_flow_lang_url: URL for the botrun flow lang API
+         user_id: User ID for file upload
+         title: Optional title for the diagram
+
+     Returns:
+         str: URL for the HTML file or error message starting with "Error: "
+     """
+     try:
+         # Create HTML content with mermaid
+         html_content = f"""
+         <!DOCTYPE html>
+         <html>
+         <head>
+             <meta charset="utf-8">
+             <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
+             <title>{title if title else 'Mermaid Diagram'}</title>
+             <style>
+                 body {{
+                     font-family: "Microsoft JhengHei", "微軟正黑體", "Heiti TC", "黑體-繁", sans-serif;
+                 }}
+                 .mermaid {{
+                     margin: 20px;
+                 }}
+             </style>
+             <script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
+             <script>
+                 mermaid.initialize({{
+                     startOnLoad: true,
+                     theme: 'default',
+                     themeVariables: {{
+                         fontFamily: '"Microsoft JhengHei", "微軟正黑體", "Heiti TC", "黑體-繁", sans-serif'
+                     }}
+                 }});
+             </script>
+         </head>
+         <body>
+             <h1>{title if title else 'Mermaid Diagram'}</h1>
+             <div class="mermaid">
+             {mermaid_data}
+             </div>
+         </body>
+         </html>
+         """
+
+         # Create temporary file
+         with NamedTemporaryFile(
+             suffix=".html", mode="w", encoding="utf-8", delete=False
+         ) as html_temp:
+             try:
+                 # Save HTML content
+                 html_temp.write(html_content)
+                 html_temp.flush()
+
+                 # Upload file to GCS
+                 html_url = await upload_html_and_get_public_url(
+                     html_temp.name, botrun_flow_lang_url, user_id
+                 )
+
+                 # Clean up temporary file
+                 os.unlink(html_temp.name)
+
+                 return html_url
+             except Exception as e:
+                 # Clean up temporary file in case of error
+                 os.unlink(html_temp.name)
+                 return f"Error: {str(e)}"
+
+     except Exception as e:
+         return f"Error: {str(e)}"
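In this mermaid_util.py hunk the removed and added lines appear identical, so the behaviour of generate_mermaid_files is unchanged between 5.9.301 and 5.10.82: it renders the Mermaid definition into a standalone HTML page (loading mermaid.min.js from jsDelivr), uploads it via upload_html_and_get_public_url, and returns either the public URL or an in-band string starting with "Error: ". The sketch below shows how the function might be called; it is only a usage sketch, and the diagram text, the API base URL (https://example.invalid) and the user id (demo-user) are placeholders, not values from the package.

```python
# Hypothetical usage sketch for generate_mermaid_files as shipped in this wheel.
# The diagram, URL, and user id below are placeholders; with an unreachable URL
# the helper's own try/except returns an "Error: ..." string instead of raising.
import asyncio

from botrun_flow_lang.langgraph_agents.agents.util.mermaid_util import (
    generate_mermaid_files,
)


async def main() -> None:
    diagram = """
    flowchart TD
        A[Render HTML] --> B[Upload to GCS]
        B --> C[Return public URL]
    """
    url = await generate_mermaid_files(
        mermaid_data=diagram,
        botrun_flow_lang_url="https://example.invalid",  # placeholder API base URL
        user_id="demo-user",                             # placeholder user id
        title="Upload flow",
    )
    # Failures are signalled in-band, so callers check the "Error: " prefix.
    if url.startswith("Error: "):
        print("generation failed:", url)
    else:
        print("diagram published at:", url)


if __name__ == "__main__":
    asyncio.run(main())
```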
botrun_flow_lang/langgraph_agents/agents/util/model_utils.py
@@ -1,143 +1,143 @@
- """
- Utility module for LLM model-related functionality, including API key rotation.
- """
-
- import os
- import random
- from langchain_anthropic import ChatAnthropic
- from langchain_openai import ChatOpenAI
- from langchain_google_genai import ChatGoogleGenerativeAI
-
-
- def get_model_instance(model_name: str, temperature: float = 0, enable_code_execution: bool = False):
-     """
-     統一的模型實例獲取函數
-
-     Args:
-         model_name: 模型名稱
-         temperature: 溫度參數
-         enable_code_execution: 是否啟用代碼執行(僅 Gemini 2.5 支援)
-
-     Returns:
-         對應的模型實例
-     """
-     if model_name.startswith("gemini-"):
-         model_kwargs = {}
-         if enable_code_execution and "2.5" in model_name:
-             model_kwargs["enable_code_execution"] = True
-
-         return ChatGoogleGenerativeAI(
-             model=model_name,
-             temperature=temperature,
-             **model_kwargs
-         )
-
-     elif model_name.startswith("claude-"):
-         # 檢查是否有多個 Anthropic API keys
-         anthropic_api_keys_str = os.getenv("ANTHROPIC_API_KEYS", "")
-         anthropic_api_keys = [
-             key.strip() for key in anthropic_api_keys_str.split(",") if key.strip()
-         ]
-
-         if anthropic_api_keys:
-             return RotatingChatAnthropic(
-                 model_name=model_name,
-                 keys=anthropic_api_keys,
-                 temperature=temperature,
-                 max_tokens=64000,
-             )
-         elif os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL"):
-             openrouter_model_name = "anthropic/claude-sonnet-4"
-             return ChatOpenAI(
-                 openai_api_key=os.getenv("OPENROUTER_API_KEY"),
-                 openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
-                 model_name=openrouter_model_name,
-                 temperature=temperature,
-                 max_tokens=64000,
-             )
-         else:
-             return ChatAnthropic(
-                 model=model_name,
-                 temperature=temperature,
-                 max_tokens=64000,
-             )
-
-     elif model_name.startswith("gpt-"):
-         if os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL"):
-             return ChatOpenAI(
-                 openai_api_key=os.getenv("OPENROUTER_API_KEY"),
-                 openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
-                 model_name=f"openai/{model_name}",
-                 temperature=temperature,
-                 max_tokens=8192,
-             )
-         else:
-             return ChatOpenAI(
-                 model=model_name,
-                 temperature=temperature,
-                 max_tokens=8192,
-             )
-
-     else:
-         # 預設使用 Gemini
-         return ChatGoogleGenerativeAI(
-             model="gemini-2.5-flash",
-             temperature=temperature,
-         )
-
-
- class RotatingChatAnthropic:
-     """A wrapper class for ChatAnthropic that rotates through multiple API keys."""
-
-     def __init__(self, model_name, keys, temperature=0, max_tokens=8192):
-         """
-         Initialize the rotating key model.
-
-         Args:
-             model_name: The name of the Anthropic model to use
-             keys: List of API keys to rotate through
-             temperature: The temperature for model generation
-             max_tokens: The maximum number of tokens to generate
-         """
-         self.keys = keys
-         self.model_name = model_name
-         self.temperature = temperature
-         self.max_tokens = max_tokens
-
-         # Initialize with the first key
-         self.base_model = ChatAnthropic(
-             model=self.model_name,
-             temperature=self.temperature,
-             max_tokens=self.max_tokens,
-             api_key=random.choice(self.keys) if self.keys else None,
-         )
-
-     def invoke(self, *args, **kwargs):
-         """
-         Invoke the model with a randomly selected API key.
-
-         This method is called when the model is invoked through LangChain.
-         """
-         if self.keys:
-             # Select a random key for this invocation
-             self.base_model.client.api_key = random.choice(self.keys)
-         return self.base_model.invoke(*args, **kwargs)
-
-     def stream(self, *args, **kwargs):
-         """
-         Stream the model response with a randomly selected API key.
-
-         This method handles streaming output from the model.
-         """
-         if self.keys:
-             # Select a random key for this streaming invocation
-             self.base_model.client.api_key = random.choice(self.keys)
-         return self.base_model.stream(*args, **kwargs)
-
-     def __getattr__(self, name):
-         """
-         Forward any other attribute access to the base model.
-
-         This ensures compatibility with the original ChatAnthropic class.
-         """
-         return getattr(self.base_model, name)
+ """
+ Utility module for LLM model-related functionality, including API key rotation.
+ """
+
+ import os
+ import random
+ from langchain_anthropic import ChatAnthropic
+ from langchain_openai import ChatOpenAI
+ from langchain_google_genai import ChatGoogleGenerativeAI
+
+
+ def get_model_instance(
+     model_name: str, temperature: float = 0, enable_code_execution: bool = False
+ ):
+     """
+     統一的模型實例獲取函數
+
+     Args:
+         model_name: 模型名稱
+         temperature: 溫度參數
+         enable_code_execution: 是否啟用代碼執行(僅 Gemini 2.5 支援)
+
+     Returns:
+         對應的模型實例
+     """
+     if model_name.startswith("gemini-"):
+         model_kwargs = {}
+         if enable_code_execution and "2.5" in model_name:
+             model_kwargs["enable_code_execution"] = True
+
+         return ChatGoogleGenerativeAI(
+             model=model_name, temperature=temperature, **model_kwargs
+         )
+
+     elif model_name.startswith("claude-"):
+         # 檢查是否有多個 Anthropic API keys
+         anthropic_api_keys_str = os.getenv("ANTHROPIC_API_KEYS", "")
+         anthropic_api_keys = [
+             key.strip() for key in anthropic_api_keys_str.split(",") if key.strip()
+         ]
+
+         if anthropic_api_keys:
+             return RotatingChatAnthropic(
+                 model_name=model_name,
+                 keys=anthropic_api_keys,
+                 temperature=temperature,
+                 max_tokens=64000,
+             )
+         elif os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL"):
+             openrouter_model_name = "anthropic/claude-sonnet-4.5"
+             return ChatOpenAI(
+                 openai_api_key=os.getenv("OPENROUTER_API_KEY"),
+                 openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
+                 model_name=openrouter_model_name,
+                 temperature=temperature,
+                 max_tokens=64000,
+             )
+         else:
+             return ChatAnthropic(
+                 model=model_name,
+                 temperature=temperature,
+                 max_tokens=64000,
+             )
+
+     elif model_name.startswith("gpt-"):
+         if os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL"):
+             return ChatOpenAI(
+                 openai_api_key=os.getenv("OPENROUTER_API_KEY"),
+                 openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
+                 model_name=f"openai/{model_name}",
+                 temperature=temperature,
+                 max_tokens=8192,
+             )
+         else:
+             return ChatOpenAI(
+                 model=model_name,
+                 temperature=temperature,
+                 max_tokens=8192,
+             )
+
+     else:
+         # 預設使用 Gemini
+         return ChatGoogleGenerativeAI(
+             model="gemini-2.5-flash",
+             temperature=temperature,
+         )
+
+
+ class RotatingChatAnthropic:
+     """A wrapper class for ChatAnthropic that rotates through multiple API keys."""
+
+     def __init__(self, model_name, keys, temperature=0, max_tokens=8192):
+         """
+         Initialize the rotating key model.
+
+         Args:
+             model_name: The name of the Anthropic model to use
+             keys: List of API keys to rotate through
+             temperature: The temperature for model generation
+             max_tokens: The maximum number of tokens to generate
+         """
+         self.keys = keys
+         self.model_name = model_name
+         self.temperature = temperature
+         self.max_tokens = max_tokens
+
+         # Initialize with the first key
+         self.base_model = ChatAnthropic(
+             model=self.model_name,
+             temperature=self.temperature,
+             max_tokens=self.max_tokens,
+             api_key=random.choice(self.keys) if self.keys else None,
+         )
+
+     def invoke(self, *args, **kwargs):
+         """
+         Invoke the model with a randomly selected API key.
+
+         This method is called when the model is invoked through LangChain.
+         """
+         if self.keys:
+             # Select a random key for this invocation
+             self.base_model.client.api_key = random.choice(self.keys)
+         return self.base_model.invoke(*args, **kwargs)
+
+     def stream(self, *args, **kwargs):
+         """
+         Stream the model response with a randomly selected API key.
+
+         This method handles streaming output from the model.
+         """
+         if self.keys:
+             # Select a random key for this streaming invocation
+             self.base_model.client.api_key = random.choice(self.keys)
+         return self.base_model.stream(*args, **kwargs)
+
+     def __getattr__(self, name):
+         """
+         Forward any other attribute access to the base model.
+
+         This ensures compatibility with the original ChatAnthropic class.
+         """
+         return getattr(self.base_model, name)
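In this model_utils.py hunk the only substantive change between 5.9.301 and 5.10.82 is the OpenRouter fallback for claude-* models, which now requests "anthropic/claude-sonnet-4.5" instead of "anthropic/claude-sonnet-4"; the remaining differences are reflowed formatting of the get_model_instance signature and of the ChatGoogleGenerativeAI call. The sketch below illustrates how get_model_instance routes a request depending on environment variables; it is a usage sketch only, the key values and the model name are placeholder examples, and real credentials are required before actually invoking the returned model.

```python
# Hypothetical usage sketch of get_model_instance; the key values below are
# placeholders and real credentials are needed before calling .invoke()/.stream().
import os

from botrun_flow_lang.langgraph_agents.agents.util.model_utils import (
    get_model_instance,
)

# With ANTHROPIC_API_KEYS set to a comma-separated list, claude-* names return
# the RotatingChatAnthropic wrapper, which re-picks a random key on every
# invoke()/stream() call.
os.environ["ANTHROPIC_API_KEYS"] = "sk-ant-key-1,sk-ant-key-2"  # placeholder keys
llm = get_model_instance("claude-sonnet-4-20250514", temperature=0)  # example name
print(type(llm).__name__)  # RotatingChatAnthropic

# With ANTHROPIC_API_KEYS unset but OpenRouter configured, the same claude-*
# request is routed through ChatOpenAI instead; in 5.10.82 that fallback targets
# "anthropic/claude-sonnet-4.5" (previously "anthropic/claude-sonnet-4").
# os.environ["OPENROUTER_API_KEY"] = "sk-or-..."                     # placeholder
# os.environ["OPENROUTER_BASE_URL"] = "https://openrouter.ai/api/v1"
```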