botrun-flow-lang 5.9.301-py3-none-any.whl → 5.10.82-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. botrun_flow_lang/api/auth_api.py +39 -39
  2. botrun_flow_lang/api/auth_utils.py +183 -183
  3. botrun_flow_lang/api/botrun_back_api.py +65 -65
  4. botrun_flow_lang/api/flow_api.py +3 -3
  5. botrun_flow_lang/api/hatch_api.py +481 -481
  6. botrun_flow_lang/api/langgraph_api.py +796 -796
  7. botrun_flow_lang/api/line_bot_api.py +1357 -1357
  8. botrun_flow_lang/api/model_api.py +300 -300
  9. botrun_flow_lang/api/rate_limit_api.py +32 -32
  10. botrun_flow_lang/api/routes.py +79 -79
  11. botrun_flow_lang/api/search_api.py +53 -53
  12. botrun_flow_lang/api/storage_api.py +316 -316
  13. botrun_flow_lang/api/subsidy_api.py +290 -290
  14. botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
  15. botrun_flow_lang/api/user_setting_api.py +70 -70
  16. botrun_flow_lang/api/version_api.py +31 -31
  17. botrun_flow_lang/api/youtube_api.py +26 -26
  18. botrun_flow_lang/constants.py +13 -13
  19. botrun_flow_lang/langgraph_agents/agents/agent_runner.py +174 -174
  20. botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
  21. botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
  22. botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
  23. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
  24. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
  25. botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +548 -542
  26. botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
  27. botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
  28. botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
  29. botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
  30. botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
  31. botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
  32. botrun_flow_lang/langgraph_agents/agents/util/local_files.py +345 -345
  33. botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
  34. botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
  35. botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +160 -160
  36. botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
  37. botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
  38. botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
  39. botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
  40. botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
  41. botrun_flow_lang/llm_agent/llm_agent.py +19 -19
  42. botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
  43. botrun_flow_lang/log/.gitignore +2 -2
  44. botrun_flow_lang/main.py +61 -61
  45. botrun_flow_lang/main_fast.py +51 -51
  46. botrun_flow_lang/mcp_server/__init__.py +10 -10
  47. botrun_flow_lang/mcp_server/default_mcp.py +711 -711
  48. botrun_flow_lang/models/nodes/utils.py +205 -205
  49. botrun_flow_lang/models/token_usage.py +34 -34
  50. botrun_flow_lang/requirements.txt +21 -21
  51. botrun_flow_lang/services/base/firestore_base.py +30 -30
  52. botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
  53. botrun_flow_lang/services/hatch/hatch_fs_store.py +372 -372
  54. botrun_flow_lang/services/storage/storage_cs_store.py +202 -202
  55. botrun_flow_lang/services/storage/storage_factory.py +12 -12
  56. botrun_flow_lang/services/storage/storage_store.py +65 -65
  57. botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
  58. botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
  59. botrun_flow_lang/static/docs/tools/index.html +926 -926
  60. botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
  61. botrun_flow_lang/tests/api_stress_test.py +357 -357
  62. botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
  63. botrun_flow_lang/tests/test_botrun_app.py +46 -46
  64. botrun_flow_lang/tests/test_html_util.py +31 -31
  65. botrun_flow_lang/tests/test_img_analyzer.py +190 -190
  66. botrun_flow_lang/tests/test_img_util.py +39 -39
  67. botrun_flow_lang/tests/test_local_files.py +114 -114
  68. botrun_flow_lang/tests/test_mermaid_util.py +103 -103
  69. botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
  70. botrun_flow_lang/tests/test_plotly_util.py +151 -151
  71. botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
  72. botrun_flow_lang/tools/generate_docs.py +133 -133
  73. botrun_flow_lang/tools/templates/tools.html +153 -153
  74. botrun_flow_lang/utils/__init__.py +7 -7
  75. botrun_flow_lang/utils/botrun_logger.py +344 -344
  76. botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
  77. botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
  78. botrun_flow_lang/utils/google_drive_utils.py +654 -654
  79. botrun_flow_lang/utils/langchain_utils.py +324 -324
  80. botrun_flow_lang/utils/yaml_utils.py +9 -9
  81. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/METADATA +2 -2
  82. botrun_flow_lang-5.10.82.dist-info/RECORD +99 -0
  83. botrun_flow_lang-5.9.301.dist-info/RECORD +0 -99
  84. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/WHEEL +0 -0
botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py
@@ -1,160 +1,160 @@
- import anthropic
- import base64
- import httpx
-
-
- import os
- from dotenv import load_dotenv
- from google.oauth2 import service_account
-
- load_dotenv()
-
-
- def analyze_pdf_with_claude(
-     pdf_data: str, user_input: str, model_name: str = "claude-sonnet-4-20250514"
- ):
-     """
-     Analyze a PDF file using Claude API
-
-     Args:
-         pdf_data: Base64-encoded PDF data
-         user_input: User's query about the PDF content
-
-     Returns:
-         str: Claude's analysis of the PDF content based on the query
-     """
-     # Initialize Anthropic client
-     client = anthropic.Anthropic()
-
-     # Send to Claude
-     message = client.messages.create(
-         model=model_name,
-         max_tokens=4096,  # Increased token limit for detailed analysis
-         messages=[
-             {
-                 "role": "user",
-                 "content": [
-                     {
-                         "type": "document",
-                         "source": {
-                             "type": "base64",
-                             "media_type": "application/pdf",
-                             "data": pdf_data,
-                         },
-                     },
-                     {"type": "text", "text": user_input},
-                 ],
-             }
-         ],
-     )
-
-     print(
-         f"analyze_pdf_with_claude============> input_token: {message.usage.input_tokens} output_token: {message.usage.output_tokens}",
-     )
-     return message.content[0].text
-
-
- def analyze_pdf_with_gemini(
-     pdf_data: str, user_input: str, model_name: str = "gemini-2.5-flash"
- ):
-     """
-     Analyze a PDF file using Gemini API
-
-     Args:
-         pdf_data: Base64-encoded PDF data
-         user_input: User's query about the PDF content
-         model_name: Gemini model name to use
-
-     Returns:
-         str: Gemini's analysis of the PDF content based on the query
-     """
-     # Import these only when needed, otherwise module loading takes longer
-     from google import genai
-     from google.genai import types
-
-     credentials = service_account.Credentials.from_service_account_file(
-         os.getenv("GOOGLE_APPLICATION_CREDENTIALS_FOR_FASTAPI"),
-         scopes=["https://www.googleapis.com/auth/cloud-platform"],
-     )
-
-     client = genai.Client(
-         credentials=credentials,
-         project="scoop-386004",
-         location="us-central1",
-     )
-     response = client.models.generate_content(
-         model=model_name,
-         contents=[
-             user_input,
-             types.Part(
-                 inline_data={
-                     "mime_type": "application/pdf",
-                     "data": pdf_data,
-                 }
-             ),
-         ],
-     )
-     # Log token usage if available
-     if hasattr(response, "usage_metadata"):
-         print(
-             f"analyze_pdf_with_gemini============> input_token: {response.usage_metadata.prompt_token_count} output_token: {response.usage_metadata.candidates_token_count}",
-         )
-
-     return response.text
-
-
- def analyze_pdf(pdf_url: str, user_input: str):
-     """
-     Analyze a PDF file using multiple models in order of preference based on PDF_ANALYZER_MODEL env var
-
-     If PDF_ANALYZER_MODEL contains comma-separated models, it will try them in order,
-     falling back to the next one if the previous fails.
-
-     Args:
-         pdf_url: URL to the PDF file
-         user_input: User's query about the PDF content
-
-     Returns:
-         str: Analysis of the PDF content based on the query
-     """
-     try:
-         # Download and encode the PDF file from URL
-         pdf_data = base64.standard_b64encode(httpx.get(pdf_url).content).decode("utf-8")
-
-         # Get models list from environment variable
-         models_str = os.getenv("PDF_ANALYZER_MODEL", "gemini-2.5-flash")
-         print(f"[analyze_pdf] Analyzing PDF with models: {models_str}")
-         models = [model.strip() for model in models_str.split(",")]
-
-         last_error = None
-
-         # Try each model in order
-         for model in models:
-             try:
-                 if model.startswith("gemini-"):
-                     print(f"Trying to analyze PDF with Gemini model: {model}")
-                     return analyze_pdf_with_gemini(pdf_data, user_input, model)
-                 elif model.startswith("claude-"):
-                     print(f"Trying to analyze PDF with Claude model: {model}")
-                     return analyze_pdf_with_claude(pdf_data, user_input, model)
-                 else:
-                     print(f"Unknown model type: {model}, skipping")
-                     continue
-             except Exception as e:
-                 import traceback
-
-                 traceback.print_exc()
-                 error_msg = f"Error analyzing PDF with {model}: {str(e)}"
-                 print(error_msg)
-                 last_error = error_msg
-                 # Continue to the next model in the list
-                 continue
-
-         # If we've reached here, all models failed
-         return (
-             f"Error analyzing PDF with all specified models. Last error: {last_error}"
-         )
-
-     except Exception as e:
-         print(f"Error downloading PDF: {str(e)}")
-         return f"Error downloading PDF: {str(e)}"
+ import anthropic
+ import base64
+ import httpx
+
+
+ import os
+ from dotenv import load_dotenv
+ from google.oauth2 import service_account
+
+ load_dotenv()
+
+
+ def analyze_pdf_with_claude(
+     pdf_data: str, user_input: str, model_name: str = "claude-sonnet-4-5-20250929"
+ ):
+     """
+     Analyze a PDF file using Claude API
+
+     Args:
+         pdf_data: Base64-encoded PDF data
+         user_input: User's query about the PDF content
+
+     Returns:
+         str: Claude's analysis of the PDF content based on the query
+     """
+     # Initialize Anthropic client
+     client = anthropic.Anthropic()
+
+     # Send to Claude
+     message = client.messages.create(
+         model=model_name,
+         max_tokens=4096,  # Increased token limit for detailed analysis
+         messages=[
+             {
+                 "role": "user",
+                 "content": [
+                     {
+                         "type": "document",
+                         "source": {
+                             "type": "base64",
+                             "media_type": "application/pdf",
+                             "data": pdf_data,
+                         },
+                     },
+                     {"type": "text", "text": user_input},
+                 ],
+             }
+         ],
+     )
+
+     print(
+         f"analyze_pdf_with_claude============> input_token: {message.usage.input_tokens} output_token: {message.usage.output_tokens}",
+     )
+     return message.content[0].text
+
+
+ def analyze_pdf_with_gemini(
+     pdf_data: str, user_input: str, model_name: str = "gemini-2.5-flash"
+ ):
+     """
+     Analyze a PDF file using Gemini API
+
+     Args:
+         pdf_data: Base64-encoded PDF data
+         user_input: User's query about the PDF content
+         model_name: Gemini model name to use
+
+     Returns:
+         str: Gemini's analysis of the PDF content based on the query
+     """
+     # Import these only when needed, otherwise module loading takes longer
+     from google import genai
+     from google.genai import types
+
+     credentials = service_account.Credentials.from_service_account_file(
+         os.getenv("GOOGLE_APPLICATION_CREDENTIALS_FOR_FASTAPI"),
+         scopes=["https://www.googleapis.com/auth/cloud-platform"],
+     )
+
+     client = genai.Client(
+         credentials=credentials,
+         project="scoop-386004",
+         location="us-central1",
+     )
+     response = client.models.generate_content(
+         model=model_name,
+         contents=[
+             user_input,
+             types.Part(
+                 inline_data={
+                     "mime_type": "application/pdf",
+                     "data": pdf_data,
+                 }
+             ),
+         ],
+     )
+     # Log token usage if available
+     if hasattr(response, "usage_metadata"):
+         print(
+             f"analyze_pdf_with_gemini============> input_token: {response.usage_metadata.prompt_token_count} output_token: {response.usage_metadata.candidates_token_count}",
+         )
+
+     return response.text
+
+
+ def analyze_pdf(pdf_url: str, user_input: str):
+     """
+     Analyze a PDF file using multiple models in order of preference based on PDF_ANALYZER_MODEL env var
+
+     If PDF_ANALYZER_MODEL contains comma-separated models, it will try them in order,
+     falling back to the next one if the previous fails.
+
+     Args:
+         pdf_url: URL to the PDF file
+         user_input: User's query about the PDF content
+
+     Returns:
+         str: Analysis of the PDF content based on the query
+     """
+     try:
+         # Download and encode the PDF file from URL
+         pdf_data = base64.standard_b64encode(httpx.get(pdf_url).content).decode("utf-8")
+
+         # Get models list from environment variable
+         models_str = os.getenv("PDF_ANALYZER_MODEL", "gemini-2.5-flash")
+         print(f"[analyze_pdf] Analyzing PDF with models: {models_str}")
+         models = [model.strip() for model in models_str.split(",")]
+
+         last_error = None
+
+         # Try each model in order
+         for model in models:
+             try:
+                 if model.startswith("gemini-"):
+                     print(f"Trying to analyze PDF with Gemini model: {model}")
+                     return analyze_pdf_with_gemini(pdf_data, user_input, model)
+                 elif model.startswith("claude-"):
+                     print(f"Trying to analyze PDF with Claude model: {model}")
+                     return analyze_pdf_with_claude(pdf_data, user_input, model)
+                 else:
+                     print(f"Unknown model type: {model}, skipping")
+                     continue
+             except Exception as e:
+                 import traceback
+
+                 traceback.print_exc()
+                 error_msg = f"Error analyzing PDF with {model}: {str(e)}"
+                 print(error_msg)
+                 last_error = error_msg
+                 # Continue to the next model in the list
+                 continue
+
+         # If we've reached here, all models failed
+         return (
+             f"Error analyzing PDF with all specified models. Last error: {last_error}"
+         )
+
+     except Exception as e:
+         print(f"Error downloading PDF: {str(e)}")
+         return f"Error downloading PDF: {str(e)}"
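For reference, a minimal usage sketch of the fallback logic implemented above. This is a sketch only: it assumes the package is installed, that the Anthropic API key and the Google service-account credentials the module expects are configured, and the PDF URL is a hypothetical placeholder.

import os

from botrun_flow_lang.langgraph_agents.agents.util.pdf_analyzer import analyze_pdf

# Comma-separated preference list read by analyze_pdf(); if the first model
# raises, the next one is tried.
os.environ["PDF_ANALYZER_MODEL"] = "gemini-2.5-flash, claude-sonnet-4-5-20250929"

result = analyze_pdf(
    pdf_url="https://example.com/sample.pdf",  # hypothetical URL
    user_input="Summarize the key findings of this document.",
)
print(result)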