botrun-flow-lang 5.12.263__py3-none-any.whl → 6.2.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- botrun_flow_lang/api/auth_api.py +39 -39
- botrun_flow_lang/api/auth_utils.py +183 -183
- botrun_flow_lang/api/botrun_back_api.py +65 -65
- botrun_flow_lang/api/flow_api.py +3 -3
- botrun_flow_lang/api/hatch_api.py +508 -508
- botrun_flow_lang/api/langgraph_api.py +816 -811
- botrun_flow_lang/api/langgraph_constants.py +11 -0
- botrun_flow_lang/api/line_bot_api.py +1484 -1484
- botrun_flow_lang/api/model_api.py +300 -300
- botrun_flow_lang/api/rate_limit_api.py +32 -32
- botrun_flow_lang/api/routes.py +79 -79
- botrun_flow_lang/api/search_api.py +53 -53
- botrun_flow_lang/api/storage_api.py +395 -395
- botrun_flow_lang/api/subsidy_api.py +290 -290
- botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
- botrun_flow_lang/api/user_setting_api.py +70 -70
- botrun_flow_lang/api/version_api.py +31 -31
- botrun_flow_lang/api/youtube_api.py +26 -26
- botrun_flow_lang/constants.py +13 -13
- botrun_flow_lang/langgraph_agents/agents/agent_runner.py +178 -178
- botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
- botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gemini_subsidy_graph.py +460 -460
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +730 -723
- botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
- botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
- botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
- botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
- botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
- botrun_flow_lang/langgraph_agents/agents/util/img_util.py +336 -294
- botrun_flow_lang/langgraph_agents/agents/util/local_files.py +419 -419
- botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
- botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
- botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +562 -486
- botrun_flow_lang/langgraph_agents/agents/util/pdf_cache.py +250 -250
- botrun_flow_lang/langgraph_agents/agents/util/pdf_processor.py +204 -204
- botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
- botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
- botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
- botrun_flow_lang/langgraph_agents/agents/util/usage_metadata.py +34 -0
- botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
- botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
- botrun_flow_lang/llm_agent/llm_agent.py +19 -19
- botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
- botrun_flow_lang/log/.gitignore +2 -2
- botrun_flow_lang/main.py +61 -61
- botrun_flow_lang/main_fast.py +51 -51
- botrun_flow_lang/mcp_server/__init__.py +10 -10
- botrun_flow_lang/mcp_server/default_mcp.py +854 -744
- botrun_flow_lang/models/nodes/utils.py +205 -205
- botrun_flow_lang/models/token_usage.py +34 -34
- botrun_flow_lang/requirements.txt +21 -21
- botrun_flow_lang/services/base/firestore_base.py +30 -30
- botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
- botrun_flow_lang/services/hatch/hatch_fs_store.py +419 -419
- botrun_flow_lang/services/storage/storage_cs_store.py +206 -206
- botrun_flow_lang/services/storage/storage_factory.py +12 -12
- botrun_flow_lang/services/storage/storage_store.py +65 -65
- botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
- botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
- botrun_flow_lang/static/docs/tools/index.html +926 -926
- botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
- botrun_flow_lang/tests/api_stress_test.py +357 -357
- botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
- botrun_flow_lang/tests/test_botrun_app.py +46 -46
- botrun_flow_lang/tests/test_html_util.py +31 -31
- botrun_flow_lang/tests/test_img_analyzer.py +190 -190
- botrun_flow_lang/tests/test_img_util.py +39 -39
- botrun_flow_lang/tests/test_local_files.py +114 -114
- botrun_flow_lang/tests/test_mermaid_util.py +103 -103
- botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
- botrun_flow_lang/tests/test_plotly_util.py +151 -151
- botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
- botrun_flow_lang/tools/generate_docs.py +133 -133
- botrun_flow_lang/tools/templates/tools.html +153 -153
- botrun_flow_lang/utils/__init__.py +7 -7
- botrun_flow_lang/utils/botrun_logger.py +344 -344
- botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
- botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
- botrun_flow_lang/utils/google_drive_utils.py +654 -654
- botrun_flow_lang/utils/langchain_utils.py +324 -324
- botrun_flow_lang/utils/yaml_utils.py +9 -9
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/METADATA +6 -6
- botrun_flow_lang-6.2.21.dist-info/RECORD +104 -0
- botrun_flow_lang-5.12.263.dist-info/RECORD +0 -102
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/WHEEL +0 -0
botrun_flow_lang/langgraph_agents/agents/util/img_util.py
@@ -1,294 +1,336 @@
-import anthropic
-import base64
-import httpx
-import os
-import imghdr
-from pathlib import Path
-from
[content of the remaining removed lines (8-294) is not captured in this diff extract]
+import anthropic
+import base64
+import httpx
+import os
+import imghdr
+from pathlib import Path
+from typing import Dict, Any, List, Tuple
+from dotenv import load_dotenv
+
+from botrun_flow_lang.langgraph_agents.agents.util.usage_metadata import UsageMetadata
+
+load_dotenv()
+
+
+def get_img_content_type(file_path: str | Path) -> str:
+    """
+    Get the content type (MIME type) of a local file.
+    This function checks the actual image format rather than relying on file extension.
+
+    Args:
+        file_path: Path to the local file (can be string or Path object)
+
+    Returns:
+        str: The content type of the file (e.g., 'image/jpeg', 'image/png')
+
+    Raises:
+        FileNotFoundError: If the file does not exist
+        ValueError: If the file type is not recognized or not supported
+    """
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"File not found: {file_path}")
+
+    # Check actual image type using imghdr
+    img_type = imghdr.what(file_path)
+    if not img_type:
+        raise ValueError(f"File is not a recognized image format: {file_path}")
+
+    # Map image type to MIME type
+    mime_types = {
+        "jpeg": "image/jpeg",
+        "jpg": "image/jpeg",
+        "png": "image/png",
+        "gif": "image/gif",
+        "webp": "image/webp",
+    }
+
+    content_type = mime_types.get(img_type.lower())
+    if not content_type:
+        raise ValueError(f"Unsupported image format '{img_type}': {file_path}")
+
+    return content_type
+
+
+def analyze_imgs_with_claude(
+    img_urls: list[str], user_input: str, model_name: str = "claude-sonnet-4-5-20250929"
+) -> Tuple[str, UsageMetadata]:
+    """
+    Analyze multiple images using Claude Vision API
+
+    Args:
+        img_urls: List of URLs to the image files
+        user_input: User's query about the image content(s)
+        model_name: Claude model name to use
+
+    Returns:
+        Tuple[str, UsageMetadata]: Claude's analysis and usage metadata
+
+    Raises:
+        ValueError: If image URLs are invalid or model parameters are incorrect
+        anthropic.APIError: If there's an error with the Claude API
+        Exception: For other errors during processing
+    """
+    # Initialize message content
+    message_content = []
+
+    # Download and encode each image file from URLs
+    with httpx.Client(follow_redirects=True) as client:
+        for img_url in img_urls:
+            response = client.get(img_url)
+            if response.status_code != 200:
+                raise ValueError(f"Failed to download image from URL: {img_url}")
+
+            # Detect content type from response headers
+            content_type = response.headers.get("content-type", "")
+            if not content_type.startswith("image/"):
+                raise ValueError(f"URL does not point to a valid image: {img_url}")
+
+            # Check file size (5MB limit for API)
+            if len(response.content) > 5 * 1024 * 1024:
+                raise ValueError(f"Image file size exceeds 5MB limit: {img_url}")
+
+            # Encode image data
+            img_data = base64.standard_b64encode(response.content).decode("utf-8")
+
+            # Add image to message content
+            message_content.append(
+                {
+                    "type": "image",
+                    "source": {
+                        "type": "base64",
+                        "media_type": content_type,
+                        "data": img_data,
+                    },
+                }
+            )
+
+    # Add user input text
+    message_content.append({"type": "text", "text": user_input})
+
+    # Initialize Anthropic client
+    client = anthropic.Anthropic()
+
+    try:
+        # Send to Claude
+        message = client.messages.create(
+            model=model_name,
+            max_tokens=1024,
+            messages=[
+                {
+                    "role": "user",
+                    "content": message_content,
+                }
+            ],
+        )
+
+        # Extract usage metadata
+        usage = UsageMetadata(
+            prompt_tokens=message.usage.input_tokens,
+            completion_tokens=message.usage.output_tokens,
+            total_tokens=message.usage.input_tokens + message.usage.output_tokens,
+            cache_creation_input_tokens=getattr(message.usage, 'cache_creation_input_tokens', 0) or 0,
+            cache_read_input_tokens=getattr(message.usage, 'cache_read_input_tokens', 0) or 0,
+            model=model_name,
+        )
+
+        print(
+            f"analyze_imgs_with_claude============> input_token: {message.usage.input_tokens} output_token: {message.usage.output_tokens}",
+        )
+        return message.content[0].text, usage
+    except anthropic.APIError as e:
+        import traceback
+
+        traceback.print_exc()
+        raise anthropic.APIError(
+            f"Claude API error with model {model_name}: {str(e)}"
+        )
+    except Exception as e:
+        import traceback
+
+        traceback.print_exc()
+        raise Exception(
+            f"Error analyzing image(s) with Claude {model_name}: {str(e)}"
+        )
+
+
+def analyze_imgs_with_gemini(
+    img_urls: list[str],
+    user_input: str,
+    model_name: str = "gemini-2.5-flash",
+) -> Tuple[str, UsageMetadata]:
+    """
+    Analyze multiple images using Gemini Vision API
+
+    Args:
+        img_urls: List of URLs to the image files
+        user_input: User's query about the image content(s)
+        model_name: Gemini model name to use
+
+    Returns:
+        Tuple[str, UsageMetadata]: Gemini's analysis and usage metadata
+
+    Raises:
+        ValueError: If image URLs are invalid or model parameters are incorrect
+        Exception: For errors during API calls or other processing
+    """
+    # Import lazily; importing these at module load time is slow
+    from google import genai
+    from google.genai.types import HttpOptions, Part
+    from google.oauth2 import service_account
+
+    # Initialize the Gemini client
+    api_key = os.getenv("GEMINI_API_KEY", "")
+    if not api_key:
+        raise ValueError("GEMINI_API_KEY environment variable not set")
+
+    # Set the API key
+
+    try:
+        # Initialize the model and prepare the content list
+        credentials = service_account.Credentials.from_service_account_file(
+            os.getenv("GOOGLE_APPLICATION_CREDENTIALS_FOR_FASTAPI"),
+            scopes=["https://www.googleapis.com/auth/cloud-platform"],
+        )
+
+        client = genai.Client(
+            credentials=credentials,
+            project="scoop-386004",
+            location="us-central1",
+        )
+        contents = [user_input]
+
+        # Download and process each image
+        with httpx.Client(follow_redirects=True) as http_client:
+            for img_url in img_urls:
+                response = http_client.get(img_url)
+                if response.status_code != 200:
+                    raise ValueError(f"Failed to download image from URL: {img_url}")
+
+                # Detect the content type
+                content_type = response.headers.get("content-type", "")
+                if not content_type.startswith("image/"):
+                    raise ValueError(f"URL does not point to a valid image: {img_url}")
+
+                # Check the file size
+                if len(response.content) > 20 * 1024 * 1024:  # 20MB limit
+                    raise ValueError(f"Image file size too large: {img_url}")
+
+                # Add the image to the contents
+                contents.append(
+                    Part.from_bytes(
+                        data=response.content,
+                        mime_type=content_type,
+                    )
+                )
+
+        # Generate content with genai
+        response = client.models.generate_content(
+            model=model_name,
+            contents=contents,
+        )
+
+        # Extract usage metadata
+        usage = UsageMetadata(model=model_name)
+        if hasattr(response, "usage_metadata"):
+            usage_meta = response.usage_metadata
+            usage = UsageMetadata(
+                prompt_tokens=getattr(usage_meta, 'prompt_token_count', 0) or 0,
+                completion_tokens=getattr(usage_meta, 'candidates_token_count', 0) or 0,
+                total_tokens=getattr(usage_meta, 'total_token_count', 0) or 0,
+                cache_creation_input_tokens=0,
+                cache_read_input_tokens=getattr(usage_meta, 'cached_content_token_count', 0) or 0,
+                model=model_name,
+            )
+            print(
+                f"analyze_imgs_with_gemini============> input_token: {usage_meta.prompt_token_count} output_token: {usage_meta.candidates_token_count}"
+            )
+
+        return response.text, usage
+
+    except httpx.RequestError as e:
+        import traceback
+
+        traceback.print_exc()
+        raise ValueError(f"Failed to download image(s): {str(e)}")
+    except Exception as e:
+        import traceback
+
+        traceback.print_exc()
+        raise Exception(f"Error analyzing image(s) with Gemini {model_name}: {str(e)}")
+
+
+def analyze_imgs(img_urls: list[str], user_input: str) -> Dict[str, Any]:
+    """
+    Analyze multiple images using configured AI models.
+
+    Uses models specified in IMG_ANALYZER_MODEL environment variable.
+    When multiple models are specified (comma-separated), tries them in order
+    until one succeeds, falling back to next model if a model fails.
+
+    Example: IMG_ANALYZER_MODEL=claude-3-7-sonnet-latest,gemini-2.0-flash
+
+    Args:
+        img_urls: List of URLs to the image files
+        user_input: User's query about the image content(s)
+
+    Returns:
+        Dict[str, Any]: {
+            "result": str, # AI analysis result
+            "usage_metadata": List[Dict] # Token usage for each LLM call
+        }
+    """
+    usage_list: List[UsageMetadata] = []
+
+    # Get models from environment variable, split by comma if multiple models
+    models_str = os.getenv("IMG_ANALYZER_MODEL", "gemini-2.5-flash")
+    print(f"[analyze_imgs] 分析IMG使用模型: {models_str}")
+    models = models_str.split(",")
+
+    # Remove whitespace around model names
+    models = [model.strip() for model in models]
+    print(f"[analyze_imgs] 處理後模型列表: {models}")
+
+    last_error = None
+    errors = []
+
+    # Try each model in sequence until one succeeds
+    for model in models:
+        try:
+            if model.startswith("gemini-"):
+                print(f"[analyze_imgs] 嘗試使用 Gemini 模型: {model}")
+                result, usage = analyze_imgs_with_gemini(img_urls, user_input, model)
+                usage_list.append(usage)
+                return {
+                    "result": result,
+                    "usage_metadata": [u.to_dict() for u in usage_list],
+                }
+            elif model.startswith("claude-"):
+                print(f"[analyze_imgs] 嘗試使用 Claude 模型: {model}")
+                result, usage = analyze_imgs_with_claude(img_urls, user_input, model)
+                usage_list.append(usage)
+                return {
+                    "result": result,
+                    "usage_metadata": [u.to_dict() for u in usage_list],
+                }
+            else:
+                print(f"[analyze_imgs] 不支持的模型格式: {model}, 跳過")
+                errors.append(f"不支持的模型格式: {model}")
+                continue
+
+        except Exception as e:
+            last_error = e
+            error_msg = str(e)
+            print(f"[analyze_imgs] 模型 {model} 失敗,錯誤: {error_msg}")
+            import traceback
+
+            traceback.print_exc()
+            errors.append(f"模型 {model} 異常: {error_msg}")
+            # Continue to the next model in the list
+            continue
+
+    # If we've tried all models and none succeeded, return all errors
+    error_summary = "\n".join(errors)
+    return {
+        "result": f"錯誤: 所有配置的模型都失敗了。詳細錯誤:\n{error_summary}",
+        "usage_metadata": [u.to_dict() for u in usage_list],
+    }
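
For context, the sketch below illustrates how the rewritten analyze_imgs() entry point might be called once the 6.2.21 wheel is installed. It is a minimal, hypothetical example and not part of the package: the image URL is a placeholder, and it assumes the credentials each backend needs are already configured (ANTHROPIC_API_KEY for Claude models; GEMINI_API_KEY plus GOOGLE_APPLICATION_CREDENTIALS_FOR_FASTAPI for Gemini models).

import os

from botrun_flow_lang.langgraph_agents.agents.util.img_util import analyze_imgs

# Optional: comma-separated fallback list; models are tried in order until one succeeds.
os.environ["IMG_ANALYZER_MODEL"] = "gemini-2.5-flash,claude-sonnet-4-5-20250929"

# Placeholder URL for illustration only; any publicly reachable image URL works.
result = analyze_imgs(
    img_urls=["https://example.com/sample.png"],
    user_input="Describe the contents of this image.",
)

print(result["result"])  # analysis text, or an error summary if every model failed
for usage in result["usage_metadata"]:
    print(usage)  # per-call token usage dicts produced by UsageMetadata.to_dict()

Note that analyze_imgs() does not raise when every configured model fails; it returns the combined error messages in result["result"], so callers should inspect that field rather than rely on exceptions.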