botrun-flow-lang 5.12.264__py3-none-any.whl → 6.2.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -113,12 +113,17 @@ class GraphSchemaRequest(BaseModel):
     graph_name: str


-PERPLEXITY_SEARCH_AGENT = "perplexity_search_agent"
+# Import from the constants module so that external modules can get these
+# constants without triggering heavy imports
+from botrun_flow_lang.api.langgraph_constants import (
+    LANGGRAPH_REACT_AGENT,
+    GOV_SUBSIDY_AGENT,
+    PERPLEXITY_SEARCH_AGENT,
+)
+
+# Constants used only inside this file
 CUSTOM_WEB_RESEARCH_AGENT = "custom_web_research_agent"
-LANGGRAPH_REACT_AGENT = "langgraph_react_agent"
 DEEP_RESEARCH_AGENT = "deep_research_agent"
 # GOV_RESEARCHER_AGENT = "gov_researcher_agent"
-GOV_SUBSIDY_AGENT = "gov_subsidy_agent"
 GEMINI_SUBSIDY_AGENT = "gemini_subsidy_agent"


@@ -0,0 +1,11 @@
+"""
+LangGraph constant definitions
+
+This file contains only constant definitions and no imports that would trigger heavy SDK loading.
+That way, modules that only need these constants never pull in heavy packages such as langchain_google_vertexai.
+"""
+
+# Graph name constants
+LANGGRAPH_REACT_AGENT = "langgraph_react_agent"
+GOV_SUBSIDY_AGENT = "gov_subsidy_agent"
+PERPLEXITY_SEARCH_AGENT = "perplexity_search_agent"
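Note: the payoff of the constants-only module is that a caller which needs just a graph name never imports langgraph_api (and its SDK chain) as a side effect. A minimal sketch; the helper function is hypothetical:

    # Hypothetical consumer: this import pulls in nothing heavy, because
    # langgraph_constants contains only string constants.
    from botrun_flow_lang.api.langgraph_constants import PERPLEXITY_SEARCH_AGENT

    def is_perplexity(graph_name: str) -> bool:
        return graph_name == PERPLEXITY_SEARCH_AGENT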
@@ -86,8 +86,9 @@ from langchain_mcp_adapters.client import MultiServerMCPClient
 # ========
 # for Vertex AI
 from google.oauth2 import service_account
-from langchain_google_vertexai import ChatVertexAI
-from langchain_google_vertexai.model_garden import ChatAnthropicVertex
+# Heavy imports are now deferred to avoid loading google-cloud-aiplatform at startup (about 26 seconds)
+# ChatVertexAI has been migrated to ChatGoogleGenerativeAI(vertexai=True)
+# ChatAnthropicVertex is imported only when needed (see get_react_agent_model)

 load_dotenv()

@@ -234,8 +235,10 @@ def get_react_agent_model(model_name: str = ""):
     # Determine the model type and create the corresponding instance
     if vertex_model_name.startswith("gemini-"):
         # Gemini family: gemini-2.5-pro, gemini-2.5-flash, gemini-pro
-        model = ChatVertexAI(
+        # Use ChatGoogleGenerativeAI with vertexai=True to avoid loading the heavy langchain_google_vertexai
+        model = ChatGoogleGenerativeAI(
             model=vertex_model_name,
+            vertexai=True,
             location=vertex_region,
             project=vertex_project,
             credentials=credentials,
@@ -243,11 +246,13 @@ def get_react_agent_model(model_name: str = ""):
             max_tokens=GEMINI_MAX_TOKENS,
         )
         logger.info(
-            f"model ChatVertexAI {vertex_model_name} @ {vertex_region} (project: {vertex_project})"
+            f"model ChatGoogleGenerativeAI(vertexai=True) {vertex_model_name} @ {vertex_region} (project: {vertex_project})"
         )

     elif "claude" in vertex_model_name.lower() or vertex_model_name.startswith("maison/"):
         # Anthropic Claude (model garden)
+        # Lazily import ChatAnthropicVertex so langchain_google_vertexai is only loaded when needed
+        from langchain_google_vertexai.model_garden import ChatAnthropicVertex
         model = ChatAnthropicVertex(
             model=vertex_model_name,
             location=vertex_region,
@@ -302,6 +307,8 @@ def get_react_agent_model(model_name: str = ""):
         )

         # Initialize ChatAnthropicVertex
+        # Lazy import: trigger langchain_google_vertexai only when it is needed
+        from langchain_google_vertexai.model_garden import ChatAnthropicVertex
         model = ChatAnthropicVertex(
             project=vertex_project,
             model=vertex_model,
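Note: both Claude branches defer the same import: the expensive module is imported inside the function body, so the cost is paid on first call rather than at process start, and Python's module cache (sys.modules) makes every later call cheap. The pattern in isolation, with a hypothetical heavy dependency:

    def get_model(name: str):
        # Deferred import: the module loads on the first call, not at startup;
        # sys.modules caches it, so subsequent calls skip the cost.
        from heavy_sdk import HeavyClient  # hypothetical slow-to-import package
        return HeavyClient(model=name)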
@@ -4,8 +4,11 @@ import httpx
 import os
 import imghdr
 from pathlib import Path
+from typing import Dict, Any, List, Tuple
 from dotenv import load_dotenv

+from botrun_flow_lang.langgraph_agents.agents.util.usage_metadata import UsageMetadata
+
 load_dotenv()


@@ -50,7 +53,7 @@ def get_img_content_type(file_path: str | Path) -> str:

 def analyze_imgs_with_claude(
     img_urls: list[str], user_input: str, model_name: str = "claude-sonnet-4-5-20250929"
-) -> str:
+) -> Tuple[str, UsageMetadata]:
     """
     Analyze multiple images using Claude Vision API

@@ -60,7 +63,7 @@ def analyze_imgs_with_claude(
         model_name: Claude model name to use

     Returns:
-        str: Claude's analysis of the image content(s) based on the query
+        Tuple[str, UsageMetadata]: Claude's analysis and usage metadata

     Raises:
         ValueError: If image URLs are invalid or model parameters are incorrect
@@ -120,10 +123,20 @@ def analyze_imgs_with_claude(
             ],
         )

+        # Extract usage metadata
+        usage = UsageMetadata(
+            prompt_tokens=message.usage.input_tokens,
+            completion_tokens=message.usage.output_tokens,
+            total_tokens=message.usage.input_tokens + message.usage.output_tokens,
+            cache_creation_input_tokens=getattr(message.usage, 'cache_creation_input_tokens', 0) or 0,
+            cache_read_input_tokens=getattr(message.usage, 'cache_read_input_tokens', 0) or 0,
+            model=model_name,
+        )
+
         print(
             f"analyze_imgs_with_claude============> input_token: {message.usage.input_tokens} output_token: {message.usage.output_tokens}",
         )
-        return message.content[0].text
+        return message.content[0].text, usage
     except anthropic.APIError as e:
         import traceback

@@ -144,7 +157,7 @@ def analyze_imgs_with_gemini(
     img_urls: list[str],
     user_input: str,
     model_name: str = "gemini-2.5-flash",
-) -> str:
+) -> Tuple[str, UsageMetadata]:
     """
     Analyze multiple images using Gemini Vision API

@@ -154,7 +167,7 @@ def analyze_imgs_with_gemini(
         model_name: Gemini model name to use

     Returns:
-        str: Gemini's analysis of the image content(s) based on the query
+        Tuple[str, UsageMetadata]: Gemini's analysis and usage metadata

     Raises:
         ValueError: If image URLs are invalid or model parameters are incorrect
@@ -216,10 +229,23 @@ def analyze_imgs_with_gemini(
             contents=contents,
         )

-        print(
-            f"analyze_imgs_with_gemini============> input_token: {response.usage_metadata.prompt_token_count} output_token: {response.usage_metadata.candidates_token_count}"
-        )
-        return response.text
+        # Extract usage metadata
+        usage = UsageMetadata(model=model_name)
+        if hasattr(response, "usage_metadata"):
+            usage_meta = response.usage_metadata
+            usage = UsageMetadata(
+                prompt_tokens=getattr(usage_meta, 'prompt_token_count', 0) or 0,
+                completion_tokens=getattr(usage_meta, 'candidates_token_count', 0) or 0,
+                total_tokens=getattr(usage_meta, 'total_token_count', 0) or 0,
+                cache_creation_input_tokens=0,
+                cache_read_input_tokens=getattr(usage_meta, 'cached_content_token_count', 0) or 0,
+                model=model_name,
+            )
+            print(
+                f"analyze_imgs_with_gemini============> input_token: {usage_meta.prompt_token_count} output_token: {usage_meta.candidates_token_count}"
+            )
+
+        return response.text, usage

     except httpx.RequestError as e:
         import traceback
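Note: the `getattr(obj, name, 0) or 0` guard used in these extractions covers two failure modes at once: the attribute may be missing entirely, or present but set to None. A self-contained sketch:

    # Hypothetical usage object where the cache counter is present but None
    class _Usage:
        cache_read_input_tokens = None

    u = _Usage()
    tokens = getattr(u, "cache_read_input_tokens", 0) or 0  # missing -> 0, None -> 0
    assert tokens == 0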
@@ -233,7 +259,7 @@ def analyze_imgs_with_gemini(
         raise Exception(f"Error analyzing image(s) with Gemini {model_name}: {str(e)}")


-def analyze_imgs(img_urls: list[str], user_input: str) -> str:
+def analyze_imgs(img_urls: list[str], user_input: str) -> Dict[str, Any]:
     """
     Analyze multiple images using configured AI models.

@@ -248,8 +274,13 @@ def analyze_imgs(img_urls: list[str], user_input: str) -> str:
         user_input: User's query about the image content(s)

     Returns:
-        str: AI analysis of the image content(s) based on the query
+        Dict[str, Any]: {
+            "result": str,                 # AI analysis result
+            "usage_metadata": List[Dict]   # Token usage for each LLM call
+        }
     """
+    usage_list: List[UsageMetadata] = []
+
     # Get models from environment variable, split by comma if multiple models
     models_str = os.getenv("IMG_ANALYZER_MODEL", "gemini-2.5-flash")
     print(f"[analyze_imgs] Image analysis model(s): {models_str}")
@@ -267,12 +298,20 @@ def analyze_imgs(img_urls: list[str], user_input: str) -> str:
         try:
             if model.startswith("gemini-"):
                 print(f"[analyze_imgs] Trying Gemini model: {model}")
-                result = analyze_imgs_with_gemini(img_urls, user_input, model)
-                return result
+                result, usage = analyze_imgs_with_gemini(img_urls, user_input, model)
+                usage_list.append(usage)
+                return {
+                    "result": result,
+                    "usage_metadata": [u.to_dict() for u in usage_list],
+                }
             elif model.startswith("claude-"):
                 print(f"[analyze_imgs] Trying Claude model: {model}")
-                result = analyze_imgs_with_claude(img_urls, user_input, model)
-                return result
+                result, usage = analyze_imgs_with_claude(img_urls, user_input, model)
+                usage_list.append(usage)
+                return {
+                    "result": result,
+                    "usage_metadata": [u.to_dict() for u in usage_list],
+                }
             else:
                 print(f"[analyze_imgs] Unsupported model format: {model}, skipping")
                 errors.append(f"Unsupported model format: {model}")
@@ -291,4 +330,7 @@ def analyze_imgs(img_urls: list[str], user_input: str) -> str:

     # If we've tried all models and none succeeded, return all errors
     error_summary = "\n".join(errors)
-    return f"Error: all configured models failed. Details:\n{error_summary}"
+    return {
+        "result": f"Error: all configured models failed. Details:\n{error_summary}",
+        "usage_metadata": [u.to_dict() for u in usage_list],
+    }
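Note: callers that previously treated analyze_imgs as returning a plain string now have to unpack the dict. A hypothetical caller-side sketch:

    response = analyze_imgs(["https://example.com/chart.png"], "What does this chart show?")
    print(response["result"])  # the analysis text, or the error summary on failure
    total = sum(u["total_tokens"] for u in response["usage_metadata"])
    print(f"tokens across all LLM calls: {total}")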
@@ -11,11 +11,13 @@ import asyncio
 import base64
 import httpx
 import os
-from typing import List, Dict, Any
+from typing import List, Dict, Any, Tuple

 from dotenv import load_dotenv
 from google.oauth2 import service_account

+from botrun_flow_lang.langgraph_agents.agents.util.usage_metadata import UsageMetadata
+
 load_dotenv()

 # File size threshold (MB)
@@ -30,16 +32,17 @@ MAX_CONCURRENT_CHUNKS = 5

 def analyze_pdf_with_claude(
     pdf_data: str, user_input: str, model_name: str = "claude-sonnet-4-5-20250929"
-):
+) -> Tuple[str, UsageMetadata]:
     """
     Analyze a PDF file using Claude API

     Args:
         pdf_data: Base64-encoded PDF data
         user_input: User's query about the PDF content
+        model_name: Claude model name to use

     Returns:
-        str: Claude's analysis of the PDF content based on the query
+        Tuple[str, UsageMetadata]: Claude's analysis and usage metadata
     """
     # Initialize Anthropic client
     client = anthropic.Anthropic()
@@ -66,15 +69,25 @@ def analyze_pdf_with_claude(
         ],
     )

+    # Extract usage metadata
+    usage = UsageMetadata(
+        prompt_tokens=message.usage.input_tokens,
+        completion_tokens=message.usage.output_tokens,
+        total_tokens=message.usage.input_tokens + message.usage.output_tokens,
+        cache_creation_input_tokens=getattr(message.usage, 'cache_creation_input_tokens', 0) or 0,
+        cache_read_input_tokens=getattr(message.usage, 'cache_read_input_tokens', 0) or 0,
+        model=model_name,
+    )
+
     print(
         f"analyze_pdf_with_claude============> input_token: {message.usage.input_tokens} output_token: {message.usage.output_tokens}",
     )
-    return message.content[0].text
+    return message.content[0].text, usage


 def analyze_pdf_with_gemini(
     pdf_data: str, user_input: str, model_name: str = "gemini-2.5-flash", pdf_url: str = ""
-):
+) -> Tuple[str, UsageMetadata]:
     """
     Analyze a PDF file using Gemini API

@@ -82,9 +95,10 @@ def analyze_pdf_with_gemini(
         pdf_data: Base64-encoded PDF data
         user_input: User's query about the PDF content
         model_name: Gemini model name to use
+        pdf_url: Original PDF URL for logging

     Returns:
-        str: Gemini's analysis of the PDF content based on the query
+        Tuple[str, UsageMetadata]: Gemini's analysis and usage metadata
     """
     # Import only when it is used, otherwise loading takes time
     from google import genai
@@ -112,14 +126,25 @@ def analyze_pdf_with_gemini(
             ),
         ],
     )
-    # Log token usage if available
+
+    # Extract usage metadata
+    usage = UsageMetadata(model=model_name)
     if hasattr(response, "usage_metadata"):
+        usage_meta = response.usage_metadata
+        usage = UsageMetadata(
+            prompt_tokens=getattr(usage_meta, 'prompt_token_count', 0) or 0,
+            completion_tokens=getattr(usage_meta, 'candidates_token_count', 0) or 0,
+            total_tokens=getattr(usage_meta, 'total_token_count', 0) or 0,
+            cache_creation_input_tokens=0,
+            cache_read_input_tokens=getattr(usage_meta, 'cached_content_token_count', 0) or 0,
+            model=model_name,
+        )
         print(
-            f"analyze_pdf_with_gemini============> input_token: {response.usage_metadata.prompt_token_count} output_token: {response.usage_metadata.candidates_token_count}",
+            f"analyze_pdf_with_gemini============> input_token: {usage_meta.prompt_token_count} output_token: {usage_meta.candidates_token_count}",
        )

     print(f"{pdf_url} success")
-    return response.text
+    return response.text, usage


 def _analyze_single_chunk(
@@ -135,7 +160,7 @@ def _analyze_single_chunk(
         model_name: Model name to use

     Returns:
-        Dict: {"page_range": str, "answer": str, "relevant": bool, "error": str|None}
+        Dict: {"page_range": str, "answer": str, "relevant": bool, "error": str|None, "usage": UsageMetadata}
     """
     # Build the chunk-specific prompt
     chunk_prompt = f"""You are reading one part of a large PDF document ({page_range}).
@@ -149,15 +174,16 @@ def _analyze_single_chunk(

    try:
        if model_name.startswith("gemini-"):
-            answer = analyze_pdf_with_gemini(chunk_data, chunk_prompt, model_name)
+            answer, usage = analyze_pdf_with_gemini(chunk_data, chunk_prompt, model_name)
        elif model_name.startswith("claude-"):
-            answer = analyze_pdf_with_claude(chunk_data, chunk_prompt, model_name)
+            answer, usage = analyze_pdf_with_claude(chunk_data, chunk_prompt, model_name)
        else:
            return {
                "page_range": page_range,
                "answer": "",
                "relevant": False,
                "error": f"Unknown model type: {model_name}",
+                "usage": UsageMetadata(),
            }

        # Determine whether the answer is relevant
@@ -168,6 +194,7 @@ def _analyze_single_chunk(
             "answer": answer if is_relevant else "",
             "relevant": is_relevant,
             "error": None,
+            "usage": usage,
         }

     except Exception as e:
@@ -179,12 +206,13 @@ def _analyze_single_chunk(
             "answer": "",
             "relevant": False,
             "error": str(e),
+            "usage": UsageMetadata(model=model_name),
         }


 async def analyze_pdf_chunks_parallel(
     chunks: List[tuple], user_input: str, model_name: str, max_concurrent: int = 5
-) -> List[Dict[str, Any]]:
+) -> Tuple[List[Dict[str, Any]], List[UsageMetadata]]:
     """
     Answer questions over multiple PDF chunks in parallel

@@ -195,7 +223,7 @@ async def analyze_pdf_chunks_parallel(
         max_concurrent: Maximum number of parallel calls

     Returns:
-        List[Dict]: Answer results for each chunk
+        Tuple[List[Dict], List[UsageMetadata]]: Answer results for each chunk and a usage list for each call
     """
     semaphore = asyncio.Semaphore(max_concurrent)

@@ -224,8 +252,9 @@ async def analyze_pdf_chunks_parallel(
     # Run in parallel
     results = await asyncio.gather(*tasks, return_exceptions=True)

-    # Handle exceptions
+    # Handle exceptions and collect the usage list
     processed_results = []
+    usage_list = []
     for i, result in enumerate(results):
         if isinstance(result, Exception):
             processed_results.append(
@@ -234,19 +263,24 @@ async def analyze_pdf_chunks_parallel(
                     "answer": "",
                     "relevant": False,
                     "error": str(result),
+                    "usage": UsageMetadata(model=model_name),
                 }
             )
+            usage_list.append(UsageMetadata(model=model_name))
         else:
             processed_results.append(result)
+            # Collect usage
+            if "usage" in result and isinstance(result["usage"], UsageMetadata):
+                usage_list.append(result["usage"])

-    return processed_results
+    return processed_results, usage_list
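Note: asyncio.gather with return_exceptions=True returns exceptions as values instead of raising, which is what lets the loop above turn failed chunks into error records while still collecting usage from the successful ones. A minimal sketch with assumed worker names:

    import asyncio

    async def _work(i: int) -> dict:
        # Hypothetical chunk worker; chunk 2 fails
        if i == 2:
            raise RuntimeError("boom")
        return {"answer": f"chunk {i}", "usage": {"total_tokens": 10}}

    async def main():
        results = await asyncio.gather(*[_work(i) for i in range(3)], return_exceptions=True)
        usage_list = [r["usage"] for r in results if not isinstance(r, Exception)]
        print(len(usage_list))  # 2: the failing chunk contributes no usage

    asyncio.run(main())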


 def merge_chunk_results(
     chunk_results: List[Dict[str, Any]],
     user_input: str,
     model_name: str = "gemini-2.5-flash",
-) -> str:
+) -> Tuple[str, UsageMetadata]:
     """
     Use an LLM to consolidate the answers from multiple chunks

@@ -256,7 +290,7 @@ def merge_chunk_results(
         model_name: Model name used for consolidation

     Returns:
-        str: The consolidated answer
+        Tuple[str, UsageMetadata]: The consolidated answer and usage metadata
     """
     # Filter out the relevant answers
     relevant_results = [r for r in chunk_results if r.get("relevant", False)]
@@ -266,12 +300,12 @@ def merge_chunk_results(
         error_results = [r for r in chunk_results if r.get("error")]
         if error_results:
             error_msgs = [f"{r['page_range']}: {r['error']}" for r in error_results]
-            return f"Errors occurred while analyzing the PDF:\n" + "\n".join(error_msgs)
-        return "No content relevant to your question was found in the PDF document."
+            return f"Errors occurred while analyzing the PDF:\n" + "\n".join(error_msgs), UsageMetadata(model=model_name)
+        return "No content relevant to your question was found in the PDF document.", UsageMetadata(model=model_name)

-    # Only one relevant result: return it directly
+    # Only one relevant result: return it directly (no extra LLM call needed)
     if len(relevant_results) == 1:
-        return relevant_results[0]["answer"]
+        return relevant_results[0]["answer"], UsageMetadata(model=model_name)

     # Multiple relevant results need consolidation
     combined_content = "\n\n".join(
@@ -310,22 +344,33 @@ def merge_chunk_results(
             contents=[merge_prompt],
         )

+        # Extract usage metadata
+        usage = UsageMetadata(model=model_name)
         if hasattr(response, "usage_metadata"):
+            usage_meta = response.usage_metadata
+            usage = UsageMetadata(
+                prompt_tokens=getattr(usage_meta, 'prompt_token_count', 0) or 0,
+                completion_tokens=getattr(usage_meta, 'candidates_token_count', 0) or 0,
+                total_tokens=getattr(usage_meta, 'total_token_count', 0) or 0,
+                cache_creation_input_tokens=0,
+                cache_read_input_tokens=getattr(usage_meta, 'cached_content_token_count', 0) or 0,
+                model=model_name,
+            )
             print(
-                f"merge_chunk_results============> input_token: {response.usage_metadata.prompt_token_count} output_token: {response.usage_metadata.candidates_token_count}",
+                f"merge_chunk_results============> input_token: {usage_meta.prompt_token_count} output_token: {usage_meta.candidates_token_count}",
             )

-        return response.text
+        return response.text, usage

     except Exception as e:
         import traceback

         traceback.print_exc()
         # Consolidation failed: return the combined content directly
-        return f"An error occurred during consolidation; here are the answers from each part:\n\n{combined_content}"
+        return f"An error occurred during consolidation; here are the answers from each part:\n\n{combined_content}", UsageMetadata(model=model_name)


-async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:
+async def analyze_pdf_async(pdf_url: str, user_input: str) -> Dict[str, Any]:
     """
     Analyze a PDF file asynchronously (smart processing strategy)

@@ -338,8 +383,13 @@ async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:
         user_input: The user's question

     Returns:
-        str: Analysis result
+        Dict[str, Any]: {
+            "result": str,                 # Analysis result
+            "usage_metadata": List[Dict]   # Usage info for each LLM call
+        }
     """
+    usage_list: List[UsageMetadata] = []
+
     try:
         # 1. Download the PDF
         print(f"[analyze_pdf_async] Downloading PDF: {pdf_url}")
@@ -364,9 +414,19 @@ async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:
         for model in models:
             try:
                 if model.startswith("gemini-"):
-                    return analyze_pdf_with_gemini(pdf_data, user_input, model, pdf_url)
+                    result, usage = analyze_pdf_with_gemini(pdf_data, user_input, model, pdf_url)
+                    usage_list.append(usage)
+                    return {
+                        "result": result,
+                        "usage_metadata": [u.to_dict() for u in usage_list],
+                    }
                 elif model.startswith("claude-"):
-                    return analyze_pdf_with_claude(pdf_data, user_input, model)
+                    result, usage = analyze_pdf_with_claude(pdf_data, user_input, model)
+                    usage_list.append(usage)
+                    return {
+                        "result": result,
+                        "usage_metadata": [u.to_dict() for u in usage_list],
+                    }
             except Exception as e:
                 import traceback

@@ -374,7 +434,10 @@ async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:
                 last_error = str(e)
                 continue

-        return f"All models failed while analyzing the PDF. Last error: {last_error}"
+        return {
+            "result": f"All models failed while analyzing the PDF. Last error: {last_error}",
+            "usage_metadata": [u.to_dict() for u in usage_list],
+        }

         # 3. Large file: compress → split → parallel Q&A → consolidate
         print(f"[analyze_pdf_async] Large-file mode (>= {PDF_SIZE_THRESHOLD_MB}MB)")
@@ -427,9 +490,10 @@ async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:

         # 3.3 Parallel Q&A
         print(f"[analyze_pdf_async] Starting parallel Q&A (max concurrency: {MAX_CONCURRENT_CHUNKS})...")
-        chunk_results = await analyze_pdf_chunks_parallel(
+        chunk_results, chunk_usage_list = await analyze_pdf_chunks_parallel(
             chunks, user_input, primary_model, max_concurrent=MAX_CONCURRENT_CHUNKS
         )
+        usage_list.extend(chunk_usage_list)

         # Tally the results
         relevant_count = sum(1 for r in chunk_results if r.get("relevant", False))
@@ -441,19 +505,28 @@ async def analyze_pdf_async(pdf_url: str, user_input: str) -> str:

         # 3.4 Consolidate the results
         print("[analyze_pdf_async] Consolidating results...")
-        result = merge_chunk_results(chunk_results, user_input, primary_model)
+        result, merge_usage = merge_chunk_results(chunk_results, user_input, primary_model)
+        # Only append merge_usage when it has actual token usage (avoid appending empty usage)
+        if merge_usage.prompt_tokens > 0 or merge_usage.completion_tokens > 0:
+            usage_list.append(merge_usage)
         print("[analyze_pdf_async] Done")

-        return result
+        return {
+            "result": result,
+            "usage_metadata": [u.to_dict() for u in usage_list],
+        }

     except Exception as e:
         import traceback

         traceback.print_exc()
-        return f"An error occurred while analyzing PDF {pdf_url}: {str(e)}"
+        return {
+            "result": f"An error occurred while analyzing PDF {pdf_url}: {str(e)}",
+            "usage_metadata": [u.to_dict() for u in usage_list],
+        }


-def analyze_pdf(pdf_url: str, user_input: str) -> str:
+def analyze_pdf(pdf_url: str, user_input: str) -> Dict[str, Any]:
     """
     Analyze a PDF file (synchronous wrapper)

@@ -465,7 +538,10 @@ def analyze_pdf(pdf_url: str, user_input: str) -> str:
         user_input: The user's question

     Returns:
-        str: Analysis result
+        Dict[str, Any]: {
+            "result": str,                 # Analysis result
+            "usage_metadata": List[Dict]   # Usage info for each LLM call
+        }
     """
     try:
         # Try to get the existing event loop
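Note: a synchronous wrapper over an async function has to behave differently depending on whether an event loop is already running; the diff only shows the first line of that logic. One common approach, sketched here as an assumption rather than what the package necessarily does:

    import asyncio
    import concurrent.futures

    def analyze(pdf_url: str, user_input: str) -> dict:
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            # No loop running: create one and run the coroutine to completion
            return asyncio.run(analyze_pdf_async(pdf_url, user_input))
        # Already inside a running loop: run the coroutine on a separate thread
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
            return pool.submit(asyncio.run, analyze_pdf_async(pdf_url, user_input)).result()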
@@ -0,0 +1,34 @@
+"""
+Usage Metadata module
+
+Provides token-usage tracking for LLM calls.
+"""
+
+from dataclasses import dataclass, asdict
+from typing import Dict, Any
+
+
+@dataclass
+class UsageMetadata:
+    """Token usage metadata that matches the expected parsing format."""
+    prompt_tokens: int = 0
+    completion_tokens: int = 0
+    total_tokens: int = 0
+    cache_creation_input_tokens: int = 0
+    cache_read_input_tokens: int = 0
+    model: str = ""
+
+    def __add__(self, other: "UsageMetadata") -> "UsageMetadata":
+        """Combine two UsageMetadata objects."""
+        return UsageMetadata(
+            prompt_tokens=self.prompt_tokens + other.prompt_tokens,
+            completion_tokens=self.completion_tokens + other.completion_tokens,
+            total_tokens=self.total_tokens + other.total_tokens,
+            cache_creation_input_tokens=self.cache_creation_input_tokens + other.cache_creation_input_tokens,
+            cache_read_input_tokens=self.cache_read_input_tokens + other.cache_read_input_tokens,
+            model=self.model or other.model,
+        )
+
+    def to_dict(self) -> Dict[str, Any]:
+        """Convert to dictionary."""
+        return asdict(self)
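Note: because UsageMetadata is a dataclass with a custom __add__, per-call records can be summed directly and serialized with to_dict(). A small usage sketch:

    a = UsageMetadata(prompt_tokens=100, completion_tokens=20, total_tokens=120, model="gemini-2.5-flash")
    b = UsageMetadata(prompt_tokens=50, completion_tokens=10, total_tokens=60, model="gemini-2.5-flash")
    combined = a + b                  # __add__ sums every counter field
    assert combined.total_tokens == 180
    print(combined.to_dict())         # plain dict via dataclasses.asdict, ready for JSON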
@@ -34,7 +34,6 @@ from botrun_flow_lang.utils.botrun_logger import get_default_botrun_logger

 # Import for generate_image
 from langchain_community.utilities.dalle_image_generator import DallEAPIWrapper
-from langchain_community.callbacks import get_openai_callback

 # Initialize MCP server
 mcp = FastMCP(name="BotrunFlowLangDefaultMCP", stateless_http=True)
@@ -74,7 +73,7 @@ async def scrape(url: str) -> dict:
 @mcp.tool()
 async def chat_with_pdf(
     pdf_url: str, user_input: str, botrun_flow_lang_url: str, user_id: str
-) -> str:
+) -> dict:
     """
     Analyze a PDF file and answer questions about its content.

@@ -89,7 +88,16 @@ async def chat_with_pdf(
         user_id: REQUIRED - User ID for file upload (LLM can get this from system prompt)

     Returns:
-        str: Analysis result or Plotly-compatible data structure if visualization is needed
+        dict: {
+            "result": str,                 # Analysis result
+            "usage_metadata": List[Dict]   # Token usage for each LLM call, with format:
+                - prompt_tokens: int
+                - completion_tokens: int
+                - total_tokens: int
+                - cache_creation_input_tokens: int
+                - cache_read_input_tokens: int
+                - model: str
+        }
     """
     logger.info(f"chat_with_pdf pdf_url: {pdf_url} user_input: {user_input}")

@@ -106,7 +114,7 @@ async def chat_with_imgs(
     user_input: str,
     botrun_flow_lang_url: str,
     user_id: str,
-) -> str:
+) -> dict:
     """
     Analyze multiple images and answer questions about their content.

@@ -117,7 +125,16 @@ async def chat_with_imgs(
         user_id: REQUIRED - User ID for file upload (LLM can get this from system prompt)

     Returns:
-        str: Analysis result or Plotly-compatible data structure if visualization is needed
+        dict: {
+            "result": str,                 # Analysis result
+            "usage_metadata": List[Dict]   # Token usage for each LLM call, with format:
+                - prompt_tokens: int
+                - completion_tokens: int
+                - total_tokens: int
+                - cache_creation_input_tokens: int
+                - cache_read_input_tokens: int
+                - model: str
+        }
     """
     logger.info(f"chat_with_imgs img_urls: {img_urls} user_input: {user_input}")

@@ -136,7 +153,7 @@ async def chat_with_imgs(
 @mcp.tool()
 async def generate_image(
     user_input: str, user_id: str = "", botrun_flow_lang_url: str = ""
-) -> str:
+) -> dict:
     """
     Generate high-quality images using DALL-E 3 and store permanently in GCS.

@@ -173,18 +190,35 @@ async def generate_image(
         botrun_flow_lang_url: REQUIRED - URL for the botrun flow lang API (LLM can get this from system prompt)

     Returns:
-        str: Permanent URL to the generated image stored in GCS, or error message if generation fails
+        dict: {
+            "result": str,                 # Permanent URL to the generated image stored in GCS, or error message
+            "usage_metadata": List[Dict]   # Token usage for each LLM call, with format:
+                - prompt_tokens: int
+                - completion_tokens: int
+                - total_tokens: int
+                - cache_creation_input_tokens: int
+                - cache_read_input_tokens: int
+                - model: str
+        }
     """
+    usage_list = []
+
     try:
         logger.info(f"generate_image user_input: {user_input}")

         # Validate required parameters
         if not user_id:
             logger.error("User ID not available")
-            return "User ID not available"
+            return {
+                "result": "User ID not available",
+                "usage_metadata": usage_list,
+            }
         if not botrun_flow_lang_url:
             logger.error("botrun_flow_lang_url not available")
-            return "botrun_flow_lang_url not available"
+            return {
+                "result": "botrun_flow_lang_url not available",
+                "usage_metadata": usage_list,
+            }

         # Check rate limit before generating image
         rate_limit_client = RateLimitClient()
@@ -201,26 +235,38 @@ async def generate_image(
                 f"User {user_id} has reached daily limit of {daily_limit} image generations. "
                 f"Current usage: {current_usage}. Please try again tomorrow."
             )
-            return f"[Please tell user error] You have reached your daily limit of {daily_limit} image generations. " \
-                   f"Current usage: {current_usage}. Please try again tomorrow."
-            # raise BotrunRateLimitException(
-            #     f"You have reached your daily limit of {daily_limit} image generations. "
-            #     f"Current usage: {current_usage}. Please try again tomorrow."
-            # )
+            return {
+                "result": f"[Please tell user error] You have reached your daily limit of {daily_limit} image generations. "
+                f"Current usage: {current_usage}. Please try again tomorrow.",
+                "usage_metadata": usage_list,
+            }

         # 2. Generate the image with DALL-E
+        dalle_size = "1024x1024"      # options: 1024x1024, 1024x1792, 1792x1024
+        dalle_quality = "standard"    # options: standard, hd
         dalle_wrapper = DallEAPIWrapper(
-            api_key=os.getenv("OPENAI_API_KEY"), model="dall-e-3"
+            api_key=os.getenv("OPENAI_API_KEY"),
+            model="dall-e-3",
+            size=dalle_size,
+            quality=dalle_quality,
         )

-        # Generate image with token usage tracking
-        with get_openai_callback() as cb:
-            temp_image_url = dalle_wrapper.run(user_input)
-            logger.info(
-                f"DALL-E generated temporary URL: {temp_image_url}, "
-                f"prompt tokens: {cb.prompt_tokens}, "
-                f"completion tokens: {cb.completion_tokens}"
-            )
+        # Generate image (DALL-E charges per image, not per token)
+        temp_image_url = dalle_wrapper.run(user_input)
+        logger.info(f"DALL-E generated temporary URL: {temp_image_url}")
+
+        # DALL-E is not billed per token, so record the image count and specs instead
+        usage_list.append({
+            "prompt_tokens": 0,
+            "completion_tokens": 0,
+            "total_tokens": 0,
+            "cache_creation_input_tokens": 0,
+            "cache_read_input_tokens": 0,
+            "model": "dall-e-3",
+            "image_count": 1,
+            "image_size": dalle_size,
+            "image_quality": dalle_quality,
+        })

         # 3. Download and upload to GCS to obtain a permanent URL
         from botrun_flow_lang.langgraph_agents.agents.util.local_files import (
@@ -236,22 +282,34 @@ async def generate_image(
             # 4. Update the usage counter
             await rate_limit_client.update_drawing_usage(user_id)

-            return permanent_url
+            return {
+                "result": permanent_url,
+                "usage_metadata": usage_list,
+            }
         except Exception as upload_error:
             logger.error(
                 f"Failed to upload to GCS, returning temporary URL: {upload_error}"
             )
             # Fallback: return the temporary URL
             await rate_limit_client.update_drawing_usage(user_id)
-            return temp_image_url
+            return {
+                "result": temp_image_url,
+                "usage_metadata": usage_list,
+            }

     except Exception as e:
         logger.error(f"generate_image error: {e}", error=str(e), exc_info=True)

         # Check if this is a user-visible exception
         if str(e).startswith("[Please tell user error]"):
-            return str(e)  # Return the error message as is
-        return f"Error: {e}"
+            return {
+                "result": str(e),
+                "usage_metadata": usage_list,
+            }
+        return {
+            "result": f"Error: {e}",
+            "usage_metadata": usage_list,
+        }


 @mcp.tool()
@@ -267,7 +325,7 @@ async def generate_tmp_public_url(
         user_id: REQUIRED - User ID for file upload (LLM can get this from system prompt)

     Returns:
-        str: A public URL that can be used to access the file for 7 days
+        str: A temporary public URL that may be deleted periodically

     Raises:
         FileNotFoundError: If the specified file does not exist
@@ -289,7 +347,9 @@ async def create_html_page(
     user_id: str,
 ) -> str:
     """
-    Create a custom HTML page and return its URL.
+    Create a custom HTML page and return a PERMANENT URL that never expires.
+
+    The URL created by this tool will remain accessible indefinitely.

     This tool supports complete HTML documents, including JavaScript and CSS, which can be used to create
     complex interactive pages.
@@ -314,8 +374,9 @@ async def create_html_page(
         user_id: REQUIRED - User ID for file upload (LLM can get this from system prompt)

     Returns:
-        str: URL for the HTML page. This URL should be provided to the user,
-             as they will need to access it to view the content in their web browser.
+        str: A permanent URL for the HTML page that never expires.
+             This URL should be provided to the user, as they will need to
+             access it to view the content in their web browser.
     """
     try:
         logger.info(f"create_html_page html_content: {html_content} title: {title}")
@@ -692,11 +753,23 @@ async def web_search(
         user_id: Optional user ID (not used for this tool)

     Returns:
-        dict: A dictionary containing:
-            - content (str): The detailed answer based on web search results
-            - citations (list): A list of URLs, citations are important to provide to the user
-            - images (list): A list of image URLs (only when return_images is True)
+        dict: {
+            "result": {
+                "content": str,     # The detailed answer based on web search results
+                "citations": list,  # A list of URLs
+                "images": list      # A list of image URLs (only when return_images is True)
+            },
+            "usage_metadata": List[Dict]  # Token usage for each LLM call, with format:
+                - prompt_tokens: int
+                - completion_tokens: int
+                - total_tokens: int
+                - cache_creation_input_tokens: int
+                - cache_read_input_tokens: int
+                - model: str
+        }
     """
+    usage_list = []
+
     try:
         logger.info(f"web_search user_input: {user_input}")

@@ -717,6 +790,7 @@ async def web_search(
             "content": "",
             "citations": [],
         }
+        raw_response = None

         async for event in respond_with_perplexity_search(
             final_input,
@@ -729,16 +803,52 @@ async def web_search(
         ):
             if event and isinstance(event.chunk, str):
                 search_result = json.loads(event.chunk)
+            if event and event.raw_json:
+                raw_response = event.raw_json
+
+        # Extract usage from raw response
+        if raw_response and "usage" in raw_response:
+            usage = raw_response["usage"]
+            prompt_tokens = usage.get("prompt_tokens", 0) or 0
+            citation_tokens = usage.get("citation_tokens", 0) or 0
+            completion_tokens = usage.get("completion_tokens", 0) or 0
+            model = raw_response.get("model", "sonar-reasoning-pro")
+
+            # Determine whether OpenRouter is used (same logic as perplexity_search.py)
+            is_use_openrouter = os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL")
+            if return_images:
+                is_use_openrouter = False
+
+            # Prefix the model with its API source
+            if is_use_openrouter:
+                model = f"openrouter/{model}"
+            else:
+                # Direct Perplexity API call: add the perplexity/ prefix
+                if not model.startswith("perplexity/"):
+                    model = f"perplexity/{model}"
+
+            usage_list.append({
+                "prompt_tokens": prompt_tokens + citation_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": prompt_tokens + citation_tokens + completion_tokens,
+                "cache_creation_input_tokens": 0,
+                "cache_read_input_tokens": 0,
+                "model": model,
+            })

         logger.info(
             f"web_search completed============> {len(search_result.get('content', ''))}"
         )
-        return (
-            search_result
-            if search_result
-            else {"content": "No results found.", "citations": []}
-        )
+
+        result = search_result if search_result else {"content": "No results found.", "citations": []}
+        return {
+            "result": result,
+            "usage_metadata": usage_list,
+        }

     except Exception as e:
         logger.error(f"web_search error: {e}", error=str(e), exc_info=True)
-        return {"content": f"Error during web search: {str(e)}", "citations": []}
+        return {
+            "result": {"content": f"Error during web search: {str(e)}", "citations": []},
+            "usage_metadata": usage_list,
+        }
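Note: Perplexity reports citation_tokens separately, and the record above folds them into prompt_tokens so downstream accounting sees them as input. A worked example with made-up numbers:

    usage = {"prompt_tokens": 1200, "citation_tokens": 300, "completion_tokens": 450}
    prompt = usage["prompt_tokens"] + usage["citation_tokens"]  # 1500: citations billed as input
    total = prompt + usage["completion_tokens"]                 # 1950
    record = {"prompt_tokens": prompt, "completion_tokens": 450,
              "total_tokens": total, "model": "perplexity/sonar-reasoning-pro"}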
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: botrun-flow-lang
-Version: 5.12.264
+Version: 6.2.21
 Summary: A flow language for botrun
 Author-email: sebastian-hsu <sebastian.hsu@gmail.com>
 License: MIT
@@ -27,12 +27,12 @@ Requires-Dist: google-cloud-storage<3,>=2.18
 Requires-Dist: google-genai>=1.28.0
 Requires-Dist: jinja2>=3.1.6
 Requires-Dist: langchain-anthropic>=0.3.10
-Requires-Dist: langchain-aws>=0.2.17
+Requires-Dist: langchain-aws>=1.0.0
 Requires-Dist: langchain-community>=0.3.27
-Requires-Dist: langchain-core>=0.3.72
+Requires-Dist: langchain-core>=1.1.2
 Requires-Dist: langchain-google-community>=2.0.3
-Requires-Dist: langchain-google-genai>=2.0.9
-Requires-Dist: langchain-google-vertexai<3.0.0,>=2.1.2
+Requires-Dist: langchain-google-genai>=4.0.0
+Requires-Dist: langchain-google-vertexai<4.0.0,>=3.2.0
 Requires-Dist: langchain-mcp-adapters>=0.1.7
 Requires-Dist: langchain-openai>=0.3.28
 Requires-Dist: langchain>=0.3.27
@@ -41,7 +41,7 @@ Requires-Dist: langgraph-supervisor>=0.0.20
 Requires-Dist: langgraph>=0.6.3
 Requires-Dist: line-bot-sdk>=3.17.1
 Requires-Dist: mcp<1.11.0,>=1.10.1
-Requires-Dist: numpy<2,>=1
+Requires-Dist: numpy>=1.24.0
 Requires-Dist: openai>=1.99.1
 Requires-Dist: pandas>=2.2.3
 Requires-Dist: pdfminer-six==20250506
@@ -9,7 +9,8 @@ botrun_flow_lang/api/auth_utils.py,sha256=KoVTZUMOBaATWvdyjjYKdBjDu8MaQGGvmhE8gn
 botrun_flow_lang/api/botrun_back_api.py,sha256=qNIQqMFZ969XaLE4qsbM659bcoK11o9sy85gfmeO8Kw,2462
 botrun_flow_lang/api/flow_api.py,sha256=I6ZMohJOpuVcs8q2euUjdydz0xYvavRei7f3LQFmjbQ,111
 botrun_flow_lang/api/hatch_api.py,sha256=S-_bNt4Y8oKtlzXW7JA3TuMRFO-Pb4-5OobhnwfiqFE,17492
-botrun_flow_lang/api/langgraph_api.py,sha256=PP0K_H5-BRJsjFGIvZEZubLtQ97FXp35Ts8SGSKxKe8,30102
+botrun_flow_lang/api/langgraph_api.py,sha256=E1FDme6CUnMRXRaQFhp_S-uI4m7vtQo5CZOd0o_X1nA,30228
+botrun_flow_lang/api/langgraph_constants.py,sha256=oxh3Rj940mZ7ekKIiQodvpQs_pek_R0atqgda9yxSV0,411
 botrun_flow_lang/api/line_bot_api.py,sha256=JluAbySIU42zWc0NaMwL1fhfRCEGMjAVWTfwKXp2F0A,56984
 botrun_flow_lang/api/model_api.py,sha256=vkzVvzxxsAhqbiMcVAeqiQoheJVbPLAXBqwoU5PgWMw,9783
 botrun_flow_lang/api/rate_limit_api.py,sha256=zrQ9wFILNqYMiLDM8NqdfcDg87BdyzbBC2Kns89WIGo,980
@@ -24,7 +25,7 @@ botrun_flow_lang/api/youtube_api.py,sha256=9eGr--gR2OoM9JZ6Nf9KqPiE-FeXEx8R-QeJv
 botrun_flow_lang/langgraph_agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 botrun_flow_lang/langgraph_agents/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 botrun_flow_lang/langgraph_agents/agents/agent_runner.py,sha256=tiuPIqAcM8rIWBTjo8NS4owTepCsX3QkIHaUEDakOTc,6673
-botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py,sha256=ANXWs6WKRULX9eECipCj_ivY-mnyt-gcu9_-xmLGKSg,30672
+botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py,sha256=hTQf2lzlQy5n64i32e3iQTPwx7dXS3GMwaZqYt21zMc,31352
 botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py,sha256=hWDPt0U09Gj-3-NNWhsn9xaakYbOcHExIXqcL8TeZxw,32046
 botrun_flow_lang/langgraph_agents/agents/agent_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py,sha256=S4TYt0ZhgdAZ-2ndH8hJoEaIyDKdNJdWHjEZ49Lg_NQ,2427
@@ -40,16 +41,17 @@ botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py,sha256=E
 botrun_flow_lang/langgraph_agents/agents/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py,sha256=JTfH9WJNDlpvMvfzXyZy3bHeCN58MTnEOiamQGMsqh0,2884
 botrun_flow_lang/langgraph_agents/agents/util/html_util.py,sha256=g5yJO0qTqRq_kb-xhSnWX3WAbHDIjNQYl7ErRBPQwHs,13230
-botrun_flow_lang/langgraph_agents/agents/util/img_util.py,sha256=px_ymI5U8WkFujqF-nbxZZNFOKPxJyj5HYV2IQdpYl4,10405
+botrun_flow_lang/langgraph_agents/agents/util/img_util.py,sha256=6OERtpGGimlev4Pb_O1UbMNaT_DMBHSmAgo9gB-R8xk,12385
 botrun_flow_lang/langgraph_agents/agents/util/local_files.py,sha256=b7N4B3P9zPPDj7_C9y8JaU5oROQostCXBt7wfxi_L64,13529
 botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py,sha256=o65I979wA7jzzB_Zp-t2CxBjNdXyFtdTkdofi4bJlb0,2642
 botrun_flow_lang/langgraph_agents/agents/util/model_utils.py,sha256=oeYEwiEtlrNGomKZ98M3F_OvXYjAIoCV9IJCY9eMuug,4954
-botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py,sha256=Q8cuTYvYG5ZSCdjPFiH96dWjXrSXA9DSZpLicKMx_u0,16177
+botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py,sha256=Pu5hZGquvgm1Iy6qWD5SDG2--9tumTHdJ8EGLcTF8LU,20305
 botrun_flow_lang/langgraph_agents/agents/util/pdf_cache.py,sha256=nmHA-qf2px3ywUtC_5kXIQHg-Gl2W8DOSnL8gOFR3xY,7375
 botrun_flow_lang/langgraph_agents/agents/util/pdf_processor.py,sha256=1YbB4zpabQB-8HwRvd4LRyye4oSusLNUW_iJfmUNANw,6244
 botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py,sha256=dZmb4tSECEXWOSZkqdMhFrmnGwMhdHSUJvLT7IAVu_s,19537
 botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py,sha256=WwAPcmMDnQnrsxH_92377G0yRWf-dF-g8uOG9KnkcCk,1972
 botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py,sha256=jjuA8dko9YRSs_LcvMduAsSGDaix3UEzw4cIllVVFh0,6822
+botrun_flow_lang/langgraph_agents/agents/util/usage_metadata.py,sha256=ahjulhlNUdyLG-KNBL-0pQPkxbzpiYKjGR2YqQCF1fA,1207
 botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py,sha256=PHGDpJqRZNHLoLMAFpfpQiz_vlZWG3u53GZQajjjEpI,3007
 botrun_flow_lang/langgraph_agents/cache/__init__.py,sha256=SnKEKUXeTReKzUeNVXfvP3BEZypgKBQ4TKs_-T8ZdtI,36
 botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py,sha256=HgwP7HKIglm24LFy7sIddLNi-nAfmIkxqvkwV6FnxVk,6364
@@ -58,7 +60,7 @@ botrun_flow_lang/llm_agent/llm_agent.py,sha256=1yPws2a5MVouOBnsNudh-c1POviYaz9hn
 botrun_flow_lang/llm_agent/llm_agent_util.py,sha256=cvnkHYH1D1V1_chgIByCb1Cn7iytNxtlJpfrFlYa_a4,3131
 botrun_flow_lang/log/.gitignore,sha256=ZeCRrK8PsUdGyHBMDfCkk1Jl9XrN9VkgJmyeCIUCxGU,18
 botrun_flow_lang/mcp_server/__init__.py,sha256=lbhwcb-QsYmdXA8bS3pSD-CLVbcbCKfl1XeOaUm380Y,218
-botrun_flow_lang/mcp_server/default_mcp.py,sha256=wNzK0WRS62ABFb5f0QuJQMDpNs4yZSreGTzmD8fwXqs,29234
+botrun_flow_lang/mcp_server/default_mcp.py,sha256=azFgYEo0yXwBm-n-SqFwSw2YhcrvgAloS_YNR8eXsO0,33194
 botrun_flow_lang/models/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 botrun_flow_lang/models/token_usage.py,sha256=PHLPwzLaGwFhWzxaHBYcahztlyTZEpRso5XI6pscWVM,876
 botrun_flow_lang/models/nodes/utils.py,sha256=uKCdPYQfzjGf8Bzoy-FSZxtcwVifwnCviaJM4qnEyrI,6904
@@ -97,6 +99,6 @@ botrun_flow_lang/utils/yaml_utils.py,sha256=dPlabIol-Clhnwc7N5nuffCaLSq8dyvmvjRw
 botrun_flow_lang/utils/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 botrun_flow_lang/utils/clients/rate_limit_client.py,sha256=96NNCHB9I5C5bpVFF6sfPhmh4oAx3UdOLb-Z4PAXLdg,8558
 botrun_flow_lang/utils/clients/token_verify_client.py,sha256=-AnYApJ9CvxVn-RhCCZZ2LCrf065fgskhwLKAm-aiN0,5893
-botrun_flow_lang-5.12.264.dist-info/METADATA,sha256=u_ZfQqG71uK5H-aV7BNSjYAHTB5MF6kjW0sh4VOioD0,6221
-botrun_flow_lang-5.12.264.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-botrun_flow_lang-5.12.264.dist-info/RECORD,,
+botrun_flow_lang-6.2.21.dist-info/METADATA,sha256=oF4wNjTpal4vp58e5a0le51qAekPxomZJwOrzR-2uXs,6219
+botrun_flow_lang-6.2.21.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+botrun_flow_lang-6.2.21.dist-info/RECORD,,