botrun-flow-lang 5.11.11__py3-none-any.whl → 5.11.281__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- botrun_flow_lang/api/line_bot_api.py +148 -5
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +31 -0
- {botrun_flow_lang-5.11.11.dist-info → botrun_flow_lang-5.11.281.dist-info}/METADATA +1 -1
- {botrun_flow_lang-5.11.11.dist-info → botrun_flow_lang-5.11.281.dist-info}/RECORD +5 -5
- {botrun_flow_lang-5.11.11.dist-info → botrun_flow_lang-5.11.281.dist-info}/WHEEL +1 -1
|
@@ -100,6 +100,11 @@ BOTRUN_FRONT_URL = os.getenv("BOTRUN_FRONT_URL", None)
|
|
|
100
100
|
SUBSIDY_API_TOKEN = os.getenv("SUBSIDY_API_TOKEN", None)
|
|
101
101
|
SUBSIDY_API_URL = os.getenv("SUBSIDY_API_URL", "https://p271-subsidy-ie7vwovclq-de.a.run.app/v1/generateContent")
|
|
102
102
|
|
|
103
|
+
# Environment variables for the BigQuery token-logging API.
# BIGQUERY_TOKEN_LOG_ENABLED is a string flag ("true"/"false", case-insensitive);
# the default model name is used when usage metadata does not carry one.
BIGQUERY_TOKEN_LOG_API_URL = os.getenv("BIGQUERY_TOKEN_LOG_API_URL", "http://localhost:8002/api/v1/logs/text")
BIGQUERY_TOKEN_LOG_ENABLED = os.getenv("BIGQUERY_TOKEN_LOG_ENABLED", "true").lower() == "true"
SUBSIDY_LINE_BOT_MODEL_NAME = os.getenv("SUBSIDY_LINE_BOT_MODEL_NAME", "gemini-2.0-flash-thinking-exp")
|
|
107
|
+
|
|
103
108
|
# 全局變數
|
|
104
109
|
# 用於追蹤正在處理訊息的使用者,避免同一使用者同時發送多條訊息造成處理衝突
|
|
105
110
|
_processing_users = set()
|
|
@@ -202,6 +207,111 @@ async def log_to_bigquery(
|
|
|
202
207
|
)
|
|
203
208
|
|
|
204
209
|
|
|
210
|
+
async def log_tokens_to_bigquery(
    user_id: str,
    display_name: str,
    log_content: str,
    model: str,
    input_tokens: int | None,
    output_tokens: int | None,
    total_tokens: int | None,
    request: Request,
    session_id: str = "",
) -> None:
    """
    Log token usage to the BigQuery logging API.

    Best-effort / fire-and-forget: every failure path (timeout, network
    error, anything unexpected) is caught and logged, so an outage of the
    logging API can never break the caller (the LINE webhook handler).

    Args:
        user_id: LINE user ID
        display_name: User display name
        log_content: User input message (caller passes a JSON string)
        model: AI model used
        input_tokens: Input token count (None when unknown)
        output_tokens: Output token count (None when unknown)
        total_tokens: Total token count (None when unknown)
        request: FastAPI Request object (only the User-Agent header is read)
        session_id: Session ID (optional; defaults to user_id)
    """
    # Feature toggle: short-circuit when token logging is disabled.
    if not BIGQUERY_TOKEN_LOG_ENABLED:
        logging.debug("[Token Logger] BigQuery token logging is disabled")
        return

    start_time = time.time()

    try:
        tz = pytz.timezone("Asia/Taipei")
        current_time = datetime.now(tz)

        # Assemble the payload expected by the logging API.
        # NOTE(review): strftime appends a literal "Z" (UTC designator) to an
        # Asia/Taipei (+08:00) local time, so the timestamp is mislabeled —
        # confirm what the downstream consumer expects before changing it.
        payload = {
            "action_details": log_content,
            "action_type": "call_subsidy_api",
            "botrun": "subsidy_line_bot",
            "dataset_name": os.getenv("BOTRUN_LOG_DATASET_NAME", "subsidy_line_bot"),
            "department": os.getenv("BOTRUN_LOG_DEPARTMENT", "subsidy_line_bot"),
            "developer": "",
            "domain_name": "subsidy_line_bot",
            "input_tokens": input_tokens,
            "model": model,
            "output_tokens": output_tokens,
            "resource_id": json.dumps({
                "user_id": user_id,
                "timestamp": current_time.strftime("%Y-%m-%dT%H:%M:%SZ")
            }),
            "session_id": session_id or user_id,
            "total_tokens": total_tokens,
            "user_agent": request.headers.get("user-agent", "Line Platform"),
            "user_name": display_name
        }

        logging.info(
            f"[Token Logger] Logging tokens for user {display_name} ({user_id}): "
            f"input={input_tokens}, output={output_tokens}, total={total_tokens}"
        )

        # Async HTTP call via aiohttp so the webhook is not blocked.
        timeout = aiohttp.ClientTimeout(total=10)  # 10-second overall timeout
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.post(
                BIGQUERY_TOKEN_LOG_API_URL,
                json=payload,
                headers={"Content-Type": "application/json"}
            ) as response:
                response_text = await response.text()

                if response.status == 200:
                    elapsed_time = time.time() - start_time
                    logging.info(
                        f"[Token Logger] Successfully logged tokens to BigQuery for user "
                        f"{display_name} ({user_id}), elapsed time: {elapsed_time:.3f}s"
                    )
                else:
                    logging.error(
                        f"[Token Logger] Failed to log tokens, API returned status {response.status}: {response_text}"
                    )

    except asyncio.TimeoutError:
        # aiohttp surfaces its total-timeout as asyncio.TimeoutError.
        elapsed_time = time.time() - start_time
        logging.error(
            f"[Token Logger] Timeout while logging tokens for user {display_name} ({user_id}), "
            f"elapsed time: {elapsed_time:.3f}s"
        )
    except aiohttp.ClientError as e:
        elapsed_time = time.time() - start_time
        logging.error(
            f"[Token Logger] Network error while logging tokens for user {display_name} ({user_id}): {e}, "
            f"elapsed time: {elapsed_time:.3f}s"
        )
    except Exception as e:
        elapsed_time = time.time() - start_time
        # Fix: route the traceback through the logging system instead of
        # traceback.print_exc(), which writes to stderr and bypasses the
        # configured logging handlers/formatters.
        logging.exception(
            f"[Token Logger] Unexpected error while logging tokens for user {display_name} ({user_id}): {e}, "
            f"elapsed time: {elapsed_time:.3f}s"
        )
|
|
313
|
+
|
|
314
|
+
|
|
205
315
|
def get_prompt_from_google_doc(tag_name: str, fallback_prompt: str = ""):
|
|
206
316
|
"""
|
|
207
317
|
從 Google 文件中提取指定標籤的內容
|
|
@@ -736,13 +846,37 @@ async def handle_message(
|
|
|
736
846
|
_processing_users.add(user_id)
|
|
737
847
|
|
|
738
848
|
try:
|
|
739
|
-
reply_text, related_questions = await get_reply_text(
|
|
849
|
+
reply_text, related_questions, usage_metadata = await get_reply_text(
|
|
740
850
|
user_message, user_id, display_name, request
|
|
741
851
|
)
|
|
742
852
|
logging.info(
|
|
743
853
|
f"[Line Bot Webhook: handle_message] Total response length: {len(reply_text)}"
|
|
744
854
|
)
|
|
745
855
|
|
|
856
|
+
# 記錄 token 使用量到 BigQuery (非阻塞式)
|
|
857
|
+
if usage_metadata:
|
|
858
|
+
# 把 user_message 跟 reply_text 合併成 json 格式紀錄
|
|
859
|
+
log_content = json.dumps(
|
|
860
|
+
{
|
|
861
|
+
"user_message": user_message,
|
|
862
|
+
"reply_text": reply_text
|
|
863
|
+
},
|
|
864
|
+
ensure_ascii=False
|
|
865
|
+
)
|
|
866
|
+
asyncio.create_task(
|
|
867
|
+
log_tokens_to_bigquery(
|
|
868
|
+
user_id=user_id,
|
|
869
|
+
display_name=display_name,
|
|
870
|
+
log_content=log_content,
|
|
871
|
+
model=usage_metadata.get("model", SUBSIDY_LINE_BOT_MODEL_NAME),
|
|
872
|
+
input_tokens=usage_metadata.get("promptTokenCount", None),
|
|
873
|
+
output_tokens=usage_metadata.get("candidatesTokenCount", None),
|
|
874
|
+
total_tokens=usage_metadata.get("totalTokenCount", None),
|
|
875
|
+
request=request,
|
|
876
|
+
session_id=user_id,
|
|
877
|
+
)
|
|
878
|
+
)
|
|
879
|
+
|
|
746
880
|
# 將長訊息分段,每段不超過 LINE_MAX_MESSAGE_LENGTH
|
|
747
881
|
message_chunks = []
|
|
748
882
|
remaining_text = reply_text
|
|
@@ -934,7 +1068,7 @@ async def get_reply_text(
|
|
|
934
1068
|
user_id: str,
|
|
935
1069
|
display_name: str,
|
|
936
1070
|
request: Request,
|
|
937
|
-
) -> tuple[str, list]:
|
|
1071
|
+
) -> tuple[str, list, dict]:
|
|
938
1072
|
"""
|
|
939
1073
|
使用外部 API 處理使用者訊息並回傳回覆內容
|
|
940
1074
|
|
|
@@ -945,7 +1079,7 @@ async def get_reply_text(
|
|
|
945
1079
|
request (Request): FastAPI request 物件,用於記錄到 BigQuery
|
|
946
1080
|
|
|
947
1081
|
Returns:
|
|
948
|
-
tuple[str, list]:
|
|
1082
|
+
tuple[str, list, dict]: 包含回覆訊息、相關問題和 token 使用量的元組
|
|
949
1083
|
"""
|
|
950
1084
|
start_time = time.time()
|
|
951
1085
|
|
|
@@ -961,6 +1095,15 @@ async def get_reply_text(
|
|
|
961
1095
|
system_instruction=system_instruction
|
|
962
1096
|
)
|
|
963
1097
|
|
|
1098
|
+
# 提取 token 使用量資訊
|
|
1099
|
+
usage_metadata = api_response.get("usageMetadata", {})
|
|
1100
|
+
logging.info(
|
|
1101
|
+
f"[Line Bot Webhook: get_reply_text] Token usage: "
|
|
1102
|
+
f"input={usage_metadata.get('promptTokenCount', 0)}, "
|
|
1103
|
+
f"output={usage_metadata.get('candidatesTokenCount', 0)}, "
|
|
1104
|
+
f"total={usage_metadata.get('totalTokenCount', 0)}"
|
|
1105
|
+
)
|
|
1106
|
+
|
|
964
1107
|
# 從 API 回應中提取文字內容
|
|
965
1108
|
full_response = ""
|
|
966
1109
|
if "candidates" in api_response and len(api_response["candidates"]) > 0:
|
|
@@ -1003,7 +1146,7 @@ async def get_reply_text(
|
|
|
1003
1146
|
f"[Line Bot Webhook: get_reply_text] total took {time.time() - start_time:.3f}s"
|
|
1004
1147
|
)
|
|
1005
1148
|
|
|
1006
|
-
return full_response, related_questions
|
|
1149
|
+
return full_response, related_questions, usage_metadata
|
|
1007
1150
|
|
|
1008
1151
|
except Exception as e:
|
|
1009
1152
|
import traceback
|
|
@@ -1012,7 +1155,7 @@ async def get_reply_text(
|
|
|
1012
1155
|
|
|
1013
1156
|
# 返回錯誤訊息
|
|
1014
1157
|
error_message = "抱歉,處理您的訊息時遇到問題,請稍後再試。"
|
|
1015
|
-
return error_message, []
|
|
1158
|
+
return error_message, [], {}
|
|
1016
1159
|
|
|
1017
1160
|
|
|
1018
1161
|
async def handle_feedback(
|
|
@@ -142,11 +142,42 @@ def get_react_agent_model_name(model_name: str = ""):
|
|
|
142
142
|
|
|
143
143
|
ANTHROPIC_MAX_TOKENS = 64000
|
|
144
144
|
GEMINI_MAX_TOKENS = 32000
|
|
145
|
+
TAIDE_MAX_TOKENS = 8192
|
|
145
146
|
|
|
146
147
|
|
|
147
148
|
def get_react_agent_model(model_name: str = ""):
|
|
148
149
|
final_model_name = get_react_agent_model_name(model_name).strip()
|
|
149
150
|
|
|
151
|
+
# 處理 taide/ 前綴的模型
|
|
152
|
+
if final_model_name.startswith("taide/"):
|
|
153
|
+
taide_api_key = os.getenv("TAIDE_API_KEY", "")
|
|
154
|
+
taide_base_url = os.getenv("TAIDE_BASE_URL", "")
|
|
155
|
+
|
|
156
|
+
if not taide_api_key or not taide_base_url:
|
|
157
|
+
raise ValueError(
|
|
158
|
+
f"Model name starts with 'taide/' but TAIDE_API_KEY or TAIDE_BASE_URL not set. "
|
|
159
|
+
f"Both environment variables are required for: {final_model_name}"
|
|
160
|
+
)
|
|
161
|
+
|
|
162
|
+
# 取得 taide/ 後面的模型名稱
|
|
163
|
+
taide_model_name = final_model_name[len("taide/"):]
|
|
164
|
+
|
|
165
|
+
if not taide_model_name:
|
|
166
|
+
raise ValueError(
|
|
167
|
+
f"Invalid taide model format: {final_model_name}. "
|
|
168
|
+
"Expected format: taide/<model_name>"
|
|
169
|
+
)
|
|
170
|
+
|
|
171
|
+
model = ChatOpenAI(
|
|
172
|
+
openai_api_key=taide_api_key,
|
|
173
|
+
openai_api_base=taide_base_url,
|
|
174
|
+
model_name=taide_model_name,
|
|
175
|
+
temperature=0,
|
|
176
|
+
max_tokens=TAIDE_MAX_TOKENS,
|
|
177
|
+
)
|
|
178
|
+
logger.info(f"model ChatOpenAI (TAIDE) {taide_model_name} @ {taide_base_url}")
|
|
179
|
+
return model
|
|
180
|
+
|
|
150
181
|
# 處理 vertexai/ 前綴的模型
|
|
151
182
|
if final_model_name.startswith("vertex-ai/"):
|
|
152
183
|
vertex_project = os.getenv("VERTEX_AI_LANGCHAIN_PROJECT", "")
|
|
@@ -10,7 +10,7 @@ botrun_flow_lang/api/botrun_back_api.py,sha256=mE2NSejaYIiE0L9GmNJbLc_FRWCy6BXlc
|
|
|
10
10
|
botrun_flow_lang/api/flow_api.py,sha256=DcxuoGE1OcbTgLSYKZ2SO9IdcH3UB5Ik3cVmX3v3-Po,108
|
|
11
11
|
botrun_flow_lang/api/hatch_api.py,sha256=qZG-Wwi_8SHPuWNfbt-dhz-O41VYetTxrJzcVjHbJCo,15913
|
|
12
12
|
botrun_flow_lang/api/langgraph_api.py,sha256=zqu0xeTiy2Pr4UL6vvGqVVAy2KX3ZUn1uzcq-Tfb_aM,29291
|
|
13
|
-
botrun_flow_lang/api/line_bot_api.py,sha256=
|
|
13
|
+
botrun_flow_lang/api/line_bot_api.py,sha256=__Rul_JWd7KwtgGIyziOKxD9PSconkjuN0dmI8JEwjg,54930
|
|
14
14
|
botrun_flow_lang/api/model_api.py,sha256=bXemey_XUUdylZwh7Z10eksoBWe9xSa8I9TEL7jIBtE,9483
|
|
15
15
|
botrun_flow_lang/api/rate_limit_api.py,sha256=SkpjfvShHRdP5XJzy3DdrH4jLtdYAEHROGBMBkC9OIY,948
|
|
16
16
|
botrun_flow_lang/api/routes.py,sha256=rd0IoMsteJT9BO3MQuyXirhPQbas6OeiKaEC8Yf2SZs,1570
|
|
@@ -24,7 +24,7 @@ botrun_flow_lang/api/youtube_api.py,sha256=R384jNRheMKnDyzvlLnbzackipZhiLYTZl4w4
|
|
|
24
24
|
botrun_flow_lang/langgraph_agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
25
25
|
botrun_flow_lang/langgraph_agents/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
26
26
|
botrun_flow_lang/langgraph_agents/agents/agent_runner.py,sha256=fOZgHDsCA_EDTTGQFBmhGUhpfLB3m_N6YW2UHgMpKBg,6241
|
|
27
|
-
botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py,sha256=
|
|
27
|
+
botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py,sha256=4fK_hMoUAqcEYv7rrHbAx6PFsJ7UcvGI0G2OgWhVhnw,29972
|
|
28
28
|
botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py,sha256=6fz-ewLQGacEx-uqGfF3-go9FdiioiMzW_sfANzYTcI,31182
|
|
29
29
|
botrun_flow_lang/langgraph_agents/agents/agent_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
30
30
|
botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py,sha256=CgEhfGR28Rq7ui9cKxj_DczfNfjJNXIP9DXQNIwBLv0,2350
|
|
@@ -95,6 +95,6 @@ botrun_flow_lang/utils/yaml_utils.py,sha256=1A6PSEE8TM0HSD_6l-fhUsjYnXJcrEKuPgot
|
|
|
95
95
|
botrun_flow_lang/utils/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
96
96
|
botrun_flow_lang/utils/clients/rate_limit_client.py,sha256=TRpA56OKrfYsoLoJ-TPYlC7Znp9s267-u6CX6BLyVko,8349
|
|
97
97
|
botrun_flow_lang/utils/clients/token_verify_client.py,sha256=BtrfLvMe-DtS8UKeDhaIkVKDZHphZVP7kyqXn9jhXEc,5740
|
|
98
|
-
botrun_flow_lang-5.11.
|
|
99
|
-
botrun_flow_lang-5.11.
|
|
100
|
-
botrun_flow_lang-5.11.
|
|
98
|
+
botrun_flow_lang-5.11.281.dist-info/METADATA,sha256=BN-Ufk4sSRSkuAr3c8mIbKfh78gZX6yOMMxzq4oRSMo,6192
|
|
99
|
+
botrun_flow_lang-5.11.281.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
100
|
+
botrun_flow_lang-5.11.281.dist-info/RECORD,,
|