botrun-flow-lang 5.9.301__py3-none-any.whl → 5.10.82__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- botrun_flow_lang/api/auth_api.py +39 -39
- botrun_flow_lang/api/auth_utils.py +183 -183
- botrun_flow_lang/api/botrun_back_api.py +65 -65
- botrun_flow_lang/api/flow_api.py +3 -3
- botrun_flow_lang/api/hatch_api.py +481 -481
- botrun_flow_lang/api/langgraph_api.py +796 -796
- botrun_flow_lang/api/line_bot_api.py +1357 -1357
- botrun_flow_lang/api/model_api.py +300 -300
- botrun_flow_lang/api/rate_limit_api.py +32 -32
- botrun_flow_lang/api/routes.py +79 -79
- botrun_flow_lang/api/search_api.py +53 -53
- botrun_flow_lang/api/storage_api.py +316 -316
- botrun_flow_lang/api/subsidy_api.py +290 -290
- botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
- botrun_flow_lang/api/user_setting_api.py +70 -70
- botrun_flow_lang/api/version_api.py +31 -31
- botrun_flow_lang/api/youtube_api.py +26 -26
- botrun_flow_lang/constants.py +13 -13
- botrun_flow_lang/langgraph_agents/agents/agent_runner.py +174 -174
- botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
- botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +548 -542
- botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
- botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
- botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
- botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
- botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
- botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
- botrun_flow_lang/langgraph_agents/agents/util/local_files.py +345 -345
- botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
- botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
- botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +160 -160
- botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
- botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
- botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
- botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
- botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
- botrun_flow_lang/llm_agent/llm_agent.py +19 -19
- botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
- botrun_flow_lang/log/.gitignore +2 -2
- botrun_flow_lang/main.py +61 -61
- botrun_flow_lang/main_fast.py +51 -51
- botrun_flow_lang/mcp_server/__init__.py +10 -10
- botrun_flow_lang/mcp_server/default_mcp.py +711 -711
- botrun_flow_lang/models/nodes/utils.py +205 -205
- botrun_flow_lang/models/token_usage.py +34 -34
- botrun_flow_lang/requirements.txt +21 -21
- botrun_flow_lang/services/base/firestore_base.py +30 -30
- botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
- botrun_flow_lang/services/hatch/hatch_fs_store.py +372 -372
- botrun_flow_lang/services/storage/storage_cs_store.py +202 -202
- botrun_flow_lang/services/storage/storage_factory.py +12 -12
- botrun_flow_lang/services/storage/storage_store.py +65 -65
- botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
- botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
- botrun_flow_lang/static/docs/tools/index.html +926 -926
- botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
- botrun_flow_lang/tests/api_stress_test.py +357 -357
- botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
- botrun_flow_lang/tests/test_botrun_app.py +46 -46
- botrun_flow_lang/tests/test_html_util.py +31 -31
- botrun_flow_lang/tests/test_img_analyzer.py +190 -190
- botrun_flow_lang/tests/test_img_util.py +39 -39
- botrun_flow_lang/tests/test_local_files.py +114 -114
- botrun_flow_lang/tests/test_mermaid_util.py +103 -103
- botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
- botrun_flow_lang/tests/test_plotly_util.py +151 -151
- botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
- botrun_flow_lang/tools/generate_docs.py +133 -133
- botrun_flow_lang/tools/templates/tools.html +153 -153
- botrun_flow_lang/utils/__init__.py +7 -7
- botrun_flow_lang/utils/botrun_logger.py +344 -344
- botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
- botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
- botrun_flow_lang/utils/google_drive_utils.py +654 -654
- botrun_flow_lang/utils/langchain_utils.py +324 -324
- botrun_flow_lang/utils/yaml_utils.py +9 -9
- {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/METADATA +2 -2
- botrun_flow_lang-5.10.82.dist-info/RECORD +99 -0
- botrun_flow_lang-5.9.301.dist-info/RECORD +0 -99
- {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/WHEEL +0 -0
botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py (@@ -1,66 +1,66 @@): the recorded diff removes and re-adds every line of the file, and the removed and added bodies are identical, so the file is reproduced once below (comments translated from Chinese).

```python
from typing import AsyncGenerator
from google.generativeai import GenerativeModel
from google.generativeai.protos import Candidate

from pydantic import BaseModel


class GeminiGroundingEvent(BaseModel):
    chunk: str


async def respond_with_gemini_grounding(
    input_content, messages_for_llm
) -> AsyncGenerator[GeminiGroundingEvent, None]:
    model = GenerativeModel("models/gemini-1.5-pro-002")

    # Convert the litellm-format messages into Gemini-format chat history
    chat_history = []
    for message in messages_for_llm:
        role = "user" if message["role"] == "user" else "model"
        chat_history.append({"role": role, "parts": [message["content"]]})

    # Create a chat session
    chat = model.start_chat(history=chat_history)

    # Use chat.send() in place of generate_content
    responses = chat.send_message(
        input_content,
        tools={"google_search_retrieval": {"dynamic_retrieval_config": {}}},
        stream=True,
    )

    # answer_message = await cl.Message(content="").send()
    full_response = ""
    for response in responses:
        if response.candidates[0].finish_reason != Candidate.FinishReason.STOP:
            # await answer_message.stream_token(response.text)
            yield GeminiGroundingEvent(chunk=response.text)
            full_response += response.text
            if response.candidates[0].grounding_metadata:
                if len(response.candidates[0].grounding_metadata.grounding_chunks) > 0:
                    references = f"\n\n{tr('Sources:')}\n"
                    for grounding_chunk in response.candidates[
                        0
                    ].grounding_metadata.grounding_chunks:
                        references += f"- [{grounding_chunk.web.title}]({grounding_chunk.web.uri})\n"
                    # await answer_message.stream_token(references)
                    yield GeminiGroundingEvent(chunk=references)
        else:
            if response.candidates[0].grounding_metadata:
                if len(response.candidates[0].grounding_metadata.grounding_chunks) > 0:
                    references = f"\n\n參考來源:\n"  # "參考來源" = "Sources"
                    for grounding_chunk in response.candidates[
                        0
                    ].grounding_metadata.grounding_chunks:
                        references += f"- [{grounding_chunk.web.title}]({grounding_chunk.web.uri})\n"
                    # await answer_message.stream_token(references)
                    yield GeminiGroundingEvent(chunk=references)

    # await answer_message.update()

    # chat_history = cl.user_session.get("chat_history", [])
    # chat_history.append({"role": "user", "content": input_content})
    # chat_history.append({"role": "assistant", "content": full_response})
    # cl.user_session.set("chat_history", chat_history)
    # return
```