botrun-flow-lang 5.9.301__py3-none-any.whl → 5.10.82__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. botrun_flow_lang/api/auth_api.py +39 -39
  2. botrun_flow_lang/api/auth_utils.py +183 -183
  3. botrun_flow_lang/api/botrun_back_api.py +65 -65
  4. botrun_flow_lang/api/flow_api.py +3 -3
  5. botrun_flow_lang/api/hatch_api.py +481 -481
  6. botrun_flow_lang/api/langgraph_api.py +796 -796
  7. botrun_flow_lang/api/line_bot_api.py +1357 -1357
  8. botrun_flow_lang/api/model_api.py +300 -300
  9. botrun_flow_lang/api/rate_limit_api.py +32 -32
  10. botrun_flow_lang/api/routes.py +79 -79
  11. botrun_flow_lang/api/search_api.py +53 -53
  12. botrun_flow_lang/api/storage_api.py +316 -316
  13. botrun_flow_lang/api/subsidy_api.py +290 -290
  14. botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
  15. botrun_flow_lang/api/user_setting_api.py +70 -70
  16. botrun_flow_lang/api/version_api.py +31 -31
  17. botrun_flow_lang/api/youtube_api.py +26 -26
  18. botrun_flow_lang/constants.py +13 -13
  19. botrun_flow_lang/langgraph_agents/agents/agent_runner.py +174 -174
  20. botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
  21. botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
  22. botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
  23. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
  24. botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
  25. botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +548 -542
  26. botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
  27. botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
  28. botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
  29. botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
  30. botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
  31. botrun_flow_lang/langgraph_agents/agents/util/img_util.py +294 -294
  32. botrun_flow_lang/langgraph_agents/agents/util/local_files.py +345 -345
  33. botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
  34. botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
  35. botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +160 -160
  36. botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
  37. botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
  38. botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
  39. botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
  40. botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
  41. botrun_flow_lang/llm_agent/llm_agent.py +19 -19
  42. botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
  43. botrun_flow_lang/log/.gitignore +2 -2
  44. botrun_flow_lang/main.py +61 -61
  45. botrun_flow_lang/main_fast.py +51 -51
  46. botrun_flow_lang/mcp_server/__init__.py +10 -10
  47. botrun_flow_lang/mcp_server/default_mcp.py +711 -711
  48. botrun_flow_lang/models/nodes/utils.py +205 -205
  49. botrun_flow_lang/models/token_usage.py +34 -34
  50. botrun_flow_lang/requirements.txt +21 -21
  51. botrun_flow_lang/services/base/firestore_base.py +30 -30
  52. botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
  53. botrun_flow_lang/services/hatch/hatch_fs_store.py +372 -372
  54. botrun_flow_lang/services/storage/storage_cs_store.py +202 -202
  55. botrun_flow_lang/services/storage/storage_factory.py +12 -12
  56. botrun_flow_lang/services/storage/storage_store.py +65 -65
  57. botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
  58. botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
  59. botrun_flow_lang/static/docs/tools/index.html +926 -926
  60. botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
  61. botrun_flow_lang/tests/api_stress_test.py +357 -357
  62. botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
  63. botrun_flow_lang/tests/test_botrun_app.py +46 -46
  64. botrun_flow_lang/tests/test_html_util.py +31 -31
  65. botrun_flow_lang/tests/test_img_analyzer.py +190 -190
  66. botrun_flow_lang/tests/test_img_util.py +39 -39
  67. botrun_flow_lang/tests/test_local_files.py +114 -114
  68. botrun_flow_lang/tests/test_mermaid_util.py +103 -103
  69. botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
  70. botrun_flow_lang/tests/test_plotly_util.py +151 -151
  71. botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
  72. botrun_flow_lang/tools/generate_docs.py +133 -133
  73. botrun_flow_lang/tools/templates/tools.html +153 -153
  74. botrun_flow_lang/utils/__init__.py +7 -7
  75. botrun_flow_lang/utils/botrun_logger.py +344 -344
  76. botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
  77. botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
  78. botrun_flow_lang/utils/google_drive_utils.py +654 -654
  79. botrun_flow_lang/utils/langchain_utils.py +324 -324
  80. botrun_flow_lang/utils/yaml_utils.py +9 -9
  81. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/METADATA +2 -2
  82. botrun_flow_lang-5.10.82.dist-info/RECORD +99 -0
  83. botrun_flow_lang-5.9.301.dist-info/RECORD +0 -99
  84. {botrun_flow_lang-5.9.301.dist-info → botrun_flow_lang-5.10.82.dist-info}/WHEEL +0 -0
@@ -1,66 +1,66 @@
1
- from typing import AsyncGenerator
2
- from google.generativeai import GenerativeModel
3
- from google.generativeai.protos import Candidate
4
-
5
- from pydantic import BaseModel
6
-
7
-
8
- class GeminiGroundingEvent(BaseModel):
9
- chunk: str
10
-
11
-
12
- async def respond_with_gemini_grounding(
13
- input_content, messages_for_llm
14
- ) -> AsyncGenerator[GeminiGroundingEvent, None]:
15
- model = GenerativeModel("models/gemini-1.5-pro-002")
16
-
17
- # 將 litellm 格式轉換為 Gemini 格式的對話歷史
18
- chat_history = []
19
- for message in messages_for_llm:
20
- role = "user" if message["role"] == "user" else "model"
21
- chat_history.append({"role": role, "parts": [message["content"]]})
22
-
23
- # 建立聊天實例
24
- chat = model.start_chat(history=chat_history)
25
-
26
- # 使用 chat.send() 替代 generate_content
27
- responses = chat.send_message(
28
- input_content,
29
- tools={"google_search_retrieval": {"dynamic_retrieval_config": {}}},
30
- stream=True,
31
- )
32
-
33
- # answer_message = await cl.Message(content="").send()
34
- full_response = ""
35
- for response in responses:
36
- if response.candidates[0].finish_reason != Candidate.FinishReason.STOP:
37
- # await answer_message.stream_token(response.text)
38
- yield GeminiGroundingEvent(chunk=response.text)
39
- full_response += response.text
40
- if response.candidates[0].grounding_metadata:
41
- if len(response.candidates[0].grounding_metadata.grounding_chunks) > 0:
42
- references = f"\n\n{tr('Sources:')}\n"
43
- for grounding_chunk in response.candidates[
44
- 0
45
- ].grounding_metadata.grounding_chunks:
46
- references += f"- [{grounding_chunk.web.title}]({grounding_chunk.web.uri})\n"
47
- # await answer_message.stream_token(references)
48
- yield GeminiGroundingEvent(chunk=references)
49
- else:
50
- if response.candidates[0].grounding_metadata:
51
- if len(response.candidates[0].grounding_metadata.grounding_chunks) > 0:
52
- references = f"\n\n參考來源:\n"
53
- for grounding_chunk in response.candidates[
54
- 0
55
- ].grounding_metadata.grounding_chunks:
56
- references += f"- [{grounding_chunk.web.title}]({grounding_chunk.web.uri})\n"
57
- # await answer_message.stream_token(references)
58
- yield GeminiGroundingEvent(chunk=references)
59
-
60
- # await answer_message.update()
61
-
62
- # chat_history = cl.user_session.get("chat_history", [])
63
- # chat_history.append({"role": "user", "content": input_content})
64
- # chat_history.append({"role": "assistant", "content": full_response})
65
- # cl.user_session.set("chat_history", chat_history)
66
- # return
1
+ from typing import AsyncGenerator
2
+ from google.generativeai import GenerativeModel
3
+ from google.generativeai.protos import Candidate
4
+
5
+ from pydantic import BaseModel
6
+
7
+
8
class GeminiGroundingEvent(BaseModel):
    """One streamed event from a grounded Gemini response.

    Attributes are plain data; the event either carries a chunk of the
    model's answer text or a markdown-formatted list of grounding sources.
    """

    # A piece of streamed answer text, or a rendered source list.
    chunk: str
10
+
11
+
12
async def respond_with_gemini_grounding(
    input_content, messages_for_llm
) -> AsyncGenerator[GeminiGroundingEvent, None]:
    """Stream a Gemini answer with Google Search grounding enabled.

    Args:
        input_content: The user's current message, passed verbatim to the model.
        messages_for_llm: Prior conversation in litellm format — a sequence of
            dicts with "role" and "content" keys.

    Yields:
        GeminiGroundingEvent: one event per streamed text chunk, plus an event
        carrying a markdown source list whenever the response candidate
        includes grounding metadata with at least one grounding chunk.
    """
    model = GenerativeModel("models/gemini-1.5-pro-002")

    # Convert litellm-format history into Gemini chat-history format:
    # any non-"user" role is mapped to "model".
    chat_history = [
        {
            "role": "user" if message["role"] == "user" else "model",
            "parts": [message["content"]],
        }
        for message in messages_for_llm
    ]

    chat = model.start_chat(history=chat_history)

    # Stream the reply with dynamic Google Search retrieval enabled.
    responses = chat.send_message(
        input_content,
        tools={"google_search_retrieval": {"dynamic_retrieval_config": {}}},
        stream=True,
    )

    # Accumulated for potential chat-history persistence (see the commented-out
    # session code the original carried); currently unused beyond accumulation.
    full_response = ""
    for response in responses:
        candidate = response.candidates[0]
        if candidate.finish_reason != Candidate.FinishReason.STOP:
            yield GeminiGroundingEvent(chunk=response.text)
            full_response += response.text
        # Bug fix: the original non-STOP branch formatted its source header
        # with an undefined tr() helper, raising NameError whenever grounding
        # chunks arrived on a non-final response. Both branches performed the
        # same metadata check and markdown formatting; they now share one
        # helper that uses the working header string from the STOP branch.
        references = _format_grounding_references(candidate)
        if references:
            yield GeminiGroundingEvent(chunk=references)


def _format_grounding_references(candidate) -> str:
    """Render *candidate*'s grounding chunks as a markdown source list.

    Returns an empty string when the candidate has no grounding metadata or
    no grounding chunks, so callers can use the result's truthiness directly.
    """
    metadata = candidate.grounding_metadata
    if not metadata or len(metadata.grounding_chunks) == 0:
        return ""
    parts = ["\n\n參考來源:\n"]
    for grounding_chunk in metadata.grounding_chunks:
        parts.append(
            f"- [{grounding_chunk.web.title}]({grounding_chunk.web.uri})\n"
        )
    return "".join(parts)