botrun-flow-lang 5.12.263__py3-none-any.whl → 6.2.21__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- botrun_flow_lang/api/auth_api.py +39 -39
- botrun_flow_lang/api/auth_utils.py +183 -183
- botrun_flow_lang/api/botrun_back_api.py +65 -65
- botrun_flow_lang/api/flow_api.py +3 -3
- botrun_flow_lang/api/hatch_api.py +508 -508
- botrun_flow_lang/api/langgraph_api.py +816 -811
- botrun_flow_lang/api/langgraph_constants.py +11 -0
- botrun_flow_lang/api/line_bot_api.py +1484 -1484
- botrun_flow_lang/api/model_api.py +300 -300
- botrun_flow_lang/api/rate_limit_api.py +32 -32
- botrun_flow_lang/api/routes.py +79 -79
- botrun_flow_lang/api/search_api.py +53 -53
- botrun_flow_lang/api/storage_api.py +395 -395
- botrun_flow_lang/api/subsidy_api.py +290 -290
- botrun_flow_lang/api/subsidy_api_system_prompt.txt +109 -109
- botrun_flow_lang/api/user_setting_api.py +70 -70
- botrun_flow_lang/api/version_api.py +31 -31
- botrun_flow_lang/api/youtube_api.py +26 -26
- botrun_flow_lang/constants.py +13 -13
- botrun_flow_lang/langgraph_agents/agents/agent_runner.py +178 -178
- botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py +77 -77
- botrun_flow_lang/langgraph_agents/agents/checkpointer/firestore_checkpointer.py +666 -666
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/GOV_RESEARCHER_PRD.md +192 -192
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gemini_subsidy_graph.py +460 -460
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_2_graph.py +1002 -1002
- botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py +822 -822
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +730 -723
- botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py +864 -864
- botrun_flow_lang/langgraph_agents/agents/tools/__init__.py +4 -4
- botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py +376 -376
- botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py +66 -66
- botrun_flow_lang/langgraph_agents/agents/util/html_util.py +316 -316
- botrun_flow_lang/langgraph_agents/agents/util/img_util.py +336 -294
- botrun_flow_lang/langgraph_agents/agents/util/local_files.py +419 -419
- botrun_flow_lang/langgraph_agents/agents/util/mermaid_util.py +86 -86
- botrun_flow_lang/langgraph_agents/agents/util/model_utils.py +143 -143
- botrun_flow_lang/langgraph_agents/agents/util/pdf_analyzer.py +562 -486
- botrun_flow_lang/langgraph_agents/agents/util/pdf_cache.py +250 -250
- botrun_flow_lang/langgraph_agents/agents/util/pdf_processor.py +204 -204
- botrun_flow_lang/langgraph_agents/agents/util/perplexity_search.py +464 -464
- botrun_flow_lang/langgraph_agents/agents/util/plotly_util.py +59 -59
- botrun_flow_lang/langgraph_agents/agents/util/tavily_search.py +199 -199
- botrun_flow_lang/langgraph_agents/agents/util/usage_metadata.py +34 -0
- botrun_flow_lang/langgraph_agents/agents/util/youtube_util.py +90 -90
- botrun_flow_lang/langgraph_agents/cache/langgraph_botrun_cache.py +197 -197
- botrun_flow_lang/llm_agent/llm_agent.py +19 -19
- botrun_flow_lang/llm_agent/llm_agent_util.py +83 -83
- botrun_flow_lang/log/.gitignore +2 -2
- botrun_flow_lang/main.py +61 -61
- botrun_flow_lang/main_fast.py +51 -51
- botrun_flow_lang/mcp_server/__init__.py +10 -10
- botrun_flow_lang/mcp_server/default_mcp.py +854 -744
- botrun_flow_lang/models/nodes/utils.py +205 -205
- botrun_flow_lang/models/token_usage.py +34 -34
- botrun_flow_lang/requirements.txt +21 -21
- botrun_flow_lang/services/base/firestore_base.py +30 -30
- botrun_flow_lang/services/hatch/hatch_factory.py +11 -11
- botrun_flow_lang/services/hatch/hatch_fs_store.py +419 -419
- botrun_flow_lang/services/storage/storage_cs_store.py +206 -206
- botrun_flow_lang/services/storage/storage_factory.py +12 -12
- botrun_flow_lang/services/storage/storage_store.py +65 -65
- botrun_flow_lang/services/user_setting/user_setting_factory.py +9 -9
- botrun_flow_lang/services/user_setting/user_setting_fs_store.py +66 -66
- botrun_flow_lang/static/docs/tools/index.html +926 -926
- botrun_flow_lang/tests/api_functional_tests.py +1525 -1525
- botrun_flow_lang/tests/api_stress_test.py +357 -357
- botrun_flow_lang/tests/shared_hatch_tests.py +333 -333
- botrun_flow_lang/tests/test_botrun_app.py +46 -46
- botrun_flow_lang/tests/test_html_util.py +31 -31
- botrun_flow_lang/tests/test_img_analyzer.py +190 -190
- botrun_flow_lang/tests/test_img_util.py +39 -39
- botrun_flow_lang/tests/test_local_files.py +114 -114
- botrun_flow_lang/tests/test_mermaid_util.py +103 -103
- botrun_flow_lang/tests/test_pdf_analyzer.py +104 -104
- botrun_flow_lang/tests/test_plotly_util.py +151 -151
- botrun_flow_lang/tests/test_run_workflow_engine.py +65 -65
- botrun_flow_lang/tools/generate_docs.py +133 -133
- botrun_flow_lang/tools/templates/tools.html +153 -153
- botrun_flow_lang/utils/__init__.py +7 -7
- botrun_flow_lang/utils/botrun_logger.py +344 -344
- botrun_flow_lang/utils/clients/rate_limit_client.py +209 -209
- botrun_flow_lang/utils/clients/token_verify_client.py +153 -153
- botrun_flow_lang/utils/google_drive_utils.py +654 -654
- botrun_flow_lang/utils/langchain_utils.py +324 -324
- botrun_flow_lang/utils/yaml_utils.py +9 -9
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/METADATA +6 -6
- botrun_flow_lang-6.2.21.dist-info/RECORD +104 -0
- botrun_flow_lang-5.12.263.dist-info/RECORD +0 -102
- {botrun_flow_lang-5.12.263.dist-info → botrun_flow_lang-6.2.21.dist-info}/WHEEL +0 -0
|
@@ -1,86 +1,86 @@
|
|
|
1
|
-
import os
|
|
2
|
-
from tempfile import NamedTemporaryFile
|
|
3
|
-
from typing import Dict, Any, Optional
|
|
4
|
-
from .local_files import upload_and_get_tmp_public_url, upload_html_and_get_public_url
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
async def generate_mermaid_files(
|
|
8
|
-
mermaid_data: str,
|
|
9
|
-
botrun_flow_lang_url: str,
|
|
10
|
-
user_id: str,
|
|
11
|
-
title: Optional[str] = None,
|
|
12
|
-
) -> str:
|
|
13
|
-
"""
|
|
14
|
-
Generate mermaid HTML file from mermaid definition and upload it to GCS.
|
|
15
|
-
|
|
16
|
-
Args:
|
|
17
|
-
mermaid_data: Mermaid diagram definition string
|
|
18
|
-
botrun_flow_lang_url: URL for the botrun flow lang API
|
|
19
|
-
user_id: User ID for file upload
|
|
20
|
-
title: Optional title for the diagram
|
|
21
|
-
|
|
22
|
-
Returns:
|
|
23
|
-
str: URL for the HTML file or error message starting with "Error: "
|
|
24
|
-
"""
|
|
25
|
-
try:
|
|
26
|
-
# Create HTML content with mermaid
|
|
27
|
-
html_content = f"""
|
|
28
|
-
<!DOCTYPE html>
|
|
29
|
-
<html>
|
|
30
|
-
<head>
|
|
31
|
-
<meta charset="utf-8">
|
|
32
|
-
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
|
|
33
|
-
<title>{title if title else 'Mermaid Diagram'}</title>
|
|
34
|
-
<style>
|
|
35
|
-
body {{
|
|
36
|
-
font-family: "Microsoft JhengHei", "微軟正黑體", "Heiti TC", "黑體-繁", sans-serif;
|
|
37
|
-
}}
|
|
38
|
-
.mermaid {{
|
|
39
|
-
margin: 20px;
|
|
40
|
-
}}
|
|
41
|
-
</style>
|
|
42
|
-
<script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
|
|
43
|
-
<script>
|
|
44
|
-
mermaid.initialize({{
|
|
45
|
-
startOnLoad: true,
|
|
46
|
-
theme: 'default',
|
|
47
|
-
themeVariables: {{
|
|
48
|
-
fontFamily: '"Microsoft JhengHei", "微軟正黑體", "Heiti TC", "黑體-繁", sans-serif'
|
|
49
|
-
}}
|
|
50
|
-
}});
|
|
51
|
-
</script>
|
|
52
|
-
</head>
|
|
53
|
-
<body>
|
|
54
|
-
<h1>{title if title else 'Mermaid Diagram'}</h1>
|
|
55
|
-
<div class="mermaid">
|
|
56
|
-
{mermaid_data}
|
|
57
|
-
</div>
|
|
58
|
-
</body>
|
|
59
|
-
</html>
|
|
60
|
-
"""
|
|
61
|
-
|
|
62
|
-
# Create temporary file
|
|
63
|
-
with NamedTemporaryFile(
|
|
64
|
-
suffix=".html", mode="w", encoding="utf-8", delete=False
|
|
65
|
-
) as html_temp:
|
|
66
|
-
try:
|
|
67
|
-
# Save HTML content
|
|
68
|
-
html_temp.write(html_content)
|
|
69
|
-
html_temp.flush()
|
|
70
|
-
|
|
71
|
-
# Upload file to GCS
|
|
72
|
-
html_url = await upload_html_and_get_public_url(
|
|
73
|
-
html_temp.name, botrun_flow_lang_url, user_id
|
|
74
|
-
)
|
|
75
|
-
|
|
76
|
-
# Clean up temporary file
|
|
77
|
-
os.unlink(html_temp.name)
|
|
78
|
-
|
|
79
|
-
return html_url
|
|
80
|
-
except Exception as e:
|
|
81
|
-
# Clean up temporary file in case of error
|
|
82
|
-
os.unlink(html_temp.name)
|
|
83
|
-
return f"Error: {str(e)}"
|
|
84
|
-
|
|
85
|
-
except Exception as e:
|
|
86
|
-
return f"Error: {str(e)}"
|
|
1
|
+
import os
|
|
2
|
+
from tempfile import NamedTemporaryFile
|
|
3
|
+
from typing import Dict, Any, Optional
|
|
4
|
+
from .local_files import upload_and_get_tmp_public_url, upload_html_and_get_public_url
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
async def generate_mermaid_files(
    mermaid_data: str,
    botrun_flow_lang_url: str,
    user_id: str,
    title: Optional[str] = None,
) -> str:
    """
    Generate a standalone HTML page rendering a Mermaid diagram and upload it to GCS.

    Args:
        mermaid_data: Mermaid diagram definition string
        botrun_flow_lang_url: URL for the botrun flow lang API
        user_id: User ID for file upload
        title: Optional title for the diagram (defaults to "Mermaid Diagram")

    Returns:
        str: URL for the HTML file or error message starting with "Error: "
    """
    try:
        # Title is used both in <title> and in the visible <h1> heading.
        page_title = title if title else "Mermaid Diagram"

        # Create HTML content with mermaid
        html_content = f"""
<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <title>{page_title}</title>
    <style>
        body {{
            font-family: "Microsoft JhengHei", "微軟正黑體", "Heiti TC", "黑體-繁", sans-serif;
        }}
        .mermaid {{
            margin: 20px;
        }}
    </style>
    <script src="https://cdn.jsdelivr.net/npm/mermaid/dist/mermaid.min.js"></script>
    <script>
        mermaid.initialize({{
            startOnLoad: true,
            theme: 'default',
            themeVariables: {{
                fontFamily: '"Microsoft JhengHei", "微軟正黑體", "Heiti TC", "黑體-繁", sans-serif'
            }}
        }});
    </script>
</head>
<body>
    <h1>{page_title}</h1>
    <div class="mermaid">
        {mermaid_data}
    </div>
</body>
</html>
"""

        tmp_path = None
        try:
            # delete=False keeps the file after the `with` block closes it, so
            # it can be re-opened by the uploader. NOTE: the previous version
            # unlinked the file *inside* the `with` block while it was still
            # open, which fails on Windows; we now unlink after closing, in
            # the `finally` below, so the temp file is cleaned up on every
            # path (success, upload error, or write error).
            with NamedTemporaryFile(
                suffix=".html", mode="w", encoding="utf-8", delete=False
            ) as html_temp:
                tmp_path = html_temp.name
                html_temp.write(html_content)
                html_temp.flush()

            # Upload the now-closed file to GCS and return its public URL.
            return await upload_html_and_get_public_url(
                tmp_path, botrun_flow_lang_url, user_id
            )
        finally:
            # Clean up the temporary file regardless of outcome.
            if tmp_path and os.path.exists(tmp_path):
                os.unlink(tmp_path)

    except Exception as e:
        return f"Error: {str(e)}"
|
|
@@ -1,143 +1,143 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Utility module for LLM model-related functionality, including API key rotation.
|
|
3
|
-
"""
|
|
4
|
-
|
|
5
|
-
import os
|
|
6
|
-
import random
|
|
7
|
-
from langchain_anthropic import ChatAnthropic
|
|
8
|
-
from langchain_openai import ChatOpenAI
|
|
9
|
-
from langchain_google_genai import ChatGoogleGenerativeAI
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
def get_model_instance(
|
|
13
|
-
model_name: str, temperature: float = 0, enable_code_execution: bool = False
|
|
14
|
-
):
|
|
15
|
-
"""
|
|
16
|
-
統一的模型實例獲取函數
|
|
17
|
-
|
|
18
|
-
Args:
|
|
19
|
-
model_name: 模型名稱
|
|
20
|
-
temperature: 溫度參數
|
|
21
|
-
enable_code_execution: 是否啟用代碼執行(僅 Gemini 2.5 支援)
|
|
22
|
-
|
|
23
|
-
Returns:
|
|
24
|
-
對應的模型實例
|
|
25
|
-
"""
|
|
26
|
-
if model_name.startswith("gemini-"):
|
|
27
|
-
model_kwargs = {}
|
|
28
|
-
if enable_code_execution and "2.5" in model_name:
|
|
29
|
-
model_kwargs["enable_code_execution"] = True
|
|
30
|
-
|
|
31
|
-
return ChatGoogleGenerativeAI(
|
|
32
|
-
model=model_name, temperature=temperature, **model_kwargs
|
|
33
|
-
)
|
|
34
|
-
|
|
35
|
-
elif model_name.startswith("claude-"):
|
|
36
|
-
# 檢查是否有多個 Anthropic API keys
|
|
37
|
-
anthropic_api_keys_str = os.getenv("ANTHROPIC_API_KEYS", "")
|
|
38
|
-
anthropic_api_keys = [
|
|
39
|
-
key.strip() for key in anthropic_api_keys_str.split(",") if key.strip()
|
|
40
|
-
]
|
|
41
|
-
|
|
42
|
-
if anthropic_api_keys:
|
|
43
|
-
return RotatingChatAnthropic(
|
|
44
|
-
model_name=model_name,
|
|
45
|
-
keys=anthropic_api_keys,
|
|
46
|
-
temperature=temperature,
|
|
47
|
-
max_tokens=64000,
|
|
48
|
-
)
|
|
49
|
-
elif os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL"):
|
|
50
|
-
openrouter_model_name = "anthropic/claude-sonnet-4.5"
|
|
51
|
-
return ChatOpenAI(
|
|
52
|
-
openai_api_key=os.getenv("OPENROUTER_API_KEY"),
|
|
53
|
-
openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
|
|
54
|
-
model_name=openrouter_model_name,
|
|
55
|
-
temperature=temperature,
|
|
56
|
-
max_tokens=64000,
|
|
57
|
-
)
|
|
58
|
-
else:
|
|
59
|
-
return ChatAnthropic(
|
|
60
|
-
model=model_name,
|
|
61
|
-
temperature=temperature,
|
|
62
|
-
max_tokens=64000,
|
|
63
|
-
)
|
|
64
|
-
|
|
65
|
-
elif model_name.startswith("gpt-"):
|
|
66
|
-
if os.getenv("OPENROUTER_API_KEY") and os.getenv("OPENROUTER_BASE_URL"):
|
|
67
|
-
return ChatOpenAI(
|
|
68
|
-
openai_api_key=os.getenv("OPENROUTER_API_KEY"),
|
|
69
|
-
openai_api_base=os.getenv("OPENROUTER_BASE_URL"),
|
|
70
|
-
model_name=f"openai/{model_name}",
|
|
71
|
-
temperature=temperature,
|
|
72
|
-
max_tokens=8192,
|
|
73
|
-
)
|
|
74
|
-
else:
|
|
75
|
-
return ChatOpenAI(
|
|
76
|
-
model=model_name,
|
|
77
|
-
temperature=temperature,
|
|
78
|
-
max_tokens=8192,
|
|
79
|
-
)
|
|
80
|
-
|
|
81
|
-
else:
|
|
82
|
-
# 預設使用 Gemini
|
|
83
|
-
return ChatGoogleGenerativeAI(
|
|
84
|
-
model="gemini-2.5-flash",
|
|
85
|
-
temperature=temperature,
|
|
86
|
-
)
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
class RotatingChatAnthropic:
|
|
90
|
-
"""A wrapper class for ChatAnthropic that rotates through multiple API keys."""
|
|
91
|
-
|
|
92
|
-
def __init__(self, model_name, keys, temperature=0, max_tokens=8192):
|
|
93
|
-
"""
|
|
94
|
-
Initialize the rotating key model.
|
|
95
|
-
|
|
96
|
-
Args:
|
|
97
|
-
model_name: The name of the Anthropic model to use
|
|
98
|
-
keys: List of API keys to rotate through
|
|
99
|
-
temperature: The temperature for model generation
|
|
100
|
-
max_tokens: The maximum number of tokens to generate
|
|
101
|
-
"""
|
|
102
|
-
self.keys = keys
|
|
103
|
-
self.model_name = model_name
|
|
104
|
-
self.temperature = temperature
|
|
105
|
-
self.max_tokens = max_tokens
|
|
106
|
-
|
|
107
|
-
# Initialize with the first key
|
|
108
|
-
self.base_model = ChatAnthropic(
|
|
109
|
-
model=self.model_name,
|
|
110
|
-
temperature=self.temperature,
|
|
111
|
-
max_tokens=self.max_tokens,
|
|
112
|
-
api_key=random.choice(self.keys) if self.keys else None,
|
|
113
|
-
)
|
|
114
|
-
|
|
115
|
-
def invoke(self, *args, **kwargs):
|
|
116
|
-
"""
|
|
117
|
-
Invoke the model with a randomly selected API key.
|
|
118
|
-
|
|
119
|
-
This method is called when the model is invoked through LangChain.
|
|
120
|
-
"""
|
|
121
|
-
if self.keys:
|
|
122
|
-
# Select a random key for this invocation
|
|
123
|
-
self.base_model.client.api_key = random.choice(self.keys)
|
|
124
|
-
return self.base_model.invoke(*args, **kwargs)
|
|
125
|
-
|
|
126
|
-
def stream(self, *args, **kwargs):
|
|
127
|
-
"""
|
|
128
|
-
Stream the model response with a randomly selected API key.
|
|
129
|
-
|
|
130
|
-
This method handles streaming output from the model.
|
|
131
|
-
"""
|
|
132
|
-
if self.keys:
|
|
133
|
-
# Select a random key for this streaming invocation
|
|
134
|
-
self.base_model.client.api_key = random.choice(self.keys)
|
|
135
|
-
return self.base_model.stream(*args, **kwargs)
|
|
136
|
-
|
|
137
|
-
def __getattr__(self, name):
|
|
138
|
-
"""
|
|
139
|
-
Forward any other attribute access to the base model.
|
|
140
|
-
|
|
141
|
-
This ensures compatibility with the original ChatAnthropic class.
|
|
142
|
-
"""
|
|
143
|
-
return getattr(self.base_model, name)
|
|
1
|
+
"""
|
|
2
|
+
Utility module for LLM model-related functionality, including API key rotation.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import os
|
|
6
|
+
import random
|
|
7
|
+
from langchain_anthropic import ChatAnthropic
|
|
8
|
+
from langchain_openai import ChatOpenAI
|
|
9
|
+
from langchain_google_genai import ChatGoogleGenerativeAI
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
def get_model_instance(
    model_name: str, temperature: float = 0, enable_code_execution: bool = False
):
    """
    Unified factory for chat-model instances.

    The provider is selected from the model-name prefix ("gemini-",
    "claude-", "gpt-"); anything else falls back to a default Gemini model.

    Args:
        model_name: Name of the model to instantiate.
        temperature: Sampling temperature.
        enable_code_execution: Enable code execution (Gemini 2.5 family only).

    Returns:
        A chat-model instance for the requested provider.
    """
    openrouter_key = os.getenv("OPENROUTER_API_KEY")
    openrouter_base = os.getenv("OPENROUTER_BASE_URL")
    use_openrouter = bool(openrouter_key and openrouter_base)

    if model_name.startswith("gemini-"):
        extra_kwargs = {}
        # Code execution is only supported on the Gemini 2.5 family.
        if enable_code_execution and "2.5" in model_name:
            extra_kwargs["enable_code_execution"] = True
        return ChatGoogleGenerativeAI(
            model=model_name, temperature=temperature, **extra_kwargs
        )

    if model_name.startswith("claude-"):
        # Multiple Anthropic API keys may be supplied as a comma-separated
        # list; when present, rotate across them per call.
        raw_keys = os.getenv("ANTHROPIC_API_KEYS", "")
        rotation_keys = [k.strip() for k in raw_keys.split(",") if k.strip()]
        if rotation_keys:
            return RotatingChatAnthropic(
                model_name=model_name,
                keys=rotation_keys,
                temperature=temperature,
                max_tokens=64000,
            )
        if use_openrouter:
            # Route through OpenRouter with a fixed Anthropic model alias.
            return ChatOpenAI(
                openai_api_key=openrouter_key,
                openai_api_base=openrouter_base,
                model_name="anthropic/claude-sonnet-4.5",
                temperature=temperature,
                max_tokens=64000,
            )
        return ChatAnthropic(
            model=model_name,
            temperature=temperature,
            max_tokens=64000,
        )

    if model_name.startswith("gpt-"):
        if use_openrouter:
            return ChatOpenAI(
                openai_api_key=openrouter_key,
                openai_api_base=openrouter_base,
                model_name=f"openai/{model_name}",
                temperature=temperature,
                max_tokens=8192,
            )
        return ChatOpenAI(
            model=model_name,
            temperature=temperature,
            max_tokens=8192,
        )

    # Unrecognized prefix: default to Gemini.
    return ChatGoogleGenerativeAI(
        model="gemini-2.5-flash",
        temperature=temperature,
    )
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
class RotatingChatAnthropic:
    """A wrapper class for ChatAnthropic that rotates through multiple API keys.

    Each ``invoke``/``stream`` call re-selects a random key from the pool so
    traffic is spread across keys. Every other attribute access is forwarded
    to the wrapped ChatAnthropic instance, making this a drop-in replacement.
    """

    def __init__(self, model_name, keys, temperature=0, max_tokens=8192):
        """
        Initialize the rotating key model.

        Args:
            model_name: The name of the Anthropic model to use
            keys: List of API keys to rotate through
            temperature: The temperature for model generation
            max_tokens: The maximum number of tokens to generate
        """
        self.keys = keys
        self.model_name = model_name
        self.temperature = temperature
        self.max_tokens = max_tokens

        # Initialize with a randomly chosen key; subsequent calls re-select
        # a key per invocation via _rotate_key(). (The old comment claimed
        # "the first key", which contradicted the random.choice here.)
        self.base_model = ChatAnthropic(
            model=self.model_name,
            temperature=self.temperature,
            max_tokens=self.max_tokens,
            api_key=random.choice(self.keys) if self.keys else None,
        )

    def _rotate_key(self):
        """Point the underlying Anthropic client at a randomly selected key."""
        if self.keys:
            self.base_model.client.api_key = random.choice(self.keys)

    def invoke(self, *args, **kwargs):
        """
        Invoke the model with a randomly selected API key.

        This method is called when the model is invoked through LangChain.
        """
        self._rotate_key()
        return self.base_model.invoke(*args, **kwargs)

    def stream(self, *args, **kwargs):
        """
        Stream the model response with a randomly selected API key.

        This method handles streaming output from the model.
        """
        self._rotate_key()
        return self.base_model.stream(*args, **kwargs)

    def __getattr__(self, name):
        """
        Forward any other attribute access to the base model.

        Guards against infinite recursion: if ``base_model`` itself is not
        set yet (e.g. ``__init__`` raised partway through), looking it up
        here would re-enter ``__getattr__`` forever. Raise AttributeError
        instead, which is the conventional contract for this hook.
        """
        if name == "base_model":
            raise AttributeError(name)
        return getattr(self.base_model, name)
|