botrun-flow-lang 6.2.21__py3-none-any.whl → 6.2.61__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py +13 -14
- botrun_flow_lang/langgraph_agents/agents/util/custom_vertex_claude.py +406 -0
- {botrun_flow_lang-6.2.21.dist-info → botrun_flow_lang-6.2.61.dist-info}/METADATA +1 -2
- {botrun_flow_lang-6.2.21.dist-info → botrun_flow_lang-6.2.61.dist-info}/RECORD +5 -4
- {botrun_flow_lang-6.2.21.dist-info → botrun_flow_lang-6.2.61.dist-info}/WHEEL +0 -0
|
@@ -86,9 +86,11 @@ from langchain_mcp_adapters.client import MultiServerMCPClient
|
|
|
86
86
|
# ========
|
|
87
87
|
# for Vertex AI
|
|
88
88
|
from google.oauth2 import service_account
|
|
89
|
-
#
|
|
90
|
-
#
|
|
91
|
-
|
|
89
|
+
# 使用自製的 ChatVertexAIClaude 取代 ChatAnthropicVertex,
|
|
90
|
+
# 透過 rawPredict REST API 直接呼叫 Claude,避免載入 google-cloud-aiplatform(約 26 秒)
|
|
91
|
+
from botrun_flow_lang.langgraph_agents.agents.util.custom_vertex_claude import (
|
|
92
|
+
ChatVertexAIClaude,
|
|
93
|
+
)
|
|
92
94
|
|
|
93
95
|
load_dotenv()
|
|
94
96
|
|
|
@@ -251,18 +253,17 @@ def get_react_agent_model(model_name: str = ""):
|
|
|
251
253
|
|
|
252
254
|
elif "claude" in vertex_model_name.lower() or vertex_model_name.startswith("maison/"):
|
|
253
255
|
# Anthropic Claude (model garden)
|
|
254
|
-
#
|
|
255
|
-
|
|
256
|
-
model = ChatAnthropicVertex(
|
|
256
|
+
# 使用自製的 ChatVertexAIClaude,避免載入 google-cloud-aiplatform
|
|
257
|
+
model = ChatVertexAIClaude(
|
|
257
258
|
model=vertex_model_name,
|
|
258
259
|
location=vertex_region,
|
|
259
|
-
|
|
260
|
+
project_id=vertex_project,
|
|
260
261
|
credentials=credentials,
|
|
261
262
|
temperature=0,
|
|
262
263
|
max_tokens=ANTHROPIC_MAX_TOKENS,
|
|
263
264
|
)
|
|
264
265
|
logger.info(
|
|
265
|
-
f"model
|
|
266
|
+
f"model ChatVertexAIClaude {vertex_model_name} @ {vertex_region} (project: {vertex_project})"
|
|
266
267
|
)
|
|
267
268
|
|
|
268
269
|
else:
|
|
@@ -306,11 +307,9 @@ def get_react_agent_model(model_name: str = ""):
|
|
|
306
307
|
"VERTEX_AI_GOOGLE_APPLICATION_CREDENTIALS not set or file not found. Using ADC if available."
|
|
307
308
|
)
|
|
308
309
|
|
|
309
|
-
#
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
model = ChatAnthropicVertex(
|
|
313
|
-
project=vertex_project,
|
|
310
|
+
# 使用自製的 ChatVertexAIClaude,避免載入 google-cloud-aiplatform
|
|
311
|
+
model = ChatVertexAIClaude(
|
|
312
|
+
project_id=vertex_project,
|
|
314
313
|
model=vertex_model,
|
|
315
314
|
location=vertex_location,
|
|
316
315
|
credentials=credentials,
|
|
@@ -318,7 +317,7 @@ def get_react_agent_model(model_name: str = ""):
|
|
|
318
317
|
max_tokens=ANTHROPIC_MAX_TOKENS,
|
|
319
318
|
)
|
|
320
319
|
logger.info(
|
|
321
|
-
f"model
|
|
320
|
+
f"model ChatVertexAIClaude {vertex_project} @ {vertex_model} @ {vertex_location}"
|
|
322
321
|
)
|
|
323
322
|
|
|
324
323
|
else:
|
|
@@ -0,0 +1,406 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Custom Vertex AI Claude chat model for LangGraph.
|
|
3
|
+
|
|
4
|
+
Lightweight BaseChatModel that calls Claude via Vertex AI's rawPredict REST API,
|
|
5
|
+
avoiding the heavy google-cloud-aiplatform dependency (~26s import time).
|
|
6
|
+
|
|
7
|
+
Supports tool calling for LangGraph react agent compatibility.
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import json
|
|
11
|
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
|
12
|
+
|
|
13
|
+
import httpx
|
|
14
|
+
|
|
15
|
+
from google.auth.transport.requests import Request
|
|
16
|
+
from google.oauth2 import service_account
|
|
17
|
+
from langchain_core.callbacks import CallbackManagerForLLMRun
|
|
18
|
+
from langchain_core.language_models.chat_models import BaseChatModel
|
|
19
|
+
from langchain_core.messages import (
|
|
20
|
+
AIMessage,
|
|
21
|
+
BaseMessage,
|
|
22
|
+
HumanMessage,
|
|
23
|
+
SystemMessage,
|
|
24
|
+
ToolMessage,
|
|
25
|
+
)
|
|
26
|
+
from langchain_core.outputs import ChatGeneration, ChatResult
|
|
27
|
+
from pydantic import ConfigDict
|
|
28
|
+
|
|
29
|
+
from botrun_flow_lang.utils.botrun_logger import get_default_botrun_logger
|
|
30
|
+
|
|
31
|
+
logger = get_default_botrun_logger()
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class ChatVertexAIClaude(BaseChatModel):
    """
    Lightweight Vertex AI Claude chat model using rawPredict REST API.

    Replaces ChatAnthropicVertex without importing google-cloud-aiplatform.
    Supports tool calling for LangGraph react agent.

    Usage:
        model = ChatVertexAIClaude(
            model="claude-sonnet-4-5-20250929",
            project_id="my-project",
            location="asia-east1",
            credentials=my_credentials,  # or service_account_file="path/to/sa.json"
        )
    """

    # Claude model ID as published by Anthropic in the Vertex AI Model Garden.
    model: str = "claude-sonnet-4-5-20250929"
    # Upper bound on generated tokens per response.
    max_tokens: int = 64000
    temperature: float = 0
    # GCP project and region that host the Vertex AI endpoint.
    project_id: str = ""
    location: str = "asia-east1"
    # Pre-built google.auth credentials object (takes precedence when set),
    # or a service-account JSON key path used to build credentials on demand.
    credentials: Any = None
    service_account_file: str = ""

    # arbitrary_types_allowed lets the non-pydantic `credentials` object be a field.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    @property
    def _llm_type(self) -> str:
        """LangChain model-type identifier used in serialization/telemetry."""
        return "vertex-ai-claude-custom"

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        """Parameters that identify this model instance (used for caching/tracing)."""
        return {
            "model": self.model,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
            "project_id": self.project_id,
            "location": self.location,
        }

    def bind_tools(
        self,
        tools: List[Any],
        *,
        tool_choice: Optional[Union[str, Dict]] = None,
        **kwargs,
    ):
        """Convert tools to Anthropic format and bind via Runnable.bind().

        The bound kwargs surface again in ``_generate`` as ``kwargs["tools"]``
        and ``kwargs["tool_choice"]``.
        """
        formatted_tools = _convert_tools_to_anthropic(tools)
        bind_kwargs: Dict[str, Any] = {"tools": formatted_tools, **kwargs}
        if tool_choice is not None:
            bind_kwargs["tool_choice"] = tool_choice
        return self.bind(**bind_kwargs)

    def _get_access_token(self) -> str:
        """Get OAuth2 access token for Vertex AI API.

        Prefers an explicit ``credentials`` object; otherwise builds one from
        ``service_account_file``. Raises ValueError when neither is provided.
        """
        if self.credentials:
            creds = self.credentials
        elif self.service_account_file:
            creds = service_account.Credentials.from_service_account_file(
                self.service_account_file,
                scopes=["https://www.googleapis.com/auth/cloud-platform"],
            )
        else:
            raise ValueError(
                "ChatVertexAIClaude requires either 'credentials' or 'service_account_file'"
            )

        # Refresh lazily so a long-lived instance keeps working past expiry.
        if not creds.valid or creds.expired:
            creds.refresh(Request())
        return creds.token

    def _convert_messages(
        self, messages: List[BaseMessage]
    ) -> Tuple[Union[str, List[Dict]], List[Dict]]:
        """Convert LangChain messages to Anthropic API format.

        Returns:
            (system, api_messages) tuple.
            system: str or list of content blocks (preserves cache_control).
            api_messages: list of Anthropic-format message dicts.
        """
        system_blocks: List[Any] = []
        raw_messages: List[Dict] = []

        for msg in messages:
            if isinstance(msg, SystemMessage):
                # System content is pulled out of the message list; Anthropic
                # takes it as a separate top-level "system" field.
                if isinstance(msg.content, str):
                    system_blocks.append({"type": "text", "text": msg.content})
                elif isinstance(msg.content, list):
                    for block in msg.content:
                        if isinstance(block, dict):
                            system_blocks.append(block)
                        elif isinstance(block, str):
                            system_blocks.append({"type": "text", "text": block})

            elif isinstance(msg, HumanMessage):
                raw_messages.append({"role": "user", "content": msg.content})

            elif isinstance(msg, AIMessage):
                content_blocks = []
                if msg.content:
                    if isinstance(msg.content, str):
                        content_blocks.append(
                            {"type": "text", "text": msg.content}
                        )
                    elif isinstance(msg.content, list):
                        for block in msg.content:
                            if isinstance(block, str):
                                content_blocks.append(
                                    {"type": "text", "text": block}
                                )
                            elif isinstance(block, dict):
                                content_blocks.append(block)
                # Re-emit prior tool calls as tool_use blocks so the API sees
                # the same assistant turn it originally produced.
                for tc in msg.tool_calls or []:
                    content_blocks.append(
                        {
                            "type": "tool_use",
                            "id": tc["id"],
                            "name": tc["name"],
                            "input": tc["args"],
                        }
                    )
                raw_messages.append(
                    {
                        "role": "assistant",
                        "content": content_blocks if content_blocks else "",
                    }
                )

            elif isinstance(msg, ToolMessage):
                tool_content = msg.content
                if not isinstance(tool_content, str):
                    tool_content = json.dumps(tool_content, ensure_ascii=False)
                # Anthropic expects tool results as a user-role tool_result block.
                raw_messages.append(
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "tool_result",
                                "tool_use_id": msg.tool_call_id,
                                "content": tool_content,
                            }
                        ],
                    }
                )

        merged = _merge_consecutive_messages(raw_messages)

        # Return system as string (simple) or list (structured with cache_control)
        has_cache_control = any(
            isinstance(b, dict) and "cache_control" in b for b in system_blocks
        )
        if len(system_blocks) == 1 and not has_cache_control:
            system: Union[str, List[Dict]] = system_blocks[0].get("text", "")
        elif system_blocks:
            system = system_blocks
        else:
            system = ""

        return system, merged

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs,
    ) -> ChatResult:
        """Call Vertex AI Claude via rawPredict (non-streaming).

        Builds the Anthropic-format payload, POSTs it to the regional
        rawPredict endpoint, and maps the response back to a ChatResult
        with tool calls and usage metadata.
        """
        system, api_messages = self._convert_messages(messages)
        access_token = self._get_access_token()

        url = (
            f"https://{self.location}-aiplatform.googleapis.com/v1/"
            f"projects/{self.project_id}/locations/{self.location}/"
            f"publishers/anthropic/models/{self.model}:rawPredict"
        )

        payload: Dict[str, Any] = {
            "anthropic_version": "vertex-2023-10-16",
            "messages": api_messages,
            "max_tokens": self.max_tokens,
            "temperature": self.temperature,
        }
        if system:
            payload["system"] = system
        if stop:
            payload["stop_sequences"] = stop

        # Tools from bind_tools()
        tools = kwargs.get("tools", [])
        if tools:
            payload["tools"] = tools

        # Tool choice: map LangChain-style strings onto Anthropic's dict form;
        # "none" is implemented by dropping the tools list entirely.
        tool_choice = kwargs.get("tool_choice")
        if tool_choice:
            if isinstance(tool_choice, str):
                if tool_choice == "auto":
                    payload["tool_choice"] = {"type": "auto"}
                elif tool_choice == "any":
                    payload["tool_choice"] = {"type": "any"}
                elif tool_choice == "none":
                    payload.pop("tools", None)
                else:
                    # Any other string is treated as a specific tool name to force.
                    payload["tool_choice"] = {
                        "type": "tool",
                        "name": tool_choice,
                    }
            elif isinstance(tool_choice, dict):
                payload["tool_choice"] = tool_choice

        logger.info(
            f"[ChatVertexAIClaude] rawPredict: model={self.model}, "
            f"location={self.location}, messages={len(api_messages)}, "
            f"tools={len(tools)}"
        )

        # Make API call via httpx
        data = _http_post_json(url, payload, access_token)

        # Parse response: collect text blocks and tool_use blocks separately.
        text_parts = []
        tool_calls = []
        for block in data.get("content", []):
            block_type = block.get("type", "")
            if block_type == "text":
                text_parts.append(block.get("text", ""))
            elif block_type == "tool_use":
                tool_calls.append(
                    {
                        "id": block["id"],
                        "name": block["name"],
                        "args": block.get("input", {}),
                    }
                )

        usage = data.get("usage", {})
        input_tokens = usage.get("input_tokens", 0)
        output_tokens = usage.get("output_tokens", 0)

        ai_message = AIMessage(
            content="".join(text_parts),
            tool_calls=tool_calls,
            usage_metadata={
                "input_tokens": input_tokens,
                "output_tokens": output_tokens,
                "total_tokens": input_tokens + output_tokens,
            },
            response_metadata={
                "model": self.model,
                "stop_reason": data.get("stop_reason", ""),
            },
        )

        logger.info(
            f"[ChatVertexAIClaude] Response: "
            f"text_len={len(ai_message.content)}, "
            f"tool_calls={len(tool_calls)}, "
            f"tokens=({input_tokens}+{output_tokens}={input_tokens + output_tokens})"
        )

        return ChatResult(
            generations=[ChatGeneration(message=ai_message)],
            llm_output={
                "model": self.model,
                "usage": {
                    "input_tokens": input_tokens,
                    "output_tokens": output_tokens,
                },
            },
        )
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
def _http_post_json(
    url: str, payload: Dict[str, Any], access_token: str
) -> Dict[str, Any]:
    """POST *payload* as JSON to *url* with Bearer auth and return the parsed body.

    Any non-200 reply is logged and surfaced as an Exception carrying the
    status code and response text.
    """
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {access_token}",
    }
    try:
        # Generous timeout: large Claude generations can take minutes.
        with httpx.Client(timeout=300.0) as http_client:
            response = http_client.post(url, headers=request_headers, json=payload)
            if response.status_code == 200:
                return response.json()
            error_msg = (
                f"Vertex AI API error: {response.status_code} - {response.text}"
            )
            logger.error(f"[ChatVertexAIClaude] {error_msg}")
            raise Exception(error_msg)
    except httpx.HTTPStatusError as e:
        error_body = e.response.text if e.response else ""
        error_msg = f"Vertex AI API error: {e.response.status_code} - {error_body}"
        logger.error(f"[ChatVertexAIClaude] {error_msg}")
        raise Exception(error_msg) from e
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
def _convert_tools_to_anthropic(tools: List[Any]) -> List[Dict]:
|
|
335
|
+
"""Convert LangChain tools to Anthropic tool format."""
|
|
336
|
+
from langchain_core.utils.function_calling import convert_to_openai_tool
|
|
337
|
+
|
|
338
|
+
anthropic_tools = []
|
|
339
|
+
for tool in tools:
|
|
340
|
+
if isinstance(tool, dict):
|
|
341
|
+
if "input_schema" in tool:
|
|
342
|
+
anthropic_tools.append(tool)
|
|
343
|
+
elif "function" in tool:
|
|
344
|
+
func = tool["function"]
|
|
345
|
+
anthropic_tools.append(
|
|
346
|
+
{
|
|
347
|
+
"name": func["name"],
|
|
348
|
+
"description": func.get("description", ""),
|
|
349
|
+
"input_schema": func.get(
|
|
350
|
+
"parameters",
|
|
351
|
+
{"type": "object", "properties": {}},
|
|
352
|
+
),
|
|
353
|
+
}
|
|
354
|
+
)
|
|
355
|
+
else:
|
|
356
|
+
anthropic_tools.append(tool)
|
|
357
|
+
else:
|
|
358
|
+
try:
|
|
359
|
+
oai_tool = convert_to_openai_tool(tool)
|
|
360
|
+
func = oai_tool["function"]
|
|
361
|
+
anthropic_tools.append(
|
|
362
|
+
{
|
|
363
|
+
"name": func["name"],
|
|
364
|
+
"description": func.get("description", ""),
|
|
365
|
+
"input_schema": func.get(
|
|
366
|
+
"parameters",
|
|
367
|
+
{"type": "object", "properties": {}},
|
|
368
|
+
),
|
|
369
|
+
}
|
|
370
|
+
)
|
|
371
|
+
except Exception as e:
|
|
372
|
+
logger.warning(
|
|
373
|
+
f"[ChatVertexAIClaude] Failed to convert tool "
|
|
374
|
+
f"{getattr(tool, 'name', tool)}: {e}"
|
|
375
|
+
)
|
|
376
|
+
|
|
377
|
+
return anthropic_tools
|
|
378
|
+
|
|
379
|
+
|
|
380
|
+
def _merge_consecutive_messages(messages: List[Dict]) -> List[Dict]:
|
|
381
|
+
"""Merge consecutive messages with the same role (required by Anthropic API)."""
|
|
382
|
+
if not messages:
|
|
383
|
+
return []
|
|
384
|
+
|
|
385
|
+
merged: List[Dict] = []
|
|
386
|
+
for msg in messages:
|
|
387
|
+
if merged and merged[-1]["role"] == msg["role"]:
|
|
388
|
+
prev_content = merged[-1]["content"]
|
|
389
|
+
curr_content = msg["content"]
|
|
390
|
+
|
|
391
|
+
# Normalize to list of content blocks
|
|
392
|
+
if isinstance(prev_content, str):
|
|
393
|
+
prev_content = [{"type": "text", "text": prev_content}]
|
|
394
|
+
elif not isinstance(prev_content, list):
|
|
395
|
+
prev_content = [prev_content]
|
|
396
|
+
|
|
397
|
+
if isinstance(curr_content, str):
|
|
398
|
+
curr_content = [{"type": "text", "text": curr_content}]
|
|
399
|
+
elif not isinstance(curr_content, list):
|
|
400
|
+
curr_content = [curr_content]
|
|
401
|
+
|
|
402
|
+
merged[-1]["content"] = prev_content + curr_content
|
|
403
|
+
else:
|
|
404
|
+
merged.append(msg)
|
|
405
|
+
|
|
406
|
+
return merged
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: botrun-flow-lang
|
|
3
|
-
Version: 6.2.21
|
|
3
|
+
Version: 6.2.61
|
|
4
4
|
Summary: A flow language for botrun
|
|
5
5
|
Author-email: sebastian-hsu <sebastian.hsu@gmail.com>
|
|
6
6
|
License: MIT
|
|
@@ -32,7 +32,6 @@ Requires-Dist: langchain-community>=0.3.27
|
|
|
32
32
|
Requires-Dist: langchain-core>=1.1.2
|
|
33
33
|
Requires-Dist: langchain-google-community>=2.0.3
|
|
34
34
|
Requires-Dist: langchain-google-genai>=4.0.0
|
|
35
|
-
Requires-Dist: langchain-google-vertexai<4.0.0,>=3.2.0
|
|
36
35
|
Requires-Dist: langchain-mcp-adapters>=0.1.7
|
|
37
36
|
Requires-Dist: langchain-openai>=0.3.28
|
|
38
37
|
Requires-Dist: langchain>=0.3.27
|
|
@@ -25,7 +25,7 @@ botrun_flow_lang/api/youtube_api.py,sha256=9eGr--gR2OoM9JZ6Nf9KqPiE-FeXEx8R-QeJv
|
|
|
25
25
|
botrun_flow_lang/langgraph_agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
26
26
|
botrun_flow_lang/langgraph_agents/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
27
27
|
botrun_flow_lang/langgraph_agents/agents/agent_runner.py,sha256=tiuPIqAcM8rIWBTjo8NS4owTepCsX3QkIHaUEDakOTc,6673
|
|
28
|
-
botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py,sha256
|
|
28
|
+
botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py,sha256=-4ejdM9SiocZyV92wCg1DOKNMBUTO6-wanj-D73RG7k,31156
|
|
29
29
|
botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py,sha256=hWDPt0U09Gj-3-NNWhsn9xaakYbOcHExIXqcL8TeZxw,32046
|
|
30
30
|
botrun_flow_lang/langgraph_agents/agents/agent_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
31
31
|
botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py,sha256=S4TYt0ZhgdAZ-2ndH8hJoEaIyDKdNJdWHjEZ49Lg_NQ,2427
|
|
@@ -39,6 +39,7 @@ botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py,
|
|
|
39
39
|
botrun_flow_lang/langgraph_agents/agents/tools/__init__.py,sha256=-z1uuC3IET02q8kPhPlr-L9eTGJqgHjEJlC__cG16H0,105
|
|
40
40
|
botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py,sha256=EEp8xhVU-Kj1Nk5qV8ObqdVZ8gT6GITrE4VyjIc2InA,14238
|
|
41
41
|
botrun_flow_lang/langgraph_agents/agents/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
42
|
+
botrun_flow_lang/langgraph_agents/agents/util/custom_vertex_claude.py,sha256=RDj1-PnoR36xdOUu7r6yMQrxAe-CXyGcSZEKiOlPxJY,14951
|
|
42
43
|
botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py,sha256=JTfH9WJNDlpvMvfzXyZy3bHeCN58MTnEOiamQGMsqh0,2884
|
|
43
44
|
botrun_flow_lang/langgraph_agents/agents/util/html_util.py,sha256=g5yJO0qTqRq_kb-xhSnWX3WAbHDIjNQYl7ErRBPQwHs,13230
|
|
44
45
|
botrun_flow_lang/langgraph_agents/agents/util/img_util.py,sha256=6OERtpGGimlev4Pb_O1UbMNaT_DMBHSmAgo9gB-R8xk,12385
|
|
@@ -99,6 +100,6 @@ botrun_flow_lang/utils/yaml_utils.py,sha256=dPlabIol-Clhnwc7N5nuffCaLSq8dyvmvjRw
|
|
|
99
100
|
botrun_flow_lang/utils/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
|
100
101
|
botrun_flow_lang/utils/clients/rate_limit_client.py,sha256=96NNCHB9I5C5bpVFF6sfPhmh4oAx3UdOLb-Z4PAXLdg,8558
|
|
101
102
|
botrun_flow_lang/utils/clients/token_verify_client.py,sha256=-AnYApJ9CvxVn-RhCCZZ2LCrf065fgskhwLKAm-aiN0,5893
|
|
102
|
-
botrun_flow_lang-6.2.
|
|
103
|
-
botrun_flow_lang-6.2.
|
|
104
|
-
botrun_flow_lang-6.2.
|
|
103
|
+
botrun_flow_lang-6.2.61.dist-info/METADATA,sha256=7mfsw0_BTSzWq5fSeKqCg0RmuOEyMfdKwItnzlRA0p8,6164
|
|
104
|
+
botrun_flow_lang-6.2.61.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
105
|
+
botrun_flow_lang-6.2.61.dist-info/RECORD,,
|
|
File without changes
|