botrun-flow-lang 6.2.21__py3-none-any.whl → 6.2.62__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -86,9 +86,11 @@ from langchain_mcp_adapters.client import MultiServerMCPClient
86
86
  # ========
87
87
  # for Vertex AI
88
88
  from google.oauth2 import service_account
89
- # 重型 import 改為延遲載入,避免啟動時載入 google-cloud-aiplatform(約 26 秒)
90
- # ChatVertexAI 已遷移至 ChatGoogleGenerativeAI(vertexai=True)
91
- # ChatAnthropicVertex 在需要時才 import(見 get_react_agent_model 函數內)
89
+ # 使用自製的 ChatVertexAIClaude 取代 ChatAnthropicVertex,
90
+ # 透過 rawPredict REST API 直接呼叫 Claude,避免載入 google-cloud-aiplatform(約 26 秒)
91
+ from botrun_flow_lang.langgraph_agents.agents.util.custom_vertex_claude import (
92
+ ChatVertexAIClaude,
93
+ )
92
94
 
93
95
  load_dotenv()
94
96
 
@@ -251,18 +253,17 @@ def get_react_agent_model(model_name: str = ""):
251
253
 
252
254
  elif "claude" in vertex_model_name.lower() or vertex_model_name.startswith("maison/"):
253
255
  # Anthropic Claude (model garden)
254
- # 延遲載入 ChatAnthropicVertex,只有在需要時才觸發 langchain_google_vertexai
255
- from langchain_google_vertexai.model_garden import ChatAnthropicVertex
256
- model = ChatAnthropicVertex(
256
+ # 使用自製的 ChatVertexAIClaude,避免載入 google-cloud-aiplatform
257
+ model = ChatVertexAIClaude(
257
258
  model=vertex_model_name,
258
259
  location=vertex_region,
259
- project=vertex_project,
260
+ project_id=vertex_project,
260
261
  credentials=credentials,
261
262
  temperature=0,
262
263
  max_tokens=ANTHROPIC_MAX_TOKENS,
263
264
  )
264
265
  logger.info(
265
- f"model ChatAnthropicVertex {vertex_model_name} @ {vertex_region} (project: {vertex_project})"
266
+ f"model ChatVertexAIClaude {vertex_model_name} @ {vertex_region} (project: {vertex_project})"
266
267
  )
267
268
 
268
269
  else:
@@ -306,11 +307,9 @@ def get_react_agent_model(model_name: str = ""):
306
307
  "VERTEX_AI_GOOGLE_APPLICATION_CREDENTIALS not set or file not found. Using ADC if available."
307
308
  )
308
309
 
309
- # 初始化 ChatAnthropicVertex
310
- # 延遲載入,只有在需要時才觸發 langchain_google_vertexai
311
- from langchain_google_vertexai.model_garden import ChatAnthropicVertex
312
- model = ChatAnthropicVertex(
313
- project=vertex_project,
310
+ # 使用自製的 ChatVertexAIClaude,避免載入 google-cloud-aiplatform
311
+ model = ChatVertexAIClaude(
312
+ project_id=vertex_project,
314
313
  model=vertex_model,
315
314
  location=vertex_location,
316
315
  credentials=credentials,
@@ -318,7 +317,7 @@ def get_react_agent_model(model_name: str = ""):
318
317
  max_tokens=ANTHROPIC_MAX_TOKENS,
319
318
  )
320
319
  logger.info(
321
- f"model ChatAnthropicVertex {vertex_project} @ {vertex_model} @ {vertex_location}"
320
+ f"model ChatVertexAIClaude {vertex_project} @ {vertex_model} @ {vertex_location}"
322
321
  )
323
322
 
324
323
  else:
@@ -0,0 +1,607 @@
1
+ """
2
+ Custom Vertex AI Claude chat model for LangGraph.
3
+
4
+ Lightweight BaseChatModel that calls Claude via Vertex AI's rawPredict REST API,
5
+ avoiding the heavy google-cloud-aiplatform dependency (~26s import time).
6
+
7
+ Supports tool calling for LangGraph react agent compatibility.
8
+ """
9
+
10
+ import json
11
+ from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
12
+
13
+ import httpx
14
+
15
+ from google.auth.transport.requests import Request
16
+ from google.oauth2 import service_account
17
+ from langchain_core.callbacks import CallbackManagerForLLMRun
18
+ from langchain_core.language_models.chat_models import BaseChatModel
19
+ from langchain_core.messages import (
20
+ AIMessage,
21
+ AIMessageChunk,
22
+ BaseMessage,
23
+ HumanMessage,
24
+ SystemMessage,
25
+ ToolMessage,
26
+ )
27
+ from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
28
+ from pydantic import ConfigDict
29
+
30
+ from botrun_flow_lang.utils.botrun_logger import get_default_botrun_logger
31
+
32
+ logger = get_default_botrun_logger()
33
+
34
+
35
class ChatVertexAIClaude(BaseChatModel):
    """
    Lightweight Vertex AI Claude chat model using rawPredict REST API.

    Replaces ChatAnthropicVertex without importing google-cloud-aiplatform.
    Supports tool calling for LangGraph react agent.

    Usage:
        model = ChatVertexAIClaude(
            model="claude-sonnet-4-5-20250929",
            project_id="my-project",
            location="asia-east1",
            credentials=my_credentials,  # or service_account_file="path/to/sa.json"
        )
    """

    # Claude model id as published under Vertex AI's Anthropic publisher.
    model: str = "claude-sonnet-4-5-20250929"
    # Upper bound on generated tokens, sent as "max_tokens" in the payload.
    max_tokens: int = 64000
    # Sampling temperature forwarded verbatim to the API.
    temperature: float = 0
    # GCP project that hosts the Vertex AI endpoint (part of the URL).
    project_id: str = ""
    # Vertex AI region; also selects the regional API hostname.
    location: str = "asia-east1"
    # google.auth credentials object; takes precedence over service_account_file.
    credentials: Any = None
    # Path to a service-account JSON key, used only when `credentials` is unset.
    service_account_file: str = ""

    # Needed because `credentials` is an arbitrary (non-pydantic) object.
    model_config = ConfigDict(arbitrary_types_allowed=True)
60
+
61
    @property
    def _llm_type(self) -> str:
        """Identifier LangChain uses for this model type (logging/serialization)."""
        return "vertex-ai-claude-custom"
64
+
65
+ @property
66
+ def _identifying_params(self) -> Dict[str, Any]:
67
+ return {
68
+ "model": self.model,
69
+ "max_tokens": self.max_tokens,
70
+ "temperature": self.temperature,
71
+ "project_id": self.project_id,
72
+ "location": self.location,
73
+ }
74
+
75
+ def bind_tools(
76
+ self,
77
+ tools: List[Any],
78
+ *,
79
+ tool_choice: Optional[Union[str, Dict]] = None,
80
+ **kwargs,
81
+ ):
82
+ """Convert tools to Anthropic format and bind via Runnable.bind()."""
83
+ formatted_tools = _convert_tools_to_anthropic(tools)
84
+ bind_kwargs: Dict[str, Any] = {"tools": formatted_tools, **kwargs}
85
+ if tool_choice is not None:
86
+ bind_kwargs["tool_choice"] = tool_choice
87
+ return self.bind(**bind_kwargs)
88
+
89
+ def _get_access_token(self) -> str:
90
+ """Get OAuth2 access token for Vertex AI API."""
91
+ if self.credentials:
92
+ creds = self.credentials
93
+ elif self.service_account_file:
94
+ creds = service_account.Credentials.from_service_account_file(
95
+ self.service_account_file,
96
+ scopes=["https://www.googleapis.com/auth/cloud-platform"],
97
+ )
98
+ else:
99
+ raise ValueError(
100
+ "ChatVertexAIClaude requires either 'credentials' or 'service_account_file'"
101
+ )
102
+
103
+ if not creds.valid or creds.expired:
104
+ creds.refresh(Request())
105
+ return creds.token
106
+
107
    def _convert_messages(
        self, messages: List[BaseMessage]
    ) -> Tuple[Union[str, List[Dict]], List[Dict]]:
        """Convert LangChain messages to Anthropic API format.

        Returns:
            (system, api_messages) tuple.
            system: str or list of content blocks (preserves cache_control).
            api_messages: list of Anthropic-format message dicts.
        """
        system_blocks: List[Any] = []
        raw_messages: List[Dict] = []

        for msg in messages:
            if isinstance(msg, SystemMessage):
                # System messages are collected separately; Anthropic takes
                # them as a top-level "system" field, not in the message list.
                if isinstance(msg.content, str):
                    system_blocks.append({"type": "text", "text": msg.content})
                elif isinstance(msg.content, list):
                    for block in msg.content:
                        if isinstance(block, dict):
                            # Dict blocks kept as-is so cache_control survives.
                            system_blocks.append(block)
                        elif isinstance(block, str):
                            system_blocks.append({"type": "text", "text": block})

            elif isinstance(msg, HumanMessage):
                # Human content (str or block list) passes through unchanged.
                raw_messages.append({"role": "user", "content": msg.content})

            elif isinstance(msg, AIMessage):
                content_blocks = []
                if msg.content:
                    if isinstance(msg.content, str):
                        content_blocks.append(
                            {"type": "text", "text": msg.content}
                        )
                    elif isinstance(msg.content, list):
                        for block in msg.content:
                            if isinstance(block, str):
                                content_blocks.append(
                                    {"type": "text", "text": block}
                                )
                            elif isinstance(block, dict):
                                content_blocks.append(block)
                # Re-emit prior tool calls as tool_use blocks so the model
                # can match them against the following tool_result messages.
                for tc in msg.tool_calls or []:
                    content_blocks.append(
                        {
                            "type": "tool_use",
                            "id": tc["id"],
                            "name": tc["name"],
                            "input": tc["args"],
                        }
                    )
                # NOTE(review): an AIMessage with no content and no tool calls
                # produces an assistant message with content "" — confirm the
                # Anthropic API accepts an empty assistant turn.
                raw_messages.append(
                    {
                        "role": "assistant",
                        "content": content_blocks if content_blocks else "",
                    }
                )

            elif isinstance(msg, ToolMessage):
                # Tool results go back to the API as a user-role message
                # containing a tool_result block keyed by tool_use_id.
                tool_content = msg.content
                if not isinstance(tool_content, str):
                    tool_content = json.dumps(tool_content, ensure_ascii=False)
                raw_messages.append(
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "tool_result",
                                "tool_use_id": msg.tool_call_id,
                                "content": tool_content,
                            }
                        ],
                    }
                )

        # Anthropic rejects consecutive same-role messages (e.g. user turn
        # followed by a user-role tool_result), so merge them here.
        merged = _merge_consecutive_messages(raw_messages)

        # Return system as string (simple) or list (structured with cache_control)
        has_cache_control = any(
            isinstance(b, dict) and "cache_control" in b for b in system_blocks
        )
        if len(system_blocks) == 1 and not has_cache_control:
            # NOTE(review): a single non-text system block (no "text" key)
            # collapses to "" here and is silently dropped — verify intended.
            system: Union[str, List[Dict]] = system_blocks[0].get("text", "")
        elif system_blocks:
            system = system_blocks
        else:
            system = ""

        return system, merged
196
+
197
+ def _build_payload(
198
+ self,
199
+ messages: List[BaseMessage],
200
+ stop: Optional[List[str]] = None,
201
+ **kwargs,
202
+ ) -> Tuple[str, Dict[str, Any], int]:
203
+ """Build API payload shared by _generate and _stream.
204
+
205
+ Returns:
206
+ (access_token, payload, tools_count)
207
+ """
208
+ system, api_messages = self._convert_messages(messages)
209
+ access_token = self._get_access_token()
210
+
211
+ payload: Dict[str, Any] = {
212
+ "anthropic_version": "vertex-2023-10-16",
213
+ "messages": api_messages,
214
+ "max_tokens": self.max_tokens,
215
+ "temperature": self.temperature,
216
+ }
217
+ if system:
218
+ payload["system"] = system
219
+ if stop:
220
+ payload["stop_sequences"] = stop
221
+
222
+ tools = kwargs.get("tools", [])
223
+ if tools:
224
+ payload["tools"] = tools
225
+
226
+ tool_choice = kwargs.get("tool_choice")
227
+ if tool_choice:
228
+ if isinstance(tool_choice, str):
229
+ if tool_choice == "auto":
230
+ payload["tool_choice"] = {"type": "auto"}
231
+ elif tool_choice == "any":
232
+ payload["tool_choice"] = {"type": "any"}
233
+ elif tool_choice == "none":
234
+ payload.pop("tools", None)
235
+ else:
236
+ payload["tool_choice"] = {
237
+ "type": "tool",
238
+ "name": tool_choice,
239
+ }
240
+ elif isinstance(tool_choice, dict):
241
+ payload["tool_choice"] = tool_choice
242
+
243
+ return access_token, payload, len(tools)
244
+
245
+ def _generate(
246
+ self,
247
+ messages: List[BaseMessage],
248
+ stop: Optional[List[str]] = None,
249
+ run_manager: Optional[CallbackManagerForLLMRun] = None,
250
+ **kwargs,
251
+ ) -> ChatResult:
252
+ """Call Vertex AI Claude via rawPredict (non-streaming)."""
253
+ access_token, payload, tools_count = self._build_payload(
254
+ messages, stop, **kwargs
255
+ )
256
+
257
+ url = (
258
+ f"https://{self.location}-aiplatform.googleapis.com/v1/"
259
+ f"projects/{self.project_id}/locations/{self.location}/"
260
+ f"publishers/anthropic/models/{self.model}:rawPredict"
261
+ )
262
+
263
+ logger.info(
264
+ f"[ChatVertexAIClaude] rawPredict: model={self.model}, "
265
+ f"location={self.location}, messages={len(payload['messages'])}, "
266
+ f"tools={tools_count}"
267
+ )
268
+
269
+ data = _http_post_json(url, payload, access_token)
270
+
271
+ # Parse response
272
+ text_parts = []
273
+ tool_calls = []
274
+ for block in data.get("content", []):
275
+ block_type = block.get("type", "")
276
+ if block_type == "text":
277
+ text_parts.append(block.get("text", ""))
278
+ elif block_type == "tool_use":
279
+ tool_calls.append(
280
+ {
281
+ "id": block["id"],
282
+ "name": block["name"],
283
+ "args": block.get("input", {}),
284
+ }
285
+ )
286
+
287
+ usage = data.get("usage", {})
288
+ input_tokens = usage.get("input_tokens", 0)
289
+ output_tokens = usage.get("output_tokens", 0)
290
+
291
+ ai_message = AIMessage(
292
+ content="".join(text_parts),
293
+ tool_calls=tool_calls,
294
+ usage_metadata={
295
+ "input_tokens": input_tokens,
296
+ "output_tokens": output_tokens,
297
+ "total_tokens": input_tokens + output_tokens,
298
+ },
299
+ response_metadata={
300
+ "model": self.model,
301
+ "stop_reason": data.get("stop_reason", ""),
302
+ },
303
+ )
304
+
305
+ logger.info(
306
+ f"[ChatVertexAIClaude] Response: "
307
+ f"text_len={len(ai_message.content)}, "
308
+ f"tool_calls={len(tool_calls)}, "
309
+ f"tokens=({input_tokens}+{output_tokens}={input_tokens + output_tokens})"
310
+ )
311
+
312
+ return ChatResult(
313
+ generations=[ChatGeneration(message=ai_message)],
314
+ llm_output={
315
+ "model": self.model,
316
+ "usage": {
317
+ "input_tokens": input_tokens,
318
+ "output_tokens": output_tokens,
319
+ },
320
+ },
321
+ )
322
+
323
    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs,
    ) -> Iterator[ChatGenerationChunk]:
        """Call Vertex AI Claude via streamRawPredict (streaming).

        Yields ChatGenerationChunk with AIMessageChunk for each SSE event.
        Handles both text and tool_use content blocks.
        """
        access_token, payload, tools_count = self._build_payload(
            messages, stop, **kwargs
        )
        # Anthropic-on-Vertex expects "stream": true in the body itself.
        payload["stream"] = True

        url = (
            f"https://{self.location}-aiplatform.googleapis.com/v1/"
            f"projects/{self.project_id}/locations/{self.location}/"
            f"publishers/anthropic/models/{self.model}:streamRawPredict"
        )

        logger.info(
            f"[ChatVertexAIClaude] streamRawPredict: model={self.model}, "
            f"location={self.location}, messages={len(payload['messages'])}, "
            f"tools={tools_count}"
        )

        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {access_token}",
        }

        # Track state across SSE events
        usage_metadata: Dict[str, int] = {}
        # Map block index -> tool info for tool_use streaming
        tool_use_blocks: Dict[int, Dict[str, Any]] = {}

        try:
            with httpx.Client(timeout=300.0) as client:
                with client.stream(
                    "POST", url, headers=headers, json=payload
                ) as response:
                    # Non-200: drain the body for the error message and bail.
                    if response.status_code != 200:
                        error_body = response.read().decode("utf-8", errors="ignore")
                        error_msg = f"Vertex AI API error: {response.status_code} - {error_body}"
                        logger.error(f"[ChatVertexAIClaude] {error_msg}")
                        raise Exception(error_msg)

                    for line in response.iter_lines():
                        if isinstance(line, bytes):
                            line = line.decode("utf-8")
                        line = line.strip()

                        # Skip SSE keep-alives and "event:" lines; the event
                        # type is also present in the JSON payload itself.
                        if not line or line.startswith("event:"):
                            continue

                        # Strip the "data:" SSE prefix when present.
                        if line.startswith("data:"):
                            data_str = line[5:].strip()
                        else:
                            data_str = line

                        try:
                            data = json.loads(data_str)
                        except json.JSONDecodeError:
                            # Ignore non-JSON noise on the stream.
                            continue

                        if not data:
                            continue

                        event_type = data.get("type", "")

                        if event_type == "message_start":
                            # First event carries the prompt token count.
                            msg = data.get("message", {})
                            if "usage" in msg:
                                usage_metadata["input_tokens"] = msg["usage"].get(
                                    "input_tokens", 0
                                )

                        elif event_type == "content_block_start":
                            block = data.get("content_block", {})
                            index = data.get("index", 0)
                            if block.get("type") == "tool_use":
                                # Start of a tool_use block
                                tool_use_blocks[index] = {
                                    "id": block.get("id", ""),
                                    "name": block.get("name", ""),
                                    "args_json": "",
                                }
                                # Emit a tool_call_chunk carrying name/id; the
                                # args arrive later via input_json_delta.
                                chunk = AIMessageChunk(
                                    content="",
                                    tool_call_chunks=[
                                        {
                                            "name": block.get("name", ""),
                                            "args": "",
                                            "id": block.get("id", ""),
                                            "index": index,
                                        }
                                    ],
                                )
                                yield ChatGenerationChunk(message=chunk)
                                if run_manager:
                                    run_manager.on_llm_new_token(
                                        "", chunk=chunk
                                    )

                        elif event_type == "content_block_delta":
                            delta = data.get("delta", {})
                            index = data.get("index", 0)
                            delta_type = delta.get("type", "")

                            if delta_type == "text_delta":
                                text = delta.get("text", "")
                                if text:
                                    chunk = AIMessageChunk(content=text)
                                    yield ChatGenerationChunk(message=chunk)
                                    if run_manager:
                                        run_manager.on_llm_new_token(
                                            text, chunk=chunk
                                        )

                            elif delta_type == "input_json_delta":
                                # Partial JSON fragment of a tool call's args;
                                # accumulated locally and forwarded as a chunk
                                # (LangChain reassembles by index).
                                partial_json = delta.get("partial_json", "")
                                if index in tool_use_blocks:
                                    tool_use_blocks[index][
                                        "args_json"
                                    ] += partial_json
                                chunk = AIMessageChunk(
                                    content="",
                                    tool_call_chunks=[
                                        {
                                            "name": None,
                                            "args": partial_json,
                                            "id": None,
                                            "index": index,
                                        }
                                    ],
                                )
                                yield ChatGenerationChunk(message=chunk)
                                if run_manager:
                                    run_manager.on_llm_new_token(
                                        "", chunk=chunk
                                    )

                        elif event_type == "message_delta":
                            # NOTE(review): `delta` is assigned but unused here;
                            # output_tokens is read from the event's top-level
                            # "usage" field — confirm against the SSE spec.
                            delta = data.get("delta", {})
                            if "usage" in data:
                                usage_metadata["output_tokens"] = data[
                                    "usage"
                                ].get("output_tokens", 0)

                        elif event_type == "message_stop":
                            # Yield final chunk with usage metadata
                            input_tokens = usage_metadata.get("input_tokens", 0)
                            output_tokens = usage_metadata.get(
                                "output_tokens", 0
                            )
                            chunk = AIMessageChunk(
                                content="",
                                usage_metadata={
                                    "input_tokens": input_tokens,
                                    "output_tokens": output_tokens,
                                    "total_tokens": input_tokens
                                    + output_tokens,
                                },
                                response_metadata={
                                    "model": self.model,
                                },
                            )
                            yield ChatGenerationChunk(message=chunk)

                            logger.info(
                                f"[ChatVertexAIClaude] Stream complete: "
                                f"tokens=({input_tokens}+{output_tokens}"
                                f"={input_tokens + output_tokens})"
                            )

        except httpx.HTTPStatusError as e:
            error_body = e.response.text if e.response else ""
            error_msg = (
                f"Vertex AI API error: {e.response.status_code} - {error_body}"
            )
            logger.error(f"[ChatVertexAIClaude] {error_msg}")
            raise Exception(error_msg) from e
508
+
509
+
510
def _http_post_json(
    url: str, payload: Dict[str, Any], access_token: str
) -> Dict[str, Any]:
    """POST JSON to URL with Bearer auth. Returns parsed JSON response.

    Raises a generic Exception (logged) on any non-200 status, mirroring the
    error format used elsewhere in this module.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {access_token}",
    }
    try:
        with httpx.Client(timeout=300.0) as client:
            resp = client.post(url, headers=headers, json=payload)
            if resp.status_code == 200:
                return resp.json()
            error_msg = (
                f"Vertex AI API error: {resp.status_code} - {resp.text}"
            )
            logger.error(f"[ChatVertexAIClaude] {error_msg}")
            raise Exception(error_msg)
    except httpx.HTTPStatusError as e:
        error_body = e.response.text if e.response else ""
        error_msg = f"Vertex AI API error: {e.response.status_code} - {error_body}"
        logger.error(f"[ChatVertexAIClaude] {error_msg}")
        raise Exception(error_msg) from e
533
+
534
+
535
+ def _convert_tools_to_anthropic(tools: List[Any]) -> List[Dict]:
536
+ """Convert LangChain tools to Anthropic tool format."""
537
+ from langchain_core.utils.function_calling import convert_to_openai_tool
538
+
539
+ anthropic_tools = []
540
+ for tool in tools:
541
+ if isinstance(tool, dict):
542
+ if "input_schema" in tool:
543
+ anthropic_tools.append(tool)
544
+ elif "function" in tool:
545
+ func = tool["function"]
546
+ anthropic_tools.append(
547
+ {
548
+ "name": func["name"],
549
+ "description": func.get("description", ""),
550
+ "input_schema": func.get(
551
+ "parameters",
552
+ {"type": "object", "properties": {}},
553
+ ),
554
+ }
555
+ )
556
+ else:
557
+ anthropic_tools.append(tool)
558
+ else:
559
+ try:
560
+ oai_tool = convert_to_openai_tool(tool)
561
+ func = oai_tool["function"]
562
+ anthropic_tools.append(
563
+ {
564
+ "name": func["name"],
565
+ "description": func.get("description", ""),
566
+ "input_schema": func.get(
567
+ "parameters",
568
+ {"type": "object", "properties": {}},
569
+ ),
570
+ }
571
+ )
572
+ except Exception as e:
573
+ logger.warning(
574
+ f"[ChatVertexAIClaude] Failed to convert tool "
575
+ f"{getattr(tool, 'name', tool)}: {e}"
576
+ )
577
+
578
+ return anthropic_tools
579
+
580
+
581
+ def _merge_consecutive_messages(messages: List[Dict]) -> List[Dict]:
582
+ """Merge consecutive messages with the same role (required by Anthropic API)."""
583
+ if not messages:
584
+ return []
585
+
586
+ merged: List[Dict] = []
587
+ for msg in messages:
588
+ if merged and merged[-1]["role"] == msg["role"]:
589
+ prev_content = merged[-1]["content"]
590
+ curr_content = msg["content"]
591
+
592
+ # Normalize to list of content blocks
593
+ if isinstance(prev_content, str):
594
+ prev_content = [{"type": "text", "text": prev_content}]
595
+ elif not isinstance(prev_content, list):
596
+ prev_content = [prev_content]
597
+
598
+ if isinstance(curr_content, str):
599
+ curr_content = [{"type": "text", "text": curr_content}]
600
+ elif not isinstance(curr_content, list):
601
+ curr_content = [curr_content]
602
+
603
+ merged[-1]["content"] = prev_content + curr_content
604
+ else:
605
+ merged.append(msg)
606
+
607
+ return merged
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: botrun-flow-lang
3
- Version: 6.2.21
3
+ Version: 6.2.62
4
4
  Summary: A flow language for botrun
5
5
  Author-email: sebastian-hsu <sebastian.hsu@gmail.com>
6
6
  License: MIT
@@ -32,7 +32,6 @@ Requires-Dist: langchain-community>=0.3.27
32
32
  Requires-Dist: langchain-core>=1.1.2
33
33
  Requires-Dist: langchain-google-community>=2.0.3
34
34
  Requires-Dist: langchain-google-genai>=4.0.0
35
- Requires-Dist: langchain-google-vertexai<4.0.0,>=3.2.0
36
35
  Requires-Dist: langchain-mcp-adapters>=0.1.7
37
36
  Requires-Dist: langchain-openai>=0.3.28
38
37
  Requires-Dist: langchain>=0.3.27
@@ -25,7 +25,7 @@ botrun_flow_lang/api/youtube_api.py,sha256=9eGr--gR2OoM9JZ6Nf9KqPiE-FeXEx8R-QeJv
25
25
  botrun_flow_lang/langgraph_agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
26
26
  botrun_flow_lang/langgraph_agents/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
27
27
  botrun_flow_lang/langgraph_agents/agents/agent_runner.py,sha256=tiuPIqAcM8rIWBTjo8NS4owTepCsX3QkIHaUEDakOTc,6673
28
- botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py,sha256=hTQf2lzlQy5n64i32e3iQTPwx7dXS3GMwaZqYt21zMc,31352
28
+ botrun_flow_lang/langgraph_agents/agents/langgraph_react_agent.py,sha256=-4ejdM9SiocZyV92wCg1DOKNMBUTO6-wanj-D73RG7k,31156
29
29
  botrun_flow_lang/langgraph_agents/agents/search_agent_graph.py,sha256=hWDPt0U09Gj-3-NNWhsn9xaakYbOcHExIXqcL8TeZxw,32046
30
30
  botrun_flow_lang/langgraph_agents/agents/agent_tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
31
31
  botrun_flow_lang/langgraph_agents/agents/agent_tools/step_planner.py,sha256=S4TYt0ZhgdAZ-2ndH8hJoEaIyDKdNJdWHjEZ49Lg_NQ,2427
@@ -39,6 +39,7 @@ botrun_flow_lang/langgraph_agents/agents/gov_researcher/gov_researcher_graph.py,
39
39
  botrun_flow_lang/langgraph_agents/agents/tools/__init__.py,sha256=-z1uuC3IET02q8kPhPlr-L9eTGJqgHjEJlC__cG16H0,105
40
40
  botrun_flow_lang/langgraph_agents/agents/tools/gemini_code_execution.py,sha256=EEp8xhVU-Kj1Nk5qV8ObqdVZ8gT6GITrE4VyjIc2InA,14238
41
41
  botrun_flow_lang/langgraph_agents/agents/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
42
+ botrun_flow_lang/langgraph_agents/agents/util/custom_vertex_claude.py,sha256=4ZCNILUCJoZMMy_Dh1Slf-KvYuMzhZNamnfDZXhjSQE,24124
42
43
  botrun_flow_lang/langgraph_agents/agents/util/gemini_grounding.py,sha256=JTfH9WJNDlpvMvfzXyZy3bHeCN58MTnEOiamQGMsqh0,2884
43
44
  botrun_flow_lang/langgraph_agents/agents/util/html_util.py,sha256=g5yJO0qTqRq_kb-xhSnWX3WAbHDIjNQYl7ErRBPQwHs,13230
44
45
  botrun_flow_lang/langgraph_agents/agents/util/img_util.py,sha256=6OERtpGGimlev4Pb_O1UbMNaT_DMBHSmAgo9gB-R8xk,12385
@@ -99,6 +100,6 @@ botrun_flow_lang/utils/yaml_utils.py,sha256=dPlabIol-Clhnwc7N5nuffCaLSq8dyvmvjRw
99
100
  botrun_flow_lang/utils/clients/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
100
101
  botrun_flow_lang/utils/clients/rate_limit_client.py,sha256=96NNCHB9I5C5bpVFF6sfPhmh4oAx3UdOLb-Z4PAXLdg,8558
101
102
  botrun_flow_lang/utils/clients/token_verify_client.py,sha256=-AnYApJ9CvxVn-RhCCZZ2LCrf065fgskhwLKAm-aiN0,5893
102
- botrun_flow_lang-6.2.21.dist-info/METADATA,sha256=oF4wNjTpal4vp58e5a0le51qAekPxomZJwOrzR-2uXs,6219
103
- botrun_flow_lang-6.2.21.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
104
- botrun_flow_lang-6.2.21.dist-info/RECORD,,
103
+ botrun_flow_lang-6.2.62.dist-info/METADATA,sha256=ZI_XX1JwrqTbJ_SzdzSm1y1Iw882DxfAv2ESCLhJxNw,6164
104
+ botrun_flow_lang-6.2.62.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
105
+ botrun_flow_lang-6.2.62.dist-info/RECORD,,