langchain-dev-utils 1.2.7__py3-none-any.whl → 1.2.9__py3-none-any.whl

@@ -1 +1 @@
-__version__ = "1.2.7"
+__version__ = "1.2.9"
@@ -8,9 +8,10 @@ def _check_pkg_install(
     pkg: Literal["langchain_openai", "json_repair"],
 ) -> None:
     if not util.find_spec(pkg):
-        msg = (
-            "Please install langchain_dev_utils[standard],when use 'openai-compatible'"
-        )
+        if pkg == "langchain_openai":
+            msg = "Please install langchain_dev_utils[standard],when use 'openai-compatible'"
+        else:
+            msg = "Please install langchain_dev_utils[standard] to use ToolCallRepairMiddleware."
         raise ImportError(msg)


@@ -20,7 +20,7 @@ from langchain_core.callbacks import (
     CallbackManagerForLLMRun,
 )
 from langchain_core.language_models import LangSmithParams, LanguageModelInput
-from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
+from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage, HumanMessage
 from langchain_core.outputs import ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
@@ -39,47 +39,63 @@ from pydantic import (
 )
 from typing_extensions import Self

-from ..types import CompatibilityOptions, ToolChoiceType
+from ..types import (
+    CompatibilityOptions,
+    ReasoningKeepPolicy,
+    ResponseFormatType,
+    ToolChoiceType,
+)

 _BM = TypeVar("_BM", bound=BaseModel)
 _DictOrPydanticClass = Union[dict[str, Any], type[_BM], type]
 _DictOrPydantic = Union[dict, _BM]


+def _get_last_human_message_index(messages: list[BaseMessage]) -> int:
+    """find the index of the last HumanMessage in the messages list, return -1 if not found."""
+    return next(
+        (
+            i
+            for i in range(len(messages) - 1, -1, -1)
+            if isinstance(messages[i], HumanMessage)
+        ),
+        -1,
+    )
+
+
 class _BaseChatOpenAICompatible(BaseChatOpenAI):
     """
     Base template class for OpenAI-compatible chat model implementations.

-    This class provides a foundation for integrating various LLM providers that
-    offer OpenAI-compatible APIs (such as vLLM, OpenRouter, ZAI, Moonshot,
+    This class provides a foundation for integrating various LLM providers that
+    offer OpenAI-compatible APIs (such as vLLM, OpenRouter, ZAI, Moonshot,
     and many others). It enhances the base OpenAI functionality by:

     **1. Supports output of more types of reasoning content (reasoning_content)**
-    ChatOpenAI can only output reasoning content natively supported by official
-    OpenAI models, while OpenAICompatibleChatModel can output reasoning content
-    from other model providers (e.g., OpenRouter).
+    ChatOpenAI can only output reasoning content natively supported by official
+    OpenAI models, while OpenAICompatibleChatModel can output reasoning content
+    from other model providers (e.g., OpenRouter, vLLM).

-    **2. Optimizes default behavior for structured output**
-    When calling with_structured_output, the default value of the method
-    parameter is adjusted to "function_calling" (instead of the default
-    "json_schema" in ChatOpenAI), providing better compatibility with other
-    models.
+    **2. Dynamically adapts to choose the most suitable structured-output method**
+    OpenAICompatibleChatModel adds method="auto" (default), which selects the best
+    structured-output method (function_calling or json_schema) based on the actual
+    capabilities of the model provider.

     **3. Supports configuration of related parameters**
-    For cases where parameters differ from the official OpenAI API, this library
-    provides the compatibility_options parameter to address this issue. For
-    example, when different model providers have inconsistent support for
-    tool_choice, you can adapt by setting supported_tool_choice in
+    For cases where parameters differ from the official OpenAI API, this library
+    provides the compatibility_options parameter to address this issue. For
+    example, when different model providers have inconsistent support for
+    tool_choice, you can adapt by setting supported_tool_choice in
     compatibility_options.

-    Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class
-    extends capabilities to better support diverse OpenAI-compatible model
-    providers while maintaining full compatibility with LangChain's chat model
+    Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class
+    extends capabilities to better support diverse OpenAI-compatible model
+    providers while maintaining full compatibility with LangChain's chat model
     interface.

-    Note: This is a template class and should not be exported or instantiated
-    directly. Instead, use it as a base class and provide the specific provider
-    name through inheritance or the factory function
+    Note: This is a template class and should not be exported or instantiated
+    directly. Instead, use it as a base class and provide the specific provider
+    name through inheritance or the factory function
     `_create_openai_compatible_model()`.
     """

@@ -101,29 +117,13 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
     """Provider Compatibility Options"""
     supported_tool_choice: ToolChoiceType = Field(default_factory=list)
     """Supported tool choice"""
-    keep_reasoning_content: bool = Field(default=False)
-    """Whether to keep reasoning content in the messages"""
-    support_json_mode: bool = Field(default=False)
-    """Whether to support JSON mode"""
+    supported_response_format: ResponseFormatType = Field(default_factory=list)
+    """Supported response format"""
+    reasoning_keep_policy: ReasoningKeepPolicy = Field(default="never")
+    """How to keep reasoning content in the messages"""
     include_usage: bool = Field(default=True)
     """Whether to include usage information in the output"""

-    @property
-    def _supported_tool_choice(self) -> ToolChoiceType:
-        return self.supported_tool_choice
-
-    @property
-    def _keep_reasoning_content(self) -> bool:
-        return self.keep_reasoning_content
-
-    @property
-    def _support_json_mode(self) -> bool:
-        return self.support_json_mode
-
-    @property
-    def _include_usage(self) -> bool:
-        return self.include_usage
-
     @property
     def _llm_type(self) -> str:
         return f"chat-{self._provider}"
@@ -158,18 +158,29 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
            kwargs["stop"] = stop

        payload_messages = []
+        last_human_index = -1
+        if self.reasoning_keep_policy == "current":
+            last_human_index = _get_last_human_message_index(messages)

-        for m in messages:
+        for index, m in enumerate(messages):
            if isinstance(m, AIMessage):
                msg_dict = _convert_message_to_dict(
                    _convert_from_v1_to_chat_completions(m)
                )
-                if self._keep_reasoning_content and m.additional_kwargs.get(
+                if self.reasoning_keep_policy == "all" and m.additional_kwargs.get(
                    "reasoning_content"
                ):
                    msg_dict["reasoning_content"] = m.additional_kwargs.get(
                        "reasoning_content"
                    )
+                elif (
+                    self.reasoning_keep_policy == "current"
+                    and index > last_human_index
+                    and m.additional_kwargs.get("reasoning_content")
+                ):
+                    msg_dict["reasoning_content"] = m.additional_kwargs.get(
+                        "reasoning_content"
+                    )
                payload_messages.append(msg_dict)
            else:
                payload_messages.append(_convert_message_to_dict(m))
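
The new `reasoning_keep_policy` field generalizes the old boolean `keep_reasoning_content`: `"never"` drops reasoning from the outgoing payload, `"all"` replays it for every AIMessage, and `"current"` replays it only for AIMessages after the last HumanMessage, i.e. the current turn. Below is a standalone sketch of that selection rule; the `kept_reasoning_indices` helper is hypothetical, written only to illustrate the policy semantics:

```python
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage


def kept_reasoning_indices(messages: list[BaseMessage], policy: str) -> list[int]:
    """Indices of AIMessages whose reasoning_content would be resent to the API."""
    # Mirrors _get_last_human_message_index: scan backwards for the last HumanMessage.
    last_human = next(
        (i for i in range(len(messages) - 1, -1, -1) if isinstance(messages[i], HumanMessage)),
        -1,
    )
    kept = []
    for i, m in enumerate(messages):
        if not (isinstance(m, AIMessage) and m.additional_kwargs.get("reasoning_content")):
            continue
        if policy == "all" or (policy == "current" and i > last_human):
            kept.append(i)
    return kept


history = [
    HumanMessage("first question"),
    AIMessage("first answer", additional_kwargs={"reasoning_content": "old thoughts"}),
    HumanMessage("follow-up"),
    AIMessage("draft answer", additional_kwargs={"reasoning_content": "fresh thoughts"}),
]
assert kept_reasoning_indices(history, "never") == []
assert kept_reasoning_indices(history, "current") == [3]  # only the current turn
assert kept_reasoning_indices(history, "all") == [1, 3]
```
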
@@ -316,7 +327,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
-        if self._include_usage:
+        if self.include_usage:
            kwargs["stream_options"] = {"include_usage": True}
        try:
            for chunk in super()._stream(
@@ -338,7 +349,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
-        if self._include_usage:
+        if self.include_usage:
            kwargs["stream_options"] = {"include_usage": True}
        try:
            async for chunk in super()._astream(
@@ -422,11 +433,11 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
        if isinstance(tool_choice, str):
            if (
                tool_choice in ["auto", "none", "required"]
-                and tool_choice in self._supported_tool_choice
+                and tool_choice in self.supported_tool_choice
            ):
                support_tool_choice = True

-            elif "specific" in self._supported_tool_choice:
+            elif "specific" in self.supported_tool_choice:
                if tool_choice in tool_names:
                    support_tool_choice = True
                    tool_choice = {
@@ -443,10 +454,11 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
        schema: Optional[_DictOrPydanticClass] = None,
        *,
        method: Literal[
+            "auto",
            "function_calling",
            "json_mode",
            "json_schema",
-        ] = "function_calling",
+        ] = "auto",
        include_raw: bool = False,
        strict: Optional[bool] = None,
        **kwargs: Any,
@@ -460,7 +472,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):

        Args:
            schema: Output schema (Pydantic model class or dictionary definition)
-            method: Extraction method - defaults to function_calling for compatibility
+            method: Extraction method - defaults to auto,it will choice best method based on provider supported response format
            include_raw: Whether to include raw model response alongside parsed output
            strict: Schema enforcement strictness (provider-dependent)
            **kwargs: Additional structured output parameters
@@ -468,10 +480,23 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
        Returns:
            Runnable configured for structured output extraction
        """
-        # Many providers do not support json_schema method, so fallback to function_calling
-        if method == "json_schema":
+        if method not in ["auto", "function_calling", "json_mode", "json_schema"]:
+            raise ValueError(
+                f"Unsupported method: {method}. Please choose from 'auto', 'function_calling', 'json_mode', 'json_schema'."
+            )
+        if method == "auto":
+            if "json_schema" in self.supported_response_format:
+                method = "json_schema"
+            else:
+                method = "function_calling"
+        elif (
+            method == "json_schema"
+            and "json_schema" not in self.supported_response_format
+        ):
            method = "function_calling"
-        if method == "json_mode" and not self._support_json_mode:
+        elif (
+            method == "json_mode" and "json_mode" not in self.supported_response_format
+        ):
            method = "function_calling"

        return super().with_structured_output(
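
In effect, `method="auto"` prefers `json_schema` when the provider declares it in `supported_response_format` and otherwise falls back to `function_calling`; explicit `json_schema` or `json_mode` requests are likewise downgraded when unsupported. A condensed, standalone restatement of the fallback rules for illustration:

```python
def resolve_method(method: str, supported_response_format: list[str]) -> str:
    # Same fallback rules as the diff above, extracted as a pure function.
    if method == "auto":
        return "json_schema" if "json_schema" in supported_response_format else "function_calling"
    if method == "json_schema" and "json_schema" not in supported_response_format:
        return "function_calling"
    if method == "json_mode" and "json_mode" not in supported_response_format:
        return "function_calling"
    return method


assert resolve_method("auto", ["json_schema"]) == "json_schema"
assert resolve_method("auto", []) == "function_calling"
assert resolve_method("json_mode", ["json_schema"]) == "function_calling"
assert resolve_method("function_calling", []) == "function_calling"
```
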
@@ -502,8 +527,8 @@ def _create_openai_compatible_model(
        Configured model class ready for instantiation with provider-specific settings
    """
    chat_model_cls_name = f"Chat{provider.title()}"
-
-    compatibility_options = compatibility_options or {}
+    if compatibility_options is None:
+        compatibility_options = {}

    return create_model(
        chat_model_cls_name,
@@ -532,13 +557,13 @@ def _create_openai_compatible_model(
            ToolChoiceType,
            Field(default=compatibility_options.get("supported_tool_choice", ["auto"])),
        ),
-        keep_reasoning_content=(
-            bool,
-            Field(default=compatibility_options.get("keep_reasoning_content", False)),
+        reasoning_keep_policy=(
+            ReasoningKeepPolicy,
+            Field(default=compatibility_options.get("reasoning_keep_policy", "never")),
        ),
-        support_json_mode=(
-            bool,
-            Field(default=compatibility_options.get("support_json_mode", False)),
+        supported_response_format=(
+            ResponseFormatType,
+            Field(default=compatibility_options.get("supported_response_format", [])),
        ),
        include_usage=(
            bool,
@@ -7,9 +7,13 @@ ChatModelType = Union[type[BaseChatModel], Literal["openai-compatible"]]

 ToolChoiceType = list[Literal["auto", "none", "required", "specific"]]

+ResponseFormatType = list[Literal["json_schema", "json_mode"]]
+
+ReasoningKeepPolicy = Literal["never", "current", "all"]
+

 class CompatibilityOptions(TypedDict):
     supported_tool_choice: NotRequired[ToolChoiceType]
-    keep_reasoning_content: NotRequired[bool]
-    support_json_mode: NotRequired[bool]
+    supported_response_format: NotRequired[ResponseFormatType]
+    reasoning_keep_policy: NotRequired[ReasoningKeepPolicy]
     include_usage: NotRequired[bool]
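
Under the 1.2.9 schema, a provider profile might look like the following. This is a hypothetical example; the import path is assumed from the package layout in the RECORD below:

```python
from langchain_dev_utils.chat_models.types import CompatibilityOptions

# Hypothetical profile for a provider that supports every tool_choice mode,
# both structured-output response formats, and replays reasoning content
# for the current turn only.
options: CompatibilityOptions = {
    "supported_tool_choice": ["auto", "none", "required", "specific"],
    "supported_response_format": ["json_schema", "json_mode"],
    "reasoning_keep_policy": "current",
    "include_usage": True,
}
```
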
@@ -218,30 +218,30 @@ def load_embeddings(
    ):
        raise ValueError(f"Provider {provider} not registered")

-    if provider in _SUPPORTED_PROVIDERS:
-        return init_embeddings(model, provider=provider, **kwargs)
-
-    embeddings = _EMBEDDINGS_PROVIDERS_DICT[provider]["embeddings_model"]
-    if isinstance(embeddings, str):
-        if not (api_key := kwargs.get("api_key")):
-            api_key = secret_from_env(f"{provider.upper()}_API_KEY", default=None)()
-            if not api_key:
-                raise ValueError(
-                    f"API key for {provider} not found. Please set it in the environment."
-                )
-        kwargs["api_key"] = api_key
-        if embeddings == "openai-compatible":
-            kwargs["check_embedding_ctx_length"] = False
-            embeddings = "openai"
-        return init_embeddings(
-            model=model,
-            provider=embeddings,
-            base_url=_EMBEDDINGS_PROVIDERS_DICT[provider]["base_url"],
-            **kwargs,
-        )
+    if provider in _EMBEDDINGS_PROVIDERS_DICT:
+        embeddings = _EMBEDDINGS_PROVIDERS_DICT[provider]["embeddings_model"]
+        if isinstance(embeddings, str):
+            if not (api_key := kwargs.get("api_key")):
+                api_key = secret_from_env(f"{provider.upper()}_API_KEY", default=None)()
+                if not api_key:
+                    raise ValueError(
+                        f"API key for {provider} not found. Please set it in the environment."
+                    )
+            kwargs["api_key"] = api_key
+            if embeddings == "openai-compatible":
+                kwargs["check_embedding_ctx_length"] = False
+                embeddings = "openai"
+            return init_embeddings(
+                model=model,
+                provider=embeddings,
+                base_url=_EMBEDDINGS_PROVIDERS_DICT[provider]["base_url"],
+                **kwargs,
+            )
+        else:
+            if base_url := _EMBEDDINGS_PROVIDERS_DICT[provider].get("base_url"):
+                url_key = _get_base_url_field_name(embeddings)
+                if url_key is not None:
+                    kwargs.update({url_key: base_url})
+            return embeddings(model=model, **kwargs)
    else:
-        if base_url := _EMBEDDINGS_PROVIDERS_DICT[provider].get("base_url"):
-            url_key = _get_base_url_field_name(embeddings)
-            if url_key is not None:
-                kwargs.update({url_key: base_url})
-        return embeddings(model=model, **kwargs)
+        return init_embeddings(model, provider=provider, **kwargs)
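
The rewrite inverts the old control flow: providers registered in `_EMBEDDINGS_PROVIDERS_DICT` are now resolved first (string entries go through `init_embeddings` with the registered `base_url`; class entries are instantiated directly with the URL injected), and only unregistered providers fall through to `init_embeddings`. A minimal sketch of the new dispatch order, using a stand-in registry rather than the real module-level one:

```python
from typing import Any

# Stand-in for the module-level _EMBEDDINGS_PROVIDERS_DICT registry.
REGISTRY: dict[str, dict[str, Any]] = {
    "vllm": {"embeddings_model": "openai-compatible", "base_url": "http://localhost:8000/v1"},
}


def dispatch(provider: str) -> str:
    """Report which branch of the rewritten load_embeddings would handle `provider`."""
    if provider not in REGISTRY:
        return "built-in: delegate to init_embeddings(model, provider=provider)"
    entry = REGISTRY[provider]["embeddings_model"]
    if isinstance(entry, str):
        return "registered string: resolve API key, then init_embeddings with base_url"
    return "registered class: inject base_url via its URL field and instantiate"


print(dispatch("vllm"))    # registered string branch
print(dispatch("openai"))  # falls through to init_embeddings
```
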
@@ -52,8 +52,10 @@ def convert_reasoning_content_for_ai_message(
    reasoning_content = _get_reasoning_content(model_response)

    if reasoning_content:
-        model_response.content = (
-            f"{think_tag[0]}{reasoning_content}{think_tag[1]}{model_response.content}"
+        return model_response.model_copy(
+            update={
+                "content": f"{think_tag[0]}{reasoning_content}{think_tag[1]}{model_response.content}"
+            }
        )
    return model_response

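The practical difference is that the converter no longer mutates the caller's message: `model_copy(update=...)` is standard Pydantic v2 and returns a new object with the given fields replaced, leaving the original untouched. A small demonstration of that semantics (the `<think>` tags here stand in for whatever `think_tag` pair is configured):

```python
from langchain_core.messages import AIMessage

msg = AIMessage(
    content="The answer is 4.",
    additional_kwargs={"reasoning_content": "2 + 2 = 4"},
)

# model_copy returns a new message; the original stays untouched.
wrapped = msg.model_copy(
    update={"content": f"<think>{msg.additional_kwargs['reasoning_content']}</think>{msg.content}"}
)

assert msg.content == "The answer is 4."
assert wrapped.content == "<think>2 + 2 = 4</think>The answer is 4."
```
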
@@ -99,12 +101,16 @@ def convert_reasoning_content_for_chunk_iterator(
        reasoning_content = _get_reasoning_content(chunk)
        if reasoning_content:
            if isfirst:
-                chunk.content = f"{think_tag[0]}{reasoning_content}"
+                chunk = chunk.model_copy(
+                    update={"content": f"{think_tag[0]}{reasoning_content}"}
+                )
                isfirst = False
            else:
-                chunk.content = reasoning_content
+                chunk = chunk.model_copy(update={"content": reasoning_content})
        elif chunk.content and isend and not isfirst:
-            chunk.content = f"{think_tag[1]}{chunk.content}"
+            chunk = chunk.model_copy(
+                update={"content": f"{think_tag[1]}{chunk.content}"}
+            )
            isend = False
        yield chunk

@@ -149,12 +155,16 @@ async def aconvert_reasoning_content_for_chunk_iterator(
        reasoning_content = _get_reasoning_content(chunk)
        if reasoning_content:
            if isfirst:
-                chunk.content = f"{think_tag[0]}{reasoning_content}"
+                chunk = chunk.model_copy(
+                    update={"content": f"{think_tag[0]}{reasoning_content}"}
+                )
                isfirst = False
            else:
-                chunk.content = reasoning_content
+                chunk = chunk.model_copy(update={"content": reasoning_content})
        elif chunk.content and isend and not isfirst:
-            chunk.content = f"{think_tag[1]}{chunk.content}"
+            chunk = chunk.model_copy(
+                update={"content": f"{think_tag[1]}{chunk.content}"}
+            )
            isend = False
        yield chunk

@@ -62,20 +62,20 @@ def parse_tool_calling(
    ...     tool_calls = parse_tool_calling(response)
    """

-    tool_call = None
+    tool_calls = None

    tool_call_blocks = [
        block for block in message.content_blocks if block["type"] == "tool_call"
    ]
    if tool_call_blocks:
-        tool_call = tool_call_blocks
+        tool_calls = tool_call_blocks

-    if not tool_call:
-        tool_call = message.tool_calls
+    if not tool_calls:
+        tool_calls = message.tool_calls

-    if not tool_call:
+    if not tool_calls:
        raise ValueError("No tool call found in message")

    if first_tool_call_only:
-        return (tool_call[0]["name"], tool_call[0]["args"])
-    return [(tool_call["name"], tool_call["args"]) for tool_call in tool_call]
+        return (tool_calls[0]["name"], tool_calls[0]["args"])
+    return [(tool_call["name"], tool_call["args"]) for tool_call in tool_calls]
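
The rename is cosmetic (the old loop variable shadowed the accumulator), but it clarifies the contract: the function returns a list of `(name, args)` pairs, or a single pair when `first_tool_call_only=True`. A hedged usage sketch; the import path is assumed from the RECORD layout below:

```python
from langchain_core.messages import AIMessage
from langchain_dev_utils.tool_calling import parse_tool_calling  # path assumed

message = AIMessage(
    content="",
    tool_calls=[{"name": "get_current_time", "args": {}, "id": "call_1"}],
)

print(parse_tool_calling(message))
# [("get_current_time", {})]
print(parse_tool_calling(message, first_tool_call_only=True))
# ("get_current_time", {})
```
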
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-dev-utils
-Version: 1.2.7
+Version: 1.2.9
 Summary: A practical utility library for LangChain and LangGraph development
 Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
 Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -192,52 +192,66 @@ def get_current_time() -> str:

 ### 4. **Agent Development**

-Includes the following features:
+Includes the following capabilities:

-- Predefined agent factory functions
-- Common middleware components
+- Multi-agent construction
+- Commonly used middleware components

-#### 4.1 Agent Factory Functions
+#### 4.1 Multi-Agent Construction

-In LangChain v1, the official `create_agent` function can be used to create a single agent; its `model` parameter accepts either a BaseChatModel instance or a specific string (when a string is provided, only models supported by `init_chat_model` are allowed). To extend the flexibility of specifying models via string, this library provides an equivalent `create_agent` function that lets you designate any model supported by `load_chat_model` (registration required beforehand).
+Wrapping an agent as a tool is a common implementation pattern in multi-agent systems, as elaborated in the official LangChain documentation. To support this pattern, this library provides a pre-built utility function `wrap_agent_as_tool`, which encapsulates an agent instance into a tool that can be invoked by other agents.

-Usage example:
+**Usage Example**:

 ```python
-from langchain_dev_utils.agents import create_agent
+import datetime
+from langchain_dev_utils.agents import create_agent, wrap_agent_as_tool
 from langchain.agents import AgentState

+
+@tool
+def get_current_time() -> str:
+    """Get the current time."""
+    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+
 agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
-response = agent.invoke({"messages": [{"role": "user", "content": "What time is it?"}]})
+call_time_agent_tool = wrap_agent_as_tool(agent)
+response = call_time_agent_tool.invoke(
+    {"messages": [{"role": "user", "content": "What time is it now?"}]}
+)
 print(response)
 ```

 #### 4.2 Middleware

-Provides some commonly used middleware components. Below, we illustrate with `ToolCallRepairMiddleware` and `PlanMiddleware`.
-
-`ToolCallRepairMiddleware` is used to repair `invalid_tool_calls` generated by large language models.
+Provides several commonly used middleware components. Below are examples using `ToolCallRepairMiddleware` and `PlanMiddleware`.

-`PlanMiddleware` is used for agent planning.
+- `ToolCallRepairMiddleware` automatically repairs malformed tool calls found in the model's `invalid_tool_calls` output.
+- `PlanMiddleware` enables task planning capabilities for agents.

 ```python
 from langchain_dev_utils.agents.middleware import (
-    ToolcallRepairMiddleware,
+    ToolCallRepairMiddleware,
     PlanMiddleware,
 )

 agent = create_agent(
     "vllm:qwen3-4b",
     name="plan-agent",
-    middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
-        use_read_plan_tool=False
-    )]
+    middleware=[
+        ToolCallRepairMiddleware(),
+        PlanMiddleware(use_read_plan_tool=False)
+    ]
 )
-response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
+response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan for visiting New York."}]})
 print(response)
 ```

-**For more information about agent development and all built-in middleware, please refer to**: [Pre-built Agent Functions](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/prebuilt.html), [Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/middleware.html)
+**For more details on agent development and a complete list of built-in middleware, please refer to**:
+[Multi-Agent Construction](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/multi-agent.html),
+[Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/middleware.html)
+


 ### 5. **State Graph Orchestration**
@@ -1,5 +1,5 @@
-langchain_dev_utils/__init__.py,sha256=49prCLbE3fFzLfxem5rd2dr1iV4_L-bN0N4J7jxU5yA,22
-langchain_dev_utils/_utils.py,sha256=8Y8qzE9tWuF2UoDGa6xrTyEZRWtOmrGvXNroIF0SOCU,1207
+langchain_dev_utils/__init__.py,sha256=Oh3Y6CIypkhAjW-aquBTyP3_cA-gKgKTwq9EpcWpjps,22
+langchain_dev_utils/_utils.py,sha256=MFEzR1BjXMj6HEVwt2x2omttFuDJ_rYAEbNqe99r9pM,1338
 langchain_dev_utils/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain_dev_utils/agents/__init__.py,sha256=PJ-lSDZv_AXMYA3H4fx-HzJa14tPbkGmq1HX8LNfaPo,125
 langchain_dev_utils/agents/factory.py,sha256=XdGjktksfTDys7X4SgfPrQz10HUo5fTNAWESDQenIlE,3728
@@ -16,13 +16,13 @@ langchain_dev_utils/agents/middleware/tool_emulator.py,sha256=OgtPhqturaWzF4fRSJ
 langchain_dev_utils/agents/middleware/tool_selection.py,sha256=dRH5ejR6N02Djwxt6Gd63MYkg6SV5pySlzaRt53OoZk,3113
 langchain_dev_utils/chat_models/__init__.py,sha256=YSLUyHrWEEj4y4DtGFCOnDW02VIYZdfAH800m4Klgeg,224
 langchain_dev_utils/chat_models/base.py,sha256=CVMfgqMRnIKv8z4babusa2c4RKVuiWTL39mPD8cHAf4,11880
-langchain_dev_utils/chat_models/types.py,sha256=ch0t30oqjR-nePXWt_U5ybTohKeBZ1snTIOeQUEEBa8,510
+langchain_dev_utils/chat_models/types.py,sha256=kVLbT-IbvNtWPVmyVmh58le5r8XCqrEwuFB9-TWCBJk,672
 langchain_dev_utils/chat_models/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=sG_qvdGyYN3bRFN28OyuR57tiqMv5QCq1gNA1QHAWco,20273
+langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=YgC8ups0owOVmY-Fwi7oxiktsC9LG6UZka8XNTCBW9g,21457
 langchain_dev_utils/embeddings/__init__.py,sha256=zbEOaV86TUi9Zrg_dH9dpdgacWg31HMJTlTQknA9EKk,244
-langchain_dev_utils/embeddings/base.py,sha256=l4uCB5ecr3GAkfYGpYxqamOPIM6fkP1H_QK-277YEic,9295
+langchain_dev_utils/embeddings/base.py,sha256=BGoWY0L7nG9iRV3d4sSagXhECXrwvS1xA-A_OVltn3k,9406
 langchain_dev_utils/message_convert/__init__.py,sha256=ZGrHGXPKMrZ_p9MqfIVZ4jgbEyb7aC4Q7X-muuThIYU,457
-langchain_dev_utils/message_convert/content.py,sha256=LhrFXL1zYkkpp4ave6SBorDLig5xnllQ2VYCgFz-eR4,7681
+langchain_dev_utils/message_convert/content.py,sha256=2V1g21byg3iLv5RjUW8zv3jwYwV7IH2hNim7jGRsIes,8096
 langchain_dev_utils/message_convert/format.py,sha256=1TOcJ09atH7LRtn_IIuBshKDXAyqoy3Q9b0Po-S-F9g,2377
 langchain_dev_utils/pipeline/__init__.py,sha256=eE6WktaLHDkqMeXDIDaLtm-OPTwtsX_Av8iK9uYrceo,186
 langchain_dev_utils/pipeline/parallel.py,sha256=nwZWbdSNeyanC9WufoJBTceotgT--UnPOfStXjgNMOc,5271
@@ -30,8 +30,8 @@ langchain_dev_utils/pipeline/sequential.py,sha256=sYJXQzVHDKUc-UV-HMv38JTPnse1A7
 langchain_dev_utils/pipeline/types.py,sha256=T3aROKKXeWvd0jcH5XkgMDQfEkLfPaiOhhV2q58fDHs,112
 langchain_dev_utils/tool_calling/__init__.py,sha256=mu_WxKMcu6RoTf4vkTPbA1WSBSNc6YIqyBtOQ6iVQj4,322
 langchain_dev_utils/tool_calling/human_in_the_loop.py,sha256=7Z_QO5OZUR6K8nLoIcafc6osnvX2IYNorOJcbx6bVso,9672
-langchain_dev_utils/tool_calling/utils.py,sha256=W2ZRRMhn7SHHZxFfCXVaPIh2uFkY2XkO6EWrdRuv6VE,2757
-langchain_dev_utils-1.2.7.dist-info/METADATA,sha256=roKP7w_tdYwiRQiELlz1Bt_0U5F3uaE-cwa8iqk46-8,13100
-langchain_dev_utils-1.2.7.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
-langchain_dev_utils-1.2.7.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
-langchain_dev_utils-1.2.7.dist-info/RECORD,,
+langchain_dev_utils/tool_calling/utils.py,sha256=S4-KXQ8jWmpGTXYZitovF8rxKpaSSUkFruM8LDwvcvE,2765
+langchain_dev_utils-1.2.9.dist-info/METADATA,sha256=zzBTgY8EUmuz08ofh1t1letaKywRXuqQgfPdqWBVw2Q,13279
+langchain_dev_utils-1.2.9.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+langchain_dev_utils-1.2.9.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
+langchain_dev_utils-1.2.9.dist-info/RECORD,,