langchain-dev-utils 1.2.6__py3-none-any.whl → 1.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. langchain_dev_utils/__init__.py +1 -1
  2. langchain_dev_utils/_utils.py +5 -2
  3. langchain_dev_utils/agents/__init__.py +0 -1
  4. langchain_dev_utils/agents/factory.py +2 -10
  5. langchain_dev_utils/agents/file_system.py +1 -1
  6. langchain_dev_utils/agents/middleware/__init__.py +2 -0
  7. langchain_dev_utils/agents/middleware/model_fallback.py +1 -1
  8. langchain_dev_utils/agents/middleware/model_router.py +37 -46
  9. langchain_dev_utils/agents/middleware/plan.py +17 -18
  10. langchain_dev_utils/agents/middleware/summarization.py +6 -4
  11. langchain_dev_utils/agents/middleware/tool_call_repair.py +96 -0
  12. langchain_dev_utils/agents/middleware/tool_emulator.py +3 -3
  13. langchain_dev_utils/agents/middleware/tool_selection.py +3 -3
  14. langchain_dev_utils/agents/plan.py +1 -1
  15. langchain_dev_utils/agents/wrap.py +8 -20
  16. langchain_dev_utils/chat_models/adapters/openai_compatible.py +33 -17
  17. langchain_dev_utils/chat_models/base.py +30 -15
  18. langchain_dev_utils/chat_models/types.py +0 -1
  19. langchain_dev_utils/embeddings/base.py +35 -18
  20. langchain_dev_utils/message_convert/__init__.py +0 -1
  21. langchain_dev_utils/message_convert/content.py +8 -11
  22. langchain_dev_utils/message_convert/format.py +2 -2
  23. langchain_dev_utils/pipeline/parallel.py +10 -41
  24. langchain_dev_utils/pipeline/sequential.py +6 -21
  25. langchain_dev_utils/tool_calling/human_in_the_loop.py +6 -6
  26. langchain_dev_utils/tool_calling/utils.py +3 -3
  27. {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.7.dist-info}/METADATA +24 -119
  28. langchain_dev_utils-1.2.7.dist-info/RECORD +37 -0
  29. langchain_dev_utils-1.2.6.dist-info/RECORD +0 -36
  30. {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.7.dist-info}/WHEEL +0 -0
  31. {langchain_dev_utils-1.2.6.dist-info → langchain_dev_utils-1.2.7.dist-info}/licenses/LICENSE +0 -0
langchain_dev_utils/__init__.py
@@ -1 +1 @@
- __version__ = "1.2.6"
+ __version__ = "1.2.7"
langchain_dev_utils/_utils.py
@@ -1,10 +1,13 @@
  from importlib import util
+ from typing import Literal

  from pydantic import BaseModel


- def _check_langchain_openai_install() -> None:
-     if not util.find_spec("langchain_openai"):
+ def _check_pkg_install(
+     pkg: Literal["langchain_openai", "json_repair"],
+ ) -> None:
+     if not util.find_spec(pkg):
          msg = (
              "Please install langchain_dev_utils[standard],when use 'openai-compatible'"
          )
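
For illustration, a minimal sketch of how the generalized helper is called, mirroring its use in the new `tool_call_repair.py` later in this diff:

```python
from langchain_dev_utils._utils import _check_pkg_install

# Raises with the install hint above if the optional package is missing;
# "json_repair" is the second Literal value, used by the new
# ToolCallRepairMiddleware further down this diff.
_check_pkg_install("json_repair")
```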
langchain_dev_utils/agents/__init__.py
@@ -1,5 +1,4 @@
  from .factory import create_agent
  from .wrap import wrap_agent_as_tool

-
  __all__ = ["create_agent", "wrap_agent_as_tool"]
langchain_dev_utils/agents/factory.py
@@ -1,5 +1,4 @@
- from typing import Any, Sequence
- from typing import Callable
+ from typing import Any, Callable, Sequence

  from langchain.agents import create_agent as _create_agent
  from langchain.agents.middleware.types import (
@@ -63,21 +62,14 @@ def create_agent( # noqa: PLR0915
      Example:
          >>> from langchain_dev_utils.chat_models import register_model_provider
          >>> from langchain_dev_utils.agents import create_agent
-         >>> from langchain_core.tools import tool
-         >>> import datetime
          >>>
-         >>> # Register a model provider
+         # Register a model provider, must be done before creating the agent
          >>> register_model_provider(
          ...     provider_name="vllm",
          ...     chat_model="openai-compatible",
          ...     base_url="http://localhost:8000/v1",
          ... )
          >>>
-         >>> @tool
-         ... def get_current_time() -> str:
-         ...     \"\"\"Get current time.\"\"\"
-         ...     return str(datetime.datetime.now().timestamp())
-         >>>
          >>> agent = create_agent(
          ...     "vllm:qwen3-4b",
          ...     tools=[get_current_time],
langchain_dev_utils/agents/file_system.py
@@ -1,5 +1,5 @@
- from typing import Annotated, Literal, Optional
  import warnings
+ from typing import Annotated, Literal, Optional

  from langchain.tools import BaseTool, ToolRuntime, tool
  from langchain_core.messages import ToolMessage
langchain_dev_utils/agents/middleware/__init__.py
@@ -7,6 +7,7 @@ from .plan import (
      create_write_plan_tool,
  )
  from .summarization import SummarizationMiddleware
+ from .tool_call_repair import ToolCallRepairMiddleware
  from .tool_emulator import LLMToolEmulator
  from .tool_selection import LLMToolSelectorMiddleware

@@ -20,4 +21,5 @@ __all__ = [
      "ModelFallbackMiddleware",
      "LLMToolEmulator",
      "ModelRouterMiddleware",
+     "ToolCallRepairMiddleware",
  ]
langchain_dev_utils/agents/middleware/model_fallback.py
@@ -26,7 +26,7 @@ class ModelFallbackMiddleware(_ModelFallbackMiddleware):
        )

        agent = create_agent(
-           model="vllm:qwen3-4b",#Primary model
+           model="vllm:qwen3-4b", #Primary model
            middleware=[fallback],
        )

langchain_dev_utils/agents/middleware/model_router.py
@@ -68,11 +68,15 @@ class ModelRouterState(AgentState):


  class ModelRouterMiddleware(AgentMiddleware):
-     """Model routing middleware that automatically selects the most suitable model based on input content.
+     """Model routing middleware that automatically selects the most suitable model
+     based on input content.

      Args:
-         router_model: Model identifier used for routing selection, it can be a model name or a BaseChatModel instance
-         model_list: List of available routing models, each containing model_name, model_description, tools(Optional), model_kwargs(Optional), model_system_prompt(Optional)
+         router_model: Model identifier used for routing selection, it can be a
+             model name or a BaseChatModel instance
+         model_list: List of available routing models, each containing model_name,
+             model_description, tools(Optional), model_kwargs(Optional),
+             model_system_prompt(Optional)
          router_prompt: Routing prompt template, uses default template if None

      Examples:
@@ -145,9 +149,7 @@ class ModelRouterMiddleware(AgentMiddleware):
          model_name = await self._aselect_model(state["messages"])
          return {"router_model_selection": model_name}

-     def wrap_model_call(
-         self, request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
-     ) -> ModelCallResult:
+     def _get_override_kwargs(self, request: ModelRequest) -> dict[str, Any]:
          model_dict = {
              item["model_name"]: {
                  "tools": item.get("tools", None),
@@ -159,49 +161,38 @@ class ModelRouterMiddleware(AgentMiddleware):
          select_model_name = request.state.get("router_model_selection", "default-model")

          override_kwargs = {}
-         if select_model_name != "default-model":
-             if select_model_name in model_dict:
-                 model_values = model_dict.get(select_model_name, {})
-                 if model_values["kwargs"] is not None:
-                     model = load_chat_model(select_model_name, **model_values["kwargs"])
-                 else:
-                     model = load_chat_model(select_model_name)
-                 override_kwargs["model"] = model
-                 if model_values["tools"] is not None:
-                     override_kwargs["tools"] = model_values["tools"]
-                 if model_values["system_prompt"] is not None:
-                     override_kwargs["system_message"] = SystemMessage(
-                         content=model_values["system_prompt"]
-                     )
-         return handler(request.override(**override_kwargs))
+         if select_model_name != "default-model" and select_model_name in model_dict:
+             model_values = model_dict.get(select_model_name, {})
+             if model_values["kwargs"] is not None:
+                 model = load_chat_model(select_model_name, **model_values["kwargs"])
+             else:
+                 model = load_chat_model(select_model_name)
+             override_kwargs["model"] = model
+             if model_values["tools"] is not None:
+                 override_kwargs["tools"] = model_values["tools"]
+             if model_values["system_prompt"] is not None:
+                 override_kwargs["system_message"] = SystemMessage(
+                     content=model_values["system_prompt"]
+                 )
+
+         return override_kwargs
+
+     def wrap_model_call(
+         self, request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
+     ) -> ModelCallResult:
+         override_kwargs = self._get_override_kwargs(request)
+         if override_kwargs:
+             return handler(request.override(**override_kwargs))
+         else:
+             return handler(request)

      async def awrap_model_call(
          self,
          request: ModelRequest,
          handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
      ) -> ModelCallResult:
-         model_dict = {
-             item["model_name"]: {
-                 "tools": item.get("tools", None),
-                 "kwargs": item.get("model_kwargs", None),
-                 "system_prompt": item.get("model_system_prompt", None),
-             }
-             for item in self.model_list
-         }
-         select_model_name = request.state.get("router_model_selection", "default-model")
-         override_kwargs = {}
-         if select_model_name != "default-model":
-             if select_model_name in model_dict:
-                 model_values = model_dict.get(select_model_name, {})
-                 if model_values["kwargs"] is not None:
-                     model = load_chat_model(select_model_name, **model_values["kwargs"])
-                 else:
-                     model = load_chat_model(select_model_name)
-                 override_kwargs["model"] = model
-                 if model_values["tools"] is not None:
-                     override_kwargs["tools"] = model_values["tools"]
-                 if model_values["system_prompt"] is not None:
-                     override_kwargs["system_message"] = SystemMessage(
-                         content=model_values["system_prompt"]
-                     )
-         return await handler(request.override(**override_kwargs))
+         override_kwargs = self._get_override_kwargs(request)
+         if override_kwargs:
+             return await handler(request.override(**override_kwargs))
+         else:
+             return await handler(request)
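
For illustration, a usage sketch of the refactored middleware; the provider and model identifiers are placeholders, and the `model_list` keys are the ones `_get_override_kwargs` reads above:

```python
from langchain_dev_utils.agents import create_agent
from langchain_dev_utils.agents.middleware import ModelRouterMiddleware
from langchain_dev_utils.chat_models import register_model_provider

# Register the provider first (see the factory.py docstring in this diff).
register_model_provider(
    provider_name="vllm",
    chat_model="openai-compatible",
    base_url="http://localhost:8000/v1",
)

# Hypothetical model list; model_name must be loadable by load_chat_model,
# and the optional keys map to the overrides applied above.
router = ModelRouterMiddleware(
    router_model="vllm:qwen3-4b",
    model_list=[
        {
            "model_name": "vllm:qwen3-coder",
            "model_description": "Best suited for code generation tasks",
            "model_system_prompt": "You are a coding assistant.",
        },
    ],
)

agent = create_agent("vllm:qwen3-4b", middleware=[router])
```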
langchain_dev_utils/agents/middleware/plan.py
@@ -1,6 +1,5 @@
  import json
- from typing import Awaitable, Callable, Literal, Optional, cast
- from typing import NotRequired
+ from typing import Awaitable, Callable, Literal, NotRequired, Optional, cast

  from langchain.agents.middleware import ModelRequest, ModelResponse
  from langchain.agents.middleware.types import (
@@ -160,7 +159,8 @@ def create_finish_sub_plan_tool(
  ) -> BaseTool:
      """Create a tool for finishing sub-plan tasks.

-     This function creates a tool that allows agents to update the status of sub-plans in a plan. Sub-plans can be marked as "done" to track progress.
+     This function creates a tool that allows agents to update the status of sub-plans in a plan.
+     Sub-plans can be marked as "done" to track progress.

      Args:
          description: The description of the tool. Uses default description if not provided.
@@ -273,16 +273,20 @@ _PLAN_SYSTEM_PROMPT = """You can manage task plans using three simple tools:
  class PlanMiddleware(AgentMiddleware):
      """Middleware that provides plan management capabilities to agents.

-     This middleware adds a `write_plan` and `finish_sub_plan` (and `read_plan` optional) tool that allows agents to create and manage
-     structured plan lists for complex multi-step operations. It's designed to help
-     agents track progress, organize complex tasks, and provide users with visibility
-     into task completion status.
+     This middleware adds a `write_plan` and `finish_sub_plan` (and `read_plan`
+     optional) tool that allows agents to create and manage structured plan lists
+     for complex multi-step operations. It's designed to help agents track progress,
+     organize complex tasks, and provide users with visibility into task completion
+     status.

-     The middleware automatically injects system prompts that guide the agent on how to use the plan functionality effectively.
+     The middleware automatically injects system prompts that guide the agent on
+     how to use the plan functionality effectively.

      Args:
-         system_prompt: Custom system prompt to guide the agent on using the plan tool.
-             If not provided, uses the default `_PLAN_SYSTEM_PROMPT` or `_PLAN_SYSTEM_PROMPT_NOT_READ_PLAN` based on the `use_read_plan_tool` parameter.
+         system_prompt: Custom system prompt to guide the agent on using the plan
+             tool. If not provided, uses the default `_PLAN_SYSTEM_PROMPT` or
+             `_PLAN_SYSTEM_PROMPT_NOT_READ_PLAN` based on the `use_read_plan_tool`
+             parameter.
          write_plan_tool_description: Description of the `write_plan` tool.
              If not provided, uses the default `_DEFAULT_WRITE_PLAN_TOOL_DESCRIPTION`.
          finish_sub_plan_tool_description: Description of the `finish_sub_plan` tool.
@@ -291,7 +295,7 @@ class PlanMiddleware(AgentMiddleware):
              If not provided, uses the default `_DEFAULT_READ_PLAN_TOOL_DESCRIPTION`.
          use_read_plan_tool: Whether to use the `read_plan` tool.
              If not provided, uses the default `True`.
-         message_key: The key of the message to be updated. Defaults to "messages".
+
      Example:
          ```python
          from langchain_dev_utils.agents.middleware import PlanMiddleware
@@ -316,7 +320,6 @@ class PlanMiddleware(AgentMiddleware):
          finish_sub_plan_tool_description: Optional[str] = None,
          read_plan_tool_description: Optional[str] = None,
          use_read_plan_tool: bool = True,
-         message_key: Optional[str] = None,
      ) -> None:
          super().__init__()

@@ -332,12 +335,8 @@ class PlanMiddleware(AgentMiddleware):
          )

          tools = [
-             create_write_plan_tool(
-                 description=write_plan_tool_description, message_key=message_key
-             ),
-             create_finish_sub_plan_tool(
-                 description=finish_sub_plan_tool_description, message_key=message_key
-             ),
+             create_write_plan_tool(description=write_plan_tool_description),
+             create_finish_sub_plan_tool(description=finish_sub_plan_tool_description),
          ]

          if use_read_plan_tool:
langchain_dev_utils/agents/middleware/summarization.py
@@ -1,12 +1,14 @@
  from typing import Any

  from langchain.agents.middleware.summarization import (
-     ContextSize,
-     DEFAULT_SUMMARY_PROMPT,
-     SummarizationMiddleware as _SummarizationMiddleware,
-     TokenCounter,
      _DEFAULT_MESSAGES_TO_KEEP,
      _DEFAULT_TRIM_TOKEN_LIMIT,
+     DEFAULT_SUMMARY_PROMPT,
+     ContextSize,
+     TokenCounter,
+ )
+ from langchain.agents.middleware.summarization import (
+     SummarizationMiddleware as _SummarizationMiddleware,
  )
  from langchain_core.messages.utils import count_tokens_approximately

langchain_dev_utils/agents/middleware/tool_call_repair.py
@@ -0,0 +1,96 @@
+ from typing import Any, Awaitable, Callable, cast
+
+ from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse
+ from langchain.agents.middleware.types import ModelCallResult
+ from langchain_core.messages import AIMessage, BaseMessage
+
+ from langchain_dev_utils._utils import _check_pkg_install
+
+
+ class ToolCallRepairMiddleware(AgentMiddleware):
+     """Middleware to repair invalid tool calls in AIMessages.
+
+     This middleware attempts to repair JSON-formatted tool arguments in
+     AIMessages that have invalid tool calls. It uses the `json_repair`
+     package to fix common JSON errors.
+
+     Example:
+         ```python
+         from langchain_dev_utils.agents.middleware import ToolCallRepairMiddleware
+
+         middleware = ToolCallRepairMiddleware()
+         ```
+     """
+
+     def _repair_msgs(self, messages: list[BaseMessage]) -> list[BaseMessage]:
+         _check_pkg_install("json_repair")
+         from json import JSONDecodeError
+
+         from json_repair import loads
+
+         results = []
+         for msg in messages:
+             if (
+                 isinstance(msg, AIMessage)
+                 and hasattr(msg, "invalid_tool_calls")
+                 and len(msg.invalid_tool_calls) > 0
+             ):
+                 new_invalid_toolcalls = []
+                 new_tool_calls = [*msg.tool_calls]
+
+                 for invalid_tool_call in msg.invalid_tool_calls:
+                     args = invalid_tool_call.get("args")
+                     if args:
+                         try:
+                             args = cast(dict[str, Any], loads(args))
+                             new_tool_calls.append(
+                                 {
+                                     "name": invalid_tool_call.get(
+                                         "name",
+                                     )
+                                     or "",
+                                     "id": invalid_tool_call.get("id", ""),
+                                     "type": "tool_call",
+                                     "args": args,
+                                 }
+                             )
+                         except JSONDecodeError:
+                             new_invalid_toolcalls.append(invalid_tool_call)
+                     else:
+                         new_invalid_toolcalls.append(invalid_tool_call)
+
+                 new_msg = msg.model_copy(
+                     update={
+                         "tool_calls": new_tool_calls,
+                         "invalid_tool_calls": new_invalid_toolcalls,
+                     }
+                 )
+                 results.append(new_msg)
+             else:
+                 results.append(msg)
+
+         return results
+
+     def wrap_model_call(
+         self, request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
+     ) -> ModelCallResult:
+         response = handler(request)
+         results = self._repair_msgs(response.result)
+
+         return ModelResponse(
+             result=results,
+             structured_response=response.structured_response,
+         )
+
+     async def awrap_model_call(
+         self,
+         request: ModelRequest,
+         handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+     ) -> ModelCallResult:
+         response = await handler(request)
+         results = self._repair_msgs(response.result)
+
+         return ModelResponse(
+             result=results,
+             structured_response=response.structured_response,
+         )
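
For illustration, a hedged usage sketch of the new middleware (the model identifier is a placeholder; only `ToolCallRepairMiddleware()` itself comes from the hunk above):

```python
from langchain_dev_utils.agents import create_agent
from langchain_dev_utils.agents.middleware import ToolCallRepairMiddleware

# After each model call, the middleware re-parses invalid_tool_calls with
# json_repair and promotes any fixed entries back into tool_calls.
agent = create_agent(
    "vllm:qwen3-4b",  # placeholder model identifier
    middleware=[ToolCallRepairMiddleware()],
)
```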
langchain_dev_utils/agents/middleware/tool_emulator.py
@@ -20,7 +20,7 @@ class LLMToolEmulator(_LLMToolEmulator):
          model: Model to use for emulation. Must be a string identifier.

      Examples:
-         Emulate all tools (default behavior):
+         # Emulate all tools (default behavior):
          ```python
          from langchain_dev_utils.agents import create_agent
          from langchain_dev_utils.agents.middleware import LLMToolEmulator
@@ -36,12 +36,12 @@ class LLMToolEmulator(_LLMToolEmulator):
          )
          ```

-         Emulate specific tools by name:
+         # Emulate specific tools by name:
          ```python
          middleware = LLMToolEmulator(model="vllm:qwen3-4b", tools=["get_weather", "get_user_location"])
          ```

-         Emulate specific tools by passing tool instances:
+         # Emulate specific tools by passing tool instances:
          ```python
          middleware = LLMToolEmulator(model="vllm:qwen3-4b", tools=[get_weather, get_user_location])
          ```
langchain_dev_utils/agents/middleware/tool_selection.py
@@ -30,7 +30,7 @@ class LLMToolSelectorMiddleware(_LLMToolSelectorMiddleware):
          against the max_tools limit.

      Examples:
-         Basic usage with tool limit:
+         # Basic usage with tool limit:
          ```python
          from langchain_dev_utils.agents.middleware import LLMToolSelectorMiddleware

@@ -40,7 +40,7 @@ class LLMToolSelectorMiddleware(_LLMToolSelectorMiddleware):
          )
          ```

-         With always-included tools:
+         # With always-included tools:
          ```python
          middleware = LLMToolSelectorMiddleware(
              model="vllm:qwen3-4b",
@@ -49,7 +49,7 @@ class LLMToolSelectorMiddleware(_LLMToolSelectorMiddleware):
          )
          ```

-         With custom system prompt:
+         # With custom system prompt:
          ```python
          custom_prompt = "Select tools that can help answer user questions about data."
          middleware = LLMToolSelectorMiddleware(
langchain_dev_utils/agents/plan.py
@@ -1,5 +1,5 @@
- from typing import Literal, Optional
  import warnings
+ from typing import Literal, Optional

  from langchain.tools import BaseTool, ToolRuntime, tool
  from langchain_core.messages import ToolMessage
langchain_dev_utils/agents/wrap.py
@@ -54,30 +54,18 @@ def wrap_agent_as_tool(
          BaseTool: The wrapped agent as a tool

      Example:
-         >>> import datetime
-         >>> from langchain_core.messages import HumanMessage
-         >>> from langchain_core.tools import tool
          >>> from langchain_dev_utils.agents import wrap_agent_as_tool, create_agent
-
-         >>> @tool
-         ... def get_current_time() -> str:
-         ...     "\"\"Get the current timestamp.\"\"\"
-         ...     return str(datetime.datetime.now().timestamp())
-
-         >>> # Define an agent for querying the time
-         >>> time_agent = create_agent(
-         ...     "vllm:qwen3-4b", tools=[get_current_time], name="time_agent"
+         >>>
+         >>> call_time_agent_tool = wrap_agent_as_tool(
+         ...     time_agent,
+         ...     tool_name="call_time_agent",
+         ...     tool_description="Used to invoke the time sub-agent to perform time-related tasks"
          ... )
-         >>> tool = wrap_agent_as_tool(
-         ...     time_agent, tool_name="call_time_agent", tool_description="Invoke the time agent"
-         ... )
-         >>> print(tool)
-
-         >>> # Use it as a tool
-         >>> agent = create_agent("vllm:qwen3-4b", tools=[tool], name="agent")
+         >>>
+         >>> agent = create_agent("vllm:qwen3-4b", tools=[call_time_agent_tool], name="agent")

          >>> response = agent.invoke({"messages": [HumanMessage(content="What time is it now?")]})
-         >>> print(response)
+         >>> response
      """
      if agent.name is None:
          raise ValueError("Agent name must not be None")
langchain_dev_utils/chat_models/adapters/openai_compatible.py
@@ -1,4 +1,5 @@
  from __future__ import annotations
+
  from collections.abc import AsyncIterator, Iterator
  from json import JSONDecodeError
  from typing import (
@@ -13,6 +14,7 @@ from typing import (
      Union,
  )

+ import openai
  from langchain_core.callbacks import (
      AsyncCallbackManagerForLLMRun,
      CallbackManagerForLLMRun,
@@ -26,7 +28,6 @@ from langchain_core.utils import from_env, secret_from_env
  from langchain_core.utils.function_calling import convert_to_openai_tool
  from langchain_openai.chat_models._compat import _convert_from_v1_to_chat_completions
  from langchain_openai.chat_models.base import BaseChatOpenAI, _convert_message_to_dict
- import openai
  from pydantic import (
      BaseModel,
      ConfigDict,
@@ -49,22 +50,37 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
      """
      Base template class for OpenAI-compatible chat model implementations.

-     This class provides a foundation for integrating various LLM providers that offer OpenAI-compatible APIs (such as vLLM, OpenRouter, ZAI, Moonshot, and many others).
-     It enhances the base OpenAI functionality by:
-
-     **1.Supports output of more types of reasoning content (reasoning_content)**
-     ChatOpenAI can only output reasoning content natively supported by official OpenAI models, while OpenAICompatibleChatModel can output reasoning content from other model providers (e.g., OpenRouter).
-
-     **2.Optimizes default behavior for structured output**
-     When calling with_structured_output, the default value of the method parameter is adjusted to "function_calling" (instead of the default "json_schema" in ChatOpenAI), providing better compatibility with other models.
-
-     **3.Supports configuration of related parameters**
-     For cases where parameters differ from the official OpenAI API, this library provides the compatibility_options parameter to address this issue. For example, when different model providers have inconsistent support for tool_choice, you can adapt by setting supported_tool_choice in compatibility_options.
-
-     Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class extends capabilities to better support diverse OpenAI-compatible model providers while maintaining full compatibility with LangChain's chat model interface.
-
-     Note: This is a template class and should not be exported or instantiated directly.
-     Instead, use it as a base class and provide the specific provider name through inheritance or the factory function `_create_openai_compatible_model()`.
+     This class provides a foundation for integrating various LLM providers that
+     offer OpenAI-compatible APIs (such as vLLM, OpenRouter, ZAI, Moonshot,
+     and many others). It enhances the base OpenAI functionality by:
+
+     **1. Supports output of more types of reasoning content (reasoning_content)**
+     ChatOpenAI can only output reasoning content natively supported by official
+     OpenAI models, while OpenAICompatibleChatModel can output reasoning content
+     from other model providers (e.g., OpenRouter).
+
+     **2. Optimizes default behavior for structured output**
+     When calling with_structured_output, the default value of the method
+     parameter is adjusted to "function_calling" (instead of the default
+     "json_schema" in ChatOpenAI), providing better compatibility with other
+     models.
+
+     **3. Supports configuration of related parameters**
+     For cases where parameters differ from the official OpenAI API, this library
+     provides the compatibility_options parameter to address this issue. For
+     example, when different model providers have inconsistent support for
+     tool_choice, you can adapt by setting supported_tool_choice in
+     compatibility_options.
+
+     Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class
+     extends capabilities to better support diverse OpenAI-compatible model
+     providers while maintaining full compatibility with LangChain's chat model
+     interface.
+
+     Note: This is a template class and should not be exported or instantiated
+     directly. Instead, use it as a base class and provide the specific provider
+     name through inheritance or the factory function
+     `_create_openai_compatible_model()`.
      """

      model_name: str = Field(alias="model", default="openai compatible model")
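
To make point 2 of this docstring concrete, a minimal sketch, assuming a provider registered as `vllm` and this package's `load_chat_model` helper; `Weather` is a hypothetical schema:

```python
from pydantic import BaseModel

from langchain_dev_utils.chat_models import load_chat_model


class Weather(BaseModel):
    city: str
    temperature_c: float


model = load_chat_model("vllm:qwen3-4b")
# Per the docstring above, the method parameter defaults to
# "function_calling" here, rather than ChatOpenAI's "json_schema" default.
structured = model.with_structured_output(Weather)
```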