langchain-dev-utils 1.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. langchain_dev_utils/__init__.py +1 -0
  2. langchain_dev_utils/_utils.py +131 -0
  3. langchain_dev_utils/agents/__init__.py +4 -0
  4. langchain_dev_utils/agents/factory.py +99 -0
  5. langchain_dev_utils/agents/file_system.py +252 -0
  6. langchain_dev_utils/agents/middleware/__init__.py +21 -0
  7. langchain_dev_utils/agents/middleware/format_prompt.py +66 -0
  8. langchain_dev_utils/agents/middleware/handoffs.py +214 -0
  9. langchain_dev_utils/agents/middleware/model_fallback.py +49 -0
  10. langchain_dev_utils/agents/middleware/model_router.py +200 -0
  11. langchain_dev_utils/agents/middleware/plan.py +367 -0
  12. langchain_dev_utils/agents/middleware/summarization.py +85 -0
  13. langchain_dev_utils/agents/middleware/tool_call_repair.py +96 -0
  14. langchain_dev_utils/agents/middleware/tool_emulator.py +60 -0
  15. langchain_dev_utils/agents/middleware/tool_selection.py +82 -0
  16. langchain_dev_utils/agents/plan.py +188 -0
  17. langchain_dev_utils/agents/wrap.py +324 -0
  18. langchain_dev_utils/chat_models/__init__.py +11 -0
  19. langchain_dev_utils/chat_models/adapters/__init__.py +3 -0
  20. langchain_dev_utils/chat_models/adapters/create_utils.py +53 -0
  21. langchain_dev_utils/chat_models/adapters/openai_compatible.py +715 -0
  22. langchain_dev_utils/chat_models/adapters/register_profiles.py +15 -0
  23. langchain_dev_utils/chat_models/base.py +282 -0
  24. langchain_dev_utils/chat_models/types.py +27 -0
  25. langchain_dev_utils/embeddings/__init__.py +11 -0
  26. langchain_dev_utils/embeddings/adapters/__init__.py +3 -0
  27. langchain_dev_utils/embeddings/adapters/create_utils.py +45 -0
  28. langchain_dev_utils/embeddings/adapters/openai_compatible.py +91 -0
  29. langchain_dev_utils/embeddings/base.py +234 -0
  30. langchain_dev_utils/message_convert/__init__.py +15 -0
  31. langchain_dev_utils/message_convert/content.py +201 -0
  32. langchain_dev_utils/message_convert/format.py +69 -0
  33. langchain_dev_utils/pipeline/__init__.py +7 -0
  34. langchain_dev_utils/pipeline/parallel.py +135 -0
  35. langchain_dev_utils/pipeline/sequential.py +101 -0
  36. langchain_dev_utils/pipeline/types.py +3 -0
  37. langchain_dev_utils/py.typed +0 -0
  38. langchain_dev_utils/tool_calling/__init__.py +14 -0
  39. langchain_dev_utils/tool_calling/human_in_the_loop.py +284 -0
  40. langchain_dev_utils/tool_calling/utils.py +81 -0
  41. langchain_dev_utils-1.3.7.dist-info/METADATA +103 -0
  42. langchain_dev_utils-1.3.7.dist-info/RECORD +44 -0
  43. langchain_dev_utils-1.3.7.dist-info/WHEEL +4 -0
  44. langchain_dev_utils-1.3.7.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,188 @@
1
import warnings
from typing import Literal, Optional

from langchain.tools import BaseTool, ToolRuntime, tool
from langchain_core.messages import ToolMessage
from langgraph.types import Command
from typing_extensions import TypedDict

# Deprecation notice emitted once at import time: this module is superseded
# by the middleware-based planning utilities in langchain-dev-utils.
warnings.warn(
    "langchain_dev_utils.agents.plan is deprecated, and it will be removed in a future version. Please use middleware in langchain-dev-utils instead.",
    DeprecationWarning,
)
13
+
14
# Default tool descriptions shown to the model. These are runtime strings
# (they become the tools' `description`), so their wording is part of the
# tools' behavior. Internal indentation reconstructed from the diff —
# TODO confirm against the released wheel.
_DEFAULT_WRITE_PLAN_TOOL_DESCRIPTION = """
A tool for writing initial plan — can only be used once, at the very beginning.
Use update_plan for subsequent modifications.

Args:
    plan: The list of plan items to write. Each string in the list represents
        the content of one plan item.
"""

_DEFAULT_UPDATE_PLAN_TOOL_DESCRIPTION = """
A tool for updating the status of plan tasks. Can be called multiple times to track task progress.

Args:
    update_plans: A list of plan items to update. Each item is a dictionary containing
        the following fields:
        - content: str — The exact content of the plan task. Must match an
          existing task verbatim.
        - status: str — The task status. Must be either "in_progress" or "done".

Usage Guidelines:
    - Only pass the tasks whose status needs to be updated — no need to include all tasks.
    - Each call must include at least one task with status "done" AND at least one task with
      status "in_progress":
        - Mark completed tasks as "done"
        - Mark the next tasks to work on as "in_progress"
    - The "content" field must exactly match the content of an existing task
      (case-sensitive, whitespace-sensitive).

Example:
    Suppose the current task list is:
        - Task 1 (in_progress)
        - Task 2 (pending)
        - Task 3 (pending)

    When "Task 1" is completed and you are ready to start "Task 2", pass in:
    [
        {"content": "Task 1", "status": "done"},
        {"content": "Task 2", "status": "in_progress"}
    ]
"""
54
+
55
+
56
class Plan(TypedDict):
    """A single task in an agent's plan."""

    # Task text; used verbatim as the lookup key when updating status.
    content: str
    # Task lifecycle: "pending" -> "in_progress" -> "done".
    status: Literal["pending", "in_progress", "done"]
59
+
60
+
61
class PlanStateMixin(TypedDict):
    """State mixin adding a shared task plan to an agent's state schema."""

    # Current list of plan tasks, mutated by the write_plan/update_plan tools.
    plan: list[Plan]
63
+
64
+
65
def create_write_plan_tool(
    name: Optional[str] = None,
    description: Optional[str] = None,
    message_key: Optional[str] = None,
) -> BaseTool:
    """Create a tool for writing initial plan.

    This function creates a tool that allows agents to write an initial plan
    with a list of tasks. The first task in the plan will be marked as "in_progress"
    and the rest as "pending".

    Args:
        name: The name of the tool. Defaults to "write_plan".
        description: The description of the tool. Uses default description if not provided.
        message_key: The key of the message to be updated. Defaults to "messages".

    Returns:
        BaseTool: The tool for writing initial plan.

    Example:
        Basic usage:
        >>> from langchain_dev_utils.agents.plan import create_write_plan_tool
        >>> write_plan_tool = create_write_plan_tool()
    """

    @tool(
        name_or_callable=name or "write_plan",
        description=description or _DEFAULT_WRITE_PLAN_TOOL_DESCRIPTION,
    )
    def write_plan(plan: list[str], runtime: ToolRuntime):
        # An empty plan previously failed later with an opaque IndexError on
        # plan[0]; reject it up front with an actionable message instead.
        if not plan:
            raise ValueError("Plan must contain at least one task")
        msg_key = message_key or "messages"
        return Command(
            update={
                # The first task starts "in_progress"; all others "pending".
                "plan": [
                    {
                        "content": content,
                        "status": "in_progress" if index == 0 else "pending",
                    }
                    for index, content in enumerate(plan)
                ],
                msg_key: [
                    ToolMessage(
                        # "in_progress" (not the former "in_process") matches
                        # the Plan status literal and the update_plan tool docs.
                        content=f"Plan successfully written, please first execute the {plan[0]} task (no need to change the status to in_progress)",
                        tool_call_id=runtime.tool_call_id,
                    )
                ],
            }
        )

    return write_plan
115
+
116
+
117
def create_update_plan_tool(
    name: Optional[str] = None,
    description: Optional[str] = None,
    message_key: Optional[str] = None,
) -> BaseTool:
    """Create a tool for updating plan tasks.

    This function creates a tool that allows agents to update the status of tasks
    in a plan. Tasks can be marked as "in_progress" or "done" to track progress.

    Args:
        name: The name of the tool. Defaults to "update_plan".
        description: The description of the tool. Uses default description if not provided.
        message_key: The key of the message to be updated. Defaults to "messages".

    Returns:
        BaseTool: The tool for updating plan tasks.

    Raises:
        ValueError: (at call time) if any requested update does not match an
            existing task's content verbatim.

    Example:
        Basic usage:
        >>> from langchain_dev_utils.agents.plan import create_update_plan_tool
        >>> update_plan_tool = create_update_plan_tool()
    """

    @tool(
        name_or_callable=name or "update_plan",
        description=description or _DEFAULT_UPDATE_PLAN_TOOL_DESCRIPTION,
    )
    def update_plan(
        update_plans: list[Plan],
        runtime: ToolRuntime,
    ):
        plan_list = runtime.state.get("plan", [])

        # Tasks whose status was actually changed; used below to detect
        # requested updates that matched no existing task.
        # (Loop variable renamed from `update_plan`, which shadowed this
        # function's own name.)
        applied: list[Plan] = []

        for requested in update_plans:
            for existing in plan_list:
                if existing["content"] == requested["content"]:
                    existing["status"] = requested["status"]
                    applied.append(existing)

        if len(applied) < len(update_plans):
            raise ValueError(
                # Typo fix: "fullly" -> "fully".
                "Not fully updated plan, missing:"
                + ",".join(
                    [
                        plan["content"]
                        for plan in update_plans
                        if plan not in applied
                    ]
                )
                + "\nPlease check the plan list, the current plan list is:"
                + "\n".join(
                    [plan["content"] for plan in plan_list if plan["status"] != "done"]
                )
            )
        msg_key = message_key or "messages"

        return Command(
            update={
                "plan": plan_list,
                msg_key: [
                    ToolMessage(
                        content="Plan updated successfully",
                        tool_call_id=runtime.tool_call_id,
                    )
                ],
            }
        )

    return update_plan
@@ -0,0 +1,324 @@
1
+ import asyncio
2
+ from typing import Any, Awaitable, Callable, Optional
3
+
4
+ from langchain.tools import ToolRuntime
5
+ from langchain_core.messages import HumanMessage
6
+ from langchain_core.tools import BaseTool, StructuredTool
7
+ from langgraph.graph.state import CompiledStateGraph
8
+
9
+ from langchain_dev_utils.message_convert import format_sequence
10
+
11
+
12
def _process_input(request: str, runtime: ToolRuntime) -> str:
    """Default pre-input hook: pass the request string through unchanged."""
    return request
14
+
15
+
16
def _process_output(
    request: str, response: dict[str, Any], runtime: ToolRuntime
) -> Any:
    """Default post-output hook: return the content of the agent's last message.

    Assumes ``response["messages"]`` is a non-empty list of message objects —
    TODO confirm for agents with custom state schemas.
    """
    return response["messages"][-1].content
20
+
21
+
22
def wrap_agent_as_tool(
    agent: CompiledStateGraph,
    tool_name: Optional[str] = None,
    tool_description: Optional[str] = None,
    pre_input_hooks: Optional[
        tuple[
            Callable[[str, ToolRuntime], str | dict[str, Any]],
            Callable[[str, ToolRuntime], Awaitable[str | dict[str, Any]]],
        ]
        | Callable[[str, ToolRuntime], str | dict[str, Any]]
    ] = None,
    post_output_hooks: Optional[
        tuple[
            Callable[[str, dict[str, Any], ToolRuntime], Any],
            Callable[[str, dict[str, Any], ToolRuntime], Awaitable[Any]],
        ]
        | Callable[[str, dict[str, Any], ToolRuntime], Any]
    ] = None,
) -> BaseTool:
    """Wraps an agent as a tool

    Args:
        agent: The agent to wrap
        tool_name: The name of the tool
        tool_description: The description of the tool
        pre_input_hooks: Hooks to run before the input is processed. Either a
            single sync callable (used for both paths) or a (sync, async) tuple.
        post_output_hooks: Hooks to run after the output is processed. Same
            shapes as pre_input_hooks.

    Returns:
        BaseTool: The wrapped agent as a tool

    Raises:
        ValueError: If the agent has no name.

    Example:
        >>> from langchain_dev_utils.agents import wrap_agent_as_tool, create_agent
        >>>
        >>> call_time_agent_tool = wrap_agent_as_tool(
        ...     time_agent,
        ...     tool_name="call_time_agent",
        ...     tool_description="Used to invoke the time sub-agent to perform time-related tasks"
        ... )
        >>>
        >>> agent = create_agent("vllm:qwen3-4b", tools=[call_time_agent_tool], name="agent")
        >>> response = agent.invoke({"messages": [HumanMessage(content="What time is it now?")]})
        >>> response
    """
    if agent.name is None:
        raise ValueError("Agent name must not be None")

    # Defaults are always bound, so the hooks are never falsy below.
    process_input = _process_input
    process_input_async = _process_input
    process_output = _process_output
    process_output_async = _process_output

    if pre_input_hooks:
        if isinstance(pre_input_hooks, tuple):
            process_input, process_input_async = pre_input_hooks
        else:
            process_input = pre_input_hooks
            process_input_async = pre_input_hooks

    if post_output_hooks:
        if isinstance(post_output_hooks, tuple):
            process_output, process_output_async = post_output_hooks
        else:
            process_output = post_output_hooks
            process_output_async = post_output_hooks

    def _to_agent_input(processed: str | dict[str, Any]) -> dict[str, Any]:
        # Normalize a hook result into the {"messages": [...]} input shape
        # expected by CompiledStateGraph.invoke/ainvoke.
        if isinstance(processed, str):
            return {"messages": [HumanMessage(content=processed)]}
        if isinstance(processed, dict):
            if "messages" not in processed:
                raise ValueError("Agent input must contain 'messages' key")
            return processed
        raise ValueError("Pre Hooks must return a string or a dict")

    def call_agent(
        request: str,
        runtime: ToolRuntime,
    ):
        agent_input = _to_agent_input(process_input(request, runtime))
        response = agent.invoke(agent_input)
        return process_output(request, response, runtime)

    async def acall_agent(
        request: str,
        runtime: ToolRuntime,
    ):
        if asyncio.iscoroutinefunction(process_input_async):
            _processed_input = await process_input_async(request, runtime)
        else:
            _processed_input = process_input_async(request, runtime)

        agent_input = _to_agent_input(_processed_input)

        response = await agent.ainvoke(agent_input)

        if asyncio.iscoroutinefunction(process_output_async):
            return await process_output_async(request, response, runtime)
        # Bug fix: this fallback previously called process_output (the sync
        # hook) instead of process_output_async, ignoring a plain callable
        # supplied as the second element of post_output_hooks.
        return process_output_async(request, response, runtime)

    if tool_name is None:
        # NOTE(review): "transfor" looks like a typo for "transfer", but this
        # default name is part of the tool's public contract — renaming it
        # would break existing callers, so it is preserved as-is.
        tool_name = f"transfor_to_{agent.name}"
        if not tool_name.endswith("_agent"):
            tool_name += "_agent"

    if tool_description is None:
        tool_description = f"This tool transforms input to {agent.name}"

    return StructuredTool.from_function(
        func=call_agent,
        coroutine=acall_agent,
        name=tool_name,
        description=tool_description,
    )
163
+
164
+
165
def wrap_all_agents_as_tool(
    agents: list[CompiledStateGraph],
    tool_name: Optional[str] = None,
    tool_description: Optional[str] = None,
    pre_input_hooks: Optional[
        tuple[
            Callable[[str, ToolRuntime], str | dict[str, Any]],
            Callable[[str, ToolRuntime], Awaitable[str | dict[str, Any]]],
        ]
        | Callable[[str, ToolRuntime], str | dict[str, Any]]
    ] = None,
    post_output_hooks: Optional[
        tuple[
            Callable[[str, dict[str, Any], ToolRuntime], Any],
            Callable[[str, dict[str, Any], ToolRuntime], Awaitable[Any]],
        ]
        | Callable[[str, dict[str, Any], ToolRuntime], Any]
    ] = None,
) -> BaseTool:
    """Wraps all agents as single tool

    Args:
        agents: The agents to wrap (at least two, each with a unique name)
        tool_name: The name of the tool, default to "task"
        tool_description: The description of the tool
        pre_input_hooks: Hooks to run before the input is processed
        post_output_hooks: Hooks to run after the output is processed

    Returns:
        BaseTool: The wrapped agents as single tool

    Raises:
        ValueError: If fewer than two agents are given, an agent has no name,
            or two agents share a name.

    Example:
        >>> from langchain_dev_utils.agents import wrap_all_agents_as_tool, create_agent
        >>>
        >>> call_agent_tool = wrap_all_agents_as_tool(
        ...     [time_agent,weather_agent],
        ...     tool_name="call_sub_agents",
        ...     tool_description="Used to invoke the sub-agents to perform tasks"
        ... )
        >>>
        >>> agent = create_agent("vllm:qwen3-4b", tools=[call_sub_agents_tool], name="agent")
        >>> response = agent.invoke({"messages": [HumanMessage(content="What time is it now?")]})
        >>> response
    """
    if len(agents) <= 1:
        raise ValueError("At least more than one agent must be provided")

    agents_map = {}

    for agent in agents:
        if agent.name is None:
            # Bug fix: the message previously read "must not be provided",
            # the opposite of what this check enforces.
            raise ValueError("Agent name must not be None")
        if agent.name in agents_map:
            raise ValueError("Agent name must be unique")
        agents_map[agent.name] = agent

    # Defaults are always bound, so the hooks are never falsy below.
    process_input = _process_input
    process_input_async = _process_input
    process_output = _process_output
    process_output_async = _process_output

    if pre_input_hooks:
        if isinstance(pre_input_hooks, tuple):
            process_input, process_input_async = pre_input_hooks
        else:
            process_input = pre_input_hooks
            process_input_async = pre_input_hooks

    if post_output_hooks:
        if isinstance(post_output_hooks, tuple):
            process_output, process_output_async = post_output_hooks
        else:
            process_output = post_output_hooks
            process_output_async = post_output_hooks

    def _to_agent_input(processed: str | dict[str, Any]) -> dict[str, Any]:
        # Normalize a hook result into the {"messages": [...]} input shape
        # expected by CompiledStateGraph.invoke/ainvoke.
        if isinstance(processed, str):
            return {"messages": [HumanMessage(content=processed)]}
        if isinstance(processed, dict):
            if "messages" not in processed:
                raise ValueError("Agent input must contain 'messages' key")
            return processed
        raise ValueError("Pre Hooks must return str or dict")

    def call_agent(
        agent_name: str,
        description: str,
        runtime: ToolRuntime,
    ):
        if agent_name not in agents_map:
            raise ValueError(f"Agent {agent_name} not found")

        agent_input = _to_agent_input(process_input(description, runtime))

        # Bug fix: this previously invoked `agent` — the leftover loop
        # variable from the validation loop above — so the sync path always
        # routed to the LAST agent in the list regardless of agent_name.
        response = agents_map[agent_name].invoke(agent_input)

        return process_output(description, response, runtime)

    async def acall_agent(
        agent_name: str,
        description: str,
        runtime: ToolRuntime,
    ):
        if agent_name not in agents_map:
            raise ValueError(f"Agent {agent_name} not found")

        if asyncio.iscoroutinefunction(process_input_async):
            _processed_input = await process_input_async(description, runtime)
        else:
            _processed_input = process_input_async(description, runtime)

        agent_input = _to_agent_input(_processed_input)

        response = await agents_map[agent_name].ainvoke(agent_input)

        if asyncio.iscoroutinefunction(process_output_async):
            return await process_output_async(description, response, runtime)
        # Bug fix: the fallback previously called process_output (the sync
        # hook) instead of process_output_async.
        return process_output_async(description, response, runtime)

    if tool_name is None:
        tool_name = "task"

    if tool_description is None:
        tool_description = (
            "Launch an ephemeral subagent for a task.\nAvailable agents:\n "
            + format_sequence(list(agents_map.keys()), with_num=True)
        )
    return StructuredTool.from_function(
        func=call_agent,
        coroutine=acall_agent,
        name=tool_name,
        description=tool_description,
    )
@@ -0,0 +1,11 @@
1
# Public API of the chat_models subpackage: provider registration helpers
# and the provider-aware chat-model loader.
from .base import (
    batch_register_model_provider,
    load_chat_model,
    register_model_provider,
)

__all__ = [
    "load_chat_model",
    "register_model_provider",
    "batch_register_model_provider",
]
@@ -0,0 +1,3 @@
1
# Re-export the factory for building OpenAI-compatible chat-model classes.
from .create_utils import create_openai_compatible_model

__all__ = ["create_openai_compatible_model"]
@@ -0,0 +1,53 @@
1
+ from typing import Any, Optional, cast
2
+
3
+ from langchain_core.utils import from_env
4
+
5
+ from langchain_dev_utils._utils import _check_pkg_install
6
+
7
+ from ..types import CompatibilityOptions
8
+
9
+
10
def create_openai_compatible_model(
    model_provider: str,
    base_url: Optional[str] = None,
    compatibility_options: Optional[CompatibilityOptions] = None,
    model_profiles: Optional[dict[str, dict[str, Any]]] = None,
    chat_model_cls_name: Optional[str] = None,
):
    """Factory function for creating provider-specific OpenAI-compatible model classes.

    Dynamically generates model classes for different OpenAI-compatible providers,
    configuring environment variable mappings and default base URLs specific to each provider.

    Args:
        model_provider (str): Identifier for the OpenAI-compatible provider (e.g. `vllm`, `moonshot`)
        base_url (Optional[str], optional): Default API base URL for the provider. Defaults to None. If not provided, will try to use the environment variable.
        compatibility_options (Optional[CompatibilityOptions], optional): Optional configuration for compatibility options with the provider. Defaults to None.
        model_profiles (Optional[dict[str, dict[str, Any]]], optional): Optional model profiles for the provider. Defaults to None.
        chat_model_cls_name (Optional[str], optional): Optional custom class name for the generated model. Defaults to None.
    Returns:
        Type[_BaseChatOpenAICompatible]: Configured model class ready for instantiation with provider-specific settings

    Examples:
        >>> from langchain_dev_utils.chat_models.adapters import create_openai_compatible_model
        >>> ChatVLLM = create_openai_compatible_model(
        ...     "vllm",
        ...     base_url="http://localhost:8000",
        ...     chat_model_cls_name="ChatVLLM",
        ... )
        >>> model = ChatVLLM(model="qwen3-4b")
        >>> model.invoke("hello")
    """
    # Doc fix: the example previously referenced a nonexistent name
    # ("create_openai_compatible_chat_model"); it now matches this function.
    _check_pkg_install("langchain_openai")
    from .openai_compatible import _create_openai_compatible_model

    # Fall back to the provider-specific environment variable, e.g.
    # VLLM_API_BASE for provider "vllm".
    base_url = (
        base_url or from_env(f"{model_provider.upper()}_API_BASE", default=None)()
    )
    # NOTE(review): base_url may still be None here when neither the argument
    # nor the environment variable is set; cast(str, ...) only silences the
    # type checker — confirm _create_openai_compatible_model tolerates None.
    return _create_openai_compatible_model(
        chat_model_cls_name=chat_model_cls_name,
        provider=model_provider,
        base_url=cast(str, base_url),
        compatibility_options=compatibility_options,
        profiles=model_profiles,
    )