langchain-dev-utils 1.3.4__tar.gz → 1.3.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/PKG-INFO +1 -1
  2. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/pyproject.toml +1 -1
  3. langchain_dev_utils-1.3.6/src/langchain_dev_utils/__init__.py +1 -0
  4. langchain_dev_utils-1.3.6/src/langchain_dev_utils/_utils.py +131 -0
  5. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/middleware/format_prompt.py +1 -1
  6. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/wrap.py +99 -49
  7. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +58 -0
  8. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/chat_models/base.py +2 -0
  9. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/embeddings/adapters/openai_compatible.py +16 -0
  10. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/embeddings/base.py +2 -1
  11. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_wrap_agent.py +12 -8
  12. langchain_dev_utils-1.3.4/src/langchain_dev_utils/__init__.py +0 -1
  13. langchain_dev_utils-1.3.4/src/langchain_dev_utils/_utils.py +0 -43
  14. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/.gitignore +0 -0
  15. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/.python-version +0 -0
  16. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/.vscode/settings.json +0 -0
  17. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/LICENSE +0 -0
  18. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/README.md +0 -0
  19. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/README_cn.md +0 -0
  20. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/__init__.py +0 -0
  21. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/factory.py +0 -0
  22. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/file_system.py +0 -0
  23. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/middleware/__init__.py +0 -0
  24. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/middleware/handoffs.py +0 -0
  25. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/middleware/model_fallback.py +0 -0
  26. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/middleware/model_router.py +0 -0
  27. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/middleware/plan.py +0 -0
  28. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/middleware/summarization.py +0 -0
  29. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/middleware/tool_call_repair.py +0 -0
  30. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +0 -0
  31. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/middleware/tool_selection.py +0 -0
  32. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/agents/plan.py +0 -0
  33. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  34. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
  35. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/chat_models/adapters/create_utils.py +0 -0
  36. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/chat_models/adapters/register_profiles.py +0 -0
  37. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/chat_models/types.py +0 -0
  38. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  39. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/embeddings/adapters/__init__.py +0 -0
  40. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/embeddings/adapters/create_utils.py +0 -0
  41. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/message_convert/__init__.py +0 -0
  42. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/message_convert/content.py +0 -0
  43. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/message_convert/format.py +0 -0
  44. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  45. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/pipeline/parallel.py +0 -0
  46. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/pipeline/sequential.py +0 -0
  47. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/pipeline/types.py +0 -0
  48. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/py.typed +0 -0
  49. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  50. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +0 -0
  51. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/src/langchain_dev_utils/tool_calling/utils.py +0 -0
  52. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/__init__.py +0 -0
  53. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_agent.py +0 -0
  54. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_chat_models.py +0 -0
  55. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_embedding.py +0 -0
  56. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_handoffs_middleware.py +0 -0
  57. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_human_in_the_loop.py +0 -0
  58. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_load_embbeding.py +0 -0
  59. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_load_model.py +0 -0
  60. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_messages.py +0 -0
  61. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_model_tool_emulator.py +0 -0
  62. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_pipline.py +0 -0
  63. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_plan_middleware.py +0 -0
  64. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_router_model.py +0 -0
  65. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_tool_call_repair.py +0 -0
  66. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/test_tool_calling.py +0 -0
  67. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/utils/__init__.py +0 -0
  68. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/tests/utils/register.py +0 -0
  69. {langchain_dev_utils-1.3.4 → langchain_dev_utils-1.3.6}/uv.lock +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langchain-dev-utils
3
- Version: 1.3.4
3
+ Version: 1.3.6
4
4
  Summary: A practical utility library for LangChain and LangGraph development
5
5
  Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
6
6
  Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "langchain-dev-utils"
3
- version = "1.3.4"
3
+ version = "1.3.6"
4
4
  description = "A practical utility library for LangChain and LangGraph development"
5
5
  readme = "README.md"
6
6
  authors = [{ name = "tiebingice", email = "tiebingice123@outlook.com" }]
@@ -0,0 +1 @@
1
+ __version__ = "1.3.6"
@@ -0,0 +1,131 @@
1
+ from importlib import util
2
+ from typing import Literal, Optional
3
+
4
+ from pydantic import BaseModel
5
+
6
+
7
+ def _check_pkg_install(
8
+ pkg: Literal["langchain_openai", "json_repair"],
9
+ ) -> None:
10
+ if not util.find_spec(pkg):
11
+ if pkg == "langchain_openai":
12
+ msg = "Please install langchain_dev_utils[standard],when use 'openai-compatible'"
13
+ else:
14
+ msg = "Please install langchain_dev_utils[standard] to use ToolCallRepairMiddleware."
15
+ raise ImportError(msg)
16
+
17
+
18
+ def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
19
+ """
20
+ Return 'base_url' if the model has a field named or aliased as 'base_url',
21
+ else return 'api_base' if it has a field named or aliased as 'api_base',
22
+ else return None.
23
+ The return value is always either 'base_url', 'api_base', or None.
24
+ """
25
+ model_fields = model_cls.model_fields
26
+
27
+ # try model_fields first
28
+ if "base_url" in model_fields:
29
+ return "base_url"
30
+
31
+ if "api_base" in model_fields:
32
+ return "api_base"
33
+
34
+ # then try aliases
35
+ for field_info in model_fields.values():
36
+ if field_info.alias == "base_url":
37
+ return "base_url"
38
+
39
+ for field_info in model_fields.values():
40
+ if field_info.alias == "api_base":
41
+ return "api_base"
42
+
43
+ return None
44
+
45
+
46
+ def _validate_base_url(base_url: Optional[str] = None) -> None:
47
+ """Validate base URL format.
48
+
49
+ Args:
50
+ base_url: Base URL to validate
51
+
52
+ Raises:
53
+ ValueError: If base URL is not a valid HTTP or HTTPS URL
54
+ """
55
+ if base_url is None:
56
+ return
57
+
58
+ from urllib.parse import urlparse
59
+
60
+ parsed = urlparse(base_url.strip())
61
+
62
+ if not parsed.scheme or not parsed.netloc:
63
+ raise ValueError(
64
+ f"base_url must be a valid HTTP or HTTPS URL. Received: {base_url}"
65
+ )
66
+
67
+ if parsed.scheme not in ("http", "https"):
68
+ raise ValueError(
69
+ f"base_url must use HTTP or HTTPS protocol. Received: {parsed.scheme}"
70
+ )
71
+
72
+
73
+ def _validate_model_cls_name(model_cls_name: str) -> None:
74
+ """Validate model class name follows Python naming conventions.
75
+
76
+ Args:
77
+ model_cls_name: Class name to validate
78
+
79
+ Raises:
80
+ ValueError: If class name is invalid
81
+ """
82
+ if not model_cls_name:
83
+ raise ValueError("model_cls_name cannot be empty")
84
+
85
+ if not model_cls_name[0].isalpha():
86
+ raise ValueError(
87
+ f"model_cls_name must start with a letter. Received: {model_cls_name}"
88
+ )
89
+
90
+ if not all(c.isalnum() or c == "_" for c in model_cls_name):
91
+ raise ValueError(
92
+ f"model_cls_name can only contain letters, numbers, and underscores. Received: {model_cls_name}"
93
+ )
94
+
95
+ if model_cls_name[0].islower():
96
+ raise ValueError(
97
+ f"model_cls_name should start with an uppercase letter (PEP 8). Received: {model_cls_name}"
98
+ )
99
+
100
+ if len(model_cls_name) > 30:
101
+ raise ValueError(
102
+ f"model_cls_name must be 30 characters or fewer. Received: {model_cls_name}"
103
+ )
104
+
105
+
106
+ def _validate_provider_name(provider_name: str) -> None:
107
+ """Validate provider name follows Python naming conventions.
108
+
109
+ Args:
110
+ provider_name: Provider name to validate
111
+
112
+ Raises:
113
+ ValueError: If provider name is invalid
114
+ """
115
+ if not provider_name:
116
+ raise ValueError("provider_name cannot be empty")
117
+
118
+ if not provider_name[0].isalnum():
119
+ raise ValueError(
120
+ f"provider_name must start with a letter or number. Received: {provider_name}"
121
+ )
122
+
123
+ if not all(c.isalnum() or c == "_" for c in provider_name):
124
+ raise ValueError(
125
+ f"provider_name can only contain letters, numbers, underscores. Received: {provider_name}"
126
+ )
127
+
128
+ if len(provider_name) > 20:
129
+ raise ValueError(
130
+ f"provider_name must be 20 characters or fewer. Received: {provider_name}"
131
+ )
@@ -11,7 +11,7 @@ def format_prompt(request: ModelRequest) -> str:
11
11
  Variables are first resolved from the state, then from the context if not found.
12
12
 
13
13
  Example:
14
- >>> from langchain_dev_utils.agents.middleware.format_prompt import format_prompt
14
+ >>> from langchain_dev_utils.agents.middleware import format_prompt
15
15
  >>> from langchain.agents import create_agent
16
16
  >>> from langchain_core.messages import HumanMessage
17
17
  >>> from dataclasses import dataclass
@@ -1,8 +1,8 @@
1
1
  import asyncio
2
- from typing import Any, Awaitable, Callable, Optional, cast
2
+ from typing import Any, Awaitable, Callable, Optional
3
3
 
4
4
  from langchain.tools import ToolRuntime
5
- from langchain_core.messages import AnyMessage, HumanMessage
5
+ from langchain_core.messages import HumanMessage
6
6
  from langchain_core.tools import BaseTool, StructuredTool
7
7
  from langgraph.graph.state import CompiledStateGraph
8
8
 
@@ -14,9 +14,9 @@ def _process_input(request: str, runtime: ToolRuntime) -> str:
14
14
 
15
15
 
16
16
  def _process_output(
17
- request: str, response: list[AnyMessage], runtime: ToolRuntime
17
+ request: str, response: dict[str, Any], runtime: ToolRuntime
18
18
  ) -> Any:
19
- return response[-1].content
19
+ return response["messages"][-1].content
20
20
 
21
21
 
22
22
  def wrap_agent_as_tool(
@@ -25,17 +25,17 @@ def wrap_agent_as_tool(
25
25
  tool_description: Optional[str] = None,
26
26
  pre_input_hooks: Optional[
27
27
  tuple[
28
- Callable[[str, ToolRuntime], str],
29
- Callable[[str, ToolRuntime], Awaitable[str]],
28
+ Callable[[str, ToolRuntime], str | dict[str, Any]],
29
+ Callable[[str, ToolRuntime], Awaitable[str | dict[str, Any]]],
30
30
  ]
31
- | Callable[[str, ToolRuntime], str]
31
+ | Callable[[str, ToolRuntime], str | dict[str, Any]]
32
32
  ] = None,
33
33
  post_output_hooks: Optional[
34
34
  tuple[
35
- Callable[[str, list[AnyMessage], ToolRuntime], Any],
36
- Callable[[str, list[AnyMessage], ToolRuntime], Awaitable[Any]],
35
+ Callable[[str, dict[str, Any], ToolRuntime], Any],
36
+ Callable[[str, dict[str, Any], ToolRuntime], Awaitable[Any]],
37
37
  ]
38
- | Callable[[str, list[AnyMessage], ToolRuntime], Any]
38
+ | Callable[[str, dict[str, Any], ToolRuntime], Any]
39
39
  ] = None,
40
40
  ) -> BaseTool:
41
41
  """Wraps an agent as a tool
@@ -91,33 +91,58 @@ def wrap_agent_as_tool(
91
91
  def call_agent(
92
92
  request: str,
93
93
  runtime: ToolRuntime,
94
- ) -> str:
95
- request = process_input(request, runtime) if process_input else request
94
+ ):
95
+ _processed_input = process_input(request, runtime) if process_input else request
96
+ if isinstance(_processed_input, str):
97
+ agent_input = {"messages": [HumanMessage(content=_processed_input)]}
98
+ elif isinstance(_processed_input, dict):
99
+ if "messages" not in _processed_input:
100
+ raise ValueError("Agent input must contain 'messages' key")
101
+ agent_input = _processed_input
102
+ else:
103
+ raise ValueError("Pre Hooks must return a string or a dict")
96
104
 
97
- messages = [HumanMessage(content=request)]
98
- response = agent.invoke({"messages": messages})
105
+ response = agent.invoke(agent_input)
99
106
 
100
- response = process_output(request, response["messages"], runtime)
107
+ response = (
108
+ process_output(request, response, runtime)
109
+ if process_output
110
+ else response["messages"][-1].content
111
+ )
101
112
  return response
102
113
 
103
114
  async def acall_agent(
104
115
  request: str,
105
116
  runtime: ToolRuntime,
106
- ) -> str:
117
+ ):
107
118
  if asyncio.iscoroutinefunction(process_input_async):
108
- request = await process_input_async(request, runtime)
119
+ _processed_input = await process_input_async(request, runtime)
120
+ else:
121
+ _processed_input = (
122
+ process_input_async(request, runtime)
123
+ if process_input_async
124
+ else request
125
+ )
126
+
127
+ if isinstance(_processed_input, str):
128
+ agent_input = {"messages": [HumanMessage(content=_processed_input)]}
129
+ elif isinstance(_processed_input, dict):
130
+ if "messages" not in _processed_input:
131
+ raise ValueError("Agent input must contain 'messages' key")
132
+ agent_input = _processed_input
109
133
  else:
110
- request = cast(str, process_input_async(request, runtime))
134
+ raise ValueError("Pre Hooks must return a string or a dict")
111
135
 
112
- messages = [HumanMessage(content=request)]
113
- response = await agent.ainvoke({"messages": messages})
136
+ response = await agent.ainvoke(agent_input)
114
137
 
115
138
  if asyncio.iscoroutinefunction(process_output_async):
116
- response = await process_output_async(
117
- request, response["messages"], runtime
118
- )
139
+ response = await process_output_async(request, response, runtime)
119
140
  else:
120
- response = process_output(request, response["messages"], runtime)
141
+ response = (
142
+ process_output(request, response, runtime)
143
+ if process_output
144
+ else response["messages"][-1].content
145
+ )
121
146
 
122
147
  return response
123
148
 
@@ -143,17 +168,17 @@ def wrap_all_agents_as_tool(
143
168
  tool_description: Optional[str] = None,
144
169
  pre_input_hooks: Optional[
145
170
  tuple[
146
- Callable[[str, ToolRuntime], str],
147
- Callable[[str, ToolRuntime], Awaitable[str]],
171
+ Callable[[str, ToolRuntime], str | dict[str, Any]],
172
+ Callable[[str, ToolRuntime], Awaitable[str | dict[str, Any]]],
148
173
  ]
149
- | Callable[[str, ToolRuntime], str]
174
+ | Callable[[str, ToolRuntime], str | dict[str, Any]]
150
175
  ] = None,
151
176
  post_output_hooks: Optional[
152
177
  tuple[
153
- Callable[[str, list[AnyMessage], ToolRuntime], Any],
154
- Callable[[str, list[AnyMessage], ToolRuntime], Awaitable[Any]],
178
+ Callable[[str, dict[str, Any], ToolRuntime], Any],
179
+ Callable[[str, dict[str, Any], ToolRuntime], Awaitable[Any]],
155
180
  ]
156
- | Callable[[str, list[AnyMessage], ToolRuntime], Any]
181
+ | Callable[[str, dict[str, Any], ToolRuntime], Any]
157
182
  ] = None,
158
183
  ) -> BaseTool:
159
184
  """Wraps all agents as single tool
@@ -219,42 +244,67 @@ def wrap_all_agents_as_tool(
219
244
  agent_name: str,
220
245
  description: str,
221
246
  runtime: ToolRuntime,
222
- ) -> str:
223
- task_description = (
224
- process_input(description, runtime) if process_input else description
225
- )
226
-
247
+ ):
227
248
  if agent_name not in agents_map:
228
249
  raise ValueError(f"Agent {agent_name} not found")
229
250
 
230
- messages = [HumanMessage(content=task_description)]
231
- response = agents_map[agent_name].invoke({"messages": messages})
251
+ _processed_input = (
252
+ process_input(description, runtime) if process_input else description
253
+ )
254
+ if isinstance(_processed_input, str):
255
+ agent_input = {"messages": [HumanMessage(content=_processed_input)]}
256
+ elif isinstance(_processed_input, dict):
257
+ if "messages" not in _processed_input:
258
+ raise ValueError("Agent input must contain 'messages' key")
259
+ agent_input = _processed_input
260
+ else:
261
+ raise ValueError("Pre Hooks must return str or dict")
262
+
263
+ response = agent.invoke(agent_input)
232
264
 
233
- response = process_output(task_description, response["messages"], runtime)
265
+ response = (
266
+ process_output(description, response, runtime)
267
+ if process_output
268
+ else response["messages"][-1].content
269
+ )
234
270
  return response
235
271
 
236
272
  async def acall_agent(
237
273
  agent_name: str,
238
274
  description: str,
239
275
  runtime: ToolRuntime,
240
- ) -> str:
276
+ ):
277
+ if agent_name not in agents_map:
278
+ raise ValueError(f"Agent {agent_name} not found")
279
+
241
280
  if asyncio.iscoroutinefunction(process_input_async):
242
- task_description = await process_input_async(description, runtime)
281
+ _processed_input = await process_input_async(description, runtime)
243
282
  else:
244
- task_description = cast(str, process_input_async(description, runtime))
283
+ _processed_input = (
284
+ process_input_async(description, runtime)
285
+ if process_input_async
286
+ else description
287
+ )
245
288
 
246
- if agent_name not in agents_map:
247
- raise ValueError(f"Agent {agent_name} not found")
289
+ if isinstance(_processed_input, str):
290
+ agent_input = {"messages": [HumanMessage(content=_processed_input)]}
291
+ elif isinstance(_processed_input, dict):
292
+ if "messages" not in _processed_input:
293
+ raise ValueError("Agent input must contain 'messages' key")
294
+ agent_input = _processed_input
295
+ else:
296
+ raise ValueError("Pre Hooks must return str or dict")
248
297
 
249
- messages = [HumanMessage(content=task_description)]
250
- response = await agents_map[agent_name].ainvoke({"messages": messages})
298
+ response = await agents_map[agent_name].ainvoke(agent_input)
251
299
 
252
300
  if asyncio.iscoroutinefunction(process_output_async):
253
- response = await process_output_async(
254
- task_description, response["messages"], runtime
255
- )
301
+ response = await process_output_async(description, response, runtime)
256
302
  else:
257
- response = process_output(task_description, response["messages"], runtime)
303
+ response = (
304
+ process_output(description, response, runtime)
305
+ if process_output
306
+ else response["messages"][-1].content
307
+ )
258
308
 
259
309
  return response
260
310
 
@@ -50,6 +50,11 @@ from pydantic import (
50
50
  )
51
51
  from typing_extensions import Self
52
52
 
53
+ from ..._utils import (
54
+ _validate_base_url,
55
+ _validate_model_cls_name,
56
+ _validate_provider_name,
57
+ )
53
58
  from ..types import (
54
59
  CompatibilityOptions,
55
60
  ReasoningKeepPolicy,
@@ -586,6 +591,51 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
586
591
  )
587
592
 
588
593
 
594
+ def _validate_compatibility_options(
595
+ compatibility_options: Optional[CompatibilityOptions] = None,
596
+ ) -> None:
597
+ """Validate provider configuration against supported features.
598
+
599
+ Args:
600
+ compatibility_options: Optional configuration for the provider
601
+
602
+ Raises:
603
+ ValueError: If provider configuration is invalid
604
+ """
605
+ if compatibility_options is None:
606
+ compatibility_options = {}
607
+
608
+ if "supported_tool_choice" in compatibility_options:
609
+ _supported_tool_choice = compatibility_options["supported_tool_choice"]
610
+ for tool_choice in _supported_tool_choice:
611
+ if tool_choice not in ["auto", "none", "required", "specific"]:
612
+ raise ValueError(
613
+ f"Unsupported tool_choice: {tool_choice}. Please choose from 'auto', 'none', 'required','specific'."
614
+ )
615
+
616
+ if "supported_response_format" in compatibility_options:
617
+ _supported_response_format = compatibility_options["supported_response_format"]
618
+ for response_format in _supported_response_format:
619
+ if response_format not in ["json_schema", "json_mode"]:
620
+ raise ValueError(
621
+ f"Unsupported response_format: {response_format}. Please choose from 'json_schema', 'json_mode'."
622
+ )
623
+
624
+ if "reasoning_keep_policy" in compatibility_options:
625
+ _reasoning_keep_policy = compatibility_options["reasoning_keep_policy"]
626
+ if _reasoning_keep_policy not in ["never", "current", "all"]:
627
+ raise ValueError(
628
+ f"Unsupported reasoning_keep_policy: {_reasoning_keep_policy}. Please choose from 'never', 'current', 'all'."
629
+ )
630
+
631
+ if "include_usage" in compatibility_options:
632
+ _include_usage = compatibility_options["include_usage"]
633
+ if not isinstance(_include_usage, bool):
634
+ raise ValueError(
635
+ f"include_usage must be a boolean value. Received: {_include_usage}"
636
+ )
637
+
638
+
589
639
  def _create_openai_compatible_model(
590
640
  provider: str,
591
641
  base_url: str,
@@ -615,6 +665,14 @@ def _create_openai_compatible_model(
615
665
  if profiles is not None:
616
666
  _register_profile_with_provider(provider, profiles)
617
667
 
668
+ _validate_compatibility_options(compatibility_options)
669
+
670
+ _validate_provider_name(provider)
671
+
672
+ _validate_model_cls_name(chat_model_cls_name)
673
+
674
+ _validate_base_url(base_url)
675
+
618
676
  return create_model(
619
677
  chat_model_cls_name,
620
678
  __base__=_BaseChatOpenAICompatible,
@@ -7,6 +7,7 @@ from langchain_core.utils import from_env
7
7
  from langchain_dev_utils._utils import (
8
8
  _check_pkg_install,
9
9
  _get_base_url_field_name,
10
+ _validate_provider_name,
10
11
  )
11
12
 
12
13
  from .types import ChatModelProvider, ChatModelType, CompatibilityOptions
@@ -126,6 +127,7 @@ def register_model_provider(
126
127
  >>> model = load_chat_model(model="vllm:qwen3-4b")
127
128
  >>> model.invoke("Hello")
128
129
  """
130
+ _validate_provider_name(provider_name)
129
131
  base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
130
132
  if isinstance(chat_model, str):
131
133
  _check_pkg_install("langchain_openai")
@@ -4,6 +4,12 @@ from langchain_core.utils import from_env, secret_from_env
4
4
  from langchain_openai.embeddings import OpenAIEmbeddings
5
5
  from pydantic import Field, SecretStr, create_model
6
6
 
7
+ from ..._utils import (
8
+ _validate_base_url,
9
+ _validate_model_cls_name,
10
+ _validate_provider_name,
11
+ )
12
+
7
13
 
8
14
  class _BaseEmbeddingOpenAICompatible(OpenAIEmbeddings):
9
15
  """Base class for OpenAI-Compatible embeddings.
@@ -53,6 +59,16 @@ def _create_openai_compatible_embedding(
53
59
  """
54
60
  embeddings_cls_name = embeddings_cls_name or f"{provider.title()}Embeddings"
55
61
 
62
+ if len(provider) >= 20:
63
+ raise ValueError(
64
+ f"provider must be less than 50 characters. Received: {provider}"
65
+ )
66
+
67
+ _validate_model_cls_name(embeddings_cls_name)
68
+ _validate_provider_name(provider)
69
+
70
+ _validate_base_url(base_url)
71
+
56
72
  return create_model(
57
73
  embeddings_cls_name,
58
74
  __base__=_BaseEmbeddingOpenAICompatible,
@@ -6,6 +6,7 @@ from langchain_core.utils import from_env
6
6
  from langchain_dev_utils._utils import (
7
7
  _check_pkg_install,
8
8
  _get_base_url_field_name,
9
+ _validate_provider_name,
9
10
  )
10
11
 
11
12
  _EMBEDDINGS_PROVIDERS_DICT = {}
@@ -87,7 +88,7 @@ def register_embeddings_provider(
87
88
  >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
88
89
  >>> embeddings.embed_query("hello world")
89
90
  """
90
-
91
+ _validate_provider_name(provider_name)
91
92
  base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
92
93
  if isinstance(embeddings_model, str):
93
94
  if base_url is None:
@@ -31,18 +31,22 @@ async def process_input_async(request: str, runtime: ToolRuntime) -> str:
31
31
  return "<task_description>" + request + "</task_description>"
32
32
 
33
33
 
34
- def process_output(request: str, messages: list, runtime: ToolRuntime) -> str:
35
- assert request.startswith("<task_description>")
36
- assert request.endswith("</task_description>")
37
- return "<task_response>" + messages[-1].content + "</task_response>"
34
+ def process_output(request: str, response: dict[str, Any], runtime: ToolRuntime) -> str:
35
+ human_message = response["messages"][0]
36
+ assert human_message.content.startswith(
37
+ "<task_description>"
38
+ ) and human_message.content.endswith("</task_description>")
39
+ return "<task_response>" + response["messages"][-1].content + "</task_response>"
38
40
 
39
41
 
40
42
  async def process_output_async(
41
- request: str, messages: list, runtime: ToolRuntime
43
+ request: str, response: dict[str, Any], runtime: ToolRuntime
42
44
  ) -> str:
43
- assert request.startswith("<task_description>")
44
- assert request.endswith("</task_description>")
45
- return "<task_response>" + messages[-1].content + "</task_response>"
45
+ human_message = response["messages"][0]
46
+ assert human_message.content.startswith(
47
+ "<task_description>"
48
+ ) and human_message.content.endswith("</task_description>")
49
+ return "<task_response>" + response["messages"][-1].content + "</task_response>"
46
50
 
47
51
 
48
52
  def test_wrap_agent():
@@ -1 +0,0 @@
1
- __version__ = "1.3.4"
@@ -1,43 +0,0 @@
1
- from importlib import util
2
- from typing import Literal
3
-
4
- from pydantic import BaseModel
5
-
6
-
7
- def _check_pkg_install(
8
- pkg: Literal["langchain_openai", "json_repair"],
9
- ) -> None:
10
- if not util.find_spec(pkg):
11
- if pkg == "langchain_openai":
12
- msg = "Please install langchain_dev_utils[standard],when use 'openai-compatible'"
13
- else:
14
- msg = "Please install langchain_dev_utils[standard] to use ToolCallRepairMiddleware."
15
- raise ImportError(msg)
16
-
17
-
18
- def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
19
- """
20
- Return 'base_url' if the model has a field named or aliased as 'base_url',
21
- else return 'api_base' if it has a field named or aliased as 'api_base',
22
- else return None.
23
- The return value is always either 'base_url', 'api_base', or None.
24
- """
25
- model_fields = model_cls.model_fields
26
-
27
- # try model_fields first
28
- if "base_url" in model_fields:
29
- return "base_url"
30
-
31
- if "api_base" in model_fields:
32
- return "api_base"
33
-
34
- # then try aliases
35
- for field_info in model_fields.values():
36
- if field_info.alias == "base_url":
37
- return "base_url"
38
-
39
- for field_info in model_fields.values():
40
- if field_info.alias == "api_base":
41
- return "api_base"
42
-
43
- return None