langchain_dev_utils-1.3.7-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. langchain_dev_utils/__init__.py +1 -0
  2. langchain_dev_utils/_utils.py +131 -0
  3. langchain_dev_utils/agents/__init__.py +4 -0
  4. langchain_dev_utils/agents/factory.py +99 -0
  5. langchain_dev_utils/agents/file_system.py +252 -0
  6. langchain_dev_utils/agents/middleware/__init__.py +21 -0
  7. langchain_dev_utils/agents/middleware/format_prompt.py +66 -0
  8. langchain_dev_utils/agents/middleware/handoffs.py +214 -0
  9. langchain_dev_utils/agents/middleware/model_fallback.py +49 -0
  10. langchain_dev_utils/agents/middleware/model_router.py +200 -0
  11. langchain_dev_utils/agents/middleware/plan.py +367 -0
  12. langchain_dev_utils/agents/middleware/summarization.py +85 -0
  13. langchain_dev_utils/agents/middleware/tool_call_repair.py +96 -0
  14. langchain_dev_utils/agents/middleware/tool_emulator.py +60 -0
  15. langchain_dev_utils/agents/middleware/tool_selection.py +82 -0
  16. langchain_dev_utils/agents/plan.py +188 -0
  17. langchain_dev_utils/agents/wrap.py +324 -0
  18. langchain_dev_utils/chat_models/__init__.py +11 -0
  19. langchain_dev_utils/chat_models/adapters/__init__.py +3 -0
  20. langchain_dev_utils/chat_models/adapters/create_utils.py +53 -0
  21. langchain_dev_utils/chat_models/adapters/openai_compatible.py +715 -0
  22. langchain_dev_utils/chat_models/adapters/register_profiles.py +15 -0
  23. langchain_dev_utils/chat_models/base.py +282 -0
  24. langchain_dev_utils/chat_models/types.py +27 -0
  25. langchain_dev_utils/embeddings/__init__.py +11 -0
  26. langchain_dev_utils/embeddings/adapters/__init__.py +3 -0
  27. langchain_dev_utils/embeddings/adapters/create_utils.py +45 -0
  28. langchain_dev_utils/embeddings/adapters/openai_compatible.py +91 -0
  29. langchain_dev_utils/embeddings/base.py +234 -0
  30. langchain_dev_utils/message_convert/__init__.py +15 -0
  31. langchain_dev_utils/message_convert/content.py +201 -0
  32. langchain_dev_utils/message_convert/format.py +69 -0
  33. langchain_dev_utils/pipeline/__init__.py +7 -0
  34. langchain_dev_utils/pipeline/parallel.py +135 -0
  35. langchain_dev_utils/pipeline/sequential.py +101 -0
  36. langchain_dev_utils/pipeline/types.py +3 -0
  37. langchain_dev_utils/py.typed +0 -0
  38. langchain_dev_utils/tool_calling/__init__.py +14 -0
  39. langchain_dev_utils/tool_calling/human_in_the_loop.py +284 -0
  40. langchain_dev_utils/tool_calling/utils.py +81 -0
  41. langchain_dev_utils-1.3.7.dist-info/METADATA +103 -0
  42. langchain_dev_utils-1.3.7.dist-info/RECORD +44 -0
  43. langchain_dev_utils-1.3.7.dist-info/WHEEL +4 -0
  44. langchain_dev_utils-1.3.7.dist-info/licenses/LICENSE +21 -0
langchain_dev_utils/agents/middleware/handoffs.py
@@ -0,0 +1,214 @@
+ from typing import Any, Awaitable, Callable, Literal, cast
+
+ from langchain.agents import AgentState
+ from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse
+ from langchain.agents.middleware.types import ModelCallResult
+ from langchain.tools import BaseTool, ToolRuntime, tool
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.messages import SystemMessage, ToolMessage
+ from langgraph.types import Command
+ from typing_extensions import NotRequired, Optional, TypedDict
+
+ from langchain_dev_utils.chat_models import load_chat_model
+
+
+ class MultiAgentState(AgentState):
+     active_agent: NotRequired[str]
+
+
+ class AgentConfig(TypedDict):
+     model: NotRequired[str | BaseChatModel]
+     prompt: str | SystemMessage
+     tools: NotRequired[list[BaseTool | dict[str, Any]]]
+     default: NotRequired[bool]
+     handoffs: list[str] | Literal["all"]
+
+
+ def _handoffs_tool_name(agent_name: str) -> str:
+     """Build the handoff tool name for an agent, normalized to end with "_agent"."""
+     name = f"transfer_to_{agent_name}"
+     if not name.endswith("_agent"):
+         name += "_agent"
+     return name
+
+
+ def _create_handoffs_tool(agent_name: str, tool_description: Optional[str] = None):
+     """Create a tool for handing off to a specified agent.
+
+     Args:
+         agent_name (str): The name of the agent to transfer to.
+         tool_description (Optional[str]): Custom description for the handoff tool.
+             Defaults to a generic "Transfer to ..." description.
+
+     Returns:
+         BaseTool: A tool instance for handing off to the specified agent.
+     """
+
+     tool_name = _handoffs_tool_name(agent_name)
+     if tool_description is None:
+         tool_description = f"Transfer to the {agent_name}"
+
+     @tool(name_or_callable=tool_name, description=tool_description)
+     def handoffs_tool(runtime: ToolRuntime) -> Command:
+         return Command(
+             update={
+                 "messages": [
+                     ToolMessage(
+                         content=f"Transferred to {agent_name}",
+                         tool_call_id=runtime.tool_call_id,
+                     )
+                 ],
+                 "active_agent": agent_name,
+             }
+         )
+
+     return handoffs_tool
+
+
+ def _get_default_active_agent(agents_config: dict[str, AgentConfig]) -> Optional[str]:
+     for agent_name, config in agents_config.items():
+         if config.get("default", False):
+             return agent_name
+     return None
+
+
+ def _transform_agent_config(
+     config: dict[str, AgentConfig],
+     handoffs_tools: list[BaseTool],
+ ) -> dict[str, AgentConfig]:
+     """Transform the agent config by attaching handoff tools.
+
+     Args:
+         config (dict[str, AgentConfig]): The agent config.
+         handoffs_tools (list[BaseTool]): The list of handoff tools.
+
+     Returns:
+         dict[str, AgentConfig]: The transformed agent config.
+     """
+
+     new_config = {}
+     for agent_name, _cfg in config.items():
+         new_config[agent_name] = {}
+
+         if "model" in _cfg:
+             new_config[agent_name]["model"] = _cfg["model"]
+         if "prompt" in _cfg:
+             new_config[agent_name]["prompt"] = _cfg["prompt"]
+         if "default" in _cfg:
+             new_config[agent_name]["default"] = _cfg["default"]
+         if "tools" in _cfg:
+             new_config[agent_name]["tools"] = _cfg["tools"]
+
+         handoffs = _cfg.get("handoffs", [])
+         if handoffs == "all":
+             # Every handoff tool except the one that transfers to this agent itself.
+             handoff_tools = [
+                 handoff_tool
+                 for handoff_tool in handoffs_tools
+                 if handoff_tool.name != _handoffs_tool_name(agent_name)
+             ]
+         else:
+             if not isinstance(handoffs, list):
+                 raise ValueError(
+                     f"handoffs for agent {agent_name} must be a list of agent names or 'all'"
+                 )
+
+             handoff_tools = [
+                 handoff_tool
+                 for handoff_tool in handoffs_tools
+                 if handoff_tool.name
+                 in [
+                     _handoffs_tool_name(_handoff_agent_name)
+                     for _handoff_agent_name in handoffs
+                 ]
+             ]
+
+         new_config[agent_name]["tools"] = [
+             *new_config[agent_name].get("tools", []),
+             *handoff_tools,
+         ]
+     return new_config
+
+
+ class HandoffAgentMiddleware(AgentMiddleware):
+     """Agent middleware for switching between multiple agents.
+
+     This middleware dynamically replaces model call parameters based on the
+     currently active agent's configuration, enabling seamless switching
+     between agents.
+
+     Args:
+         agents_config (dict[str, AgentConfig]): A dictionary of agent configurations.
+         custom_handoffs_tool_descriptions (Optional[dict[str, str]]): Custom
+             descriptions for the generated handoff tools, keyed by agent name.
+             Defaults to None.
+         handoffs_tool_overrides (Optional[dict[str, BaseTool]]): Handoff tools
+             that override the generated ones, keyed by agent name. Defaults to None.
+
+     Examples:
+         ```python
+         from langchain_dev_utils.agents.middleware import HandoffAgentMiddleware
+
+         middleware = HandoffAgentMiddleware(agents_config)
+         ```
+     """
+
+     state_schema = MultiAgentState
+
+     def __init__(
+         self,
+         agents_config: dict[str, AgentConfig],
+         custom_handoffs_tool_descriptions: Optional[dict[str, str]] = None,
+         handoffs_tool_overrides: Optional[dict[str, BaseTool]] = None,
+     ) -> None:
+         default_agent_name = _get_default_active_agent(agents_config)
+         if default_agent_name is None:
+             raise ValueError(
+                 "No default agent found; mark exactly one agent with default=True"
+             )
+
+         if custom_handoffs_tool_descriptions is None:
+             custom_handoffs_tool_descriptions = {}
+
+         if handoffs_tool_overrides is None:
+             handoffs_tool_overrides = {}
+
+         handoffs_tools = []
+         for agent_name in agents_config.keys():
+             if not handoffs_tool_overrides.get(agent_name):
+                 handoffs_tools.append(
+                     _create_handoffs_tool(
+                         agent_name,
+                         custom_handoffs_tool_descriptions.get(agent_name),
+                     )
+                 )
+             else:
+                 handoffs_tools.append(
+                     cast(BaseTool, handoffs_tool_overrides.get(agent_name))
+                 )
+
+         self.default_agent_name = default_agent_name
+         self.agents_config = _transform_agent_config(
+             agents_config,
+             handoffs_tools,
+         )
+         self.tools = handoffs_tools
+
+     def _get_override_request(self, request: ModelRequest) -> ModelRequest:
+         active_agent_name = request.state.get("active_agent", self.default_agent_name)
+
+         _config = self.agents_config[active_agent_name]
+
+         params = {}
+         if _config.get("model"):
+             model = _config.get("model")
+             if isinstance(model, str):
+                 model = load_chat_model(model)
+             params["model"] = model
+         if _config.get("prompt"):
+             params["system_prompt"] = _config.get("prompt")
+         if _config.get("tools"):
+             params["tools"] = _config.get("tools")
+
+         if params:
+             return request.override(**params)
+         else:
+             return request
+
+     def wrap_model_call(
+         self, request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
+     ) -> ModelCallResult:
+         override_request = self._get_override_request(request)
+         return handler(override_request)
+
+     async def awrap_model_call(
+         self,
+         request: ModelRequest,
+         handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+     ) -> ModelCallResult:
+         override_request = self._get_override_request(request)
+         return await handler(override_request)
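
To make the handoff flow concrete, here is a minimal usage sketch. It assumes `create_agent` is the factory exported from `langchain_dev_utils.agents` (per `agents/factory.py` in the file list above, and as used in the `ModelFallbackMiddleware` docstring below); the agent names, prompts, and model identifiers are illustrative only.

```python
from langchain_dev_utils.agents import create_agent
from langchain_dev_utils.agents.middleware import HandoffAgentMiddleware

# Hypothetical two-agent setup: a default triage agent that can hand off to a
# billing specialist, and a billing agent that can hand off to any other agent.
agents_config = {
    "triage": {
        "prompt": "You triage requests and hand off to a specialist when needed.",
        "default": True,  # exactly one agent must set default=True
        "handoffs": ["billing"],  # generates a transfer_to_billing_agent tool
    },
    "billing": {
        "model": "vllm:qwen3-8b",  # optional per-agent model, resolved via load_chat_model
        "prompt": "You handle billing questions.",
        "handoffs": "all",  # may transfer to every other agent
    },
}

agent = create_agent(
    model="vllm:qwen3-4b",  # used by agents that do not set their own model
    middleware=[HandoffAgentMiddleware(agents_config)],
)
```

When the active agent calls a `transfer_to_*_agent` tool, the `Command` update writes `active_agent` into state, and the next model call is rewritten with that agent's model, prompt, and tools.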
langchain_dev_utils/agents/middleware/model_fallback.py
@@ -0,0 +1,49 @@
+ from langchain.agents.middleware.model_fallback import (
+     ModelFallbackMiddleware as _ModelFallbackMiddleware,
+ )
+
+ from langchain_dev_utils.chat_models.base import load_chat_model
+
+
+ class ModelFallbackMiddleware(_ModelFallbackMiddleware):
+     """Automatic fallback to alternative models on errors.
+
+     Retries failed model calls with alternative models in sequence until one
+     succeeds or all models are exhausted. The primary model is the one
+     specified in create_agent().
+
+     Args:
+         first_model: The first model to try on error. Must be a string identifier.
+         additional_models: Additional models to try in sequence on error.
+
+     Example:
+         ```python
+         from langchain_core.messages import HumanMessage
+
+         from langchain_dev_utils.agents import create_agent
+         from langchain_dev_utils.agents.middleware import ModelFallbackMiddleware
+
+         fallback = ModelFallbackMiddleware(
+             "vllm:qwen3-8b",  # Tried first on error
+             "vllm:gpt-oss-20b",  # Then this
+         )
+
+         agent = create_agent(
+             model="vllm:qwen3-4b",  # Primary model
+             middleware=[fallback],
+         )
+
+         # If the primary model fails: tries qwen3-8b, then gpt-oss-20b
+         result = await agent.ainvoke({"messages": [HumanMessage("Hello")]})
+         ```
+     """
+
+     def __init__(
+         self,
+         first_model: str,
+         *additional_models: str,
+     ) -> None:
+         first_chat_model = load_chat_model(first_model)
+
+         additional_chat_models = [load_chat_model(model) for model in additional_models]
+         super().__init__(
+             first_chat_model,
+             *additional_chat_models,
+         )
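
The `vllm:qwen3-8b`-style identifiers above only resolve once the provider is known to `load_chat_model`. Below is a minimal sketch of that setup, assuming a `register_model_provider` helper is exported from `langchain_dev_utils.chat_models` alongside `load_chat_model` (suggested by `chat_models/base.py` and the `openai_compatible` adapter in the file list); the adapter key and base URL are illustrative assumptions, not the package's confirmed API.

```python
from langchain_dev_utils.chat_models import load_chat_model, register_model_provider

# Hypothetical: expose an OpenAI-compatible vLLM server as the "vllm" provider.
register_model_provider(
    "vllm",
    "openai-compatible",  # assumed adapter key; see chat_models/adapters/
    base_url="http://localhost:8000/v1",  # illustrative endpoint
)

# "provider:model" identifiers now resolve to chat model instances.
model = load_chat_model("vllm:qwen3-8b")
```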
langchain_dev_utils/agents/middleware/model_router.py
@@ -0,0 +1,200 @@
+ from typing import Annotated, Any, Awaitable, Callable, NotRequired, Optional, cast
+
+ from langchain.agents import AgentState
+ from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse
+ from langchain.agents.middleware.types import ModelCallResult, OmitFromInput
+ from langchain_core.language_models import BaseChatModel
+ from langchain_core.messages import AnyMessage, SystemMessage
+ from langchain_core.tools import BaseTool
+ from langgraph.runtime import Runtime
+ from pydantic import BaseModel, Field
+ from typing_extensions import TypedDict
+
+ from langchain_dev_utils.chat_models import load_chat_model
+ from langchain_dev_utils.message_convert import format_sequence
+
+
+ class ModelDict(TypedDict):
+     model_name: str
+     model_description: str
+     tools: NotRequired[list[BaseTool | dict[str, Any]]]
+     model_kwargs: NotRequired[dict[str, Any]]
+     model_instance: NotRequired[BaseChatModel]
+     model_system_prompt: NotRequired[str]
+
+
+ class SelectModel(BaseModel):
+     """Tool for model selection. You must call this tool to return the final selected model."""
+
+     model_name: str = Field(
+         ...,
+         description="Selected model name (must be the full model name, for example, openai:gpt-4o)",
+     )
+
+
+ _ROUTER_MODEL_PROMPT = """
+ # Role Description
+ You are an intelligent routing model, specializing in analyzing task requirements and matching them to the appropriate AI model.
+
+ # Available Model List
+ {model_card}
+
+ # Core Responsibilities
+ 1. **Task Analysis**: Deeply understand the type, complexity, and special needs of the user's task
+ 2. **Model Matching**: Select the most appropriate model based on the task characteristics
+ 3. **Tool Call**: **Must call the SelectModel tool** to return the final selection
+
+ # ⚠️ Important Instructions
+ **After completing the analysis, you must immediately call the SelectModel tool to return the model selection result.**
+ **This is the only valid way to produce output; you must not return the result in any other form.**
+
+ # Selection Criteria
+ - Consider the type of the task (dialogue, reasoning, creation, analysis, etc.)
+ - Evaluate the complexity of the task
+ - Match the model's specialized capabilities and applicable scenarios
+ - Ensure the model's capabilities closely match the task requirements
+
+ # Execution Process
+ 1. Analyze user task requirements
+ 2. Compare the capabilities of available models
+ 3. Call the **SelectModel tool** to submit the selection
+ 4. Task complete
+
+ Strictly adhere to the tool call requirements!
+ """
+
+
+ class ModelRouterState(AgentState):
+     router_model_selection: Annotated[str, OmitFromInput]
+
+
+ class ModelRouterMiddleware(AgentMiddleware):
+     """Model routing middleware that automatically selects the most suitable model
+     based on the input content.
+
+     Args:
+         router_model: Model used for routing selection; either a model name
+             string or a BaseChatModel instance
+         model_list: List of candidate models, each containing model_name,
+             model_description, tools (optional), model_kwargs (optional),
+             model_instance (optional), and model_system_prompt (optional)
+         router_prompt: Routing prompt template; uses the default template if None
+
+     Examples:
+         ```python
+         from langchain_dev_utils.agents.middleware import ModelRouterMiddleware
+
+         middleware = ModelRouterMiddleware(
+             router_model="vllm:qwen3-4b",
+             model_list=model_list,
+         )
+         ```
+     """
+
+     state_schema = ModelRouterState
+
+     def __init__(
+         self,
+         router_model: str | BaseChatModel,
+         model_list: list[ModelDict],
+         router_prompt: Optional[str] = None,
+     ) -> None:
+         super().__init__()
+         if isinstance(router_model, BaseChatModel):
+             self.router_model = router_model.with_structured_output(SelectModel)
+         else:
+             self.router_model = load_chat_model(router_model).with_structured_output(
+                 SelectModel
+             )
+         self.model_list = model_list
+
+         if router_prompt is None:
+             router_prompt = _ROUTER_MODEL_PROMPT.format(
+                 model_card=format_sequence(
+                     [
+                         f"model_name:\n {model['model_name']}\n model_description:\n {model['model_description']}"
+                         for model in model_list
+                     ],
+                     with_num=True,
+                 )
+             )
+
+         self.router_prompt = router_prompt
+
+     def _select_model(self, messages: list[AnyMessage]) -> str:
+         response = cast(
+             SelectModel,
+             self.router_model.invoke(
+                 [SystemMessage(content=self.router_prompt), *messages]
+             ),
+         )
+         return response.model_name if response is not None else "default-model"
+
+     async def _aselect_model(self, messages: list[AnyMessage]) -> str:
+         response = cast(
+             SelectModel,
+             await self.router_model.ainvoke(
+                 [SystemMessage(content=self.router_prompt), *messages]
+             ),
+         )
+         return response.model_name if response is not None else "default-model"
+
+     def before_agent(
+         self, state: ModelRouterState, runtime: Runtime
+     ) -> dict[str, Any] | None:
+         model_name = self._select_model(state["messages"])
+         return {"router_model_selection": model_name}
+
+     async def abefore_agent(
+         self, state: ModelRouterState, runtime: Runtime
+     ) -> dict[str, Any] | None:
+         model_name = await self._aselect_model(state["messages"])
+         return {"router_model_selection": model_name}
+
+     def _get_override_request(self, request: ModelRequest) -> ModelRequest:
+         model_dict = {
+             item["model_name"]: {
+                 "tools": item.get("tools", None),
+                 "kwargs": item.get("model_kwargs", None),
+                 "system_prompt": item.get("model_system_prompt", None),
+                 "model_instance": item.get("model_instance", None),
+             }
+             for item in self.model_list
+         }
+         select_model_name = request.state.get("router_model_selection", "default-model")
+
+         override_kwargs = {}
+         if select_model_name != "default-model" and select_model_name in model_dict:
+             model_values = model_dict.get(select_model_name, {})
+             if model_values["model_instance"] is not None:
+                 model = model_values["model_instance"]
+             else:
+                 if model_values["kwargs"] is not None:
+                     model = load_chat_model(select_model_name, **model_values["kwargs"])
+                 else:
+                     model = load_chat_model(select_model_name)
+             override_kwargs["model"] = model
+             if model_values["tools"] is not None:
+                 override_kwargs["tools"] = model_values["tools"]
+             if model_values["system_prompt"] is not None:
+                 override_kwargs["system_message"] = SystemMessage(
+                     content=model_values["system_prompt"]
+                 )
+
+         if override_kwargs:
+             return request.override(**override_kwargs)
+         else:
+             return request
+
+     def wrap_model_call(
+         self, request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
+     ) -> ModelCallResult:
+         override_request = self._get_override_request(request)
+         return handler(override_request)
+
+     async def awrap_model_call(
+         self,
+         request: ModelRequest,
+         handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
+     ) -> ModelCallResult:
+         override_request = self._get_override_request(request)
+         return await handler(override_request)
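
A minimal end-to-end sketch of the router, again assuming the `create_agent` factory from `langchain_dev_utils.agents`; the model names, descriptions, and kwargs are illustrative. Each `model_list` entry is a plain dict conforming to `ModelDict`, and `before_agent` runs the routing model once per run before the main model call.

```python
from langchain_dev_utils.agents import create_agent
from langchain_dev_utils.agents.middleware import ModelRouterMiddleware

# Candidate models the router may choose between (ModelDict entries).
model_list = [
    {
        "model_name": "vllm:qwen3-4b",
        "model_description": "Fast general-purpose model for simple dialogue.",
    },
    {
        "model_name": "vllm:qwen3-32b",
        "model_description": "Stronger model for complex reasoning and analysis.",
        "model_kwargs": {"temperature": 0.2},  # forwarded to load_chat_model
    },
]

agent = create_agent(
    model="vllm:qwen3-4b",  # used unchanged when the router falls back to "default-model"
    middleware=[
        ModelRouterMiddleware(router_model="vllm:qwen3-4b", model_list=model_list)
    ],
)

result = agent.invoke({"messages": [("user", "Prove that sqrt(2) is irrational.")]})
```

If the routing model fails to pick a listed name, `_get_override_request` leaves the request untouched, so the agent's primary model handles the call.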