langchain-dev-utils 1.3.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44) hide show
  1. langchain_dev_utils/__init__.py +1 -0
  2. langchain_dev_utils/_utils.py +131 -0
  3. langchain_dev_utils/agents/__init__.py +4 -0
  4. langchain_dev_utils/agents/factory.py +99 -0
  5. langchain_dev_utils/agents/file_system.py +252 -0
  6. langchain_dev_utils/agents/middleware/__init__.py +21 -0
  7. langchain_dev_utils/agents/middleware/format_prompt.py +66 -0
  8. langchain_dev_utils/agents/middleware/handoffs.py +214 -0
  9. langchain_dev_utils/agents/middleware/model_fallback.py +49 -0
  10. langchain_dev_utils/agents/middleware/model_router.py +200 -0
  11. langchain_dev_utils/agents/middleware/plan.py +367 -0
  12. langchain_dev_utils/agents/middleware/summarization.py +85 -0
  13. langchain_dev_utils/agents/middleware/tool_call_repair.py +96 -0
  14. langchain_dev_utils/agents/middleware/tool_emulator.py +60 -0
  15. langchain_dev_utils/agents/middleware/tool_selection.py +82 -0
  16. langchain_dev_utils/agents/plan.py +188 -0
  17. langchain_dev_utils/agents/wrap.py +324 -0
  18. langchain_dev_utils/chat_models/__init__.py +11 -0
  19. langchain_dev_utils/chat_models/adapters/__init__.py +3 -0
  20. langchain_dev_utils/chat_models/adapters/create_utils.py +53 -0
  21. langchain_dev_utils/chat_models/adapters/openai_compatible.py +715 -0
  22. langchain_dev_utils/chat_models/adapters/register_profiles.py +15 -0
  23. langchain_dev_utils/chat_models/base.py +282 -0
  24. langchain_dev_utils/chat_models/types.py +27 -0
  25. langchain_dev_utils/embeddings/__init__.py +11 -0
  26. langchain_dev_utils/embeddings/adapters/__init__.py +3 -0
  27. langchain_dev_utils/embeddings/adapters/create_utils.py +45 -0
  28. langchain_dev_utils/embeddings/adapters/openai_compatible.py +91 -0
  29. langchain_dev_utils/embeddings/base.py +234 -0
  30. langchain_dev_utils/message_convert/__init__.py +15 -0
  31. langchain_dev_utils/message_convert/content.py +201 -0
  32. langchain_dev_utils/message_convert/format.py +69 -0
  33. langchain_dev_utils/pipeline/__init__.py +7 -0
  34. langchain_dev_utils/pipeline/parallel.py +135 -0
  35. langchain_dev_utils/pipeline/sequential.py +101 -0
  36. langchain_dev_utils/pipeline/types.py +3 -0
  37. langchain_dev_utils/py.typed +0 -0
  38. langchain_dev_utils/tool_calling/__init__.py +14 -0
  39. langchain_dev_utils/tool_calling/human_in_the_loop.py +284 -0
  40. langchain_dev_utils/tool_calling/utils.py +81 -0
  41. langchain_dev_utils-1.3.7.dist-info/METADATA +103 -0
  42. langchain_dev_utils-1.3.7.dist-info/RECORD +44 -0
  43. langchain_dev_utils-1.3.7.dist-info/WHEEL +4 -0
  44. langchain_dev_utils-1.3.7.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,101 @@
1
+ from typing import Optional
2
+
3
+ from langgraph.cache.base import BaseCache
4
+ from langgraph.graph import StateGraph
5
+ from langgraph.graph.state import CompiledStateGraph
6
+ from langgraph.store.base import BaseStore
7
+ from langgraph.types import Checkpointer
8
+ from langgraph.typing import ContextT, InputT, OutputT, StateT
9
+
10
+ from .types import SubGraph
11
+
12
+
13
def create_sequential_pipeline(
    sub_graphs: list[SubGraph],
    state_schema: type[StateT],
    graph_name: Optional[str] = None,
    context_schema: type[ContextT] | None = None,
    input_schema: type[InputT] | None = None,
    output_schema: type[OutputT] | None = None,
    checkpointer: Checkpointer | None = None,
    store: BaseStore | None = None,
    cache: BaseCache | None = None,
) -> CompiledStateGraph[StateT, ContextT, InputT, OutputT]:
    """
    Create a sequential pipeline from a list of subgraphs.

    This function allows you to compose multiple StateGraphs in a sequential fashion,
    where each subgraph executes one after another. This is useful for creating
    complex multi-agent workflows where agents need to work in a specific order.

    Args:
        sub_graphs: List of sub-graphs to execute sequentially (must be non-empty)
        state_schema: state schema of the final constructed graph
        graph_name: Name of the final constructed graph
        context_schema: context schema of the final constructed graph
        input_schema: input schema of the final constructed graph
        output_schema: output schema of the final constructed graph
        checkpointer: Optional LangGraph checkpointer for the final constructed
            graph
        store: Optional LangGraph store for the final constructed graph
        cache: Optional LangGraph cache for the final constructed graph

    Returns:
        CompiledStateGraph[StateT, ContextT, InputT, OutputT]: Compiled state
        graph of the pipeline.

    Raises:
        ValueError: If ``sub_graphs`` is empty, if a subgraph has no explicit
            name (or still carries the default "LangGraph" name), or if two
            subgraphs share the same name.

    Example:
        # Basic sequential pipeline with multiple specialized agents:
        >>> from langchain_dev_utils.pipeline import create_sequential_pipeline
        >>>
        >>> graph = create_sequential_pipeline(
        ...     sub_graphs=[
        ...         time_agent, weather_agent, user_agent
        ...     ],
        ...     state_schema=AgentState,
        ...     graph_name="sequential_agents_pipeline",
        ... )
        >>>
        >>> response = graph.invoke({"messages": [HumanMessage("Hello")]})
    """
    # Fail fast with a clear message: an empty list would otherwise surface as
    # an obscure IndexError at `compiled_subgraphs[0].name` below.
    if not sub_graphs:
        raise ValueError("`sub_graphs` must contain at least one subgraph.")

    graph = StateGraph(
        state_schema=state_schema,
        context_schema=context_schema,
        input_schema=input_schema,
        output_schema=output_schema,
    )

    subgraphs_names: set[str] = set()

    compiled_subgraphs: list[CompiledStateGraph] = []
    for subgraph in sub_graphs:
        # Compile raw StateGraphs on the fly; already-compiled graphs pass through.
        if isinstance(subgraph, StateGraph):
            subgraph = subgraph.compile()

        compiled_subgraphs.append(subgraph)
        # "LangGraph" is the compile-time default name, i.e. the user never set one.
        if subgraph.name is None or subgraph.name == "LangGraph":
            raise ValueError(
                "Please specify a name when you create your agent, either via `create_react_agent(..., name=agent_name)` "
                "or via `graph.compile(name=name)`."
            )

        if subgraph.name in subgraphs_names:
            raise ValueError(
                f"Subgraph with name '{subgraph.name}' already exists. Subgraph names must be unique."
            )

        subgraphs_names.add(subgraph.name)

    # Each node is named after its subgraph; add_sequence chains them in order.
    graph.add_sequence([(g.name, g) for g in compiled_subgraphs])
    graph.add_edge("__start__", compiled_subgraphs[0].name)
    return graph.compile(
        name=graph_name or "sequential graph",
        checkpointer=checkpointer,
        store=store,
        cache=cache,
    )
@@ -0,0 +1,3 @@
1
+ from langgraph.graph.state import CompiledStateGraph, StateGraph
2
+
3
+ SubGraph = StateGraph | CompiledStateGraph
File without changes
@@ -0,0 +1,14 @@
1
+ from .human_in_the_loop import (
2
+ InterruptParams,
3
+ human_in_the_loop,
4
+ human_in_the_loop_async,
5
+ )
6
+ from .utils import has_tool_calling, parse_tool_calling
7
+
8
+ __all__ = [
9
+ "has_tool_calling",
10
+ "parse_tool_calling",
11
+ "human_in_the_loop",
12
+ "human_in_the_loop_async",
13
+ "InterruptParams",
14
+ ]
@@ -0,0 +1,284 @@
1
+ from typing import Any, Callable, Optional, TypedDict, Union, overload
2
+
3
+ from langchain_core.tools import BaseTool
4
+ from langchain_core.tools import tool as create_tool
5
+ from langgraph.types import interrupt
6
+
7
+
8
class InterruptParams(TypedDict):
    """Payload handed to a human-in-the-loop handler for one pending tool call."""

    # Name of the tool the model wants to invoke.
    tool_call_name: str
    # Arguments the model supplied for the call.
    tool_call_args: dict[str, Any]
    # The wrapped tool object, used to actually execute the call after review.
    tool: BaseTool
12
+
13
+
14
+ HumanInterruptHandler = Callable[[InterruptParams], Any]
15
+
16
+
17
# Overloads describing the two supported call forms of `human_in_the_loop`;
# the actual implementation follows below.
@overload
def human_in_the_loop(
    func: Callable,
) -> BaseTool:
    """
    Decorator for adding human-in-the-loop review to a synchronous tool function.

    Usage: @human_in_the_loop
    """
    ...


@overload
def human_in_the_loop(
    *,
    handler: Optional[HumanInterruptHandler] = None,
) -> Callable[[Callable], BaseTool]:
    """
    Decorator for adding human-in-the-loop review to a synchronous tool function with custom handler.

    Usage: @human_in_the_loop(handler=custom_handler)
    """
    ...
40
+
41
+
42
# Overloads describing the two supported call forms of `human_in_the_loop_async`;
# the actual implementation follows below.
@overload
def human_in_the_loop_async(
    func: Callable,
) -> BaseTool:
    """
    Decorator for adding human-in-the-loop review to an asynchronous tool function.

    Usage: @human_in_the_loop_async
    """
    ...


@overload
def human_in_the_loop_async(
    *,
    handler: Optional[HumanInterruptHandler] = None,
) -> Callable[[Callable], BaseTool]:
    """
    Decorator for adding human-in-the-loop review to an asynchronous tool function with custom handler.

    Usage: @human_in_the_loop_async(handler=custom_handler)
    """
    ...
65
+
66
+
67
def _get_human_in_the_loop_request(params: InterruptParams) -> dict[str, Any]:
    """Build the interrupt payload that describes a pending tool call to a reviewer."""
    action_request = {
        "action": params["tool_call_name"],
        "args": params["tool_call_args"],
    }
    # All three review outcomes (accept / edit / respond) are allowed by default.
    review_config = {
        "allow_accept": True,
        "allow_edit": True,
        "allow_respond": True,
    }
    return {
        "action_request": action_request,
        "config": review_config,
        "description": f"Please review tool call: {params['tool_call_name']}",
    }
80
+
81
+
82
def default_handler(params: InterruptParams) -> Any:
    """Default synchronous review flow: interrupt, then accept / edit / respond."""
    decision = interrupt(_get_human_in_the_loop_request(params))
    decision_type = decision["type"]

    if decision_type == "accept":
        # Run the tool with the model's original arguments.
        return params["tool"].invoke(params["tool_call_args"])
    if decision_type == "edit":
        # Run the tool with the human-edited arguments.
        return params["tool"].invoke(decision["args"])
    if decision_type == "response":
        # Skip execution entirely; the human's reply becomes the tool result.
        return decision["args"]
    raise ValueError(f"Unsupported interrupt response type: {decision['type']}")
95
+
96
+
97
async def default_handler_async(params: InterruptParams) -> Any:
    """Default asynchronous review flow: interrupt, then accept / edit / respond."""
    decision = interrupt(_get_human_in_the_loop_request(params))
    decision_type = decision["type"]

    if decision_type == "accept":
        # Run the tool asynchronously with the model's original arguments.
        return await params["tool"].ainvoke(params["tool_call_args"])
    if decision_type == "edit":
        # Run the tool asynchronously with the human-edited arguments.
        return await params["tool"].ainvoke(decision["args"])
    if decision_type == "response":
        # Skip execution entirely; the human's reply becomes the tool result.
        return decision["args"]
    raise ValueError(f"Unsupported interrupt response type: {decision['type']}")
110
+
111
+
112
def human_in_the_loop(
    func: Optional[Callable] = None,
    *,
    handler: Optional[HumanInterruptHandler] = None,
) -> Union[Callable[[Callable], BaseTool], BaseTool]:
    """
    Add human-in-the-loop review support to a synchronous tool.

    Wrapping a tool with this decorator pauses execution at call time (via a
    LangGraph interrupt) so a human can approve, edit, or reject the tool
    invocation before it runs.

    Supports both syntaxes:
        @human_in_the_loop
        @human_in_the_loop(handler=fn)

    Args:
        func: The function to decorate. **Do not pass this directly.**
        handler: Configuration for the human interrupt. If not provided, uses default_handler.

    Returns:
        If `func` is provided, returns the decorated BaseTool.
        If `func` is None, returns a decorator that will decorate the target function.

    Example:
        # Basic usage with default handler:
        >>> from langchain_dev_utils.tool_calling import human_in_the_loop
        >>> from langchain_core.tools import tool
        >>> import datetime
        >>>
        >>> @human_in_the_loop
        ... @tool
        ... def get_current_time() -> str:
        ...     \"\"\"Get current timestamp\"\"\"
        ...     return str(datetime.datetime.now().timestamp())

        # Usage with custom handler:
        >>> def custom_handler(params: InterruptParams) -> Any:
        ...     response = interrupt(
        ...         # Please add your custom interrupt response content here
        ...     )
        ...     if response["type"] == "accept":
        ...         return params["tool"].invoke(params["tool_call_args"])
        ...     elif response["type"] == "reject":
        ...         return "User rejected this tool call"
        ...     else:
        ...         raise ValueError(f"Unsupported response type: {response['type']}")
        >>>
        >>> @human_in_the_loop(handler=custom_handler)
        ... @tool
        ... def sensitive_operation(data: str) -> str:
        ...     \"\"\"Perform sensitive operation on data\"\"\"
        ...     return f"Processed: {data}"
    """

    def decorate(target: Callable) -> BaseTool:
        """Wrap *target* (a plain callable or an existing BaseTool) with review."""
        base_tool = target if isinstance(target, BaseTool) else create_tool(target)
        review: HumanInterruptHandler = handler or default_handler

        # Re-expose the tool under its original name/description/schema so the
        # wrapper is indistinguishable from the wrapped tool to the model.
        @create_tool(
            base_tool.name,
            description=base_tool.description,
            args_schema=base_tool.args_schema,
        )
        def tool_with_human_review(**tool_input: Any) -> Any:
            params: InterruptParams = {
                "tool_call_name": base_tool.name,
                "tool_call_args": tool_input,
                "tool": base_tool,
            }
            return review(params)

        return tool_with_human_review

    # Bare usage (`@human_in_the_loop`) passes the function directly;
    # parameterized usage (`@human_in_the_loop(...)`) returns the decorator.
    return decorate(func) if func is not None else decorate
195
+
196
+
197
def human_in_the_loop_async(
    func: Optional[Callable] = None,
    *,
    handler: Optional[HumanInterruptHandler] = None,
) -> Union[Callable[[Callable], BaseTool], BaseTool]:
    """
    A decorator that adds human-in-the-loop review support to an asynchronous tool.

    This is the async version of human_in_the_loop. It allows you to add human review
    functionality to async tools, enabling users to approve, edit, or reject tool
    invocations before they are executed.

    Supports both syntaxes:
        @human_in_the_loop_async
        @human_in_the_loop_async(handler=fn)

    Args:
        func: The function to decorate. **Do not pass this directly.**
        handler: Configuration for the human interrupt. If not provided, uses
            default_handler_async. A synchronous handler is also accepted; its
            (non-awaitable) result is returned as-is.

    Returns:
        If `func` is provided, returns the decorated BaseTool.
        If `func` is None, returns a decorator that will decorate the target function.

    Example:
        # Basic usage with default handler:
        >>> from langchain_dev_utils.tool_calling import human_in_the_loop_async
        >>> from langchain_core.tools import tool
        >>> import asyncio
        >>> import datetime
        >>>
        >>> @human_in_the_loop_async
        ... @tool
        ... async def async_get_current_time() -> str:
        ...     \"\"\"Asynchronously get current timestamp\"\"\"
        ...     await asyncio.sleep(1)
        ...     return str(datetime.datetime.now().timestamp())
    """
    # Local stdlib import: only needed to detect awaitable handler results.
    import inspect

    def decorator(target_func: Callable) -> BaseTool:
        """The actual decorator that wraps the target function."""
        # Accept either a plain callable or an already-built BaseTool.
        if not isinstance(target_func, BaseTool):
            tool_obj = create_tool(target_func)
        else:
            tool_obj = target_func

        handler_func: HumanInterruptHandler = handler or default_handler_async

        @create_tool(
            tool_obj.name,
            description=tool_obj.description,
            args_schema=tool_obj.args_schema,
        )
        async def atool_with_human_review(
            **tool_input: Any,
        ) -> Any:
            result = handler_func(
                {
                    "tool_call_name": tool_obj.name,
                    "tool_call_args": tool_input,
                    "tool": tool_obj,
                }
            )
            # Handlers are normally coroutine functions (default_handler_async),
            # but a user-supplied synchronous handler must not be awaited:
            # awaiting a plain value raises TypeError. Await only awaitables.
            if inspect.isawaitable(result):
                result = await result
            return result

        return atool_with_human_review

    # Bare usage passes the function directly; parameterized usage returns
    # the decorator itself.
    if func is not None:
        return decorator(func)
    else:
        return decorator
@@ -0,0 +1,81 @@
1
+ from typing import Union
2
+
3
+ from langchain_core.messages import AIMessage
4
+
5
+
6
def has_tool_calling(message: AIMessage) -> bool:
    """Check if a message contains tool calls.

    This function determines whether an AI message contains tool calls,
    which is useful for routing messages to appropriate handlers.

    Args:
        message: Any message type to check for tool calls

    Returns:
        bool: True if message is an AIMessage with tool calls, False otherwise

    Example:
        # Check for tool calls in response:
        >>> from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling
        >>> response = model.invoke("What time is it now?")
        >>> if has_tool_calling(response):
        ...     print("Tool calls found in response")
    """
    # Prefer the standardized content-block representation; a generator keeps
    # this lazy instead of materializing a throwaway list inside any().
    if any(block["type"] == "tool_call" for block in message.content_blocks):
        return True
    # Fall back to the legacy `tool_calls` attribute on AIMessage; getattr
    # covers the hasattr + len(...) > 0 combination in one truthiness check.
    return isinstance(message, AIMessage) and bool(getattr(message, "tool_calls", None))
35
+
36
+
37
def parse_tool_calling(
    message: AIMessage, first_tool_call_only: bool = False
) -> Union[tuple[str, dict], list[tuple[str, dict]]]:
    """Parse a tool call from a message.

    This function extracts tool call information from an AI message, returning
    either the first tool call or all tool calls depending on the parameter.

    Args:
        message: Any message type to parse for tool calls
        first_tool_call_only: If True, only the first tool call will be parsed

    Returns:
        Union[tuple[str, dict], list[tuple[str, dict]]]: The tool call name and args

    Example:
        # Parse single tool call:
        >>> from langchain_dev_utils.tool_calling import has_tool_calling, parse_tool_calling
        >>> response = model.invoke("What time is it now?")
        >>> if has_tool_calling(response):
        ...     tool_name, tool_args = parse_tool_calling(response, first_tool_call_only=True)

        # Parse multiple tool calls:
        >>> if has_tool_calling(response):
        ...     tool_calls = parse_tool_calling(response)
    """
    # Standardized content blocks take priority; fall back to the legacy
    # `tool_calls` attribute when no tool_call blocks are present.
    calls = [
        block for block in message.content_blocks if block["type"] == "tool_call"
    ] or message.tool_calls

    if not calls:
        raise ValueError("No tool call found in message")

    if first_tool_call_only:
        first = calls[0]
        return (first["name"], first["args"])
    return [(call["name"], call["args"]) for call in calls]
@@ -0,0 +1,103 @@
1
+ Metadata-Version: 2.4
2
+ Name: langchain-dev-utils
3
+ Version: 1.3.7
4
+ Summary: A practical utility library for LangChain and LangGraph development
5
+ Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
6
+ Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
7
+ Project-URL: documentation, https://tbice123123.github.io/langchain-dev-utils
8
+ Author-email: tiebingice <tiebingice123@outlook.com>
9
+ License-File: LICENSE
10
+ Requires-Python: >=3.11
11
+ Requires-Dist: langchain-core>=1.2.5
12
+ Requires-Dist: langchain>=1.2.0
13
+ Requires-Dist: langgraph>=1.0.0
14
+ Provides-Extra: standard
15
+ Requires-Dist: json-repair>=0.53.1; extra == 'standard'
16
+ Requires-Dist: langchain-openai; extra == 'standard'
17
+ Description-Content-Type: text/markdown
18
+
19
+ # 🦜️🧰 langchain-dev-utils
20
+
21
+ <p align="center">
22
+ <em>🚀 High-efficiency toolkit designed for LangChain and LangGraph developers</em>
23
+ </p>
24
+
25
+ <p align="center">
26
+ 📚 <a href="https://tbice123123.github.io/langchain-dev-utils/">English</a> •
27
+ <a href="https://tbice123123.github.io/langchain-dev-utils/zh/">中文</a>
28
+ </p>
29
+
30
+ [![PyPI](https://img.shields.io/pypi/v/langchain-dev-utils.svg?color=%2334D058&label=pypi%20package)](https://pypi.org/project/langchain-dev-utils/)
31
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
32
+ [![Python](https://img.shields.io/badge/python-3.11|3.12|3.13|3.14-%2334D058)](https://www.python.org/downloads)
33
+ [![Downloads](https://static.pepy.tech/badge/langchain-dev-utils/month)](https://pepy.tech/project/langchain-dev-utils)
34
+ [![Documentation](https://img.shields.io/badge/docs-latest-blue)](https://tbice123123.github.io/langchain-dev-utils)
35
+
36
+ > This is the English version. For the Chinese version, please visit [中文版本](https://github.com/TBice123123/langchain-dev-utils/blob/master/README_cn.md)
37
+
38
+ ## ✨ Why choose langchain-dev-utils?
39
+
40
+ Tired of writing repetitive code in LangChain development? `langchain-dev-utils` is the solution you need! This lightweight yet powerful toolkit is designed to enhance the development experience of LangChain and LangGraph, helping you:
41
+
42
+ - ⚡ **Boost development efficiency** - Reduce boilerplate code, allowing you to focus on core functionality
43
+ - 🧩 **Simplify complex workflows** - Easily manage multi-model, multi-tool, and multi-agent applications
44
+ - 🔧 **Enhance code quality** - Improve consistency and readability, reducing maintenance costs
45
+ - 🎯 **Accelerate prototype development** - Quickly implement ideas, iterate and validate faster
46
+
47
+
48
+ ## 🎯 Core Features
49
+
50
+ - **🔌 Unified model management** - Specify model providers through strings, easily switch and combine different models
51
+ - **💬 Flexible message handling** - Support for chain-of-thought concatenation, streaming processing, and message formatting
52
+ - **🛠️ Powerful tool calling** - Built-in tool call detection, parameter parsing, and human review functionality
53
+ - **🤖 Efficient Agent development** - Simplify the agent creation process and provide a rich set of common middleware
54
+ - **📊 Flexible state graph composition** - Support for serial and parallel composition of multiple StateGraphs
55
+
56
+ ## ⚡ Quick Start
57
+
58
+ **1. Install `langchain-dev-utils`**
59
+
60
+ ```bash
61
+ pip install -U "langchain-dev-utils[standard]"
62
+ ```
63
+
64
+ **2. Start using**
65
+
66
+ ```python
67
+ from langchain.tools import tool
68
+ from langchain_core.messages import HumanMessage
69
+ from langchain_dev_utils.chat_models import register_model_provider, load_chat_model
70
+ from langchain_dev_utils.agents import create_agent
71
+
72
+ # Register model provider
73
+ register_model_provider("vllm", "openai-compatible", base_url="http://localhost:8000/v1")
74
+
75
+ @tool
76
+ def get_current_weather(location: str) -> str:
77
+ """Get the current weather for the specified location"""
78
+ return f"25 degrees, {location}"
79
+
80
+ # Dynamically load model using string
81
+ model = load_chat_model("vllm:qwen3-4b")
82
+ response = model.invoke("Hello")
83
+ print(response)
84
+
85
+ # Create agent
86
+ agent = create_agent("vllm:qwen3-4b", tools=[get_current_weather])
87
+ response = agent.invoke({"messages": [HumanMessage(content="What's the weather like in New York today?")]})
88
+ print(response)
89
+ ```
90
+
91
+ **For more features of this library, please visit the [full documentation](https://tbice123123.github.io/langchain-dev-utils/)**
92
+
93
+
94
+ ## 🛠️ GitHub Repository
95
+
96
+ Visit the [GitHub repository](https://github.com/TBice123123/langchain-dev-utils) to view the source code and report issues.
97
+
98
+ ---
99
+
100
+ <div align="center">
101
+ <p>Developed with ❤️ and ☕</p>
102
+ <p>If this project helps you, please give us a ⭐️</p>
103
+ </div>
@@ -0,0 +1,44 @@
1
+ langchain_dev_utils/__init__.py,sha256=_mC35C1yWAueGFOoiHyTIrBPPJjSh3G1KA8Ela0nE_w,23
2
+ langchain_dev_utils/_utils.py,sha256=hWuxzxIlCPkT02xglWqkRnroky2mCmW5qtK-zHDH4RY,4032
3
+ langchain_dev_utils/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ langchain_dev_utils/agents/__init__.py,sha256=69_biZzyJvW9OBT1g8TX_77mp9-I_TvWo9QtlvHq83E,177
5
+ langchain_dev_utils/agents/factory.py,sha256=8XB6y_ddf58vXlTLHBL6KCirFqkD2GjtzsuOt98sS7U,3732
6
+ langchain_dev_utils/agents/file_system.py,sha256=Yk3eetREE26WNrnTWLoiDUpOyCJ-rhjlfFDk6foLa1E,8468
7
+ langchain_dev_utils/agents/plan.py,sha256=WwhoiJBmVYVI9bT8HfjCzTJ_SIp9WFil0gOeznv2omQ,6497
8
+ langchain_dev_utils/agents/wrap.py,sha256=Tw6KYMdZ5ESsWVjoImZimZI2Eg5rEipsqUVJ0tSVbUw,11536
9
+ langchain_dev_utils/agents/middleware/__init__.py,sha256=QVQibaNHvHPyNTZ2UNFfYL153ZboaCHcoioTHK0FsiY,710
10
+ langchain_dev_utils/agents/middleware/format_prompt.py,sha256=yIkoSVPp0FemkjezvGsOmtgOkZDyEYQ8yh4YWYYGtVc,2343
11
+ langchain_dev_utils/agents/middleware/handoffs.py,sha256=rSkNXxqtjB8_xT0HUdxnKbchgY76BuTPX-Zc69H-_wI,7687
12
+ langchain_dev_utils/agents/middleware/model_fallback.py,sha256=8xiNjTJ0yiRkPLCRfAGNnqY1TLstj1Anmiqyv5w2mA8,1633
13
+ langchain_dev_utils/agents/middleware/model_router.py,sha256=IidYq72tPLa053gEg5IQpPzDzyCxYYEvpgT1K4qBwXw,7862
14
+ langchain_dev_utils/agents/middleware/plan.py,sha256=Zz0dh1BRbsVgROmhjH2IIqylSsuKHZXJx0iztMBm8EU,14719
15
+ langchain_dev_utils/agents/middleware/summarization.py,sha256=IoZ2PM1OC3AXwf0DWpfreuPOAipeiYu0KPmAABWXuY0,3087
16
+ langchain_dev_utils/agents/middleware/tool_call_repair.py,sha256=oZF0Oejemqs9kSn8xbW79FWyVVarL4IGCz0gpqYBkFM,3529
17
+ langchain_dev_utils/agents/middleware/tool_emulator.py,sha256=OgtPhqturaWzF4fRSJ3f_IXvIrYrrAjlpOC5zmLtrkY,2031
18
+ langchain_dev_utils/agents/middleware/tool_selection.py,sha256=dRH5ejR6N02Djwxt6Gd63MYkg6SV5pySlzaRt53OoZk,3113
19
+ langchain_dev_utils/chat_models/__init__.py,sha256=YSLUyHrWEEj4y4DtGFCOnDW02VIYZdfAH800m4Klgeg,224
20
+ langchain_dev_utils/chat_models/base.py,sha256=G_SNvd53ogho-LRgD7DCD65xj51J2JxmOkA4URNW6ZQ,11560
21
+ langchain_dev_utils/chat_models/types.py,sha256=MD3cv_ZIe9fCdgwisNfuxAOhy-j4YSs1ZOQYyCjlNKs,927
22
+ langchain_dev_utils/chat_models/adapters/__init__.py,sha256=4tTbhAAQdpX_gWyWeH97hqS5HnaoqQqW6QBh9Qd1SKs,106
23
+ langchain_dev_utils/chat_models/adapters/create_utils.py,sha256=r8_XWLNF3Yc6sumlBhmgG1QcBa4Dsba7X3f_9YeMeGA,2479
24
+ langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=Xsd6HN1zGGDl87bZ5NMfwKfxWkgdP4DpszEqlb4Z-MY,27198
25
+ langchain_dev_utils/chat_models/adapters/register_profiles.py,sha256=YS9ItCEq2ISoB_bp6QH5NVKOVR9-7la3r7B_xQNxZxE,366
26
+ langchain_dev_utils/embeddings/__init__.py,sha256=zbEOaV86TUi9Zrg_dH9dpdgacWg31HMJTlTQknA9EKk,244
27
+ langchain_dev_utils/embeddings/base.py,sha256=GXFKZSAExMtCFUpsd6mY4NxCWCrq7JAatBw3kS9LaKY,8803
28
+ langchain_dev_utils/embeddings/adapters/__init__.py,sha256=yJEZZdzZ2fv1ExezLaNxo0VU9HJTHKYbS3T_XP8Ab9c,114
29
+ langchain_dev_utils/embeddings/adapters/create_utils.py,sha256=K4JlbjG-O5xLY3wxaVt0UZ3QwI--cVb4qyxLATKVAWQ,2012
30
+ langchain_dev_utils/embeddings/adapters/openai_compatible.py,sha256=fo7-m7dcWL4xrhSqdAHHVREsiXfVOvIrlaotaYTEiyE,3159
31
+ langchain_dev_utils/message_convert/__init__.py,sha256=nnkDa_Im0dCb5u4aa2FRB9tqB8e6H6sEGYK6Vg81u2s,472
32
+ langchain_dev_utils/message_convert/content.py,sha256=2V1g21byg3iLv5RjUW8zv3jwYwV7IH2hNim7jGRsIes,8096
33
+ langchain_dev_utils/message_convert/format.py,sha256=NdrYX0cJn2-G1ArLSjJ7yO788KV1d83F4Kimpyft0IM,2446
34
+ langchain_dev_utils/pipeline/__init__.py,sha256=eE6WktaLHDkqMeXDIDaLtm-OPTwtsX_Av8iK9uYrceo,186
35
+ langchain_dev_utils/pipeline/parallel.py,sha256=nwZWbdSNeyanC9WufoJBTceotgT--UnPOfStXjgNMOc,5271
36
+ langchain_dev_utils/pipeline/sequential.py,sha256=sYJXQzVHDKUc-UV-HMv38JTPnse1A7sRM0vqSdpHK0k,3850
37
+ langchain_dev_utils/pipeline/types.py,sha256=T3aROKKXeWvd0jcH5XkgMDQfEkLfPaiOhhV2q58fDHs,112
38
+ langchain_dev_utils/tool_calling/__init__.py,sha256=mu_WxKMcu6RoTf4vkTPbA1WSBSNc6YIqyBtOQ6iVQj4,322
39
+ langchain_dev_utils/tool_calling/human_in_the_loop.py,sha256=7Z_QO5OZUR6K8nLoIcafc6osnvX2IYNorOJcbx6bVso,9672
40
+ langchain_dev_utils/tool_calling/utils.py,sha256=S4-KXQ8jWmpGTXYZitovF8rxKpaSSUkFruM8LDwvcvE,2765
41
+ langchain_dev_utils-1.3.7.dist-info/METADATA,sha256=bleCSa7JHZ_1zYiYT-qYKnRSnFf5yU7eKX-nTWja9rw,4552
42
+ langchain_dev_utils-1.3.7.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
43
+ langchain_dev_utils-1.3.7.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
44
+ langchain_dev_utils-1.3.7.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.28.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 tbice
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.