langchain-dev-utils 1.3.3.tar.gz → 1.3.5.tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (70)
  1. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/.gitignore +15 -15
  2. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/.python-version +1 -1
  3. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/PKG-INFO +1 -1
  4. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/README_cn.md +85 -85
  5. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/pyproject.toml +47 -47
  6. langchain_dev_utils-1.3.5/src/langchain_dev_utils/__init__.py +1 -0
  7. langchain_dev_utils-1.3.5/src/langchain_dev_utils/_utils.py +126 -0
  8. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/middleware/format_prompt.py +1 -1
  9. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/wrap.py +1 -1
  10. langchain_dev_utils-1.3.5/src/langchain_dev_utils/chat_models/adapters/__init__.py +3 -0
  11. langchain_dev_utils-1.3.5/src/langchain_dev_utils/chat_models/adapters/create_utils.py +53 -0
  12. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +81 -4
  13. langchain_dev_utils-1.3.5/src/langchain_dev_utils/chat_models/adapters/register_profiles.py +15 -0
  14. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/chat_models/base.py +6 -11
  15. langchain_dev_utils-1.3.5/src/langchain_dev_utils/embeddings/adapters/__init__.py +3 -0
  16. langchain_dev_utils-1.3.5/src/langchain_dev_utils/embeddings/adapters/create_utils.py +45 -0
  17. langchain_dev_utils-1.3.5/src/langchain_dev_utils/embeddings/adapters/openai_compatible.py +91 -0
  18. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/embeddings/base.py +13 -26
  19. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/message_convert/__init__.py +15 -15
  20. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/message_convert/format.py +69 -69
  21. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_chat_models.py +191 -185
  22. langchain_dev_utils-1.3.5/tests/test_embedding.py +22 -0
  23. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_handoffs_middleware.py +0 -1
  24. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_load_model.py +104 -104
  25. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_messages.py +164 -164
  26. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_plan_middleware.py +68 -68
  27. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_wrap_agent.py +15 -12
  28. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/uv.lock +2883 -2883
  29. langchain_dev_utils-1.3.3/src/langchain_dev_utils/__init__.py +0 -1
  30. langchain_dev_utils-1.3.3/src/langchain_dev_utils/_utils.py +0 -43
  31. langchain_dev_utils-1.3.3/tests/utils/__init__.py +0 -0
  32. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/.vscode/settings.json +0 -0
  33. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/LICENSE +0 -0
  34. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/README.md +0 -0
  35. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/__init__.py +0 -0
  36. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/factory.py +0 -0
  37. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/file_system.py +0 -0
  38. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/middleware/__init__.py +0 -0
  39. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/middleware/handoffs.py +0 -0
  40. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/middleware/model_fallback.py +0 -0
  41. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/middleware/model_router.py +0 -0
  42. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/middleware/plan.py +0 -0
  43. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/middleware/summarization.py +0 -0
  44. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/middleware/tool_call_repair.py +0 -0
  45. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +0 -0
  46. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/middleware/tool_selection.py +0 -0
  47. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/agents/plan.py +0 -0
  48. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  49. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/chat_models/types.py +0 -0
  50. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  51. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/message_convert/content.py +0 -0
  52. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  53. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/pipeline/parallel.py +0 -0
  54. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/pipeline/sequential.py +0 -0
  55. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/pipeline/types.py +0 -0
  56. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/py.typed +0 -0
  57. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  58. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +0 -0
  59. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/src/langchain_dev_utils/tool_calling/utils.py +0 -0
  60. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/__init__.py +0 -0
  61. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_agent.py +0 -0
  62. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_human_in_the_loop.py +0 -0
  63. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_load_embbeding.py +0 -0
  64. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_model_tool_emulator.py +0 -0
  65. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_pipline.py +0 -0
  66. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_router_model.py +0 -0
  67. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_tool_call_repair.py +0 -0
  68. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/test_tool_calling.py +0 -0
  69. {langchain_dev_utils-1.3.3/src/langchain_dev_utils/chat_models/adapters → langchain_dev_utils-1.3.5/tests/utils}/__init__.py +0 -0
  70. {langchain_dev_utils-1.3.3 → langchain_dev_utils-1.3.5}/tests/utils/register.py +0 -0
.gitignore
@@ -1,16 +1,16 @@
- # Python-generated files
- __pycache__/
- *.py[oc]
- build/
- dist/
- wheels/
- *.egg-info
-
- # Virtual environments
- .venv
- .env
- .benchmarks
- data/
- node_modules
- dist
+ # Python-generated files
+ __pycache__/
+ *.py[oc]
+ build/
+ dist/
+ wheels/
+ *.egg-info
+
+ # Virtual environments
+ .venv
+ .env
+ .benchmarks
+ data/
+ node_modules
+ dist
  site/
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: langchain-dev-utils
- Version: 1.3.3
+ Version: 1.3.5
  Summary: A practical utility library for LangChain and LangGraph development
  Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
  Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
README_cn.md
@@ -1,86 +1,86 @@
- # 🦜️🧰 langchain-dev-utils
-
- <p align="center">
- <em>🚀 专为 LangChain 和 LangGraph 开发者打造的高效工具库</em>
- </p>
-
- <p align="center">
- 📚 <a href="https://tbice123123.github.io/langchain-dev-utils/">English</a> •
- <a href="https://tbice123123.github.io/langchain-dev-utils/zh/">中文</a>
- </p>
-
- [![PyPI](https://img.shields.io/pypi/v/langchain-dev-utils.svg?color=%2334D058&label=pypi%20package)](https://pypi.org/project/langchain-dev-utils/)
- [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
- [![Python](https://img.shields.io/badge/python-3.11|3.12|3.13|3.14-%2334D058)](https://www.python.org/downloads)
- [![Downloads](https://static.pepy.tech/badge/langchain-dev-utils/month)](https://pepy.tech/project/langchain-dev-utils)
- [![Documentation](https://img.shields.io/badge/docs-latest-blue)](https://tbice123123.github.io/langchain-dev-utils/zh/)
-
- > 当前为中文版,英文版请访问[English Version](https://github.com/TBice123123/langchain-dev-utils/blob/master/README.md)
-
- ## ✨ 为什么选择 langchain-dev-utils?
-
- 厌倦了在 LangChain 开发中编写重复代码?`langchain-dev-utils` 正是您需要的解决方案!这个轻量但功能强大的工具库专为提升 LangChain 和 LangGraph 开发体验而设计,帮助您:
-
- - ⚡ **提升开发效率** - 减少样板代码,让您专注于核心功能
- - 🧩 **简化复杂流程** - 轻松管理多模型、多工具和多智能体应用
- - 🔧 **增强代码质量** - 提高一致性和可读性,减少维护成本
- - 🎯 **加速原型开发** - 快速实现想法,更快迭代验证
-
-
- ## 🎯 核心功能
-
- - **🔌 统一的模型管理** - 通过字符串指定模型提供商,轻松切换和组合不同模型
- - **💬 灵活的消息处理** - 支持思维链拼接、流式处理和消息格式化
- - **🛠️ 强大的工具调用** - 内置工具调用检测、参数解析和人工审核功能
- - **🤖 高效的 Agent 开发** - 简化智能体创建流程,扩充更多的常用中间件
- - **📊 灵活的状态图组合** - 支持串行和并行方式组合多个 StateGraph
-
- ## ⚡ 快速开始
-
- **1. 安装 `langchain-dev-utils`**
-
- ```bash
- pip install -U "langchain-dev-utils[standard]"
- ```
-
- **2. 开始使用**
-
- ```python
- from langchain.tools import tool
- from langchain_core.messages import HumanMessage
- from langchain_dev_utils.chat_models import register_model_provider, load_chat_model
- from langchain_dev_utils.agents import create_agent
-
- # 注册模型提供商
- register_model_provider("vllm", "openai-compatible", base_url="http://localhost:8000/v1")
-
- @tool
- def get_current_weather(location: str) -> str:
-     """获取指定地点的当前天气"""
-     return f"25度,{location}"
-
- # 使用字符串动态加载模型
- model = load_chat_model("vllm:qwen3-4b")
- response = model.invoke("你好")
- print(response)
-
- # 创建智能体
- agent = create_agent("vllm:qwen3-4b", tools=[get_current_weather])
- response = agent.invoke({"messages": [HumanMessage(content="今天纽约的天气如何?")]})
- print(response)
- ```
-
- **获取更多的本库功能,请访问[完整文档](https://tbice123123.github.io/langchain-dev-utils/zh/)**
-
-
- ## 🛠️ GitHub 仓库
-
- 访问 [GitHub 仓库](https://github.com/TBice123123/langchain-dev-utils) 查看源代码和问题。
-
-
- ---
-
- <div align="center">
- <p>由 ❤️ 和 ☕ 驱动开发</p>
- <p>如果这个项目对您有帮助,请给我们一个 ⭐️</p>
+ # 🦜️🧰 langchain-dev-utils
+
+ <p align="center">
+ <em>🚀 专为 LangChain 和 LangGraph 开发者打造的高效工具库</em>
+ </p>
+
+ <p align="center">
+ 📚 <a href="https://tbice123123.github.io/langchain-dev-utils/">English</a> •
+ <a href="https://tbice123123.github.io/langchain-dev-utils/zh/">中文</a>
+ </p>
+
+ [![PyPI](https://img.shields.io/pypi/v/langchain-dev-utils.svg?color=%2334D058&label=pypi%20package)](https://pypi.org/project/langchain-dev-utils/)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+ [![Python](https://img.shields.io/badge/python-3.11|3.12|3.13|3.14-%2334D058)](https://www.python.org/downloads)
+ [![Downloads](https://static.pepy.tech/badge/langchain-dev-utils/month)](https://pepy.tech/project/langchain-dev-utils)
+ [![Documentation](https://img.shields.io/badge/docs-latest-blue)](https://tbice123123.github.io/langchain-dev-utils/zh/)
+
+ > 当前为中文版,英文版请访问[English Version](https://github.com/TBice123123/langchain-dev-utils/blob/master/README.md)
+
+ ## ✨ 为什么选择 langchain-dev-utils?
+
+ 厌倦了在 LangChain 开发中编写重复代码?`langchain-dev-utils` 正是您需要的解决方案!这个轻量但功能强大的工具库专为提升 LangChain 和 LangGraph 开发体验而设计,帮助您:
+
+ - ⚡ **提升开发效率** - 减少样板代码,让您专注于核心功能
+ - 🧩 **简化复杂流程** - 轻松管理多模型、多工具和多智能体应用
+ - 🔧 **增强代码质量** - 提高一致性和可读性,减少维护成本
+ - 🎯 **加速原型开发** - 快速实现想法,更快迭代验证
+
+
+ ## 🎯 核心功能
+
+ - **🔌 统一的模型管理** - 通过字符串指定模型提供商,轻松切换和组合不同模型
+ - **💬 灵活的消息处理** - 支持思维链拼接、流式处理和消息格式化
+ - **🛠️ 强大的工具调用** - 内置工具调用检测、参数解析和人工审核功能
+ - **🤖 高效的 Agent 开发** - 简化智能体创建流程,扩充更多的常用中间件
+ - **📊 灵活的状态图组合** - 支持串行和并行方式组合多个 StateGraph
+
+ ## ⚡ 快速开始
+
+ **1. 安装 `langchain-dev-utils`**
+
+ ```bash
+ pip install -U "langchain-dev-utils[standard]"
+ ```
+
+ **2. 开始使用**
+
+ ```python
+ from langchain.tools import tool
+ from langchain_core.messages import HumanMessage
+ from langchain_dev_utils.chat_models import register_model_provider, load_chat_model
+ from langchain_dev_utils.agents import create_agent
+
+ # 注册模型提供商
+ register_model_provider("vllm", "openai-compatible", base_url="http://localhost:8000/v1")
+
+ @tool
+ def get_current_weather(location: str) -> str:
+     """获取指定地点的当前天气"""
+     return f"25度,{location}"
+
+ # 使用字符串动态加载模型
+ model = load_chat_model("vllm:qwen3-4b")
+ response = model.invoke("你好")
+ print(response)
+
+ # 创建智能体
+ agent = create_agent("vllm:qwen3-4b", tools=[get_current_weather])
+ response = agent.invoke({"messages": [HumanMessage(content="今天纽约的天气如何?")]})
+ print(response)
+ ```
+
+ **获取更多的本库功能,请访问[完整文档](https://tbice123123.github.io/langchain-dev-utils/zh/)**
+
+
+ ## 🛠️ GitHub 仓库
+
+ 访问 [GitHub 仓库](https://github.com/TBice123123/langchain-dev-utils) 查看源代码和问题。
+
+
+ ---
+
+ <div align="center">
+ <p>由 ❤️ 和 ☕ 驱动开发</p>
+ <p>如果这个项目对您有帮助,请给我们一个 ⭐️</p>
  </div>
pyproject.toml
@@ -1,47 +1,47 @@
- [project]
- name = "langchain-dev-utils"
- version = "1.3.3"
- description = "A practical utility library for LangChain and LangGraph development"
- readme = "README.md"
- authors = [{ name = "tiebingice", email = "tiebingice123@outlook.com" }]
- requires-python = ">=3.11"
- dependencies = ["langchain>=1.2.0", "langchain-core>=1.2.5", "langgraph>=1.0.0"]
-
- [project.urls]
- "Source Code" = "https://github.com/TBice123123/langchain-dev-utils"
- repository = "https://github.com/TBice123123/langchain-dev-utils"
- documentation = "https://tbice123123.github.io/langchain-dev-utils"
-
-
- [project.optional-dependencies]
- standard = ["json-repair>=0.53.1", "langchain-openai"]
-
- [build-system]
- requires = ["hatchling"]
- build-backend = "hatchling.build"
-
- [tool.hatch.build]
- exclude = ["/data", "/docs", "mkdocs.yml"]
-
- [tool.pytest.ini_options]
- asyncio_mode = "auto"
- testpaths = ["tests"]
- python_files = ["test_*.py"]
- python_functions = ["test_*"]
-
- [dependency-groups]
- dev = ["langchain-model-profiles>=0.0.5", "ruff>=0.14.5"]
- docs = ["mkdocs-material>=9.7.0", "mkdocs-static-i18n>=1.3.0"]
- tests = [
-     "python-dotenv>=1.1.1",
-     "langchain-tests>=1.0.0",
-     "langchain-deepseek>=1.0.0",
-     "langchain-qwq>=0.3.0",
-     "langchain-ollama>=1.0.0",
-     "langchain-community>=0.4.1",
- ]
-
-
- [tool.ruff.lint]
- select = ["E", "F", "I", "PGH003", "T201"]
- ignore = ["E501"]
+ [project]
+ name = "langchain-dev-utils"
+ version = "1.3.5"
+ description = "A practical utility library for LangChain and LangGraph development"
+ readme = "README.md"
+ authors = [{ name = "tiebingice", email = "tiebingice123@outlook.com" }]
+ requires-python = ">=3.11"
+ dependencies = ["langchain>=1.2.0", "langchain-core>=1.2.5", "langgraph>=1.0.0"]
+
+ [project.urls]
+ "Source Code" = "https://github.com/TBice123123/langchain-dev-utils"
+ repository = "https://github.com/TBice123123/langchain-dev-utils"
+ documentation = "https://tbice123123.github.io/langchain-dev-utils"
+
+
+ [project.optional-dependencies]
+ standard = ["json-repair>=0.53.1", "langchain-openai"]
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.hatch.build]
+ exclude = ["/data", "/docs", "mkdocs.yml"]
+
+ [tool.pytest.ini_options]
+ asyncio_mode = "auto"
+ testpaths = ["tests"]
+ python_files = ["test_*.py"]
+ python_functions = ["test_*"]
+
+ [dependency-groups]
+ dev = ["langchain-model-profiles>=0.0.5", "ruff>=0.14.5"]
+ docs = ["mkdocs-material>=9.7.0", "mkdocs-static-i18n>=1.3.0"]
+ tests = [
+     "python-dotenv>=1.1.1",
+     "langchain-tests>=1.0.0",
+     "langchain-deepseek>=1.0.0",
+     "langchain-qwq>=0.3.0",
+     "langchain-ollama>=1.0.0",
+     "langchain-community>=0.4.1",
+ ]
+
+
+ [tool.ruff.lint]
+ select = ["E", "F", "I", "PGH003", "T201"]
+ ignore = ["E501"]
src/langchain_dev_utils/__init__.py
@@ -0,0 +1 @@
+ __version__ = "1.3.5"
src/langchain_dev_utils/_utils.py
@@ -0,0 +1,126 @@
+ from importlib import util
+ from typing import Literal, Optional
+
+ from pydantic import BaseModel
+
+
+ def _check_pkg_install(
+     pkg: Literal["langchain_openai", "json_repair"],
+ ) -> None:
+     if not util.find_spec(pkg):
+         if pkg == "langchain_openai":
+             msg = "Please install langchain_dev_utils[standard],when use 'openai-compatible'"
+         else:
+             msg = "Please install langchain_dev_utils[standard] to use ToolCallRepairMiddleware."
+         raise ImportError(msg)
+
+
+ def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
+     """
+     Return 'base_url' if the model has a field named or aliased as 'base_url',
+     else return 'api_base' if it has a field named or aliased as 'api_base',
+     else return None.
+     The return value is always either 'base_url', 'api_base', or None.
+     """
+     model_fields = model_cls.model_fields
+
+     # try model_fields first
+     if "base_url" in model_fields:
+         return "base_url"
+
+     if "api_base" in model_fields:
+         return "api_base"
+
+     # then try aliases
+     for field_info in model_fields.values():
+         if field_info.alias == "base_url":
+             return "base_url"
+
+     for field_info in model_fields.values():
+         if field_info.alias == "api_base":
+             return "api_base"
+
+     return None
+
+
+ def _validate_base_url(base_url: Optional[str] = None) -> None:
+     """Validate base URL format.
+
+     Args:
+         base_url: Base URL to validate
+
+     Raises:
+         ValueError: If base URL is not a valid HTTP or HTTPS URL
+     """
+     if base_url is None:
+         return
+
+     from urllib.parse import urlparse
+
+     parsed = urlparse(base_url.strip())
+
+     if not parsed.scheme or not parsed.netloc:
+         raise ValueError(
+             f"base_url must be a valid HTTP or HTTPS URL. Received: {base_url}"
+         )
+
+     if parsed.scheme not in ("http", "https"):
+         raise ValueError(
+             f"base_url must use HTTP or HTTPS protocol. Received: {parsed.scheme}"
+         )
+
+
+ def _validate_model_cls_name(model_cls_name: str) -> None:
+     """Validate model class name follows Python naming conventions.
+
+     Args:
+         model_cls_name: Class name to validate
+
+     Raises:
+         ValueError: If class name is invalid
+     """
+     if not model_cls_name:
+         raise ValueError("model_cls_name cannot be empty")
+
+     if not model_cls_name[0].isalpha():
+         raise ValueError(
+             f"model_cls_name must start with a letter. Received: {model_cls_name}"
+         )
+
+     if not all(c.isalnum() or c == "_" for c in model_cls_name):
+         raise ValueError(
+             f"model_cls_name can only contain letters, numbers, and underscores. Received: {model_cls_name}"
+         )
+
+     if model_cls_name[0].islower():
+         raise ValueError(
+             f"model_cls_name should start with an uppercase letter (PEP 8). Received: {model_cls_name}"
+         )
+
+
+ def _validate_provider_name(provider_name: str) -> None:
+     """Validate provider name follows Python naming conventions.
+
+     Args:
+         provider_name: Provider name to validate
+
+     Raises:
+         ValueError: If provider name is invalid
+     """
+     if not provider_name:
+         raise ValueError("provider_name cannot be empty")
+
+     if not provider_name[0].isalnum():
+         raise ValueError(
+             f"provider_name must start with a letter. Received: {provider_name}"
+         )
+
+     if not all(c.isalnum() or c == "_" for c in provider_name):
+         raise ValueError(
+             f"provider_name can only contain letters, numbers, underscores. Received: {provider_name}"
+         )
+
+     if len(provider_name) > 20:
+         raise ValueError(
+             f"provider_name must be 20 characters or fewer. Received: {provider_name}"
+         )
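
The helpers above are private but self-contained, so their behaviour can be read directly off the hunk. A quick illustrative sketch of what the new validators accept and reject (the `_`-prefixed names are internal and may change between releases):

```python
# Illustrative use of the private validators added in _utils.py (1.3.5).
from langchain_dev_utils._utils import (
    _validate_base_url,
    _validate_model_cls_name,
    _validate_provider_name,
)

# These pass silently: an http(s) URL with a host, a PEP 8 class name,
# and a short alphanumeric provider name.
_validate_base_url("http://localhost:8000/v1")
_validate_model_cls_name("ChatVLLM")
_validate_provider_name("vllm")

# A non-HTTP scheme is rejected with a descriptive ValueError.
try:
    _validate_base_url("ftp://example.com/v1")
except ValueError as err:
    print(err)  # base_url must use HTTP or HTTPS protocol. Received: ftp
```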
src/langchain_dev_utils/agents/middleware/format_prompt.py
@@ -11,7 +11,7 @@ def format_prompt(request: ModelRequest) -> str:
      Variables are first resolved from the state, then from the context if not found.
  
      Example:
-         >>> from langchain_dev_utils.agents.middleware.format_prompt import format_prompt
+         >>> from langchain_dev_utils.agents.middleware import format_prompt
          >>> from langchain.agents import create_agent
          >>> from langchain_core.messages import HumanMessage
          >>> from dataclasses import dataclass
src/langchain_dev_utils/agents/wrap.py
@@ -171,7 +171,7 @@ def wrap_all_agents_as_tool(
      Example:
          >>> from langchain_dev_utils.agents import wrap_all_agents_as_tool, create_agent
          >>>
-         >>> call_time_agent_tool = wrap_all_agents_as_tool(
+         >>> call_agent_tool = wrap_all_agents_as_tool(
          ... [time_agent,weather_agent],
          ... tool_name="call_sub_agents",
          ... tool_description="Used to invoke the sub-agents to perform tasks"
src/langchain_dev_utils/chat_models/adapters/__init__.py
@@ -0,0 +1,3 @@
+ from .create_utils import create_openai_compatible_model
+
+ __all__ = ["create_openai_compatible_model"]
src/langchain_dev_utils/chat_models/adapters/create_utils.py
@@ -0,0 +1,53 @@
+ from typing import Any, Optional, cast
+
+ from langchain_core.utils import from_env
+
+ from langchain_dev_utils._utils import _check_pkg_install
+
+ from ..types import CompatibilityOptions
+
+
+ def create_openai_compatible_model(
+     model_provider: str,
+     base_url: Optional[str] = None,
+     compatibility_options: Optional[CompatibilityOptions] = None,
+     model_profiles: Optional[dict[str, dict[str, Any]]] = None,
+     chat_model_cls_name: Optional[str] = None,
+ ):
+     """Factory function for creating provider-specific OpenAI-compatible model classes.
+
+     Dynamically generates model classes for different OpenAI-compatible providers,
+     configuring environment variable mappings and default base URLs specific to each provider.
+
+     Args:
+         model_provider (str): Identifier for the OpenAI-compatible provider (e.g. `vllm`, `moonshot`)
+         base_url (Optional[str], optional): Default API base URL for the provider. Defaults to None. If not provided, will try to use the environment variable.
+         compatibility_options (Optional[CompatibilityOptions], optional): Optional configuration for compatibility options with the provider. Defaults to None.
+         model_profiles (Optional[dict[str, dict[str, Any]]], optional): Optional model profiles for the provider. Defaults to None.
+         chat_model_cls_name (Optional[str], optional): Optional custom class name for the generated model. Defaults to None.
+     Returns:
+         Type[_BaseChatOpenAICompatible]: Configured model class ready for instantiation with provider-specific settings
+
+     Examples:
+         >>> from langchain_dev_utils.chat_models.adapters import create_openai_compatible_chat_model
+         >>> ChatVLLM = create_openai_compatible_chat_model(
+         ...     "vllm",
+         ...     base_url="http://localhost:8000",
+         ...     chat_model_cls_name="ChatVLLM",
+         ... )
+         >>> model = ChatVLLM(model="qwen3-4b")
+         >>> model.invoke("hello")
+     """
+     _check_pkg_install("langchain_openai")
+     from .openai_compatible import _create_openai_compatible_model
+
+     base_url = (
+         base_url or from_env(f"{model_provider.upper()}_API_BASE", default=None)()
+     )
+     return _create_openai_compatible_model(
+         chat_model_cls_name=chat_model_cls_name,
+         provider=model_provider,
+         base_url=cast(str, base_url),
+         compatibility_options=compatibility_options,
+         profiles=model_profiles,
+     )
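
Note that the docstring example in this new file imports `create_openai_compatible_chat_model`, while the name exported from `chat_models/adapters/__init__.py` (earlier in this diff) is `create_openai_compatible_model`. A minimal sketch using the exported name, assuming the `standard` extra is installed and an OpenAI-compatible server is reachable at the placeholder URL below:

```python
# Sketch only: "vllm", the base_url, and the model name stand in for whatever
# OpenAI-compatible endpoint you actually run.
from langchain_dev_utils.chat_models.adapters import create_openai_compatible_model

ChatVLLM = create_openai_compatible_model(
    "vllm",
    base_url="http://localhost:8000/v1",  # or set VLLM_API_BASE instead
    chat_model_cls_name="ChatVLLM",
)

# The generated class subclasses BaseChatOpenAI, so the usual langchain-openai
# keyword arguments apply; whether a real api_key is needed depends on the server.
model = ChatVLLM(model="qwen3-4b", api_key="EMPTY")
print(model.invoke("hello"))
```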
src/langchain_dev_utils/chat_models/adapters/openai_compatible.py
@@ -12,6 +12,7 @@ from typing import (
      Type,
      TypeVar,
      Union,
+     cast,
  )
  
  import openai
@@ -19,7 +20,11 @@ from langchain_core.callbacks import (
      AsyncCallbackManagerForLLMRun,
      CallbackManagerForLLMRun,
  )
- from langchain_core.language_models import LangSmithParams, LanguageModelInput
+ from langchain_core.language_models import (
+     LangSmithParams,
+     LanguageModelInput,
+     ModelProfile,
+ )
  from langchain_core.messages import (
      AIMessage,
      AIMessageChunk,
@@ -45,12 +50,21 @@ from pydantic import (
  )
  from typing_extensions import Self
  
+ from ..._utils import (
+     _validate_base_url,
+     _validate_model_cls_name,
+     _validate_provider_name,
+ )
  from ..types import (
      CompatibilityOptions,
      ReasoningKeepPolicy,
      ResponseFormatType,
      ToolChoiceType,
  )
+ from .register_profiles import (
+     _get_profile_by_provider_and_model,
+     _register_profile_with_provider,
+ )
  
  _BM = TypeVar("_BM", bound=BaseModel)
  _DictOrPydanticClass = Union[dict[str, Any], type[_BM], type]
@@ -152,7 +166,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
      Note: This is a template class and should not be exported or instantiated
      directly. Instead, use it as a base class and provide the specific provider
      name through inheritance or the factory function
-     `create_openai_compatible_model()`.
+     `create_openai_compatible_model()`.
  
      """
  
      model_name: str = Field(alias="model", default="openai compatible model")
@@ -283,7 +297,10 @@
      def _set_model_profile(self) -> Self:
          """Set model profile if not overridden."""
          if self.profile is None:
-             self.profile = {}
+             self.profile = cast(
+                 ModelProfile,
+                 _get_profile_by_provider_and_model(self._provider, self.model_name),
+             )
          return self
  
      def _create_chat_result(
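
`_set_model_profile` now resolves profiles through `_get_profile_by_provider_and_model` from the new `register_profiles.py` module, whose body (+15 lines) is not shown in this diff. A hypothetical sketch of the registry those two imported names imply, assuming a simple module-level mapping:

```python
# Hypothetical reconstruction for illustration only; the real register_profiles.py
# is not included in this diff, only the two names imported from it.
from typing import Any

_PROFILES: dict[str, dict[str, dict[str, Any]]] = {}


def _register_profile_with_provider(
    provider: str, profiles: dict[str, dict[str, Any]]
) -> None:
    # Called from _create_openai_compatible_model when `model_profiles` is passed.
    _PROFILES.setdefault(provider, {}).update(profiles)


def _get_profile_by_provider_and_model(provider: str, model_name: str) -> dict[str, Any]:
    # Falling back to an empty dict matches the pre-1.3.5 default of `self.profile = {}`.
    return _PROFILES.get(provider, {}).get(model_name, {})
```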
@@ -574,10 +591,57 @@
  )
  
  
+ def _validate_compatibility_options(
+     compatibility_options: Optional[CompatibilityOptions] = None,
+ ) -> None:
+     """Validate provider configuration against supported features.
+
+     Args:
+         compatibility_options: Optional configuration for the provider
+
+     Raises:
+         ValueError: If provider configuration is invalid
+     """
+     if compatibility_options is None:
+         compatibility_options = {}
+
+     if "supported_tool_choice" in compatibility_options:
+         _supported_tool_choice = compatibility_options["supported_tool_choice"]
+         for tool_choice in _supported_tool_choice:
+             if tool_choice not in ["auto", "none", "required", "specific"]:
+                 raise ValueError(
+                     f"Unsupported tool_choice: {tool_choice}. Please choose from 'auto', 'none', 'required','specific'."
+                 )
+
+     if "supported_response_format" in compatibility_options:
+         _supported_response_format = compatibility_options["supported_response_format"]
+         for response_format in _supported_response_format:
+             if response_format not in ["json_schema", "json_mode"]:
+                 raise ValueError(
+                     f"Unsupported response_format: {response_format}. Please choose from 'json_schema', 'json_mode'."
+                 )
+
+     if "reasoning_keep_policy" in compatibility_options:
+         _reasoning_keep_policy = compatibility_options["reasoning_keep_policy"]
+         if _reasoning_keep_policy not in ["never", "current", "all"]:
+             raise ValueError(
+                 f"Unsupported reasoning_keep_policy: {_reasoning_keep_policy}. Please choose from 'never', 'current', 'all'."
+             )
+
+     if "include_usage" in compatibility_options:
+         _include_usage = compatibility_options["include_usage"]
+         if not isinstance(_include_usage, bool):
+             raise ValueError(
+                 f"include_usage must be a boolean value. Received: {_include_usage}"
+             )
+
+
  def _create_openai_compatible_model(
      provider: str,
      base_url: str,
      compatibility_options: Optional[CompatibilityOptions] = None,
+     profiles: Optional[dict[str, dict[str, Any]]] = None,
+     chat_model_cls_name: Optional[str] = None,
  ) -> Type[_BaseChatOpenAICompatible]:
      """Factory function for creating provider-specific OpenAI-compatible model classes.
  
@@ -588,14 +652,27 @@ def _create_openai_compatible_model(
          provider: Provider identifier (e.g.`vllm`)
          base_url: Default API base URL for the provider
          compatibility_options: Optional configuration for the provider
+         profiles: Optional profiles for the provider
+         chat_model_cls_name: Optional name for the model class
  
      Returns:
          Configured model class ready for instantiation with provider-specific settings
      """
-     chat_model_cls_name = f"Chat{provider.title()}"
+     chat_model_cls_name = chat_model_cls_name or f"Chat{provider.title()}"
      if compatibility_options is None:
          compatibility_options = {}
  
+     if profiles is not None:
+         _register_profile_with_provider(provider, profiles)
+
+     _validate_compatibility_options(compatibility_options)
+
+     _validate_provider_name(provider)
+
+     _validate_model_cls_name(chat_model_cls_name)
+
+     _validate_base_url(base_url)
+
      return create_model(
          chat_model_cls_name,
          __base__=_BaseChatOpenAICompatible,
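
Taken together, the factory now validates its inputs before the class is built. A short behaviour sketch of `_validate_compatibility_options`, using only the literals accepted above; the helper is private, and importing this module requires the `standard` extra since it depends on `langchain_openai`:

```python
# Behaviour sketch for the private _validate_compatibility_options helper added above.
from langchain_dev_utils.chat_models.adapters.openai_compatible import (
    _validate_compatibility_options,
)

# Options drawn from the documented literals pass silently.
_validate_compatibility_options(
    {
        "supported_tool_choice": ["auto", "required"],
        "supported_response_format": ["json_mode"],
        "reasoning_keep_policy": "current",
        "include_usage": True,
    }
)

# Anything outside those literals is rejected before the model class is created.
try:
    _validate_compatibility_options({"reasoning_keep_policy": "always"})
except ValueError as err:
    print(err)  # Unsupported reasoning_keep_policy: always. ...
```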