langchain-dev-utils 1.1.12__tar.gz → 1.1.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/PKG-INFO +3 -3
  2. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/README.md +2 -2
  3. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/README_cn.md +2 -2
  4. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/pyproject.toml +2 -1
  5. langchain_dev_utils-1.1.13/src/langchain_dev_utils/__init__.py +1 -0
  6. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/middleware/plan.py +19 -7
  7. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +1 -1
  8. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/chat_models/base.py +45 -6
  9. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/embeddings/base.py +52 -8
  10. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_agent.py +2 -2
  11. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_load_model.py +2 -2
  12. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_model_tool_emulator.py +4 -2
  13. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_plan_middleware.py +4 -1
  14. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_router_model.py +2 -2
  15. langchain_dev_utils-1.1.13/uv.lock +1980 -0
  16. langchain_dev_utils-1.1.12/src/langchain_dev_utils/__init__.py +0 -1
  17. langchain_dev_utils-1.1.12/uv.lock +0 -1955
  18. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/.gitignore +0 -0
  19. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/.python-version +0 -0
  20. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/.vscode/settings.json +0 -0
  21. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/LICENSE +0 -0
  22. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/__init__.py +0 -0
  23. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/factory.py +0 -0
  24. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/file_system.py +0 -0
  25. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/middleware/__init__.py +0 -0
  26. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/middleware/model_fallback.py +0 -0
  27. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/middleware/model_router.py +0 -0
  28. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/middleware/summarization.py +0 -0
  29. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +0 -0
  30. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/middleware/tool_selection.py +0 -0
  31. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/plan.py +0 -0
  32. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/agents/wrap.py +0 -0
  33. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  34. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
  35. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/chat_models/types.py +0 -0
  36. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  37. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/message_convert/__init__.py +0 -0
  38. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/message_convert/content.py +0 -0
  39. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/message_convert/format.py +0 -0
  40. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  41. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/pipeline/parallel.py +0 -0
  42. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/pipeline/sequential.py +0 -0
  43. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/pipeline/types.py +0 -0
  44. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/py.typed +0 -0
  45. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  46. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +0 -0
  47. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/src/langchain_dev_utils/tool_calling/utils.py +0 -0
  48. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_chat_models.py +0 -0
  49. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_human_in_the_loop.py +0 -0
  50. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_load_embbeding.py +0 -0
  51. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_messages.py +0 -0
  52. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_pipline.py +0 -0
  53. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_tool_calling.py +0 -0
  54. {langchain_dev_utils-1.1.12 → langchain_dev_utils-1.1.13}/tests/test_wrap_agent.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: langchain-dev-utils
3
- Version: 1.1.12
3
+ Version: 1.1.13
4
4
  Summary: A practical utility library for LangChain and LangGraph development
5
5
  Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
6
6
  Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -61,7 +61,7 @@ Mainly consists of the following two functions:
61
61
 
62
62
  - `provider_name`: Model provider name, used as an identifier for subsequent model loading
63
63
  - `chat_model`: Chat model, can be a ChatModel or a string (currently supports "openai-compatible")
64
- - `base_url`: API address of the model provider (optional, valid when `chat_model` is a string and is "openai-compatible")
64
+ - `base_url`: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
65
65
  - `provider_config`: Relevant configuration for the model provider (optional, valid when `chat_model` is a string and is "openai-compatible"), can configure some provider-related parameters, such as whether to support structured output in json_mode, list of supported tool_choices, etc.
66
66
 
67
67
  `load_chat_model` parameter description:
@@ -101,7 +101,7 @@ Mainly consists of the following two functions:
101
101
 
102
102
  - `provider_name`: Embedding model provider name, used as an identifier for subsequent model loading
103
103
  - `embeddings_model`: Embedding model, can be Embeddings or a string (currently supports "openai-compatible")
104
- - `base_url`: API address of the model provider (optional, valid when `embeddings_model` is a string and is "openai-compatible")
104
+ - `base_url`: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
105
105
 
106
106
  `load_embeddings` parameter description:
107
107
 
@@ -45,7 +45,7 @@ Mainly consists of the following two functions:
45
45
 
46
46
  - `provider_name`: Model provider name, used as an identifier for subsequent model loading
47
47
  - `chat_model`: Chat model, can be a ChatModel or a string (currently supports "openai-compatible")
48
- - `base_url`: API address of the model provider (optional, valid when `chat_model` is a string and is "openai-compatible")
48
+ - `base_url`: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
49
49
  - `provider_config`: Relevant configuration for the model provider (optional, valid when `chat_model` is a string and is "openai-compatible"), can configure some provider-related parameters, such as whether to support structured output in json_mode, list of supported tool_choices, etc.
50
50
 
51
51
  `load_chat_model` parameter description:
@@ -85,7 +85,7 @@ Mainly consists of the following two functions:
85
85
 
86
86
  - `provider_name`: Embedding model provider name, used as an identifier for subsequent model loading
87
87
  - `embeddings_model`: Embedding model, can be Embeddings or a string (currently supports "openai-compatible")
88
- - `base_url`: API address of the model provider (optional, valid when `embeddings_model` is a string and is "openai-compatible")
88
+ - `base_url`: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
89
89
 
90
90
  `load_embeddings` parameter description:
91
91
 
@@ -45,7 +45,7 @@ pip install -U langchain-dev-utils[standard]
45
45
 
46
46
  - `provider_name`:模型提供商名称,作为后续模型加载的标识
47
47
  - `chat_model`:对话模型,可以是 ChatModel 或字符串(目前支持 "openai-compatible")
48
- - `base_url`:模型提供商的 API 地址(可选,当 `chat_model` 为字符串且是"openai-compatible"时有效)
48
+ - `base_url`:模型提供商的 API 地址(可选,对于`chat_model`的两种类型情况都有效,但是主要用于`chat_model`为字符串且是"openai-compatible"的情况)
49
49
  - `provider_config`:模型提供商的相关配置(可选,当 `chat_model` 为字符串且是 "openai-compatible" 时有效),可以配置一些提供商的相关参数,例如是否支持 json_mode 的结构化输出方式、支持的 tool_choice 列表等
50
50
 
51
51
  `load_chat_model` 参数说明:
@@ -85,7 +85,7 @@ print(model.invoke("Hello"))
85
85
 
86
86
  - `provider_name`:嵌入模型提供商名称,作为后续模型加载的标识
87
87
  - `embeddings_model`:嵌入模型,可以是 Embeddings 或字符串(目前支持 "openai-compatible")
88
- - `base_url`:模型提供商的 API 地址(可选,当 `embeddings_model` 为字符串且是"openai-compatible"时有效)
88
+ - `base_url`:嵌入模型提供商的 API 地址(可选,对于`embeddings_model`的两种类型情况都有效,但是主要用于`embeddings_model`为字符串且是"openai-compatible"的情况)
89
89
 
90
90
  `load_embeddings` 参数说明:
91
91
 
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "langchain-dev-utils"
3
- version = "1.1.12"
3
+ version = "1.1.13"
4
4
  description = "A practical utility library for LangChain and LangGraph development"
5
5
  readme = "README.md"
6
6
  authors = [{ name = "tiebingice", email = "tiebingice123@outlook.com" }]
@@ -32,4 +32,5 @@ tests = [
32
32
  "python-dotenv>=1.1.1",
33
33
  "langchain-tests>=1.0.0",
34
34
  "langchain-deepseek>=1.0.0",
35
+ "langchain-qwq>=0.3.0",
35
36
  ]
@@ -0,0 +1 @@
1
+ __version__ = "1.1.13"
@@ -244,9 +244,10 @@ def create_read_plan_tool(
244
244
 
245
245
  _PLAN_SYSTEM_PROMPT_NOT_READ_PLAN = """You can manage task plans using two simple tools:
246
246
 
247
- 1. **write_plan**
247
+ ## write_plan
248
248
  - Use it to break complex tasks (3+ steps) into a clear, actionable list. Only include next steps to execute — the first becomes `"in_progress"`, the rest `"pending"`. Don’t use it for simple tasks (<3 steps).
249
- 2. **finish_sub_plan**
249
+
250
+ ## finish_sub_plan
250
251
  - Call it **only when the current task is 100% done**. It automatically marks it `"done"` and promotes the next `"pending"` task to `"in_progress"`. No parameters needed. Never use it mid-task or if anything’s incomplete.
251
252
  Keep plans lean, update immediately, and never batch completions.
252
253
  """
@@ -277,9 +278,15 @@ class PlanMiddleware(AgentMiddleware):
277
278
 
278
279
  Args:
279
280
  system_prompt: Custom system prompt to guide the agent on using the plan tool.
280
- If not provided, uses the default `_PLAN_MIDDLEWARE_SYSTEM_PROMPT`.
281
- tools: List of tools to be added to the agent. The tools must be created by `create_write_plan_tool`, `create_finish_sub_plan_tool`, and `create_read_plan_tool`(optional).
282
-
281
+ If not provided, uses the default `_PLAN_SYSTEM_PROMPT` or `_PLAN_SYSTEM_PROMPT_NOT_READ_PLAN` based on the `use_read_plan_tool` parameter.
282
+ write_plan_tool_description: Description of the `write_plan` tool.
283
+ If not provided, uses the default `_DEFAULT_WRITE_PLAN_TOOL_DESCRIPTION`.
284
+ finish_sub_plan_tool_description: Description of the `finish_sub_plan` tool.
285
+ If not provided, uses the default `_DEFAULT_FINISH_SUB_PLAN_TOOL_DESCRIPTION`.
286
+ read_plan_tool_description: Description of the `read_plan` tool.
287
+ If not provided, uses the default `_DEFAULT_READ_PLAN_TOOL_DESCRIPTION`.
288
+ use_read_plan_tool: Whether to use the `read_plan` tool.
289
+ If not provided, uses the default `True`.
283
290
  Example:
284
291
  ```python
285
292
  from langchain_dev_utils.agents.middleware.plan import PlanMiddleware
@@ -304,6 +311,7 @@ class PlanMiddleware(AgentMiddleware):
304
311
  finish_sub_plan_tool_description: Optional[str] = None,
305
312
  read_plan_tool_description: Optional[str] = None,
306
313
  use_read_plan_tool: bool = True,
314
+ message_key: Optional[str] = None,
307
315
  ) -> None:
308
316
  super().__init__()
309
317
 
@@ -319,8 +327,12 @@ class PlanMiddleware(AgentMiddleware):
319
327
  )
320
328
 
321
329
  tools = [
322
- create_write_plan_tool(description=write_plan_tool_description),
323
- create_finish_sub_plan_tool(description=finish_sub_plan_tool_description),
330
+ create_write_plan_tool(
331
+ description=write_plan_tool_description, message_key=message_key
332
+ ),
333
+ create_finish_sub_plan_tool(
334
+ description=finish_sub_plan_tool_description, message_key=message_key
335
+ ),
324
336
  ]
325
337
 
326
338
  if use_read_plan_tool:
@@ -46,7 +46,7 @@ _DictOrPydantic = Union[dict, _BM]
46
46
 
47
47
 
48
48
  class _ModelProviderConfigType(BaseModel):
49
- supported_tool_choice: ToolChoiceType = Field(default=[])
49
+ supported_tool_choice: ToolChoiceType = Field(default_factory=list)
50
50
  keep_reasoning_content: bool = Field(default=False)
51
51
  support_json_mode: bool = Field(default=False)
52
52
 
@@ -1,10 +1,11 @@
1
- import os
2
1
  from typing import Any, NotRequired, Optional, TypedDict, cast
3
2
 
4
3
  from langchain.chat_models.base import _SUPPORTED_PROVIDERS, _init_chat_model_helper
5
4
  from langchain_core.language_models.chat_models import BaseChatModel
5
+ from langchain_core.utils import from_env
6
6
 
7
7
  from .types import ChatModelType, ToolChoiceType
8
+ from pydantic import BaseModel
8
9
 
9
10
  _MODEL_PROVIDERS_DICT = {}
10
11
 
@@ -22,6 +23,34 @@ class ChatModelProvider(TypedDict):
22
23
  provider_config: NotRequired[ProviderConfig]
23
24
 
24
25
 
26
+ def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
27
+ """
28
+ Return 'base_url' if the model has a field named or aliased as 'base_url',
29
+ else return 'api_base' if it has a field named or aliased as 'api_base',
30
+ else return None.
31
+ The return value is always either 'base_url', 'api_base', or None.
32
+ """
33
+ model_fields = model_cls.model_fields
34
+
35
+ # try model_fields first
36
+ if "base_url" in model_fields:
37
+ return "base_url"
38
+
39
+ if "api_base" in model_fields:
40
+ return "api_base"
41
+
42
+ # then try aliases
43
+ for field_info in model_fields.values():
44
+ if field_info.alias == "base_url":
45
+ return "base_url"
46
+
47
+ for field_info in model_fields.values():
48
+ if field_info.alias == "api_base":
49
+ return "api_base"
50
+
51
+ return None
52
+
53
+
25
54
  def _parse_model(model: str, model_provider: Optional[str]) -> tuple[str, str]:
26
55
  """Parse model string and provider.
27
56
 
@@ -71,6 +100,11 @@ def _load_chat_model_helper(
71
100
  "provider_config"
72
101
  ):
73
102
  kwargs.update({"provider_config": provider_config})
103
+
104
+ if base_url := _MODEL_PROVIDERS_DICT[model_provider].get("base_url"):
105
+ url_key = _get_base_url_field_name(chat_model)
106
+ if url_key:
107
+ kwargs.update({url_key: base_url})
74
108
  return chat_model(model=model, **kwargs)
75
109
 
76
110
  return _init_chat_model_helper(model, model_provider=model_provider, **kwargs)
@@ -91,7 +125,7 @@ def register_model_provider(
91
125
  Args:
92
126
  provider_name: Name of the provider to register
93
127
  chat_model: Either a BaseChatModel class or a string identifier for a supported provider
94
- base_url: Optional base URL for API endpoints (Optional parameter;effective only when `chat_model` is a string and is "openai-compatible".)
128
+ base_url: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
95
129
  provider_config: The configuration of the model provider (Optional parameter;effective only when `chat_model` is a string and is "openai-compatible".)
96
130
  It can be configured to configure some related parameters of the provider, such as whether to support json_mode structured output mode, the list of supported tool_choice
97
131
  Raises:
@@ -113,6 +147,7 @@ def register_model_provider(
113
147
  >>> model = load_chat_model(model="vllm:qwen3-4b")
114
148
  >>> model.invoke("Hello")
115
149
  """
150
+ base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
116
151
  if isinstance(chat_model, str):
117
152
  try:
118
153
  from .adapters.openai_compatible import _create_openai_compatible_model
@@ -120,8 +155,6 @@ def register_model_provider(
120
155
  raise ImportError(
121
156
  "Please install langchain_dev_utils[standard],when chat_model is a 'openai-compatible'"
122
157
  )
123
-
124
- base_url = base_url or os.getenv(f"{provider_name.upper()}_API_BASE")
125
158
  if base_url is None:
126
159
  raise ValueError(
127
160
  f"base_url must be provided or set {provider_name.upper()}_API_BASE environment variable when chat_model is a string"
@@ -140,11 +173,17 @@ def register_model_provider(
140
173
  provider_name: {
141
174
  "chat_model": chat_model,
142
175
  "provider_config": provider_config,
176
+ "base_url": base_url,
143
177
  }
144
178
  }
145
179
  )
146
180
  else:
147
- _MODEL_PROVIDERS_DICT.update({provider_name: {"chat_model": chat_model}})
181
+ if base_url is not None:
182
+ _MODEL_PROVIDERS_DICT.update(
183
+ {provider_name: {"chat_model": chat_model, "base_url": base_url}}
184
+ )
185
+ else:
186
+ _MODEL_PROVIDERS_DICT.update({provider_name: {"chat_model": chat_model}})
148
187
 
149
188
 
150
189
  def batch_register_model_provider(
@@ -159,7 +198,7 @@ def batch_register_model_provider(
159
198
  providers: List of ChatModelProvider dictionaries, each containing:
160
199
  - provider_name: Name of the provider to register
161
200
  - chat_model: Either a BaseChatModel class or a string identifier for a supported provider
162
- - base_url: Optional base URL for API endpoints(Optional parameter; effective only when `chat_model` is a string and is "openai-compatible".)
201
+ - base_url: The API address of the model provider (optional, valid for both types of `chat_model`, but mainly used when `chat_model` is a string and is "openai-compatible")
163
202
  - provider_config: The configuration of the model provider(Optional parameter; effective only when `chat_model` is a string and is "openai-compatible".)
164
203
  It can be configured to configure some related parameters of the provider, such as whether to support json_mode structured output mode, the list of supported tool_choice
165
204
 
@@ -1,7 +1,8 @@
1
- import os
2
1
  from typing import Any, Literal, NotRequired, Optional, TypedDict, Union
3
2
 
4
3
  from langchain.embeddings.base import Embeddings, _SUPPORTED_PROVIDERS, init_embeddings
4
+ from langchain_core.utils import from_env, secret_from_env
5
+ from pydantic import BaseModel
5
6
 
6
7
  _EMBEDDINGS_PROVIDERS_DICT = {}
7
8
 
@@ -14,6 +15,34 @@ class EmbeddingProvider(TypedDict):
14
15
  base_url: NotRequired[str]
15
16
 
16
17
 
18
+ def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
19
+ """
20
+ Return 'base_url' if the model has a field named or aliased as 'base_url',
21
+ else return 'api_base' if it has a field named or aliased as 'api_base',
22
+ else return None.
23
+ The return value is always either 'base_url', 'api_base', or None.
24
+ """
25
+ model_fields = model_cls.model_fields
26
+
27
+ # try model_fields first
28
+ if "base_url" in model_fields:
29
+ return "base_url"
30
+
31
+ if "api_base" in model_fields:
32
+ return "api_base"
33
+
34
+ # then try aliases
35
+ for field_info in model_fields.values():
36
+ if field_info.alias == "base_url":
37
+ return "base_url"
38
+
39
+ for field_info in model_fields.values():
40
+ if field_info.alias == "api_base":
41
+ return "api_base"
42
+
43
+ return None
44
+
45
+
17
46
  def _parse_model_string(model_name: str) -> tuple[str, str]:
18
47
  """Parse model string into provider and model name.
19
48
 
@@ -56,7 +85,7 @@ def register_embeddings_provider(
56
85
  Args:
57
86
  provider_name: Name of the provider to register
58
87
  embeddings_model: Either an Embeddings class or a string identifier for a supported provider
59
- base_url: Optional base URL for API endpoints (required when embeddings_model is a string)
88
+ base_url: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
60
89
 
61
90
  Raises:
62
91
  ValueError: If base_url is not provided when embeddings_model is a string
@@ -77,8 +106,9 @@ def register_embeddings_provider(
77
106
  >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
78
107
  >>> embeddings.embed_query("hello world")
79
108
  """
109
+
110
+ base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
80
111
  if isinstance(embeddings_model, str):
81
- base_url = base_url or os.getenv(f"{provider_name.upper()}_API_BASE")
82
112
  if base_url is None:
83
113
  raise ValueError(
84
114
  f"base_url must be provided or set {provider_name.upper()}_API_BASE environment variable when embeddings_model is a string"
@@ -98,9 +128,19 @@ def register_embeddings_provider(
98
128
  }
99
129
  )
100
130
  else:
101
- _EMBEDDINGS_PROVIDERS_DICT.update(
102
- {provider_name: {"embeddings_model": embeddings_model}}
103
- )
131
+ if base_url is not None:
132
+ _EMBEDDINGS_PROVIDERS_DICT.update(
133
+ {
134
+ provider_name: {
135
+ "embeddings_model": embeddings_model,
136
+ "base_url": base_url,
137
+ }
138
+ }
139
+ )
140
+ else:
141
+ _EMBEDDINGS_PROVIDERS_DICT.update(
142
+ {provider_name: {"embeddings_model": embeddings_model}}
143
+ )
104
144
 
105
145
 
106
146
  def batch_register_embeddings_provider(
@@ -115,7 +155,7 @@ def batch_register_embeddings_provider(
115
155
  providers: List of EmbeddingProvider dictionaries, each containing:
116
156
  - provider_name: str - Provider name
117
157
  - embeddings_model: Union[Type[Embeddings], str] - Model class or provider string
118
- - base_url: Optional[str] - Base URL for API endpoints
158
+ - base_url: The API address of the Embedding model provider (optional, valid for both types of `embeddings_model`, but mainly used when `embeddings_model` is a string and is "openai-compatible")
119
159
 
120
160
  Raises:
121
161
  ValueError: If any of the providers are invalid
@@ -186,7 +226,7 @@ def load_embeddings(
186
226
  embeddings = _EMBEDDINGS_PROVIDERS_DICT[provider]["embeddings_model"]
187
227
  if isinstance(embeddings, str):
188
228
  if not (api_key := kwargs.get("api_key")):
189
- api_key = os.getenv(f"{provider.upper()}_API_KEY")
229
+ api_key = secret_from_env(f"{provider.upper()}_API_KEY", default=None)()
190
230
  if not api_key:
191
231
  raise ValueError(
192
232
  f"API key for {provider} not found. Please set it in the environment."
@@ -203,4 +243,8 @@ def load_embeddings(
203
243
  **kwargs,
204
244
  )
205
245
  else:
246
+ if base_url := _EMBEDDINGS_PROVIDERS_DICT[provider].get("base_url"):
247
+ url_key = _get_base_url_field_name(embeddings)
248
+ if url_key is not None:
249
+ kwargs.update({url_key: base_url})
206
250
  return embeddings(model=model, **kwargs)
@@ -1,6 +1,6 @@
1
1
  from dotenv import load_dotenv
2
2
  from langchain.agents.structured_output import ToolStrategy
3
- from langchain_community.chat_models import ChatTongyi
3
+ from langchain_qwq import ChatQwen
4
4
  from langchain_core.messages import HumanMessage
5
5
  from pydantic import BaseModel, Field
6
6
  import pytest
@@ -14,7 +14,7 @@ batch_register_model_provider(
14
14
  [
15
15
  {
16
16
  "provider_name": "dashscope",
17
- "chat_model": ChatTongyi,
17
+ "chat_model": ChatQwen,
18
18
  },
19
19
  {"provider_name": "zai", "chat_model": "openai-compatible"},
20
20
  ]
@@ -1,6 +1,6 @@
1
1
  from dotenv import load_dotenv
2
- from langchain_community.chat_models import ChatTongyi
3
2
  from langchain_core.language_models import BaseChatModel
3
+ from langchain_qwq import ChatQwen
4
4
  import pytest
5
5
 
6
6
  from langchain_dev_utils.chat_models import (
@@ -14,7 +14,7 @@ batch_register_model_provider(
14
14
  [
15
15
  {
16
16
  "provider_name": "dashscope",
17
- "chat_model": ChatTongyi,
17
+ "chat_model": ChatQwen,
18
18
  },
19
19
  {"provider_name": "zai", "chat_model": "openai-compatible"},
20
20
  ]
@@ -1,15 +1,17 @@
1
1
  from langchain.tools import tool
2
- from langchain_community.chat_models import ChatTongyi
2
+ from langchain_qwq import ChatQwen
3
3
  from langchain_core.messages import HumanMessage, ToolMessage
4
4
  import pytest
5
5
 
6
6
  from langchain_dev_utils.agents import create_agent
7
7
  from langchain_dev_utils.agents.middleware import LLMToolEmulator
8
8
  from langchain_dev_utils.chat_models import register_model_provider
9
+ from dotenv import load_dotenv
9
10
 
11
+ load_dotenv()
10
12
  register_model_provider(
11
13
  "dashscope",
12
- ChatTongyi,
14
+ ChatQwen,
13
15
  )
14
16
 
15
17
 
@@ -54,7 +54,10 @@ def test_plan_middleware():
54
54
 
55
55
  assert result["plan"]
56
56
  assert len(result["plan"]) == 3
57
- assert all([plan["status"] == "done" for plan in result["plan"]])
57
+ assert all([plan["status"] == "done" for plan in result["plan"]]) or all(
58
+ [plan["status"] == "done" for plan in result["plan"][:-1]]
59
+ + [result["plan"][-1]["status"] == "in_progress"]
60
+ )
58
61
 
59
62
  write_plan_count = 0
60
63
  finish_sub_plan_count = 0
@@ -1,6 +1,6 @@
1
1
  from dotenv import load_dotenv
2
2
  from langchain.tools import tool
3
- from langchain_community.chat_models import ChatTongyi
3
+ from langchain_qwq import ChatQwen
4
4
  from langchain_core.messages import HumanMessage, ToolMessage
5
5
 
6
6
  from langchain_dev_utils.agents import create_agent
@@ -13,7 +13,7 @@ batch_register_model_provider(
13
13
  [
14
14
  {
15
15
  "provider_name": "dashscope",
16
- "chat_model": ChatTongyi,
16
+ "chat_model": ChatQwen,
17
17
  },
18
18
  {"provider_name": "zai", "chat_model": "openai-compatible"},
19
19
  ]