langchain-dev-utils 1.2.8__tar.gz → 1.2.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/PKG-INFO +33 -19
  2. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/README.md +32 -18
  3. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/README_cn.md +17 -6
  4. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/pyproject.toml +1 -1
  5. langchain_dev_utils-1.2.9/src/langchain_dev_utils/__init__.py +1 -0
  6. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py +9 -14
  7. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/chat_models/types.py +2 -2
  8. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/embeddings/base.py +26 -26
  9. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/message_convert/content.py +18 -8
  10. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/tool_calling/utils.py +7 -7
  11. langchain_dev_utils-1.2.8/src/langchain_dev_utils/__init__.py +0 -1
  12. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/.gitignore +0 -0
  13. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/.python-version +0 -0
  14. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/.vscode/settings.json +0 -0
  15. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/LICENSE +0 -0
  16. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/_utils.py +0 -0
  17. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/__init__.py +0 -0
  18. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/factory.py +0 -0
  19. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/file_system.py +0 -0
  20. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/middleware/__init__.py +0 -0
  21. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/middleware/model_fallback.py +0 -0
  22. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/middleware/model_router.py +0 -0
  23. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/middleware/plan.py +0 -0
  24. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/middleware/summarization.py +0 -0
  25. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/middleware/tool_call_repair.py +0 -0
  26. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/middleware/tool_emulator.py +0 -0
  27. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/middleware/tool_selection.py +0 -0
  28. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/plan.py +0 -0
  29. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/agents/wrap.py +0 -0
  30. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/chat_models/__init__.py +0 -0
  31. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/chat_models/adapters/__init__.py +0 -0
  32. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/chat_models/base.py +0 -0
  33. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/embeddings/__init__.py +0 -0
  34. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/message_convert/__init__.py +0 -0
  35. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/message_convert/format.py +0 -0
  36. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/pipeline/__init__.py +0 -0
  37. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/pipeline/parallel.py +0 -0
  38. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/pipeline/sequential.py +0 -0
  39. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/pipeline/types.py +0 -0
  40. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/py.typed +0 -0
  41. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/tool_calling/__init__.py +0 -0
  42. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/src/langchain_dev_utils/tool_calling/human_in_the_loop.py +0 -0
  43. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/__init__.py +0 -0
  44. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_agent.py +0 -0
  45. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_chat_models.py +0 -0
  46. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_human_in_the_loop.py +0 -0
  47. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_load_embbeding.py +0 -0
  48. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_load_model.py +0 -0
  49. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_messages.py +0 -0
  50. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_model_tool_emulator.py +0 -0
  51. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_pipline.py +0 -0
  52. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_plan_middleware.py +0 -0
  53. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_router_model.py +0 -0
  54. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_tool_call_repair.py +0 -0
  55. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_tool_calling.py +0 -0
  56. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/tests/test_wrap_agent.py +0 -0
  57. {langchain_dev_utils-1.2.8 → langchain_dev_utils-1.2.9}/uv.lock +0 -0
--- langchain_dev_utils-1.2.8/PKG-INFO
+++ langchain_dev_utils-1.2.9/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-dev-utils
-Version: 1.2.8
+Version: 1.2.9
 Summary: A practical utility library for LangChain and LangGraph development
 Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
 Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -192,52 +192,66 @@ def get_current_time() -> str:
 
 ### 4. **Agent Development**
 
-Includes the following features:
+Includes the following capabilities:
 
-- Predefined agent factory functions
-- Common middleware components
+- Multi-agent construction
+- Commonly used middleware components
 
-#### 4.1 Agent Factory Functions
+#### 4.1 Multi-Agent Construction
 
-In LangChain v1, the official `create_agent` function can be used to create a single agent; its `model` parameter accepts either a BaseChatModel instance or a specific string (when a string is provided, only models supported by `init_chat_model` are allowed). To extend the flexibility of specifying models via string, this library provides an equivalent `create_agent` function that lets you designate any model supported by `load_chat_model` (registration required beforehand).
+Wrapping an agent as a tool is a common implementation pattern in multi-agent systems, as elaborated in the official LangChain documentation. To support this pattern, this library provides a pre-built utility function `wrap_agent_as_tool`, which encapsulates an agent instance into a tool that can be invoked by other agents.
 
-Usage example:
+**Usage Example**:
 
 ```python
-from langchain_dev_utils.agents import create_agent
+import datetime
+from langchain_dev_utils.agents import create_agent, wrap_agent_as_tool
 from langchain.agents import AgentState
 
+
+@tool
+def get_current_time() -> str:
+    """Get the current time."""
+    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+
 agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
-response = agent.invoke({"messages": [{"role": "user", "content": "What time is it?"}]})
+call_time_agent_tool = wrap_agent_as_tool(agent)
+response = call_time_agent_tool.invoke(
+    {"messages": [{"role": "user", "content": "What time is it now?"}]}
+)
 print(response)
 ```
 
 #### 4.2 Middleware
 
-Provides some commonly used middleware components. Below, we illustrate with `ToolCallRepairMiddleware` and `PlanMiddleware`.
-
-`ToolCallRepairMiddleware` is used to repair `invalid_tool_calls` generated by large language models.
+Provides several commonly used middleware components. Below are examples using `ToolCallRepairMiddleware` and `PlanMiddleware`.
 
-`PlanMiddleware` is used for agent planning.
+- `ToolCallRepairMiddleware` automatically repairs malformed tool calls found in the model's `invalid_tool_calls` output.
+- `PlanMiddleware` enables task planning capabilities for agents.
 
 ```python
 from langchain_dev_utils.agents.middleware import (
-    ToolcallRepairMiddleware,
+    ToolCallRepairMiddleware,
     PlanMiddleware,
 )
 
 agent = create_agent(
     "vllm:qwen3-4b",
     name="plan-agent",
-    middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
-        use_read_plan_tool=False
-    )]
+    middleware=[
+        ToolCallRepairMiddleware(),
+        PlanMiddleware(use_read_plan_tool=False)
+    ]
 )
-response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
+response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan for visiting New York."}]})
 print(response)
 ```
 
-**For more information about agent development and all built-in middleware, please refer to**: [Pre-built Agent Functions](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/prebuilt.html), [Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/middleware.html)
+**For more details on agent development and a complete list of built-in middleware, please refer to**:
+[Multi-Agent Construction](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/multi-agent.html),
+[Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/middleware.html)
+
 
 ### 5. **State Graph Orchestration**
 
--- langchain_dev_utils-1.2.8/README.md
+++ langchain_dev_utils-1.2.9/README.md
@@ -175,52 +175,66 @@ def get_current_time() -> str:
 
 ### 4. **Agent Development**
 
-Includes the following features:
+Includes the following capabilities:
 
-- Predefined agent factory functions
-- Common middleware components
+- Multi-agent construction
+- Commonly used middleware components
 
-#### 4.1 Agent Factory Functions
+#### 4.1 Multi-Agent Construction
 
-In LangChain v1, the official `create_agent` function can be used to create a single agent; its `model` parameter accepts either a BaseChatModel instance or a specific string (when a string is provided, only models supported by `init_chat_model` are allowed). To extend the flexibility of specifying models via string, this library provides an equivalent `create_agent` function that lets you designate any model supported by `load_chat_model` (registration required beforehand).
+Wrapping an agent as a tool is a common implementation pattern in multi-agent systems, as elaborated in the official LangChain documentation. To support this pattern, this library provides a pre-built utility function `wrap_agent_as_tool`, which encapsulates an agent instance into a tool that can be invoked by other agents.
 
-Usage example:
+**Usage Example**:
 
 ```python
-from langchain_dev_utils.agents import create_agent
+import datetime
+from langchain_dev_utils.agents import create_agent, wrap_agent_as_tool
 from langchain.agents import AgentState
 
+
+@tool
+def get_current_time() -> str:
+    """Get the current time."""
+    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+
 agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
-response = agent.invoke({"messages": [{"role": "user", "content": "What time is it?"}]})
+call_time_agent_tool = wrap_agent_as_tool(agent)
+response = call_time_agent_tool.invoke(
+    {"messages": [{"role": "user", "content": "What time is it now?"}]}
+)
 print(response)
 ```
 
 #### 4.2 Middleware
 
-Provides some commonly used middleware components. Below, we illustrate with `ToolCallRepairMiddleware` and `PlanMiddleware`.
-
-`ToolCallRepairMiddleware` is used to repair `invalid_tool_calls` generated by large language models.
+Provides several commonly used middleware components. Below are examples using `ToolCallRepairMiddleware` and `PlanMiddleware`.
 
-`PlanMiddleware` is used for agent planning.
+- `ToolCallRepairMiddleware` automatically repairs malformed tool calls found in the model's `invalid_tool_calls` output.
+- `PlanMiddleware` enables task planning capabilities for agents.
 
 ```python
 from langchain_dev_utils.agents.middleware import (
-    ToolcallRepairMiddleware,
+    ToolCallRepairMiddleware,
     PlanMiddleware,
 )
 
 agent = create_agent(
     "vllm:qwen3-4b",
     name="plan-agent",
-    middleware=[ToolCallRepairMiddleware(), PlanMiddleware(
-        use_read_plan_tool=False
-    )]
+    middleware=[
+        ToolCallRepairMiddleware(),
+        PlanMiddleware(use_read_plan_tool=False)
+    ]
 )
-response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan to New York"}]})
+response = agent.invoke({"messages": [{"role": "user", "content": "Give me a travel plan for visiting New York."}]})
 print(response)
 ```
 
-**For more information about agent development and all built-in middleware, please refer to**: [Pre-built Agent Functions](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/prebuilt.html), [Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/middleware.html)
+**For more details on agent development and a complete list of built-in middleware, please refer to**:
+[Multi-Agent Construction](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/multi-agent.html),
+[Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/en/agent-development/middleware.html)
+
 
 ### 5. **State Graph Orchestration**
 
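The README hunks above show `wrap_agent_as_tool` only from the caller's side. Conceptually it is a thin closure over `agent.invoke`; below is a minimal sketch of the idea, with a hypothetical tool name and return-value handling (the packaged implementation may differ):

```python
from typing import Any

from langchain_core.tools import tool


def wrap_agent_as_tool_sketch(agent: Any):
    """Hypothetical approximation of wrap_agent_as_tool; not the library's code."""

    @tool("call_agent")  # the tool name here is illustrative
    def call_agent(messages: list) -> str:
        """Delegate the conversation to the wrapped agent and return its final answer."""
        result = agent.invoke({"messages": messages})
        # Agents return a state dict; the last message carries the answer.
        return str(result["messages"][-1].content)

    return call_agent
```

Wrapping a sub-agent this way lets a supervisor agent treat delegation as an ordinary tool call, which is the multi-agent pattern the new README section describes.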
--- langchain_dev_utils-1.2.8/README_cn.md
+++ langchain_dev_utils-1.2.9/README_cn.md
@@ -176,21 +176,32 @@ def get_current_time() -> str:
 
 Includes the following features:
 
-- Predefined agent factory functions
+- Multi-agent construction
 - Commonly used middleware components
 
-#### 4.1 Agent Factory Functions
+#### 4.1 Multi-Agent Construction
 
-In LangChain v1, the official `create_agent` function can be used to create a single agent; its `model` parameter accepts either a BaseChatModel instance or a specific string (when a string is passed, only models supported by `init_chat_model` are allowed). To extend the flexibility of specifying models by string, this library provides a `create_agent` function with the same functionality, letting you specify via string any model supported by `load_chat_model` (registration required in advance).
+Wrapping an agent as a tool is a common implementation pattern in multi-agent systems, as elaborated in the official LangChain documentation. To this end, this library provides the pre-built function `wrap_agent_as_tool`, which wraps an agent instance into a tool that can be invoked by other agents.
 
 Usage example:
 
 ```python
-from langchain_dev_utils.agents import create_agent
+import datetime
+from langchain_dev_utils.agents import create_agent, wrap_agent_as_tool
 from langchain.agents import AgentState
 
+
+@tool
+def get_current_time() -> str:
+    """Get the current time."""
+    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+
 agent = create_agent("vllm:qwen3-4b", tools=[get_current_time], name="time-agent")
-response = agent.invoke({"messages": [{"role": "user", "content": "What time is it now?"}]})
+call_time_agent_tool = wrap_agent_as_tool(agent)
+response = call_time_agent_tool.invoke(
+    {"messages": [{"role": "user", "content": "What time is it now?"}]}
+)
 print(response)
 ```
 
@@ -219,7 +230,7 @@ response = agent.invoke({"messages": [{"role": "user", "content": "Give me a
 print(response)
 ```
 
-**For more on agent development and all of the built-in middleware, please refer to**: [Pre-built Agent Functions](https://tbice123123.github.io/langchain-dev-utils-docs/zh/agent-development/prebuilt.html), [Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/zh/agent-development/middleware.html)
+**For more on agent development and all of the built-in middleware, please refer to**: [Multi-Agent Construction](https://tbice123123.github.io/langchain-dev-utils-docs/zh/agent-development/multi-agent.html), [Middleware](https://tbice123123.github.io/langchain-dev-utils-docs/zh/agent-development/middleware.html)
 
 ### 5. **State Graph Orchestration**
 
--- langchain_dev_utils-1.2.8/pyproject.toml
+++ langchain_dev_utils-1.2.9/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "langchain-dev-utils"
-version = "1.2.8"
+version = "1.2.9"
 description = "A practical utility library for LangChain and LangGraph development"
 readme = "README.md"
 authors = [{ name = "tiebingice", email = "tiebingice123@outlook.com" }]
--- /dev/null
+++ langchain_dev_utils-1.2.9/src/langchain_dev_utils/__init__.py
@@ -0,0 +1 @@
+__version__ = "1.2.9"
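With the new top-level `__init__.py`, the installed version becomes introspectable at runtime:

```python
import langchain_dev_utils

print(langchain_dev_utils.__version__)  # "1.2.9"
```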
--- langchain_dev_utils-1.2.8/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py
+++ langchain_dev_utils-1.2.9/src/langchain_dev_utils/chat_models/adapters/openai_compatible.py
@@ -41,7 +41,7 @@ from typing_extensions import Self
 
 from ..types import (
     CompatibilityOptions,
-    ReasoningContentKeepType,
+    ReasoningKeepPolicy,
     ResponseFormatType,
     ToolChoiceType,
 )
@@ -119,7 +119,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
     """Supported tool choice"""
     supported_response_format: ResponseFormatType = Field(default_factory=list)
     """Supported response format"""
-    reasoning_content_keep_type: ReasoningContentKeepType = Field(default="discard")
+    reasoning_keep_policy: ReasoningKeepPolicy = Field(default="never")
     """How to keep reasoning content in the messages"""
     include_usage: bool = Field(default=True)
     """Whether to include usage information in the output"""
@@ -159,7 +159,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
 
         payload_messages = []
         last_human_index = -1
-        if self.reasoning_content_keep_type == "temp":
+        if self.reasoning_keep_policy == "current":
             last_human_index = _get_last_human_message_index(messages)
 
         for index, m in enumerate(messages):
@@ -167,15 +167,14 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
             msg_dict = _convert_message_to_dict(
                 _convert_from_v1_to_chat_completions(m)
             )
-            if (
-                self.reasoning_content_keep_type == "retain"
-                and m.additional_kwargs.get("reasoning_content")
+            if self.reasoning_keep_policy == "all" and m.additional_kwargs.get(
+                "reasoning_content"
             ):
                 msg_dict["reasoning_content"] = m.additional_kwargs.get(
                     "reasoning_content"
                 )
             elif (
-                self.reasoning_content_keep_type == "temp"
+                self.reasoning_keep_policy == "current"
                 and index > last_human_index
                 and m.additional_kwargs.get("reasoning_content")
             ):
@@ -558,13 +557,9 @@ def _create_openai_compatible_model(
            ToolChoiceType,
            Field(default=compatibility_options.get("supported_tool_choice", ["auto"])),
        ),
-        reasoning_content_keep_type=(
-            ReasoningContentKeepType,
-            Field(
-                default=compatibility_options.get(
-                    "reasoning_content_keep_type", "discard"
-                )
-            ),
+        reasoning_keep_policy=(
+            ReasoningKeepPolicy,
+            Field(default=compatibility_options.get("reasoning_keep_policy", "never")),
         ),
         supported_response_format=(
             ResponseFormatType,
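Taken together, the `openai_compatible.py` hunks above rename the option and keep three retention behaviors for `reasoning_content` when the request payload is rebuilt. A standalone paraphrase of that logic, as a minimal sketch (message dicts simplified; the real code operates on LangChain message objects):

```python
def retain_reasoning(messages: list[dict], policy: str, last_human_index: int) -> list[dict]:
    """Paraphrase of the payload logic above; not the library's code.

    "never"   -> reasoning_content is always stripped (formerly "discard")
    "current" -> kept only on messages after the last human turn (formerly "temp")
    "all"     -> kept wherever it exists (formerly "retain")
    """
    payload = []
    for index, m in enumerate(messages):
        entry = {k: v for k, v in m.items() if k != "reasoning_content"}
        rc = m.get("reasoning_content")
        if rc and (policy == "all" or (policy == "current" and index > last_human_index)):
            entry["reasoning_content"] = rc
        payload.append(entry)
    return payload
```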
--- langchain_dev_utils-1.2.8/src/langchain_dev_utils/chat_models/types.py
+++ langchain_dev_utils-1.2.9/src/langchain_dev_utils/chat_models/types.py
@@ -9,11 +9,11 @@ ToolChoiceType = list[Literal["auto", "none", "required", "specific"]]
 
 ResponseFormatType = list[Literal["json_schema", "json_mode"]]
 
-ReasoningContentKeepType = Literal["discard", "temp", "retain"]
+ReasoningKeepPolicy = Literal["never", "current", "all"]
 
 
 class CompatibilityOptions(TypedDict):
     supported_tool_choice: NotRequired[ToolChoiceType]
     supported_response_format: NotRequired[ResponseFormatType]
-    reasoning_content_keep_type: NotRequired[ReasoningContentKeepType]
+    reasoning_keep_policy: NotRequired[ReasoningKeepPolicy]
     include_usage: NotRequired[bool]
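For callers, the old option values map one-to-one onto the new ones: "discard" becomes "never", "temp" becomes "current", and "retain" becomes "all". A sketch of the updated `CompatibilityOptions`, assuming the import path implied by the file layout above:

```python
from langchain_dev_utils.chat_models.types import CompatibilityOptions

# 1.2.8 wrote {"reasoning_content_keep_type": "temp"}; the 1.2.9 equivalent:
options: CompatibilityOptions = {
    "supported_tool_choice": ["auto", "required"],
    "supported_response_format": ["json_schema"],
    "reasoning_keep_policy": "current",  # formerly "temp"
    "include_usage": True,
}
```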
--- langchain_dev_utils-1.2.8/src/langchain_dev_utils/embeddings/base.py
+++ langchain_dev_utils-1.2.9/src/langchain_dev_utils/embeddings/base.py
@@ -218,30 +218,30 @@ def load_embeddings(
     ):
         raise ValueError(f"Provider {provider} not registered")
 
-    if provider in _SUPPORTED_PROVIDERS:
-        return init_embeddings(model, provider=provider, **kwargs)
-
-    embeddings = _EMBEDDINGS_PROVIDERS_DICT[provider]["embeddings_model"]
-    if isinstance(embeddings, str):
-        if not (api_key := kwargs.get("api_key")):
-            api_key = secret_from_env(f"{provider.upper()}_API_KEY", default=None)()
-            if not api_key:
-                raise ValueError(
-                    f"API key for {provider} not found. Please set it in the environment."
-                )
-            kwargs["api_key"] = api_key
-        if embeddings == "openai-compatible":
-            kwargs["check_embedding_ctx_length"] = False
-            embeddings = "openai"
-        return init_embeddings(
-            model=model,
-            provider=embeddings,
-            base_url=_EMBEDDINGS_PROVIDERS_DICT[provider]["base_url"],
-            **kwargs,
-        )
+    if provider in _EMBEDDINGS_PROVIDERS_DICT:
+        embeddings = _EMBEDDINGS_PROVIDERS_DICT[provider]["embeddings_model"]
+        if isinstance(embeddings, str):
+            if not (api_key := kwargs.get("api_key")):
+                api_key = secret_from_env(f"{provider.upper()}_API_KEY", default=None)()
+                if not api_key:
+                    raise ValueError(
+                        f"API key for {provider} not found. Please set it in the environment."
+                    )
+                kwargs["api_key"] = api_key
+            if embeddings == "openai-compatible":
+                kwargs["check_embedding_ctx_length"] = False
+                embeddings = "openai"
+            return init_embeddings(
+                model=model,
+                provider=embeddings,
+                base_url=_EMBEDDINGS_PROVIDERS_DICT[provider]["base_url"],
+                **kwargs,
+            )
+        else:
+            if base_url := _EMBEDDINGS_PROVIDERS_DICT[provider].get("base_url"):
+                url_key = _get_base_url_field_name(embeddings)
+                if url_key is not None:
+                    kwargs.update({url_key: base_url})
+            return embeddings(model=model, **kwargs)
     else:
-        if base_url := _EMBEDDINGS_PROVIDERS_DICT[provider].get("base_url"):
-            url_key = _get_base_url_field_name(embeddings)
-            if url_key is not None:
-                kwargs.update({url_key: base_url})
-        return embeddings(model=model, **kwargs)
+        return init_embeddings(model, provider=provider, **kwargs)
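The restructure inverts the dispatch order: providers registered in `_EMBEDDINGS_PROVIDERS_DICT` are now resolved first, and everything else falls through to LangChain's `init_embeddings`. A simplified sketch of the new control flow (the registry entry shown is hypothetical, not a real registration):

```python
def resolve_embeddings(provider: str, registry: dict) -> str:
    """Sketch of the 1.2.9 dispatch order in load_embeddings; not the library's code."""
    if provider in registry:  # custom registrations now win over built-ins
        spec = registry[provider]["embeddings_model"]
        if isinstance(spec, str):
            # string spec: delegate to init_embeddings with the registered base_url
            return f"init_embeddings(provider={spec!r}, base_url=...)"
        # class spec: instantiate the registered Embeddings class directly
        return "registered Embeddings class"
    return "init_embeddings(model, provider=provider)"  # built-in fallback


registry = {"vllm": {"embeddings_model": "openai-compatible", "base_url": "http://localhost:8000/v1"}}
print(resolve_embeddings("vllm", registry))    # takes the custom path
print(resolve_embeddings("openai", registry))  # falls through to init_embeddings
```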
--- langchain_dev_utils-1.2.8/src/langchain_dev_utils/message_convert/content.py
+++ langchain_dev_utils-1.2.9/src/langchain_dev_utils/message_convert/content.py
@@ -52,8 +52,10 @@ def convert_reasoning_content_for_ai_message(
     reasoning_content = _get_reasoning_content(model_response)
 
     if reasoning_content:
-        model_response.content = (
-            f"{think_tag[0]}{reasoning_content}{think_tag[1]}{model_response.content}"
+        return model_response.model_copy(
+            update={
+                "content": f"{think_tag[0]}{reasoning_content}{think_tag[1]}{model_response.content}"
+            }
         )
     return model_response
 
@@ -99,12 +101,16 @@ def convert_reasoning_content_for_chunk_iterator(
         reasoning_content = _get_reasoning_content(chunk)
         if reasoning_content:
             if isfirst:
-                chunk.content = f"{think_tag[0]}{reasoning_content}"
+                chunk = chunk.model_copy(
+                    update={"content": f"{think_tag[0]}{reasoning_content}"}
+                )
                 isfirst = False
             else:
-                chunk.content = reasoning_content
+                chunk = chunk.model_copy(update={"content": reasoning_content})
         elif chunk.content and isend and not isfirst:
-            chunk.content = f"{think_tag[1]}{chunk.content}"
+            chunk = chunk.model_copy(
+                update={"content": f"{think_tag[1]}{chunk.content}"}
+            )
             isend = False
         yield chunk
 
@@ -149,12 +155,16 @@ async def aconvert_reasoning_content_for_chunk_iterator(
         reasoning_content = _get_reasoning_content(chunk)
         if reasoning_content:
             if isfirst:
-                chunk.content = f"{think_tag[0]}{reasoning_content}"
+                chunk = chunk.model_copy(
+                    update={"content": f"{think_tag[0]}{reasoning_content}"}
+                )
                 isfirst = False
             else:
-                chunk.content = reasoning_content
+                chunk = chunk.model_copy(update={"content": reasoning_content})
         elif chunk.content and isend and not isfirst:
-            chunk.content = f"{think_tag[1]}{chunk.content}"
+            chunk = chunk.model_copy(
+                update={"content": f"{think_tag[1]}{chunk.content}"}
+            )
             isend = False
         yield chunk
 
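All three hunks in this file swap in-place mutation of message objects for `model_copy(update=...)`, so a caller that still holds a reference to the original message or chunk no longer sees its `content` silently rewritten. A minimal illustration with `langchain_core` message types (the `<think>` delimiter is just an example tag pair):

```python
from langchain_core.messages import AIMessageChunk

chunk = AIMessageChunk(
    content="42",
    additional_kwargs={"reasoning_content": "thinking about the answer"},
)

# 1.2.9 behavior: patch a copy instead of assigning to chunk.content
patched = chunk.model_copy(
    update={"content": "<think>thinking about the answer</think>42"}
)

assert chunk.content == "42"  # the original chunk is untouched
assert patched.content.startswith("<think>")
```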
--- langchain_dev_utils-1.2.8/src/langchain_dev_utils/tool_calling/utils.py
+++ langchain_dev_utils-1.2.9/src/langchain_dev_utils/tool_calling/utils.py
@@ -62,20 +62,20 @@ def parse_tool_calling(
        ... tool_calls = parse_tool_calling(response)
     """
 
-    tool_call = None
+    tool_calls = None
 
     tool_call_blocks = [
         block for block in message.content_blocks if block["type"] == "tool_call"
     ]
     if tool_call_blocks:
-        tool_call = tool_call_blocks
+        tool_calls = tool_call_blocks
 
-    if not tool_call:
-        tool_call = message.tool_calls
+    if not tool_calls:
+        tool_calls = message.tool_calls
 
-    if not tool_call:
+    if not tool_calls:
         raise ValueError("No tool call found in message")
 
     if first_tool_call_only:
-        return (tool_call[0]["name"], tool_call[0]["args"])
-    return [(tool_call["name"], tool_call["args"]) for tool_call in tool_call]
+        return (tool_calls[0]["name"], tool_calls[0]["args"])
+    return [(tool_call["name"], tool_call["args"]) for tool_call in tool_calls]
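The `tool_call` to `tool_calls` rename is internal; the function's contract is unchanged from its docstring. A quick usage sketch, assuming the function is importable from the package's `tool_calling` module as the file layout suggests:

```python
from langchain_core.messages import AIMessage
from langchain_dev_utils.tool_calling import parse_tool_calling

msg = AIMessage(
    content="",
    tool_calls=[{"name": "get_current_time", "args": {}, "id": "call_1"}],
)

# A single (name, args) tuple for the first tool call ...
name, args = parse_tool_calling(msg, first_tool_call_only=True)

# ... or a list of (name, args) tuples for all of them.
pairs = parse_tool_calling(msg, first_tool_call_only=False)
```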
--- langchain_dev_utils-1.2.8/src/langchain_dev_utils/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = "1.2.8"