langchain_dev_utils-1.2.0-py3-none-any.whl → langchain_dev_utils-1.2.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/langchain_dev_utils/__init__.py
+++ b/langchain_dev_utils/__init__.py
@@ -1 +1 @@
-__version__ = "1.2.0"
+__version__ = "1.2.2"
--- a/langchain_dev_utils/agents/middleware/model_router.py
+++ b/langchain_dev_utils/agents/middleware/model_router.py
@@ -175,20 +175,23 @@ class ModelRouterMiddleware(AgentMiddleware):
             for item in self.model_list
         }
         select_model_name = request.state.get("router_model_selection", "default-model")
+
+        override_kwargs = {}
         if select_model_name != "default-model":
             if select_model_name in model_dict:
                 model_values = model_dict.get(select_model_name, {})
                 if model_values["kwargs"] is not None:
-                    request.model = load_chat_model(
-                        select_model_name, **model_values["kwargs"]
-                    )
+                    model = load_chat_model(select_model_name, **model_values["kwargs"])
                 else:
-                    request.model = load_chat_model(select_model_name)
+                    model = load_chat_model(select_model_name)
+                override_kwargs["model"] = model
                 if model_values["tools"] is not None:
-                    request.tools = model_values["tools"]
+                    override_kwargs["tools"] = model_values["tools"]
                 if model_values["system_prompt"] is not None:
-                    request.system_prompt = model_values["system_prompt"]
-        return handler(request)
+                    override_kwargs["system_message"] = SystemMessage(
+                        content=model_values["system_prompt"]
+                    )
+        return handler(request.override(**override_kwargs))
 
     async def awrap_model_call(
         self,
@@ -204,17 +207,19 @@ class ModelRouterMiddleware(AgentMiddleware):
             for item in self.model_list
         }
         select_model_name = request.state.get("router_model_selection", "default-model")
+        override_kwargs = {}
         if select_model_name != "default-model":
             if select_model_name in model_dict:
                 model_values = model_dict.get(select_model_name, {})
                 if model_values["kwargs"] is not None:
-                    request.model = load_chat_model(
-                        select_model_name, **model_values["kwargs"]
-                    )
+                    model = load_chat_model(select_model_name, **model_values["kwargs"])
                 else:
-                    request.model = load_chat_model(select_model_name)
+                    model = load_chat_model(select_model_name)
+                override_kwargs["model"] = model
                 if model_values["tools"] is not None:
-                    request.tools = model_values["tools"]
+                    override_kwargs["tools"] = model_values["tools"]
                 if model_values["system_prompt"] is not None:
-                    request.system_prompt = model_values["system_prompt"]
-        return await handler(request)
+                    override_kwargs["system_message"] = SystemMessage(
+                        content=model_values["system_prompt"]
+                    )
+        return await handler(request.override(**override_kwargs))
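
Both hunks replace in-place mutation of the `ModelRequest` (`request.model = ...`, `request.tools = ...`, `request.system_prompt = ...`) with a derived request built through `request.override(...)`, and the routed system prompt now travels as a `SystemMessage`. A minimal sketch of the same pattern in a custom middleware (the class name and prompt are illustrative, not part of this package, and it assumes `AgentMiddleware` is importable from `langchain.agents.middleware` under the new `langchain>=1.1.0` floor):

```python
from typing import Callable

from langchain.agents.middleware import AgentMiddleware, ModelRequest, ModelResponse
from langchain.agents.middleware.types import ModelCallResult
from langchain_core.messages import SystemMessage


class PoliteToneMiddleware(AgentMiddleware):
    """Illustrative middleware: swap the system message without mutating the request."""

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelCallResult:
        # Collect replacement fields, then hand the handler a derived request,
        # mirroring the override_kwargs pattern in the hunks above.
        override_kwargs = {"system_message": SystemMessage(content="Always answer politely.")}
        return handler(request.override(**override_kwargs))
```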

--- a/langchain_dev_utils/agents/middleware/plan.py
+++ b/langchain_dev_utils/agents/middleware/plan.py
@@ -1,5 +1,5 @@
 import json
-from typing import Awaitable, Callable, Literal, Optional
+from typing import Awaitable, Callable, Literal, Optional, cast
 from typing import NotRequired
 
 from langchain.agents.middleware import ModelRequest, ModelResponse
@@ -9,7 +9,7 @@ from langchain.agents.middleware.types import (
     ModelCallResult,
 )
 from langchain.tools import BaseTool, ToolRuntime, tool
-from langchain_core.messages import ToolMessage
+from langchain_core.messages import SystemMessage, ToolMessage
 from langgraph.types import Command
 from typing_extensions import TypedDict
 
@@ -353,21 +353,33 @@ class PlanMiddleware(AgentMiddleware):
         request: ModelRequest,
         handler: Callable[[ModelRequest], ModelResponse],
     ) -> ModelCallResult:
-        request.system_prompt = (
-            request.system_prompt + "\n\n" + self.system_prompt
-            if request.system_prompt
-            else self.system_prompt
+        """Update the system message to include the plan system prompt."""
+        if request.system_message is not None:
+            new_system_content = [
+                *request.system_message.content_blocks,
+                {"type": "text", "text": f"\n\n{self.system_prompt}"},
+            ]
+        else:
+            new_system_content = [{"type": "text", "text": self.system_prompt}]
+        new_system_message = SystemMessage(
+            content=cast("list[str | dict[str, str]]", new_system_content)
         )
-        return handler(request)
+        return handler(request.override(system_message=new_system_message))
 
     async def awrap_model_call(
         self,
         request: ModelRequest,
         handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
     ) -> ModelCallResult:
-        request.system_prompt = (
-            request.system_prompt + "\n\n" + self.system_prompt
-            if request.system_prompt
-            else self.system_prompt
+        """Update the system message to include the plan system prompt."""
+        if request.system_message is not None:
+            new_system_content = [
+                *request.system_message.content_blocks,
+                {"type": "text", "text": f"\n\n{self.system_prompt}"},
+            ]
+        else:
+            new_system_content = [{"type": "text", "text": self.system_prompt}]
+        new_system_message = SystemMessage(
+            content=cast("list[str | dict[str, str]]", new_system_content)
         )
-        return await handler(request)
+        return await handler(request.override(system_message=new_system_message))
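
The new implementation composes the system message from content blocks instead of concatenating strings, so an existing system message of any content shape is preserved. A small sketch of that composition in isolation (message texts are illustrative):

```python
from langchain_core.messages import SystemMessage

base = SystemMessage(content="You are a helpful assistant.")

# content_blocks normalizes string content into [{"type": "text", "text": ...}],
# so the extra prompt can be appended as one more text block.
extended = SystemMessage(
    content=[
        *base.content_blocks,
        {"type": "text", "text": "\n\nKeep the todo list up to date."},
    ]
)
```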

--- a/langchain_dev_utils/agents/middleware/summarization.py
+++ b/langchain_dev_utils/agents/middleware/summarization.py
@@ -1,55 +1,81 @@
-from typing import Optional
+from typing import Any
 
 from langchain.agents.middleware.summarization import (
+    ContextSize,
+    DEFAULT_SUMMARY_PROMPT,
     SummarizationMiddleware as _SummarizationMiddleware,
     TokenCounter,
+    _DEFAULT_MESSAGES_TO_KEEP,
+    _DEFAULT_TRIM_TOKEN_LIMIT,
 )
+from langchain_core.messages.utils import count_tokens_approximately
 
 from langchain_dev_utils.chat_models.base import load_chat_model
 
 
 class SummarizationMiddleware(_SummarizationMiddleware):
-    """Initialize the summarization middleware.
+    """Initialize summarization middleware.
 
     Args:
-        model: The language model to use for generating summaries. Only string identifiers are supported.
-        max_tokens_before_summary: Token threshold to trigger summarization.
-            If `None`, summarization is disabled.
-        messages_to_keep: Number of recent messages to preserve after summarization.
+        model: The language model to use for generating summaries.
+        trigger: One or more thresholds that trigger summarization.
+
+            Provide a single `ContextSize` tuple or a list of tuples, in which case
+            summarization runs when any threshold is breached.
+
+            Examples: `("messages", 50)`, `("tokens", 3000)`, `[("fraction", 0.8),
+            ("messages", 100)]`.
+        keep: Context retention policy applied after summarization.
+
+            Provide a `ContextSize` tuple to specify how much history to preserve.
+
+            Defaults to keeping the most recent 20 messages.
+
+            Examples: `("messages", 20)`, `("tokens", 3000)`, or
+            `("fraction", 0.3)`.
         token_counter: Function to count tokens in messages.
         summary_prompt: Prompt template for generating summaries.
-        summary_prefix: Prefix added to system message when including summary.
+        trim_tokens_to_summarize: Maximum tokens to keep when preparing messages for
+            the summarization call.
+
+            Pass `None` to skip trimming entirely.
 
     Examples:
         ```python
        from langchain_dev_utils.agents.middleware import SummarizationMiddleware
 
-       middleware = SummarizationMiddleware(model="vllm:qwen3-4b", max_tokens_before_summary=100)
+       middleware = SummarizationMiddleware(
+           model="vllm:qwen3-4b",
+           trigger=("tokens", 100),
+           keep=("messages", 2),
+       )
        ```
     """
 
     def __init__(
         self,
         model: str,
-        max_tokens_before_summary: Optional[int] = None,
-        messages_to_keep: Optional[int] = None,
-        token_counter: Optional[TokenCounter] = None,
-        summary_prompt: Optional[str] = None,
-        summary_prefix: Optional[str] = None,
+        *,
+        trigger: ContextSize | list[ContextSize] | None = None,
+        keep: ContextSize = ("messages", _DEFAULT_MESSAGES_TO_KEEP),
+        token_counter: TokenCounter = count_tokens_approximately,
+        summary_prompt: str = DEFAULT_SUMMARY_PROMPT,
+        trim_tokens_to_summarize: int | None = _DEFAULT_TRIM_TOKEN_LIMIT,
+        **deprecated_kwargs: Any,
     ) -> None:
         chat_model = load_chat_model(model)
 
         middleware_kwargs = {}
-        if max_tokens_before_summary is not None:
-            middleware_kwargs["max_tokens_before_summary"] = max_tokens_before_summary
-        if messages_to_keep is not None:
-            middleware_kwargs["messages_to_keep"] = messages_to_keep
+        if trigger is not None:
+            middleware_kwargs["trigger"] = trigger
+        if keep is not None:
+            middleware_kwargs["keep"] = keep
         if token_counter is not None:
            middleware_kwargs["token_counter"] = token_counter
         if summary_prompt is not None:
            middleware_kwargs["summary_prompt"] = summary_prompt
-        if summary_prefix is not None:
-            middleware_kwargs["summary_prefix"] = summary_prefix
+        if trim_tokens_to_summarize is not None:
+            middleware_kwargs["trim_tokens_to_summarize"] = trim_tokens_to_summarize
 
         super().__init__(
             model=chat_model,
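
The wrapper now forwards upstream's `trigger`/`keep` `ContextSize` API and drops the removed `max_tokens_before_summary`, `messages_to_keep`, and `summary_prefix` parameters. A usage sketch based on the docstring above (the `vllm:qwen3-4b` identifier is illustrative and assumes a registered `vllm` provider):

```python
from langchain_dev_utils.agents.middleware import SummarizationMiddleware

# Summarize when either threshold is breached, then keep the newest 20 messages.
middleware = SummarizationMiddleware(
    model="vllm:qwen3-4b",
    trigger=[("fraction", 0.8), ("messages", 100)],
    keep=("messages", 20),
)
```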

--- a/langchain_dev_utils/chat_models/adapters/openai_compatible.py
+++ b/langchain_dev_utils/chat_models/adapters/openai_compatible.py
@@ -38,37 +38,33 @@ from pydantic import (
 )
 from typing_extensions import Self
 
-from ..types import ToolChoiceType
+from ..types import ProviderConfig, ToolChoiceType
 
 _BM = TypeVar("_BM", bound=BaseModel)
 _DictOrPydanticClass = Union[dict[str, Any], type[_BM], type]
 _DictOrPydantic = Union[dict, _BM]
 
 
-class _ModelProviderConfigType(BaseModel):
-    supported_tool_choice: ToolChoiceType = Field(default_factory=list)
-    keep_reasoning_content: bool = Field(default=False)
-    support_json_mode: bool = Field(default=False)
-
-
 class _BaseChatOpenAICompatible(BaseChatOpenAI):
     """
     Base template class for OpenAI-compatible chat model implementations.
 
-    This class provides a foundation for integrating various LLM providers that offer
-    OpenAI-compatible APIs (such as vLLM, OpenRouter, ZAI, Moonshot, and many others).
+    This class provides a foundation for integrating various LLM providers that offer OpenAI-compatible APIs (such as vLLM, OpenRouter, ZAI, Moonshot, and many others).
     It enhances the base OpenAI functionality by:
-    1. Supporting `reasoning_content` generation and parsing.
-    2. Modifying the default implementation method for structured outputs to ensure broader compatibility.
-    3. Improving error messages when API responses are invalid.
 
-    Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class extends
-    capabilities to better support diverse OpenAI-compatible model providers while
-    maintaining full compatibility with LangChain's chat model interface.
+    **1. Supports more types of reasoning content (`reasoning_content`)**
+    ChatOpenAI can only output reasoning content natively supported by official OpenAI models, while OpenAICompatibleChatModel can also output reasoning content from other model providers (e.g., OpenRouter).
+
+    **2. Optimizes the default behavior for structured output**
+    When calling `with_structured_output`, the default value of the `method` parameter is "function_calling" (instead of ChatOpenAI's default "json_schema"), providing better compatibility with other models.
+
+    **3. Supports provider-specific configuration**
+    Where parameters differ from the official OpenAI API, the `provider_config` parameter closes the gap. For example, when model providers have inconsistent support for `tool_choice`, you can adapt by setting `supported_tool_choice` in `provider_config`.
+
+    Built on top of `langchain-openai`'s `BaseChatOpenAI`, this template class extends capabilities to better support diverse OpenAI-compatible model providers while maintaining full compatibility with LangChain's chat model interface.
 
     Note: This is a template class and should not be exported or instantiated directly.
-    Instead, use it as a base class and provide the specific provider name through
-    inheritance or the factory function `_create_openai_compatible_model()`.
+    Instead, use it as a base class and provide the specific provider name through inheritance or the factory function `_create_openai_compatible_model()`.
     """
 
     model_name: str = Field(alias="model", default="openai compatible model")
@@ -86,21 +82,21 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
 
     _provider: str = PrivateAttr(default="openai-compatible")
 
-    provider_config: _ModelProviderConfigType = Field(
-        default_factory=lambda: _ModelProviderConfigType(),
-    )
+    supported_tool_choice: ToolChoiceType = Field(default_factory=list)
+    keep_reasoning_content: bool = Field(default=False)
+    support_json_mode: bool = Field(default=False)
 
     @property
     def _supported_tool_choice(self) -> ToolChoiceType:
-        return self.provider_config.supported_tool_choice
+        return self.supported_tool_choice
 
     @property
     def _keep_reasoning_content(self) -> bool:
-        return self.provider_config.keep_reasoning_content
+        return self.keep_reasoning_content
 
     @property
     def _support_json_mode(self) -> bool:
-        return self.provider_config.support_json_mode
+        return self.support_json_mode
 
     @property
     def _llm_type(self) -> str:
@@ -186,6 +182,13 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         self.async_client = self.root_async_client.chat.completions
         return self
 
+    @model_validator(mode="after")
+    def _set_model_profile(self) -> Self:
+        """Set model profile if not overridden."""
+        if self.profile is None:
+            self.profile = {}
+        return self
+
     def _create_chat_result(
         self,
         response: Union[dict, openai.BaseModel],
@@ -453,7 +456,10 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
 
 
 def _create_openai_compatible_model(
-    provider: str, base_url: str
+    provider: str,
+    base_url: str,
+    provider_config: Optional[ProviderConfig] = None,
+    chat_model_cls_name: Optional[str] = None,
 ) -> Type[_BaseChatOpenAICompatible]:
     """Factory function for creating provider-specific OpenAI-compatible model classes.
 
@@ -461,16 +467,20 @@ def _create_openai_compatible_model(
     configuring environment variable mappings and default base URLs specific to each provider.
 
     Args:
-        provider: Provider identifier (e.g., `vllm`)
+        provider: Provider identifier (e.g., `vllm`, `openrouter`)
         base_url: Default API base URL for the provider
-        tool_choice: List of tool choices for the model (e.g., ["auto", "none", "any", "required", "specific"])
-        keep_reasoning_content: Whether to keep reasoning content in the messages
+        provider_config: Optional configuration for the provider
+        chat_model_cls_name: Optional custom name for the chat model class
 
     Returns:
         Configured model class ready for instantiation with provider-specific settings
     """
+    chat_model_cls_name = chat_model_cls_name or f"Chat{provider.title()}"
+
+    if provider_config is None:
+        provider_config = {}
     return create_model(
-        f"Chat{provider.title()}",
+        chat_model_cls_name,
         __base__=_BaseChatOpenAICompatible,
         api_base=(
             str,
@@ -492,4 +502,16 @@
             str,
             PrivateAttr(default=provider),
         ),
+        supported_tool_choice=(
+            ToolChoiceType,
+            Field(default=provider_config.get("supported_tool_choice", ["auto"])),
+        ),
+        keep_reasoning_content=(
+            bool,
+            Field(default=provider_config.get("keep_reasoning_content", False)),
+        ),
+        support_json_mode=(
+            bool,
+            Field(default=provider_config.get("support_json_mode", False)),
+        ),
     )
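
Per the rewritten docstring, `with_structured_output` on the generated classes defaults to `method="function_calling"` instead of ChatOpenAI's `"json_schema"`. A hedged sketch (the schema and model identifier are illustrative and assume a registered `vllm` provider):

```python
from pydantic import BaseModel

from langchain_dev_utils.chat_models.base import load_chat_model


class Joke(BaseModel):
    setup: str
    punchline: str


model = load_chat_model("vllm:qwen3-4b")
# No method= argument needed: the compatible adapter defaults to
# "function_calling", which most OpenAI-compatible backends support.
structured_model = model.with_structured_output(Joke)
```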

--- a/langchain_dev_utils/chat_models/base.py
+++ b/langchain_dev_utils/chat_models/base.py
@@ -5,17 +5,11 @@ from langchain_core.language_models.chat_models import BaseChatModel
 from langchain_core.utils import from_env
 from pydantic import BaseModel
 
-from .types import ChatModelType, ToolChoiceType
+from .types import ChatModelType, ProviderConfig
 
 _MODEL_PROVIDERS_DICT = {}
 
 
-class ProviderConfig(TypedDict):
-    supported_tool_choice: NotRequired[ToolChoiceType]
-    keep_reasoning_content: NotRequired[bool]
-    support_json_mode: NotRequired[bool]
-
-
 class ChatModelProvider(TypedDict):
     provider_name: str
     chat_model: ChatModelType
@@ -96,11 +90,6 @@
     model, model_provider = _parse_model(model, model_provider)
     if model_provider in _MODEL_PROVIDERS_DICT.keys():
         chat_model = _MODEL_PROVIDERS_DICT[model_provider]["chat_model"]
-        if provider_config := _MODEL_PROVIDERS_DICT[model_provider].get(
-            "provider_config"
-        ):
-            kwargs.update({"provider_config": provider_config})
-
         if base_url := _MODEL_PROVIDERS_DICT[model_provider].get("base_url"):
             url_key = _get_base_url_field_name(chat_model)
             if url_key:
@@ -167,6 +156,7 @@
         chat_model = _create_openai_compatible_model(
             provider_name,
             base_url,
+            provider_config=provider_config,
         )
         _MODEL_PROVIDERS_DICT.update(
             {
@@ -274,10 +264,6 @@
        ... )
        >>> model.invoke("Hello, how are you?")
    """
-    if "provider_config" in kwargs:
-        raise ValueError(
-            "provider_config is not a valid parameter in load_chat_model ,you can only set it when register model provider"
-        )
    return _load_chat_model_helper(
        cast(str, model),
        model_provider=model_provider,

--- a/langchain_dev_utils/chat_models/types.py
+++ b/langchain_dev_utils/chat_models/types.py
@@ -1,4 +1,4 @@
-from typing import Literal, Union
+from typing import Literal, NotRequired, TypedDict, Union
 
 from langchain_core.language_models.chat_models import BaseChatModel
 
@@ -7,3 +7,9 @@ ChatModelType = Union[type[BaseChatModel], Literal["openai-compatible"]]
 
 
 ToolChoiceType = list[Literal["auto", "none", "required", "specific"]]
+
+
+class ProviderConfig(TypedDict):
+    supported_tool_choice: NotRequired[ToolChoiceType]
+    keep_reasoning_content: NotRequired[bool]
+    support_json_mode: NotRequired[bool]
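
With `ProviderConfig` relocated to `types.py`, provider quirks are now declared once at registration time and baked into the generated class as field defaults; the per-call `provider_config` kwarg (and its `ValueError` guard in `load_chat_model`) is gone. A sketch of the intended flow, assuming `register_model_provider` accepts `provider_config` as the `base.py` hunk above suggests (provider and model names are illustrative):

```python
from langchain_dev_utils.chat_models.base import load_chat_model, register_model_provider

# Declare provider quirks once, at registration time...
register_model_provider(
    provider_name="openrouter",
    chat_model="openai-compatible",
    base_url="https://openrouter.ai/api/v1",
    provider_config={
        "supported_tool_choice": ["auto", "required"],
        "keep_reasoning_content": True,
    },
)

# ...then load models without passing provider_config per call.
model = load_chat_model("openrouter:deepseek/deepseek-chat")
```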

--- a/langchain_dev_utils-1.2.0.dist-info/METADATA
+++ b/langchain_dev_utils-1.2.2.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-dev-utils
-Version: 1.2.0
+Version: 1.2.2
 Summary: A practical utility library for LangChain and LangGraph development
 Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
 Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
@@ -8,7 +8,7 @@ Project-URL: documentation, https://tbice123123.github.io/langchain-dev-utils-do
 Author-email: tiebingice <tiebingice123@outlook.com>
 License-File: LICENSE
 Requires-Python: >=3.11
-Requires-Dist: langchain>=1.0.0
+Requires-Dist: langchain>=1.1.0
 Requires-Dist: langgraph>=1.0.0
 Provides-Extra: standard
 Requires-Dist: langchain-openai; extra == 'standard'
@@ -29,7 +29,7 @@ Description-Content-Type: text/markdown
 [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
 [![Python](https://img.shields.io/badge/python-3.11|3.12|3.13|3.14-%2334D058)](https://www.python.org/downloads)
 [![Downloads](https://static.pepy.tech/badge/langchain-dev-utils/month)](https://pepy.tech/project/langchain-dev-utils)
-[![Documentation](https://img.shields.io/badge/docs-latest-blue)](https://tbice123123.github.io/langchain-dev-utils-docs/zh/)
+[![Documentation](https://img.shields.io/badge/docs-latest-blue)](https://tbice123123.github.io/langchain-dev-utils-docs/en/)
 
 > This is the English version. For the Chinese version, please visit [Chinese Documentation](https://github.com/TBice123123/langchain-dev-utils/blob/master/README_cn.md)
 
--- a/langchain_dev_utils-1.2.0.dist-info/RECORD
+++ b/langchain_dev_utils-1.2.2.dist-info/RECORD
@@ -1,4 +1,4 @@
-langchain_dev_utils/__init__.py,sha256=MpAT5hgNoHnTtG1XRD_GV_A7QrHVU6vJjGSw_8qMGA4,22
+langchain_dev_utils/__init__.py,sha256=uuf4VNtTNA93fMhoAur9YafzaKJFnczY-H1SSCSuRVQ,22
 langchain_dev_utils/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain_dev_utils/agents/__init__.py,sha256=e17SMQdJIQngbUCr2N1tY-yw0tD3tEnH7PSvyDmVPeQ,127
 langchain_dev_utils/agents/factory.py,sha256=h4uAkid2NJMMs4qV6RYFexew-ixfhEvpa254eDeDEcU,3912
@@ -7,16 +7,16 @@ langchain_dev_utils/agents/plan.py,sha256=ydJuJLlNydheQvLPl2uCc3TBVv42YxGzPhKgtl
 langchain_dev_utils/agents/wrap.py,sha256=4BWksU9DRz8c3ZHQiUi4GHwGhNysDLNs8pmLWV7BeAI,5165
 langchain_dev_utils/agents/middleware/__init__.py,sha256=cjrb8Rue5uukl9pKPF7CjSrHtcYsUBj3Mdvv2szlp7E,679
 langchain_dev_utils/agents/middleware/model_fallback.py,sha256=cvTj_sOw3r4B4ErMAVdsrniMImWnUpLMECmQErxdsUU,1688
-langchain_dev_utils/agents/middleware/model_router.py,sha256=YkaPpYmIZaGj--YlUjm7dVcNzRt3Au317eor4SDYsQs,8799
-langchain_dev_utils/agents/middleware/plan.py,sha256=ew9AiJLgLawnlBpiqBPXxyT9CZ1i4aqtwlbi_Qbly4w,15282
-langchain_dev_utils/agents/middleware/summarization.py,sha256=Ws-_cxSQQfa5rn5Spq1gSLpgIleUCno3QmWRvN4-u9E,2213
+langchain_dev_utils/agents/middleware/model_router.py,sha256=bq-sQ7wmvpeRM3cjFPErLQxiWfS6l5nEVBN01NNWjAU,9077
+langchain_dev_utils/agents/middleware/plan.py,sha256=lJClVTD0h1_fyu_aTqpWIMKjPWm52YCeDx_TKBgfLI8,16083
+langchain_dev_utils/agents/middleware/summarization.py,sha256=BtWPJcQBssGAT0nb1c0xsGEOsb8x5sAAE6xqujYjHhY,3027
 langchain_dev_utils/agents/middleware/tool_emulator.py,sha256=u9rV24yUB-dyc1uUfUe74B1wOGVI3TZRwxkE1bvGm18,2025
 langchain_dev_utils/agents/middleware/tool_selection.py,sha256=ZqdyK4Yhp2u3GM6B_D6U7Srca9vy1o7s6N_LrV24-dQ,3107
 langchain_dev_utils/chat_models/__init__.py,sha256=YSLUyHrWEEj4y4DtGFCOnDW02VIYZdfAH800m4Klgeg,224
-langchain_dev_utils/chat_models/base.py,sha256=l2833l9RuTnS3OecoRL2RWeYc17kLWoC6nuWWUpo5FU,11232
-langchain_dev_utils/chat_models/types.py,sha256=oPXFsfho9amnwek5v3ey8LcnsfKVzecWSJcKVBG4ETc,261
+langchain_dev_utils/chat_models/base.py,sha256=d4kGadmX-AfMSHqlELflEbkdqCUlOfiOqzCv9_fTYU8,10711
+langchain_dev_utils/chat_models/types.py,sha256=FM_RyiGRTb1dy59MovhDYM4Kj9cpybt2BFha0e2u0qA,468
 langchain_dev_utils/chat_models/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=6ZTRCFqgW8fk8nbZs0OarmuHP5M6wr-0mbFogZuLTWY,18409
+langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=wR7Uym9rQWsxhsBt8LrE30dayWcMnbsU8AXC4kOtnyI,19719
 langchain_dev_utils/embeddings/__init__.py,sha256=zbEOaV86TUi9Zrg_dH9dpdgacWg31HMJTlTQknA9EKk,244
 langchain_dev_utils/embeddings/base.py,sha256=25ebUEaf7075h8NARgTHjAvwK6JddhHor5upiucJqu0,9686
 langchain_dev_utils/message_convert/__init__.py,sha256=xwjaQ1oJoc80xy70oQI4uW3gAmgV5JymJd5hgnA6s3g,458
@@ -29,7 +29,7 @@ langchain_dev_utils/pipeline/types.py,sha256=T3aROKKXeWvd0jcH5XkgMDQfEkLfPaiOhhV
 langchain_dev_utils/tool_calling/__init__.py,sha256=mu_WxKMcu6RoTf4vkTPbA1WSBSNc6YIqyBtOQ6iVQj4,322
 langchain_dev_utils/tool_calling/human_in_the_loop.py,sha256=nbaON9806pv5tpMRQUA_Ch3HJA5HBFgzZR7kQRf6PiY,9819
 langchain_dev_utils/tool_calling/utils.py,sha256=3cNv_Zx32KxdsGn8IkxjWUzxYEEwVJeJgTZTbfSg0pA,2751
-langchain_dev_utils-1.2.0.dist-info/METADATA,sha256=W2k20B9-sXvoum-6a1sdpOZlFMvnJf0pMqugYejo9JE,16305
-langchain_dev_utils-1.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-langchain_dev_utils-1.2.0.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
-langchain_dev_utils-1.2.0.dist-info/RECORD,,
+langchain_dev_utils-1.2.2.dist-info/METADATA,sha256=Gia4zy_wdyWGd_0HXYTsglTut7i-3or0z2Zzikbgq2Q,16305
+langchain_dev_utils-1.2.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+langchain_dev_utils-1.2.2.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
+langchain_dev_utils-1.2.2.dist-info/RECORD,,