langchain-dev-utils 1.3.3__py3-none-any.whl → 1.3.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_dev_utils/__init__.py +1 -1
- langchain_dev_utils/_utils.py +84 -1
- langchain_dev_utils/agents/middleware/format_prompt.py +1 -1
- langchain_dev_utils/agents/wrap.py +1 -1
- langchain_dev_utils/chat_models/adapters/__init__.py +3 -0
- langchain_dev_utils/chat_models/adapters/create_utils.py +53 -0
- langchain_dev_utils/chat_models/adapters/openai_compatible.py +81 -4
- langchain_dev_utils/chat_models/adapters/register_profiles.py +15 -0
- langchain_dev_utils/chat_models/base.py +6 -11
- langchain_dev_utils/embeddings/adapters/__init__.py +3 -0
- langchain_dev_utils/embeddings/adapters/create_utils.py +45 -0
- langchain_dev_utils/embeddings/adapters/openai_compatible.py +91 -0
- langchain_dev_utils/embeddings/base.py +13 -26
- langchain_dev_utils/message_convert/__init__.py +15 -15
- langchain_dev_utils/message_convert/format.py +69 -69
- {langchain_dev_utils-1.3.3.dist-info → langchain_dev_utils-1.3.5.dist-info}/METADATA +1 -1
- {langchain_dev_utils-1.3.3.dist-info → langchain_dev_utils-1.3.5.dist-info}/RECORD +19 -14
- {langchain_dev_utils-1.3.3.dist-info → langchain_dev_utils-1.3.5.dist-info}/WHEEL +0 -0
- {langchain_dev_utils-1.3.3.dist-info → langchain_dev_utils-1.3.5.dist-info}/licenses/LICENSE +0 -0
langchain_dev_utils/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = "1.3.3"
+__version__ = "1.3.5"
langchain_dev_utils/_utils.py
CHANGED
@@ -1,5 +1,5 @@
 from importlib import util
-from typing import Literal
+from typing import Literal, Optional
 
 from pydantic import BaseModel
 
@@ -41,3 +41,86 @@ def _get_base_url_field_name(model_cls: type[BaseModel]) -> str | None:
         return "api_base"
 
     return None
+
+
+def _validate_base_url(base_url: Optional[str] = None) -> None:
+    """Validate base URL format.
+
+    Args:
+        base_url: Base URL to validate
+
+    Raises:
+        ValueError: If base URL is not a valid HTTP or HTTPS URL
+    """
+    if base_url is None:
+        return
+
+    from urllib.parse import urlparse
+
+    parsed = urlparse(base_url.strip())
+
+    if not parsed.scheme or not parsed.netloc:
+        raise ValueError(
+            f"base_url must be a valid HTTP or HTTPS URL. Received: {base_url}"
+        )
+
+    if parsed.scheme not in ("http", "https"):
+        raise ValueError(
+            f"base_url must use HTTP or HTTPS protocol. Received: {parsed.scheme}"
+        )
+
+
+def _validate_model_cls_name(model_cls_name: str) -> None:
+    """Validate model class name follows Python naming conventions.
+
+    Args:
+        model_cls_name: Class name to validate
+
+    Raises:
+        ValueError: If class name is invalid
+    """
+    if not model_cls_name:
+        raise ValueError("model_cls_name cannot be empty")
+
+    if not model_cls_name[0].isalpha():
+        raise ValueError(
+            f"model_cls_name must start with a letter. Received: {model_cls_name}"
+        )
+
+    if not all(c.isalnum() or c == "_" for c in model_cls_name):
+        raise ValueError(
+            f"model_cls_name can only contain letters, numbers, and underscores. Received: {model_cls_name}"
+        )
+
+    if model_cls_name[0].islower():
+        raise ValueError(
+            f"model_cls_name should start with an uppercase letter (PEP 8). Received: {model_cls_name}"
+        )
+
+
+def _validate_provider_name(provider_name: str) -> None:
+    """Validate provider name follows Python naming conventions.
+
+    Args:
+        provider_name: Provider name to validate
+
+    Raises:
+        ValueError: If provider name is invalid
+    """
+    if not provider_name:
+        raise ValueError("provider_name cannot be empty")
+
+    if not provider_name[0].isalnum():
+        raise ValueError(
+            f"provider_name must start with a letter. Received: {provider_name}"
+        )
+
+    if not all(c.isalnum() or c == "_" for c in provider_name):
+        raise ValueError(
+            f"provider_name can only contain letters, numbers, underscores. Received: {provider_name}"
+        )
+
+    if len(provider_name) > 20:
+        raise ValueError(
+            f"provider_name must be 20 characters or fewer. Received: {provider_name}"
+        )
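The three validators above are private helpers, but their contracts are simple to exercise; a minimal sketch, assuming they are imported from langchain_dev_utils._utils exactly as defined in this hunk:

    from langchain_dev_utils._utils import (
        _validate_base_url,
        _validate_model_cls_name,
        _validate_provider_name,
    )

    # Each helper returns None on success and raises ValueError otherwise.
    _validate_base_url("http://localhost:8000")  # http/https scheme plus host: OK
    _validate_provider_name("vllm")              # letters/digits/underscores, <= 20 chars: OK
    _validate_model_cls_name("ChatVLLM")         # starts uppercase, alphanumeric/underscores: OK

    try:
        _validate_base_url("localhost:8000")     # urlparse yields no usable scheme/netloc
    except ValueError as exc:
        print(exc)  # base_url must be a valid HTTP or HTTPS URL. Received: localhost:8000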
langchain_dev_utils/agents/middleware/format_prompt.py
CHANGED
@@ -11,7 +11,7 @@ def format_prompt(request: ModelRequest) -> str:
     Variables are first resolved from the state, then from the context if not found.
 
     Example:
-        >>> from langchain_dev_utils.agents.middleware
+        >>> from langchain_dev_utils.agents.middleware import format_prompt
         >>> from langchain.agents import create_agent
         >>> from langchain_core.messages import HumanMessage
         >>> from dataclasses import dataclass
langchain_dev_utils/agents/wrap.py
CHANGED
@@ -171,7 +171,7 @@ def wrap_all_agents_as_tool(
     Example:
         >>> from langchain_dev_utils.agents import wrap_all_agents_as_tool, create_agent
         >>>
-        >>>
+        >>> call_agent_tool = wrap_all_agents_as_tool(
         ...     [time_agent,weather_agent],
         ...     tool_name="call_sub_agents",
         ...     tool_description="Used to invoke the sub-agents to perform tasks"
langchain_dev_utils/chat_models/adapters/create_utils.py
ADDED
@@ -0,0 +1,53 @@
+from typing import Any, Optional, cast
+
+from langchain_core.utils import from_env
+
+from langchain_dev_utils._utils import _check_pkg_install
+
+from ..types import CompatibilityOptions
+
+
+def create_openai_compatible_model(
+    model_provider: str,
+    base_url: Optional[str] = None,
+    compatibility_options: Optional[CompatibilityOptions] = None,
+    model_profiles: Optional[dict[str, dict[str, Any]]] = None,
+    chat_model_cls_name: Optional[str] = None,
+):
+    """Factory function for creating provider-specific OpenAI-compatible model classes.
+
+    Dynamically generates model classes for different OpenAI-compatible providers,
+    configuring environment variable mappings and default base URLs specific to each provider.
+
+    Args:
+        model_provider (str): Identifier for the OpenAI-compatible provider (e.g. `vllm`, `moonshot`)
+        base_url (Optional[str], optional): Default API base URL for the provider. Defaults to None. If not provided, will try to use the environment variable.
+        compatibility_options (Optional[CompatibilityOptions], optional): Optional configuration for compatibility options with the provider. Defaults to None.
+        model_profiles (Optional[dict[str, dict[str, Any]]], optional): Optional model profiles for the provider. Defaults to None.
+        chat_model_cls_name (Optional[str], optional): Optional custom class name for the generated model. Defaults to None.
+    Returns:
+        Type[_BaseChatOpenAICompatible]: Configured model class ready for instantiation with provider-specific settings
+
+    Examples:
+        >>> from langchain_dev_utils.chat_models.adapters import create_openai_compatible_chat_model
+        >>> ChatVLLM = create_openai_compatible_chat_model(
+        ...     "vllm",
+        ...     base_url="http://localhost:8000",
+        ...     chat_model_cls_name="ChatVLLM",
+        ... )
+        >>> model = ChatVLLM(model="qwen3-4b")
+        >>> model.invoke("hello")
+    """
+    _check_pkg_install("langchain_openai")
+    from .openai_compatible import _create_openai_compatible_model
+
+    base_url = (
+        base_url or from_env(f"{model_provider.upper()}_API_BASE", default=None)()
+    )
+    return _create_openai_compatible_model(
+        chat_model_cls_name=chat_model_cls_name,
+        provider=model_provider,
+        base_url=cast(str, base_url),
+        compatibility_options=compatibility_options,
+        profiles=model_profiles,
+    )
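A sketch of the factory's full parameter surface, since the doctest covers only the class-name path. The option keys and their allowed values below come from _validate_compatibility_options later in this diff; the provider name, URL, and profile payload are illustrative placeholders, and the import assumes the adapters package re-exports the factory under its defined name:

    from langchain_dev_utils.chat_models.adapters import create_openai_compatible_model

    ChatExample = create_openai_compatible_model(
        "example_provider",                     # validated: letters/digits/_ and <= 20 chars
        base_url="https://api.example.com/v1",  # validated: http or https URL
        compatibility_options={
            "supported_tool_choice": ["auto", "none", "required"],
            "supported_response_format": ["json_mode"],
            "reasoning_keep_policy": "never",
            "include_usage": True,
        },
        # model_profiles is keyed by model name; "max_input_tokens" is illustrative only.
        model_profiles={"example-model": {"max_input_tokens": 128000}},
        chat_model_cls_name="ChatExample",
    )
    model = ChatExample(model="example-model")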
langchain_dev_utils/chat_models/adapters/openai_compatible.py
CHANGED
@@ -12,6 +12,7 @@ from typing import (
     Type,
     TypeVar,
     Union,
+    cast,
 )
 
 import openai
@@ -19,7 +20,11 @@ from langchain_core.callbacks import (
     AsyncCallbackManagerForLLMRun,
     CallbackManagerForLLMRun,
 )
-from langchain_core.language_models import
+from langchain_core.language_models import (
+    LangSmithParams,
+    LanguageModelInput,
+    ModelProfile,
+)
 from langchain_core.messages import (
     AIMessage,
     AIMessageChunk,
@@ -45,12 +50,21 @@ from pydantic import (
 )
 from typing_extensions import Self
 
+from ..._utils import (
+    _validate_base_url,
+    _validate_model_cls_name,
+    _validate_provider_name,
+)
 from ..types import (
     CompatibilityOptions,
     ReasoningKeepPolicy,
     ResponseFormatType,
     ToolChoiceType,
 )
+from .register_profiles import (
+    _get_profile_by_provider_and_model,
+    _register_profile_with_provider,
+)
 
 _BM = TypeVar("_BM", bound=BaseModel)
 _DictOrPydanticClass = Union[dict[str, Any], type[_BM], type]
@@ -152,7 +166,7 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
     Note: This is a template class and should not be exported or instantiated
     directly. Instead, use it as a base class and provide the specific provider
     name through inheritance or the factory function
-    `
+    `create_openai_compatible_model()`.
     """
 
     model_name: str = Field(alias="model", default="openai compatible model")
@@ -283,7 +297,10 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
     def _set_model_profile(self) -> Self:
         """Set model profile if not overridden."""
         if self.profile is None:
-            self.profile =
+            self.profile = cast(
+                ModelProfile,
+                _get_profile_by_provider_and_model(self._provider, self.model_name),
+            )
         return self
 
     def _create_chat_result(
@@ -574,10 +591,57 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
         )
 
 
+def _validate_compatibility_options(
+    compatibility_options: Optional[CompatibilityOptions] = None,
+) -> None:
+    """Validate provider configuration against supported features.
+
+    Args:
+        compatibility_options: Optional configuration for the provider
+
+    Raises:
+        ValueError: If provider configuration is invalid
+    """
+    if compatibility_options is None:
+        compatibility_options = {}
+
+    if "supported_tool_choice" in compatibility_options:
+        _supported_tool_choice = compatibility_options["supported_tool_choice"]
+        for tool_choice in _supported_tool_choice:
+            if tool_choice not in ["auto", "none", "required", "specific"]:
+                raise ValueError(
+                    f"Unsupported tool_choice: {tool_choice}. Please choose from 'auto', 'none', 'required','specific'."
+                )
+
+    if "supported_response_format" in compatibility_options:
+        _supported_response_format = compatibility_options["supported_response_format"]
+        for response_format in _supported_response_format:
+            if response_format not in ["json_schema", "json_mode"]:
+                raise ValueError(
+                    f"Unsupported response_format: {response_format}. Please choose from 'json_schema', 'json_mode'."
+                )
+
+    if "reasoning_keep_policy" in compatibility_options:
+        _reasoning_keep_policy = compatibility_options["reasoning_keep_policy"]
+        if _reasoning_keep_policy not in ["never", "current", "all"]:
+            raise ValueError(
+                f"Unsupported reasoning_keep_policy: {_reasoning_keep_policy}. Please choose from 'never', 'current', 'all'."
+            )
+
+    if "include_usage" in compatibility_options:
+        _include_usage = compatibility_options["include_usage"]
+        if not isinstance(_include_usage, bool):
+            raise ValueError(
+                f"include_usage must be a boolean value. Received: {_include_usage}"
+            )
+
+
 def _create_openai_compatible_model(
     provider: str,
     base_url: str,
     compatibility_options: Optional[CompatibilityOptions] = None,
+    profiles: Optional[dict[str, dict[str, Any]]] = None,
+    chat_model_cls_name: Optional[str] = None,
 ) -> Type[_BaseChatOpenAICompatible]:
     """Factory function for creating provider-specific OpenAI-compatible model classes.
 
@@ -588,14 +652,27 @@ def _create_openai_compatible_model(
         provider: Provider identifier (e.g.`vllm`)
         base_url: Default API base URL for the provider
         compatibility_options: Optional configuration for the provider
+        profiles: Optional profiles for the provider
+        chat_model_cls_name: Optional name for the model class
 
     Returns:
        Configured model class ready for instantiation with provider-specific settings
     """
-    chat_model_cls_name = f"Chat{provider.title()}"
+    chat_model_cls_name = chat_model_cls_name or f"Chat{provider.title()}"
     if compatibility_options is None:
         compatibility_options = {}
 
+    if profiles is not None:
+        _register_profile_with_provider(provider, profiles)
+
+    _validate_compatibility_options(compatibility_options)
+
+    _validate_provider_name(provider)
+
+    _validate_model_cls_name(chat_model_cls_name)
+
+    _validate_base_url(base_url)
+
     return create_model(
         chat_model_cls_name,
         __base__=_BaseChatOpenAICompatible,
langchain_dev_utils/chat_models/adapters/register_profiles.py
ADDED
@@ -0,0 +1,15 @@
+from typing import Any
+
+_PROFILES = {}
+
+
+def _register_profile_with_provider(
+    provider_name: str, profile: dict[str, Any]
+) -> None:
+    _PROFILES.update({provider_name: profile})
+
+
+def _get_profile_by_provider_and_model(
+    provider_name: str, model_name: str
+) -> dict[str, Any]:
+    return _PROFILES.get(provider_name, {}).get(model_name, {})
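register_profiles.py is a minimal module-level registry: a dict keyed by provider name whose values map model names to profile dicts, with empty-dict fallbacks at both levels. A sketch of the lookup behavior (the profile payload is illustrative):

    from langchain_dev_utils.chat_models.adapters.register_profiles import (
        _get_profile_by_provider_and_model,
        _register_profile_with_provider,
    )

    # Register a provider's profiles, keyed by model name.
    _register_profile_with_provider("vllm", {"qwen3-4b": {"illustrative_field": True}})

    _get_profile_by_provider_and_model("vllm", "qwen3-4b")   # {'illustrative_field': True}
    _get_profile_by_provider_and_model("vllm", "other")      # {} (unknown model)
    _get_profile_by_provider_and_model("unknown", "qwen3")   # {} (unknown provider)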
langchain_dev_utils/chat_models/base.py
CHANGED
@@ -7,6 +7,7 @@ from langchain_core.utils import from_env
 from langchain_dev_utils._utils import (
     _check_pkg_install,
     _get_base_url_field_name,
+    _validate_provider_name,
 )
 
 from .types import ChatModelProvider, ChatModelType, CompatibilityOptions
@@ -126,6 +127,7 @@ def register_model_provider(
         >>> model = load_chat_model(model="vllm:qwen3-4b")
         >>> model.invoke("Hello")
     """
+    _validate_provider_name(provider_name)
     base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
     if isinstance(chat_model, str):
         _check_pkg_install("langchain_openai")
@@ -141,19 +143,12 @@ def register_model_provider(
                 "when chat_model is a string, the value must be 'openai-compatible'"
             )
         chat_model = _create_openai_compatible_model(
-            provider_name,
-            base_url,
+            provider=provider_name,
+            base_url=base_url,
             compatibility_options=compatibility_options,
+            profiles=model_profiles,
         )
-        _MODEL_PROVIDERS_DICT.update(
-            {
-                provider_name: {
-                    "chat_model": chat_model,
-                    "base_url": base_url,
-                    "model_profiles": model_profiles,
-                }
-            }
-        )
+        _MODEL_PROVIDERS_DICT.update({provider_name: {"chat_model": chat_model}})
     else:
         if base_url is not None:
             _MODEL_PROVIDERS_DICT.update(
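With this change, registering an OpenAI-compatible provider builds the chat-model class eagerly and the registry keeps only {"chat_model": ...}; base_url and model_profiles are baked into the generated class rather than stored alongside it. A sketch following the docstring's own vllm example, assuming both functions are exported from langchain_dev_utils.chat_models:

    from langchain_dev_utils.chat_models import load_chat_model, register_model_provider

    # "openai-compatible" is the only accepted string value for chat_model;
    # leaving base_url unset would fall back to the VLLM_API_BASE env var.
    register_model_provider(
        "vllm",
        chat_model="openai-compatible",
        base_url="http://localhost:8000/v1",
    )

    model = load_chat_model(model="vllm:qwen3-4b")
    model.invoke("Hello")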
langchain_dev_utils/embeddings/adapters/create_utils.py
ADDED
@@ -0,0 +1,45 @@
+from typing import Optional, cast
+
+from langchain_core.utils import from_env
+
+from langchain_dev_utils._utils import _check_pkg_install
+
+
+def create_openai_compatible_embedding(
+    embedding_provider: str,
+    base_url: Optional[str] = None,
+    embedding_model_cls_name: Optional[str] = None,
+):
+    """Factory function for creating provider-specific OpenAI-compatible embedding classes.
+
+    Dynamically generates embedding classes for different OpenAI-compatible providers,
+    configuring environment variable mappings and default base URLs specific to each provider.
+
+    Args:
+        embedding_provider (str): Identifier for the OpenAI-compatible provider (e.g. `vllm`, `moonshot`)
+        base_url (Optional[str], optional): Default API base URL for the provider. Defaults to None. If not provided, will try to use the environment variable.
+        embedding_model_cls_name (Optional[str], optional): Optional custom class name for the generated embedding. Defaults to None.
+    Returns:
+        Type[_BaseEmbeddingOpenAICompatible]: Configured embedding class ready for instantiation with provider-specific settings
+
+    Examples:
+        >>> from langchain_dev_utils.embeddings.adapters import create_openai_compatible_embedding
+        >>> VLLMEmbedding = create_openai_compatible_embedding(
+        ...     "vllm",
+        ...     base_url="http://localhost:8000",
+        ...     embedding_model_cls_name="VLLMEmbedding",
+        ... )
+        >>> model = VLLMEmbedding(model="qwen3-embedding-8b")
+        >>> model.embed_query("hello")
+    """
+    _check_pkg_install("langchain_openai")
+    from .openai_compatible import _create_openai_compatible_embedding
+
+    base_url = (
+        base_url or from_env(f"{embedding_provider.upper()}_API_BASE", default=None)()
+    )
+    return _create_openai_compatible_embedding(
+        provider=embedding_provider,
+        base_url=cast(str, base_url),
+        embeddings_cls_name=embedding_model_cls_name,
+    )
langchain_dev_utils/embeddings/adapters/openai_compatible.py
ADDED
@@ -0,0 +1,91 @@
+from typing import Optional, Type
+
+from langchain_core.utils import from_env, secret_from_env
+from langchain_openai.embeddings import OpenAIEmbeddings
+from pydantic import Field, SecretStr, create_model
+
+from ..._utils import (
+    _validate_base_url,
+    _validate_model_cls_name,
+    _validate_provider_name,
+)
+
+
+class _BaseEmbeddingOpenAICompatible(OpenAIEmbeddings):
+    """Base class for OpenAI-Compatible embeddings.
+
+    This class extends the OpenAIEmbeddings class to support
+    custom API keys and base URLs for OpenAI-Compatible models.
+
+    Note: This is a template class and should not be exported or instantiated
+    directly. Instead, use it as a base class and provide the specific provider
+    name through inheritance or the factory function
+    `create_openai_compatible_embedding()`.
+    """
+
+    openai_api_key: Optional[SecretStr] = Field(
+        default_factory=secret_from_env("OPENAI_COMPATIBLE_API_KEY", default=None),
+        alias="api_key",
+    )
+    """OpenAI Compatible API key"""
+    openai_api_base: str = Field(
+        default_factory=from_env("OPENAI_COMPATIBLE_API_BASE", default=""),
+        alias="base_url",
+    )
+    """OpenAI Compatible API base URL"""
+
+    check_embedding_ctx_length: bool = False
+    """Whether to check the token length of inputs and automatically split inputs
+    longer than embedding_ctx_length. Defaults to False. """
+
+
+def _create_openai_compatible_embedding(
+    provider: str,
+    base_url: str,
+    embeddings_cls_name: Optional[str] = None,
+) -> Type[_BaseEmbeddingOpenAICompatible]:
+    """Factory function for creating provider-specific OpenAI-compatible embeddings classes.
+
+    Dynamically generates embeddings classes for different OpenAI-compatible providers,
+    configuring environment variable mappings and default base URLs specific to each provider.
+
+    Args:
+        provider: Provider identifier (e.g.`vllm`)
+        base_url: Default API base URL for the provider
+        embeddings_cls_name: Optional custom class name for the generated embeddings. Defaults to None.
+
+    Returns:
+        Configured embeddings class ready for instantiation with provider-specific settings
+    """
+    embeddings_cls_name = embeddings_cls_name or f"{provider.title()}Embeddings"
+
+    if len(provider) >= 20:
+        raise ValueError(
+            f"provider must be less than 50 characters. Received: {provider}"
+        )
+
+    _validate_model_cls_name(embeddings_cls_name)
+    _validate_provider_name(provider)
+
+    _validate_base_url(base_url)
+
+    return create_model(
+        embeddings_cls_name,
+        __base__=_BaseEmbeddingOpenAICompatible,
+        openai_api_base=(
+            str,
+            Field(
+                default_factory=from_env(
+                    f"{provider.upper()}_API_BASE", default=base_url
+                ),
+            ),
+        ),
+        openai_api_key=(
+            str,
+            Field(
+                default_factory=secret_from_env(
+                    f"{provider.upper()}_API_KEY", default=None
+                ),
+            ),
+        ),
+    )
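The generated embeddings class resolves its settings per provider: openai_api_base falls back from the {PROVIDER}_API_BASE environment variable to the base_url captured at class-creation time, and openai_api_key is read from {PROVIDER}_API_KEY. A sketch of that precedence, with illustrative values:

    import os

    from langchain_dev_utils.embeddings.adapters import create_openai_compatible_embedding

    os.environ["VLLM_API_KEY"] = "dummy-key"  # illustrative; feeds the SecretStr api_key field

    VllmEmbeddings = create_openai_compatible_embedding(
        "vllm", base_url="http://localhost:8000/v1"
    )
    emb = VllmEmbeddings(model="qwen3-embedding-8b")
    # emb.openai_api_base is http://localhost:8000/v1 unless VLLM_API_BASE overrides it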
langchain_dev_utils/embeddings/base.py
CHANGED
@@ -1,11 +1,12 @@
 from typing import Any, Literal, NotRequired, Optional, TypedDict, Union
 
 from langchain.embeddings.base import _SUPPORTED_PROVIDERS, Embeddings, init_embeddings
-from langchain_core.utils import from_env
+from langchain_core.utils import from_env
 
 from langchain_dev_utils._utils import (
     _check_pkg_install,
     _get_base_url_field_name,
+    _validate_provider_name,
 )
 
 _EMBEDDINGS_PROVIDERS_DICT = {}
@@ -87,7 +88,7 @@ def register_embeddings_provider(
         >>> embeddings = load_embeddings("vllm:qwen3-embedding-4b")
         >>> embeddings.embed_query("hello world")
     """
-
+    _validate_provider_name(provider_name)
     base_url = base_url or from_env(f"{provider_name.upper()}_API_BASE", default=None)()
     if isinstance(embeddings_model, str):
         if base_url is None:
@@ -101,12 +102,16 @@ def register_embeddings_provider(
             )
 
         _check_pkg_install("langchain_openai")
+        from .adapters.openai_compatible import _create_openai_compatible_embedding
 
+        embeddings_model = _create_openai_compatible_embedding(
+            provider=provider_name,
+            base_url=base_url,
+        )
         _EMBEDDINGS_PROVIDERS_DICT.update(
             {
                 provider_name: {
                     "embeddings_model": embeddings_model,
-                    "base_url": base_url,
                 }
             }
         )
@@ -220,28 +225,10 @@ def load_embeddings(
 
     if provider in _EMBEDDINGS_PROVIDERS_DICT:
         embeddings = _EMBEDDINGS_PROVIDERS_DICT[provider]["embeddings_model"]
-        if
-
-
-
-
-                    f"API key for {provider} not found. Please set it in the environment."
-                )
-            kwargs["api_key"] = api_key
-            if embeddings == "openai-compatible":
-                kwargs["check_embedding_ctx_length"] = False
-                embeddings = "openai"
-            return init_embeddings(
-                model=model,
-                provider=embeddings,
-                base_url=_EMBEDDINGS_PROVIDERS_DICT[provider]["base_url"],
-                **kwargs,
-            )
-        else:
-            if base_url := _EMBEDDINGS_PROVIDERS_DICT[provider].get("base_url"):
-                url_key = _get_base_url_field_name(embeddings)
-                if url_key is not None:
-                    kwargs.update({url_key: base_url})
-            return embeddings(model=model, **kwargs)
+        if base_url := _EMBEDDINGS_PROVIDERS_DICT[provider].get("base_url"):
+            url_key = _get_base_url_field_name(embeddings)
+            if url_key is not None:
+                kwargs.update({url_key: base_url})
+        return embeddings(model=model, **kwargs)
     else:
         return init_embeddings(model, provider=provider, **kwargs)
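The docstring fragments visible in this hunk already show the intended flow; a sketch combining them, assuming register_embeddings_provider and load_embeddings are exported from langchain_dev_utils.embeddings:

    from langchain_dev_utils.embeddings import load_embeddings, register_embeddings_provider

    # As with chat models, the string form now builds the embeddings class up front,
    # so the registry stores only {"embeddings_model": <generated class>}.
    register_embeddings_provider(
        "vllm",
        embeddings_model="openai-compatible",
        base_url="http://localhost:8000/v1",
    )

    embeddings = load_embeddings("vllm:qwen3-embedding-4b")
    embeddings.embed_query("hello world")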
langchain_dev_utils/message_convert/__init__.py
CHANGED
@@ -1,15 +1,15 @@
-from .content import (
-    aconvert_reasoning_content_for_chunk_iterator,
-    convert_reasoning_content_for_ai_message,
-    convert_reasoning_content_for_chunk_iterator,
-    merge_ai_message_chunk,
-)
-from .format import format_sequence
-
-__all__ = [
-    "convert_reasoning_content_for_ai_message",
-    "convert_reasoning_content_for_chunk_iterator",
-    "aconvert_reasoning_content_for_chunk_iterator",
-    "merge_ai_message_chunk",
-    "format_sequence",
-]
+from .content import (
+    aconvert_reasoning_content_for_chunk_iterator,
+    convert_reasoning_content_for_ai_message,
+    convert_reasoning_content_for_chunk_iterator,
+    merge_ai_message_chunk,
+)
+from .format import format_sequence
+
+__all__ = [
+    "convert_reasoning_content_for_ai_message",
+    "convert_reasoning_content_for_chunk_iterator",
+    "aconvert_reasoning_content_for_chunk_iterator",
+    "merge_ai_message_chunk",
+    "format_sequence",
+]
langchain_dev_utils/message_convert/format.py
CHANGED
@@ -1,69 +1,69 @@
-from typing import Sequence, Union
-
-from langchain_core.documents import Document
-from langchain_core.messages import (
-    AIMessage,
-    BaseMessage,
-    HumanMessage,
-    SystemMessage,
-    ToolMessage,
-)
-
-
-def format_sequence(
-    inputs: Union[Sequence[Document], Sequence[BaseMessage], Sequence[str]],
-    separator: str = "-",
-    with_num: bool = False,
-) -> str:
-    """Convert a list of messages, documents, or strings into a formatted string.
-
-    This function extracts text content from various types (e.g., HumanMessage, Document)
-    and joins them into a single string. Optionally adds serial numbers and a custom
-    separator between items.
-
-    Args:
-        inputs: A list of inputs. Supported types:
-            - langchain_core.messages: HumanMessage, AIMessage, SystemMessage, ToolMessage
-            - langchain_core.documents.Document
-            - str
-        separator: The separator used to join the items. Defaults to "-".
-        with_num: If True, prefixes each item with a serial number (e.g., "1. Hello").
-            Defaults to False.
-
-    Returns:
-        A formatted string composed of the input contents, joined by `separator`.
-
-    Example:
-        # Format messages with default separator:
-        >>> from langchain_dev_utils.message_convert import format_sequence
-        >>> from langchain_core.messages import HumanMessage, AIMessage
-        >>> messages = [
-        ...     HumanMessage(content="Hello, how are you?"),
-        ...     AIMessage(content="I'm doing well, thank you!")
-        ... ]
-        >>> formatted = format_sequence(messages)
-        >>> formatted
-
-        # Format with custom separator and numbering:
-        >>> formatted = format_sequence(messages, separator="---", with_num=True)
-        >>> formatted
-    """
-    if not inputs:
-        return ""
-
-    outputs = []
-
-    for input_item in inputs:
-        if isinstance(
-            input_item, (HumanMessage, AIMessage, SystemMessage, ToolMessage)
-        ):
-            outputs.append(input_item.content)
-        elif isinstance(input_item, Document):
-            outputs.append(input_item.page_content)
-        elif isinstance(input_item, str):
-            outputs.append(input_item)
-    if with_num:
-        outputs = [f"{i + 1}. {output}" for i, output in enumerate(outputs)]
-
-    str_ = "\n" + separator
-    return separator + str_.join(outputs)
+from typing import Sequence, Union
+
+from langchain_core.documents import Document
+from langchain_core.messages import (
+    AIMessage,
+    BaseMessage,
+    HumanMessage,
+    SystemMessage,
+    ToolMessage,
+)
+
+
+def format_sequence(
+    inputs: Union[Sequence[Document], Sequence[BaseMessage], Sequence[str]],
+    separator: str = "-",
+    with_num: bool = False,
+) -> str:
+    """Convert a list of messages, documents, or strings into a formatted string.
+
+    This function extracts text content from various types (e.g., HumanMessage, Document)
+    and joins them into a single string. Optionally adds serial numbers and a custom
+    separator between items.
+
+    Args:
+        inputs: A list of inputs. Supported types:
+            - langchain_core.messages: HumanMessage, AIMessage, SystemMessage, ToolMessage
+            - langchain_core.documents.Document
+            - str
+        separator: The separator used to join the items. Defaults to "-".
+        with_num: If True, prefixes each item with a serial number (e.g., "1. Hello").
+            Defaults to False.
+
+    Returns:
+        A formatted string composed of the input contents, joined by `separator`.
+
+    Example:
+        # Format messages with default separator:
+        >>> from langchain_dev_utils.message_convert import format_sequence
+        >>> from langchain_core.messages import HumanMessage, AIMessage
+        >>> messages = [
+        ...     HumanMessage(content="Hello, how are you?"),
+        ...     AIMessage(content="I'm doing well, thank you!")
+        ... ]
+        >>> formatted = format_sequence(messages)
+        >>> formatted
+
+        # Format with custom separator and numbering:
+        >>> formatted = format_sequence(messages, separator="---", with_num=True)
+        >>> formatted
+    """
+    if not inputs:
+        return ""
+
+    outputs = []
+
+    for input_item in inputs:
+        if isinstance(
+            input_item, (HumanMessage, AIMessage, SystemMessage, ToolMessage)
+        ):
+            outputs.append(input_item.content)
+        elif isinstance(input_item, Document):
+            outputs.append(input_item.page_content)
+        elif isinstance(input_item, str):
+            outputs.append(input_item)
+    if with_num:
+        outputs = [f"{i + 1}. {output}" for i, output in enumerate(outputs)]
+
+    str_ = "\n" + separator
+    return separator + str_.join(outputs)
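The doctest in this file shows the calls but not their output; derived directly from the join logic above (separator + ("\n" + separator).join(outputs)), the docstring's own messages produce:

    from langchain_core.messages import AIMessage, HumanMessage

    from langchain_dev_utils.message_convert import format_sequence

    messages = [
        HumanMessage(content="Hello, how are you?"),
        AIMessage(content="I'm doing well, thank you!"),
    ]

    print(format_sequence(messages))
    # -Hello, how are you?
    # -I'm doing well, thank you!

    print(format_sequence(messages, separator="---", with_num=True))
    # ---1. Hello, how are you?
    # ---2. I'm doing well, thank you!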
{langchain_dev_utils-1.3.3.dist-info → langchain_dev_utils-1.3.5.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: langchain-dev-utils
-Version: 1.3.3
+Version: 1.3.5
 Summary: A practical utility library for LangChain and LangGraph development
 Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
 Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
{langchain_dev_utils-1.3.3.dist-info → langchain_dev_utils-1.3.5.dist-info}/RECORD
RENAMED
@@ -1,13 +1,13 @@
-langchain_dev_utils/__init__.py,sha256=
-langchain_dev_utils/_utils.py,sha256=
+langchain_dev_utils/__init__.py,sha256=HxBccdYuMvYSmkuVJi1a6zde7NfWgx8iuIRvVLBi-XA,23
+langchain_dev_utils/_utils.py,sha256=rYsVM6ceU-VzZsJyf7ikfMh3OD84xCt0MLQIB3bwf_A,3858
 langchain_dev_utils/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain_dev_utils/agents/__init__.py,sha256=69_biZzyJvW9OBT1g8TX_77mp9-I_TvWo9QtlvHq83E,177
 langchain_dev_utils/agents/factory.py,sha256=8XB6y_ddf58vXlTLHBL6KCirFqkD2GjtzsuOt98sS7U,3732
 langchain_dev_utils/agents/file_system.py,sha256=Yk3eetREE26WNrnTWLoiDUpOyCJ-rhjlfFDk6foLa1E,8468
 langchain_dev_utils/agents/plan.py,sha256=WwhoiJBmVYVI9bT8HfjCzTJ_SIp9WFil0gOeznv2omQ,6497
-langchain_dev_utils/agents/wrap.py,sha256=
+langchain_dev_utils/agents/wrap.py,sha256=dJbCvljyw-ee43__2uws4H4ZhsyyX1AuavNtHrdWqN8,9485
 langchain_dev_utils/agents/middleware/__init__.py,sha256=QVQibaNHvHPyNTZ2UNFfYL153ZboaCHcoioTHK0FsiY,710
-langchain_dev_utils/agents/middleware/format_prompt.py,sha256=
+langchain_dev_utils/agents/middleware/format_prompt.py,sha256=yIkoSVPp0FemkjezvGsOmtgOkZDyEYQ8yh4YWYYGtVc,2343
 langchain_dev_utils/agents/middleware/handoffs.py,sha256=r196Xk0Jws1Tz6JQuvy5HEc3HAAQejCxFmJpB6KrvLU,7230
 langchain_dev_utils/agents/middleware/model_fallback.py,sha256=8xiNjTJ0yiRkPLCRfAGNnqY1TLstj1Anmiqyv5w2mA8,1633
 langchain_dev_utils/agents/middleware/model_router.py,sha256=qBspvj9ZoKfmC1pHWTO0EHHfxjgCUd-TuSbqvZl0kmg,7977
@@ -17,15 +17,20 @@ langchain_dev_utils/agents/middleware/tool_call_repair.py,sha256=oZF0Oejemqs9kSn
 langchain_dev_utils/agents/middleware/tool_emulator.py,sha256=OgtPhqturaWzF4fRSJ3f_IXvIrYrrAjlpOC5zmLtrkY,2031
 langchain_dev_utils/agents/middleware/tool_selection.py,sha256=dRH5ejR6N02Djwxt6Gd63MYkg6SV5pySlzaRt53OoZk,3113
 langchain_dev_utils/chat_models/__init__.py,sha256=YSLUyHrWEEj4y4DtGFCOnDW02VIYZdfAH800m4Klgeg,224
-langchain_dev_utils/chat_models/base.py,sha256=
+langchain_dev_utils/chat_models/base.py,sha256=G_SNvd53ogho-LRgD7DCD65xj51J2JxmOkA4URNW6ZQ,11560
 langchain_dev_utils/chat_models/types.py,sha256=MD3cv_ZIe9fCdgwisNfuxAOhy-j4YSs1ZOQYyCjlNKs,927
-langchain_dev_utils/chat_models/adapters/__init__.py,sha256=
-langchain_dev_utils/chat_models/adapters/
+langchain_dev_utils/chat_models/adapters/__init__.py,sha256=4tTbhAAQdpX_gWyWeH97hqS5HnaoqQqW6QBh9Qd1SKs,106
+langchain_dev_utils/chat_models/adapters/create_utils.py,sha256=r8_XWLNF3Yc6sumlBhmgG1QcBa4Dsba7X3f_9YeMeGA,2479
+langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=Xsd6HN1zGGDl87bZ5NMfwKfxWkgdP4DpszEqlb4Z-MY,27198
+langchain_dev_utils/chat_models/adapters/register_profiles.py,sha256=YS9ItCEq2ISoB_bp6QH5NVKOVR9-7la3r7B_xQNxZxE,366
 langchain_dev_utils/embeddings/__init__.py,sha256=zbEOaV86TUi9Zrg_dH9dpdgacWg31HMJTlTQknA9EKk,244
-langchain_dev_utils/embeddings/base.py,sha256=
-langchain_dev_utils/
+langchain_dev_utils/embeddings/base.py,sha256=GXFKZSAExMtCFUpsd6mY4NxCWCrq7JAatBw3kS9LaKY,8803
+langchain_dev_utils/embeddings/adapters/__init__.py,sha256=yJEZZdzZ2fv1ExezLaNxo0VU9HJTHKYbS3T_XP8Ab9c,114
+langchain_dev_utils/embeddings/adapters/create_utils.py,sha256=K4JlbjG-O5xLY3wxaVt0UZ3QwI--cVb4qyxLATKVAWQ,2012
+langchain_dev_utils/embeddings/adapters/openai_compatible.py,sha256=fo7-m7dcWL4xrhSqdAHHVREsiXfVOvIrlaotaYTEiyE,3159
+langchain_dev_utils/message_convert/__init__.py,sha256=nnkDa_Im0dCb5u4aa2FRB9tqB8e6H6sEGYK6Vg81u2s,472
 langchain_dev_utils/message_convert/content.py,sha256=2V1g21byg3iLv5RjUW8zv3jwYwV7IH2hNim7jGRsIes,8096
-langchain_dev_utils/message_convert/format.py,sha256=
+langchain_dev_utils/message_convert/format.py,sha256=NdrYX0cJn2-G1ArLSjJ7yO788KV1d83F4Kimpyft0IM,2446
 langchain_dev_utils/pipeline/__init__.py,sha256=eE6WktaLHDkqMeXDIDaLtm-OPTwtsX_Av8iK9uYrceo,186
 langchain_dev_utils/pipeline/parallel.py,sha256=nwZWbdSNeyanC9WufoJBTceotgT--UnPOfStXjgNMOc,5271
 langchain_dev_utils/pipeline/sequential.py,sha256=sYJXQzVHDKUc-UV-HMv38JTPnse1A7sRM0vqSdpHK0k,3850
@@ -33,7 +38,7 @@ langchain_dev_utils/pipeline/types.py,sha256=T3aROKKXeWvd0jcH5XkgMDQfEkLfPaiOhhV
 langchain_dev_utils/tool_calling/__init__.py,sha256=mu_WxKMcu6RoTf4vkTPbA1WSBSNc6YIqyBtOQ6iVQj4,322
 langchain_dev_utils/tool_calling/human_in_the_loop.py,sha256=7Z_QO5OZUR6K8nLoIcafc6osnvX2IYNorOJcbx6bVso,9672
 langchain_dev_utils/tool_calling/utils.py,sha256=S4-KXQ8jWmpGTXYZitovF8rxKpaSSUkFruM8LDwvcvE,2765
-langchain_dev_utils-1.3.
-langchain_dev_utils-1.3.
-langchain_dev_utils-1.3.
-langchain_dev_utils-1.3.
+langchain_dev_utils-1.3.5.dist-info/METADATA,sha256=ftpdThaRWWEeTsrAYWNlf6WJPhP9xrEVTPez-JonkTk,4552
+langchain_dev_utils-1.3.5.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+langchain_dev_utils-1.3.5.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
+langchain_dev_utils-1.3.5.dist-info/RECORD,,
{langchain_dev_utils-1.3.3.dist-info → langchain_dev_utils-1.3.5.dist-info}/WHEEL
RENAMED
File without changes
{langchain_dev_utils-1.3.3.dist-info → langchain_dev_utils-1.3.5.dist-info}/licenses/LICENSE
RENAMED
File without changes