langchain-dev-utils 1.2.11__py3-none-any.whl → 1.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_dev_utils/__init__.py +1 -1
- langchain_dev_utils/agents/middleware/__init__.py +2 -0
- langchain_dev_utils/agents/middleware/format_prompt.py +66 -0
- langchain_dev_utils/chat_models/adapters/openai_compatible.py +63 -1
- langchain_dev_utils/chat_models/base.py +0 -1
- {langchain_dev_utils-1.2.11.dist-info → langchain_dev_utils-1.2.13.dist-info}/METADATA +2 -2
- {langchain_dev_utils-1.2.11.dist-info → langchain_dev_utils-1.2.13.dist-info}/RECORD +9 -8
- {langchain_dev_utils-1.2.11.dist-info → langchain_dev_utils-1.2.13.dist-info}/WHEEL +0 -0
- {langchain_dev_utils-1.2.11.dist-info → langchain_dev_utils-1.2.13.dist-info}/licenses/LICENSE +0 -0
langchain_dev_utils/__init__.py
CHANGED

@@ -1 +1 @@
-__version__ = "1.2.11"
+__version__ = "1.2.13"

langchain_dev_utils/agents/middleware/__init__.py
CHANGED

@@ -1,3 +1,4 @@
+from .format_prompt import format_prompt
 from .model_fallback import ModelFallbackMiddleware
 from .model_router import ModelRouterMiddleware
 from .plan import (
@@ -22,4 +23,5 @@ __all__ = [
     "LLMToolEmulator",
     "ModelRouterMiddleware",
     "ToolCallRepairMiddleware",
+    "format_prompt",
 ]
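
Since the re-export above lands in the middleware package's __init__, the new middleware should also be importable from the package namespace (a one-line sketch based on the import added above):

from langchain_dev_utils.agents.middleware import format_prompt  # newly exported in 1.2.13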

langchain_dev_utils/agents/middleware/format_prompt.py
ADDED

@@ -0,0 +1,66 @@
+from langchain.agents.middleware import ModelRequest, dynamic_prompt
+from langchain_core.prompts.string import get_template_variables
+
+
+@dynamic_prompt
+def format_prompt(request: ModelRequest) -> str:
+    """Format the system prompt with variables from state and context.
+
+    This middleware function extracts template variables from the system prompt
+    and populates them with values from the agent's state and runtime context.
+    Variables are first resolved from the state, then from the context if not found.
+
+    Example:
+        >>> from langchain_dev_utils.agents.middleware.format_prompt import format_prompt
+        >>> from langchain.agents import create_agent
+        >>> from langchain_core.messages import HumanMessage
+        >>> from dataclasses import dataclass
+        >>>
+        >>> @dataclass
+        ... class Context:
+        ...     name: str
+        ...     user: str
+        >>>
+        >>> agent=create_agent(
+        ...     model=model,
+        ...     tools=tools,
+        ...     system_prompt="You are a helpful assistant. Your name is {name}. Your user is {user}.",
+        ...     middleware=[format_prompt],
+        ...     context_schema=Context,
+        ... )
+        >>> agent.invoke(
+        ...     {
+        ...         "messages": [HumanMessage(content="Hello")],
+        ...     },
+        ...     context=Context(name="assistant", user="tbice"),
+        ... )
+
+    """
+    system_msg = request.system_message
+    if system_msg is None:
+        raise ValueError(
+            "system_message must be provided,while use format_prompt in middleware."
+        )
+
+    system_prompt = "\n".join(
+        [content.get("text", "") for content in system_msg.content_blocks]
+    )
+    variables = get_template_variables(system_prompt, "f-string")
+
+    format_params = {}
+
+    state = request.state
+    for key in variables:
+        if var := state.get(key, None):
+            format_params[key] = var
+
+    other_var_keys = set(variables) - set(format_params.keys())
+
+    if other_var_keys:
+        context = request.runtime.context
+        if context is not None:
+            for key in other_var_keys:
+                if var := getattr(context, key, None):
+                    format_params[key] = var
+
+    return system_prompt.format(**format_params)
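
A note on resolution order, since the docstring example only shows context-backed variables: values found in the agent state take precedence, and the runtime context only fills what is still missing. Below is a minimal standalone sketch of that order; the dict-based state and context are illustrative stand-ins, not the package's actual types.

from langchain_core.prompts.string import get_template_variables

system_prompt = "You are a helpful assistant. Your name is {name}. Your user is {user}."
variables = get_template_variables(system_prompt, "f-string")  # ['name', 'user']

state = {"name": "assistant-from-state"}        # hypothetical agent state values
context = {"name": "ignored", "user": "tbice"}  # hypothetical runtime context values

format_params = {}
for key in variables:
    if key in state:        # the state is consulted first
        format_params[key] = state[key]
    elif key in context:    # the context only fills variables the state did not provide
        format_params[key] = context[key]

print(system_prompt.format(**format_params))
# You are a helpful assistant. Your name is assistant-from-state. Your user is tbice.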

langchain_dev_utils/chat_models/adapters/openai_compatible.py
CHANGED

@@ -20,7 +20,13 @@ from langchain_core.callbacks import (
     CallbackManagerForLLMRun,
 )
 from langchain_core.language_models import LangSmithParams, LanguageModelInput
-from langchain_core.messages import
+from langchain_core.messages import (
+    AIMessage,
+    AIMessageChunk,
+    BaseMessage,
+    HumanMessage,
+    ToolMessage,
+)
 from langchain_core.outputs import ChatGenerationChunk, ChatResult
 from langchain_core.runnables import Runnable
 from langchain_core.tools import BaseTool
@@ -63,6 +69,58 @@ def _get_last_human_message_index(messages: list[BaseMessage]) -> int:
     )
 
 
+def _transform_video_block(block: dict[str, Any]) -> dict:
+    """Transform video block to video_url block."""
+    if "url" in block:
+        return {
+            "type": "video_url",
+            "video_url": {
+                "url": block["url"],
+            },
+        }
+    if "base64" in block or block.get("source_type") == "base64":
+        if "mime_type" not in block:
+            error_message = "mime_type key is required for base64 data."
+            raise ValueError(error_message)
+        mime_type = block["mime_type"]
+        base64_data = block["data"] if "data" in block else block["base64"]
+        return {
+            "type": "video_url",
+            "video_url": {
+                "url": f"data:{mime_type};base64,{base64_data}",
+            },
+        }
+    error_message = "Unsupported source type. Only 'url' and 'base64' are supported."
+    raise ValueError(error_message)
+
+
+def _process_video_input(message: BaseMessage):
+    """
+    Process BaseMessage with video input.
+
+    Args:
+        message (BaseMessage): The HumanMessage instance to process.
+
+    Returns:
+        None: The method modifies the message in-place.
+    """
+    if not message.content:
+        return message
+    content = message.content
+
+    if not isinstance(content, list):
+        return message
+
+    formatted_content = []
+    for block in content:
+        if isinstance(block, dict) and block.get("type") == "video":
+            formatted_content.append(_transform_video_block(block))
+        else:
+            formatted_content.append(block)
+    message = message.model_copy(update={"content": formatted_content})
+    return message
+
+
 class _BaseChatOpenAICompatible(BaseChatOpenAI):
     """
     Base template class for OpenAI-compatible chat model implementations.
@@ -183,6 +241,10 @@ class _BaseChatOpenAICompatible(BaseChatOpenAI):
                 )
                 payload_messages.append(msg_dict)
             else:
+                if (
+                    isinstance(m, HumanMessage) or isinstance(m, ToolMessage)
+                ) and isinstance(m.content, list):
+                    m = _process_video_input(m)
                 payload_messages.append(_convert_message_to_dict(m))
 
         payload["messages"] = payload_messages
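
For context on what the adapter change above handles: a HumanMessage or ToolMessage whose content list carries a "video" block is now rewritten into an OpenAI-style "video_url" block before the request payload is built, with base64 sources inlined as data: URLs. A rough sketch of the expected input and output shapes follows; the URL and data are made up for illustration.

from langchain_core.messages import HumanMessage

# A message carrying a video block by URL (hypothetical URL):
message = HumanMessage(
    content=[
        {"type": "text", "text": "Describe this clip."},
        {"type": "video", "url": "https://example.com/clip.mp4"},
    ]
)
# After processing, the video block is expected to become:
# {"type": "video_url", "video_url": {"url": "https://example.com/clip.mp4"}}

# A base64-sourced block (mime_type is required) would instead be inlined as a data URL:
# {"type": "video", "base64": "<...>", "mime_type": "video/mp4"}
#   -> {"type": "video_url", "video_url": {"url": "data:video/mp4;base64,<...>"}}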

{langchain_dev_utils-1.2.11.dist-info → langchain_dev_utils-1.2.13.dist-info}/METADATA
RENAMED

@@ -1,10 +1,10 @@
 Metadata-Version: 2.4
 Name: langchain-dev-utils
-Version: 1.2.11
+Version: 1.2.13
 Summary: A practical utility library for LangChain and LangGraph development
 Project-URL: Source Code, https://github.com/TBice123123/langchain-dev-utils
 Project-URL: repository, https://github.com/TBice123123/langchain-dev-utils
-Project-URL: documentation, https://tbice123123.github.io/langchain-dev-utils
+Project-URL: documentation, https://tbice123123.github.io/langchain-dev-utils
 Author-email: tiebingice <tiebingice123@outlook.com>
 License-File: LICENSE
 Requires-Python: >=3.11

{langchain_dev_utils-1.2.11.dist-info → langchain_dev_utils-1.2.13.dist-info}/RECORD
RENAMED

@@ -1,4 +1,4 @@
-langchain_dev_utils/__init__.py,sha256=
+langchain_dev_utils/__init__.py,sha256=rQSlPcfj4yT4krIq6epTVQyBzIX4etVOgfupVkM-RnU,23
 langchain_dev_utils/_utils.py,sha256=MFEzR1BjXMj6HEVwt2x2omttFuDJ_rYAEbNqe99r9pM,1338
 langchain_dev_utils/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langchain_dev_utils/agents/__init__.py,sha256=PJ-lSDZv_AXMYA3H4fx-HzJa14tPbkGmq1HX8LNfaPo,125
@@ -6,7 +6,8 @@ langchain_dev_utils/agents/factory.py,sha256=XdGjktksfTDys7X4SgfPrQz10HUo5fTNAWE
 langchain_dev_utils/agents/file_system.py,sha256=Yk3eetREE26WNrnTWLoiDUpOyCJ-rhjlfFDk6foLa1E,8468
 langchain_dev_utils/agents/plan.py,sha256=WwhoiJBmVYVI9bT8HfjCzTJ_SIp9WFil0gOeznv2omQ,6497
 langchain_dev_utils/agents/wrap.py,sha256=RuchoH_VotPmKFuYEn2SXoSgNxZhSA9jKM0Iv_8oHLk,4718
-langchain_dev_utils/agents/middleware/__init__.py,sha256=
+langchain_dev_utils/agents/middleware/__init__.py,sha256=EECbcYcHXQAMA-guJNRGwCVi9jG957d0nOaoIuyIKC0,832
+langchain_dev_utils/agents/middleware/format_prompt.py,sha256=rfii98tmOqkjaNHxWy7hovhEYKXrF0CdzsMLO54_CDI,2359
 langchain_dev_utils/agents/middleware/model_fallback.py,sha256=nivtXXF4cwyOBv6p7RW12nXtNg87wjTWxO3BKIYiroI,1674
 langchain_dev_utils/agents/middleware/model_router.py,sha256=pOK-4PNTLrmjaQA9poHoQnsaVwoX0JeJrLVysulv9iU,7631
 langchain_dev_utils/agents/middleware/plan.py,sha256=0qDCmenxgY_zrwMfOyYlgLfhYNw-HszNLeeOkfj14NA,16002
@@ -15,10 +16,10 @@ langchain_dev_utils/agents/middleware/tool_call_repair.py,sha256=oZF0Oejemqs9kSn
 langchain_dev_utils/agents/middleware/tool_emulator.py,sha256=OgtPhqturaWzF4fRSJ3f_IXvIrYrrAjlpOC5zmLtrkY,2031
 langchain_dev_utils/agents/middleware/tool_selection.py,sha256=dRH5ejR6N02Djwxt6Gd63MYkg6SV5pySlzaRt53OoZk,3113
 langchain_dev_utils/chat_models/__init__.py,sha256=YSLUyHrWEEj4y4DtGFCOnDW02VIYZdfAH800m4Klgeg,224
-langchain_dev_utils/chat_models/base.py,sha256=
+langchain_dev_utils/chat_models/base.py,sha256=BzaoCIv145eE8b5wNDsbZDHn4EAxe4vdlptp7qXPWKk,11625
 langchain_dev_utils/chat_models/types.py,sha256=MD3cv_ZIe9fCdgwisNfuxAOhy-j4YSs1ZOQYyCjlNKs,927
 langchain_dev_utils/chat_models/adapters/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=
+langchain_dev_utils/chat_models/adapters/openai_compatible.py,sha256=4iJgMGAReiJ668O9ZGAZbduxNY2PhEJJonDhQDBge44,24596
 langchain_dev_utils/embeddings/__init__.py,sha256=zbEOaV86TUi9Zrg_dH9dpdgacWg31HMJTlTQknA9EKk,244
 langchain_dev_utils/embeddings/base.py,sha256=BGoWY0L7nG9iRV3d4sSagXhECXrwvS1xA-A_OVltn3k,9406
 langchain_dev_utils/message_convert/__init__.py,sha256=ZGrHGXPKMrZ_p9MqfIVZ4jgbEyb7aC4Q7X-muuThIYU,457
@@ -31,7 +32,7 @@ langchain_dev_utils/pipeline/types.py,sha256=T3aROKKXeWvd0jcH5XkgMDQfEkLfPaiOhhV
 langchain_dev_utils/tool_calling/__init__.py,sha256=mu_WxKMcu6RoTf4vkTPbA1WSBSNc6YIqyBtOQ6iVQj4,322
 langchain_dev_utils/tool_calling/human_in_the_loop.py,sha256=7Z_QO5OZUR6K8nLoIcafc6osnvX2IYNorOJcbx6bVso,9672
 langchain_dev_utils/tool_calling/utils.py,sha256=S4-KXQ8jWmpGTXYZitovF8rxKpaSSUkFruM8LDwvcvE,2765
-langchain_dev_utils-1.2.11.dist-info/METADATA,sha256=
-langchain_dev_utils-1.2.11.dist-info/WHEEL,sha256=
-langchain_dev_utils-1.2.11.dist-info/licenses/LICENSE,sha256=
-langchain_dev_utils-1.2.11.dist-info/RECORD,,
+langchain_dev_utils-1.2.13.dist-info/METADATA,sha256=SUHFdkF4J5_VxYmZGL7Aqv_79JTXRBoCn--gMTrSjHw,11844
+langchain_dev_utils-1.2.13.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+langchain_dev_utils-1.2.13.dist-info/licenses/LICENSE,sha256=AWAOzNEcsvCEzHOF0qby5OKxviVH_eT9Yce1sgJTico,1084
+langchain_dev_utils-1.2.13.dist-info/RECORD,,

{langchain_dev_utils-1.2.11.dist-info → langchain_dev_utils-1.2.13.dist-info}/WHEEL
RENAMED

File without changes

{langchain_dev_utils-1.2.11.dist-info → langchain_dev_utils-1.2.13.dist-info}/licenses/LICENSE
RENAMED

File without changes