langchain-core 0.3.79__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of langchain-core might be problematic.
- langchain_core/__init__.py +1 -1
- langchain_core/_api/__init__.py +3 -4
- langchain_core/_api/beta_decorator.py +23 -26
- langchain_core/_api/deprecation.py +52 -65
- langchain_core/_api/path.py +3 -6
- langchain_core/_import_utils.py +3 -4
- langchain_core/agents.py +19 -19
- langchain_core/caches.py +53 -63
- langchain_core/callbacks/__init__.py +1 -8
- langchain_core/callbacks/base.py +323 -334
- langchain_core/callbacks/file.py +44 -44
- langchain_core/callbacks/manager.py +441 -507
- langchain_core/callbacks/stdout.py +29 -30
- langchain_core/callbacks/streaming_stdout.py +32 -32
- langchain_core/callbacks/usage.py +60 -57
- langchain_core/chat_history.py +48 -63
- langchain_core/document_loaders/base.py +23 -23
- langchain_core/document_loaders/langsmith.py +37 -37
- langchain_core/documents/__init__.py +0 -1
- langchain_core/documents/base.py +62 -65
- langchain_core/documents/compressor.py +4 -4
- langchain_core/documents/transformers.py +28 -29
- langchain_core/embeddings/fake.py +50 -54
- langchain_core/example_selectors/length_based.py +1 -1
- langchain_core/example_selectors/semantic_similarity.py +21 -25
- langchain_core/exceptions.py +10 -11
- langchain_core/globals.py +3 -151
- langchain_core/indexing/api.py +61 -66
- langchain_core/indexing/base.py +58 -58
- langchain_core/indexing/in_memory.py +3 -3
- langchain_core/language_models/__init__.py +14 -27
- langchain_core/language_models/_utils.py +270 -84
- langchain_core/language_models/base.py +55 -162
- langchain_core/language_models/chat_models.py +442 -402
- langchain_core/language_models/fake.py +11 -11
- langchain_core/language_models/fake_chat_models.py +61 -39
- langchain_core/language_models/llms.py +123 -231
- langchain_core/load/dump.py +4 -5
- langchain_core/load/load.py +18 -28
- langchain_core/load/mapping.py +2 -4
- langchain_core/load/serializable.py +39 -40
- langchain_core/messages/__init__.py +61 -22
- langchain_core/messages/ai.py +368 -163
- langchain_core/messages/base.py +214 -43
- langchain_core/messages/block_translators/__init__.py +111 -0
- langchain_core/messages/block_translators/anthropic.py +470 -0
- langchain_core/messages/block_translators/bedrock.py +94 -0
- langchain_core/messages/block_translators/bedrock_converse.py +297 -0
- langchain_core/messages/block_translators/google_genai.py +530 -0
- langchain_core/messages/block_translators/google_vertexai.py +21 -0
- langchain_core/messages/block_translators/groq.py +143 -0
- langchain_core/messages/block_translators/langchain_v0.py +301 -0
- langchain_core/messages/block_translators/openai.py +1010 -0
- langchain_core/messages/chat.py +2 -6
- langchain_core/messages/content.py +1423 -0
- langchain_core/messages/function.py +6 -10
- langchain_core/messages/human.py +41 -38
- langchain_core/messages/modifier.py +2 -2
- langchain_core/messages/system.py +38 -28
- langchain_core/messages/tool.py +96 -103
- langchain_core/messages/utils.py +478 -504
- langchain_core/output_parsers/__init__.py +1 -14
- langchain_core/output_parsers/base.py +58 -61
- langchain_core/output_parsers/json.py +7 -8
- langchain_core/output_parsers/list.py +5 -7
- langchain_core/output_parsers/openai_functions.py +49 -47
- langchain_core/output_parsers/openai_tools.py +14 -19
- langchain_core/output_parsers/pydantic.py +12 -13
- langchain_core/output_parsers/string.py +2 -2
- langchain_core/output_parsers/transform.py +15 -17
- langchain_core/output_parsers/xml.py +8 -10
- langchain_core/outputs/__init__.py +1 -1
- langchain_core/outputs/chat_generation.py +18 -18
- langchain_core/outputs/chat_result.py +1 -3
- langchain_core/outputs/generation.py +8 -8
- langchain_core/outputs/llm_result.py +10 -10
- langchain_core/prompt_values.py +12 -12
- langchain_core/prompts/__init__.py +3 -27
- langchain_core/prompts/base.py +45 -55
- langchain_core/prompts/chat.py +254 -313
- langchain_core/prompts/dict.py +5 -5
- langchain_core/prompts/few_shot.py +81 -88
- langchain_core/prompts/few_shot_with_templates.py +11 -13
- langchain_core/prompts/image.py +12 -14
- langchain_core/prompts/loading.py +6 -8
- langchain_core/prompts/message.py +3 -3
- langchain_core/prompts/prompt.py +24 -39
- langchain_core/prompts/string.py +4 -4
- langchain_core/prompts/structured.py +42 -50
- langchain_core/rate_limiters.py +51 -60
- langchain_core/retrievers.py +49 -190
- langchain_core/runnables/base.py +1484 -1709
- langchain_core/runnables/branch.py +45 -61
- langchain_core/runnables/config.py +80 -88
- langchain_core/runnables/configurable.py +117 -134
- langchain_core/runnables/fallbacks.py +83 -79
- langchain_core/runnables/graph.py +85 -95
- langchain_core/runnables/graph_ascii.py +27 -28
- langchain_core/runnables/graph_mermaid.py +38 -50
- langchain_core/runnables/graph_png.py +15 -16
- langchain_core/runnables/history.py +135 -148
- langchain_core/runnables/passthrough.py +124 -150
- langchain_core/runnables/retry.py +46 -51
- langchain_core/runnables/router.py +25 -30
- langchain_core/runnables/schema.py +79 -74
- langchain_core/runnables/utils.py +62 -68
- langchain_core/stores.py +81 -115
- langchain_core/structured_query.py +8 -8
- langchain_core/sys_info.py +27 -29
- langchain_core/tools/__init__.py +1 -14
- langchain_core/tools/base.py +179 -187
- langchain_core/tools/convert.py +131 -139
- langchain_core/tools/render.py +10 -10
- langchain_core/tools/retriever.py +11 -11
- langchain_core/tools/simple.py +19 -24
- langchain_core/tools/structured.py +30 -39
- langchain_core/tracers/__init__.py +1 -9
- langchain_core/tracers/base.py +97 -99
- langchain_core/tracers/context.py +29 -52
- langchain_core/tracers/core.py +50 -60
- langchain_core/tracers/evaluation.py +11 -11
- langchain_core/tracers/event_stream.py +115 -70
- langchain_core/tracers/langchain.py +21 -21
- langchain_core/tracers/log_stream.py +43 -43
- langchain_core/tracers/memory_stream.py +3 -3
- langchain_core/tracers/root_listeners.py +16 -16
- langchain_core/tracers/run_collector.py +2 -4
- langchain_core/tracers/schemas.py +0 -129
- langchain_core/tracers/stdout.py +3 -3
- langchain_core/utils/__init__.py +1 -4
- langchain_core/utils/_merge.py +46 -8
- langchain_core/utils/aiter.py +57 -61
- langchain_core/utils/env.py +9 -9
- langchain_core/utils/function_calling.py +89 -191
- langchain_core/utils/html.py +7 -8
- langchain_core/utils/input.py +6 -6
- langchain_core/utils/interactive_env.py +1 -1
- langchain_core/utils/iter.py +37 -42
- langchain_core/utils/json.py +4 -3
- langchain_core/utils/json_schema.py +8 -8
- langchain_core/utils/mustache.py +9 -11
- langchain_core/utils/pydantic.py +33 -35
- langchain_core/utils/strings.py +5 -5
- langchain_core/utils/usage.py +1 -1
- langchain_core/utils/utils.py +80 -54
- langchain_core/vectorstores/base.py +129 -164
- langchain_core/vectorstores/in_memory.py +99 -174
- langchain_core/vectorstores/utils.py +5 -5
- langchain_core/version.py +1 -1
- {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/METADATA +28 -27
- langchain_core-1.0.0.dist-info/RECORD +172 -0
- {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
- langchain_core/beta/__init__.py +0 -1
- langchain_core/beta/runnables/__init__.py +0 -1
- langchain_core/beta/runnables/context.py +0 -447
- langchain_core/memory.py +0 -120
- langchain_core/messages/content_blocks.py +0 -176
- langchain_core/prompts/pipeline.py +0 -138
- langchain_core/pydantic_v1/__init__.py +0 -30
- langchain_core/pydantic_v1/dataclasses.py +0 -23
- langchain_core/pydantic_v1/main.py +0 -23
- langchain_core/tracers/langchain_v1.py +0 -31
- langchain_core/utils/loading.py +0 -35
- langchain_core-0.3.79.dist-info/RECORD +0 -174
- langchain_core-0.3.79.dist-info/entry_points.txt +0 -4
langchain_core/memory.py
DELETED
@@ -1,120 +0,0 @@
-"""**Memory** maintains Chain state, incorporating context from past runs.
-
-This module contains memory abstractions from LangChain v0.0.x.
-
-These abstractions are now deprecated and will be removed in LangChain v1.0.0.
-"""
-
-from __future__ import annotations
-
-from abc import ABC, abstractmethod
-from typing import Any
-
-from pydantic import ConfigDict
-
-from langchain_core._api import deprecated
-from langchain_core.load.serializable import Serializable
-from langchain_core.runnables import run_in_executor
-
-
-@deprecated(
-    since="0.3.3",
-    removal="1.0.0",
-    message=(
-        "Please see the migration guide at: "
-        "https://python.langchain.com/docs/versions/migrating_memory/"
-    ),
-)
-class BaseMemory(Serializable, ABC):
-    """Abstract base class for memory in Chains.
-
-    Memory refers to state in Chains. Memory can be used to store information about
-    past executions of a Chain and inject that information into the inputs of
-    future executions of the Chain. For example, for conversational Chains Memory
-    can be used to store conversations and automatically add them to future model
-    prompts so that the model has the necessary context to respond coherently to
-    the latest input.
-
-    Example:
-        .. code-block:: python
-
-            class SimpleMemory(BaseMemory):
-                memories: dict[str, Any] = dict()
-
-                @property
-                def memory_variables(self) -> list[str]:
-                    return list(self.memories.keys())
-
-                def load_memory_variables(
-                    self, inputs: dict[str, Any]
-                ) -> dict[str, str]:
-                    return self.memories
-
-                def save_context(
-                    self, inputs: dict[str, Any], outputs: dict[str, str]
-                ) -> None:
-                    pass
-
-                def clear(self) -> None:
-                    pass
-
-    """
-
-    model_config = ConfigDict(
-        arbitrary_types_allowed=True,
-    )
-
-    @property
-    @abstractmethod
-    def memory_variables(self) -> list[str]:
-        """The string keys this memory class will add to chain inputs."""
-
-    @abstractmethod
-    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
-        """Return key-value pairs given the text input to the chain.
-
-        Args:
-            inputs: The inputs to the chain.
-
-        Returns:
-            A dictionary of key-value pairs.
-        """
-
-    async def aload_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
-        """Async return key-value pairs given the text input to the chain.
-
-        Args:
-            inputs: The inputs to the chain.
-
-        Returns:
-            A dictionary of key-value pairs.
-        """
-        return await run_in_executor(None, self.load_memory_variables, inputs)
-
-    @abstractmethod
-    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
-        """Save the context of this chain run to memory.
-
-        Args:
-            inputs: The inputs to the chain.
-            outputs: The outputs of the chain.
-        """
-
-    async def asave_context(
-        self, inputs: dict[str, Any], outputs: dict[str, str]
-    ) -> None:
-        """Async save the context of this chain run to memory.
-
-        Args:
-            inputs: The inputs to the chain.
-            outputs: The outputs of the chain.
-        """
-        await run_in_executor(None, self.save_context, inputs, outputs)
-
-    @abstractmethod
-    def clear(self) -> None:
-        """Clear memory contents."""
-
-    async def aclear(self) -> None:
-        """Async clear memory contents."""
-        await run_in_executor(None, self.clear)
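For reference while migrating, the docstring above sketches the subclassing contract; a runnable, self-contained version of that example (assuming langchain-core 0.3.x, where BaseMemory is still importable, and with illustrative names and data) is:

from typing import Any

from langchain_core.memory import BaseMemory


class SimpleMemory(BaseMemory):
    """Static memory that always injects the same key/value pairs."""

    memories: dict[str, Any] = {}

    @property
    def memory_variables(self) -> list[str]:
        # Keys this memory adds to chain inputs.
        return list(self.memories.keys())

    def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, str]:
        # Ignore the incoming inputs and return the stored state as-is.
        return self.memories

    def save_context(self, inputs: dict[str, Any], outputs: dict[str, str]) -> None:
        # Static memory: nothing to persist between runs.
        pass

    def clear(self) -> None:
        pass


memory = SimpleMemory(memories={"user_name": "Ada"})
print(memory.load_memory_variables({}))  # {'user_name': 'Ada'}

The migration guide linked in the deprecation message (https://python.langchain.com/docs/versions/migrating_memory/) covers the replacements for this abstraction.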
langchain_core/messages/content_blocks.py
DELETED
@@ -1,176 +0,0 @@
-"""Types for content blocks."""
-
-import warnings
-from typing import Any, Literal, Union
-
-from pydantic import TypeAdapter, ValidationError
-from typing_extensions import NotRequired, TypedDict
-
-
-class BaseDataContentBlock(TypedDict, total=False):
-    """Base class for data content blocks."""
-
-    mime_type: NotRequired[str]
-    """MIME type of the content block (if needed)."""
-
-
-class URLContentBlock(BaseDataContentBlock):
-    """Content block for data from a URL."""
-
-    type: Literal["image", "audio", "file"]
-    """Type of the content block."""
-    source_type: Literal["url"]
-    """Source type (url)."""
-    url: str
-    """URL for data."""
-
-
-class Base64ContentBlock(BaseDataContentBlock):
-    """Content block for inline data from a base64 string."""
-
-    type: Literal["image", "audio", "file"]
-    """Type of the content block."""
-    source_type: Literal["base64"]
-    """Source type (base64)."""
-    data: str
-    """Data as a base64 string."""
-
-
-class PlainTextContentBlock(BaseDataContentBlock):
-    """Content block for plain text data (e.g., from a document)."""
-
-    type: Literal["file"]
-    """Type of the content block."""
-    source_type: Literal["text"]
-    """Source type (text)."""
-    text: str
-    """Text data."""
-
-
-class IDContentBlock(TypedDict):
-    """Content block for data specified by an identifier."""
-
-    type: Literal["image", "audio", "file"]
-    """Type of the content block."""
-    source_type: Literal["id"]
-    """Source type (id)."""
-    id: str
-    """Identifier for data source."""
-
-
-DataContentBlock = Union[
-    URLContentBlock,
-    Base64ContentBlock,
-    PlainTextContentBlock,
-    IDContentBlock,
-]
-
-_DataContentBlockAdapter: TypeAdapter[DataContentBlock] = TypeAdapter(DataContentBlock)
-
-
-def is_data_content_block(
-    content_block: dict,
-) -> bool:
-    """Check if the content block is a standard data content block.
-
-    Args:
-        content_block: The content block to check.
-
-    Returns:
-        True if the content block is a data content block, False otherwise.
-    """
-    try:
-        _ = _DataContentBlockAdapter.validate_python(content_block)
-    except ValidationError:
-        return False
-    else:
-        return True
-
-
-def convert_to_openai_image_block(content_block: dict[str, Any]) -> dict:
-    """Convert image content block to format expected by OpenAI Chat Completions API.
-
-    Args:
-        content_block: The content block to convert.
-
-    Raises:
-        ValueError: If the source type is not supported or if ``mime_type`` is missing
-            for base64 data.
-
-    Returns:
-        A dictionary formatted for OpenAI's API.
-    """
-    if content_block["source_type"] == "url":
-        return {
-            "type": "image_url",
-            "image_url": {
-                "url": content_block["url"],
-            },
-        }
-    if content_block["source_type"] == "base64":
-        if "mime_type" not in content_block:
-            error_message = "mime_type key is required for base64 data."
-            raise ValueError(error_message)
-        mime_type = content_block["mime_type"]
-        return {
-            "type": "image_url",
-            "image_url": {
-                "url": f"data:{mime_type};base64,{content_block['data']}",
-            },
-        }
-    error_message = "Unsupported source type. Only 'url' and 'base64' are supported."
-    raise ValueError(error_message)
-
-
-def convert_to_openai_data_block(block: dict) -> dict:
-    """Format standard data content block to format expected by OpenAI.
-
-    Args:
-        block: A data content block.
-
-    Raises:
-        ValueError: If the block type or source type is not supported.
-
-    Returns:
-        A dictionary formatted for OpenAI's API.
-    """
-    if block["type"] == "image":
-        formatted_block = convert_to_openai_image_block(block)
-
-    elif block["type"] == "file":
-        if block["source_type"] == "base64":
-            file = {"file_data": f"data:{block['mime_type']};base64,{block['data']}"}
-            if filename := block.get("filename"):
-                file["filename"] = filename
-            elif (metadata := block.get("metadata")) and ("filename" in metadata):
-                file["filename"] = metadata["filename"]
-            else:
-                warnings.warn(
-                    "OpenAI may require a filename for file inputs. Specify a filename "
-                    "in the content block: {'type': 'file', 'source_type': 'base64', "
-                    "'mime_type': 'application/pdf', 'data': '...', "
-                    "'filename': 'my-pdf'}",
-                    stacklevel=1,
-                )
-            formatted_block = {"type": "file", "file": file}
-        elif block["source_type"] == "id":
-            formatted_block = {"type": "file", "file": {"file_id": block["id"]}}
-        else:
-            error_msg = "source_type base64 or id is required for file blocks."
-            raise ValueError(error_msg)
-
-    elif block["type"] == "audio":
-        if block["source_type"] == "base64":
-            audio_format = block["mime_type"].split("/")[-1]
-            formatted_block = {
-                "type": "input_audio",
-                "input_audio": {"data": block["data"], "format": audio_format},
-            }
-        else:
-            error_msg = "source_type base64 is required for audio blocks."
-            raise ValueError(error_msg)
-    else:
-        error_msg = f"Block of type {block['type']} is not supported."
-        raise ValueError(error_msg)
-
-    return formatted_block
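For reference, a small sketch of how the removed is_data_content_block and convert_to_openai_image_block helpers behaved on the old-style blocks defined above (assuming langchain-core 0.3.x; the sample URL and base64 payload are made up):

from langchain_core.messages.content_blocks import (
    convert_to_openai_image_block,
    is_data_content_block,
)

url_block = {"type": "image", "source_type": "url", "url": "https://example.com/cat.png"}
b64_block = {
    "type": "image",
    "source_type": "base64",
    "mime_type": "image/png",
    "data": "iVBORw0KGgo...",  # truncated base64 payload, illustrative only
}

# Both dicts validate against the DataContentBlock union above.
assert is_data_content_block(url_block)
assert is_data_content_block(b64_block)

print(convert_to_openai_image_block(url_block))
# {'type': 'image_url', 'image_url': {'url': 'https://example.com/cat.png'}}
print(convert_to_openai_image_block(b64_block))
# {'type': 'image_url', 'image_url': {'url': 'data:image/png;base64,iVBORw0KGgo...'}}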
langchain_core/prompts/pipeline.py
DELETED
@@ -1,138 +0,0 @@
-"""[DEPRECATED] Pipeline prompt template."""
-
-from typing import Any
-
-from pydantic import model_validator
-
-from langchain_core._api.deprecation import deprecated
-from langchain_core.prompt_values import PromptValue
-from langchain_core.prompts.base import BasePromptTemplate
-from langchain_core.prompts.chat import BaseChatPromptTemplate
-
-
-def _get_inputs(inputs: dict, input_variables: list[str]) -> dict:
-    return {k: inputs[k] for k in input_variables}
-
-
-@deprecated(
-    since="0.3.22",
-    removal="1.0",
-    message=(
-        "This class is deprecated in favor of chaining individual prompts together."
-    ),
-)
-class PipelinePromptTemplate(BasePromptTemplate):
-    """Pipeline prompt template.
-
-    This has been deprecated in favor of chaining individual prompts together in your
-    code; e.g. using a for loop, you could do:
-
-    .. code-block:: python
-
-        my_input = {"key": "value"}
-        for name, prompt in pipeline_prompts:
-            my_input[name] = prompt.invoke(my_input).to_string()
-        my_output = final_prompt.invoke(my_input)
-
-    Prompt template for composing multiple prompt templates together.
-
-    This can be useful when you want to reuse parts of prompts.
-
-    A PipelinePrompt consists of two main parts:
-
-    - final_prompt: This is the final prompt that is returned
-    - pipeline_prompts: This is a list of tuples, consisting
-      of a string (``name``) and a Prompt Template.
-      Each PromptTemplate will be formatted and then passed
-      to future prompt templates as a variable with
-      the same name as ``name``
-
-    """
-
-    final_prompt: BasePromptTemplate
-    """The final prompt that is returned."""
-    pipeline_prompts: list[tuple[str, BasePromptTemplate]]
-    """A list of tuples, consisting of a string (``name``) and a Prompt Template."""
-
-    @classmethod
-    def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
-
-        Returns:
-            ``["langchain", "prompts", "pipeline"]``
-        """
-        return ["langchain", "prompts", "pipeline"]
-
-    @model_validator(mode="before")
-    @classmethod
-    def get_input_variables(cls, values: dict) -> Any:
-        """Get input variables."""
-        created_variables = set()
-        all_variables = set()
-        for k, prompt in values["pipeline_prompts"]:
-            created_variables.add(k)
-            all_variables.update(prompt.input_variables)
-        values["input_variables"] = list(all_variables.difference(created_variables))
-        return values
-
-    def format_prompt(self, **kwargs: Any) -> PromptValue:
-        """Format the prompt with the inputs.
-
-        Args:
-            kwargs: Any arguments to be passed to the prompt template.
-
-        Returns:
-            A formatted string.
-        """
-        for k, prompt in self.pipeline_prompts:
-            inputs = _get_inputs(kwargs, prompt.input_variables)
-            if isinstance(prompt, BaseChatPromptTemplate):
-                kwargs[k] = prompt.format_messages(**inputs)
-            else:
-                kwargs[k] = prompt.format(**inputs)
-        inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
-        return self.final_prompt.format_prompt(**inputs)
-
-    async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
-        """Async format the prompt with the inputs.
-
-        Args:
-            kwargs: Any arguments to be passed to the prompt template.
-
-        Returns:
-            A formatted string.
-        """
-        for k, prompt in self.pipeline_prompts:
-            inputs = _get_inputs(kwargs, prompt.input_variables)
-            if isinstance(prompt, BaseChatPromptTemplate):
-                kwargs[k] = await prompt.aformat_messages(**inputs)
-            else:
-                kwargs[k] = await prompt.aformat(**inputs)
-        inputs = _get_inputs(kwargs, self.final_prompt.input_variables)
-        return await self.final_prompt.aformat_prompt(**inputs)
-
-    def format(self, **kwargs: Any) -> str:
-        """Format the prompt with the inputs.
-
-        Args:
-            kwargs: Any arguments to be passed to the prompt template.
-
-        Returns:
-            A formatted string.
-        """
-        return self.format_prompt(**kwargs).to_string()
-
-    async def aformat(self, **kwargs: Any) -> str:
-        """Async format the prompt with the inputs.
-
-        Args:
-            kwargs: Any arguments to be passed to the prompt template.
-
-        Returns:
-            A formatted string.
-        """
-        return (await self.aformat_prompt(**kwargs)).to_string()
-
-    @property
-    def _prompt_type(self) -> str:
-        raise ValueError
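The deprecation notice above recommends chaining individual prompts instead of using PipelinePromptTemplate; a runnable sketch of that replacement pattern (the prompt texts and variable names here are illustrative) looks like:

from langchain_core.prompts import PromptTemplate

# Each intermediate prompt's output becomes an input variable for later prompts.
pipeline_prompts = [
    ("persona", PromptTemplate.from_template("You are impersonating {person}.")),
    ("example", PromptTemplate.from_template("Q: {example_q}\nA: {example_a}")),
]
final_prompt = PromptTemplate.from_template(
    "{persona}\n\n{example}\n\nNow answer: {question}"
)

my_input = {
    "person": "Ada Lovelace",
    "example_q": "What's your favorite machine?",
    "example_a": "The Analytical Engine",
    "question": "What's your favorite language?",
}
for name, prompt in pipeline_prompts:
    # Format each sub-prompt and feed its text back in under its name.
    my_input[name] = prompt.invoke(my_input).to_string()

print(final_prompt.invoke(my_input).to_string())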
langchain_core/pydantic_v1/__init__.py
DELETED
@@ -1,30 +0,0 @@
-"""Pydantic v1 compatibility shim."""
-
-from importlib import metadata
-
-from pydantic.v1 import *  # noqa: F403
-
-from langchain_core._api.deprecation import warn_deprecated
-
-try:
-    _PYDANTIC_MAJOR_VERSION: int = int(metadata.version("pydantic").split(".")[0])
-except metadata.PackageNotFoundError:
-    _PYDANTIC_MAJOR_VERSION = 0
-
-warn_deprecated(
-    "0.3.0",
-    removal="1.0.0",
-    alternative="pydantic.v1 or pydantic",
-    message=(
-        "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
-        "The langchain_core.pydantic_v1 module was a "
-        "compatibility shim for pydantic v1, and should no longer be used. "
-        "Please update the code to import from Pydantic directly.\n\n"
-        "For example, replace imports like: "
-        "`from langchain_core.pydantic_v1 import BaseModel`\n"
-        "with: `from pydantic import BaseModel`\n"
-        "or the v1 compatibility namespace if you are working in a code base "
-        "that has not been fully upgraded to pydantic 2 yet. "
-        "\tfrom pydantic.v1 import BaseModel\n"
-    ),
-)
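The shim's warning text spells out the migration; in practice it is an import swap, sketched here with a made-up model:

# Before (langchain-core 0.3.x, emitted a deprecation warning on import):
# from langchain_core.pydantic_v1 import BaseModel, Field

# After (langchain-core 1.0, import pydantic directly):
from pydantic import BaseModel, Field


class SearchArgs(BaseModel):
    """Illustrative model; any pydantic v2 model migrates the same way."""

    query: str = Field(description="Search query")
    top_k: int = 5


print(SearchArgs(query="langchain").model_dump())  # {'query': 'langchain', 'top_k': 5}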
langchain_core/pydantic_v1/dataclasses.py
DELETED
@@ -1,23 +0,0 @@
-"""Pydantic v1 compatibility shim."""
-
-from pydantic.v1.dataclasses import *  # noqa: F403
-
-from langchain_core._api import warn_deprecated
-
-warn_deprecated(
-    "0.3.0",
-    removal="1.0.0",
-    alternative="pydantic.v1 or pydantic",
-    message=(
-        "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
-        "The langchain_core.pydantic_v1 module was a "
-        "compatibility shim for pydantic v1, and should no longer be used. "
-        "Please update the code to import from Pydantic directly.\n\n"
-        "For example, replace imports like: "
-        "`from langchain_core.pydantic_v1 import BaseModel`\n"
-        "with: `from pydantic import BaseModel`\n"
-        "or the v1 compatibility namespace if you are working in a code base "
-        "that has not been fully upgraded to pydantic 2 yet. "
-        "\tfrom pydantic.v1 import BaseModel\n"
-    ),
-)
langchain_core/pydantic_v1/main.py
DELETED
@@ -1,23 +0,0 @@
-"""Pydantic v1 compatibility shim."""
-
-from pydantic.v1.main import *  # noqa: F403
-
-from langchain_core._api import warn_deprecated
-
-warn_deprecated(
-    "0.3.0",
-    removal="1.0.0",
-    alternative="pydantic.v1 or pydantic",
-    message=(
-        "As of langchain-core 0.3.0, LangChain uses pydantic v2 internally. "
-        "The langchain_core.pydantic_v1 module was a "
-        "compatibility shim for pydantic v1, and should no longer be used. "
-        "Please update the code to import from Pydantic directly.\n\n"
-        "For example, replace imports like: "
-        "`from langchain_core.pydantic_v1 import BaseModel`\n"
-        "with: `from pydantic import BaseModel`\n"
-        "or the v1 compatibility namespace if you are working in a code base "
-        "that has not been fully upgraded to pydantic 2 yet. "
-        "\tfrom pydantic.v1 import BaseModel\n"
-    ),
-)
langchain_core/tracers/langchain_v1.py
DELETED
@@ -1,31 +0,0 @@
-"""This module is deprecated and will be removed in a future release.
-
-Please use LangChainTracer instead.
-"""
-
-from typing import Any
-
-
-def get_headers(*args: Any, **kwargs: Any) -> Any:  # noqa: ARG001
-    """Throw an error because this has been replaced by get_headers.
-
-    Raises:
-        RuntimeError: Always, because this function is deprecated.
-    """
-    msg = (
-        "get_headers for LangChainTracerV1 is no longer supported. "
-        "Please use LangChainTracer instead."
-    )
-    raise RuntimeError(msg)
-
-
-def LangChainTracerV1(*args: Any, **kwargs: Any) -> Any:  # noqa: N802,ARG001
-    """Throw an error because this has been replaced by ``LangChainTracer``.
-
-    Raises:
-        RuntimeError: Always, because this class is deprecated.
-    """
-    msg = (
-        "LangChainTracerV1 is no longer supported. Please use LangChainTracer instead."
-    )
-    raise RuntimeError(msg)
langchain_core/utils/loading.py
DELETED
@@ -1,35 +0,0 @@
-"""Utilities for loading configurations from langchain_core-hub."""
-
-import warnings
-from typing import Any
-
-from langchain_core._api.deprecation import deprecated
-
-
-@deprecated(
-    since="0.1.30",
-    removal="1.0",
-    message=(
-        "Using the hwchase17/langchain-hub "
-        "repo for prompts is deprecated. Please use "
-        "<https://smith.langchain.com/hub> instead."
-    ),
-)
-def try_load_from_hub(
-    *args: Any,  # noqa: ARG001
-    **kwargs: Any,  # noqa: ARG001
-) -> Any:
-    """[DEPRECATED] Try to load from the old Hub.
-
-    Returns:
-        None always, indicating that we shouldn't load from the old hub.
-    """
-    warnings.warn(
-        "Loading from the deprecated github-based Hub is no longer supported. "
-        "Please use the new LangChain Hub at https://smith.langchain.com/hub instead.",
-        DeprecationWarning,
-        stacklevel=2,
-    )
-    # return None, which indicates that we shouldn't load from old hub
-    # and might just be a filepath for e.g. load_chain
-    return None
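For completeness, this helper was already a stub in 0.3.x: it only warned and returned None without consulting the old github-based hub. A small sketch of that behavior (assuming langchain-core 0.3.x; the path argument is illustrative, since the function ignores its arguments):

import warnings

from langchain_core.utils.loading import try_load_from_hub

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    result = try_load_from_hub("lc://prompts/hello-world/prompt.yaml")

print(result)           # None -- the old hub is never consulted
print(len(caught) > 0)  # True -- deprecation warnings were emitted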