langchain-core 0.3.68__py3-none-any.whl → 0.3.70__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- langchain_core/_api/deprecation.py +3 -3
- langchain_core/_import_utils.py +2 -2
- langchain_core/caches.py +1 -1
- langchain_core/callbacks/manager.py +2 -2
- langchain_core/chat_history.py +20 -16
- langchain_core/document_loaders/base.py +3 -3
- langchain_core/documents/base.py +3 -3
- langchain_core/indexing/api.py +6 -6
- langchain_core/language_models/_utils.py +1 -1
- langchain_core/language_models/base.py +1 -1
- langchain_core/language_models/chat_models.py +8 -8
- langchain_core/language_models/fake_chat_models.py +6 -2
- langchain_core/language_models/llms.py +23 -26
- langchain_core/load/load.py +23 -2
- langchain_core/load/serializable.py +4 -4
- langchain_core/messages/tool.py +1 -3
- langchain_core/messages/utils.py +29 -32
- langchain_core/output_parsers/base.py +1 -1
- langchain_core/output_parsers/openai_functions.py +7 -7
- langchain_core/output_parsers/openai_tools.py +38 -8
- langchain_core/output_parsers/xml.py +7 -7
- langchain_core/outputs/__init__.py +8 -9
- langchain_core/outputs/chat_generation.py +5 -3
- langchain_core/outputs/generation.py +2 -1
- langchain_core/outputs/llm_result.py +14 -14
- langchain_core/prompts/base.py +5 -5
- langchain_core/prompts/chat.py +22 -21
- langchain_core/prompts/dict.py +0 -2
- langchain_core/prompts/pipeline.py +13 -15
- langchain_core/prompts/prompt.py +4 -4
- langchain_core/prompts/string.py +4 -4
- langchain_core/rate_limiters.py +2 -3
- langchain_core/retrievers.py +6 -6
- langchain_core/runnables/base.py +21 -18
- langchain_core/runnables/branch.py +3 -3
- langchain_core/runnables/graph.py +1 -1
- langchain_core/runnables/history.py +3 -3
- langchain_core/runnables/router.py +1 -2
- langchain_core/runnables/utils.py +1 -1
- langchain_core/stores.py +1 -1
- langchain_core/sys_info.py +2 -2
- langchain_core/tools/base.py +7 -7
- langchain_core/tools/structured.py +8 -1
- langchain_core/tracers/core.py +4 -4
- langchain_core/tracers/event_stream.py +5 -5
- langchain_core/tracers/log_stream.py +5 -1
- langchain_core/utils/_merge.py +2 -0
- langchain_core/utils/env.py +2 -2
- langchain_core/utils/function_calling.py +4 -6
- langchain_core/utils/image.py +1 -1
- langchain_core/utils/json_schema.py +64 -59
- langchain_core/utils/mustache.py +9 -4
- langchain_core/vectorstores/base.py +10 -10
- langchain_core/vectorstores/in_memory.py +5 -5
- langchain_core/vectorstores/utils.py +21 -0
- langchain_core/version.py +1 -1
- {langchain_core-0.3.68.dist-info → langchain_core-0.3.70.dist-info}/METADATA +2 -2
- {langchain_core-0.3.68.dist-info → langchain_core-0.3.70.dist-info}/RECORD +60 -60
- {langchain_core-0.3.68.dist-info → langchain_core-0.3.70.dist-info}/WHEEL +0 -0
- {langchain_core-0.3.68.dist-info → langchain_core-0.3.70.dist-info}/entry_points.txt +0 -0
langchain_core/_api/deprecation.py
CHANGED

@@ -469,7 +469,7 @@ def warn_deprecated(

    if not message:
        message = ""
-
+        package_ = (
            package or name.split(".")[0].replace("_", "-")
            if "." in name
            else "LangChain"

@@ -483,14 +483,14 @@ def warn_deprecated(
        if pending:
            message += " will be deprecated in a future version"
        else:
-            message += f" was deprecated in {
+            message += f" was deprecated in {package_} {since}"

        if removal:
            message += f" and will be removed {removal}"

        if alternative_import:
            alt_package = alternative_import.split(".")[0].replace("_", "-")
-            if alt_package ==
+            if alt_package == package_:
                message += f". Use {alternative_import} instead."
            else:
                alt_module, alt_name = alternative_import.rsplit(".", 1)
langchain_core/_import_utils.py
CHANGED

@@ -27,8 +27,8 @@ def import_attr(
    else:
        try:
            module = import_module(f".{module_name}", package=package)
-        except ModuleNotFoundError:
-            msg = f"module '{package!r}.{module_name!r}' not found"
+        except ModuleNotFoundError as err:
+            msg = f"module '{package!r}.{module_name!r}' not found ({err})"
            raise ImportError(msg) from None
    result = getattr(module, attr_name)
    return result
langchain_core/caches.py
CHANGED

@@ -194,7 +194,7 @@ class InMemoryCache(BaseCache):
        """
        if self._maxsize is not None and len(self._cache) == self._maxsize:
            del self._cache[next(iter(self._cache))]
-        self._cache[
+        self._cache[prompt, llm_string] = return_val

    @override
    def clear(self, **kwargs: Any) -> None:
langchain_core/callbacks/manager.py
CHANGED

@@ -1066,7 +1066,7 @@ class CallbackManagerForToolRun(ParentRunManager, ToolManagerMixin):

        Args:
            output (Any): The output of the tool.
-            **kwargs (Any):
+            **kwargs (Any): The keyword arguments to pass to the event handler
        """
        if not self.handlers:
            return

@@ -1470,7 +1470,7 @@ class CallbackManager(BaseCallbackManager):
                input is needed.
                If provided, the inputs are expected to be formatted as a dict.
                The keys will correspond to the named-arguments in the tool.
-            **kwargs (Any):
+            **kwargs (Any): The keyword arguments to pass to the event handler

        Returns:
            CallbackManagerForToolRun: The callback manager for the tool run.
langchain_core/chat_history.py
CHANGED

@@ -65,28 +65,32 @@ class BaseChatMessageHistory(ABC):
        .. code-block:: python

            class FileChatMessageHistory(BaseChatMessageHistory):
-                storage_path:
+                storage_path: str
                session_id: str

-
-
-
-
+                @property
+                def messages(self):
+                    with open(
+                        os.path.join(storage_path, session_id),
+                        "r",
+                        encoding="utf-8",
+                    ) as f:
+                        messages = json.loads(f.read())
                    return messages_from_dict(messages)

-
-
-
+                def add_messages(self, messages: Sequence[BaseMessage]) -> None:
+                    all_messages = list(self.messages) # Existing messages
+                    all_messages.extend(messages) # Add new messages

-
-
-
-
-
+                    serialized = [message_to_dict(message) for message in all_messages]
+                    # Can be further optimized by only writing new messages
+                    # using append mode.
+                    with open(os.path.join(storage_path, session_id), "w") as f:
+                        json.dump(messages, f)

-
-
-
+                def clear(self):
+                    with open(os.path.join(storage_path, session_id), "w") as f:
+                        f.write("[]")
    """

    messages: list[BaseMessage]
langchain_core/document_loaders/base.py
CHANGED

@@ -60,11 +60,11 @@ class BaseLoader(ABC): # noqa: B024
            )
            raise ImportError(msg) from e

-
+            text_splitter_: TextSplitter = RecursiveCharacterTextSplitter()
        else:
-
+            text_splitter_ = text_splitter
        docs = self.load()
-        return
+        return text_splitter_.split_documents(docs)

    # Attention: This method will be upgraded into an abstractmethod once it's
    # implemented in all the existing subclasses.
langchain_core/documents/base.py
CHANGED

@@ -201,14 +201,14 @@ class Blob(BaseMedia):
            Blob instance
        """
        if mime_type is None and guess_type:
-
+            mimetype = mimetypes.guess_type(path)[0] if guess_type else None
        else:
-
+            mimetype = mime_type
        # We do not load the data immediately, instead we treat the blob as a
        # reference to the underlying data.
        return cls(
            data=None,
-            mimetype=
+            mimetype=mimetype,
            encoding=encoding,
            path=path,
            metadata=metadata if metadata is not None else {},
langchain_core/indexing/api.py
CHANGED

@@ -273,7 +273,7 @@ def index(
    vector_store: Union[VectorStore, DocumentIndex],
    *,
    batch_size: int = 100,
-    cleanup: Literal["incremental", "full", "scoped_full"
+    cleanup: Optional[Literal["incremental", "full", "scoped_full"]] = None,
    source_id_key: Union[str, Callable[[Document], str], None] = None,
    cleanup_batch_size: int = 1_000,
    force_update: bool = False,

@@ -540,10 +540,10 @@ def index(
            )
            raise AssertionError(msg)

-
+        source_ids_ = cast("Sequence[str]", source_ids)

        while uids_to_delete := record_manager.list_keys(
-            group_ids=
+            group_ids=source_ids_, before=index_start_dt, limit=cleanup_batch_size
        ):
            # Then delete from vector store.
            _delete(destination, uids_to_delete)

@@ -609,7 +609,7 @@ async def aindex(
    vector_store: Union[VectorStore, DocumentIndex],
    *,
    batch_size: int = 100,
-    cleanup: Literal["incremental", "full", "scoped_full"
+    cleanup: Optional[Literal["incremental", "full", "scoped_full"]] = None,
    source_id_key: Union[str, Callable[[Document], str], None] = None,
    cleanup_batch_size: int = 1_000,
    force_update: bool = False,

@@ -881,10 +881,10 @@ async def aindex(
            )
            raise AssertionError(msg)

-
+        source_ids_ = cast("Sequence[str]", source_ids)

        while uids_to_delete := await record_manager.alist_keys(
-            group_ids=
+            group_ids=source_ids_, before=index_start_dt, limit=cleanup_batch_size
        ):
            # Then delete from vector store.
            await _adelete(destination, uids_to_delete)
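As context for the relaxed `cleanup` annotation above, a brief usage sketch (hedged: `docs`, `record_manager`, and `vector_store` are placeholder objects, not part of this diff):

    from langchain_core.indexing import index

    # `cleanup` accepts "incremental", "full", "scoped_full", or None (the default),
    # matching the Optional[...] annotation shown in the 0.3.70 signature.
    result = index(
        docs,
        record_manager,
        vector_store,
        cleanup="incremental",
        source_id_key="source",
    )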
langchain_core/language_models/_utils.py
CHANGED

@@ -123,7 +123,7 @@ def _normalize_messages(messages: Sequence[BaseMessage]) -> list[BaseMessage]:
                # Subset to (PDF) files and audio, as most relevant chat models
                # support images in OAI format (and some may not yet support the
                # standard data block format)
-                and block.get("type") in
+                and block.get("type") in {"file", "input_audio"}
                and _is_openai_data_block(block)
            ):
                if formatted_message is message:
langchain_core/language_models/base.py
CHANGED

@@ -130,7 +130,7 @@ class BaseLanguageModel(
    )

    @field_validator("verbose", mode="before")
-    def set_verbose(cls, verbose: Optional[bool]) -> bool:
+    def set_verbose(cls, verbose: Optional[bool]) -> bool: # noqa: FBT001
        """If verbose is None, set it.

        This allows users to pass in None as verbose to access the global setting.
langchain_core/language_models/chat_models.py
CHANGED

@@ -1263,8 +1263,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
        Returns:
            The predicted output string.
        """
-
-        result = self([HumanMessage(content=text)], stop=
+        stop_ = None if stop is None else list(stop)
+        result = self([HumanMessage(content=text)], stop=stop_, **kwargs)
        if isinstance(result.content, str):
            return result.content
        msg = "Cannot use predict when output is not a string."

@@ -1279,17 +1279,17 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
        stop: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ) -> BaseMessage:
-
-        return self(messages, stop=
+        stop_ = None if stop is None else list(stop)
+        return self(messages, stop=stop_, **kwargs)

    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
    @override
    async def apredict(
        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
    ) -> str:
-
+        stop_ = None if stop is None else list(stop)
        result = await self._call_async(
-            [HumanMessage(content=text)], stop=
+            [HumanMessage(content=text)], stop=stop_, **kwargs
        )
        if isinstance(result.content, str):
            return result.content

@@ -1305,8 +1305,8 @@ class BaseChatModel(BaseLanguageModel[BaseMessage], ABC):
        stop: Optional[Sequence[str]] = None,
        **kwargs: Any,
    ) -> BaseMessage:
-
-        return await self._call_async(messages, stop=
+        stop_ = None if stop is None else list(stop)
+        return await self._call_async(messages, stop=stop_, **kwargs)

    @property
    @abstractmethod
langchain_core/language_models/fake_chat_models.py
CHANGED

@@ -36,6 +36,8 @@ class FakeMessagesListChatModel(BaseChatModel):
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
+        if self.sleep is not None:
+            time.sleep(self.sleep)
        response = self.responses[self.i]
        if self.i < len(self.responses) - 1:
            self.i += 1

@@ -61,9 +63,9 @@ class FakeListChatModel(SimpleChatModel):
    """List of responses to **cycle** through in order."""
    sleep: Optional[float] = None
    i: int = 0
-    """List of responses to **cycle** through in order."""
-    error_on_chunk_number: Optional[int] = None
    """Internally incremented after every model invocation."""
+    error_on_chunk_number: Optional[int] = None
+    """If set, raise an error on the specified chunk number during streaming."""

    @property
    @override

@@ -79,6 +81,8 @@ class FakeListChatModel(SimpleChatModel):
        **kwargs: Any,
    ) -> str:
        """First try to lookup in queries, else return 'foo' or 'bar'."""
+        if self.sleep is not None:
+            time.sleep(self.sleep)
        response = self.responses[self.i]
        if self.i < len(self.responses) - 1:
            self.i += 1
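For context on the `sleep` handling added above, a small sketch (assuming `FakeListChatModel` is importable from `langchain_core.language_models`, as in recent releases):

    from langchain_core.language_models import FakeListChatModel

    # With the change above, a configured `sleep` is also honored in the
    # non-streaming call path, so this invocation pauses ~0.1 s before
    # returning the first canned response.
    model = FakeListChatModel(responses=["hello", "goodbye"], sleep=0.1)
    print(model.invoke("hi").content)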
langchain_core/language_models/llms.py
CHANGED

@@ -93,10 +93,10 @@ def create_base_retry_decorator(
    Raises:
        ValueError: If the cache is not set and cache is True.
    """
-
+    logging_ = before_sleep_log(logger, logging.WARNING)

    def _before_sleep(retry_state: RetryCallState) -> None:
-
+        logging_(retry_state)
        if run_manager:
            if isinstance(run_manager, AsyncCallbackManagerForLLMRun):
                coro = run_manager.on_retry(retry_state)

@@ -119,7 +119,7 @@ def create_base_retry_decorator(
    # 4 seconds, then up to 10 seconds, then 10 seconds afterwards
    retry_instance: retry_base = retry_if_exception_type(error_types[0])
    for error in error_types[1:]:
-        retry_instance
+        retry_instance |= retry_if_exception_type(error)
    return retry(
        reraise=True,
        stop=stop_after_attempt(max_retries),

@@ -129,7 +129,7 @@ def create_base_retry_decorator(
    )


-def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
+def _resolve_cache(*, cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
    """Resolve the cache."""
    if isinstance(cache, BaseCache):
        llm_cache = cache

@@ -155,7 +155,7 @@ def _resolve_cache(cache: Union[BaseCache, bool, None]) -> Optional[BaseCache]:
def get_prompts(
    params: dict[str, Any],
    prompts: list[str],
-    cache:
+    cache: Union[BaseCache, bool, None] = None, # noqa: FBT001
) -> tuple[dict[int, list], str, list[int], list[str]]:
    """Get prompts that are already cached.


@@ -176,7 +176,7 @@ def get_prompts(
    missing_prompt_idxs = []
    existing_prompts = {}

-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
    for i, prompt in enumerate(prompts):
        if llm_cache:
            cache_val = llm_cache.lookup(prompt, llm_string)

@@ -191,7 +191,7 @@
async def aget_prompts(
    params: dict[str, Any],
    prompts: list[str],
-    cache:
+    cache: Union[BaseCache, bool, None] = None, # noqa: FBT001
) -> tuple[dict[int, list], str, list[int], list[str]]:
    """Get prompts that are already cached. Async version.


@@ -211,7 +211,7 @@ async def aget_prompts(
    missing_prompts = []
    missing_prompt_idxs = []
    existing_prompts = {}
-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
    for i, prompt in enumerate(prompts):
        if llm_cache:
            cache_val = await llm_cache.alookup(prompt, llm_string)

@@ -224,7 +224,7 @@


def update_cache(
-    cache: Union[BaseCache, bool, None],
+    cache: Union[BaseCache, bool, None], # noqa: FBT001
    existing_prompts: dict[int, list],
    llm_string: str,
    missing_prompt_idxs: list[int],

@@ -247,7 +247,7 @@ def update_cache(
    Raises:
        ValueError: If the cache is not set and cache is True.
    """
-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
    for i, result in enumerate(new_results.generations):
        existing_prompts[missing_prompt_idxs[i]] = result
        prompt = prompts[missing_prompt_idxs[i]]

@@ -257,7 +257,7 @@


async def aupdate_cache(
-    cache: Union[BaseCache, bool, None],
+    cache: Union[BaseCache, bool, None], # noqa: FBT001
    existing_prompts: dict[int, list],
    llm_string: str,
    missing_prompt_idxs: list[int],

@@ -280,7 +280,7 @@ async def aupdate_cache(
    Raises:
        ValueError: If the cache is not set and cache is True.
    """
-    llm_cache = _resolve_cache(cache)
+    llm_cache = _resolve_cache(cache=cache)
    for i, result in enumerate(new_results.generations):
        existing_prompts[missing_prompt_idxs[i]] = result
        prompt = prompts[missing_prompt_idxs[i]]

@@ -877,8 +877,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
            **(metadata or {}),
            **self._get_ls_params(stop=stop, **kwargs),
        }
-        else:
-            pass
        if (
            isinstance(callbacks, list)
            and callbacks

@@ -1132,8 +1130,6 @@ class BaseLLM(BaseLanguageModel[str], ABC):
            **(metadata or {}),
            **self._get_ls_params(stop=stop, **kwargs),
        }
-        else:
-            pass
        # Create callback managers
        if isinstance(callbacks, list) and (
            isinstance(callbacks[0], (list, BaseCallbackManager))

@@ -1352,8 +1348,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
    def predict(
        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
    ) -> str:
-
-        return self(text, stop=
+        stop_ = None if stop is None else list(stop)
+        return self(text, stop=stop_, **kwargs)

    @deprecated("0.1.7", alternative="invoke", removal="1.0")
    @override

@@ -1365,8 +1361,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
        **kwargs: Any,
    ) -> BaseMessage:
        text = get_buffer_string(messages)
-
-        content = self(text, stop=
+        stop_ = None if stop is None else list(stop)
+        content = self(text, stop=stop_, **kwargs)
        return AIMessage(content=content)

    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")

@@ -1374,8 +1370,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
    async def apredict(
        self, text: str, *, stop: Optional[Sequence[str]] = None, **kwargs: Any
    ) -> str:
-
-        return await self._call_async(text, stop=
+        stop_ = None if stop is None else list(stop)
+        return await self._call_async(text, stop=stop_, **kwargs)

    @deprecated("0.1.7", alternative="ainvoke", removal="1.0")
    @override

@@ -1387,8 +1383,8 @@ class BaseLLM(BaseLanguageModel[str], ABC):
        **kwargs: Any,
    ) -> BaseMessage:
        text = get_buffer_string(messages)
-
-        content = await self._call_async(text, stop=
+        stop_ = None if stop is None else list(stop)
+        content = await self._call_async(text, stop=stop_, **kwargs)
        return AIMessage(content=content)

    def __str__(self) -> str:

@@ -1418,9 +1414,10 @@ class BaseLLM(BaseLanguageModel[str], ABC):
            ValueError: If the file path is not a string or Path object.

        Example:
-            .. code-block:: python

-
+            .. code-block:: python
+
+                llm.save(file_path="path/llm.yaml")
        """
        # Convert file to Path object.
        save_path = Path(file_path)
langchain_core/load/load.py
CHANGED

@@ -56,6 +56,8 @@ class Reviver:
        additional_import_mappings: Optional[
            dict[tuple[str, ...], tuple[str, ...]]
        ] = None,
+        *,
+        ignore_unserializable_fields: bool = False,
    ) -> None:
        """Initialize the reviver.


@@ -70,6 +72,8 @@ class Reviver:
            additional_import_mappings: A dictionary of additional namespace mappings
                You can use this to override default mappings or add new mappings.
                Defaults to None.
+            ignore_unserializable_fields: Whether to ignore unserializable fields.
+                Defaults to False.
        """
        self.secrets_from_env = secrets_from_env
        self.secrets_map = secrets_map or {}

@@ -88,6 +92,7 @@
            if self.additional_import_mappings
            else ALL_SERIALIZABLE_MAPPINGS
        )
+        self.ignore_unserializable_fields = ignore_unserializable_fields

    def __call__(self, value: dict[str, Any]) -> Any:
        """Revive the value."""

@@ -108,6 +113,8 @@
            and value.get("type") == "not_implemented"
            and value.get("id") is not None
        ):
+            if self.ignore_unserializable_fields:
+                return None
            msg = (
                "Trying to load an object that doesn't implement "
                f"serialization: {value}"

@@ -170,6 +177,7 @@ def loads(
    valid_namespaces: Optional[list[str]] = None,
    secrets_from_env: bool = True,
    additional_import_mappings: Optional[dict[tuple[str, ...], tuple[str, ...]]] = None,
+    ignore_unserializable_fields: bool = False,
) -> Any:
    """Revive a LangChain class from a JSON string.


@@ -187,6 +195,8 @@
        additional_import_mappings: A dictionary of additional namespace mappings
            You can use this to override default mappings or add new mappings.
            Defaults to None.
+        ignore_unserializable_fields: Whether to ignore unserializable fields.
+            Defaults to False.

    Returns:
        Revived LangChain objects.

@@ -194,7 +204,11 @@
    return json.loads(
        text,
        object_hook=Reviver(
-            secrets_map,
+            secrets_map,
+            valid_namespaces,
+            secrets_from_env,
+            additional_import_mappings,
+            ignore_unserializable_fields=ignore_unserializable_fields,
        ),
    )


@@ -207,6 +221,7 @@ def load(
    valid_namespaces: Optional[list[str]] = None,
    secrets_from_env: bool = True,
    additional_import_mappings: Optional[dict[tuple[str, ...], tuple[str, ...]]] = None,
+    ignore_unserializable_fields: bool = False,
) -> Any:
    """Revive a LangChain class from a JSON object.


@@ -225,12 +240,18 @@ def load(
        additional_import_mappings: A dictionary of additional namespace mappings
            You can use this to override default mappings or add new mappings.
            Defaults to None.
+        ignore_unserializable_fields: Whether to ignore unserializable fields.
+            Defaults to False.

    Returns:
        Revived LangChain objects.
    """
    reviver = Reviver(
-        secrets_map,
+        secrets_map,
+        valid_namespaces,
+        secrets_from_env,
+        additional_import_mappings,
+        ignore_unserializable_fields=ignore_unserializable_fields,
    )

    def _load(obj: Any) -> Any:
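To illustrate the new keyword added above, a brief sketch (the serialized payload below is a made-up "not_implemented" entry, not data from this diff):

    from langchain_core.load import load

    serialized = {
        "lc": 1,
        "type": "not_implemented",
        "id": ["my_pkg", "MyClass"],
        "repr": None,
    }

    # Previously such an entry triggered the "Trying to load an object that
    # doesn't implement serialization" error; with the new flag the Reviver
    # returns None for it instead.
    obj = load(serialized, ignore_unserializable_fields=True)
    assert obj is None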
langchain_core/load/serializable.py
CHANGED

@@ -355,19 +355,19 @@ def to_json_not_implemented(obj: object) -> SerializedNotImplemented:
    Returns:
        SerializedNotImplemented
    """
-
+    id_: list[str] = []
    try:
        if hasattr(obj, "__name__"):
-
+            id_ = [*obj.__module__.split("."), obj.__name__]
        elif hasattr(obj, "__class__"):
-
+            id_ = [*obj.__class__.__module__.split("."), obj.__class__.__name__]
    except Exception:
        logger.debug("Failed to serialize object", exc_info=True)

    result: SerializedNotImplemented = {
        "lc": 1,
        "type": "not_implemented",
-        "id":
+        "id": id_,
        "repr": None,
    }
    with contextlib.suppress(Exception):
langchain_core/messages/tool.py
CHANGED

@@ -126,8 +126,6 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
                    raise ValueError(msg) from e
                else:
                    values["content"].append(x)
-            else:
-                pass

        tool_call_id = values["tool_call_id"]
        if isinstance(tool_call_id, (UUID, int, float)):

@@ -366,4 +364,4 @@ def default_tool_chunk_parser(raw_tool_calls: list[dict]) -> list[ToolCallChunk]
def _merge_status(
    left: Literal["success", "error"], right: Literal["success", "error"]
) -> Literal["success", "error"]:
-    return "error" if "error" in
+    return "error" if "error" in {left, right} else "success"