langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- langchain_core/_api/beta_decorator.py +2 -2
- langchain_core/_api/deprecation.py +1 -1
- langchain_core/beta/runnables/context.py +1 -1
- langchain_core/callbacks/base.py +14 -23
- langchain_core/callbacks/file.py +13 -2
- langchain_core/callbacks/manager.py +74 -157
- langchain_core/callbacks/streaming_stdout.py +3 -4
- langchain_core/callbacks/usage.py +2 -12
- langchain_core/chat_history.py +6 -6
- langchain_core/documents/base.py +1 -1
- langchain_core/documents/compressor.py +9 -6
- langchain_core/indexing/base.py +2 -2
- langchain_core/language_models/_utils.py +232 -101
- langchain_core/language_models/base.py +35 -23
- langchain_core/language_models/chat_models.py +248 -54
- langchain_core/language_models/fake_chat_models.py +28 -81
- langchain_core/load/dump.py +3 -4
- langchain_core/messages/__init__.py +30 -24
- langchain_core/messages/ai.py +188 -30
- langchain_core/messages/base.py +164 -25
- langchain_core/messages/block_translators/__init__.py +89 -0
- langchain_core/messages/block_translators/anthropic.py +451 -0
- langchain_core/messages/block_translators/bedrock.py +45 -0
- langchain_core/messages/block_translators/bedrock_converse.py +47 -0
- langchain_core/messages/block_translators/google_genai.py +45 -0
- langchain_core/messages/block_translators/google_vertexai.py +47 -0
- langchain_core/messages/block_translators/groq.py +45 -0
- langchain_core/messages/block_translators/langchain_v0.py +164 -0
- langchain_core/messages/block_translators/ollama.py +45 -0
- langchain_core/messages/block_translators/openai.py +798 -0
- langchain_core/messages/{content_blocks.py → content.py} +303 -278
- langchain_core/messages/human.py +29 -9
- langchain_core/messages/system.py +29 -9
- langchain_core/messages/tool.py +94 -13
- langchain_core/messages/utils.py +34 -234
- langchain_core/output_parsers/base.py +14 -50
- langchain_core/output_parsers/json.py +2 -5
- langchain_core/output_parsers/list.py +2 -7
- langchain_core/output_parsers/openai_functions.py +5 -28
- langchain_core/output_parsers/openai_tools.py +49 -90
- langchain_core/output_parsers/pydantic.py +2 -3
- langchain_core/output_parsers/transform.py +12 -53
- langchain_core/output_parsers/xml.py +9 -17
- langchain_core/prompt_values.py +8 -112
- langchain_core/prompts/chat.py +1 -3
- langchain_core/runnables/base.py +500 -451
- langchain_core/runnables/branch.py +1 -1
- langchain_core/runnables/fallbacks.py +4 -4
- langchain_core/runnables/history.py +1 -1
- langchain_core/runnables/passthrough.py +3 -3
- langchain_core/runnables/retry.py +1 -1
- langchain_core/runnables/router.py +1 -1
- langchain_core/structured_query.py +3 -7
- langchain_core/tools/base.py +14 -41
- langchain_core/tools/convert.py +2 -22
- langchain_core/tools/retriever.py +1 -8
- langchain_core/tools/structured.py +2 -10
- langchain_core/tracers/_streaming.py +6 -7
- langchain_core/tracers/base.py +7 -14
- langchain_core/tracers/core.py +4 -27
- langchain_core/tracers/event_stream.py +4 -15
- langchain_core/tracers/langchain.py +3 -14
- langchain_core/tracers/log_stream.py +2 -3
- langchain_core/utils/_merge.py +45 -7
- langchain_core/utils/function_calling.py +22 -9
- langchain_core/utils/utils.py +29 -0
- langchain_core/version.py +1 -1
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/METADATA +7 -9
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/RECORD +71 -64
- langchain_core/v1/__init__.py +0 -1
- langchain_core/v1/chat_models.py +0 -1047
- langchain_core/v1/messages.py +0 -755
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/WHEEL +0 -0
- {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/entry_points.txt +0 -0
langchain_core/tracers/event_stream.py
CHANGED

@@ -38,7 +38,6 @@ from langchain_core.runnables.utils import (
 from langchain_core.tracers._streaming import _StreamingCallbackHandler
 from langchain_core.tracers.memory_stream import _MemoryStream
 from langchain_core.utils.aiter import aclosing, py_anext
-from langchain_core.v1.messages import MessageV1

 if TYPE_CHECKING:
     from collections.abc import AsyncIterator, Iterator, Sequence
@@ -46,8 +45,6 @@ if TYPE_CHECKING:
     from langchain_core.documents import Document
     from langchain_core.runnables import Runnable, RunnableConfig
     from langchain_core.tracers.log_stream import LogEntry
-    from langchain_core.v1.messages import AIMessage as AIMessageV1
-    from langchain_core.v1.messages import AIMessageChunk as AIMessageChunkV1

 logger = logging.getLogger(__name__)

@@ -300,7 +297,7 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
     async def on_chat_model_start(
         self,
         serialized: dict[str, Any],
-        messages:
+        messages: list[list[BaseMessage]],
         *,
         run_id: UUID,
         tags: Optional[list[str]] = None,
@@ -310,8 +307,6 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         **kwargs: Any,
     ) -> None:
         """Start a trace for an LLM run."""
-        # below cast is because type is converted in handle_event
-        messages = cast("list[list[BaseMessage]]", messages)
         name_ = _assign_name(name, serialized)
         run_type = "chat_model"

@@ -412,18 +407,13 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand
         self,
         token: str,
         *,
-        chunk: Optional[
-            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunkV1]
-        ] = None,
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
         run_id: UUID,
         parent_run_id: Optional[UUID] = None,
         **kwargs: Any,
     ) -> None:
         """Run on new LLM token. Only available when streaming is enabled."""
         run_info = self.run_map.get(run_id)
-        chunk = cast(
-            "Optional[Union[GenerationChunk, ChatGenerationChunk]]", chunk
-        )  # converted in handle_event
         chunk_: Union[GenerationChunk, BaseMessageChunk]

         if run_info is None:
@@ -466,10 +456,9 @@ class _AstreamEventsCallbackHandler(AsyncCallbackHandler, _StreamingCallbackHand

     @override
     async def on_llm_end(
-        self, response:
+        self, response: LLMResult, *, run_id: UUID, **kwargs: Any
     ) -> None:
         """End a trace for an LLM run."""
-        response = cast("LLMResult", response)  # converted in handle_event
         run_info = self.run_map.pop(run_id)
         inputs_ = run_info["inputs"]

@@ -1010,7 +999,7 @@ async def _astream_events_implementation_v2(
             continue

         # If it's the end event corresponding to the root runnable
-        # we
+        # we don't include the input in the event since it's guaranteed
         # to be included in the first event.
         if (
             event["run_id"] == first_event_run_id
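Taken together, these hunks drop the v1 message code paths: the event-stream handlers now receive only `GenerationChunk`/`ChatGenerationChunk` and `LLMResult`, with no runtime casts. A minimal sketch of consuming the v2 event stream (the lambda runnable stands in for a real chat model and is illustrative only):

```python
# Sketch: consume astream_events v2; the RunnableLambda is a stand-in for
# any runnable component. Event names and payloads follow the v2 schema.
import asyncio

from langchain_core.runnables import RunnableLambda


async def main() -> None:
    runnable = RunnableLambda(lambda text: text.upper())
    async for event in runnable.astream_events("hello", version="v2"):
        # Each event is a dict with "event", "name", "run_id", and "data".
        print(event["event"], event["name"])


asyncio.run(main())
```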
langchain_core/tracers/langchain.py
CHANGED

@@ -5,7 +5,7 @@ from __future__ import annotations
 import logging
 from concurrent.futures import ThreadPoolExecutor
 from datetime import datetime, timezone
-from typing import TYPE_CHECKING, Any, Optional, Union, cast
+from typing import TYPE_CHECKING, Any, Optional, Union
 from uuid import UUID

 from langsmith import Client
@@ -21,15 +21,12 @@ from typing_extensions import override

 from langchain_core.env import get_runtime_environment
 from langchain_core.load import dumpd
-from langchain_core.messages.utils import convert_from_v1_message
 from langchain_core.tracers.base import BaseTracer
 from langchain_core.tracers.schemas import Run
-from langchain_core.v1.messages import MessageV1Types

 if TYPE_CHECKING:
     from langchain_core.messages import BaseMessage
     from langchain_core.outputs import ChatGenerationChunk, GenerationChunk
-    from langchain_core.v1.messages import AIMessageChunk, MessageV1

 logger = logging.getLogger(__name__)
 _LOGGED = set()
@@ -116,7 +113,7 @@ class LangChainTracer(BaseTracer):
     def on_chat_model_start(
         self,
         serialized: dict[str, Any],
-        messages:
+        messages: list[list[BaseMessage]],
         *,
         run_id: UUID,
         tags: Optional[list[str]] = None,
@@ -143,12 +140,6 @@ class LangChainTracer(BaseTracer):
         start_time = datetime.now(timezone.utc)
         if metadata:
             kwargs.update({"metadata": metadata})
-        if isinstance(messages[0], MessageV1Types):
-            # Convert from v1 messages to BaseMessage
-            messages = [
-                [convert_from_v1_message(msg) for msg in messages]  # type: ignore[arg-type]
-            ]
-        messages = cast("list[list[BaseMessage]]", messages)
         chat_model_run = Run(
             id=run_id,
             parent_run_id=parent_run_id,
@@ -241,9 +232,7 @@ class LangChainTracer(BaseTracer):
         self,
         token: str,
         run_id: UUID,
-        chunk: Optional[
-            Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]
-        ] = None,
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]] = None,
         parent_run_id: Optional[UUID] = None,
     ) -> Run:
         """Append token event to LLM run and return the run."""
langchain_core/tracers/log_stream.py
CHANGED

@@ -34,7 +34,6 @@ if TYPE_CHECKING:

     from langchain_core.runnables.utils import Input, Output
     from langchain_core.tracers.schemas import Run
-    from langchain_core.v1.messages import AIMessageChunk


 class LogEntry(TypedDict):
@@ -177,7 +176,7 @@ class RunLog(RunLogPatch):
         # Then compare that the ops are the same
         return super().__eq__(other)

-    __hash__ = None
+    __hash__ = None


 T = TypeVar("T")
@@ -486,7 +485,7 @@ class LogStreamCallbackHandler(BaseTracer, _StreamingCallbackHandler):
         self,
         run: Run,
         token: str,
-        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk, AIMessageChunk]],
+        chunk: Optional[Union[GenerationChunk, ChatGenerationChunk]],
     ) -> None:
         """Process new LLM token."""
         index = self._key_map_by_run_id.get(run.id)
langchain_core/utils/_merge.py
CHANGED
@@ -57,6 +57,11 @@ def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]
                 #     "should either occur once or have the same value across "
                 #     "all dicts."
                 # )
+                if (right_k == "index" and merged[right_k].startswith("lc_")) or (
+                    right_k in ("id", "output_version", "model_provider")
+                    and merged[right_k] == right_v
+                ):
+                    continue
                 merged[right_k] += right_v
             elif isinstance(merged[right_k], dict):
                 merged[right_k] = merge_dicts(merged[right_k], right_v)
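A behavior sketch of the new guard; `merge_dicts` sits in the private `langchain_core.utils._merge` module, so this illustrates the change rather than public API. Duplicate `id`/`output_version`/`model_provider` values and `lc_`-prefixed `index` strings are now kept once instead of being string-concatenated across chunks:

```python
from langchain_core.utils._merge import merge_dicts

left = {"id": "lc_abc123", "model_provider": "openai", "content": "Hel"}
right = {"id": "lc_abc123", "model_provider": "openai", "content": "lo"}

# "content" strings still concatenate; "id" and "model_provider" are
# identical across chunks, so the new guard skips concatenation for them.
assert merge_dicts(left, right) == {
    "id": "lc_abc123",
    "model_provider": "openai",
    "content": "Hello",
}
```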
@@ -93,20 +98,53 @@ def merge_lists(left: Optional[list], *others: Optional[list]) -> Optional[list]
             merged = other.copy()
         else:
             for e in other:
-                if isinstance(e, dict) and "index" in e and isinstance(e["index"], int):
+                if (
+                    isinstance(e, dict)
+                    and "index" in e
+                    and (
+                        isinstance(e["index"], int)
+                        or (
+                            isinstance(e["index"], str) and e["index"].startswith("lc_")
+                        )
+                    )
+                ):
                     to_merge = [
                         i
                         for i, e_left in enumerate(merged)
-                        if e_left["index"] == e["index"]
+                        if "index" in e_left and e_left["index"] == e["index"]
                     ]
                     if to_merge:
                         # TODO: Remove this once merge_dict is updated with special
                         # handling for 'type'.
-                        new_e = (
-                            {k: v for k, v in e.items() if k != "type"}
-                            if "type" in e
-                            else e
-                        )
+                        if (left_type := merged[to_merge[0]].get("type")) and (
+                            e.get("type") == "non_standard" and "value" in e
+                        ):
+                            if left_type != "non_standard":
+                                # standard + non_standard
+                                new_e: dict[str, Any] = {
+                                    "extras": {
+                                        k: v
+                                        for k, v in e["value"].items()
+                                        if k != "type"
+                                    }
+                                }
+                            else:
+                                # non_standard + non_standard
+                                new_e = {
+                                    "value": {
+                                        k: v
+                                        for k, v in e["value"].items()
+                                        if k != "type"
+                                    }
+                                }
+                            if "index" in e:
+                                new_e["index"] = e["index"]
+                        else:
+                            new_e = (
+                                {k: v for k, v in e.items() if k != "type"}
+                                if "type" in e
+                                else e
+                            )
                         merged[to_merge[0]] = merge_dicts(merged[to_merge[0]], new_e)
                     else:
                         merged.append(e)
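A sketch of `merge_lists` after this hunk (same private-module caveat): content blocks now also match on `lc_`-prefixed string indexes, and the matched `index` value itself is deduplicated rather than concatenated:

```python
from langchain_core.utils._merge import merge_lists

left = [{"type": "text", "text": "Hel", "index": "lc_0"}]
right = [{"type": "text", "text": "lo", "index": "lc_0"}]

# Blocks match on the "lc_"-prefixed string index; "text" concatenates,
# while the shared "index" is kept once by the merge_dicts guard above.
assert merge_lists(left, right) == [
    {"type": "text", "text": "Hello", "index": "lc_0"}
]
```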
langchain_core/utils/function_calling.py
CHANGED

@@ -277,7 +277,7 @@ def _convert_any_typed_dicts_to_pydantic(
     )
     fields: dict = {}
     for arg, arg_type in annotations_.items():
-        if get_origin(arg_type) is Annotated:
+        if get_origin(arg_type) is Annotated:  # type: ignore[comparison-overlap]
            annotated_args = get_args(arg_type)
            new_arg_type = _convert_any_typed_dicts_to_pydantic(
                annotated_args[0], depth=depth + 1, visited=visited
@@ -575,12 +575,23 @@ def convert_to_openai_tool(

         Added support for OpenAI's image generation built-in tool.
     """
+    from langchain_core.tools import Tool
+
     if isinstance(tool, dict):
         if tool.get("type") in _WellKnownOpenAITools:
             return tool
         # As of 03.12.25 can be "web_search_preview" or "web_search_preview_2025_03_11"
         if (tool.get("type") or "").startswith("web_search_preview"):
             return tool
+    if isinstance(tool, Tool) and (tool.metadata or {}).get("type") == "custom_tool":
+        oai_tool = {
+            "type": "custom",
+            "name": tool.name,
+            "description": tool.description,
+        }
+        if tool.metadata is not None and "format" in tool.metadata:
+            oai_tool["format"] = tool.metadata["format"]
+        return oai_tool
     oai_function = convert_to_openai_function(tool, strict=strict)
     return {"type": "function", "function": oai_function}
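A hedged sketch of the new branch: a `Tool` whose `metadata` carries `"type": "custom_tool"` is converted to an OpenAI `custom` tool instead of a `function` tool. The tool itself is illustrative; an optional `"format"` entry in the metadata would be copied through as shown in the diff:

```python
from langchain_core.tools import Tool
from langchain_core.utils.function_calling import convert_to_openai_tool

# Illustrative tool; the metadata "type" tag is what triggers the new branch.
sql_tool = Tool(
    name="run_sql",
    description="Execute a read-only SQL query.",
    func=lambda query: query,
    metadata={"type": "custom_tool"},
)

assert convert_to_openai_tool(sql_tool) == {
    "type": "custom",
    "name": "run_sql",
    "description": "Execute a read-only SQL query.",
}
```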
@@ -629,15 +640,16 @@ def tool_example_to_messages(

     The list of messages per example by default corresponds to:

-    1
-    2
-    3
-    correctly.
+    1. ``HumanMessage``: contains the content from which content should be extracted.
+    2. ``AIMessage``: contains the extracted information from the model
+    3. ``ToolMessage``: contains confirmation to the model that the model requested a
+       tool correctly.

-    If
+    If ``ai_response`` is specified, there will be a final ``AIMessage`` with that
+    response.

-    The ToolMessage is required because some chat models are hyper-optimized for
-    rather than for an extraction use case.
+    The ``ToolMessage`` is required because some chat models are hyper-optimized for
+    agents rather than for an extraction use case.

     Arguments:
         input: string, the user input
@@ -646,7 +658,7 @@ def tool_example_to_messages(
         tool_outputs: Optional[list[str]], a list of tool call outputs.
             Does not need to be provided. If not provided, a placeholder value
             will be inserted. Defaults to None.
-        ai_response: Optional[str], if provided, content for a final AIMessage
+        ai_response: Optional[str], if provided, content for a final ``AIMessage``.

     Returns:
         A list of messages
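A hedged usage sketch of `tool_example_to_messages` matching the corrected docstring; the Pydantic schema is illustrative:

```python
from pydantic import BaseModel

from langchain_core.utils.function_calling import tool_example_to_messages


class Person(BaseModel):
    """Illustrative extraction schema."""

    name: str


messages = tool_example_to_messages(
    "Alice said hi.", [Person(name="Alice")], ai_response="Extracted one person."
)
# -> HumanMessage, AIMessage (with a tool call), ToolMessage (placeholder
#    confirmation), and a final AIMessage carrying ai_response.
for message in messages:
    print(type(message).__name__)
```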
@@ -728,6 +740,7 @@ def _parse_google_docstring(
     """Parse the function and argument descriptions from the docstring of a function.

     Assumes the function docstring follows Google Python style guide.
+
     """
     if docstring:
         docstring_blocks = docstring.split("\n\n")
langchain_core/utils/utils.py
CHANGED
@@ -9,6 +9,7 @@ import warnings
 from collections.abc import Iterator, Sequence
 from importlib.metadata import version
 from typing import Any, Callable, Optional, Union, overload
+from uuid import uuid4

 from packaging.version import parse
 from pydantic import SecretStr
@@ -466,3 +467,31 @@ def secret_from_env(
             raise ValueError(msg)

     return get_secret_from_env
+
+
+LC_AUTO_PREFIX = "lc_"
+"""LangChain auto-generated ID prefix for messages and content blocks."""
+
+LC_ID_PREFIX = "lc_run-"
+"""Internal tracing/callback system identifier.
+
+Used for:
+- Tracing. Every LangChain operation (LLM call, chain execution, tool use, etc.)
+  gets a unique run_id (UUID)
+- Enables tracking parent-child relationships between operations
+"""
+
+
+def ensure_id(id_val: Optional[str]) -> str:
+    """Ensure the ID is a valid string, generating a new UUID if not provided.
+
+    Auto-generated UUIDs are prefixed by ``'lc_'`` to indicate they are
+    LangChain-generated IDs.
+
+    Args:
+        id_val: Optional string ID value to validate.
+
+    Returns:
+        A string ID, either the validated provided value or a newly generated UUID4.
+    """
+    return id_val or str(f"{LC_AUTO_PREFIX}{uuid4()}")
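A usage sketch of the new helper, importing from the module path shown in this diff:

```python
from uuid import UUID

from langchain_core.utils.utils import LC_AUTO_PREFIX, ensure_id

# A provided ID passes through unchanged.
assert ensure_id("msg_123") == "msg_123"

# A missing ID becomes an "lc_"-prefixed UUID4.
generated = ensure_id(None)
assert generated.startswith(LC_AUTO_PREFIX)
UUID(generated.removeprefix(LC_AUTO_PREFIX))  # parses as a valid UUID
```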
{langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0a2.dist-info}/METADATA
CHANGED

@@ -1,12 +1,12 @@
 Metadata-Version: 2.1
 Name: langchain-core
-Version: 0.4.0.dev0
+Version: 1.0.0a2
 Summary: Building applications with LLMs through composability
 License: MIT
 Project-URL: Source Code, https://github.com/langchain-ai/langchain/tree/master/libs/core
 Project-URL: Release Notes, https://github.com/langchain-ai/langchain/releases?q=tag%3A%22langchain-core%3D%3D0%22&expanded=true
 Project-URL: repository, https://github.com/langchain-ai/langchain
-Requires-Python: >=3.
+Requires-Python: >=3.10
 Requires-Dist: langsmith>=0.3.45
 Requires-Dist: tenacity!=8.4.0,<10.0.0,>=8.1.0
 Requires-Dist: jsonpatch<2.0,>=1.33
@@ -39,13 +39,13 @@ For full documentation see the [API reference](https://python.langchain.com/api_

 ## 1️⃣ Core Interface: Runnables

-The concept of a Runnable is central to LangChain Core – it is the interface that most LangChain Core components implement, giving them
+The concept of a `Runnable` is central to LangChain Core – it is the interface that most LangChain Core components implement, giving them

-- a common invocation interface (invoke
+- a common invocation interface (`invoke()`, `batch()`, `stream()`, etc.)
 - built-in utilities for retries, fallbacks, schemas and runtime configurability
-- easy deployment with [
+- easy deployment with [LangGraph](https://github.com/langchain-ai/langgraph)

-For more check out the [runnable docs](https://python.langchain.com/docs/
+For more check out the [runnable docs](https://python.langchain.com/docs/concepts/runnables/). Examples of components that implement the interface include: LLMs, Chat Models, Prompts, Retrievers, Tools, Output Parsers.

 You can use LangChain Core objects in two ways:
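A minimal illustration of the common interface described in the hunk above, with a lambda standing in for a real component:

```python
from langchain_core.runnables import RunnableLambda

runnable = RunnableLambda(lambda x: x + 1)

print(runnable.invoke(1))         # 2
print(runnable.batch([1, 2, 3]))  # [2, 3, 4]
print(list(runnable.stream(1)))   # [2]
```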
@@ -69,7 +69,7 @@ LangChain Expression Language (LCEL) is a _declarative language_ for composing L

 LangChain Core compiles LCEL sequences to an _optimized execution plan_, with automatic parallelization, streaming, tracing, and async support.

-For more check out the [LCEL docs](https://python.langchain.com/docs/
+For more check out the [LCEL docs](https://python.langchain.com/docs/concepts/lcel/).

 

@@ -77,8 +77,6 @@ For more advanced use cases, also check out [LangGraph](https://github.com/langc

 ## 📕 Releases & Versioning

-`langchain-core` is currently on version `0.1.x`.
-
 As `langchain-core` contains the base abstractions and runtime for the whole LangChain ecosystem, we will communicate any breaking changes with advance notice and version bumps. The exception for this is anything in `langchain_core.beta`. The reason for `langchain_core.beta` is that given the rate of change of the field, being able to move quickly is still a priority, and this module is our attempt to do so.

 Minor version increases will occur for:
|