langchain-core 0.3.75__py3-none-any.whl → 0.3.76__py3-none-any.whl
- langchain_core/_api/beta_decorator.py +17 -40
- langchain_core/_api/deprecation.py +19 -6
- langchain_core/_api/path.py +19 -2
- langchain_core/_import_utils.py +7 -0
- langchain_core/agents.py +10 -6
- langchain_core/beta/runnables/context.py +1 -2
- langchain_core/callbacks/base.py +11 -4
- langchain_core/callbacks/manager.py +81 -69
- langchain_core/callbacks/usage.py +4 -2
- langchain_core/chat_history.py +4 -6
- langchain_core/document_loaders/base.py +34 -9
- langchain_core/document_loaders/langsmith.py +3 -0
- langchain_core/documents/base.py +35 -10
- langchain_core/documents/transformers.py +4 -2
- langchain_core/embeddings/fake.py +8 -5
- langchain_core/env.py +2 -3
- langchain_core/example_selectors/base.py +12 -0
- langchain_core/exceptions.py +7 -0
- langchain_core/globals.py +17 -28
- langchain_core/indexing/api.py +56 -44
- langchain_core/indexing/base.py +5 -8
- langchain_core/indexing/in_memory.py +23 -3
- langchain_core/language_models/__init__.py +3 -2
- langchain_core/language_models/base.py +31 -20
- langchain_core/language_models/chat_models.py +94 -25
- langchain_core/language_models/fake_chat_models.py +5 -7
- langchain_core/language_models/llms.py +49 -17
- langchain_core/load/dump.py +2 -3
- langchain_core/load/load.py +15 -1
- langchain_core/load/serializable.py +38 -43
- langchain_core/memory.py +7 -3
- langchain_core/messages/ai.py +36 -19
- langchain_core/messages/base.py +13 -6
- langchain_core/messages/content_blocks.py +23 -2
- langchain_core/messages/human.py +2 -6
- langchain_core/messages/system.py +2 -6
- langchain_core/messages/tool.py +33 -13
- langchain_core/messages/utils.py +182 -72
- langchain_core/output_parsers/base.py +5 -2
- langchain_core/output_parsers/json.py +4 -4
- langchain_core/output_parsers/list.py +7 -22
- langchain_core/output_parsers/openai_functions.py +3 -0
- langchain_core/output_parsers/openai_tools.py +6 -1
- langchain_core/output_parsers/pydantic.py +4 -0
- langchain_core/output_parsers/string.py +5 -1
- langchain_core/output_parsers/xml.py +19 -19
- langchain_core/outputs/chat_generation.py +18 -7
- langchain_core/outputs/generation.py +14 -3
- langchain_core/outputs/llm_result.py +8 -1
- langchain_core/prompt_values.py +10 -4
- langchain_core/prompts/base.py +4 -9
- langchain_core/prompts/chat.py +87 -58
- langchain_core/prompts/dict.py +16 -8
- langchain_core/prompts/few_shot.py +9 -11
- langchain_core/prompts/few_shot_with_templates.py +5 -1
- langchain_core/prompts/image.py +12 -5
- langchain_core/prompts/message.py +5 -6
- langchain_core/prompts/pipeline.py +13 -8
- langchain_core/prompts/prompt.py +22 -8
- langchain_core/prompts/string.py +18 -10
- langchain_core/prompts/structured.py +7 -2
- langchain_core/rate_limiters.py +2 -2
- langchain_core/retrievers.py +7 -6
- langchain_core/runnables/base.py +402 -183
- langchain_core/runnables/branch.py +14 -19
- langchain_core/runnables/config.py +9 -15
- langchain_core/runnables/configurable.py +34 -19
- langchain_core/runnables/fallbacks.py +20 -13
- langchain_core/runnables/graph.py +44 -37
- langchain_core/runnables/graph_ascii.py +40 -17
- langchain_core/runnables/graph_mermaid.py +27 -15
- langchain_core/runnables/graph_png.py +27 -31
- langchain_core/runnables/history.py +55 -58
- langchain_core/runnables/passthrough.py +44 -21
- langchain_core/runnables/retry.py +9 -5
- langchain_core/runnables/router.py +9 -8
- langchain_core/runnables/schema.py +2 -0
- langchain_core/runnables/utils.py +51 -89
- langchain_core/stores.py +13 -25
- langchain_core/sys_info.py +9 -8
- langchain_core/tools/base.py +30 -23
- langchain_core/tools/convert.py +24 -13
- langchain_core/tools/simple.py +35 -3
- langchain_core/tools/structured.py +25 -2
- langchain_core/tracers/base.py +2 -2
- langchain_core/tracers/context.py +5 -1
- langchain_core/tracers/core.py +109 -39
- langchain_core/tracers/evaluation.py +22 -26
- langchain_core/tracers/event_stream.py +40 -27
- langchain_core/tracers/langchain.py +12 -3
- langchain_core/tracers/langchain_v1.py +10 -2
- langchain_core/tracers/log_stream.py +56 -17
- langchain_core/tracers/root_listeners.py +4 -20
- langchain_core/tracers/run_collector.py +6 -16
- langchain_core/tracers/schemas.py +5 -1
- langchain_core/utils/aiter.py +14 -6
- langchain_core/utils/env.py +3 -0
- langchain_core/utils/function_calling.py +37 -20
- langchain_core/utils/interactive_env.py +6 -2
- langchain_core/utils/iter.py +11 -3
- langchain_core/utils/json.py +5 -2
- langchain_core/utils/json_schema.py +15 -5
- langchain_core/utils/loading.py +5 -1
- langchain_core/utils/mustache.py +24 -15
- langchain_core/utils/pydantic.py +32 -4
- langchain_core/utils/utils.py +24 -8
- langchain_core/vectorstores/base.py +7 -20
- langchain_core/vectorstores/in_memory.py +18 -12
- langchain_core/vectorstores/utils.py +18 -12
- langchain_core/version.py +1 -1
- langchain_core-0.3.76.dist-info/METADATA +77 -0
- langchain_core-0.3.76.dist-info/RECORD +174 -0
- langchain_core-0.3.75.dist-info/METADATA +0 -106
- langchain_core-0.3.75.dist-info/RECORD +0 -174
- {langchain_core-0.3.75.dist-info → langchain_core-0.3.76.dist-info}/WHEEL +0 -0
- {langchain_core-0.3.75.dist-info → langchain_core-0.3.76.dist-info}/entry_points.txt +0 -0
langchain_core/messages/tool.py
CHANGED
@@ -32,7 +32,7 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
 
             from langchain_core.messages import ToolMessage
 
-            ToolMessage(content='42', tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL')
+            ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")
 
 
     Example: A ToolMessage where only part of the tool output is sent to the model
@@ -45,7 +45,8 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
             from langchain_core.messages import ToolMessage
 
             tool_output = {
-                "stdout": "From the graph we can see that the correlation between x and y is ...",
+                "stdout": "From the graph we can see that the correlation between "
+                "x and y is ...",
                 "stderr": None,
                 "artifacts": {"type": "image", "base64_data": "/9j/4gIcSU..."},
             }
@@ -53,14 +54,14 @@ class ToolMessage(BaseMessage, ToolOutputMixin):
             ToolMessage(
                 content=tool_output["stdout"],
                 artifact=tool_output,
-                tool_call_id='call_Jja7J89XsjrOLA5r!MEOW!SL',
+                tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL",
             )
 
     The tool_call_id field is used to associate the tool call request with the
     tool call response. This is useful in situations where a chat model is able
     to request multiple tool calls in parallel.
 
-    """
+    """
 
     tool_call_id: str
     """Tool call that this message is responding to."""
@@ -184,11 +185,7 @@ class ToolCall(TypedDict):
 
     .. code-block:: python
 
-        {
-            "name": "foo",
-            "args": {"a": 1},
-            "id": "123"
-        }
+        {"name": "foo", "args": {"a": 1}, "id": "123"}
 
     This represents a request to call the tool named "foo" with arguments {"a": 1}
    and an identifier of "123".
@@ -220,6 +217,9 @@ def tool_call(
         name: The name of the tool to be called.
         args: The arguments to the tool call.
         id: An identifier associated with the tool call.
+
+    Returns:
+        The created tool call.
     """
     return ToolCall(name=name, args=args, id=id, type="tool_call")
 
@@ -236,12 +236,12 @@ class ToolCallChunk(TypedDict):
     .. code-block:: python
 
         left_chunks = [ToolCallChunk(name="foo", args='{"a":', index=0)]
-        right_chunks = [ToolCallChunk(name=None, args='1}', index=0)]
+        right_chunks = [ToolCallChunk(name=None, args="1}", index=0)]
 
         (
             AIMessageChunk(content="", tool_call_chunks=left_chunks)
             + AIMessageChunk(content="", tool_call_chunks=right_chunks)
-        ).tool_call_chunks == [ToolCallChunk(name='foo', args='{"a":1}', index=0)]
+        ).tool_call_chunks == [ToolCallChunk(name="foo", args='{"a":1}', index=0)]
 
     """
 
@@ -270,6 +270,9 @@ def tool_call_chunk(
         args: The arguments to the tool call.
         id: An identifier associated with the tool call.
         index: The index of the tool call in a sequence.
+
+    Returns:
+        The created tool call chunk.
     """
     return ToolCallChunk(
         name=name, args=args, id=id, index=index, type="tool_call_chunk"
@@ -308,6 +311,9 @@ def invalid_tool_call(
         args: The arguments to the tool call.
         id: An identifier associated with the tool call.
         error: An error message associated with the tool call.
+
+    Returns:
+        The created invalid tool call.
     """
     return InvalidToolCall(
         name=name, args=args, id=id, error=error, type="invalid_tool_call"
@@ -317,7 +323,14 @@ def invalid_tool_call(
 def default_tool_parser(
     raw_tool_calls: list[dict],
 ) -> tuple[list[ToolCall], list[InvalidToolCall]]:
-    """Best-effort parsing of tools."""
+    """Best-effort parsing of tools.
+
+    Args:
+        raw_tool_calls: List of raw tool call dicts to parse.
+
+    Returns:
+        A list of tool calls and invalid tool calls.
+    """
     tool_calls = []
     invalid_tool_calls = []
     for raw_tool_call in raw_tool_calls:
@@ -345,7 +358,14 @@ def default_tool_parser(
 
 
 def default_tool_chunk_parser(raw_tool_calls: list[dict]) -> list[ToolCallChunk]:
-    """Best-effort parsing of tool chunks."""
+    """Best-effort parsing of tool chunks.
+
+    Args:
+        raw_tool_calls: List of raw tool call dicts to parse.
+
+    Returns:
+        List of parsed ToolCallChunk objects.
+    """
     tool_call_chunks = []
     for tool_call in raw_tool_calls:
         if "function" not in tool_call:
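The docstring reworks above are easy to check against the library itself. A minimal sketch using only public langchain_core imports (the call ID is the placeholder value from the docstring):

    from langchain_core.messages import AIMessageChunk, ToolCallChunk, ToolMessage

    # A tool result is tied back to the originating request via tool_call_id.
    ToolMessage(content="42", tool_call_id="call_Jja7J89XsjrOLA5r!MEOW!SL")

    # Chunks sharing an index merge their partial JSON args when summed.
    left = AIMessageChunk(
        content="", tool_call_chunks=[ToolCallChunk(name="foo", args='{"a":', index=0)]
    )
    right = AIMessageChunk(
        content="", tool_call_chunks=[ToolCallChunk(name=None, args="1}", index=0)]
    )
    assert (left + right).tool_call_chunks[0]["args"] == '{"a":1}'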
langchain_core/messages/utils.py
CHANGED
@@ -42,12 +42,17 @@ from langchain_core.messages.system import SystemMessage, SystemMessageChunk
 from langchain_core.messages.tool import ToolCall, ToolMessage, ToolMessageChunk
 
 if TYPE_CHECKING:
-    from langchain_text_splitters import TextSplitter
-
     from langchain_core.language_models import BaseLanguageModel
     from langchain_core.prompt_values import PromptValue
     from langchain_core.runnables.base import Runnable
 
+try:
+    from langchain_text_splitters import TextSplitter
+
+    _HAS_LANGCHAIN_TEXT_SPLITTERS = True
+except ImportError:
+    _HAS_LANGCHAIN_TEXT_SPLITTERS = False
+
 logger = logging.getLogger(__name__)
 
 
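The import move above is the one functional change in this hunk: TextSplitter is no longer a TYPE_CHECKING-only import, and the module now records at import time whether the optional langchain-text-splitters package is installed. The same guard pattern in isolation (the split_text_safe helper is hypothetical, for illustration only):

    # Probe the optional dependency once; call sites branch on the flag
    # instead of wrapping every use in try/except.
    try:
        from langchain_text_splitters import TextSplitter

        _HAS_LANGCHAIN_TEXT_SPLITTERS = True
    except ImportError:
        _HAS_LANGCHAIN_TEXT_SPLITTERS = False


    def split_text_safe(text: str, splitter: object) -> list[str]:
        # Hypothetical helper mirroring the dispatch trim_messages now does.
        if _HAS_LANGCHAIN_TEXT_SPLITTERS and isinstance(splitter, TextSplitter):
            return splitter.split_text(text)
        return [text]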
@@ -182,7 +187,7 @@ def messages_from_dict(messages: Sequence[dict]) -> list[BaseMessage]:
     return [_message_from_dict(m) for m in messages]
 
 
-def message_chunk_to_message(chunk: BaseMessageChunk) -> BaseMessage:
+def message_chunk_to_message(chunk: BaseMessage) -> BaseMessage:
     """Convert a message chunk to a message.
 
     Args:
@@ -361,7 +366,7 @@ def convert_to_messages(
         list of messages (BaseMessages).
     """
     # Import here to avoid circular imports
-    from langchain_core.prompt_values import PromptValue
+    from langchain_core.prompt_values import PromptValue  # noqa: PLC0415
 
     if isinstance(messages, PromptValue):
         return messages.to_messages()
@@ -386,7 +391,8 @@ def _runnable_support(func: Callable) -> Callable:
         list[BaseMessage],
         Runnable[Sequence[MessageLikeRepresentation], list[BaseMessage]],
     ]:
-        from langchain_core.runnables.base import RunnableLambda
+        # Import locally to prevent circular import.
+        from langchain_core.runnables.base import RunnableLambda  # noqa: PLC0415
 
         if messages is not None:
             return func(messages, **kwargs)
@@ -424,11 +430,16 @@ def filter_messages(
         exclude_ids: Message IDs to exclude. Default is None.
         exclude_tool_calls: Tool call IDs to exclude. Default is None.
             Can be one of the following:
-            - ``True``: Each ``AIMessages`` with tool calls and all ``ToolMessages`` will be excluded.
+
+            - ``True``: Each ``AIMessages`` with tool calls and all ``ToolMessages``
+              will be excluded.
             - a sequence of tool call IDs to exclude:
+
                 - ToolMessages with the corresponding tool call ID will be excluded.
-                - The ``tool_calls`` in the AIMessage will be updated to exclude matching tool calls.
-                  If all tool_calls are filtered from an AIMessage, the whole message is excluded.
+                - The ``tool_calls`` in the AIMessage will be updated to exclude matching
+                  tool calls.
+                  If all tool_calls are filtered from an AIMessage,
+                  the whole message is excluded.
 
     Returns:
         A list of Messages that meets at least one of the incl_* conditions and none
@@ -441,14 +452,25 @@ def filter_messages(
     Example:
         .. code-block:: python
 
-            from langchain_core.messages import filter_messages, AIMessage, HumanMessage, SystemMessage
+            from langchain_core.messages import (
+                filter_messages,
+                AIMessage,
+                HumanMessage,
+                SystemMessage,
+            )
 
             messages = [
                 SystemMessage("you're a good assistant."),
                 HumanMessage("what's your name", id="foo", name="example_user"),
                 AIMessage("steve-o", id="bar", name="example_assistant"),
-                HumanMessage("what's your favorite color", id="baz",),
-                AIMessage("silicon blue", id="blah",),
+                HumanMessage(
+                    "what's your favorite color",
+                    id="baz",
+                ),
+                AIMessage(
+                    "silicon blue",
+                    id="blah",
+                ),
             ]
 
             filter_messages(
@@ -465,7 +487,7 @@ def filter_messages(
                 HumanMessage("what's your name", id="foo", name="example_user"),
             ]
 
-    """
+    """
     messages = convert_to_messages(messages)
     filtered: list[BaseMessage] = []
     for msg in messages:
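For reference, the exclude_tool_calls semantics spelled out above can be exercised directly (a sketch; the tool names and IDs are invented):

    from langchain_core.messages import AIMessage, ToolMessage, filter_messages

    messages = [
        AIMessage(
            "",
            tool_calls=[
                {"name": "add", "args": {"x": 1}, "id": "call_1", "type": "tool_call"},
                {"name": "sub", "args": {"x": 2}, "id": "call_2", "type": "tool_call"},
            ],
            id="ai",
        ),
        ToolMessage("3", tool_call_id="call_1"),
        ToolMessage("-1", tool_call_id="call_2"),
    ]

    # The ToolMessage for "call_2" is dropped and the AIMessage's tool_calls
    # list is rewritten without it; if every tool call were excluded, the
    # AIMessage itself would be dropped as well.
    kept = filter_messages(messages, exclude_tool_calls=["call_2"])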
@@ -544,12 +566,14 @@ def merge_message_runs(
     Returns:
         list of BaseMessages with consecutive runs of message types merged into single
         messages. By default, if two messages being merged both have string contents,
-        the merged content is a concatenation of the two strings with a new-line separator.
+        the merged content is a concatenation of the two strings with a new-line
+        separator.
         The separator inserted between message chunks can be controlled by specifying
-        any string with ``chunk_separator``. If at least one of the messages has a list
-        content blocks, the merged content is a list of content blocks.
+        any string with ``chunk_separator``. If at least one of the messages has a list
+        of content blocks, the merged content is a list of content blocks.
 
     Example:
+
         .. code-block:: python
 
             from langchain_core.messages import (
@@ -562,16 +586,33 @@ def merge_message_runs(
 
             messages = [
                 SystemMessage("you're a good assistant."),
-                HumanMessage("what's your favorite color", id="foo",),
-                HumanMessage("wait your favorite food", id="bar",),
+                HumanMessage(
+                    "what's your favorite color",
+                    id="foo",
+                ),
+                HumanMessage(
+                    "wait your favorite food",
+                    id="bar",
+                ),
                 AIMessage(
                     "my favorite colo",
-                    tool_calls=[ToolCall(name="blah_tool", args={"x": 2}, id="123", type="tool_call")],
+                    tool_calls=[
+                        ToolCall(
+                            name="blah_tool", args={"x": 2}, id="123", type="tool_call"
+                        )
+                    ],
                     id="baz",
                 ),
                 AIMessage(
                     [{"type": "text", "text": "my favorite dish is lasagna"}],
-                    tool_calls=[ToolCall(name="blah_tool", args={"x": -10}, id="456", type="tool_call")],
+                    tool_calls=[
+                        ToolCall(
+                            name="blah_tool",
+                            args={"x": -10},
+                            id="456",
+                            type="tool_call",
+                        )
+                    ],
                     id="blur",
                 ),
             ]
@@ -582,21 +623,34 @@ def merge_message_runs(
 
             [
                 SystemMessage("you're a good assistant."),
-                HumanMessage("what's your favorite color\\nwait your favorite food", id="foo",),
+                HumanMessage(
+                    "what's your favorite color\\n"
+                    "wait your favorite food", id="foo",
+                ),
                 AIMessage(
                     [
                         "my favorite colo",
                         {"type": "text", "text": "my favorite dish is lasagna"}
                     ],
                     tool_calls=[
-                        ToolCall({"name": "blah_tool", "args": {"x": 2}, "id": "123", "type": "tool_call"}),
-                        ToolCall({"name": "blah_tool", "args": {"x": -10}, "id": "456", "type": "tool_call"})
+                        ToolCall({
+                            "name": "blah_tool",
+                            "args": {"x": 2},
+                            "id": "123",
+                            "type": "tool_call"
+                        }),
+                        ToolCall({
+                            "name": "blah_tool",
+                            "args": {"x": -10},
+                            "id": "456",
+                            "type": "tool_call"
+                        })
                     ]
                     id="baz"
                 ),
             ]
 
-    """
+    """
     if not messages:
         return []
     messages = convert_to_messages(messages)
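A quick way to see the merge semantics documented above, including the chunk_separator knob:

    from langchain_core.messages import HumanMessage, merge_message_runs

    merged = merge_message_runs(
        [HumanMessage("hello"), HumanMessage("world")],
        chunk_separator=" ",
    )
    # merged[0].content == "hello world"; the default separator is a newline.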
@@ -656,8 +710,8 @@ def trim_messages(
     properties:
 
     1. The resulting chat history should be valid. Most chat models expect that chat
-       history starts with either (1) a ``HumanMessage`` or (2) a ``SystemMessage`` followed
-       by a ``HumanMessage``. To achieve this, set ``start_on="human"``.
+       history starts with either (1) a ``HumanMessage`` or (2) a ``SystemMessage``
+       followed by a ``HumanMessage``. To achieve this, set ``start_on="human"``.
        In addition, generally a ``ToolMessage`` can only appear after an ``AIMessage``
        that involved a tool call.
        Please see the following link for more information about messages:
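The start_on constraint described in point 1, in practice (a sketch; token_counter=len simply counts each message as one token):

    from langchain_core.messages import (
        AIMessage,
        HumanMessage,
        SystemMessage,
        trim_messages,
    )

    history = [
        SystemMessage("you're a good assistant"),
        HumanMessage("hi"),
        AIMessage("hello"),
        HumanMessage("tell me a joke"),
    ]

    # Keep the last run of messages, always starting on a human turn so the
    # trimmed history remains valid input for a chat model.
    trim_messages(
        history,
        max_tokens=2,
        strategy="last",
        token_counter=len,
        start_on="human",
        include_system=True,
    )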
@@ -688,9 +742,11 @@ def trim_messages(
             exact token counting is not necessary.
 
         strategy: Strategy for trimming.
+
             - "first": Keep the first <= n_count tokens of the messages.
             - "last": Keep the last <= n_count tokens of the messages.
-            Default is "last".
+
+            Default is ``'last'``.
         allow_partial: Whether to split a message if only part of the message can be
             included. If ``strategy="last"`` then the last partial contents of a message
             are included. If ``strategy="first"`` then the first partial contents of a
@@ -748,14 +804,18 @@ def trim_messages(
             )
 
             messages = [
-                SystemMessage("you're a good assistant, you always respond with a joke."),
+                SystemMessage(
+                    "you're a good assistant, you always respond with a joke."
+                ),
                 HumanMessage("i wonder why it's called langchain"),
                 AIMessage(
-                    'Well, I guess they thought "WordRope" and "SentenceString" just didn\'t have the same ring to it!'
+                    'Well, I guess they thought "WordRope" and "SentenceString" just '
+                    "didn't have the same ring to it!"
                 ),
                 HumanMessage("and who is harrison chasing anyways"),
                 AIMessage(
-                    "Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"
+                    "Hmmm let me think.\n\nWhy, he's probably chasing after the last "
+                    "cup of coffee in the office!"
                 ),
                 HumanMessage("what do you call a speechless parrot"),
             ]
@@ -780,8 +840,10 @@ def trim_messages(
         .. code-block:: python
 
             [
-                SystemMessage(content="you're a good assistant, you always respond with a joke."),
-                HumanMessage(content="what do you call a speechless parrot"),
+                SystemMessage(
+                    content="you're a good assistant, you always respond with a joke."
+                ),
+                HumanMessage(content="what do you call a speechless parrot"),
             ]
 
     Trim chat history based on the message count, keeping the SystemMessage if
@@ -811,10 +873,15 @@ def trim_messages(
         .. code-block:: python
 
             [
-                SystemMessage(content="you're a good assistant, you always respond with a joke."),
-                HumanMessage(content="and who is harrison chasing anyways"),
-                AIMessage(content="Hmmm let me think.\n\nWhy, he's probably chasing after the last cup of coffee in the office!"),
-                HumanMessage(content="what do you call a speechless parrot"),
+                SystemMessage(
+                    content="you're a good assistant, you always respond with a joke."
+                ),
+                HumanMessage(content="and who is harrison chasing anyways"),
+                AIMessage(
+                    content="Hmmm let me think.\n\nWhy, he's probably chasing after "
+                    "the last cup of coffee in the office!"
+                ),
+                HumanMessage(content="what do you call a speechless parrot"),
             ]
 
 
@@ -825,7 +892,9 @@ def trim_messages(
 
             messages = [
                 SystemMessage("This is a 4 token text. The full message is 10 tokens."),
-                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"),
+                HumanMessage(
+                    "This is a 4 token text. The full message is 10 tokens.", id="first"
+                ),
                 AIMessage(
                     [
                         {"type": "text", "text": "This is the FIRST 4 token block."},
@@ -833,10 +902,16 @@ def trim_messages(
                     ],
                     id="second",
                 ),
-                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="third"),
-                AIMessage("This is a 4 token text. The full message is 10 tokens.", id="fourth"),
+                HumanMessage(
+                    "This is a 4 token text. The full message is 10 tokens.", id="third"
+                ),
+                AIMessage(
+                    "This is a 4 token text. The full message is 10 tokens.",
+                    id="fourth",
+                ),
             ]
 
+
             def dummy_token_counter(messages: list[BaseMessage]) -> int:
                 # treat each message like it adds 3 default tokens at the beginning
                 # of the message and at the end of the message. 3 + 4 + 3 = 10 tokens
@@ -849,9 +924,17 @@ def trim_messages(
                 count = 0
                 for msg in messages:
                     if isinstance(msg.content, str):
-                        count += default_msg_prefix_len + default_content_len + default_msg_suffix_len
+                        count += (
+                            default_msg_prefix_len
+                            + default_content_len
+                            + default_msg_suffix_len
+                        )
                     if isinstance(msg.content, list):
-                        count += default_msg_prefix_len + len(msg.content) * default_content_len + default_msg_suffix_len
+                        count += (
+                            default_msg_prefix_len
+                            + len(msg.content) * default_content_len
+                            + default_msg_suffix_len
+                        )
                 return count
 
     First 30 tokens, allowing partial messages:
@@ -868,12 +951,20 @@ def trim_messages(
         .. code-block:: python
 
             [
-                SystemMessage("This is a 4 token text. The full message is 10 tokens."),
-                HumanMessage("This is a 4 token text. The full message is 10 tokens.", id="first"),
-                AIMessage([{"type": "text", "text": "This is the FIRST 4 token block."}], id="second"),
+                SystemMessage(
+                    "This is a 4 token text. The full message is 10 tokens."
+                ),
+                HumanMessage(
+                    "This is a 4 token text. The full message is 10 tokens.",
+                    id="first",
+                ),
+                AIMessage(
+                    [{"type": "text", "text": "This is the FIRST 4 token block."}],
+                    id="second",
+                ),
             ]
 
-    """
+    """
     # Validate arguments
     if start_on and strategy == "first":
         msg = "start_on parameter is only valid with strategy='last'"
@@ -904,17 +995,12 @@ def trim_messages(
         )
         raise ValueError(msg)
 
-    try:
-        from langchain_text_splitters import TextSplitter
-    except ImportError:
-        text_splitter_fn = cast("Optional[Callable]", text_splitter)
+    if _HAS_LANGCHAIN_TEXT_SPLITTERS and isinstance(text_splitter, TextSplitter):
+        text_splitter_fn = text_splitter.split_text
+    elif text_splitter:
+        text_splitter_fn = cast("Callable", text_splitter)
     else:
-        if isinstance(text_splitter, TextSplitter):
-            text_splitter_fn = text_splitter.split_text
-        else:
-            text_splitter_fn = text_splitter
-
-    text_splitter_fn = text_splitter_fn or _default_text_splitter
+        text_splitter_fn = _default_text_splitter
 
     if strategy == "first":
         return _first_max_tokens(
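With the rewritten dispatch, a plain callable passed as text_splitter is cast and used directly, and the built-in default splitter only applies when nothing is given. A sketch reusing the messages and dummy_token_counter names from the docstring example above:

    def split_on_spaces(text: str) -> list[str]:
        return text.split(" ")

    trim_messages(
        messages,
        max_tokens=30,
        token_counter=dummy_token_counter,
        strategy="first",
        allow_partial=True,
        text_splitter=split_on_spaces,
    )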
@@ -952,25 +1038,30 @@ def convert_to_openai_messages(
             in OpenAI, Anthropic, Bedrock Converse, or VertexAI formats.
         text_format: How to format string or text block contents:
 
-            - "string":
-              If a message has a string content, this is left as a string. If
-              a message has content blocks that are all of type 'text', these are
-              joined with a newline to make a single string. If a message has
-              content blocks and at least one isn't of type 'text', then
-              all blocks are left as dicts.
-            - "block":
-              If a message has a string content, this is turned into a list
-              with a single content block of type 'text'. If a message has content
-              blocks these are left as is.
+            - ``'string'``:
+                If a message has a string content, this is left as a string. If
+                a message has content blocks that are all of type 'text', these are
+                joined with a newline to make a single string. If a message has
+                content blocks and at least one isn't of type 'text', then
+                all blocks are left as dicts.
+            - ``'block'``:
+                If a message has a string content, this is turned into a list
+                with a single content block of type 'text'. If a message has content
+                blocks these are left as is.
+
+    Raises:
+        ValueError: if an unrecognized ``text_format`` is specified, or if a message
+            content block is missing expected keys.
 
     Returns:
         The return type depends on the input type:
-            - dict:
-              If a single message-like object is passed in, a single OpenAI message
-              dict is returned.
-            - list[dict]:
-              If a sequence of message-like objects are passed in, a list of OpenAI
-              message dicts is returned.
+
+        - dict:
+            If a single message-like object is passed in, a single OpenAI message
+            dict is returned.
+        - list[dict]:
+            If a sequence of message-like objects are passed in, a list of OpenAI
+            message dicts is returned.
 
     Example:
 
@@ -985,8 +1076,27 @@ def convert_to_openai_messages(
 
             messages = [
                 SystemMessage([{"type": "text", "text": "foo"}]),
-                {"role": "user", "content": [{"type": "text", "text": "whats in this"}, {"type": "image_url", "image_url": {"url": "data:image/png;base64,'/9j/4AAQSk'"}}]},
-                AIMessage("", tool_calls=[{"name": "analyze", "args": {"baz": "buz"}, "id": "1", "type": "tool_call"}]),
+                {
+                    "role": "user",
+                    "content": [
+                        {"type": "text", "text": "whats in this"},
+                        {
+                            "type": "image_url",
+                            "image_url": {"url": "data:image/png;base64,'/9j/4AAQSk'"},
+                        },
+                    ],
+                },
+                AIMessage(
+                    "",
+                    tool_calls=[
+                        {
+                            "name": "analyze",
+                            "args": {"baz": "buz"},
+                            "id": "1",
+                            "type": "tool_call",
+                        }
+                    ],
+                ),
                 ToolMessage("foobar", tool_call_id="1", name="bar"),
                 {"role": "assistant", "content": "thats nice"},
             ]
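The text_format behavior documented above, on a single message (expected output shapes per the Returns section, shown as comments):

    from langchain_core.messages import HumanMessage, convert_to_openai_messages

    convert_to_openai_messages(
        HumanMessage([{"type": "text", "text": "hello"}]), text_format="string"
    )
    # -> {"role": "user", "content": "hello"}

    convert_to_openai_messages(HumanMessage("hello"), text_format="block")
    # -> {"role": "user", "content": [{"type": "text", "text": "hello"}]}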
langchain_core/output_parsers/base.py
CHANGED
@@ -144,7 +144,10 @@ class BaseOutputParser(
 
         def parse(self, text: str) -> bool:
             cleaned_text = text.strip().upper()
-            if cleaned_text not in (self.true_val.upper(), self.false_val.upper()):
+            if cleaned_text not in (
+                self.true_val.upper(),
+                self.false_val.upper(),
+            ):
                 raise OutputParserException(
                     f"BooleanOutputParser expected output value to either be "
                     f"{self.true_val} or {self.false_val} (case-insensitive). "
@@ -156,7 +159,7 @@ class BaseOutputParser(
         def _type(self) -> str:
             return "boolean_output_parser"
 
-    """
+    """
 
     @property
     @override
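Assembled from the docstring fragments in these two hunks, the example parser reads as follows (the trailing Received line is completed from the published docstring; field defaults shown as assumed there):

    from langchain_core.exceptions import OutputParserException
    from langchain_core.output_parsers import BaseOutputParser


    class BooleanOutputParser(BaseOutputParser[bool]):
        true_val: str = "YES"
        false_val: str = "NO"

        def parse(self, text: str) -> bool:
            cleaned_text = text.strip().upper()
            if cleaned_text not in (
                self.true_val.upper(),
                self.false_val.upper(),
            ):
                raise OutputParserException(
                    f"BooleanOutputParser expected output value to either be "
                    f"{self.true_val} or {self.false_val} (case-insensitive). "
                    f"Received {cleaned_text}."
                )
            return cleaned_text == self.true_val.upper()

        @property
        def _type(self) -> str:
            return "boolean_output_parser"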
langchain_core/output_parsers/json.py
CHANGED
@@ -46,13 +46,13 @@ class JsonOutputParser(BaseCumulativeTransformOutputParser[Any]):
     def _diff(self, prev: Optional[Any], next: Any) -> Any:
         return jsonpatch.make_patch(prev, next).patch
 
-    def _get_schema(self, pydantic_object: type[TBaseModel]) -> Optional[dict[str, Any]]:
+    @staticmethod
+    def _get_schema(pydantic_object: type[TBaseModel]) -> dict[str, Any]:
         if issubclass(pydantic_object, pydantic.BaseModel):
             return pydantic_object.model_json_schema()
-        elif issubclass(pydantic_object, pydantic.v1.BaseModel):
-            return pydantic_object.schema()
-        return None
+        return pydantic_object.schema()
 
+    @override
     def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
         """Parse the result of an LLM call to a JSON object.
 
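The now-static _get_schema feeds JsonOutputParser's format instructions; typical use with a Pydantic v2 model (the Joke model is invented for illustration):

    from pydantic import BaseModel

    from langchain_core.output_parsers import JsonOutputParser


    class Joke(BaseModel):
        setup: str
        punchline: str


    parser = JsonOutputParser(pydantic_object=Joke)
    # The schema embedded here now comes from model_json_schema() for
    # Pydantic v2 models, with the legacy .schema() as the fallback.
    parser.get_format_instructions()
    parser.parse('{"setup": "why", "punchline": "because"}')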