langchain-core 0.3.71__py3-none-any.whl → 0.4.0.dev0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84):
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/beta_decorator.py +1 -0
  3. langchain_core/_api/deprecation.py +2 -0
  4. langchain_core/beta/runnables/context.py +1 -0
  5. langchain_core/callbacks/base.py +23 -14
  6. langchain_core/callbacks/file.py +1 -0
  7. langchain_core/callbacks/manager.py +145 -19
  8. langchain_core/callbacks/streaming_stdout.py +4 -3
  9. langchain_core/callbacks/usage.py +15 -3
  10. langchain_core/chat_history.py +1 -0
  11. langchain_core/document_loaders/langsmith.py +2 -1
  12. langchain_core/documents/base.py +2 -0
  13. langchain_core/embeddings/fake.py +2 -0
  14. langchain_core/indexing/api.py +10 -0
  15. langchain_core/language_models/_utils.py +37 -0
  16. langchain_core/language_models/base.py +4 -1
  17. langchain_core/language_models/chat_models.py +48 -27
  18. langchain_core/language_models/fake_chat_models.py +71 -1
  19. langchain_core/language_models/llms.py +1 -0
  20. langchain_core/memory.py +1 -0
  21. langchain_core/messages/__init__.py +54 -0
  22. langchain_core/messages/ai.py +31 -18
  23. langchain_core/messages/content_blocks.py +1349 -69
  24. langchain_core/messages/human.py +1 -0
  25. langchain_core/messages/modifier.py +1 -1
  26. langchain_core/messages/tool.py +8 -83
  27. langchain_core/messages/utils.py +221 -6
  28. langchain_core/output_parsers/base.py +51 -14
  29. langchain_core/output_parsers/json.py +5 -2
  30. langchain_core/output_parsers/list.py +7 -2
  31. langchain_core/output_parsers/openai_functions.py +29 -5
  32. langchain_core/output_parsers/openai_tools.py +90 -47
  33. langchain_core/output_parsers/pydantic.py +3 -2
  34. langchain_core/output_parsers/transform.py +53 -12
  35. langchain_core/output_parsers/xml.py +14 -5
  36. langchain_core/outputs/llm_result.py +4 -1
  37. langchain_core/prompt_values.py +111 -7
  38. langchain_core/prompts/base.py +4 -0
  39. langchain_core/prompts/chat.py +3 -0
  40. langchain_core/prompts/few_shot.py +1 -0
  41. langchain_core/prompts/few_shot_with_templates.py +1 -0
  42. langchain_core/prompts/image.py +1 -0
  43. langchain_core/prompts/pipeline.py +1 -0
  44. langchain_core/prompts/prompt.py +1 -0
  45. langchain_core/prompts/structured.py +1 -0
  46. langchain_core/rate_limiters.py +1 -0
  47. langchain_core/retrievers.py +3 -0
  48. langchain_core/runnables/base.py +75 -57
  49. langchain_core/runnables/branch.py +1 -0
  50. langchain_core/runnables/config.py +2 -2
  51. langchain_core/runnables/configurable.py +2 -1
  52. langchain_core/runnables/fallbacks.py +3 -7
  53. langchain_core/runnables/graph.py +5 -3
  54. langchain_core/runnables/graph_ascii.py +1 -0
  55. langchain_core/runnables/graph_mermaid.py +1 -0
  56. langchain_core/runnables/history.py +1 -0
  57. langchain_core/runnables/passthrough.py +3 -0
  58. langchain_core/runnables/retry.py +1 -0
  59. langchain_core/runnables/router.py +1 -0
  60. langchain_core/runnables/schema.py +1 -0
  61. langchain_core/stores.py +3 -0
  62. langchain_core/tools/base.py +43 -11
  63. langchain_core/tools/convert.py +25 -3
  64. langchain_core/tools/retriever.py +8 -1
  65. langchain_core/tools/structured.py +10 -1
  66. langchain_core/tracers/base.py +14 -7
  67. langchain_core/tracers/context.py +1 -1
  68. langchain_core/tracers/core.py +27 -4
  69. langchain_core/tracers/event_stream.py +14 -3
  70. langchain_core/tracers/langchain.py +14 -3
  71. langchain_core/tracers/log_stream.py +4 -1
  72. langchain_core/utils/aiter.py +5 -0
  73. langchain_core/utils/function_calling.py +2 -1
  74. langchain_core/utils/iter.py +1 -0
  75. langchain_core/utils/json_schema.py +1 -1
  76. langchain_core/v1/__init__.py +1 -0
  77. langchain_core/v1/chat_models.py +1047 -0
  78. langchain_core/v1/messages.py +755 -0
  79. langchain_core/vectorstores/base.py +1 -0
  80. langchain_core/version.py +1 -1
  81. {langchain_core-0.3.71.dist-info → langchain_core-0.4.0.dev0.dist-info}/METADATA +1 -1
  82. {langchain_core-0.3.71.dist-info → langchain_core-0.4.0.dev0.dist-info}/RECORD +84 -81
  83. {langchain_core-0.3.71.dist-info → langchain_core-0.4.0.dev0.dist-info}/WHEEL +0 -0
  84. {langchain_core-0.3.71.dist-info → langchain_core-0.4.0.dev0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,755 @@
1
+ """LangChain v1.0.0 message format.
2
+
3
+ Each message has content that may be comprised of content blocks, defined under
4
+ ``langchain_core.messages.content_blocks``.
5
+ """
6
+
7
+ import uuid
8
+ from dataclasses import dataclass, field
9
+ from typing import Any, Literal, Optional, Union, cast, get_args
10
+
11
+ from pydantic import BaseModel
12
+ from typing_extensions import TypedDict
13
+
14
+ import langchain_core.messages.content_blocks as types
15
+ from langchain_core.messages.ai import (
16
+ _LC_AUTO_PREFIX,
17
+ _LC_ID_PREFIX,
18
+ UsageMetadata,
19
+ add_usage,
20
+ )
21
+ from langchain_core.messages.base import merge_content
22
+ from langchain_core.messages.tool import ToolOutputMixin
23
+ from langchain_core.messages.tool import invalid_tool_call as create_invalid_tool_call
24
+ from langchain_core.messages.tool import tool_call as create_tool_call
25
+ from langchain_core.utils._merge import merge_dicts
26
+ from langchain_core.utils.json import parse_partial_json
27
+
28
+
29
def _ensure_id(id_val: Optional[str]) -> str:
    """Ensure the ID is a valid string, generating a new UUID if not provided.

    Auto-generated UUIDs are prefixed by ``'lc_'`` to indicate they are
    LangChain-generated IDs.

    Args:
        id_val: Optional string ID value to validate.

    Returns:
        A valid string ID, either the provided value or a new UUID.
    """
    # An f-string already evaluates to str; the original str(...) wrapper was
    # redundant and has been dropped.
    return id_val or f"{_LC_AUTO_PREFIX}{uuid.uuid4()}"
42
+
43
+
44
class ResponseMetadata(TypedDict, total=False):
    """Metadata about the response from the AI provider.

    Contains additional information returned by the provider, such as
    response headers, service tiers, log probabilities, system fingerprints, etc.

    ``total=False`` makes the keys declared here optional. Provider-specific
    keys beyond those typed here may also be present at runtime, since
    ``TypedDict`` instances are plain dicts and keys are not validated.
    """

    model_provider: str
    """Name and version of the provider that created the message (e.g., openai)."""

    model_name: str
    """Name of the model that generated the message."""
60
+
61
+
62
@dataclass
class AIMessage:
    """A message generated by an AI assistant.

    Represents a response from an AI model, including text content, tool calls,
    and metadata about the generation process.

    Attributes:
        id: Unique identifier for the message.
        type: Message type identifier, always "ai".
        name: Optional human-readable name for the message.
        lc_version: Encoding version for the message.
        content: List of content blocks containing the message data.
        tool_calls: Optional list of tool calls made by the AI.
        invalid_tool_calls: Optional list of tool calls that failed validation.
        usage: Optional dictionary containing usage statistics.
    """

    type: Literal["ai"] = "ai"
    """The type of the message. Must be a string that is unique to the message type.

    The purpose of this field is to allow for easy identification of the message type
    when deserializing messages.
    """

    name: Optional[str] = None
    """An optional name for the message.

    This can be used to provide a human-readable name for the message.

    Usage of this field is optional, and whether it's used or not is up to the
    model implementation.
    """

    id: Optional[str] = None
    """Unique identifier for the message.

    If the provider assigns a meaningful ID, it should be used here. Otherwise, a
    LangChain-generated ID will be used.
    """

    lc_version: str = "v1"
    """Encoding version for the message. Used for serialization."""

    content: list[types.ContentBlock] = field(default_factory=list)
    """Message content as a list of content blocks."""

    usage_metadata: Optional[UsageMetadata] = None
    """If provided, usage metadata for a message, such as token counts."""

    response_metadata: ResponseMetadata = field(
        default_factory=lambda: ResponseMetadata()
    )
    """Metadata about the response.

    This field should include non-standard data returned by the provider, such as
    response headers, service tiers, or log probabilities.
    """

    parsed: Optional[Union[dict[str, Any], BaseModel]] = None
    """Auto-parsed message contents, if applicable."""

    # NOTE: because this __init__ is hand-written, @dataclass does not generate
    # one; the decorator still derives __repr__/__eq__ from the fields above.
    def __init__(
        self,
        content: Union[str, list[types.ContentBlock]],
        id: Optional[str] = None,
        name: Optional[str] = None,
        lc_version: str = "v1",
        response_metadata: Optional[ResponseMetadata] = None,
        usage_metadata: Optional[UsageMetadata] = None,
        tool_calls: Optional[list[types.ToolCall]] = None,
        invalid_tool_calls: Optional[list[types.InvalidToolCall]] = None,
        parsed: Optional[Union[dict[str, Any], BaseModel]] = None,
    ):
        """Initialize an AI message.

        Args:
            content: Message content as string or list of content blocks.
            id: Optional unique identifier for the message.
            name: Optional human-readable name for the message.
            lc_version: Encoding version for the message.
            response_metadata: Optional metadata about the response.
            usage_metadata: Optional metadata about token usage.
            tool_calls: Optional list of tool calls made by the AI. Tool calls should
                generally be included in message content. If passed on init, they will
                be added to the content list.
            invalid_tool_calls: Optional list of tool calls that failed validation.
            parsed: Optional auto-parsed message contents, if applicable.
        """
        # A plain string is normalized to a single text content block.
        if isinstance(content, str):
            self.content = [{"type": "text", "text": content}]
        else:
            self.content = content

        # A missing id is replaced with a LangChain-generated one.
        self.id = _ensure_id(id)
        self.name = name
        self.lc_version = lc_version
        self.usage_metadata = usage_metadata
        self.parsed = parsed
        if response_metadata is None:
            self.response_metadata = {}
        else:
            self.response_metadata = response_metadata

        # Add tool calls to content if provided on init
        if tool_calls:
            # Collect ids of tool calls already present in content so that a
            # tool call passed both in content and via the kwarg is not duplicated.
            content_tool_calls = {
                block["id"]
                for block in self.content
                if block["type"] == "tool_call" and "id" in block
            }
            for tool_call in tool_calls:
                if "id" in tool_call and tool_call["id"] in content_tool_calls:
                    continue
                self.content.append(tool_call)
        if invalid_tool_calls:
            # Same de-duplication for invalid tool calls.
            content_tool_calls = {
                block["id"]
                for block in self.content
                if block["type"] == "invalid_tool_call" and "id" in block
            }
            for invalid_tool_call in invalid_tool_calls:
                if (
                    "id" in invalid_tool_call
                    and invalid_tool_call["id"] in content_tool_calls
                ):
                    continue
                self.content.append(invalid_tool_call)
        # Snapshot caches of the (invalid) tool calls contained in content; the
        # properties below rebuild them lazily when they are empty.
        self._tool_calls = [
            block for block in self.content if block["type"] == "tool_call"
        ]
        self._invalid_tool_calls = [
            block for block in self.content if block["type"] == "invalid_tool_call"
        ]

    @property
    def text(self) -> str:
        """Extract all text content from the AI message as a string."""
        text_blocks = [block for block in self.content if block["type"] == "text"]
        return "".join(block["text"] for block in text_blocks)

    @property
    def tool_calls(self) -> list[types.ToolCall]:
        """Get the tool calls made by the AI."""
        # Lazily rebuild the cache from content when it is empty.
        if not self._tool_calls:
            self._tool_calls = [
                block for block in self.content if block["type"] == "tool_call"
            ]
        return self._tool_calls

    @tool_calls.setter
    def tool_calls(self, value: list[types.ToolCall]) -> None:
        """Set the tool calls for the AI message."""
        self._tool_calls = value

    @property
    def invalid_tool_calls(self) -> list[types.InvalidToolCall]:
        """Get the invalid tool calls made by the AI."""
        # Lazily rebuild the cache from content when it is empty.
        if not self._invalid_tool_calls:
            self._invalid_tool_calls = [
                block for block in self.content if block["type"] == "invalid_tool_call"
            ]
        return self._invalid_tool_calls
225
+
226
+
227
@dataclass
class AIMessageChunk(AIMessage):
    """A partial chunk of an AI message during streaming.

    Represents a portion of an AI response that is delivered incrementally
    during streaming generation. Contains partial content and metadata.

    Attributes:
        id: Unique identifier for the message chunk.
        type: Message type identifier, always "ai_chunk".
        name: Optional human-readable name for the message.
        content: List of content blocks containing partial message data.
        tool_call_chunks: Optional list of partial tool call data.
        usage_metadata: Optional metadata about token usage and costs.
    """

    type: Literal["ai_chunk"] = "ai_chunk"  # type: ignore[assignment]
    """The type of the message. Must be a string that is unique to the message type.

    The purpose of this field is to allow for easy identification of the message type
    when deserializing messages.
    """

    # NOTE: hand-written __init__ — @dataclass does not generate one here.
    def __init__(
        self,
        content: Union[str, list[types.ContentBlock]],
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        lc_version: str = "v1",
        response_metadata: Optional[ResponseMetadata] = None,
        usage_metadata: Optional[UsageMetadata] = None,
        tool_call_chunks: Optional[list[types.ToolCallChunk]] = None,
        parsed: Optional[Union[dict[str, Any], BaseModel]] = None,
        chunk_position: Optional[Literal["last"]] = None,
    ):
        """Initialize an AI message chunk.

        Args:
            content: Message content as string or list of content blocks.
            id: Optional unique identifier for the message.
            name: Optional human-readable name for the message.
            lc_version: Encoding version for the message.
            response_metadata: Optional metadata about the response.
            usage_metadata: Optional metadata about token usage.
            tool_call_chunks: Optional list of partial tool call data.
            parsed: Optional auto-parsed message contents, if applicable.
            chunk_position: Optional position of the chunk in the stream. If "last",
                tool calls will be parsed when aggregated into a stream.
        """
        # A plain string becomes a single text block; the "index" key is added
        # here (unlike AIMessage) — presumably used to align blocks when chunks
        # are merged; confirm against merge_content.
        if isinstance(content, str):
            self.content = [{"type": "text", "text": content, "index": 0}]
        else:
            self.content = content

        self.id = _ensure_id(id)
        self.name = name
        self.lc_version = lc_version
        self.usage_metadata = usage_metadata
        self.parsed = parsed
        self.chunk_position = chunk_position
        if response_metadata is None:
            self.response_metadata = {}
        else:
            self.response_metadata = response_metadata

        # Merge kwarg-provided chunks into content, skipping ids already present.
        if tool_call_chunks:
            content_tool_call_chunks = {
                block["id"]
                for block in self.content
                if block.get("type") == "tool_call_chunk" and "id" in block
            }
            for chunk in tool_call_chunks:
                if "id" in chunk and chunk["id"] in content_tool_call_chunks:
                    continue
                self.content.append(chunk)
        self._tool_call_chunks = [
            block for block in self.content if block.get("type") == "tool_call_chunk"
        ]

        # Parsed tool call caches start empty; the properties below populate
        # them by parsing tool_call_chunk blocks on demand.
        self._tool_calls: list[types.ToolCall] = []
        self._invalid_tool_calls: list[types.InvalidToolCall] = []

    @property
    def tool_call_chunks(self) -> list[types.ToolCallChunk]:
        """Get the (partial) tool call chunks present in content."""
        if not self._tool_call_chunks:
            self._tool_call_chunks = [
                block
                for block in self.content
                if block.get("type") == "tool_call_chunk"
            ]
        return cast("list[types.ToolCallChunk]", self._tool_call_chunks)

    @property
    def tool_calls(self) -> list[types.ToolCall]:
        """Get the tool calls made by the AI."""
        # Parsing chunk blocks yields both valid and invalid tool calls, so
        # both caches are refreshed in one pass.
        if not self._tool_calls:
            parsed_content = _init_tool_calls(self.content)
            self._tool_calls = [
                block for block in parsed_content if block["type"] == "tool_call"
            ]
            self._invalid_tool_calls = [
                block
                for block in parsed_content
                if block["type"] == "invalid_tool_call"
            ]
        return self._tool_calls

    @tool_calls.setter
    def tool_calls(self, value: list[types.ToolCall]) -> None:
        """Set the tool calls for the AI message."""
        self._tool_calls = value

    @property
    def invalid_tool_calls(self) -> list[types.InvalidToolCall]:
        """Get the invalid tool calls made by the AI."""
        # Mirror of tool_calls: one parse refreshes both caches.
        if not self._invalid_tool_calls:
            parsed_content = _init_tool_calls(self.content)
            self._tool_calls = [
                block for block in parsed_content if block["type"] == "tool_call"
            ]
            self._invalid_tool_calls = [
                block
                for block in parsed_content
                if block["type"] == "invalid_tool_call"
            ]
        return self._invalid_tool_calls

    def __add__(self, other: Any) -> "AIMessageChunk":
        """Add AIMessageChunk to this one.

        Accepts a single chunk or a list/tuple of chunks; anything else raises
        NotImplementedError.
        """
        if isinstance(other, AIMessageChunk):
            return add_ai_message_chunks(self, other)
        if isinstance(other, (list, tuple)) and all(
            isinstance(o, AIMessageChunk) for o in other
        ):
            return add_ai_message_chunks(self, *other)
        error_msg = "Can only add AIMessageChunk or sequence of AIMessageChunk."
        raise NotImplementedError(error_msg)

    def to_message(self) -> "AIMessage":
        """Convert this AIMessageChunk to an AIMessage.

        Tool call chunks in content are parsed into (invalid) tool calls.
        """
        return AIMessage(
            content=_init_tool_calls(self.content),
            id=self.id,
            name=self.name,
            lc_version=self.lc_version,
            response_metadata=self.response_metadata,
            usage_metadata=self.usage_metadata,
            parsed=self.parsed,
        )
378
+
379
+
380
def _init_tool_calls(content: list[types.ContentBlock]) -> list[types.ContentBlock]:
    """Parse tool call chunks in content into tool calls.

    Args:
        content: Content blocks, possibly containing ``tool_call_chunk`` blocks
            accumulated during streaming.

    Returns:
        A new list in which every ``tool_call_chunk`` is replaced by either a
        ``tool_call`` (when its partial-JSON args parse to a dict) or an
        ``invalid_tool_call`` (on parse failure or non-dict args). Other blocks
        are passed through unchanged.
    """
    new_content = []
    for block in content:
        if block.get("type") != "tool_call_chunk":
            new_content.append(block)
            continue
        try:
            # Streamed args may be truncated JSON; parse leniently.
            args_ = (
                parse_partial_json(cast("str", block.get("args") or ""))
                if block.get("args")
                else {}
            )
            if isinstance(args_, dict):
                new_content.append(
                    create_tool_call(
                        name=cast("str", block.get("name") or ""),
                        args=args_,
                        id=cast("str", block.get("id", "")),
                    )
                )
                continue
        except Exception:  # noqa: BLE001 - any failure downgrades to invalid
            pass
        # Fallback shared by the non-dict and exception paths (the original
        # duplicated this construction in both branches).
        new_content.append(
            create_invalid_tool_call(
                name=cast("str", block.get("name", "")),
                args=cast("str", block.get("args", "")),
                id=cast("str", block.get("id", "")),
                error=None,
            )
        )
    return new_content
420
+
421
+
422
def add_ai_message_chunks(
    left: AIMessageChunk, *others: AIMessageChunk
) -> AIMessageChunk:
    """Add multiple AIMessageChunks together.

    Merges content, response metadata, and usage; keeps the last non-null
    ``parsed`` value; and selects a message id preferring provider-assigned ids.
    Returns ``left`` unchanged when no other chunks are given.
    """
    if not others:
        return left
    # Merge content blocks across all chunks.
    content = cast(
        "list[types.ContentBlock]",
        merge_content(
            cast("list[str | dict[Any, Any]]", left.content),
            *(cast("list[str | dict[Any, Any]]", o.content) for o in others),
        ),
    )
    response_metadata = merge_dicts(
        cast("dict", left.response_metadata),
        *(cast("dict", o.response_metadata) for o in others),
    )

    # Token usage
    if left.usage_metadata or any(o.usage_metadata is not None for o in others):
        usage_metadata: Optional[UsageMetadata] = left.usage_metadata
        for other in others:
            usage_metadata = add_usage(usage_metadata, other.usage_metadata)
    else:
        usage_metadata = None

    # Parsed
    # 'parsed' always represents an aggregation not an incremental value, so the last
    # non-null value is kept.
    parsed = None
    for m in reversed([left, *others]):
        if m.parsed is not None:
            parsed = m.parsed
            break

    chunk_id = None
    candidates = [left.id] + [o.id for o in others]
    # first pass: pick the first provider-assigned id (non-`run-*` and non-`lc_*`)
    for id_ in candidates:
        if (
            id_
            and not id_.startswith(_LC_ID_PREFIX)
            and not id_.startswith(_LC_AUTO_PREFIX)
        ):
            chunk_id = id_
            break
    else:
        # second pass: prefer lc_* ids over run-* ids
        # (for/else: this runs only when the loop above found no match)
        for id_ in candidates:
            if id_ and id_.startswith(_LC_AUTO_PREFIX):
                chunk_id = id_
                break
        else:
            # third pass: take any remaining id (run-* ids)
            for id_ in candidates:
                if id_:
                    chunk_id = id_
                    break

    # The merged chunk is "last" if any input chunk was; only then are the
    # accumulated tool_call_chunk blocks parsed into tool calls.
    chunk_position: Optional[Literal["last"]] = (
        "last" if any(x.chunk_position == "last" for x in [left, *others]) else None
    )
    if chunk_position == "last":
        content = _init_tool_calls(content)

    # Use left.__class__ so subclasses of AIMessageChunk round-trip.
    return left.__class__(
        content=content,
        response_metadata=cast("ResponseMetadata", response_metadata),
        usage_metadata=usage_metadata,
        parsed=parsed,
        id=chunk_id,
        chunk_position=chunk_position,
    )
495
+
496
+
497
@dataclass
class HumanMessage:
    """A message from a human user.

    Represents input from a human user in a conversation, containing text
    or other content types like images.

    Attributes:
        id: Unique identifier for the message.
        content: List of content blocks containing the user's input.
        name: Optional human-readable name for the message.
        type: Message type identifier, always "human".
    """

    id: str
    """Used for serialization.

    If the provider assigns a meaningful ID, it should be used here. Otherwise, a
    LangChain-generated ID will be used.
    """

    content: list[types.ContentBlock]
    """Message content as a list of content blocks."""

    type: Literal["human"] = "human"
    """The type of the message. Must be a string that is unique to the message type.

    The purpose of this field is to allow for easy identification of the message type
    when deserializing messages.
    """

    name: Optional[str] = None
    """An optional name for the message.

    This can be used to provide a human-readable name for the message.

    Usage of this field is optional, and whether it's used or not is up to the
    model implementation.
    """

    def __init__(
        self,
        content: Union[str, list[types.ContentBlock]],
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
    ):
        """Initialize a human message.

        Args:
            content: Message content as string or list of content blocks.
            id: Optional unique identifier for the message.
            name: Optional human-readable name for the message.
        """
        # Wrap a bare string into a single text content block.
        if isinstance(content, str):
            content = [{"type": "text", "text": content}]
        self.content = content
        self.id = _ensure_id(id)
        self.name = name

    def text(self) -> str:
        """Extract all text content from the message.

        Returns:
            Concatenated string of all text blocks in the message.
        """
        parts = [block["text"] for block in self.content if block["type"] == "text"]
        return "".join(parts)
567
+
568
+
569
@dataclass
class SystemMessage:
    """A system message containing instructions or context.

    Represents system-level instructions or context that guides the AI's
    behavior and understanding of the conversation.

    Attributes:
        id: Unique identifier for the message.
        content: List of content blocks containing system instructions.
        type: Message type identifier, always "system".
        name: Optional human-readable name for the message.
        custom_role: Optional custom role string (e.g., "developer").
    """

    id: str
    """Used for serialization.

    If the provider assigns a meaningful ID, it should be used here. Otherwise, a
    LangChain-generated ID will be used.
    """

    content: list[types.ContentBlock]
    """Message content as a list of content blocks."""

    type: Literal["system"] = "system"
    """The type of the message. Must be a string that is unique to the message type.

    The purpose of this field is to allow for easy identification of the message type
    when deserializing messages.
    """

    name: Optional[str] = None
    """An optional name for the message.

    This can be used to provide a human-readable name for the message.

    Usage of this field is optional, and whether it's used or not is up to the
    model implementation.
    """

    custom_role: Optional[str] = None
    """If provided, a custom role for the system message.

    Example: ``"developer"``.

    Integration packages may use this field to assign the system message role if it
    contains a recognized value.
    """

    def __init__(
        self,
        content: Union[str, list[types.ContentBlock]],
        *,
        id: Optional[str] = None,
        custom_role: Optional[str] = None,
        name: Optional[str] = None,
    ):
        """Initialize a system message.

        Args:
            content: Message content as string or list of content blocks.
            id: Optional unique identifier for the message.
            custom_role: If provided, a custom role for the system message.
            name: Optional human-readable name for the message.
        """
        self.id = _ensure_id(id)
        # Normalize a plain string to a single text content block.
        if isinstance(content, str):
            self.content = [{"type": "text", "text": content}]
        else:
            self.content = content
        self.custom_role = custom_role
        self.name = name

    def text(self) -> str:
        """Extract all text content from the system message."""
        return "".join(
            block["text"] for block in self.content if block["type"] == "text"
        )
646
+
647
+
648
@dataclass
class ToolMessage(ToolOutputMixin):
    """A message containing the result of a tool execution.

    Represents the output from executing a tool or function call,
    including the result data and execution status.

    Attributes:
        id: Unique identifier for the message.
        tool_call_id: ID of the tool call this message responds to.
        content: The result content from tool execution.
        artifact: Optional app-side payload not intended for the model.
        status: Execution status ("success" or "error").
        type: Message type identifier, always "tool".
    """

    id: str
    """Used for serialization."""

    tool_call_id: str
    """ID of the tool call this message responds to.

    This should match the ID of the tool call that this message is responding to.
    """

    content: list[types.ContentBlock]
    """Message content as a list of content blocks."""

    type: Literal["tool"] = "tool"
    """The type of the message. Must be a string that is unique to the message type.

    The purpose of this field is to allow for easy identification of the message type
    when deserializing messages.
    """

    artifact: Optional[Any] = None
    """App-side payload not for the model."""

    name: Optional[str] = None
    """An optional name for the message.

    This can be used to provide a human-readable name for the message.

    Usage of this field is optional, and whether it's used or not is up to the
    model implementation.
    """

    status: Literal["success", "error"] = "success"
    """Execution status of the tool call.

    Indicates whether the tool call was successful or encountered an error.
    Defaults to "success".
    """

    def __init__(
        self,
        content: Union[str, list[types.ContentBlock]],
        tool_call_id: str,
        *,
        id: Optional[str] = None,
        name: Optional[str] = None,
        artifact: Optional[Any] = None,
        status: Literal["success", "error"] = "success",
    ):
        """Initialize a tool message.

        Args:
            content: Message content as string or list of content blocks.
            tool_call_id: ID of the tool call this message responds to.
            id: Optional unique identifier for the message.
            name: Optional human-readable name for the message.
            artifact: Optional app-side payload not intended for the model.
            status: Execution status ("success" or "error").
        """
        self.id = _ensure_id(id)
        self.tool_call_id = tool_call_id
        # Normalize a plain string to a single text content block.
        if isinstance(content, str):
            self.content = [{"type": "text", "text": content}]
        else:
            self.content = content
        self.name = name
        self.artifact = artifact
        self.status = status

    @property
    def text(self) -> str:
        """Extract all text content from the tool message."""
        return "".join(
            block["text"] for block in self.content if block["type"] == "text"
        )

    def __post_init__(self) -> None:
        """Initialize computed fields after dataclass creation.

        Ensures the tool message has a valid ID.

        NOTE(review): ``@dataclass`` only invokes ``__post_init__`` from its
        generated ``__init__``; since this class defines a custom ``__init__``,
        this hook appears never to run automatically — confirm before relying
        on it, or remove it.
        """
        self.id = _ensure_id(self.id)
745
+
746
+
747
# Alias for a message type that can be any of the defined message types
MessageV1 = Union[
    AIMessage,
    AIMessageChunk,
    HumanMessage,
    SystemMessage,
    ToolMessage,
]
# Tuple of the concrete classes in MessageV1 (via typing.get_args), suitable
# for isinstance() checks at runtime.
MessageV1Types = get_args(MessageV1)