langchain-core 1.0.0a8__py3-none-any.whl → 1.0.0rc2__py3-none-any.whl

This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.

Potentially problematic release.

This version of langchain-core might be problematic.
Files changed (142)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +0 -1
  3. langchain_core/_api/beta_decorator.py +17 -20
  4. langchain_core/_api/deprecation.py +30 -35
  5. langchain_core/_import_utils.py +1 -1
  6. langchain_core/agents.py +10 -9
  7. langchain_core/caches.py +46 -56
  8. langchain_core/callbacks/__init__.py +1 -8
  9. langchain_core/callbacks/base.py +232 -243
  10. langchain_core/callbacks/file.py +33 -33
  11. langchain_core/callbacks/manager.py +353 -416
  12. langchain_core/callbacks/stdout.py +21 -22
  13. langchain_core/callbacks/streaming_stdout.py +32 -32
  14. langchain_core/callbacks/usage.py +54 -51
  15. langchain_core/chat_history.py +43 -58
  16. langchain_core/document_loaders/base.py +21 -21
  17. langchain_core/document_loaders/langsmith.py +22 -22
  18. langchain_core/documents/__init__.py +0 -1
  19. langchain_core/documents/base.py +46 -49
  20. langchain_core/documents/transformers.py +28 -29
  21. langchain_core/embeddings/fake.py +50 -54
  22. langchain_core/example_selectors/semantic_similarity.py +4 -6
  23. langchain_core/exceptions.py +7 -8
  24. langchain_core/indexing/api.py +19 -25
  25. langchain_core/indexing/base.py +24 -24
  26. langchain_core/language_models/__init__.py +11 -27
  27. langchain_core/language_models/_utils.py +53 -54
  28. langchain_core/language_models/base.py +30 -24
  29. langchain_core/language_models/chat_models.py +123 -148
  30. langchain_core/language_models/fake_chat_models.py +7 -7
  31. langchain_core/language_models/llms.py +14 -16
  32. langchain_core/load/dump.py +3 -4
  33. langchain_core/load/load.py +7 -16
  34. langchain_core/load/serializable.py +37 -36
  35. langchain_core/messages/__init__.py +1 -16
  36. langchain_core/messages/ai.py +122 -123
  37. langchain_core/messages/base.py +31 -31
  38. langchain_core/messages/block_translators/__init__.py +17 -17
  39. langchain_core/messages/block_translators/anthropic.py +3 -3
  40. langchain_core/messages/block_translators/bedrock_converse.py +3 -3
  41. langchain_core/messages/block_translators/google_genai.py +5 -4
  42. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  43. langchain_core/messages/block_translators/groq.py +117 -21
  44. langchain_core/messages/block_translators/langchain_v0.py +3 -3
  45. langchain_core/messages/block_translators/openai.py +5 -5
  46. langchain_core/messages/chat.py +2 -6
  47. langchain_core/messages/content.py +222 -209
  48. langchain_core/messages/function.py +6 -10
  49. langchain_core/messages/human.py +17 -24
  50. langchain_core/messages/modifier.py +2 -2
  51. langchain_core/messages/system.py +12 -22
  52. langchain_core/messages/tool.py +53 -69
  53. langchain_core/messages/utils.py +399 -417
  54. langchain_core/output_parsers/__init__.py +1 -14
  55. langchain_core/output_parsers/base.py +46 -47
  56. langchain_core/output_parsers/json.py +3 -4
  57. langchain_core/output_parsers/list.py +2 -2
  58. langchain_core/output_parsers/openai_functions.py +46 -44
  59. langchain_core/output_parsers/openai_tools.py +11 -16
  60. langchain_core/output_parsers/pydantic.py +10 -11
  61. langchain_core/output_parsers/string.py +2 -2
  62. langchain_core/output_parsers/transform.py +2 -2
  63. langchain_core/output_parsers/xml.py +1 -1
  64. langchain_core/outputs/__init__.py +1 -1
  65. langchain_core/outputs/chat_generation.py +14 -14
  66. langchain_core/outputs/generation.py +6 -6
  67. langchain_core/outputs/llm_result.py +5 -5
  68. langchain_core/prompt_values.py +11 -11
  69. langchain_core/prompts/__init__.py +3 -23
  70. langchain_core/prompts/base.py +33 -38
  71. langchain_core/prompts/chat.py +222 -229
  72. langchain_core/prompts/dict.py +3 -3
  73. langchain_core/prompts/few_shot.py +76 -83
  74. langchain_core/prompts/few_shot_with_templates.py +7 -9
  75. langchain_core/prompts/image.py +12 -14
  76. langchain_core/prompts/loading.py +1 -1
  77. langchain_core/prompts/message.py +3 -3
  78. langchain_core/prompts/prompt.py +20 -23
  79. langchain_core/prompts/string.py +20 -8
  80. langchain_core/prompts/structured.py +26 -27
  81. langchain_core/rate_limiters.py +50 -58
  82. langchain_core/retrievers.py +41 -182
  83. langchain_core/runnables/base.py +565 -597
  84. langchain_core/runnables/branch.py +8 -8
  85. langchain_core/runnables/config.py +37 -44
  86. langchain_core/runnables/configurable.py +9 -10
  87. langchain_core/runnables/fallbacks.py +9 -9
  88. langchain_core/runnables/graph.py +46 -50
  89. langchain_core/runnables/graph_ascii.py +19 -18
  90. langchain_core/runnables/graph_mermaid.py +20 -31
  91. langchain_core/runnables/graph_png.py +7 -7
  92. langchain_core/runnables/history.py +22 -22
  93. langchain_core/runnables/passthrough.py +11 -11
  94. langchain_core/runnables/retry.py +3 -3
  95. langchain_core/runnables/router.py +2 -2
  96. langchain_core/runnables/schema.py +33 -33
  97. langchain_core/runnables/utils.py +30 -34
  98. langchain_core/stores.py +72 -102
  99. langchain_core/sys_info.py +27 -29
  100. langchain_core/tools/__init__.py +1 -14
  101. langchain_core/tools/base.py +70 -71
  102. langchain_core/tools/convert.py +100 -104
  103. langchain_core/tools/render.py +9 -9
  104. langchain_core/tools/retriever.py +7 -7
  105. langchain_core/tools/simple.py +6 -7
  106. langchain_core/tools/structured.py +18 -24
  107. langchain_core/tracers/__init__.py +1 -9
  108. langchain_core/tracers/base.py +35 -35
  109. langchain_core/tracers/context.py +12 -17
  110. langchain_core/tracers/event_stream.py +3 -3
  111. langchain_core/tracers/langchain.py +8 -8
  112. langchain_core/tracers/log_stream.py +17 -18
  113. langchain_core/tracers/memory_stream.py +3 -3
  114. langchain_core/tracers/root_listeners.py +2 -2
  115. langchain_core/tracers/schemas.py +0 -129
  116. langchain_core/tracers/stdout.py +1 -2
  117. langchain_core/utils/__init__.py +1 -1
  118. langchain_core/utils/aiter.py +32 -32
  119. langchain_core/utils/env.py +5 -5
  120. langchain_core/utils/function_calling.py +59 -154
  121. langchain_core/utils/html.py +4 -4
  122. langchain_core/utils/input.py +3 -3
  123. langchain_core/utils/interactive_env.py +1 -1
  124. langchain_core/utils/iter.py +20 -20
  125. langchain_core/utils/json.py +1 -1
  126. langchain_core/utils/json_schema.py +2 -2
  127. langchain_core/utils/mustache.py +5 -5
  128. langchain_core/utils/pydantic.py +17 -17
  129. langchain_core/utils/strings.py +5 -5
  130. langchain_core/utils/utils.py +25 -28
  131. langchain_core/vectorstores/base.py +55 -87
  132. langchain_core/vectorstores/in_memory.py +83 -85
  133. langchain_core/vectorstores/utils.py +2 -2
  134. langchain_core/version.py +1 -1
  135. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/METADATA +23 -11
  136. langchain_core-1.0.0rc2.dist-info/RECORD +172 -0
  137. langchain_core/memory.py +0 -120
  138. langchain_core/pydantic_v1/__init__.py +0 -30
  139. langchain_core/pydantic_v1/dataclasses.py +0 -23
  140. langchain_core/pydantic_v1/main.py +0 -23
  141. langchain_core-1.0.0a8.dist-info/RECORD +0 -176
  142. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc2.dist-info}/WHEEL +0 -0
@@ -31,12 +31,12 @@ def _convert_to_v1_from_anthropic_input(
 ) -> list[types.ContentBlock]:
     """Convert Anthropic format blocks to v1 format.
 
-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
-    block as a ``'non_standard'`` block with the original block stored in the ``value``
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any blocks that
     might be Anthropic format to v1 ContentBlocks.
 
-    If conversion fails, the block is left as a ``'non_standard'`` block.
+    If conversion fails, the block is left as a `'non_standard'` block.
 
     Args:
         content: List of content blocks to process.
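The `'non_standard'` wrapping these docstrings refer to works roughly as sketched below; the block contents are hypothetical and only illustrate the shape, not the actual langchain-core internals.

```python
from typing import Any

# Illustrative provider-specific block that the v1 parser does not recognize.
provider_block: dict[str, Any] = {"type": "some_provider_block", "data": "..."}

# During `content_blocks` parsing, unrecognized blocks are wrapped like this,
# with the original payload preserved under "value".
wrapped = {"type": "non_standard", "value": provider_block}


def unpack_or_keep(block: dict[str, Any]) -> dict[str, Any]:
    """Sketch: try to recover the original block; keep the wrapper on failure."""
    if block.get("type") == "non_standard" and "value" in block:
        return block["value"]  # real converters translate to a v1 block here
    return block


print(unpack_or_keep(wrapped))
```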
@@ -35,12 +35,12 @@ def _convert_to_v1_from_converse_input(
 ) -> list[types.ContentBlock]:
     """Convert Bedrock Converse format blocks to v1 format.
 
-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
-    block as a ``'non_standard'`` block with the original block stored in the ``value``
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any blocks that
     might be Converse format to v1 ContentBlocks.
 
-    If conversion fails, the block is left as a ``'non_standard'`` block.
+    If conversion fails, the block is left as a `'non_standard'` block.
 
     Args:
         content: List of content blocks to process.
@@ -105,12 +105,12 @@ def _convert_to_v1_from_genai_input(
     Called when message isn't an `AIMessage` or `model_provider` isn't set on
     `response_metadata`.
 
-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
-    block as a ``'non_standard'`` block with the original block stored in the ``value``
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any blocks that
     might be GenAI format to v1 ContentBlocks.
 
-    If conversion fails, the block is left as a ``'non_standard'`` block.
+    If conversion fails, the block is left as a `'non_standard'` block.
 
     Args:
         content: List of content blocks to process.
@@ -453,9 +453,10 @@ def _convert_to_v1_from_genai(message: AIMessage) -> list[types.ContentBlock]:
                 "status": status,  # type: ignore[typeddict-item]
                 "output": item.get("code_execution_result", ""),
             }
+            server_tool_result_block["extras"] = {"block_type": item_type}
             # Preserve original outcome in extras
             if outcome is not None:
-                server_tool_result_block["extras"] = {"outcome": outcome}
+                server_tool_result_block["extras"]["outcome"] = outcome
             converted_blocks.append(server_tool_result_block)
         else:
             # Unknown type, preserve as non-standard
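A minimal sketch of the behavioral difference in this hunk, with illustrative values: previously `extras` was only populated when an outcome was present, now it always records the original block type and merges the outcome in alongside it.

```python
# Illustrative values standing in for the GenAI response fields.
item_type = "code_execution_result"
outcome = "OUTCOME_OK"

# Before: extras existed only when an outcome was present, and held only that.
extras_before = {"outcome": outcome} if outcome is not None else None

# After: extras always records the original block type; the outcome is merged
# in when present instead of replacing the dict.
extras_after = {"block_type": item_type}
if outcome is not None:
    extras_after["outcome"] = outcome

print(extras_before)  # {'outcome': 'OUTCOME_OK'}
print(extras_after)   # {'block_type': 'code_execution_result', 'outcome': 'OUTCOME_OK'}
```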
@@ -1,37 +1,9 @@
 """Derivations of standard content blocks from Google (VertexAI) content."""
 
-import warnings
-
-from langchain_core.messages import AIMessage, AIMessageChunk
-from langchain_core.messages import content as types
-
-WARNED = False
-
-
-def translate_content(message: AIMessage) -> list[types.ContentBlock]:  # noqa: ARG001
-    """Derive standard content blocks from a message with Google (VertexAI) content."""
-    global WARNED  # noqa: PLW0603
-    if not WARNED:
-        warning_message = (
-            "Content block standardization is not yet fully supported for Google "
-            "VertexAI."
-        )
-        warnings.warn(warning_message, stacklevel=2)
-        WARNED = True
-    raise NotImplementedError
-
-
-def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:  # noqa: ARG001
-    """Derive standard content blocks from a chunk with Google (VertexAI) content."""
-    global WARNED  # noqa: PLW0603
-    if not WARNED:
-        warning_message = (
-            "Content block standardization is not yet fully supported for Google "
-            "VertexAI."
-        )
-        warnings.warn(warning_message, stacklevel=2)
-        WARNED = True
-    raise NotImplementedError
+from langchain_core.messages.block_translators.google_genai import (
+    translate_content,
+    translate_content_chunk,
+)
 
 
 def _register_google_vertexai_translator() -> None:
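With this change the VertexAI module simply re-exports the GenAI translators, so both import paths resolve to the same objects; a quick check, assuming langchain-core 1.0.0rc2 is installed:

```python
from langchain_core.messages.block_translators import google_genai, google_vertexai

# The re-export means both modules expose the very same function objects.
assert google_vertexai.translate_content is google_genai.translate_content
assert google_vertexai.translate_content_chunk is google_genai.translate_content_chunk
```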
@@ -1,39 +1,135 @@
 """Derivations of standard content blocks from Groq content."""
 
-import warnings
+import json
+import re
+from typing import Any
 
 from langchain_core.messages import AIMessage, AIMessageChunk
 from langchain_core.messages import content as types
+from langchain_core.messages.base import _extract_reasoning_from_additional_kwargs
 
-WARNED = False
 
+def _populate_extras(
+    standard_block: types.ContentBlock, block: dict[str, Any], known_fields: set[str]
+) -> types.ContentBlock:
+    """Mutate a block, populating extras."""
+    if standard_block.get("type") == "non_standard":
+        return standard_block
 
-def translate_content(message: AIMessage) -> list[types.ContentBlock]:  # noqa: ARG001
-    """Derive standard content blocks from a message with Groq content."""
-    global WARNED  # noqa: PLW0603
-    if not WARNED:
-        warning_message = (
-            "Content block standardization is not yet fully supported for Groq."
+    for key, value in block.items():
+        if key not in known_fields:
+            if "extras" not in standard_block:
+                # Below type-ignores are because mypy thinks a non-standard block can
+                # get here, although we exclude them above.
+                standard_block["extras"] = {}  # type: ignore[typeddict-unknown-key]
+            standard_block["extras"][key] = value  # type: ignore[typeddict-item]
+
+    return standard_block
+
+
+def _parse_code_json(s: str) -> dict:
+    """Extract Python code from Groq built-in tool content.
+
+    Extracts the value of the 'code' field from a string of the form:
+    {"code": some_arbitrary_text_with_unescaped_quotes}
+
+    As Groq may not escape quotes in the executed tools, e.g.:
+    ```
+    '{"code": "import math; print("The square root of 101 is: "); print(math.sqrt(101))"}'
+    ```
+    """  # noqa: E501
+    m = re.fullmatch(r'\s*\{\s*"code"\s*:\s*"(.*)"\s*\}\s*', s, flags=re.DOTALL)
+    if not m:
+        msg = (
+            "Could not extract Python code from Groq tool arguments. "
+            "Expected a JSON object with a 'code' field."
         )
-        warnings.warn(warning_message, stacklevel=2)
-        WARNED = True
-    raise NotImplementedError
+        raise ValueError(msg)
+    return {"code": m.group(1)}
+
+
+def _convert_to_v1_from_groq(message: AIMessage) -> list[types.ContentBlock]:
+    """Convert groq message content to v1 format."""
+    content_blocks: list[types.ContentBlock] = []
 
+    if reasoning_block := _extract_reasoning_from_additional_kwargs(message):
+        content_blocks.append(reasoning_block)
 
-def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:  # noqa: ARG001
-    """Derive standard content blocks from a message chunk with Groq content."""
-    global WARNED  # noqa: PLW0603
-    if not WARNED:
-        warning_message = (
-            "Content block standardization is not yet fully supported for Groq."
+    if executed_tools := message.additional_kwargs.get("executed_tools"):
+        for idx, executed_tool in enumerate(executed_tools):
+            args: dict[str, Any] | None = None
+            if arguments := executed_tool.get("arguments"):
+                try:
+                    args = json.loads(arguments)
+                except json.JSONDecodeError:
+                    if executed_tool.get("type") == "python":
+                        try:
+                            args = _parse_code_json(arguments)
+                        except ValueError:
+                            continue
+                    elif (
+                        executed_tool.get("type") == "function"
+                        and executed_tool.get("name") == "python"
+                    ):
+                        # GPT-OSS
+                        args = {"code": arguments}
+                    else:
+                        continue
+            if isinstance(args, dict):
+                name = ""
+                if executed_tool.get("type") == "search":
+                    name = "web_search"
+                elif executed_tool.get("type") == "python" or (
+                    executed_tool.get("type") == "function"
+                    and executed_tool.get("name") == "python"
+                ):
+                    name = "code_interpreter"
+                server_tool_call: types.ServerToolCall = {
+                    "type": "server_tool_call",
+                    "name": name,
+                    "id": str(idx),
+                    "args": args,
+                }
+                content_blocks.append(server_tool_call)
+                if tool_output := executed_tool.get("output"):
+                    tool_result: types.ServerToolResult = {
+                        "type": "server_tool_result",
+                        "tool_call_id": str(idx),
+                        "output": tool_output,
+                        "status": "success",
+                    }
+                    known_fields = {"type", "arguments", "index", "output"}
+                    _populate_extras(tool_result, executed_tool, known_fields)
+                    content_blocks.append(tool_result)
+
+    if isinstance(message.content, str) and message.content:
+        content_blocks.append({"type": "text", "text": message.content})
+
+    for tool_call in message.tool_calls:
+        content_blocks.append(  # noqa: PERF401
+            {
+                "type": "tool_call",
+                "name": tool_call["name"],
+                "args": tool_call["args"],
+                "id": tool_call.get("id"),
+            }
         )
-        warnings.warn(warning_message, stacklevel=2)
-        WARNED = True
-    raise NotImplementedError
+
+    return content_blocks
+
+
+def translate_content(message: AIMessage) -> list[types.ContentBlock]:
+    """Derive standard content blocks from a message with groq content."""
+    return _convert_to_v1_from_groq(message)
+
+
+def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
+    """Derive standard content blocks from a message chunk with groq content."""
+    return _convert_to_v1_from_groq(message)
 
 
 def _register_groq_translator() -> None:
-    """Register the Groq translator with the central registry.
+    """Register the groq translator with the central registry.
 
     Run automatically when the module is imported.
     """
@@ -10,12 +10,12 @@ def _convert_v0_multimodal_input_to_v1(
 ) -> list[types.ContentBlock]:
     """Convert v0 multimodal blocks to v1 format.
 
-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
-    block as a ``'non_standard'`` block with the original block stored in the ``value``
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any v0 format
     blocks to v1 format.
 
-    If conversion fails, the block is left as a ``'non_standard'`` block.
+    If conversion fails, the block is left as a `'non_standard'` block.
 
     Args:
         content: List of content blocks to process.
@@ -18,7 +18,7 @@ if TYPE_CHECKING:
 
 
 def convert_to_openai_image_block(block: dict[str, Any]) -> dict:
-    """Convert ``ImageContentBlock`` to format expected by OpenAI Chat Completions."""
+    """Convert `ImageContentBlock` to format expected by OpenAI Chat Completions."""
     if "url" in block:
         return {
             "type": "image_url",
@@ -155,12 +155,12 @@ def _convert_to_v1_from_chat_completions_input(
 ) -> list[types.ContentBlock]:
     """Convert OpenAI Chat Completions format blocks to v1 format.
 
-    During the `.content_blocks` parsing process, we wrap blocks not recognized as a v1
-    block as a ``'non_standard'`` block with the original block stored in the ``value``
+    During the `content_blocks` parsing process, we wrap blocks not recognized as a v1
+    block as a `'non_standard'` block with the original block stored in the `value`
     field. This function attempts to unpack those blocks and convert any blocks that
     might be OpenAI format to v1 ContentBlocks.
 
-    If conversion fails, the block is left as a ``'non_standard'`` block.
+    If conversion fails, the block is left as a `'non_standard'` block.
 
     Args:
         content: List of content blocks to process.
@@ -263,7 +263,7 @@ _FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"
 
 
 def _convert_from_v03_ai_message(message: AIMessage) -> AIMessage:
-    """Convert v0 AIMessage into ``output_version="responses/v1"`` format."""
+    """Convert v0 AIMessage into `output_version="responses/v1"` format."""
     from langchain_core.messages import AIMessageChunk  # noqa: PLC0415
 
     # Only update ChatOpenAI v0.3 AIMessages
@@ -19,7 +19,7 @@ class ChatMessage(BaseMessage):
     """The speaker / role of the Message."""
 
     type: Literal["chat"] = "chat"
-    """The type of the message (used during serialization). Defaults to "chat"."""
+    """The type of the message (used during serialization)."""
 
 
 class ChatMessageChunk(ChatMessage, BaseMessageChunk):
@@ -29,11 +29,7 @@ class ChatMessageChunk(ChatMessage, BaseMessageChunk):
     # to make sure that the chunk variant can be discriminated from the
     # non-chunk variant.
     type: Literal["ChatMessageChunk"] = "ChatMessageChunk"  # type: ignore[assignment]
-    """The type of the message (used during serialization).
-
-    Defaults to ``'ChatMessageChunk'``.
-
-    """
+    """The type of the message (used during serialization)."""
 
     @override
     def __add__(self, other: Any) -> BaseMessageChunk:  # type: ignore[override]