langchain-core 1.0.0a8__py3-none-any.whl → 1.0.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (135)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +0 -1
  3. langchain_core/_api/beta_decorator.py +17 -20
  4. langchain_core/_api/deprecation.py +30 -35
  5. langchain_core/_import_utils.py +1 -1
  6. langchain_core/agents.py +7 -6
  7. langchain_core/caches.py +4 -10
  8. langchain_core/callbacks/__init__.py +1 -8
  9. langchain_core/callbacks/base.py +232 -243
  10. langchain_core/callbacks/file.py +33 -33
  11. langchain_core/callbacks/manager.py +353 -416
  12. langchain_core/callbacks/stdout.py +21 -22
  13. langchain_core/callbacks/streaming_stdout.py +32 -32
  14. langchain_core/callbacks/usage.py +54 -51
  15. langchain_core/chat_history.py +42 -57
  16. langchain_core/document_loaders/langsmith.py +21 -21
  17. langchain_core/documents/__init__.py +0 -1
  18. langchain_core/documents/base.py +37 -40
  19. langchain_core/documents/transformers.py +28 -29
  20. langchain_core/embeddings/fake.py +46 -52
  21. langchain_core/exceptions.py +5 -5
  22. langchain_core/indexing/api.py +11 -11
  23. langchain_core/indexing/base.py +24 -24
  24. langchain_core/language_models/__init__.py +0 -2
  25. langchain_core/language_models/_utils.py +51 -53
  26. langchain_core/language_models/base.py +23 -24
  27. langchain_core/language_models/chat_models.py +121 -144
  28. langchain_core/language_models/fake_chat_models.py +5 -5
  29. langchain_core/language_models/llms.py +10 -12
  30. langchain_core/load/dump.py +1 -1
  31. langchain_core/load/load.py +16 -16
  32. langchain_core/load/serializable.py +35 -34
  33. langchain_core/messages/__init__.py +1 -16
  34. langchain_core/messages/ai.py +105 -104
  35. langchain_core/messages/base.py +26 -26
  36. langchain_core/messages/block_translators/__init__.py +17 -17
  37. langchain_core/messages/block_translators/anthropic.py +2 -2
  38. langchain_core/messages/block_translators/bedrock_converse.py +2 -2
  39. langchain_core/messages/block_translators/google_genai.py +2 -2
  40. langchain_core/messages/block_translators/groq.py +117 -21
  41. langchain_core/messages/block_translators/langchain_v0.py +2 -2
  42. langchain_core/messages/block_translators/openai.py +4 -4
  43. langchain_core/messages/chat.py +1 -1
  44. langchain_core/messages/content.py +189 -193
  45. langchain_core/messages/function.py +5 -5
  46. langchain_core/messages/human.py +15 -17
  47. langchain_core/messages/modifier.py +1 -1
  48. langchain_core/messages/system.py +12 -14
  49. langchain_core/messages/tool.py +45 -49
  50. langchain_core/messages/utils.py +384 -396
  51. langchain_core/output_parsers/__init__.py +1 -14
  52. langchain_core/output_parsers/base.py +22 -23
  53. langchain_core/output_parsers/json.py +3 -3
  54. langchain_core/output_parsers/list.py +1 -1
  55. langchain_core/output_parsers/openai_functions.py +46 -44
  56. langchain_core/output_parsers/openai_tools.py +7 -7
  57. langchain_core/output_parsers/pydantic.py +10 -11
  58. langchain_core/output_parsers/string.py +1 -1
  59. langchain_core/output_parsers/transform.py +2 -2
  60. langchain_core/output_parsers/xml.py +1 -1
  61. langchain_core/outputs/__init__.py +1 -1
  62. langchain_core/outputs/chat_generation.py +14 -14
  63. langchain_core/outputs/generation.py +5 -5
  64. langchain_core/outputs/llm_result.py +5 -5
  65. langchain_core/prompt_values.py +5 -5
  66. langchain_core/prompts/__init__.py +3 -23
  67. langchain_core/prompts/base.py +32 -37
  68. langchain_core/prompts/chat.py +216 -222
  69. langchain_core/prompts/dict.py +2 -2
  70. langchain_core/prompts/few_shot.py +76 -83
  71. langchain_core/prompts/few_shot_with_templates.py +6 -8
  72. langchain_core/prompts/image.py +11 -13
  73. langchain_core/prompts/loading.py +1 -1
  74. langchain_core/prompts/message.py +2 -2
  75. langchain_core/prompts/prompt.py +14 -16
  76. langchain_core/prompts/string.py +19 -7
  77. langchain_core/prompts/structured.py +24 -25
  78. langchain_core/rate_limiters.py +36 -38
  79. langchain_core/retrievers.py +41 -182
  80. langchain_core/runnables/base.py +565 -590
  81. langchain_core/runnables/branch.py +7 -7
  82. langchain_core/runnables/config.py +37 -44
  83. langchain_core/runnables/configurable.py +8 -9
  84. langchain_core/runnables/fallbacks.py +8 -8
  85. langchain_core/runnables/graph.py +28 -27
  86. langchain_core/runnables/graph_ascii.py +19 -18
  87. langchain_core/runnables/graph_mermaid.py +20 -31
  88. langchain_core/runnables/graph_png.py +7 -7
  89. langchain_core/runnables/history.py +20 -20
  90. langchain_core/runnables/passthrough.py +8 -8
  91. langchain_core/runnables/retry.py +3 -3
  92. langchain_core/runnables/router.py +1 -1
  93. langchain_core/runnables/schema.py +33 -33
  94. langchain_core/runnables/utils.py +30 -34
  95. langchain_core/stores.py +72 -102
  96. langchain_core/sys_info.py +27 -29
  97. langchain_core/tools/__init__.py +1 -14
  98. langchain_core/tools/base.py +63 -63
  99. langchain_core/tools/convert.py +92 -92
  100. langchain_core/tools/render.py +9 -9
  101. langchain_core/tools/retriever.py +1 -1
  102. langchain_core/tools/simple.py +6 -7
  103. langchain_core/tools/structured.py +17 -18
  104. langchain_core/tracers/__init__.py +1 -9
  105. langchain_core/tracers/base.py +35 -35
  106. langchain_core/tracers/context.py +12 -17
  107. langchain_core/tracers/event_stream.py +3 -3
  108. langchain_core/tracers/langchain.py +8 -8
  109. langchain_core/tracers/log_stream.py +17 -18
  110. langchain_core/tracers/memory_stream.py +2 -2
  111. langchain_core/tracers/schemas.py +0 -129
  112. langchain_core/utils/aiter.py +31 -31
  113. langchain_core/utils/env.py +5 -5
  114. langchain_core/utils/function_calling.py +48 -120
  115. langchain_core/utils/html.py +4 -4
  116. langchain_core/utils/input.py +2 -2
  117. langchain_core/utils/interactive_env.py +1 -1
  118. langchain_core/utils/iter.py +19 -19
  119. langchain_core/utils/json.py +1 -1
  120. langchain_core/utils/json_schema.py +2 -2
  121. langchain_core/utils/mustache.py +5 -5
  122. langchain_core/utils/pydantic.py +17 -17
  123. langchain_core/utils/strings.py +4 -4
  124. langchain_core/utils/utils.py +25 -28
  125. langchain_core/vectorstores/base.py +43 -64
  126. langchain_core/vectorstores/in_memory.py +83 -85
  127. langchain_core/version.py +1 -1
  128. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc1.dist-info}/METADATA +23 -11
  129. langchain_core-1.0.0rc1.dist-info/RECORD +172 -0
  130. langchain_core/memory.py +0 -120
  131. langchain_core/pydantic_v1/__init__.py +0 -30
  132. langchain_core/pydantic_v1/dataclasses.py +0 -23
  133. langchain_core/pydantic_v1/main.py +0 -23
  134. langchain_core-1.0.0a8.dist-info/RECORD +0 -176
  135. {langchain_core-1.0.0a8.dist-info → langchain_core-1.0.0rc1.dist-info}/WHEEL +0 -0
@@ -19,7 +19,7 @@ def is_openai_data_block(
 ) -> bool:
     """Check whether a block contains multimodal data in OpenAI Chat Completions format.
 
-    Supports both data and ID-style blocks (e.g. ``'file_data'`` and ``'file_id'``)
+    Supports both data and ID-style blocks (e.g. `'file_data'` and `'file_id'`)
 
     If additional keys are present, they are ignored / will not affect outcome as long
     as the required keys are present and valid.
@@ -30,12 +30,12 @@ def is_openai_data_block(
             - "image": Only match image_url blocks
             - "audio": Only match input_audio blocks
             - "file": Only match file blocks
-            If None, match any valid OpenAI data block type. Note that this means that
+            If `None`, match any valid OpenAI data block type. Note that this means that
             if the block has a valid OpenAI data type but the filter_ is set to a
             different type, this function will return False.
 
     Returns:
-        True if the block is a valid OpenAI data block and matches the filter_
+        `True` if the block is a valid OpenAI data block and matches the filter_
         (if provided).
 
     """
@@ -92,18 +92,16 @@ def _parse_data_uri(uri: str) -> ParsedDataUri | None:
     If parsing fails, return None. If either MIME type or data is missing, return None.
 
     Example:
-
-    .. code-block:: python
-
-        data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
-        parsed = _parse_data_uri(data_uri)
-
-        assert parsed == {
-            "source_type": "base64",
-            "mime_type": "image/jpeg",
-            "data": "/9j/4AAQSkZJRg...",
-        }
-
+        ```python
+        data_uri = "data:image/jpeg;base64,/9j/4AAQSkZJRg..."
+        parsed = _parse_data_uri(data_uri)
+
+        assert parsed == {
+            "source_type": "base64",
+            "mime_type": "image/jpeg",
+            "data": "/9j/4AAQSkZJRg...",
+        }
+        ```
     """
     regex = r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$"
     match = re.match(regex, uri)
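The regex visible in the unchanged context lines makes the parsing easy to reproduce. A rough standalone sketch (not the private `_parse_data_uri` helper itself), assuming the dict shape from the docstring example:

```python
from __future__ import annotations

import re
from typing import TypedDict


class ParsedDataUri(TypedDict):
    # Shape taken from the docstring example above; names are illustrative.
    source_type: str
    mime_type: str
    data: str


def parse_data_uri(uri: str) -> ParsedDataUri | None:
    """Parse a base64 data URI, returning None when MIME type or data is missing."""
    match = re.match(r"^data:(?P<mime_type>[^;]+);base64,(?P<data>.+)$", uri)
    if match is None:
        return None
    return {
        "source_type": "base64",
        "mime_type": match.group("mime_type"),
        "data": match.group("data"),
    }


assert parse_data_uri("data:image/jpeg;base64,/9j/4AAQ") == {
    "source_type": "base64",
    "mime_type": "image/jpeg",
    "data": "/9j/4AAQ",
}
assert parse_data_uri("not-a-data-uri") is None
```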
@@ -133,8 +131,8 @@ def _normalize_messages(
     - LangChain v1 standard content blocks
 
     This function extends support to:
-    - `Audio <https://platform.openai.com/docs/api-reference/chat/create>`__ and
-      `file <https://platform.openai.com/docs/api-reference/files>`__ data in OpenAI
+    - `[Audio](https://platform.openai.com/docs/api-reference/chat/create) and
+      `[file](https://platform.openai.com/docs/api-reference/files) data in OpenAI
       Chat Completions format
     - Images are technically supported but we expect chat models to handle them
       directly; this may change in the future
@@ -148,50 +146,50 @@ def _normalize_messages(
 
     ??? note "v0 Content Block Schemas"
 
-        ``URLContentBlock``:
+        `URLContentBlock`:
 
-        .. codeblock::
-
-            {
-                mime_type: NotRequired[str]
-                type: Literal['image', 'audio', 'file'],
-                source_type: Literal['url'],
-                url: str,
-            }
-
-        ``Base64ContentBlock``:
+        ```python
+        {
+            mime_type: NotRequired[str]
+            type: Literal['image', 'audio', 'file'],
+            source_type: Literal['url'],
+            url: str,
+        }
+        ```
 
-        .. codeblock::
+        `Base64ContentBlock`:
 
-            {
-                mime_type: NotRequired[str]
-                type: Literal['image', 'audio', 'file'],
-                source_type: Literal['base64'],
-                data: str,
-            }
+        ```python
+        {
+            mime_type: NotRequired[str]
+            type: Literal['image', 'audio', 'file'],
+            source_type: Literal['base64'],
+            data: str,
+        }
+        ```
 
-        ``IDContentBlock``:
+        `IDContentBlock`:
 
         (In practice, this was never used)
 
-        .. codeblock::
-
-            {
-                type: Literal['image', 'audio', 'file'],
-                source_type: Literal['id'],
-                id: str,
-            }
-
-        ``PlainTextContentBlock``:
+        ```python
+        {
+            type: Literal["image", "audio", "file"],
+            source_type: Literal["id"],
+            id: str,
+        }
+        ```
 
-        .. codeblock::
+        `PlainTextContentBlock`:
 
-            {
-                mime_type: NotRequired[str]
-                type: Literal['file'],
-                source_type: Literal['text'],
-                url: str,
-            }
+        ```python
+        {
+            mime_type: NotRequired[str]
+            type: Literal['file'],
+            source_type: Literal['text'],
+            url: str,
+        }
+        ```
 
     If a v1 message is passed in, it will be returned as-is, meaning it is safe to
     always pass in v1 messages to this function for assurance.
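For readers comparing these against v1 blocks, the v0 shapes listed above can be written as `TypedDict`s. This is an illustrative reconstruction from the docstring, not something imported from langchain_core, and it assumes Python 3.11+ (or `typing_extensions`) for `NotRequired`:

```python
from typing import Literal, NotRequired, TypedDict


class URLContentBlock(TypedDict):
    mime_type: NotRequired[str]
    type: Literal["image", "audio", "file"]
    source_type: Literal["url"]
    url: str


class Base64ContentBlock(TypedDict):
    mime_type: NotRequired[str]
    type: Literal["image", "audio", "file"]
    source_type: Literal["base64"]
    data: str


class IDContentBlock(TypedDict):
    # Per the docstring, this shape was never used in practice.
    type: Literal["image", "audio", "file"]
    source_type: Literal["id"]
    id: str


class PlainTextContentBlock(TypedDict):
    # The docstring lists `url` as the payload field for text blocks.
    mime_type: NotRequired[str]
    type: Literal["file"]
    source_type: Literal["text"]
    url: str
```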
@@ -222,7 +220,7 @@ def _normalize_messages(
             "type": Literal['file'],
             "file": Union[
                 {
-                    "filename": Optional[str] = "$FILENAME",
+                    "filename": str | None = "$FILENAME",
                     "file_data": str = "$BASE64_ENCODED_FILE",
                 },
                 {
@@ -110,20 +110,19 @@ class BaseLanguageModel(
 ):
     """Abstract base class for interfacing with language models.
 
-    All language model wrappers inherited from ``BaseLanguageModel``.
+    All language model wrappers inherited from `BaseLanguageModel`.
 
     """
 
     cache: BaseCache | bool | None = Field(default=None, exclude=True)
     """Whether to cache the response.
 
-    * If true, will use the global cache.
-    * If false, will not use a cache
-    * If None, will use the global cache if it's set, otherwise no cache.
-    * If instance of ``BaseCache``, will use the provided cache.
+    * If `True`, will use the global cache.
+    * If `False`, will not use a cache
+    * If `None`, will use the global cache if it's set, otherwise no cache.
+    * If instance of `BaseCache`, will use the provided cache.
 
     Caching is not currently supported for streaming methods of models.
-
     """
     verbose: bool = Field(default_factory=_get_verbosity, exclude=True, repr=False)
     """Whether to print out response text."""
@@ -144,9 +143,9 @@ class BaseLanguageModel(
 
     @field_validator("verbose", mode="before")
     def set_verbose(cls, verbose: bool | None) -> bool:  # noqa: FBT001
-        """If verbose is None, set it.
+        """If verbose is `None`, set it.
 
-        This allows users to pass in None as verbose to access the global setting.
+        This allows users to pass in `None` as verbose to access the global setting.
 
         Args:
             verbose: The verbosity setting to use.
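The validator documented here follows a common pattern: `None` defers to a global default. A toy reproduction with Pydantic (the `_GLOBAL_VERBOSE` flag is a stand-in for illustration, not the library's actual global):

```python
from pydantic import BaseModel, Field, field_validator

_GLOBAL_VERBOSE = False  # stand-in for a process-wide verbosity setting


class ModelSettings(BaseModel):
    """Toy model showing the 'None means use the global setting' pattern."""

    verbose: bool = Field(default_factory=lambda: _GLOBAL_VERBOSE)

    @field_validator("verbose", mode="before")
    @classmethod
    def set_verbose(cls, verbose: bool | None) -> bool:
        # A None passed by the caller defers to the global default.
        return _GLOBAL_VERBOSE if verbose is None else verbose


assert ModelSettings(verbose=None).verbose is False
assert ModelSettings(verbose=True).verbose is True
```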
@@ -162,7 +161,7 @@ class BaseLanguageModel(
     @property
     @override
     def InputType(self) -> TypeAlias:
-        """Get the input type for this runnable."""
+        """Get the input type for this `Runnable`."""
         # This is a version of LanguageModelInput which replaces the abstract
         # base class BaseMessage with a union of its subclasses, which makes
         # for a much better schema.
@@ -186,12 +185,12 @@ class BaseLanguageModel(
         1. Take advantage of batched calls,
         2. Need more output from the model than just the top generated value,
         3. Are building chains that are agnostic to the underlying language model
-           type (e.g., pure text completion models vs chat models).
+           type (e.g., pure text completion models vs chat models).
 
         Args:
-            prompts: List of PromptValues. A PromptValue is an object that can be
-                converted to match the format of any language model (string for pure
-                text generation models and BaseMessages for chat models).
+            prompts: List of `PromptValue` objects. A `PromptValue` is an object that
+                can be converted to match the format of any language model (string for
+                pure text generation models and `BaseMessage` objects for chat models).
             stop: Stop words to use when generating. Model output is cut off at the
                 first occurrence of any of these substrings.
             callbacks: Callbacks to pass through. Used for executing additional
@@ -200,8 +199,8 @@ class BaseLanguageModel(
                 to the model provider API call.
 
         Returns:
-            An LLMResult, which contains a list of candidate Generations for each input
-            prompt and additional model provider-specific output.
+            An `LLMResult`, which contains a list of candidate `Generation` objects for
+            each input prompt and additional model provider-specific output.
 
         """
 
@@ -223,12 +222,12 @@ class BaseLanguageModel(
         1. Take advantage of batched calls,
         2. Need more output from the model than just the top generated value,
         3. Are building chains that are agnostic to the underlying language model
-           type (e.g., pure text completion models vs chat models).
+           type (e.g., pure text completion models vs chat models).
 
         Args:
-            prompts: List of PromptValues. A PromptValue is an object that can be
-                converted to match the format of any language model (string for pure
-                text generation models and BaseMessages for chat models).
+            prompts: List of `PromptValue` objects. A `PromptValue` is an object that
+                can be converted to match the format of any language model (string for
+                pure text generation models and `BaseMessage` objects for chat models).
             stop: Stop words to use when generating. Model output is cut off at the
                 first occurrence of any of these substrings.
             callbacks: Callbacks to pass through. Used for executing additional
@@ -237,8 +236,8 @@ class BaseLanguageModel(
                 to the model provider API call.
 
         Returns:
-            An ``LLMResult``, which contains a list of candidate Generations for each
-            input prompt and additional model provider-specific output.
+            An `LLMResult`, which contains a list of candidate `Generation` objects for
+            each input prompt and additional model provider-specific output.
 
         """
 
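As a usage illustration of the `PromptValue` → `LLMResult` flow these `generate_prompt`/`agenerate_prompt` docstrings describe, here is a short example using the fake chat model that ships with langchain_core purely as a stand-in; any chat model should behave the same way:

```python
from langchain_core.language_models.fake_chat_models import FakeListChatModel
from langchain_core.prompts import ChatPromptTemplate

# FakeListChatModel is langchain_core's built-in test double; the prompt text
# and question below are made up for this example.
model = FakeListChatModel(responses=["Hello!"])
prompt_value = ChatPromptTemplate.from_messages(
    [("system", "You are terse."), ("human", "{question}")]
).invoke({"question": "Say hi"})

result = model.generate_prompt([prompt_value])
# `generations` holds one list per input prompt, each containing the candidate
# generations for that prompt.
print(result.generations[0][0].text)  # -> "Hello!"
```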
@@ -294,13 +293,13 @@ class BaseLanguageModel(
         Useful for checking if an input fits in a model's context window.
 
         !!! note
-            The base implementation of ``get_num_tokens_from_messages`` ignores tool
+            The base implementation of `get_num_tokens_from_messages` ignores tool
             schemas.
 
         Args:
             messages: The message inputs to tokenize.
-            tools: If provided, sequence of dict, ``BaseModel``, function, or
-                ``BaseTools`` to be converted to tool schemas.
+            tools: If provided, sequence of dict, `BaseModel`, function, or
+                `BaseTool` objects to be converted to tool schemas.
 
         Returns:
             The sum of the number of tokens across the messages.
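To make the documented caveat concrete, here is a toy counter in which whitespace splitting stands in for a real tokenizer; like the note above says of the base implementation, the `tools` argument is accepted but ignored. Nothing here comes from the library's internals:

```python
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage


def naive_num_tokens_from_messages(
    messages: list[BaseMessage],
    tools: list | None = None,  # accepted but ignored, mirroring the documented note
) -> int:
    # Count "tokens" as whitespace-separated words in each message's text content.
    return sum(len(str(m.content).split()) for m in messages)


msgs = [HumanMessage("What is the weather in Paris?"), AIMessage("Let me check.")]
assert naive_num_tokens_from_messages(msgs) == 9
```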