langchain-core 0.3.79__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of langchain-core might be problematic.

Files changed (165)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +23 -26
  4. langchain_core/_api/deprecation.py +52 -65
  5. langchain_core/_api/path.py +3 -6
  6. langchain_core/_import_utils.py +3 -4
  7. langchain_core/agents.py +19 -19
  8. langchain_core/caches.py +53 -63
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +323 -334
  11. langchain_core/callbacks/file.py +44 -44
  12. langchain_core/callbacks/manager.py +441 -507
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +32 -32
  15. langchain_core/callbacks/usage.py +60 -57
  16. langchain_core/chat_history.py +48 -63
  17. langchain_core/document_loaders/base.py +23 -23
  18. langchain_core/document_loaders/langsmith.py +37 -37
  19. langchain_core/documents/__init__.py +0 -1
  20. langchain_core/documents/base.py +62 -65
  21. langchain_core/documents/compressor.py +4 -4
  22. langchain_core/documents/transformers.py +28 -29
  23. langchain_core/embeddings/fake.py +50 -54
  24. langchain_core/example_selectors/length_based.py +1 -1
  25. langchain_core/example_selectors/semantic_similarity.py +21 -25
  26. langchain_core/exceptions.py +10 -11
  27. langchain_core/globals.py +3 -151
  28. langchain_core/indexing/api.py +61 -66
  29. langchain_core/indexing/base.py +58 -58
  30. langchain_core/indexing/in_memory.py +3 -3
  31. langchain_core/language_models/__init__.py +14 -27
  32. langchain_core/language_models/_utils.py +270 -84
  33. langchain_core/language_models/base.py +55 -162
  34. langchain_core/language_models/chat_models.py +442 -402
  35. langchain_core/language_models/fake.py +11 -11
  36. langchain_core/language_models/fake_chat_models.py +61 -39
  37. langchain_core/language_models/llms.py +123 -231
  38. langchain_core/load/dump.py +4 -5
  39. langchain_core/load/load.py +18 -28
  40. langchain_core/load/mapping.py +2 -4
  41. langchain_core/load/serializable.py +39 -40
  42. langchain_core/messages/__init__.py +61 -22
  43. langchain_core/messages/ai.py +368 -163
  44. langchain_core/messages/base.py +214 -43
  45. langchain_core/messages/block_translators/__init__.py +111 -0
  46. langchain_core/messages/block_translators/anthropic.py +470 -0
  47. langchain_core/messages/block_translators/bedrock.py +94 -0
  48. langchain_core/messages/block_translators/bedrock_converse.py +297 -0
  49. langchain_core/messages/block_translators/google_genai.py +530 -0
  50. langchain_core/messages/block_translators/google_vertexai.py +21 -0
  51. langchain_core/messages/block_translators/groq.py +143 -0
  52. langchain_core/messages/block_translators/langchain_v0.py +301 -0
  53. langchain_core/messages/block_translators/openai.py +1010 -0
  54. langchain_core/messages/chat.py +2 -6
  55. langchain_core/messages/content.py +1423 -0
  56. langchain_core/messages/function.py +6 -10
  57. langchain_core/messages/human.py +41 -38
  58. langchain_core/messages/modifier.py +2 -2
  59. langchain_core/messages/system.py +38 -28
  60. langchain_core/messages/tool.py +96 -103
  61. langchain_core/messages/utils.py +478 -504
  62. langchain_core/output_parsers/__init__.py +1 -14
  63. langchain_core/output_parsers/base.py +58 -61
  64. langchain_core/output_parsers/json.py +7 -8
  65. langchain_core/output_parsers/list.py +5 -7
  66. langchain_core/output_parsers/openai_functions.py +49 -47
  67. langchain_core/output_parsers/openai_tools.py +14 -19
  68. langchain_core/output_parsers/pydantic.py +12 -13
  69. langchain_core/output_parsers/string.py +2 -2
  70. langchain_core/output_parsers/transform.py +15 -17
  71. langchain_core/output_parsers/xml.py +8 -10
  72. langchain_core/outputs/__init__.py +1 -1
  73. langchain_core/outputs/chat_generation.py +18 -18
  74. langchain_core/outputs/chat_result.py +1 -3
  75. langchain_core/outputs/generation.py +8 -8
  76. langchain_core/outputs/llm_result.py +10 -10
  77. langchain_core/prompt_values.py +12 -12
  78. langchain_core/prompts/__init__.py +3 -27
  79. langchain_core/prompts/base.py +45 -55
  80. langchain_core/prompts/chat.py +254 -313
  81. langchain_core/prompts/dict.py +5 -5
  82. langchain_core/prompts/few_shot.py +81 -88
  83. langchain_core/prompts/few_shot_with_templates.py +11 -13
  84. langchain_core/prompts/image.py +12 -14
  85. langchain_core/prompts/loading.py +6 -8
  86. langchain_core/prompts/message.py +3 -3
  87. langchain_core/prompts/prompt.py +24 -39
  88. langchain_core/prompts/string.py +4 -4
  89. langchain_core/prompts/structured.py +42 -50
  90. langchain_core/rate_limiters.py +51 -60
  91. langchain_core/retrievers.py +49 -190
  92. langchain_core/runnables/base.py +1484 -1709
  93. langchain_core/runnables/branch.py +45 -61
  94. langchain_core/runnables/config.py +80 -88
  95. langchain_core/runnables/configurable.py +117 -134
  96. langchain_core/runnables/fallbacks.py +83 -79
  97. langchain_core/runnables/graph.py +85 -95
  98. langchain_core/runnables/graph_ascii.py +27 -28
  99. langchain_core/runnables/graph_mermaid.py +38 -50
  100. langchain_core/runnables/graph_png.py +15 -16
  101. langchain_core/runnables/history.py +135 -148
  102. langchain_core/runnables/passthrough.py +124 -150
  103. langchain_core/runnables/retry.py +46 -51
  104. langchain_core/runnables/router.py +25 -30
  105. langchain_core/runnables/schema.py +79 -74
  106. langchain_core/runnables/utils.py +62 -68
  107. langchain_core/stores.py +81 -115
  108. langchain_core/structured_query.py +8 -8
  109. langchain_core/sys_info.py +27 -29
  110. langchain_core/tools/__init__.py +1 -14
  111. langchain_core/tools/base.py +179 -187
  112. langchain_core/tools/convert.py +131 -139
  113. langchain_core/tools/render.py +10 -10
  114. langchain_core/tools/retriever.py +11 -11
  115. langchain_core/tools/simple.py +19 -24
  116. langchain_core/tools/structured.py +30 -39
  117. langchain_core/tracers/__init__.py +1 -9
  118. langchain_core/tracers/base.py +97 -99
  119. langchain_core/tracers/context.py +29 -52
  120. langchain_core/tracers/core.py +50 -60
  121. langchain_core/tracers/evaluation.py +11 -11
  122. langchain_core/tracers/event_stream.py +115 -70
  123. langchain_core/tracers/langchain.py +21 -21
  124. langchain_core/tracers/log_stream.py +43 -43
  125. langchain_core/tracers/memory_stream.py +3 -3
  126. langchain_core/tracers/root_listeners.py +16 -16
  127. langchain_core/tracers/run_collector.py +2 -4
  128. langchain_core/tracers/schemas.py +0 -129
  129. langchain_core/tracers/stdout.py +3 -3
  130. langchain_core/utils/__init__.py +1 -4
  131. langchain_core/utils/_merge.py +46 -8
  132. langchain_core/utils/aiter.py +57 -61
  133. langchain_core/utils/env.py +9 -9
  134. langchain_core/utils/function_calling.py +89 -191
  135. langchain_core/utils/html.py +7 -8
  136. langchain_core/utils/input.py +6 -6
  137. langchain_core/utils/interactive_env.py +1 -1
  138. langchain_core/utils/iter.py +37 -42
  139. langchain_core/utils/json.py +4 -3
  140. langchain_core/utils/json_schema.py +8 -8
  141. langchain_core/utils/mustache.py +9 -11
  142. langchain_core/utils/pydantic.py +33 -35
  143. langchain_core/utils/strings.py +5 -5
  144. langchain_core/utils/usage.py +1 -1
  145. langchain_core/utils/utils.py +80 -54
  146. langchain_core/vectorstores/base.py +129 -164
  147. langchain_core/vectorstores/in_memory.py +99 -174
  148. langchain_core/vectorstores/utils.py +5 -5
  149. langchain_core/version.py +1 -1
  150. {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/METADATA +28 -27
  151. langchain_core-1.0.0.dist-info/RECORD +172 -0
  152. {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
  153. langchain_core/beta/__init__.py +0 -1
  154. langchain_core/beta/runnables/__init__.py +0 -1
  155. langchain_core/beta/runnables/context.py +0 -447
  156. langchain_core/memory.py +0 -120
  157. langchain_core/messages/content_blocks.py +0 -176
  158. langchain_core/prompts/pipeline.py +0 -138
  159. langchain_core/pydantic_v1/__init__.py +0 -30
  160. langchain_core/pydantic_v1/dataclasses.py +0 -23
  161. langchain_core/pydantic_v1/main.py +0 -23
  162. langchain_core/tracers/langchain_v1.py +0 -31
  163. langchain_core/utils/loading.py +0 -35
  164. langchain_core-0.3.79.dist-info/RECORD +0 -174
  165. langchain_core-0.3.79.dist-info/entry_points.txt +0 -4
langchain_core/caches.py CHANGED
@@ -1,31 +1,22 @@
-"""Cache classes.
+"""`caches` provides an optional caching layer for language models.
 
-.. warning::
-  Beta Feature!
+!!! warning
+    This is a beta feature! Please be wary of deploying experimental code to production
+    unless you've taken appropriate precautions.
 
-**Cache** provides an optional caching layer for LLMs.
+A cache is useful for two reasons:
 
-Cache is useful for two reasons:
-
-- It can save you money by reducing the number of API calls you make to the LLM
-  provider if you're often requesting the same completion multiple times.
-- It can speed up your application by reducing the number of API calls you make
-  to the LLM provider.
-
-Cache directly competes with Memory. See documentation for Pros and Cons.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
+1. It can save you money by reducing the number of API calls you make to the LLM
+   provider if you're often requesting the same completion multiple times.
+2. It can speed up your application by reducing the number of API calls you make to the
+   LLM provider.
 """
 
 from __future__ import annotations
 
 from abc import ABC, abstractmethod
 from collections.abc import Sequence
-from typing import Any, Optional
+from typing import Any
 
 from typing_extensions import override
 
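As orientation for reviewers, here is a minimal sketch of enabling the caching layer the new docstring describes. It assumes `set_llm_cache` from `langchain_core.globals` (not shown in this diff) remains exported in 1.0:

from langchain_core.caches import InMemoryCache
from langchain_core.globals import set_llm_cache

# After this call, model invocations made through LangChain consult the
# cache before hitting the provider API, so repeated identical prompts
# are served from memory. (Assumes set_llm_cache survives in 1.0.)
set_llm_cache(InMemoryCache())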
@@ -40,8 +31,8 @@ class BaseCache(ABC):
 
     The cache interface consists of the following methods:
 
-    - lookup: Look up a value based on a prompt and llm_string.
-    - update: Update the cache based on a prompt and llm_string.
+    - lookup: Look up a value based on a prompt and `llm_string`.
+    - update: Update the cache based on a prompt and `llm_string`.
     - clear: Clear the cache.
 
     In addition, the cache interface provides an async version of each method.
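Since the required surface is just these three sync methods (the async variants have executor-backed defaults, shown further down), a conforming implementation can be very small. A sketch under that assumption; `DictCache` is a hypothetical example, not part of the package:

from __future__ import annotations

from typing import Any

from langchain_core.caches import RETURN_VAL_TYPE, BaseCache

class DictCache(BaseCache):
    """Hypothetical dict-backed cache keyed on the (prompt, llm_string) 2-tuple."""

    def __init__(self) -> None:
        self._store: dict[tuple[str, str], RETURN_VAL_TYPE] = {}

    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
        # Cache hit returns the stored generations; miss returns None.
        return self._store.get((prompt, llm_string))

    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
        self._store[(prompt, llm_string)] = return_val

    def clear(self, **kwargs: Any) -> None:
        self._store.clear()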
@@ -52,15 +43,15 @@ class BaseCache(ABC):
     """
 
     @abstractmethod
-    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string.
+    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Look up based on `prompt` and `llm_string`.
 
         A cache implementation is expected to generate a key from the 2-tuple
         of prompt and llm_string (e.g., by concatenating them with a delimiter).
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
@@ -69,27 +60,27 @@ class BaseCache(ABC):
                 representation.
 
         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
-            The cached value is a list of Generations (or subclasses).
+            On a cache miss, return `None`. On a cache hit, return the cached value.
+            The cached value is a list of `Generation` (or subclasses).
         """
 
     @abstractmethod
     def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-        """Update cache based on prompt and llm_string.
+        """Update cache based on `prompt` and `llm_string`.
 
         The prompt and llm_string are used to generate a key for the cache.
         The key should match that of the lookup method.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
                 (e.g., model name, temperature, stop tokens, max tokens, etc.).
                 These invocation parameters are serialized into a string
                 representation.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
 
@@ -97,15 +88,15 @@ class BaseCache(ABC):
     def clear(self, **kwargs: Any) -> None:
         """Clear cache that can take additional keyword arguments."""
 
-    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Async look up based on prompt and llm_string.
+    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Async look up based on `prompt` and `llm_string`.
 
         A cache implementation is expected to generate a key from the 2-tuple
         of prompt and llm_string (e.g., by concatenating them with a delimiter).
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
@@ -114,29 +105,29 @@ class BaseCache(ABC):
                 representation.
 
         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
-            The cached value is a list of Generations (or subclasses).
+            On a cache miss, return `None`. On a cache hit, return the cached value.
+            The cached value is a list of `Generation` (or subclasses).
         """
         return await run_in_executor(None, self.lookup, prompt, llm_string)
 
     async def aupdate(
         self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
     ) -> None:
-        """Async update cache based on prompt and llm_string.
+        """Async update cache based on `prompt` and `llm_string`.
 
         The prompt and llm_string are used to generate a key for the cache.
         The key should match that of the look up method.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
                 (e.g., model name, temperature, stop tokens, max tokens, etc.).
                 These invocation parameters are serialized into a string
                 representation.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
         return await run_in_executor(None, self.update, prompt, llm_string, return_val)
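Note that both async defaults above wrap the sync methods with `run_in_executor`, so a subclass that only implements the sync trio is still fully usable from async code. A small illustrative run (the prompt and `llm_string` values are made up):

import asyncio

from langchain_core.caches import InMemoryCache
from langchain_core.outputs import Generation

async def demo() -> None:
    cache = InMemoryCache()
    # aupdate/alookup mirror update/lookup; InMemoryCache simply delegates,
    # while BaseCache's defaults would run the sync methods in an executor.
    await cache.aupdate("What is 2 + 2?", "model=fake,temperature=0", [Generation(text="4")])
    hit = await cache.alookup("What is 2 + 2?", "model=fake,temperature=0")
    assert hit is not None and hit[0].text == "4"

asyncio.run(demo())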
@@ -149,17 +140,16 @@ class BaseCache(ABC):
 class InMemoryCache(BaseCache):
     """Cache that stores things in memory."""
 
-    def __init__(self, *, maxsize: Optional[int] = None) -> None:
+    def __init__(self, *, maxsize: int | None = None) -> None:
         """Initialize with empty cache.
 
         Args:
             maxsize: The maximum number of items to store in the cache.
-                If None, the cache has no maximum size.
+                If `None`, the cache has no maximum size.
                 If the cache exceeds the maximum size, the oldest items are removed.
-                Default is None.
 
         Raises:
-            ValueError: If maxsize is less than or equal to 0.
+            ValueError: If `maxsize` is less than or equal to `0`.
         """
         self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
         if maxsize is not None and maxsize <= 0:
@@ -167,29 +157,29 @@ class InMemoryCache(BaseCache):
             raise ValueError(msg)
         self._maxsize = maxsize
 
-    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string.
+    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Look up based on `prompt` and `llm_string`.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
 
         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
+            On a cache miss, return `None`. On a cache hit, return the cached value.
         """
         return self._cache.get((prompt, llm_string), None)
 
     def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-        """Update cache based on prompt and llm_string.
+        """Update cache based on `prompt` and `llm_string`.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
         if self._maxsize is not None and len(self._cache) == self._maxsize:
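Per the docstring in the previous hunk, a full cache evicts its oldest entry on the next `update`, and a non-positive `maxsize` is rejected at construction. A short sketch of both behaviors:

from langchain_core.caches import InMemoryCache
from langchain_core.outputs import Generation

cache = InMemoryCache(maxsize=1)
cache.update("p1", "cfg", [Generation(text="a")])
cache.update("p2", "cfg", [Generation(text="b")])  # evicts ("p1", "cfg"), the oldest entry

assert cache.lookup("p1", "cfg") is None
assert cache.lookup("p2", "cfg") is not None

# InMemoryCache(maxsize=0)  # would raise ValueError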
@@ -201,31 +191,31 @@ class InMemoryCache(BaseCache):
         """Clear cache."""
         self._cache = {}
 
-    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Async look up based on prompt and llm_string.
+    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Async look up based on `prompt` and `llm_string`.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
 
         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
+            On a cache miss, return `None`. On a cache hit, return the cached value.
         """
         return self.lookup(prompt, llm_string)
 
     async def aupdate(
         self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
     ) -> None:
-        """Async update cache based on prompt and llm_string.
+        """Async update cache based on `prompt` and `llm_string`.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
         self.update(prompt, llm_string, return_val)
langchain_core/callbacks/__init__.py CHANGED
@@ -1,11 +1,4 @@
-"""**Callback handlers** allow listening to events in LangChain.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
-"""
+"""**Callback handlers** allow listening to events in LangChain."""
 
 from typing import TYPE_CHECKING
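For context on what the trimmed docstring describes, a minimal sketch of a custom handler; the class name and print behavior are ours, and every `on_*` hook on `BaseCallbackHandler` is optional to override:

from typing import Any

from langchain_core.callbacks import BaseCallbackHandler

class PrintingHandler(BaseCallbackHandler):
    """Hypothetical handler that logs when an LLM run starts."""

    def on_llm_start(
        self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
    ) -> None:
        # Called when an LLM (or chat model) begins generating.
        print(f"LLM starting with {len(prompts)} prompt(s)")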