langchain-core 1.0.0a5__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (165)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +23 -26
  4. langchain_core/_api/deprecation.py +51 -64
  5. langchain_core/_api/path.py +3 -6
  6. langchain_core/_import_utils.py +3 -4
  7. langchain_core/agents.py +20 -22
  8. langchain_core/caches.py +65 -66
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +321 -336
  11. langchain_core/callbacks/file.py +44 -44
  12. langchain_core/callbacks/manager.py +436 -513
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +32 -32
  15. langchain_core/callbacks/usage.py +60 -57
  16. langchain_core/chat_history.py +53 -68
  17. langchain_core/document_loaders/base.py +27 -25
  18. langchain_core/document_loaders/blob_loaders.py +1 -1
  19. langchain_core/document_loaders/langsmith.py +44 -48
  20. langchain_core/documents/__init__.py +23 -3
  21. langchain_core/documents/base.py +98 -90
  22. langchain_core/documents/compressor.py +10 -10
  23. langchain_core/documents/transformers.py +34 -35
  24. langchain_core/embeddings/fake.py +50 -54
  25. langchain_core/example_selectors/length_based.py +1 -1
  26. langchain_core/example_selectors/semantic_similarity.py +28 -32
  27. langchain_core/exceptions.py +21 -20
  28. langchain_core/globals.py +3 -151
  29. langchain_core/indexing/__init__.py +1 -1
  30. langchain_core/indexing/api.py +121 -126
  31. langchain_core/indexing/base.py +73 -75
  32. langchain_core/indexing/in_memory.py +4 -6
  33. langchain_core/language_models/__init__.py +14 -29
  34. langchain_core/language_models/_utils.py +58 -61
  35. langchain_core/language_models/base.py +53 -162
  36. langchain_core/language_models/chat_models.py +298 -387
  37. langchain_core/language_models/fake.py +11 -11
  38. langchain_core/language_models/fake_chat_models.py +42 -36
  39. langchain_core/language_models/llms.py +125 -235
  40. langchain_core/load/dump.py +9 -12
  41. langchain_core/load/load.py +18 -28
  42. langchain_core/load/mapping.py +2 -4
  43. langchain_core/load/serializable.py +42 -40
  44. langchain_core/messages/__init__.py +10 -16
  45. langchain_core/messages/ai.py +148 -148
  46. langchain_core/messages/base.py +58 -52
  47. langchain_core/messages/block_translators/__init__.py +27 -17
  48. langchain_core/messages/block_translators/anthropic.py +6 -6
  49. langchain_core/messages/block_translators/bedrock_converse.py +5 -5
  50. langchain_core/messages/block_translators/google_genai.py +505 -20
  51. langchain_core/messages/block_translators/google_vertexai.py +4 -32
  52. langchain_core/messages/block_translators/groq.py +117 -21
  53. langchain_core/messages/block_translators/langchain_v0.py +5 -5
  54. langchain_core/messages/block_translators/openai.py +11 -11
  55. langchain_core/messages/chat.py +2 -6
  56. langchain_core/messages/content.py +337 -328
  57. langchain_core/messages/function.py +6 -10
  58. langchain_core/messages/human.py +24 -31
  59. langchain_core/messages/modifier.py +2 -2
  60. langchain_core/messages/system.py +19 -29
  61. langchain_core/messages/tool.py +74 -90
  62. langchain_core/messages/utils.py +474 -504
  63. langchain_core/output_parsers/__init__.py +13 -10
  64. langchain_core/output_parsers/base.py +61 -61
  65. langchain_core/output_parsers/format_instructions.py +9 -4
  66. langchain_core/output_parsers/json.py +12 -10
  67. langchain_core/output_parsers/list.py +21 -23
  68. langchain_core/output_parsers/openai_functions.py +49 -47
  69. langchain_core/output_parsers/openai_tools.py +16 -21
  70. langchain_core/output_parsers/pydantic.py +13 -14
  71. langchain_core/output_parsers/string.py +5 -5
  72. langchain_core/output_parsers/transform.py +15 -17
  73. langchain_core/output_parsers/xml.py +35 -34
  74. langchain_core/outputs/__init__.py +1 -1
  75. langchain_core/outputs/chat_generation.py +18 -18
  76. langchain_core/outputs/chat_result.py +1 -3
  77. langchain_core/outputs/generation.py +10 -11
  78. langchain_core/outputs/llm_result.py +10 -10
  79. langchain_core/prompt_values.py +11 -17
  80. langchain_core/prompts/__init__.py +3 -27
  81. langchain_core/prompts/base.py +48 -56
  82. langchain_core/prompts/chat.py +275 -325
  83. langchain_core/prompts/dict.py +5 -5
  84. langchain_core/prompts/few_shot.py +81 -88
  85. langchain_core/prompts/few_shot_with_templates.py +11 -13
  86. langchain_core/prompts/image.py +12 -14
  87. langchain_core/prompts/loading.py +4 -6
  88. langchain_core/prompts/message.py +3 -3
  89. langchain_core/prompts/prompt.py +24 -39
  90. langchain_core/prompts/string.py +26 -10
  91. langchain_core/prompts/structured.py +49 -53
  92. langchain_core/rate_limiters.py +51 -60
  93. langchain_core/retrievers.py +61 -198
  94. langchain_core/runnables/base.py +1478 -1630
  95. langchain_core/runnables/branch.py +53 -57
  96. langchain_core/runnables/config.py +72 -89
  97. langchain_core/runnables/configurable.py +120 -137
  98. langchain_core/runnables/fallbacks.py +83 -79
  99. langchain_core/runnables/graph.py +91 -97
  100. langchain_core/runnables/graph_ascii.py +27 -28
  101. langchain_core/runnables/graph_mermaid.py +38 -50
  102. langchain_core/runnables/graph_png.py +15 -16
  103. langchain_core/runnables/history.py +135 -148
  104. langchain_core/runnables/passthrough.py +124 -150
  105. langchain_core/runnables/retry.py +46 -51
  106. langchain_core/runnables/router.py +25 -30
  107. langchain_core/runnables/schema.py +75 -80
  108. langchain_core/runnables/utils.py +60 -67
  109. langchain_core/stores.py +85 -121
  110. langchain_core/structured_query.py +8 -8
  111. langchain_core/sys_info.py +27 -29
  112. langchain_core/tools/__init__.py +1 -14
  113. langchain_core/tools/base.py +285 -229
  114. langchain_core/tools/convert.py +160 -155
  115. langchain_core/tools/render.py +10 -10
  116. langchain_core/tools/retriever.py +12 -11
  117. langchain_core/tools/simple.py +19 -24
  118. langchain_core/tools/structured.py +32 -39
  119. langchain_core/tracers/__init__.py +1 -9
  120. langchain_core/tracers/base.py +97 -99
  121. langchain_core/tracers/context.py +29 -52
  122. langchain_core/tracers/core.py +49 -53
  123. langchain_core/tracers/evaluation.py +11 -11
  124. langchain_core/tracers/event_stream.py +65 -64
  125. langchain_core/tracers/langchain.py +21 -21
  126. langchain_core/tracers/log_stream.py +45 -45
  127. langchain_core/tracers/memory_stream.py +3 -3
  128. langchain_core/tracers/root_listeners.py +16 -16
  129. langchain_core/tracers/run_collector.py +2 -4
  130. langchain_core/tracers/schemas.py +0 -129
  131. langchain_core/tracers/stdout.py +3 -3
  132. langchain_core/utils/__init__.py +1 -4
  133. langchain_core/utils/_merge.py +2 -2
  134. langchain_core/utils/aiter.py +57 -61
  135. langchain_core/utils/env.py +9 -9
  136. langchain_core/utils/function_calling.py +89 -186
  137. langchain_core/utils/html.py +7 -8
  138. langchain_core/utils/input.py +6 -6
  139. langchain_core/utils/interactive_env.py +1 -1
  140. langchain_core/utils/iter.py +36 -40
  141. langchain_core/utils/json.py +4 -3
  142. langchain_core/utils/json_schema.py +9 -9
  143. langchain_core/utils/mustache.py +8 -10
  144. langchain_core/utils/pydantic.py +33 -35
  145. langchain_core/utils/strings.py +6 -9
  146. langchain_core/utils/usage.py +1 -1
  147. langchain_core/utils/utils.py +66 -62
  148. langchain_core/vectorstores/base.py +182 -216
  149. langchain_core/vectorstores/in_memory.py +101 -176
  150. langchain_core/vectorstores/utils.py +5 -5
  151. langchain_core/version.py +1 -1
  152. langchain_core-1.0.3.dist-info/METADATA +69 -0
  153. langchain_core-1.0.3.dist-info/RECORD +172 -0
  154. {langchain_core-1.0.0a5.dist-info → langchain_core-1.0.3.dist-info}/WHEEL +1 -1
  155. langchain_core/memory.py +0 -120
  156. langchain_core/messages/block_translators/ollama.py +0 -47
  157. langchain_core/prompts/pipeline.py +0 -138
  158. langchain_core/pydantic_v1/__init__.py +0 -30
  159. langchain_core/pydantic_v1/dataclasses.py +0 -23
  160. langchain_core/pydantic_v1/main.py +0 -23
  161. langchain_core/tracers/langchain_v1.py +0 -31
  162. langchain_core/utils/loading.py +0 -35
  163. langchain_core-1.0.0a5.dist-info/METADATA +0 -77
  164. langchain_core-1.0.0a5.dist-info/RECORD +0 -181
  165. langchain_core-1.0.0a5.dist-info/entry_points.txt +0 -4
langchain_core/caches.py CHANGED
@@ -1,31 +1,24 @@
-"""Cache classes.
+"""Optional caching layer for language models.
 
-.. warning::
-  Beta Feature!
+Distinct from provider-based [prompt caching](https://docs.langchain.com/oss/python/langchain/models#prompt-caching).
 
-**Cache** provides an optional caching layer for LLMs.
+!!! warning "Beta feature"
+    This is a beta feature. Please be wary of deploying experimental code to production
+    unless you've taken appropriate precautions.
 
-Cache is useful for two reasons:
+A cache is useful for two reasons:
 
-- It can save you money by reducing the number of API calls you make to the LLM
-  provider if you're often requesting the same completion multiple times.
-- It can speed up your application by reducing the number of API calls you make
-  to the LLM provider.
-
-Cache directly competes with Memory. See documentation for Pros and Cons.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
+1. It can save you money by reducing the number of API calls you make to the LLM
+   provider if you're often requesting the same completion multiple times.
+2. It can speed up your application by reducing the number of API calls you make to the
+   LLM provider.
 """
 
 from __future__ import annotations
 
 from abc import ABC, abstractmethod
 from collections.abc import Sequence
-from typing import Any, Optional
+from typing import Any
 
 from typing_extensions import override
 
@@ -40,8 +33,8 @@ class BaseCache(ABC):
 
     The cache interface consists of the following methods:
 
-    - lookup: Look up a value based on a prompt and llm_string.
-    - update: Update the cache based on a prompt and llm_string.
+    - lookup: Look up a value based on a prompt and `llm_string`.
+    - update: Update the cache based on a prompt and `llm_string`.
     - clear: Clear the cache.
 
     In addition, the cache interface provides an async version of each method.
@@ -52,44 +45,47 @@ class BaseCache(ABC):
     """
 
     @abstractmethod
-    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string.
+    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Look up based on `prompt` and `llm_string`.
 
         A cache implementation is expected to generate a key from the 2-tuple
-        of prompt and llm_string (e.g., by concatenating them with a delimiter).
+        of `prompt` and `llm_string` (e.g., by concatenating them with a delimiter).
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
+
                This is used to capture the invocation parameters of the LLM
                (e.g., model name, temperature, stop tokens, max tokens, etc.).
-                These invocation parameters are serialized into a string
-                representation.
+
+                These invocation parameters are serialized into a string representation.
 
         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
-            The cached value is a list of Generations (or subclasses).
+            On a cache miss, return `None`. On a cache hit, return the cached value.
+            The cached value is a list of `Generation` (or subclasses).
         """
 
     @abstractmethod
     def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-        """Update cache based on prompt and llm_string.
+        """Update cache based on `prompt` and `llm_string`.
 
         The prompt and llm_string are used to generate a key for the cache.
         The key should match that of the lookup method.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
+
                This is used to capture the invocation parameters of the LLM
                (e.g., model name, temperature, stop tokens, max tokens, etc.).
+
                These invocation parameters are serialized into a string
                representation.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
         """
 
@@ -97,46 +93,50 @@ class BaseCache(ABC):
     def clear(self, **kwargs: Any) -> None:
        """Clear cache that can take additional keyword arguments."""
 
-    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Async look up based on prompt and llm_string.
+    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Async look up based on `prompt` and `llm_string`.
 
         A cache implementation is expected to generate a key from the 2-tuple
-        of prompt and llm_string (e.g., by concatenating them with a delimiter).
+        of `prompt` and `llm_string` (e.g., by concatenating them with a delimiter).
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
+
                This is used to capture the invocation parameters of the LLM
                (e.g., model name, temperature, stop tokens, max tokens, etc.).
+
                These invocation parameters are serialized into a string
                representation.
 
         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
-            The cached value is a list of Generations (or subclasses).
+            On a cache miss, return `None`. On a cache hit, return the cached value.
+            The cached value is a list of `Generation` (or subclasses).
         """
         return await run_in_executor(None, self.lookup, prompt, llm_string)
 
     async def aupdate(
         self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
     ) -> None:
-        """Async update cache based on prompt and llm_string.
+        """Async update cache based on `prompt` and `llm_string`.
 
         The prompt and llm_string are used to generate a key for the cache.
         The key should match that of the look up method.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
+
                This is used to capture the invocation parameters of the LLM
                (e.g., model name, temperature, stop tokens, max tokens, etc.).
+
                These invocation parameters are serialized into a string
                representation.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
         """
         return await run_in_executor(None, self.update, prompt, llm_string, return_val)
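
To make the `BaseCache` contract above concrete, here is a minimal, hypothetical sketch (not part of langchain-core) of a custom cache with a time-to-live. The class name, the `ttl_seconds` parameter, and the eviction policy are illustrative; only the three required methods are implemented, so the async variants fall back to the executor-based defaults shown in the diff.

    import time
    from typing import Any

    from langchain_core.caches import BaseCache, RETURN_VAL_TYPE


    class TTLCache(BaseCache):
        """Hypothetical cache whose entries expire after `ttl_seconds`."""

        def __init__(self, ttl_seconds: float = 300.0) -> None:
            self._ttl = ttl_seconds
            # Keyed by the (prompt, llm_string) 2-tuple, as the interface suggests.
            self._store: dict[tuple[str, str], tuple[float, RETURN_VAL_TYPE]] = {}

        def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
            hit = self._store.get((prompt, llm_string))
            if hit is None:
                return None  # cache miss
            stored_at, value = hit
            if time.monotonic() - stored_at > self._ttl:
                del self._store[(prompt, llm_string)]  # entry expired, treat as a miss
                return None
            return value

        def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
            self._store[(prompt, llm_string)] = (time.monotonic(), return_val)

        def clear(self, **kwargs: Any) -> None:
            self._store = {}
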
@@ -149,17 +149,16 @@ class BaseCache(ABC):
 class InMemoryCache(BaseCache):
     """Cache that stores things in memory."""
 
-    def __init__(self, *, maxsize: Optional[int] = None) -> None:
+    def __init__(self, *, maxsize: int | None = None) -> None:
        """Initialize with empty cache.
 
        Args:
            maxsize: The maximum number of items to store in the cache.
-                If None, the cache has no maximum size.
+                If `None`, the cache has no maximum size.
                If the cache exceeds the maximum size, the oldest items are removed.
-                Default is None.
 
        Raises:
-            ValueError: If maxsize is less than or equal to 0.
+            ValueError: If `maxsize` is less than or equal to `0`.
        """
        self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
        if maxsize is not None and maxsize <= 0:
@@ -167,29 +166,29 @@ class InMemoryCache(BaseCache):
             raise ValueError(msg)
         self._maxsize = maxsize
 
-    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string.
+    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Look up based on `prompt` and `llm_string`.
 
        Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
 
        Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
+            On a cache miss, return `None`. On a cache hit, return the cached value.
        """
        return self._cache.get((prompt, llm_string), None)
 
    def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-        """Update cache based on prompt and llm_string.
+        """Update cache based on `prompt` and `llm_string`.
 
        Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """
        if self._maxsize is not None and len(self._cache) == self._maxsize:
@@ -201,31 +200,31 @@ class InMemoryCache(BaseCache):
         """Clear cache."""
         self._cache = {}
 
-    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Async look up based on prompt and llm_string.
+    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Async look up based on `prompt` and `llm_string`.
 
        Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
 
        Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
+            On a cache miss, return `None`. On a cache hit, return the cached value.
        """
        return self.lookup(prompt, llm_string)
 
    async def aupdate(
        self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
    ) -> None:
-        """Async update cache based on prompt and llm_string.
+        """Async update cache based on `prompt` and `llm_string`.
 
        Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                serialization of the prompt into the language model.
            llm_string: A string representation of the LLM configuration.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                (or subclasses).
        """
        self.update(prompt, llm_string, return_val)
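
For context on how these classes are used (a sketch, not taken from this diff): a cache instance can be passed to any chat model through the standard `cache` field. The example below uses `GenericFakeChatModel` purely so it runs without network access; a real provider integration would be used in practice.

    from langchain_core.caches import InMemoryCache
    from langchain_core.language_models import GenericFakeChatModel
    from langchain_core.messages import AIMessage

    cache = InMemoryCache(maxsize=128)  # oldest entry is evicted once 128 items are stored
    model = GenericFakeChatModel(messages=iter([AIMessage("hi there")]), cache=cache)

    first = model.invoke("hello")   # consumes the fake response and writes it to the cache
    second = model.invoke("hello")  # same prompt and model config, so served from the cache
    assert first.content == second.content
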
langchain_core/callbacks/__init__.py CHANGED
@@ -1,11 +1,4 @@
-"""**Callback handlers** allow listening to events in LangChain.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
-"""
+"""**Callback handlers** allow listening to events in LangChain."""
 
 from typing import TYPE_CHECKING
 
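
As a hedged illustration of the handler interface this docstring describes (not code from the diff): a custom handler subclasses `BaseCallbackHandler` and overrides only the hooks it cares about. The timing logic below is illustrative; the hook names and the `config={"callbacks": [...]}` wiring are the standard mechanism.

    import time
    from typing import Any
    from uuid import UUID

    from langchain_core.callbacks import BaseCallbackHandler
    from langchain_core.outputs import LLMResult


    class TimingHandler(BaseCallbackHandler):
        """Hypothetical handler that records how long each LLM call takes."""

        def __init__(self) -> None:
            self._started: dict[UUID, float] = {}
            self.durations: list[float] = []

        def on_llm_start(
            self, serialized: dict[str, Any], prompts: list[str], *, run_id: UUID, **kwargs: Any
        ) -> None:
            self._started[run_id] = time.monotonic()

        def on_llm_end(self, response: LLMResult, *, run_id: UUID, **kwargs: Any) -> None:
            started = self._started.pop(run_id, None)
            if started is not None:
                self.durations.append(time.monotonic() - started)

    # Typically attached per call:
    # model.invoke("hello", config={"callbacks": [TimingHandler()]})
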