langchain-core 0.4.0.dev0__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of langchain-core might be problematic.

Files changed (172)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +45 -70
  4. langchain_core/_api/deprecation.py +80 -80
  5. langchain_core/_api/path.py +22 -8
  6. langchain_core/_import_utils.py +10 -4
  7. langchain_core/agents.py +25 -21
  8. langchain_core/caches.py +53 -63
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +341 -348
  11. langchain_core/callbacks/file.py +55 -44
  12. langchain_core/callbacks/manager.py +546 -683
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +35 -36
  15. langchain_core/callbacks/usage.py +65 -70
  16. langchain_core/chat_history.py +48 -55
  17. langchain_core/document_loaders/base.py +46 -21
  18. langchain_core/document_loaders/langsmith.py +39 -36
  19. langchain_core/documents/__init__.py +0 -1
  20. langchain_core/documents/base.py +96 -74
  21. langchain_core/documents/compressor.py +12 -9
  22. langchain_core/documents/transformers.py +29 -28
  23. langchain_core/embeddings/fake.py +56 -57
  24. langchain_core/env.py +2 -3
  25. langchain_core/example_selectors/base.py +12 -0
  26. langchain_core/example_selectors/length_based.py +1 -1
  27. langchain_core/example_selectors/semantic_similarity.py +21 -25
  28. langchain_core/exceptions.py +15 -9
  29. langchain_core/globals.py +4 -163
  30. langchain_core/indexing/api.py +132 -125
  31. langchain_core/indexing/base.py +64 -67
  32. langchain_core/indexing/in_memory.py +26 -6
  33. langchain_core/language_models/__init__.py +15 -27
  34. langchain_core/language_models/_utils.py +267 -117
  35. langchain_core/language_models/base.py +92 -177
  36. langchain_core/language_models/chat_models.py +547 -407
  37. langchain_core/language_models/fake.py +11 -11
  38. langchain_core/language_models/fake_chat_models.py +72 -118
  39. langchain_core/language_models/llms.py +168 -242
  40. langchain_core/load/dump.py +8 -11
  41. langchain_core/load/load.py +32 -28
  42. langchain_core/load/mapping.py +2 -4
  43. langchain_core/load/serializable.py +50 -56
  44. langchain_core/messages/__init__.py +36 -51
  45. langchain_core/messages/ai.py +377 -150
  46. langchain_core/messages/base.py +239 -47
  47. langchain_core/messages/block_translators/__init__.py +111 -0
  48. langchain_core/messages/block_translators/anthropic.py +470 -0
  49. langchain_core/messages/block_translators/bedrock.py +94 -0
  50. langchain_core/messages/block_translators/bedrock_converse.py +297 -0
  51. langchain_core/messages/block_translators/google_genai.py +530 -0
  52. langchain_core/messages/block_translators/google_vertexai.py +21 -0
  53. langchain_core/messages/block_translators/groq.py +143 -0
  54. langchain_core/messages/block_translators/langchain_v0.py +301 -0
  55. langchain_core/messages/block_translators/openai.py +1010 -0
  56. langchain_core/messages/chat.py +2 -3
  57. langchain_core/messages/content.py +1423 -0
  58. langchain_core/messages/function.py +7 -7
  59. langchain_core/messages/human.py +44 -38
  60. langchain_core/messages/modifier.py +3 -2
  61. langchain_core/messages/system.py +40 -27
  62. langchain_core/messages/tool.py +160 -58
  63. langchain_core/messages/utils.py +527 -638
  64. langchain_core/output_parsers/__init__.py +1 -14
  65. langchain_core/output_parsers/base.py +68 -104
  66. langchain_core/output_parsers/json.py +13 -17
  67. langchain_core/output_parsers/list.py +11 -33
  68. langchain_core/output_parsers/openai_functions.py +56 -74
  69. langchain_core/output_parsers/openai_tools.py +68 -109
  70. langchain_core/output_parsers/pydantic.py +15 -13
  71. langchain_core/output_parsers/string.py +6 -2
  72. langchain_core/output_parsers/transform.py +17 -60
  73. langchain_core/output_parsers/xml.py +34 -44
  74. langchain_core/outputs/__init__.py +1 -1
  75. langchain_core/outputs/chat_generation.py +26 -11
  76. langchain_core/outputs/chat_result.py +1 -3
  77. langchain_core/outputs/generation.py +17 -6
  78. langchain_core/outputs/llm_result.py +15 -8
  79. langchain_core/prompt_values.py +29 -123
  80. langchain_core/prompts/__init__.py +3 -27
  81. langchain_core/prompts/base.py +48 -63
  82. langchain_core/prompts/chat.py +259 -288
  83. langchain_core/prompts/dict.py +19 -11
  84. langchain_core/prompts/few_shot.py +84 -90
  85. langchain_core/prompts/few_shot_with_templates.py +14 -12
  86. langchain_core/prompts/image.py +19 -14
  87. langchain_core/prompts/loading.py +6 -8
  88. langchain_core/prompts/message.py +7 -8
  89. langchain_core/prompts/prompt.py +42 -43
  90. langchain_core/prompts/string.py +37 -16
  91. langchain_core/prompts/structured.py +43 -46
  92. langchain_core/rate_limiters.py +51 -60
  93. langchain_core/retrievers.py +52 -192
  94. langchain_core/runnables/base.py +1727 -1683
  95. langchain_core/runnables/branch.py +52 -73
  96. langchain_core/runnables/config.py +89 -103
  97. langchain_core/runnables/configurable.py +128 -130
  98. langchain_core/runnables/fallbacks.py +93 -82
  99. langchain_core/runnables/graph.py +127 -127
  100. langchain_core/runnables/graph_ascii.py +63 -41
  101. langchain_core/runnables/graph_mermaid.py +87 -70
  102. langchain_core/runnables/graph_png.py +31 -36
  103. langchain_core/runnables/history.py +145 -161
  104. langchain_core/runnables/passthrough.py +141 -144
  105. langchain_core/runnables/retry.py +84 -68
  106. langchain_core/runnables/router.py +33 -37
  107. langchain_core/runnables/schema.py +79 -72
  108. langchain_core/runnables/utils.py +95 -139
  109. langchain_core/stores.py +85 -131
  110. langchain_core/structured_query.py +11 -15
  111. langchain_core/sys_info.py +31 -32
  112. langchain_core/tools/__init__.py +1 -14
  113. langchain_core/tools/base.py +221 -247
  114. langchain_core/tools/convert.py +144 -161
  115. langchain_core/tools/render.py +10 -10
  116. langchain_core/tools/retriever.py +12 -19
  117. langchain_core/tools/simple.py +52 -29
  118. langchain_core/tools/structured.py +56 -60
  119. langchain_core/tracers/__init__.py +1 -9
  120. langchain_core/tracers/_streaming.py +6 -7
  121. langchain_core/tracers/base.py +103 -112
  122. langchain_core/tracers/context.py +29 -48
  123. langchain_core/tracers/core.py +142 -105
  124. langchain_core/tracers/evaluation.py +30 -34
  125. langchain_core/tracers/event_stream.py +162 -117
  126. langchain_core/tracers/langchain.py +34 -36
  127. langchain_core/tracers/log_stream.py +87 -49
  128. langchain_core/tracers/memory_stream.py +3 -3
  129. langchain_core/tracers/root_listeners.py +18 -34
  130. langchain_core/tracers/run_collector.py +8 -20
  131. langchain_core/tracers/schemas.py +0 -125
  132. langchain_core/tracers/stdout.py +3 -3
  133. langchain_core/utils/__init__.py +1 -4
  134. langchain_core/utils/_merge.py +47 -9
  135. langchain_core/utils/aiter.py +70 -66
  136. langchain_core/utils/env.py +12 -9
  137. langchain_core/utils/function_calling.py +139 -206
  138. langchain_core/utils/html.py +7 -8
  139. langchain_core/utils/input.py +6 -6
  140. langchain_core/utils/interactive_env.py +6 -2
  141. langchain_core/utils/iter.py +48 -45
  142. langchain_core/utils/json.py +14 -4
  143. langchain_core/utils/json_schema.py +159 -43
  144. langchain_core/utils/mustache.py +32 -25
  145. langchain_core/utils/pydantic.py +67 -40
  146. langchain_core/utils/strings.py +5 -5
  147. langchain_core/utils/usage.py +1 -1
  148. langchain_core/utils/utils.py +104 -62
  149. langchain_core/vectorstores/base.py +131 -179
  150. langchain_core/vectorstores/in_memory.py +113 -182
  151. langchain_core/vectorstores/utils.py +23 -17
  152. langchain_core/version.py +1 -1
  153. langchain_core-1.0.0.dist-info/METADATA +68 -0
  154. langchain_core-1.0.0.dist-info/RECORD +172 -0
  155. {langchain_core-0.4.0.dev0.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
  156. langchain_core/beta/__init__.py +0 -1
  157. langchain_core/beta/runnables/__init__.py +0 -1
  158. langchain_core/beta/runnables/context.py +0 -448
  159. langchain_core/memory.py +0 -116
  160. langchain_core/messages/content_blocks.py +0 -1435
  161. langchain_core/prompts/pipeline.py +0 -133
  162. langchain_core/pydantic_v1/__init__.py +0 -30
  163. langchain_core/pydantic_v1/dataclasses.py +0 -23
  164. langchain_core/pydantic_v1/main.py +0 -23
  165. langchain_core/tracers/langchain_v1.py +0 -23
  166. langchain_core/utils/loading.py +0 -31
  167. langchain_core/v1/__init__.py +0 -1
  168. langchain_core/v1/chat_models.py +0 -1047
  169. langchain_core/v1/messages.py +0 -755
  170. langchain_core-0.4.0.dev0.dist-info/METADATA +0 -108
  171. langchain_core-0.4.0.dev0.dist-info/RECORD +0 -177
  172. langchain_core-0.4.0.dev0.dist-info/entry_points.txt +0 -4
langchain_core/agents.py CHANGED
@@ -1,32 +1,36 @@
 """Schema definitions for representing agent actions, observations, and return values.
 
-**ATTENTION** The schema definitions are provided for backwards compatibility.
+!!! warning
+    The schema definitions are provided for backwards compatibility.
 
-New agents should be built using the langgraph library
-(https://github.com/langchain-ai/langgraph)), which provides a simpler
-and more flexible way to define agents.
+!!! warning
+    New agents should be built using the
+    [langgraph library](https://github.com/langchain-ai/langgraph), which provides a
+    simpler and more flexible way to define agents.
 
-Please see the migration guide for information on how to migrate existing
-agents to modern langgraph agents:
-https://python.langchain.com/docs/how_to/migrate_agent/
+Please see the
+[migration guide](https://python.langchain.com/docs/how_to/migrate_agent/) for
+information on how to migrate existing agents to modern langgraph agents.
 
 Agents use language models to choose a sequence of actions to take.
 
 A basic agent works in the following manner:
 
-1. Given a prompt an agent uses an LLM to request an action to take (e.g., a tool to run).
+1. Given a prompt an agent uses an LLM to request an action to take
+   (e.g., a tool to run).
 2. The agent executes the action (e.g., runs the tool), and receives an observation.
-3. The agent returns the observation to the LLM, which can then be used to generate the next action.
+3. The agent returns the observation to the LLM, which can then be used to generate
+   the next action.
 4. When the agent reaches a stopping condition, it returns a final return value.
 
 The schemas for the agents themselves are defined in langchain.agents.agent.
-"""  # noqa: E501
+"""
 
 from __future__ import annotations
 
 import json
 from collections.abc import Sequence
-from typing import Any, Literal, Union
+from typing import Any, Literal
 
 from langchain_core.load.serializable import Serializable
 from langchain_core.messages import (
@@ -46,7 +50,7 @@ class AgentAction(Serializable):
 
     tool: str
     """The name of the Tool to execute."""
-    tool_input: Union[str, dict]
+    tool_input: str | dict
     """The input to pass in to the Tool."""
     log: str
     """Additional information to log about the action.
@@ -59,9 +63,7 @@ class AgentAction(Serializable):
     type: Literal["AgentAction"] = "AgentAction"
 
     # Override init to support instantiation by position for backward compat.
-    def __init__(
-        self, tool: str, tool_input: Union[str, dict], log: str, **kwargs: Any
-    ):
+    def __init__(self, tool: str, tool_input: str | dict, log: str, **kwargs: Any):
         """Create an AgentAction.
 
         Args:
@@ -82,9 +84,10 @@ class AgentAction(Serializable):
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.
 
-        Default is ["langchain", "schema", "agent"].
+        Returns:
+            `["langchain", "schema", "agent"]`
         """
         return ["langchain", "schema", "agent"]
 
@@ -109,7 +112,7 @@ class AgentActionMessageLog(AgentAction):
     if (tool, tool_input) cannot be used to fully recreate the LLM
     prediction, and you need that LLM prediction (for future agent iteration).
     Compared to `log`, this is useful when the underlying LLM is a
-    ChatModel (and therefore returns messages rather than a string)."""
+    chat model (and therefore returns messages rather than a string)."""
     # Ignoring type because we're overriding the type from AgentAction.
     # And this is the correct thing to do in this case.
     # The type literal is used for serialization purposes.
@@ -153,14 +156,15 @@ class AgentFinish(Serializable):
 
     @classmethod
     def is_lc_serializable(cls) -> bool:
-        """Return whether or not the class is serializable."""
+        """Return True as this class is serializable."""
        return True
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.
 
-        Default namespace is ["langchain", "schema", "agent"].
+        Returns:
+            `["langchain", "schema", "agent"]`
         """
         return ["langchain", "schema", "agent"]
 
langchain_core/caches.py CHANGED
@@ -1,31 +1,22 @@
-"""Cache classes.
+"""`caches` provides an optional caching layer for language models.
 
-.. warning::
-  Beta Feature!
+!!! warning
+    This is a beta feature! Please be wary of deploying experimental code to production
+    unless you've taken appropriate precautions.
 
-**Cache** provides an optional caching layer for LLMs.
+A cache is useful for two reasons:
 
-Cache is useful for two reasons:
-
-- It can save you money by reducing the number of API calls you make to the LLM
-  provider if you're often requesting the same completion multiple times.
-- It can speed up your application by reducing the number of API calls you make
-  to the LLM provider.
-
-Cache directly competes with Memory. See documentation for Pros and Cons.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseCache --> <name>Cache  # Examples: InMemoryCache, RedisCache, GPTCache
+1. It can save you money by reducing the number of API calls you make to the LLM
+   provider if you're often requesting the same completion multiple times.
+2. It can speed up your application by reducing the number of API calls you make to the
+   LLM provider.
 """
 
 from __future__ import annotations
 
 from abc import ABC, abstractmethod
 from collections.abc import Sequence
-from typing import Any, Optional
+from typing import Any
 
 from typing_extensions import override
 
@@ -40,8 +31,8 @@ class BaseCache(ABC):
 
     The cache interface consists of the following methods:
 
-    - lookup: Look up a value based on a prompt and llm_string.
-    - update: Update the cache based on a prompt and llm_string.
+    - lookup: Look up a value based on a prompt and `llm_string`.
+    - update: Update the cache based on a prompt and `llm_string`.
     - clear: Clear the cache.
 
     In addition, the cache interface provides an async version of each method.
@@ -52,15 +43,15 @@ class BaseCache(ABC):
     """
 
     @abstractmethod
-    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string.
+    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Look up based on `prompt` and `llm_string`.
 
         A cache implementation is expected to generate a key from the 2-tuple
         of prompt and llm_string (e.g., by concatenating them with a delimiter).
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
@@ -69,27 +60,27 @@ class BaseCache(ABC):
                 representation.
 
         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
-            The cached value is a list of Generations (or subclasses).
+            On a cache miss, return `None`. On a cache hit, return the cached value.
+            The cached value is a list of `Generation` (or subclasses).
         """
 
     @abstractmethod
     def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-        """Update cache based on prompt and llm_string.
+        """Update cache based on `prompt` and `llm_string`.
 
         The prompt and llm_string are used to generate a key for the cache.
         The key should match that of the lookup method.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
                 (e.g., model name, temperature, stop tokens, max tokens, etc.).
                 These invocation parameters are serialized into a string
                 representation.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
 
@@ -97,15 +88,15 @@ class BaseCache(ABC):
     def clear(self, **kwargs: Any) -> None:
         """Clear cache that can take additional keyword arguments."""
 
-    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Async look up based on prompt and llm_string.
+    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Async look up based on `prompt` and `llm_string`.
 
         A cache implementation is expected to generate a key from the 2-tuple
         of prompt and llm_string (e.g., by concatenating them with a delimiter).
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
@@ -114,29 +105,29 @@ class BaseCache(ABC):
                 representation.
 
         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
-            The cached value is a list of Generations (or subclasses).
+            On a cache miss, return `None`. On a cache hit, return the cached value.
+            The cached value is a list of `Generation` (or subclasses).
         """
         return await run_in_executor(None, self.lookup, prompt, llm_string)
 
     async def aupdate(
         self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
     ) -> None:
-        """Async update cache based on prompt and llm_string.
+        """Async update cache based on `prompt` and `llm_string`.
 
         The prompt and llm_string are used to generate a key for the cache.
         The key should match that of the look up method.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
                 This is used to capture the invocation parameters of the LLM
                 (e.g., model name, temperature, stop tokens, max tokens, etc.).
                 These invocation parameters are serialized into a string
                 representation.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
         return await run_in_executor(None, self.update, prompt, llm_string, return_val)
@@ -149,17 +140,16 @@ class BaseCache(ABC):
 class InMemoryCache(BaseCache):
     """Cache that stores things in memory."""
 
-    def __init__(self, *, maxsize: Optional[int] = None) -> None:
+    def __init__(self, *, maxsize: int | None = None) -> None:
         """Initialize with empty cache.
 
         Args:
             maxsize: The maximum number of items to store in the cache.
-                If None, the cache has no maximum size.
+                If `None`, the cache has no maximum size.
                 If the cache exceeds the maximum size, the oldest items are removed.
-                Default is None.
 
         Raises:
-            ValueError: If maxsize is less than or equal to 0.
+            ValueError: If `maxsize` is less than or equal to `0`.
         """
         self._cache: dict[tuple[str, str], RETURN_VAL_TYPE] = {}
         if maxsize is not None and maxsize <= 0:
@@ -167,29 +157,29 @@ class InMemoryCache(BaseCache):
             raise ValueError(msg)
         self._maxsize = maxsize
 
-    def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Look up based on prompt and llm_string.
+    def lookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Look up based on `prompt` and `llm_string`.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
 
         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
+            On a cache miss, return `None`. On a cache hit, return the cached value.
         """
         return self._cache.get((prompt, llm_string), None)
 
     def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
-        """Update cache based on prompt and llm_string.
+        """Update cache based on `prompt` and `llm_string`.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
         if self._maxsize is not None and len(self._cache) == self._maxsize:
@@ -201,31 +191,31 @@ class InMemoryCache(BaseCache):
         """Clear cache."""
         self._cache = {}
 
-    async def alookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
-        """Async look up based on prompt and llm_string.
+    async def alookup(self, prompt: str, llm_string: str) -> RETURN_VAL_TYPE | None:
+        """Async look up based on `prompt` and `llm_string`.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
 
         Returns:
-            On a cache miss, return None. On a cache hit, return the cached value.
+            On a cache miss, return `None`. On a cache hit, return the cached value.
         """
         return self.lookup(prompt, llm_string)
 
     async def aupdate(
         self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE
     ) -> None:
-        """Async update cache based on prompt and llm_string.
+        """Async update cache based on `prompt` and `llm_string`.
 
         Args:
-            prompt: a string representation of the prompt.
-                In the case of a Chat model, the prompt is a non-trivial
+            prompt: A string representation of the prompt.
+                In the case of a chat model, the prompt is a non-trivial
                 serialization of the prompt into the language model.
             llm_string: A string representation of the LLM configuration.
-            return_val: The value to be cached. The value is a list of Generations
+            return_val: The value to be cached. The value is a list of `Generation`
                 (or subclasses).
         """
         self.update(prompt, llm_string, return_val)
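The `BaseCache` contract above (a key derived from the `(prompt, llm_string)` pair, `None` on a miss, a list of `Generation` objects on a hit) can be exercised directly with `InMemoryCache`. A minimal sketch, assuming the post-diff API; the `llm_string` value is an invented stand-in for a real serialized model configuration:

```python
from langchain_core.caches import InMemoryCache
from langchain_core.outputs import Generation

cache = InMemoryCache(maxsize=2)
llm_string = "model=some-model temperature=0"  # stand-in for a serialized LLM config

# update() stores a list of Generation objects under the (prompt, llm_string) key.
cache.update("Hello", llm_string, [Generation(text="Hi there!")])

assert cache.lookup("Hello", llm_string)[0].text == "Hi there!"  # cache hit
assert cache.lookup("Unseen prompt", llm_string) is None         # cache miss

# With maxsize=2, adding a third entry evicts the oldest one ("Hello").
cache.update("A", llm_string, [Generation(text="a")])
cache.update("B", llm_string, [Generation(text="b")])
assert cache.lookup("Hello", llm_string) is None
```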
langchain_core/callbacks/__init__.py CHANGED
@@ -1,11 +1,4 @@
-"""**Callback handlers** allow listening to events in LangChain.
-
-**Class hierarchy:**
-
-.. code-block::
-
-    BaseCallbackHandler --> <name>CallbackHandler  # Example: AimCallbackHandler
-"""
+"""**Callback handlers** allow listening to events in LangChain."""
 
 from typing import TYPE_CHECKING
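Although the class-hierarchy note is removed from the docstring, the pattern it described still applies: subclass `BaseCallbackHandler` and override the event hooks you care about. A minimal sketch, assuming the standard `on_llm_start`/`on_llm_end` hooks; the handler name and print statements are illustrative only:

```python
from typing import Any

from langchain_core.callbacks import BaseCallbackHandler


class LoggingCallbackHandler(BaseCallbackHandler):  # hypothetical handler name
    """Follows the `BaseCallbackHandler --> <name>CallbackHandler` convention."""

    def on_llm_start(
        self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
    ) -> None:
        # Fires when an LLM run begins.
        print(f"LLM run started with {len(prompts)} prompt(s)")

    def on_llm_end(self, response: Any, **kwargs: Any) -> None:
        # Fires when the LLM run completes.
        print("LLM run finished")
```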