langchain-core 0.3.79__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff compares publicly available package versions as released to the supported public registries. It is provided for informational purposes only and reflects the packages as they appear in those registries.

Potentially problematic release: this version of langchain-core has been flagged as potentially problematic.

Files changed (165)
  1. langchain_core/__init__.py +1 -1
  2. langchain_core/_api/__init__.py +3 -4
  3. langchain_core/_api/beta_decorator.py +23 -26
  4. langchain_core/_api/deprecation.py +52 -65
  5. langchain_core/_api/path.py +3 -6
  6. langchain_core/_import_utils.py +3 -4
  7. langchain_core/agents.py +19 -19
  8. langchain_core/caches.py +53 -63
  9. langchain_core/callbacks/__init__.py +1 -8
  10. langchain_core/callbacks/base.py +323 -334
  11. langchain_core/callbacks/file.py +44 -44
  12. langchain_core/callbacks/manager.py +441 -507
  13. langchain_core/callbacks/stdout.py +29 -30
  14. langchain_core/callbacks/streaming_stdout.py +32 -32
  15. langchain_core/callbacks/usage.py +60 -57
  16. langchain_core/chat_history.py +48 -63
  17. langchain_core/document_loaders/base.py +23 -23
  18. langchain_core/document_loaders/langsmith.py +37 -37
  19. langchain_core/documents/__init__.py +0 -1
  20. langchain_core/documents/base.py +62 -65
  21. langchain_core/documents/compressor.py +4 -4
  22. langchain_core/documents/transformers.py +28 -29
  23. langchain_core/embeddings/fake.py +50 -54
  24. langchain_core/example_selectors/length_based.py +1 -1
  25. langchain_core/example_selectors/semantic_similarity.py +21 -25
  26. langchain_core/exceptions.py +10 -11
  27. langchain_core/globals.py +3 -151
  28. langchain_core/indexing/api.py +61 -66
  29. langchain_core/indexing/base.py +58 -58
  30. langchain_core/indexing/in_memory.py +3 -3
  31. langchain_core/language_models/__init__.py +14 -27
  32. langchain_core/language_models/_utils.py +270 -84
  33. langchain_core/language_models/base.py +55 -162
  34. langchain_core/language_models/chat_models.py +442 -402
  35. langchain_core/language_models/fake.py +11 -11
  36. langchain_core/language_models/fake_chat_models.py +61 -39
  37. langchain_core/language_models/llms.py +123 -231
  38. langchain_core/load/dump.py +4 -5
  39. langchain_core/load/load.py +18 -28
  40. langchain_core/load/mapping.py +2 -4
  41. langchain_core/load/serializable.py +39 -40
  42. langchain_core/messages/__init__.py +61 -22
  43. langchain_core/messages/ai.py +368 -163
  44. langchain_core/messages/base.py +214 -43
  45. langchain_core/messages/block_translators/__init__.py +111 -0
  46. langchain_core/messages/block_translators/anthropic.py +470 -0
  47. langchain_core/messages/block_translators/bedrock.py +94 -0
  48. langchain_core/messages/block_translators/bedrock_converse.py +297 -0
  49. langchain_core/messages/block_translators/google_genai.py +530 -0
  50. langchain_core/messages/block_translators/google_vertexai.py +21 -0
  51. langchain_core/messages/block_translators/groq.py +143 -0
  52. langchain_core/messages/block_translators/langchain_v0.py +301 -0
  53. langchain_core/messages/block_translators/openai.py +1010 -0
  54. langchain_core/messages/chat.py +2 -6
  55. langchain_core/messages/content.py +1423 -0
  56. langchain_core/messages/function.py +6 -10
  57. langchain_core/messages/human.py +41 -38
  58. langchain_core/messages/modifier.py +2 -2
  59. langchain_core/messages/system.py +38 -28
  60. langchain_core/messages/tool.py +96 -103
  61. langchain_core/messages/utils.py +478 -504
  62. langchain_core/output_parsers/__init__.py +1 -14
  63. langchain_core/output_parsers/base.py +58 -61
  64. langchain_core/output_parsers/json.py +7 -8
  65. langchain_core/output_parsers/list.py +5 -7
  66. langchain_core/output_parsers/openai_functions.py +49 -47
  67. langchain_core/output_parsers/openai_tools.py +14 -19
  68. langchain_core/output_parsers/pydantic.py +12 -13
  69. langchain_core/output_parsers/string.py +2 -2
  70. langchain_core/output_parsers/transform.py +15 -17
  71. langchain_core/output_parsers/xml.py +8 -10
  72. langchain_core/outputs/__init__.py +1 -1
  73. langchain_core/outputs/chat_generation.py +18 -18
  74. langchain_core/outputs/chat_result.py +1 -3
  75. langchain_core/outputs/generation.py +8 -8
  76. langchain_core/outputs/llm_result.py +10 -10
  77. langchain_core/prompt_values.py +12 -12
  78. langchain_core/prompts/__init__.py +3 -27
  79. langchain_core/prompts/base.py +45 -55
  80. langchain_core/prompts/chat.py +254 -313
  81. langchain_core/prompts/dict.py +5 -5
  82. langchain_core/prompts/few_shot.py +81 -88
  83. langchain_core/prompts/few_shot_with_templates.py +11 -13
  84. langchain_core/prompts/image.py +12 -14
  85. langchain_core/prompts/loading.py +6 -8
  86. langchain_core/prompts/message.py +3 -3
  87. langchain_core/prompts/prompt.py +24 -39
  88. langchain_core/prompts/string.py +4 -4
  89. langchain_core/prompts/structured.py +42 -50
  90. langchain_core/rate_limiters.py +51 -60
  91. langchain_core/retrievers.py +49 -190
  92. langchain_core/runnables/base.py +1484 -1709
  93. langchain_core/runnables/branch.py +45 -61
  94. langchain_core/runnables/config.py +80 -88
  95. langchain_core/runnables/configurable.py +117 -134
  96. langchain_core/runnables/fallbacks.py +83 -79
  97. langchain_core/runnables/graph.py +85 -95
  98. langchain_core/runnables/graph_ascii.py +27 -28
  99. langchain_core/runnables/graph_mermaid.py +38 -50
  100. langchain_core/runnables/graph_png.py +15 -16
  101. langchain_core/runnables/history.py +135 -148
  102. langchain_core/runnables/passthrough.py +124 -150
  103. langchain_core/runnables/retry.py +46 -51
  104. langchain_core/runnables/router.py +25 -30
  105. langchain_core/runnables/schema.py +79 -74
  106. langchain_core/runnables/utils.py +62 -68
  107. langchain_core/stores.py +81 -115
  108. langchain_core/structured_query.py +8 -8
  109. langchain_core/sys_info.py +27 -29
  110. langchain_core/tools/__init__.py +1 -14
  111. langchain_core/tools/base.py +179 -187
  112. langchain_core/tools/convert.py +131 -139
  113. langchain_core/tools/render.py +10 -10
  114. langchain_core/tools/retriever.py +11 -11
  115. langchain_core/tools/simple.py +19 -24
  116. langchain_core/tools/structured.py +30 -39
  117. langchain_core/tracers/__init__.py +1 -9
  118. langchain_core/tracers/base.py +97 -99
  119. langchain_core/tracers/context.py +29 -52
  120. langchain_core/tracers/core.py +50 -60
  121. langchain_core/tracers/evaluation.py +11 -11
  122. langchain_core/tracers/event_stream.py +115 -70
  123. langchain_core/tracers/langchain.py +21 -21
  124. langchain_core/tracers/log_stream.py +43 -43
  125. langchain_core/tracers/memory_stream.py +3 -3
  126. langchain_core/tracers/root_listeners.py +16 -16
  127. langchain_core/tracers/run_collector.py +2 -4
  128. langchain_core/tracers/schemas.py +0 -129
  129. langchain_core/tracers/stdout.py +3 -3
  130. langchain_core/utils/__init__.py +1 -4
  131. langchain_core/utils/_merge.py +46 -8
  132. langchain_core/utils/aiter.py +57 -61
  133. langchain_core/utils/env.py +9 -9
  134. langchain_core/utils/function_calling.py +89 -191
  135. langchain_core/utils/html.py +7 -8
  136. langchain_core/utils/input.py +6 -6
  137. langchain_core/utils/interactive_env.py +1 -1
  138. langchain_core/utils/iter.py +37 -42
  139. langchain_core/utils/json.py +4 -3
  140. langchain_core/utils/json_schema.py +8 -8
  141. langchain_core/utils/mustache.py +9 -11
  142. langchain_core/utils/pydantic.py +33 -35
  143. langchain_core/utils/strings.py +5 -5
  144. langchain_core/utils/usage.py +1 -1
  145. langchain_core/utils/utils.py +80 -54
  146. langchain_core/vectorstores/base.py +129 -164
  147. langchain_core/vectorstores/in_memory.py +99 -174
  148. langchain_core/vectorstores/utils.py +5 -5
  149. langchain_core/version.py +1 -1
  150. {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/METADATA +28 -27
  151. langchain_core-1.0.0.dist-info/RECORD +172 -0
  152. {langchain_core-0.3.79.dist-info → langchain_core-1.0.0.dist-info}/WHEEL +1 -1
  153. langchain_core/beta/__init__.py +0 -1
  154. langchain_core/beta/runnables/__init__.py +0 -1
  155. langchain_core/beta/runnables/context.py +0 -447
  156. langchain_core/memory.py +0 -120
  157. langchain_core/messages/content_blocks.py +0 -176
  158. langchain_core/prompts/pipeline.py +0 -138
  159. langchain_core/pydantic_v1/__init__.py +0 -30
  160. langchain_core/pydantic_v1/dataclasses.py +0 -23
  161. langchain_core/pydantic_v1/main.py +0 -23
  162. langchain_core/tracers/langchain_v1.py +0 -31
  163. langchain_core/utils/loading.py +0 -35
  164. langchain_core-0.3.79.dist-info/RECORD +0 -174
  165. langchain_core-0.3.79.dist-info/entry_points.txt +0 -4
langchain_core/prompts/structured.py

@@ -1,11 +1,8 @@
 """Structured prompt template for a language model."""
 
-from collections.abc import AsyncIterator, Iterator, Mapping, Sequence
+from collections.abc import AsyncIterator, Callable, Iterator, Mapping, Sequence
 from typing import (
     Any,
-    Callable,
-    Optional,
-    Union,
 )
 
 from pydantic import BaseModel, Field
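The hunk above shows the pattern applied across the whole 1.0 release: `typing.Union`/`typing.Optional` are replaced with PEP 604 unions and `Callable` now comes from `collections.abc`. A minimal sketch of the before/after spelling (variable names here are illustrative, not taken from the diff):

```python
# Requires Python 3.10+ at runtime (or `from __future__ import annotations`).
from collections.abc import Callable

# pre-1.0 spelling:
#   from typing import Callable, Optional, Union
#   schema: Optional[Union[dict, type]] = None
#   hook: Optional[Callable[[str], str]] = None

# 1.0 spelling:
schema: dict | type | None = None
hook: Callable[[str], str] | None = None
```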
@@ -31,16 +28,16 @@ from langchain_core.utils import get_pydantic_field_names
 class StructuredPrompt(ChatPromptTemplate):
     """Structured prompt template for a language model."""
 
-    schema_: Union[dict, type]
+    schema_: dict | type
     """Schema for the structured prompt."""
     structured_output_kwargs: dict[str, Any] = Field(default_factory=dict)
 
     def __init__(
         self,
         messages: Sequence[MessageLikeRepresentation],
-        schema_: Optional[Union[dict, type[BaseModel]]] = None,
+        schema_: dict | type[BaseModel] | None = None,
         *,
-        structured_output_kwargs: Optional[dict[str, Any]] = None,
+        structured_output_kwargs: dict[str, Any] | None = None,
         template_format: PromptTemplateFormat = "f-string",
         **kwargs: Any,
     ) -> None:
@@ -66,13 +63,13 @@ class StructuredPrompt(ChatPromptTemplate):
 
     @classmethod
     def get_lc_namespace(cls) -> list[str]:
-        """Get the namespace of the langchain object.
+        """Get the namespace of the LangChain object.
 
-        For example, if the class is ``langchain.llms.openai.OpenAI``, then the
-        namespace is ``["langchain", "llms", "openai"]``
+        For example, if the class is `langchain.llms.openai.OpenAI`, then the
+        namespace is `["langchain", "llms", "openai"]`
 
         Returns:
-            The namespace of the langchain object.
+            The namespace of the LangChain object.
         """
         return cls.__module__.split(".")
 
@@ -80,7 +77,7 @@ class StructuredPrompt(ChatPromptTemplate):
     def from_messages_and_schema(
         cls,
         messages: Sequence[MessageLikeRepresentation],
-        schema: Union[dict, type],
+        schema: dict | type,
         **kwargs: Any,
     ) -> ChatPromptTemplate:
         """Create a chat prompt template from a variety of message formats.
@@ -88,35 +85,34 @@ class StructuredPrompt(ChatPromptTemplate):
         Examples:
             Instantiation from a list of message templates:
 
-            .. code-block:: python
+            ```python
+            from langchain_core.prompts import StructuredPrompt
 
-                from langchain_core.prompts import StructuredPrompt
 
+            class OutputSchema(BaseModel):
+                name: str
+                value: int
 
-                class OutputSchema(BaseModel):
-                    name: str
-                    value: int
-
-
-                template = StructuredPrompt(
-                    [
-                        ("human", "Hello, how are you?"),
-                        ("ai", "I'm doing well, thanks!"),
-                        ("human", "That's good to hear."),
-                    ],
-                    OutputSchema,
-                )
 
+            template = StructuredPrompt(
+                [
+                    ("human", "Hello, how are you?"),
+                    ("ai", "I'm doing well, thanks!"),
+                    ("human", "That's good to hear."),
+                ],
+                OutputSchema,
+            )
+            ```
         Args:
             messages: sequence of message representations.
-                  A message can be represented using the following formats:
-                  (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
-                  (message type, template); e.g., ("human", "{user_input}"),
-                  (4) 2-tuple of (message class, template), (5) a string which is
-                  shorthand for ("human", template); e.g., "{user_input}"
+                A message can be represented using the following formats:
+                (1) BaseMessagePromptTemplate, (2) BaseMessage, (3) 2-tuple of
+                (message type, template); e.g., ("human", "{user_input}"),
+                (4) 2-tuple of (message class, template), (5) a string which is
+                shorthand for ("human", template); e.g., "{user_input}"
             schema: a dictionary representation of function call, or a Pydantic model.
-            kwargs: Any additional kwargs to pass through to
-                ``ChatModel.with_structured_output(schema, **kwargs)``.
+            **kwargs: Any additional kwargs to pass through to
+                `ChatModel.with_structured_output(schema, **kwargs)`.
 
         Returns:
             a structured prompt template
@@ -127,32 +123,28 @@ class StructuredPrompt(ChatPromptTemplate):
     @override
     def __or__(
         self,
-        other: Union[
-            Runnable[Any, Other],
-            Callable[[Iterator[Any]], Iterator[Other]],
-            Callable[[AsyncIterator[Any]], AsyncIterator[Other]],
-            Callable[[Any], Other],
-            Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]],
-        ],
+        other: Runnable[Any, Other]
+        | Callable[[Iterator[Any]], Iterator[Other]]
+        | Callable[[AsyncIterator[Any]], AsyncIterator[Other]]
+        | Callable[[Any], Other]
+        | Mapping[str, Runnable[Any, Other] | Callable[[Any], Other] | Any],
     ) -> RunnableSerializable[dict, Other]:
         return self.pipe(other)
 
     def pipe(
         self,
-        *others: Union[
-            Runnable[Any, Other],
-            Callable[[Iterator[Any]], Iterator[Other]],
-            Callable[[AsyncIterator[Any]], AsyncIterator[Other]],
-            Callable[[Any], Other],
-            Mapping[str, Union[Runnable[Any, Other], Callable[[Any], Other], Any]],
-        ],
-        name: Optional[str] = None,
+        *others: Runnable[Any, Other]
+        | Callable[[Iterator[Any]], Iterator[Other]]
+        | Callable[[AsyncIterator[Any]], AsyncIterator[Other]]
+        | Callable[[Any], Other]
+        | Mapping[str, Runnable[Any, Other] | Callable[[Any], Other] | Any],
+        name: str | None = None,
     ) -> RunnableSerializable[dict, Other]:
         """Pipe the structured prompt to a language model.
 
         Args:
             others: The language model to pipe the structured prompt to.
-            name: The name of the pipeline. Defaults to None.
+            name: The name of the pipeline.
 
         Returns:
             A RunnableSequence object.
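The docstrings above describe how a `StructuredPrompt` forwards its schema to `ChatModel.with_structured_output(...)` when piped into a model. A hedged sketch of that flow; `model` is assumed to be any chat model implementing `.with_structured_output()` (provider packages, not langchain-core, ship concrete ones):

```python
from pydantic import BaseModel

from langchain_core.prompts import StructuredPrompt


class Answer(BaseModel):
    text: str
    confidence: float


prompt = StructuredPrompt(
    [("system", "Answer concisely."), ("human", "{question}")],
    Answer,
)

# `prompt | model` is equivalent to `prompt.pipe(model)`; per the docstring
# above, the schema and structured_output_kwargs are forwarded to
# model.with_structured_output(...), so the chain yields parsed Answer objects.
# chain = prompt | model
# result = chain.invoke({"question": "What changed in 1.0?"})
```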
langchain_core/rate_limiters.py

@@ -6,7 +6,6 @@ import abc
 import asyncio
 import threading
 import time
-from typing import Optional
 
 
 class BaseRateLimiter(abc.ABC):
@@ -22,11 +21,8 @@
     Current limitations:
 
     - Rate limiting information is not surfaced in tracing or callbacks. This means
-      that the total time it takes to invoke a chat model will encompass both
-      the time spent waiting for tokens and the time spent making the request.
-
-
-    .. versionadded:: 0.2.24
+        that the total time it takes to invoke a chat model will encompass both
+        the time spent waiting for tokens and the time spent making the request.
     """
 
     @abc.abstractmethod
@@ -34,18 +30,18 @@
         """Attempt to acquire the necessary tokens for the rate limiter.
 
         This method blocks until the required tokens are available if `blocking`
-        is set to True.
+        is set to `True`.
 
-        If `blocking` is set to False, the method will immediately return the result
+        If `blocking` is set to `False`, the method will immediately return the result
         of the attempt to acquire the tokens.
 
         Args:
-            blocking: If True, the method will block until the tokens are available.
-                If False, the method will return immediately with the result of
-                the attempt. Defaults to True.
+            blocking: If `True`, the method will block until the tokens are available.
+                If `False`, the method will return immediately with the result of
+                the attempt.
 
         Returns:
-            True if the tokens were successfully acquired, False otherwise.
+            `True` if the tokens were successfully acquired, `False` otherwise.
         """
 
     @abc.abstractmethod
@@ -53,18 +49,18 @@
         """Attempt to acquire the necessary tokens for the rate limiter.
 
         This method blocks until the required tokens are available if `blocking`
-        is set to True.
+        is set to `True`.
 
-        If `blocking` is set to False, the method will immediately return the result
+        If `blocking` is set to `False`, the method will immediately return the result
        of the attempt to acquire the tokens.
 
        Args:
-            blocking: If True, the method will block until the tokens are available.
-                If False, the method will return immediately with the result of
-                the attempt. Defaults to True.
+            blocking: If `True`, the method will block until the tokens are available.
+                If `False`, the method will return immediately with the result of
+                the attempt.
 
        Returns:
-            True if the tokens were successfully acquired, False otherwise.
+            `True` if the tokens were successfully acquired, `False` otherwise.
        """
 
 
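The two abstract methods above (`acquire` and its async counterpart `aacquire`) make up the whole `BaseRateLimiter` contract. A minimal sketch of a custom implementation, assuming the keyword-only `blocking` signature described in the docstrings; the permissive behaviour is purely illustrative:

```python
from langchain_core.rate_limiters import BaseRateLimiter


class AlwaysAllowRateLimiter(BaseRateLimiter):
    """Grants every request immediately; handy as a stub in tests."""

    def acquire(self, *, blocking: bool = True) -> bool:
        # Nothing to wait for, so acquisition always "succeeds",
        # whether or not the caller asked to block.
        return True

    async def aacquire(self, *, blocking: bool = True) -> bool:
        # Async variant mirrors the sync behaviour.
        return True
```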
@@ -85,45 +81,40 @@ class InMemoryRateLimiter(BaseRateLimiter):
     not enough tokens in the bucket, the request is blocked until there are
     enough tokens.
 
-    These *tokens* have NOTHING to do with LLM tokens. They are just
+    These tokens have nothing to do with LLM tokens. They are just
     a way to keep track of how many requests can be made at a given time.
 
     Current limitations:
 
     - The rate limiter is not designed to work across different processes. It is
-      an in-memory rate limiter, but it is thread safe.
+        an in-memory rate limiter, but it is thread safe.
     - The rate limiter only supports time-based rate limiting. It does not take
-      into account the size of the request or any other factors.
+        into account the size of the request or any other factors.
 
     Example:
+        ```python
+        import time
 
-        .. code-block:: python
-
-            import time
-
-            from langchain_core.rate_limiters import InMemoryRateLimiter
-
-            rate_limiter = InMemoryRateLimiter(
-                requests_per_second=0.1,  # <-- Can only make a request once every 10 seconds!!
-                check_every_n_seconds=0.1,  # Wake up every 100 ms to check whether allowed to make a request,
-                max_bucket_size=10,  # Controls the maximum burst size.
-            )
-
-            from langchain_anthropic import ChatAnthropic
-
-            model = ChatAnthropic(
-                model_name="claude-3-opus-20240229", rate_limiter=rate_limiter
-            )
+        from langchain_core.rate_limiters import InMemoryRateLimiter
 
-            for _ in range(5):
-                tic = time.time()
-                model.invoke("hello")
-                toc = time.time()
-                print(toc - tic)
+        rate_limiter = InMemoryRateLimiter(
+            requests_per_second=0.1,  # <-- Can only make a request once every 10 seconds!!
+            check_every_n_seconds=0.1,  # Wake up every 100 ms to check whether allowed to make a request,
+            max_bucket_size=10,  # Controls the maximum burst size.
+        )
 
+        from langchain_anthropic import ChatAnthropic
 
-        .. versionadded:: 0.2.24
+        model = ChatAnthropic(
+            model_name="claude-sonnet-4-5-20250929", rate_limiter=rate_limiter
+        )
 
+        for _ in range(5):
+            tic = time.time()
+            model.invoke("hello")
+            toc = time.time()
+            print(toc - tic)
+        ```
     """  # noqa: E501
 
     def __init__(
@@ -135,7 +126,7 @@ class InMemoryRateLimiter(BaseRateLimiter):
     ) -> None:
         """A rate limiter based on a token bucket.
 
-        These *tokens* have NOTHING to do with LLM tokens. They are just
+        These tokens have nothing to do with LLM tokens. They are just
         a way to keep track of how many requests can be made at a given time.
 
         This rate limiter is designed to work in a threaded environment.
@@ -148,11 +139,11 @@ class InMemoryRateLimiter(BaseRateLimiter):
         Args:
             requests_per_second: The number of tokens to add per second to the bucket.
                 The tokens represent "credit" that can be used to make requests.
-            check_every_n_seconds: check whether the tokens are available
+            check_every_n_seconds: Check whether the tokens are available
                 every this many seconds. Can be a float to represent
                 fractions of a second.
             max_bucket_size: The maximum number of tokens that can be in the bucket.
-                Must be at least 1. Used to prevent bursts of requests.
+                Must be at least `1`. Used to prevent bursts of requests.
         """
         # Number of requests that we can make per second.
         self.requests_per_second = requests_per_second
@@ -163,7 +154,7 @@ class InMemoryRateLimiter(BaseRateLimiter):
         # at a given time.
         self._consume_lock = threading.Lock()
         # The last time we tried to consume tokens.
-        self.last: Optional[float] = None
+        self.last: float | None = None
         self.check_every_n_seconds = check_every_n_seconds
 
     def _consume(self) -> bool:
@@ -202,18 +193,18 @@ class InMemoryRateLimiter(BaseRateLimiter):
         """Attempt to acquire a token from the rate limiter.
 
         This method blocks until the required tokens are available if `blocking`
-        is set to True.
+        is set to `True`.
 
-        If `blocking` is set to False, the method will immediately return the result
+        If `blocking` is set to `False`, the method will immediately return the result
         of the attempt to acquire the tokens.
 
         Args:
-            blocking: If True, the method will block until the tokens are available.
-                If False, the method will return immediately with the result of
-                the attempt. Defaults to True.
+            blocking: If `True`, the method will block until the tokens are available.
+                If `False`, the method will return immediately with the result of
+                the attempt.
 
         Returns:
-            True if the tokens were successfully acquired, False otherwise.
+            `True` if the tokens were successfully acquired, `False` otherwise.
         """
         if not blocking:
             return self._consume()
@@ -226,18 +217,18 @@ class InMemoryRateLimiter(BaseRateLimiter):
         """Attempt to acquire a token from the rate limiter. Async version.
 
         This method blocks until the required tokens are available if `blocking`
-        is set to True.
+        is set to `True`.
 
-        If `blocking` is set to False, the method will immediately return the result
+        If `blocking` is set to `False`, the method will immediately return the result
         of the attempt to acquire the tokens.
 
         Args:
-            blocking: If True, the method will block until the tokens are available.
-                If False, the method will return immediately with the result of
-                the attempt. Defaults to True.
+            blocking: If `True`, the method will block until the tokens are available.
+                If `False`, the method will return immediately with the result of
+                the attempt.
 
         Returns:
-            True if the tokens were successfully acquired, False otherwise.
+            `True` if the tokens were successfully acquired, `False` otherwise.
         """
         if not blocking:
             return self._consume()
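The non-blocking path shown in the last two hunks can be exercised directly. A short usage sketch (parameter values are illustrative):

```python
from langchain_core.rate_limiters import InMemoryRateLimiter

limiter = InMemoryRateLimiter(requests_per_second=1, max_bucket_size=1)

# With blocking=False, acquire() reports immediately whether a token was
# available instead of waiting for the bucket to refill.
if limiter.acquire(blocking=False):
    print("Token granted; safe to call the model.")
else:
    print("Bucket empty; skip or retry later.")
```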