haiway 0.19.4__py3-none-any.whl → 0.20.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
haiway/context/tasks.py CHANGED
@@ -9,6 +9,14 @@ __all__ = ("TaskGroupContext",)
 
 @final
 class TaskGroupContext:
+    """
+    Context manager for managing task groups within a scope.
+
+    Provides a way to create and manage asyncio tasks within a context,
+    ensuring proper task lifecycle management and context propagation.
+    This class is immutable after initialization.
+    """
+
     _context = ContextVar[TaskGroup]("TaskGroupContext")
 
     @classmethod
@@ -19,6 +27,26 @@ class TaskGroupContext:
         *args: Arguments.args,
         **kwargs: Arguments.kwargs,
     ) -> Task[Result]:
+        """
+        Run a coroutine function as a task within the current task group.
+
+        If called within a TaskGroupContext, creates a task in that group.
+        If called outside any TaskGroupContext, creates a detached task.
+
+        Parameters
+        ----------
+        function: Callable[Arguments, Coroutine[Any, Any, Result]]
+            The coroutine function to run
+        *args: Arguments.args
+            Positional arguments to pass to the function
+        **kwargs: Arguments.kwargs
+            Keyword arguments to pass to the function
+
+        Returns
+        -------
+        Task[Result]
+            The created task
+        """
         try:
             return cls._context.get().create_task(
                 function(*args, **kwargs),
@@ -40,6 +68,14 @@ class TaskGroupContext:
         self,
         task_group: TaskGroup | None = None,
     ) -> None:
+        """
+        Initialize a task group context.
+
+        Parameters
+        ----------
+        task_group: TaskGroup | None
+            The task group to use, or None to create a new one
+        """
         self._group: TaskGroup
         object.__setattr__(
             self,
@@ -73,6 +109,16 @@ class TaskGroupContext:
         )
 
     async def __aenter__(self) -> None:
+        """
+        Enter this task group context.
+
+        Enters the underlying task group and sets this context as current.
+
+        Raises
+        ------
+        AssertionError
+            If attempting to re-enter an already active context
+        """
         assert self._token is None, "Context reentrance is not allowed"  # nosec: B101
         await self._group.__aenter__()
         object.__setattr__(
@@ -87,6 +133,26 @@ class TaskGroupContext:
         exc_val: BaseException | None,
         exc_tb: TracebackType | None,
     ) -> None:
+        """
+        Exit this task group context.
+
+        Restores the previous task group context and exits the underlying task group.
+        Silently ignores task group exceptions to avoid masking existing exceptions.
+
+        Parameters
+        ----------
+        exc_type: type[BaseException] | None
+            Type of exception that caused the exit
+        exc_val: BaseException | None
+            Exception instance that caused the exit
+        exc_tb: TracebackType | None
+            Traceback for the exception
+
+        Raises
+        ------
+        AssertionError
+            If the context is not active
+        """
         assert self._token is not None, "Unbalanced context enter/exit"  # nosec: B101
         TaskGroupContext._context.reset(self._token)
         object.__setattr__(
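For orientation, here is a minimal usage sketch of the API documented above. Only TaskGroupContext as an async context manager and the classmethod TaskGroupContext.run(function, *args, **kwargs) come from this diff; the fetch coroutine and the surrounding script are illustrative.

import asyncio

from haiway.context.tasks import TaskGroupContext


async def fetch(item: int) -> None:
    # illustrative handler, not part of the diff
    await asyncio.sleep(0.1)


async def main() -> None:
    async with TaskGroupContext():
        for item in range(3):
            # created inside the active group; awaited when the context exits
            TaskGroupContext.run(fetch, item)
    # called outside any TaskGroupContext, run() would create a detached task instead


asyncio.run(main())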
haiway/context/types.py CHANGED
@@ -5,8 +5,24 @@ __all__ = (
 
 
 class MissingContext(Exception):
+    """
+    Exception raised when attempting to access a context that doesn't exist.
+
+    This exception is raised when code attempts to access the context system
+    outside of an active context, such as trying to access state or scope
+    identifiers when no context has been established.
+    """
+
     pass
 
 
 class MissingState(Exception):
+    """
+    Exception raised when attempting to access state that doesn't exist.
+
+    This exception is raised when code attempts to access a specific state type
+    that is not present in the current context and cannot be automatically
+    created (either because no default was provided or instantiation failed).
+    """
+
     pass
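The two docstrings describe when each exception fires; the handling pattern they imply looks roughly like the sketch below. The ctx.state(...) lookup and the Config class are assumptions used purely for illustration and are not part of this diff.

from haiway.context import ctx  # ctx import path taken from concurrent.py below
from haiway.context.types import MissingContext, MissingState


class Config:  # hypothetical state type, stand-in for illustration
    ...


def load_config() -> Config:
    try:
        return ctx.state(Config)  # assumed state-lookup API, not shown in this diff
    except MissingState:
        return Config()  # state absent and no default could be instantiated
    except MissingContext:
        raise RuntimeError("requires an active haiway context")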
haiway/helpers/__init__.py CHANGED
@@ -1,5 +1,6 @@
 from haiway.helpers.asynchrony import asynchronous, wrap_async
 from haiway.helpers.caching import CacheMakeKey, CacheRead, CacheWrite, cache
+from haiway.helpers.concurrent import process_concurrently
 from haiway.helpers.observability import LoggerObservability
 from haiway.helpers.retries import retry
 from haiway.helpers.throttling import throttle
@@ -13,6 +14,7 @@ __all__ = (
     "LoggerObservability",
     "asynchronous",
     "cache",
+    "process_concurrently",
     "retry",
     "throttle",
     "timeout",
haiway/helpers/asynchrony.py CHANGED
@@ -17,6 +17,24 @@ def wrap_async[**Args, Result](
     function: Callable[Args, Coroutine[Any, Any, Result]] | Callable[Args, Result],
     /,
 ) -> Callable[Args, Coroutine[Any, Any, Result]]:
+    """
+    Convert a synchronous function to an asynchronous one if it isn't already.
+
+    Takes a function that may be either synchronous or asynchronous and ensures it
+    returns a coroutine. If the input function is already asynchronous, it is returned
+    unchanged. If it's synchronous, it wraps it in an async function that executes
+    the original function and returns its result.
+
+    Parameters
+    ----------
+    function: Callable[Args, Coroutine[Any, Any, Result]] | Callable[Args, Result]
+        The function to ensure is asynchronous, can be either sync or async
+
+    Returns
+    -------
+    Callable[Args, Coroutine[Any, Any, Result]]
+        An asynchronous function that returns a coroutine
+    """
     if iscoroutinefunction(function):
         return function
 
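A small sketch of the contract this docstring describes (the sync_add helper and demo wrapper are illustrative):

import asyncio

from haiway.helpers.asynchrony import wrap_async


def sync_add(a: int, b: int) -> int:
    return a + b


async def demo() -> None:
    add = wrap_async(sync_add)  # sync callable gains an async wrapper
    assert await add(1, 2) == 3
    assert wrap_async(demo) is demo  # async callables pass through unchanged


asyncio.run(demo())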
@@ -57,6 +75,7 @@ def asynchronous[**Args, Result](
 def asynchronous[**Args, Result](
     function: Callable[Args, Result] | None = None,
     /,
+    *,
     loop: AbstractEventLoop | None = None,
     executor: Executor | Missing = MISSING,
 ) -> (
@@ -66,26 +85,56 @@ def asynchronous[**Args, Result](
     ]
     | Callable[Args, Coroutine[Any, Any, Result]]
 ):
-    """\
-    Wrapper for a sync function to convert it to an async function. \
-    When specified an executor, it can be used to wrap long running or blocking synchronous \
-    operations within coroutines system.
+    """
+    Convert a synchronous function to an asynchronous one that runs in an executor.
+
+    This decorator transforms synchronous, potentially blocking functions into
+    asynchronous coroutines that execute in an event loop's executor, allowing
+    them to be used with async/await syntax without blocking the event loop.
+
+    Can be used as a simple decorator (@asynchronous) or with configuration
+    parameters (@asynchronous(executor=my_executor)).
 
     Parameters
     ----------
-    function: Callable[Args, Result]
-        function to be wrapped as running in loop executor.
+    function: Callable[Args, Result] | None
+        The synchronous function to be wrapped. When used as a simple decorator,
+        this parameter is provided automatically.
     loop: AbstractEventLoop | None
-        loop used to call the function. When None was provided the loop currently running while \
-        executing the function will be used. Default is None.
+        The event loop to run the function in. When None is provided, the currently
+        running loop while executing the function will be used. Default is None.
     executor: Executor | Missing
-        executor used to run the function. When not provided (Missing) default loop executor\
-        will be used.
+        The executor used to run the function. When not provided, the default loop
+        executor will be used. Useful for CPU-bound tasks or operations that would
+        otherwise block the event loop.
 
     Returns
     -------
-    Callable[_Args, _Result]
-        function wrapped to async using loop executor.
+    Callable
+        When used as @asynchronous: Returns the wrapped function that can be awaited.
+        When used as @asynchronous(...): Returns a decorator that can be applied to a function.
+
+    Notes
+    -----
+    The function preserves the original function's signature, docstring, and other attributes.
+    Context variables from the calling context are preserved when executing in the executor.
+
+    Examples
+    --------
+    Basic usage:
+
+    >>> @asynchronous
+    ... def cpu_intensive_task(data):
+    ...     # This runs in the default executor
+    ...     return process_data(data)
+    ...
+    >>> await cpu_intensive_task(my_data)  # Non-blocking
+
+    With custom executor:
+
+    >>> @asynchronous(executor=process_pool)
+    ... def cpu_intensive_task(data):
+    ...     return process_data(data)
     """
 
     def wrap(
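One behavioral note on the hunks above: the added `*,` makes loop and executor keyword-only in 0.20.0, so call sites must name them. A sketch under that assumption (the thread pool and read_file body are illustrative, not from the diff):

from concurrent.futures import ThreadPoolExecutor

from haiway.helpers.asynchrony import asynchronous

pool = ThreadPoolExecutor(max_workers=4)


@asynchronous(executor=pool)  # must be passed by keyword after the `*,` change
def read_file(path: str) -> str:
    with open(path) as file:  # blocking I/O kept off the event loop
        return file.read()

Awaiting read_file(...) then runs the body in pool instead of blocking the loop.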
haiway/helpers/caching.py CHANGED
@@ -17,6 +17,17 @@ __all__ = (
 
 
 class CacheMakeKey[**Args, Key](Protocol):
+    """
+    Protocol for generating cache keys from function arguments.
+
+    Implementations of this protocol are responsible for creating a unique key
+    based on the arguments passed to a function, which can then be used for
+    cache lookups.
+
+    The key must be consistent for the same set of arguments, and different
+    for different sets of arguments that should be cached separately.
+    """
+
     def __call__(
         self,
         *args: Args.args,
@@ -25,6 +36,16 @@ class CacheMakeKey[**Args, Key](Protocol):
 
 
 class CacheRead[Key, Value](Protocol):
+    """
+    Protocol for reading values from a cache.
+
+    Implementations of this protocol are responsible for retrieving cached values
+    based on a key. If the key is not present in the cache, None should be returned.
+
+    This is designed as an asynchronous operation to support remote caches where
+    retrieval might involve network operations.
+    """
+
     async def __call__(
         self,
         key: Key,
@@ -32,6 +53,16 @@ class CacheRead[Key, Value](Protocol):
 
 
 class CacheWrite[Key, Value](Protocol):
+    """
+    Protocol for writing values to a cache.
+
+    Implementations of this protocol are responsible for storing values in a cache
+    using the specified key. Any existing value with the same key should be overwritten.
+
+    This is designed as an asynchronous operation to support remote caches where
+    writing might involve network operations.
+    """
+
     async def __call__(
         self,
         key: Key,
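Together these protocols describe the pluggable pieces a custom cache backend provides; a minimal in-memory trio satisfying them is sketched below. The dict store is illustrative, and how cache() accepts these hooks is not shown in this diff, so the wiring is omitted.

from typing import Any

_store: dict[str, Any] = {}


def make_key(*args: Any, **kwargs: Any) -> str:
    # CacheMakeKey: identical arguments must produce the identical key
    return repr((args, tuple(sorted(kwargs.items()))))


async def read(key: str) -> Any | None:
    # CacheRead: async so a remote backend could await a network call here
    return _store.get(key)


async def write(key: str, value: Any) -> None:
    # CacheWrite: overwrites any existing value under the same key
    _store[key] = value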
haiway/helpers/concurrent.py ADDED
@@ -0,0 +1,74 @@
+from asyncio import FIRST_COMPLETED, CancelledError, Task, wait
+from collections.abc import AsyncIterator, Callable, Coroutine
+from concurrent.futures import ALL_COMPLETED
+from typing import Any
+
+from haiway.context import ctx
+
+__all__ = ("process_concurrently",)
+
+
+async def process_concurrently[Element](  # noqa: C901
+    source: AsyncIterator[Element],
+    /,
+    handler: Callable[[Element], Coroutine[Any, Any, None]],
+    *,
+    concurrent_tasks: int = 2,
+    ignore_exceptions: bool = False,
+) -> None:
+    """Process elements from an async iterator concurrently.
+
+    Parameters
+    ----------
+    source: AsyncIterator[Element]
+        An async iterator providing elements to process.
+
+    handler: Callable[[Element], Coroutine[Any, Any, None]]
+        A coroutine function that processes each element.
+
+    concurrent_tasks: int
+        Maximum number of concurrent tasks (must be > 0), default is 2.
+
+    ignore_exceptions: bool
+        If True, exceptions from tasks will be logged but not propagated,
+        default is False.
+
+    """
+    assert concurrent_tasks > 0  # nosec: B101
+    running: set[Task[None]] = set()
+    try:
+        while element := await anext(source, None):
+            if len(running) < concurrent_tasks:
+                running.add(ctx.spawn(handler, element))
+                continue  # keep spawning tasks
+
+            completed, running = await wait(running, return_when=FIRST_COMPLETED)
+
+            for task in completed:
+                if exc := task.exception():
+                    if not ignore_exceptions:
+                        raise exc
+
+                    ctx.log_error(
+                        f"Concurrent processing error - {type(exc)}: {exc}",
+                        exception=exc,
+                    )
+
+    except CancelledError as exc:
+        # Cancel all running tasks
+        for task in running:
+            task.cancel()
+
+        raise exc
+
+    finally:
+        completed, _ = await wait(running, return_when=ALL_COMPLETED)
+        for task in completed:
+            if exc := task.exception():
+                if not ignore_exceptions:
+                    raise exc
+
+                ctx.log_error(
+                    f"Concurrent processing error - {type(exc)}: {exc}",
+                    exception=exc,
+                )
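A usage sketch for the new helper; the source and handler are illustrative, and since tasks are spawned via ctx.spawn, the call is assumed to run inside an active haiway context:

from collections.abc import AsyncIterator

from haiway.helpers import process_concurrently


async def urls() -> AsyncIterator[str]:  # illustrative element source
    for n in range(10):
        yield f"https://example.com/{n}"


async def handle(url: str) -> None:  # illustrative element handler
    ...


async def run() -> None:
    # at most 4 handlers in flight; the first failure propagates
    # unless ignore_exceptions=True, in which case it is only logged
    await process_concurrently(urls(), handle, concurrent_tasks=4)

One subtlety visible in the loop above: `while element := await anext(source, None)` stops at the first falsy element as well as at exhaustion, so sources that can yield values like 0 or "" would end processing early.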
haiway/helpers/observability.py CHANGED
@@ -2,6 +2,7 @@ from collections.abc import Mapping
 from logging import Logger, getLogger
 from time import monotonic
 from typing import Any
+from uuid import UUID, uuid4
 
 from haiway.context import Observability, ObservabilityLevel, ScopeIdentifier
 from haiway.context.observability import ObservabilityAttribute
@@ -12,6 +13,13 @@ __all__ = ("LoggerObservability",)
 
 
 class ScopeStore:
+    """
+    Internal class for storing scope information during observability tracking.
+
+    Tracks timing information, nested scopes, and recorded events for a specific scope.
+    Used by LoggerObservability to maintain the hierarchy of scopes and their data.
+    """
+
     __slots__ = (
         "_completed",
         "_exited",
@@ -35,21 +43,44 @@ class ScopeStore:
 
     @property
     def time(self) -> float:
+        """Calculate the elapsed time in seconds since this scope was entered."""
         return (self._completed or monotonic()) - self.entered
 
     @property
     def exited(self) -> bool:
+        """Check if this scope has been exited."""
        return self._exited is not None
 
     def exit(self) -> None:
+        """Mark this scope as exited and record the exit time."""
         assert self._exited is None  # nosec: B101
         self._exited = monotonic()
 
     @property
     def completed(self) -> bool:
+        """
+        Check if this scope and all its nested scopes are completed.
+
+        A scope is considered completed when it has been exited and all its
+        nested scopes have also been completed.
+        """
         return self._completed is not None and all(nested.completed for nested in self.nested)
 
     def try_complete(self) -> bool:
+        """
+        Try to mark this scope as completed.
+
+        A scope can only be completed if:
+        - It has been exited
+        - It has not already been completed
+        - All its nested scopes are completed
+
+        Returns
+        -------
+        bool
+            True if the scope was successfully marked as completed,
+            False if any completion condition was not met
+        """
         if self._exited is None:
             return False  # not elegible for completion yet
 
@@ -69,9 +100,47 @@ def LoggerObservability(  # noqa: C901, PLR0915
     *,
     debug_context: bool = __debug__,
 ) -> Observability:
+    """
+    Create an Observability implementation that uses a standard Python logger.
+
+    This factory function creates an Observability instance that uses a Logger for recording
+    various types of observability data including logs, events, metrics, and attributes.
+    It maintains a hierarchical scope structure that tracks timing information and provides
+    a summary of all recorded data when the root scope exits.
+
+    Parameters
+    ----------
+    logger: Logger | None
+        The logger to use for recording observability data. If None, a logger will be
+        created based on the scope label when the first scope is entered.
+    debug_context: bool
+        Whether to store and display a detailed hierarchical summary when the root scope
+        exits. Defaults to True in debug mode (__debug__) and False otherwise.
+
+    Returns
+    -------
+    Observability
+        An Observability instance that uses the specified logger (or a default one)
+        for recording observability data.
+
+    Notes
+    -----
+    The created Observability instance tracks timing for each scope and records it
+    when the scope exits. When the root scope exits and debug_context is True,
+    it produces a hierarchical summary of all recorded events, metrics, and attributes.
+    """
     root_scope: ScopeIdentifier | None = None
     root_logger: Logger | None = logger
-    scopes: dict[str, ScopeStore] = {}
+    scopes: dict[UUID, ScopeStore] = {}
+
+    trace_id: UUID = uuid4()
+    trace_id_hex: str = trace_id.hex
+
+    def trace_identifying(
+        scope: ScopeIdentifier,
+        /,
+    ) -> UUID:
+        return trace_id
 
     def log_recording(
         scope: ScopeIdentifier,
@@ -87,7 +156,7 @@ def LoggerObservability(  # noqa: C901, PLR0915
 
         root_logger.log(
             level,
-            f"{scope.unique_name} {message}",
+            f"[{trace_id_hex}] {scope.unique_name} {message}",
             *args,
             exc_info=exception,
         )
@@ -110,7 +179,7 @@
 
         root_logger.log(
             level,
-            f"{scope.unique_name} {event_str}",
+            f"[{trace_id_hex}] {scope.unique_name} {event_str}",
         )
 
     def metric_recording(
@@ -129,17 +198,17 @@
 
         metric_str: str
         if attributes:
-            metric_str = f"Metric: {metric} = {value}{unit or ''}\n{format_str(attributes)}"
+            metric_str = f"Metric: {metric} = {value} {unit or ''}\n{format_str(attributes)}"
 
         else:
-            metric_str = f"Metric: {metric} = {value}{unit or ''}"
+            metric_str = f"Metric: {metric} = {value} {unit or ''}"
 
         if debug_context:  # store only for summary
             scopes[scope.scope_id].store.append(metric_str)
 
         root_logger.log(
             level,
-            f"{scope.unique_name} {metric_str}",
+            f"[{trace_id_hex}] {scope.unique_name} {metric_str}",
         )
 
     def attributes_recording(
@@ -160,7 +229,7 @@
 
         root_logger.log(
             level,
-            attributes_str,
+            f"[{trace_id_hex}] {scope.unique_name} {attributes_str}",
         )
 
     def scope_entering[Metric: State](
@@ -183,7 +252,7 @@
         assert root_logger is not None  # nosec: B101
         root_logger.log(
             ObservabilityLevel.INFO,
-            f"{scope.unique_name} Entering scope: {scope.label}",
+            f"[{trace_id_hex}] {scope.unique_name} Entering scope: {scope.label}",
         )
 
     def scope_exiting[Metric: State](
@@ -206,7 +275,7 @@
 
         root_logger.log(
             ObservabilityLevel.INFO,
-            f"{scope.unique_name} Exiting scope: {scope.label}",
+            f"[{trace_id_hex}] {scope.unique_name} Exiting scope: {scope.label}",
         )
         metric_str: str = f"Metric - scope_time:{scopes[scope.scope_id].time:.3f}s"
         if debug_context:  # store only for summary
@@ -214,12 +283,12 @@
 
         root_logger.log(
             ObservabilityLevel.INFO,
-            f"{scope.unique_name} {metric_str}",
+            f"[{trace_id_hex}] {scope.unique_name} {metric_str}",
         )
 
         # try complete parent scopes
         if scope != root_scope:
-            parent_id: str = scope.parent_id
+            parent_id: UUID = scope.parent_id
             while scopes[parent_id].try_complete():
                 if scopes[parent_id].identifier == root_scope:
                     break
@@ -240,6 +309,7 @@
             scopes = {}
 
     return Observability(
+        trace_identifying=trace_identifying,
        log_recording=log_recording,
        event_recording=event_recording,
        metric_recording=metric_recording,
@@ -250,6 +320,19 @@
 
 
 def _tree_summary(scope_store: ScopeStore) -> str:
+    """
+    Generate a hierarchical text representation of a scope and its nested scopes.
+
+    Parameters
+    ----------
+    scope_store: ScopeStore
+        The scope store to generate a summary for
+
+    Returns
+    -------
+    str
+        A formatted string representation of the scope hierarchy with recorded events
+    """
     elements: list[str] = [
         f"┍━ {scope_store.identifier.label} [{scope_store.identifier.scope_id}]:"
     ]
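Taken together, the observability changes stamp every log line with a per-instance trace id (a uuid4 rendered as hex) and switch scope keys from str to UUID. A construction sketch; only the factory's signature comes from this diff, and how the returned Observability is handed to haiway's scope machinery is not shown here:

from logging import getLogger

from haiway.helpers import LoggerObservability

# debug_context=True keeps per-scope records and emits a tree summary
# when the root scope completes
observability = LoggerObservability(getLogger("app"), debug_context=True)

# Each record is now prefixed with the instance's trace id, in the shape:
# [<trace_id.hex>] <scope.unique_name> Entering scope: <label>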
haiway/helpers/retries.py CHANGED
@@ -72,32 +72,73 @@ def retry[**Args, Result](
     delay: Callable[[int, Exception], float] | float | None = None,
     catching: set[type[Exception]] | tuple[type[Exception], ...] | type[Exception] = Exception,
 ) -> Callable[[Callable[Args, Result]], Callable[Args, Result]] | Callable[Args, Result]:
-    """\
-    Function wrapper retrying the wrapped function again on fail. \
-    Works for both sync and async functions. \
-    It is not allowed to be used on class methods. \
-    This wrapper is not thread safe.
+    """
+    Automatically retry a function on failure.
+
+    This decorator attempts to execute a function and, if it fails with a specified
+    exception type, retries the execution up to a configurable number of times,
+    with an optional delay between attempts.
+
+    Can be used as a simple decorator (@retry) or with configuration
+    parameters (@retry(limit=3, delay=1.0)).
 
     Parameters
     ----------
-    function: Callable[_Args_T, _Result_T]
-        function to wrap in auto retry, either sync or async.
+    function: Callable[Args, Result] | None
+        The function to wrap with retry logic. When used as a simple decorator,
+        this parameter is provided automatically.
     limit: int
-        limit of retries, default is 1
+        Maximum number of retry attempts. Default is 1, meaning the function
+        will be called at most twice (initial attempt + 1 retry).
     delay: Callable[[int, Exception], float] | float | None
-        retry delay time in seconds, either concrete value or a function producing it, \
-        default is None (no delay)
-    catching: set[type[Exception]] | type[Exception] | None
-        Exception types that are triggering auto retry. Retry will trigger only when \
-        exceptions of matching types (including subclasses) will occur. CancelledError \
-        will be always propagated even if specified explicitly.
-        Default is Exception - all subclasses of Exception will be handled.
+        Delay between retry attempts in seconds. Can be:
+        - None: No delay between retries (default)
+        - float: Fixed delay in seconds
+        - Callable: A function that calculates delay based on attempt number
+          and the caught exception, allowing for backoff strategies
+    catching: set[type[Exception]] | tuple[type[Exception], ...] | type[Exception]
+        Exception types that should trigger retry. Can be a single exception type,
+        a set, or a tuple of exception types. Default is Exception (all exception
+        types except for CancelledError, which is always propagated).
 
     Returns
     -------
-    Callable[[Callable[_Args_T, _Result_T]], Callable[_Args_T, _Result_T]] | \
-        Callable[_Args_T, _Result_T]
-        function wrapper for adding auto retry or a wrapped function
+    Callable
+        When used as @retry: Returns the wrapped function with retry logic.
+        When used as @retry(...): Returns a decorator that can be applied to a function.
+
+    Notes
+    -----
+    - Works with both synchronous and asynchronous functions.
+    - Not thread-safe; concurrent invocations are not coordinated.
+    - Cannot be used on class methods.
+    - Always propagates asyncio.CancelledError regardless of catching parameter.
+    - The function preserves the original function's signature, docstring, and other attributes.
+
+    Examples
+    --------
+    Basic usage:
+
+    >>> @retry
+    ... def fetch_data():
+    ...     # Will retry once if any exception occurs
+    ...     return external_api.fetch()
+
+    With configuration:
+
+    >>> @retry(limit=3, delay=2.0, catching=ConnectionError)
+    ... async def connect():
+    ...     # Will retry up to 3 times with 2 second delays on ConnectionError
+    ...     return await establish_connection()
+
+    With exponential backoff:
+
+    >>> def backoff(attempt, exception):
+    ...     return 0.5 * (2 ** attempt)  # 1s, 2s, 4s, ...
+    ...
+    >>> @retry(limit=5, delay=backoff)
+    ... def unreliable_operation():
+    ...     return perform_operation()
     """
 
     def _wrap(