haiway 0.10.15__py3-none-any.whl → 0.10.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. haiway/__init__.py +111 -0
  2. haiway/context/__init__.py +27 -0
  3. haiway/context/access.py +615 -0
  4. haiway/context/disposables.py +78 -0
  5. haiway/context/identifier.py +92 -0
  6. haiway/context/logging.py +176 -0
  7. haiway/context/metrics.py +165 -0
  8. haiway/context/state.py +113 -0
  9. haiway/context/tasks.py +64 -0
  10. haiway/context/types.py +12 -0
  11. haiway/helpers/__init__.py +21 -0
  12. haiway/helpers/asynchrony.py +225 -0
  13. haiway/helpers/caching.py +326 -0
  14. haiway/helpers/metrics.py +459 -0
  15. haiway/helpers/retries.py +223 -0
  16. haiway/helpers/throttling.py +133 -0
  17. haiway/helpers/timeouted.py +112 -0
  18. haiway/helpers/tracing.py +137 -0
  19. haiway/py.typed +0 -0
  20. haiway/state/__init__.py +12 -0
  21. haiway/state/attributes.py +747 -0
  22. haiway/state/path.py +542 -0
  23. haiway/state/requirement.py +229 -0
  24. haiway/state/structure.py +414 -0
  25. haiway/state/validation.py +468 -0
  26. haiway/types/__init__.py +14 -0
  27. haiway/types/default.py +108 -0
  28. haiway/types/frozen.py +5 -0
  29. haiway/types/missing.py +95 -0
  30. haiway/utils/__init__.py +28 -0
  31. haiway/utils/always.py +61 -0
  32. haiway/utils/collections.py +185 -0
  33. haiway/utils/env.py +230 -0
  34. haiway/utils/freezing.py +28 -0
  35. haiway/utils/logs.py +57 -0
  36. haiway/utils/mimic.py +77 -0
  37. haiway/utils/noop.py +24 -0
  38. haiway/utils/queue.py +82 -0
  39. {haiway-0.10.15.dist-info → haiway-0.10.17.dist-info}/METADATA +1 -1
  40. haiway-0.10.17.dist-info/RECORD +42 -0
  41. haiway-0.10.15.dist-info/RECORD +0 -4
  42. {haiway-0.10.15.dist-info → haiway-0.10.17.dist-info}/WHEEL +0 -0
  43. {haiway-0.10.15.dist-info → haiway-0.10.17.dist-info}/licenses/LICENSE +0 -0
haiway/helpers/metrics.py
@@ -0,0 +1,459 @@
+ from collections.abc import Sequence
+ from itertools import chain
+ from time import monotonic
+ from typing import Any, Self, cast, final, overload
+
+ from haiway.context import MetricsHandler, ScopeIdentifier, ctx
+ from haiway.state import State
+ from haiway.types import MISSING
+
+ __all__ = [
+     "MetricsLogger",
+     "MetricsHolder",
+ ]
+
+
+ class MetricsScopeStore:
+     def __init__(
+         self,
+         identifier: ScopeIdentifier,
+         /,
+     ) -> None:
+         self.identifier: ScopeIdentifier = identifier
+         self.entered: float = monotonic()
+         self.metrics: dict[type[State], State] = {}
+         self.exited: float | None = None
+         self.nested: list[MetricsScopeStore] = []
+
+     @property
+     def time(self) -> float:
+         return (self.exited or monotonic()) - self.entered
+
+     @property
+     def finished(self) -> bool:
+         return self.exited is not None and all(nested.finished for nested in self.nested)
+
+     @overload
+     def merged[Metric: State](
+         self,
+     ) -> Sequence[State]: ...
+
+     @overload
+     def merged[Metric: State](
+         self,
+         metric: type[Metric],
+     ) -> Metric | None: ...
+
+     def merged[Metric: State](
+         self,
+         metric: type[Metric] | None = None,
+     ) -> Sequence[State] | Metric | None:
+         if metric is None:
+             merged_metrics: dict[type[State], State] = dict(self.metrics)
+             for nested in chain.from_iterable(nested.merged() for nested in self.nested):
+                 metric_type: type[State] = type(nested)
+                 current: State | None = merged_metrics.get(metric_type)
+
+                 if current is None:
+                     merged_metrics[metric_type] = nested
+                     continue  # keep going
+
+                 if hasattr(current, "__add__"):
+                     merged_metrics[metric_type] = current.__add__(nested)  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
+                     assert isinstance(merged_metrics[metric_type], State)  # nosec: B101
+                     continue  # keep going
+
+                 break  # we have multiple values without a way to merge them
+
+             return tuple(merged_metrics.values())
+
+         else:
+             merged_metric: State | None = self.metrics.get(metric)
+             for nested in self.nested:
+                 nested_metric: Metric | None = nested.merged(metric)
+                 if nested_metric is None:
+                     continue  # skip missing
+
+                 if merged_metric is None:
+                     merged_metric = nested_metric
+                     continue  # keep going
+
+                 if hasattr(merged_metric, "__add__"):
+                     merged_metric = merged_metric.__add__(nested_metric)  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue, reportUnknownVariableType]
+                     assert isinstance(merged_metric, metric)  # nosec: B101
+                     continue  # keep going
+
+                 break  # we have multiple values without a way to merge them
+
+             return cast(Metric | None, merged_metric)
+
+
+ @final
+ class MetricsHolder:
+     @classmethod
+     def handler(cls) -> MetricsHandler:
+         store_handler: Self = cls()
+         return MetricsHandler(
+             record=store_handler.record,
+             read=store_handler.read,
+             enter_scope=store_handler.enter_scope,
+             exit_scope=store_handler.exit_scope,
+         )
+
+     def __init__(self) -> None:
+         self.root_scope: ScopeIdentifier | None = None
+         self.scopes: dict[ScopeIdentifier, MetricsScopeStore] = {}
+
+     def record(
+         self,
+         scope: ScopeIdentifier,
+         /,
+         metric: State,
+     ) -> None:
+         assert self.root_scope is not None  # nosec: B101
+         assert scope in self.scopes  # nosec: B101
+
+         metric_type: type[State] = type(metric)
+         metrics: dict[type[State], State] = self.scopes[scope].metrics
+         if (current := metrics.get(metric_type)) and hasattr(current, "__add__"):
+             metrics[type(metric)] = current.__add__(metric)  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
+
+         else:  # keep only the latest value when the metric cannot be merged
+             metrics[type(metric)] = metric
+
+     async def read[Metric: State](
+         self,
+         scope: ScopeIdentifier,
+         /,
+         *,
+         metric: type[Metric],
+         merged: bool,
+     ) -> Metric | None:
+         assert self.root_scope is not None  # nosec: B101
+         assert scope in self.scopes  # nosec: B101
+
+         if merged:
+             return self.scopes[scope].merged(metric)
+
+         else:
+             return cast(Metric | None, self.scopes[scope].metrics.get(metric))
+
+     def enter_scope[Metric: State](
+         self,
+         scope: ScopeIdentifier,
+         /,
+     ) -> None:
+         assert scope not in self.scopes  # nosec: B101
+         scope_metrics = MetricsScopeStore(scope)
+         self.scopes[scope] = scope_metrics
+
+         if self.root_scope is None:
+             self.root_scope = scope
+
+         else:
+             for key in self.scopes.keys():
+                 if key.scope_id == scope.parent_id:
+                     self.scopes[key].nested.append(scope_metrics)
+                     return
+
+             ctx.log_debug(
+                 "Attempting to enter nested scope metrics without entering its parent first"
+             )
+
+     def exit_scope[Metric: State](
+         self,
+         scope: ScopeIdentifier,
+         /,
+     ) -> None:
+         assert scope in self.scopes  # nosec: B101
+         self.scopes[scope].exited = monotonic()
+
+
+ @final
+ class MetricsLogger:
+     @classmethod
+     def handler(
+         cls,
+         items_limit: int | None = None,
+         redact_content: bool = False,
+     ) -> MetricsHandler:
+         logger_handler: Self = cls(
+             items_limit=items_limit,
+             redact_content=redact_content,
+         )
+         return MetricsHandler(
+             record=logger_handler.record,
+             read=logger_handler.read,
+             enter_scope=logger_handler.enter_scope,
+             exit_scope=logger_handler.exit_scope,
+         )
+
+     def __init__(
+         self,
+         items_limit: int | None,
+         redact_content: bool,
+     ) -> None:
+         self.root_scope: ScopeIdentifier | None = None
+         self.scopes: dict[ScopeIdentifier, MetricsScopeStore] = {}
+         self.items_limit: int | None = items_limit
+         self.redact_content: bool = redact_content
+
+     def record(
+         self,
+         scope: ScopeIdentifier,
+         /,
+         metric: State,
+     ) -> None:
+         assert self.root_scope is not None  # nosec: B101
+         assert scope in self.scopes  # nosec: B101
+
+         metric_type: type[State] = type(metric)
+         metrics: dict[type[State], State] = self.scopes[scope].metrics
+         if (current := metrics.get(metric_type)) and hasattr(current, "__add__"):
+             metrics[type(metric)] = current.__add__(metric)  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
+
+         else:  # keep only the latest value when the metric cannot be merged
+             metrics[type(metric)] = metric
+         if log := _state_log(
+             metric,
+             list_items_limit=self.items_limit,
+             redact_content=self.redact_content,
+         ):
+             ctx.log_debug(f"Recorded metric:\n⎡ {type(metric).__qualname__}:{log}\n⌊")
+
+     async def read[Metric: State](
+         self,
+         scope: ScopeIdentifier,
+         /,
+         *,
+         metric: type[Metric],
+         merged: bool,
+     ) -> Metric | None:
+         assert self.root_scope is not None  # nosec: B101
+         assert scope in self.scopes  # nosec: B101
+
+         if merged:
+             return self.scopes[scope].merged(metric)
+
+         else:
+             return cast(Metric | None, self.scopes[scope].metrics.get(metric))
+
+     def enter_scope[Metric: State](
+         self,
+         scope: ScopeIdentifier,
+         /,
+     ) -> None:
+         assert scope not in self.scopes  # nosec: B101
+         scope_metrics = MetricsScopeStore(scope)
+         self.scopes[scope] = scope_metrics
+
+         if self.root_scope is None:
+             self.root_scope = scope
+
+         else:
+             for key in self.scopes.keys():
+                 if key.scope_id == scope.parent_id:
+                     self.scopes[key].nested.append(scope_metrics)
+                     return
+
+             ctx.log_debug(
+                 "Attempting to enter nested scope metrics without entering its parent first"
+             )
+
+     def exit_scope[Metric: State](
+         self,
+         scope: ScopeIdentifier,
+         /,
+     ) -> None:
+         assert scope in self.scopes  # nosec: B101
+         self.scopes[scope].exited = monotonic()
+
+         if scope == self.root_scope and self.scopes[scope].finished:
+             if log := _tree_log(
+                 self.scopes[scope],
+                 list_items_limit=self.items_limit,
+                 redact_content=self.redact_content,
+             ):
+                 ctx.log_debug(f"Metrics summary:\n{log}")
+
+
+ def _tree_log(
+     metrics: MetricsScopeStore,
+     list_items_limit: int | None,
+     redact_content: bool,
+ ) -> str:
+     log: str = (
+         f"⎡ @{metrics.identifier.label} [{metrics.identifier.scope_id}]({metrics.time:.2f}s):"
+     )
+
+     for metric in metrics.merged():
+         if type(metric) not in metrics.metrics:
+             continue  # skip metrics not available in this scope
+
+         metric_log: str = ""
+         for key, value in vars(metric).items():
+             if value_log := _value_log(
+                 value,
+                 list_items_limit=list_items_limit,
+                 redact_content=redact_content,
+             ):
+                 metric_log += f"\n├ {key}: {value_log}"
+
+             else:
+                 continue  # skip empty values
+
+         if not metric_log:
+             continue  # skip empty logs
+
+         log += f"\n⎡ •{type(metric).__qualname__}:{metric_log.replace('\n', '\n| ')}\n⌊"
+
+     for nested in metrics.nested:
+         nested_log: str = _tree_log(
+             nested,
+             list_items_limit=list_items_limit,
+             redact_content=redact_content,
+         )
+
+         log += f"\n\n{nested_log}"
+
+     return log.strip().replace("\n", "\n| ") + "\n⌊"
+
+
+ def _state_log(
+     value: State,
+     /,
+     list_items_limit: int | None,
+     redact_content: bool,
+ ) -> str | None:
+     state_log: str = ""
+     for key, element in vars(value).items():
+         element_log: str | None = _value_log(
+             element,
+             list_items_limit=list_items_limit,
+             redact_content=redact_content,
+         )
+
+         if element_log:
+             state_log += f"\n├ {key}: {element_log}"
+
+         else:
+             continue  # skip empty logs
+
+     if state_log:
+         return state_log
+
+     else:
+         return None  # skip empty logs
+
+
+ def _dict_log(
+     value: dict[Any, Any],
+     /,
+     list_items_limit: int | None,
+     redact_content: bool,
+ ) -> str | None:
+     dict_log: str = ""
+     for key, element in value.items():
+         element_log: str | None = _value_log(
+             element,
+             list_items_limit=list_items_limit,
+             redact_content=redact_content,
+         )
+         if element_log:
+             dict_log += f"\n[{key}]: {element_log}"
+
+         else:
+             continue  # skip empty logs
+
+     if dict_log:
+         return dict_log.replace("\n", "\n| ")
+
+     else:
+         return None  # skip empty logs
+
+
+ def _list_log(
+     value: list[Any],
+     /,
+     list_items_limit: int | None,
+     redact_content: bool,
+ ) -> str | None:
+     list_log: str = ""
+     enumerated: list[tuple[int, Any]] = list(enumerate(value))
+     if list_items_limit:
+         if list_items_limit > 0:
+             enumerated = enumerated[:list_items_limit]
+
+         else:
+             enumerated = enumerated[list_items_limit:]
+
+     for idx, element in enumerated:
+         element_log: str | None = _value_log(
+             element,
+             list_items_limit=list_items_limit,
+             redact_content=redact_content,
+         )
+         if element_log:
+             list_log += f"\n[{idx}] {element_log}"
+
+         else:
+             continue  # skip empty logs
+
+     if list_log:
+         return list_log.replace("\n", "\n| ")
+
+     else:
+         return None  # skip empty logs
+
+
+ def _raw_value_log(
+     value: Any,
+     /,
+     redact_content: bool,
+ ) -> str | None:
+     if value is MISSING:
+         return None  # skip missing
+
+     if redact_content:
+         return "[redacted]"
+
+     elif isinstance(value, str):
+         return f'"{value}"'.replace("\n", "\n| ")
+
+     else:
+         return str(value).strip().replace("\n", "\n| ")
+
+
+ def _value_log(
+     value: Any,
+     /,
+     list_items_limit: int | None,
+     redact_content: bool,
+ ) -> str | None:
+     # try unpack dicts
+     if isinstance(value, dict):
+         return _dict_log(
+             cast(dict[Any, Any], value),
+             list_items_limit=list_items_limit,
+             redact_content=redact_content,
+         )
+
+     # try unpack lists
+     elif isinstance(value, list):
+         return _list_log(
+             cast(list[Any], value),
+             list_items_limit=list_items_limit,
+             redact_content=redact_content,
+         )
+
+     # try unpack state
+     elif isinstance(value, State):
+         return _state_log(
+             value,
+             list_items_limit=list_items_limit,
+             redact_content=redact_content,
+         )
+
+     else:
+         return _raw_value_log(
+             value,
+             redact_content=redact_content,
+         )
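
Note on usage: both handlers above only combine repeated recordings of the same metric type when that type defines __add__ (see MetricsScopeStore.merged and the record methods); otherwise the latest value is kept. Below is a minimal sketch of such a metric. TokenUsage and its fields are purely illustrative, and keyword construction of State subclasses is assumed from the rest of the package, so treat this as a sketch rather than the library's documented API.

from haiway.state import State
from haiway.helpers.metrics import MetricsLogger


class TokenUsage(State):
    # hypothetical metric type; any State subclass defining __add__ can be merged
    input_tokens: int = 0
    output_tokens: int = 0

    def __add__(self, other: "TokenUsage") -> "TokenUsage":
        # return a new instance instead of mutating, keeping recorded values immutable
        return TokenUsage(
            input_tokens=self.input_tokens + other.input_tokens,
            output_tokens=self.output_tokens + other.output_tokens,
        )


# builds a MetricsHandler that logs each recorded metric and, once the root scope
# finishes, a summary tree of the merged metrics
metrics_handler = MetricsLogger.handler(items_limit=10, redact_content=False)

How the handler is wired into the context presumably lives in haiway/context/metrics.py, which appears in the file list above but is not shown in this excerpt.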
haiway/helpers/retries.py
@@ -0,0 +1,223 @@
+ from asyncio import CancelledError, iscoroutinefunction, sleep
+ from collections.abc import Callable, Coroutine
+ from time import sleep as sleep_sync
+ from typing import cast, overload
+
+ from haiway.context import ctx
+ from haiway.utils import mimic_function
+
+ __all__ = [
+     "retry",
+ ]
+
+
+ @overload
+ def retry[**Args, Result](
+     function: Callable[Args, Result],
+     /,
+ ) -> Callable[Args, Result]:
+     """\
+     Function wrapper that retries the wrapped function on failure. \
+     Works for both sync and async functions. \
+     It must not be used on class methods. \
+     This wrapper is not thread safe.
+
+     Parameters
+     ----------
+     function: Callable[Args, Result]
+         function to wrap in auto retry, either sync or async.
+
+     Returns
+     -------
+     Callable[Args, Result]
+         provided function wrapped in auto retry with the default configuration.
+     """
+
+
+ @overload
+ def retry[**Args, Result](
+     *,
+     limit: int = 1,
+     delay: Callable[[int, Exception], float] | float | None = None,
+     catching: set[type[Exception]] | tuple[type[Exception], ...] | type[Exception] = Exception,
+ ) -> Callable[[Callable[Args, Result]], Callable[Args, Result]]:
+     """\
+     Function wrapper that retries the wrapped function on failure. \
+     Works for both sync and async functions. \
+     It must not be used on class methods. \
+     This wrapper is not thread safe.
+
+     Parameters
+     ----------
+     limit: int
+         maximum number of retries, default is 1
+     delay: Callable[[int, Exception], float] | float | None
+         retry delay time in seconds, either a concrete value or a function producing it \
+         from the attempt number and the raised exception, default is None (no delay)
+     catching: set[type[Exception]] | tuple[type[Exception], ...] | type[Exception]
+         Exception types that trigger an auto retry. A retry happens only when an exception \
+         of a matching type (including subclasses) occurs. CancelledError is always \
+         propagated, even if specified explicitly.
+         Default is Exception - all subclasses of Exception will be handled.
+
+     Returns
+     -------
+     Callable[[Callable[Args, Result]], Callable[Args, Result]]
+         function wrapper for adding auto retry
+     """
+
+
+ def retry[**Args, Result](
+     function: Callable[Args, Result] | None = None,
+     *,
+     limit: int = 1,
+     delay: Callable[[int, Exception], float] | float | None = None,
+     catching: set[type[Exception]] | tuple[type[Exception], ...] | type[Exception] = Exception,
+ ) -> Callable[[Callable[Args, Result]], Callable[Args, Result]] | Callable[Args, Result]:
+     """\
+     Function wrapper that retries the wrapped function on failure. \
+     Works for both sync and async functions. \
+     It must not be used on class methods. \
+     This wrapper is not thread safe.
+
+     Parameters
+     ----------
+     function: Callable[Args, Result]
+         function to wrap in auto retry, either sync or async.
+     limit: int
+         maximum number of retries, default is 1
+     delay: Callable[[int, Exception], float] | float | None
+         retry delay time in seconds, either a concrete value or a function producing it \
+         from the attempt number and the raised exception, default is None (no delay)
+     catching: set[type[Exception]] | tuple[type[Exception], ...] | type[Exception]
+         Exception types that trigger an auto retry. A retry happens only when an exception \
+         of a matching type (including subclasses) occurs. CancelledError is always \
+         propagated, even if specified explicitly.
+         Default is Exception - all subclasses of Exception will be handled.
+
+     Returns
+     -------
+     Callable[[Callable[Args, Result]], Callable[Args, Result]] | \
+     Callable[Args, Result]
+         function wrapper for adding auto retry or the wrapped function
+     """
+
+     def _wrap(
+         function: Callable[Args, Result],
+         /,
+     ) -> Callable[Args, Result]:
+         if iscoroutinefunction(function):
+             return cast(
+                 Callable[Args, Result],
+                 _wrap_async(
+                     function,
+                     limit=limit,
+                     delay=delay,
+                     catching=catching if isinstance(catching, set | tuple) else {catching},
+                 ),
+             )
+
+         else:
+             return _wrap_sync(
+                 function,
+                 limit=limit,
+                 delay=delay,
+                 catching=catching if isinstance(catching, set | tuple) else {catching},
+             )
+
+     if function is not None:
+         return _wrap(function)
+     else:
+         return _wrap
+
+
+ def _wrap_sync[**Args, Result](
+     function: Callable[Args, Result],
+     *,
+     limit: int,
+     delay: Callable[[int, Exception], float] | float | None,
+     catching: set[type[Exception]] | tuple[type[Exception], ...],
+ ) -> Callable[Args, Result]:
+     assert limit > 0, "Limit has to be greater than zero"  # nosec: B101
+
+     @mimic_function(function)
+     def wrapped(
+         *args: Args.args,
+         **kwargs: Args.kwargs,
+     ) -> Result:
+         attempt: int = 0
+         while True:
+             try:
+                 return function(*args, **kwargs)
+             except CancelledError as exc:
+                 raise exc
+
+             except Exception as exc:
+                 if attempt < limit and any(isinstance(exc, exception) for exception in catching):
+                     attempt += 1
+                     ctx.log_error(
+                         "Attempting to retry %s which failed due to an error: %s",
+                         function.__name__,
+                         exc,
+                     )
+
+                     match delay:
+                         case None:
+                             continue
+
+                         case float(strict):
+                             sleep_sync(strict)
+
+                         case make_delay:  # type: Callable[[int, Exception], float]
+                             sleep_sync(make_delay(attempt, exc))  # pyright: ignore[reportCallIssue, reportUnknownArgumentType]
+
+                 else:
+                     raise exc
+
+     return wrapped
+
+
+ def _wrap_async[**Args, Result](
+     function: Callable[Args, Coroutine[None, None, Result]],
+     *,
+     limit: int,
+     delay: Callable[[int, Exception], float] | float | None,
+     catching: set[type[Exception]] | tuple[type[Exception], ...],
+ ) -> Callable[Args, Coroutine[None, None, Result]]:
+     assert limit > 0, "Limit has to be greater than zero"  # nosec: B101
+
+     @mimic_function(function)
+     async def wrapped(
+         *args: Args.args,
+         **kwargs: Args.kwargs,
+     ) -> Result:
+         attempt: int = 0
+         while True:
+             try:
+                 return await function(*args, **kwargs)
+             except CancelledError as exc:
+                 raise exc
+
+             except Exception as exc:
+                 if attempt < limit and any(isinstance(exc, exception) for exception in catching):
+                     attempt += 1
+                     ctx.log_error(
+                         "Attempting to retry %s which failed due to an error",
+                         function.__name__,
+                         exception=exc,
+                     )
+
+                     match delay:
+                         case None:
+                             continue
+
+                         case float(strict):
+                             await sleep(strict)
+
+                         case make_delay:  # type: Callable[[int, Exception], float]
+                             await sleep(make_delay(attempt, exc))  # pyright: ignore[reportCallIssue, reportUnknownArgumentType]
+
+                 else:
+                     raise exc
+
+     return wrapped
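
For reference, a brief usage sketch of the decorator defined above. read_config and fetch_data are made-up examples, not part of the package; the keyword form follows the overloads shown, and a callable delay receives the attempt number and the raised exception, matching the make_delay(attempt, exc) calls in _wrap_sync and _wrap_async.

from haiway.helpers.retries import retry


@retry  # bare form: a single retry, no delay, catching any Exception
def read_config() -> dict[str, str]:
    # made-up flaky reader, purely for illustration
    return {"mode": "default"}


@retry(limit=3, delay=lambda attempt, exc: 0.5 * attempt, catching=ConnectionError)
async def fetch_data(url: str) -> bytes:
    # made-up network call; retried up to three times with a growing delay,
    # and only when a ConnectionError (or subclass) is raised
    raise ConnectionError(f"cannot reach {url}")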