prefect-client 3.0.0rc7__py3-none-any.whl → 3.0.0rc9__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
prefect/futures.py CHANGED
@@ -2,11 +2,13 @@ import abc
 import concurrent.futures
 import inspect
 import uuid
+from collections.abc import Iterator
 from functools import partial
-from typing import Any, Generic, Optional, Set, Union, cast
+from typing import Any, Generic, List, Optional, Set, Union, cast
 
 from typing_extensions import TypeVar
 
+from prefect._internal.compatibility.deprecated import deprecated_async_method
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.objects import TaskRun
 from prefect.exceptions import ObjectNotFound
@@ -16,13 +18,15 @@ from prefect.task_runs import TaskRunWaiter
 from prefect.utilities.annotations import quote
 from prefect.utilities.asyncutils import run_coro_as_sync
 from prefect.utilities.collections import StopVisiting, visit_collection
+from prefect.utilities.timeout import timeout as timeout_context
 
 F = TypeVar("F")
+R = TypeVar("R")
 
 logger = get_logger(__name__)
 
 
-class PrefectFuture(abc.ABC):
+class PrefectFuture(abc.ABC, Generic[R]):
     """
     Abstract base class for Prefect futures. A Prefect future is a handle to the
     asynchronous execution of a task run. It provides methods to wait for the task
@@ -31,7 +35,7 @@ class PrefectFuture(abc.ABC):
 
     def __init__(self, task_run_id: uuid.UUID):
         self._task_run_id = task_run_id
-        self._final_state = None
+        self._final_state: Optional[State[R]] = None
 
     @property
     def task_run_id(self) -> uuid.UUID:
@@ -61,7 +65,7 @@ class PrefectFuture(abc.ABC):
         If the task run has already completed, this method will return immediately.
 
         Args:
-            - timeout: The maximum number of seconds to wait for the task run to complete.
+            timeout: The maximum number of seconds to wait for the task run to complete.
                 If the task run has not completed after the timeout has elapsed, this method will return.
         """
 
@@ -70,7 +74,7 @@
         self,
         timeout: Optional[float] = None,
         raise_on_failure: bool = True,
-    ) -> Any:
+    ) -> R:
         ...
         """
         Get the result of the task run associated with this future.
@@ -78,16 +82,16 @@
         If the task run has not completed, this method will wait for the task run to complete.
 
         Args:
-            - timeout: The maximum number of seconds to wait for the task run to complete.
+            timeout: The maximum number of seconds to wait for the task run to complete.
                 If the task run has not completed after the timeout has elapsed, this method will return.
-            - raise_on_failure: If `True`, an exception will be raised if the task run fails.
+            raise_on_failure: If `True`, an exception will be raised if the task run fails.
 
         Returns:
             The result of the task run.
         """
 
 
-class PrefectWrappedFuture(PrefectFuture, abc.ABC, Generic[F]):
+class PrefectWrappedFuture(PrefectFuture, abc.ABC, Generic[R, F]):
     """
     A Prefect future that wraps another future object.
     """
@@ -102,12 +106,13 @@ class PrefectWrappedFuture(PrefectFuture, abc.ABC, Generic[F]):
         return self._wrapped_future
 
 
-class PrefectConcurrentFuture(PrefectWrappedFuture[concurrent.futures.Future]):
+class PrefectConcurrentFuture(PrefectWrappedFuture[R, concurrent.futures.Future]):
     """
     A Prefect future that wraps a concurrent.futures.Future. This future is used
     when the task run is submitted to a ThreadPoolExecutor.
     """
 
+    @deprecated_async_method
     def wait(self, timeout: Optional[float] = None) -> None:
         try:
             result = self._wrapped_future.result(timeout=timeout)
@@ -116,11 +121,12 @@ class PrefectConcurrentFuture(PrefectWrappedFuture[concurrent.futures.Future]):
         if isinstance(result, State):
             self._final_state = result
 
+    @deprecated_async_method
     def result(
         self,
         timeout: Optional[float] = None,
         raise_on_failure: bool = True,
-    ) -> Any:
+    ) -> R:
         if not self._final_state:
             try:
                 future_result = self._wrapped_future.result(timeout=timeout)
@@ -156,7 +162,7 @@ class PrefectConcurrentFuture(PrefectWrappedFuture[concurrent.futures.Future]):
         )
 
 
-class PrefectDistributedFuture(PrefectFuture):
+class PrefectDistributedFuture(PrefectFuture[R]):
     """
     Represents the result of a computation happening anywhere.
 
@@ -165,6 +171,7 @@ class PrefectDistributedFuture(PrefectFuture):
    any task run scheduled in Prefect's API.
    """
 
+    @deprecated_async_method
     def wait(self, timeout: Optional[float] = None) -> None:
         return run_coro_as_sync(self.wait_async(timeout=timeout))
 
@@ -201,11 +208,12 @@
             self._final_state = task_run.state
             return
 
+    @deprecated_async_method
     def result(
         self,
         timeout: Optional[float] = None,
         raise_on_failure: bool = True,
-    ) -> Any:
+    ) -> R:
         return run_coro_as_sync(
             self.result_async(timeout=timeout, raise_on_failure=raise_on_failure)
         )
@@ -214,7 +222,7 @@
         self,
         timeout: Optional[float] = None,
         raise_on_failure: bool = True,
-    ):
+    ) -> R:
         if not self._final_state:
             await self.wait_async(timeout=timeout)
             if not self._final_state:
@@ -232,6 +240,63 @@
         return self.task_run_id == other.task_run_id
 
 
+class PrefectFutureList(list, Iterator, Generic[F]):
+    """
+    A list of Prefect futures.
+
+    This class provides methods to wait for all futures
+    in the list to complete and to retrieve the results of all task runs.
+    """
+
+    def wait(self, timeout: Optional[float] = None) -> None:
+        """
+        Wait for all futures in the list to complete.
+
+        Args:
+            timeout: The maximum number of seconds to wait for all futures to
+                complete. This method will not raise if the timeout is reached.
+        """
+        try:
+            with timeout_context(timeout):
+                for future in self:
+                    future.wait()
+        except TimeoutError:
+            logger.debug("Timed out waiting for all futures to complete.")
+            return
+
+    def result(
+        self,
+        timeout: Optional[float] = None,
+        raise_on_failure: bool = True,
+    ) -> List:
+        """
+        Get the results of all task runs associated with the futures in the list.
+
+        Args:
+            timeout: The maximum number of seconds to wait for all futures to
+                complete.
+            raise_on_failure: If `True`, an exception will be raised if any task run fails.
+
+        Returns:
+            A list of results of the task runs.
+
+        Raises:
+            TimeoutError: If the timeout is reached before all futures complete.
+        """
+        try:
+            with timeout_context(timeout):
+                return [
+                    future.result(raise_on_failure=raise_on_failure) for future in self
+                ]
+        except TimeoutError as exc:
+            # timeout came from inside the task
+            if "Scope timed out after {timeout} second(s)." not in str(exc):
+                raise
+            raise TimeoutError(
+                f"Timed out waiting for all futures to complete within {timeout} seconds"
+            ) from exc
+
+
 def resolve_futures_to_states(
     expr: Union[PrefectFuture, Any],
 ) -> Union[State, Any]:
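
Taken together, these changes make `PrefectFuture` generic over its result type and add `PrefectFutureList` for working with batches of futures. A minimal sketch of how this could look from user code, assuming `.map()` returns a `PrefectFutureList` in this release (the flow and task here are illustrative, not part of the diff):

```python
from prefect import flow, task


@task
def double(x: int) -> int:
    return x * 2


@flow
def my_flow() -> list:
    # assumed: .map() returns a PrefectFutureList of PrefectFuture[int]
    futures = double.map([1, 2, 3])
    futures.wait(timeout=10)  # logs and returns quietly if the timeout elapses
    # result() raises on task failure since raise_on_failure defaults to True
    return futures.result(timeout=10)
```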
prefect/logging/loggers.py CHANGED
@@ -115,7 +115,7 @@ def get_run_logger(
             addition to the run metadata
 
     Raises:
-        RuntimeError: If no context can be found
+        MissingContextError: If no context can be found
     """
     # Check for existing contexts
     task_run_context = prefect.context.TaskRunContext.get()
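
The docstring now names the exception actually raised. A small sketch of handling it when no run context is active (the fallback logger is illustrative):

```python
import logging

from prefect import get_run_logger
from prefect.exceptions import MissingContextError

try:
    logger = get_run_logger()
except MissingContextError:
    # no flow or task run is active; fall back to a plain module logger
    logger = logging.getLogger(__name__)
```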
prefect/main.py ADDED
@@ -0,0 +1,70 @@
+# Import user-facing API
+from prefect.deployments import deploy
+from prefect.states import State
+from prefect.logging import get_run_logger
+from prefect.flows import flow, Flow, serve
+from prefect.transactions import Transaction
+from prefect.tasks import task, Task
+from prefect.context import tags
+from prefect.manifests import Manifest
+from prefect.utilities.annotations import unmapped, allow_failure
+from prefect.results import BaseResult
+from prefect.flow_runs import pause_flow_run, resume_flow_run, suspend_flow_run
+from prefect.client.orchestration import get_client, PrefectClient
+from prefect.client.cloud import get_cloud_client, CloudClient
+import prefect.variables
+import prefect.runtime
+
+# Import modules that register types
+import prefect.serializers
+import prefect.blocks.notifications
+import prefect.blocks.system
+
+# Initialize the process-wide profile and registry at import time
+import prefect.context
+
+# Perform any forward-ref updates needed for Pydantic models
+import prefect.client.schemas
+
+prefect.context.FlowRunContext.model_rebuild()
+prefect.context.TaskRunContext.model_rebuild()
+prefect.client.schemas.State.model_rebuild()
+prefect.client.schemas.StateCreate.model_rebuild()
+Transaction.model_rebuild()
+
+# Configure logging
+import prefect.logging.configuration
+
+prefect.logging.configuration.setup_logging()
+prefect.logging.get_logger("profiles").debug(
+    f"Using profile {prefect.context.get_settings_context().profile.name!r}"
+)
+
+
+from prefect._internal.compatibility.deprecated import (
+    inject_renamed_module_alias_finder,
+)
+
+inject_renamed_module_alias_finder()
+
+
+# Declare API for type-checkers
+__all__ = [
+    "allow_failure",
+    "flow",
+    "Flow",
+    "get_client",
+    "get_run_logger",
+    "Manifest",
+    "State",
+    "tags",
+    "task",
+    "Task",
+    "Transaction",
+    "unmapped",
+    "serve",
+    "deploy",
+    "pause_flow_run",
+    "resume_flow_run",
+    "suspend_flow_run",
+]
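
`prefect/main.py` consolidates the user-facing API and import-time setup (model rebuilds, logging configuration, module alias finder) in one module. Assuming `prefect/__init__.py` re-exports these names, which this diff does not show, user code continues to import from the package root:

```python
from prefect import flow, get_run_logger, task


@task
def greet(name: str) -> str:
    return f"Hello, {name}!"


@flow
def hello(name: str = "world") -> str:
    get_run_logger().info("Greeting %s", name)
    return greet(name)
```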
prefect/plugins.py CHANGED
@@ -8,12 +8,11 @@ Currently supported entrypoints:
         should be imported when Prefect is imported.
 """
 
-import sys
 from types import ModuleType
 from typing import Any, Dict, Union
 
 import prefect.settings
-from prefect.utilities.compat import EntryPoint, EntryPoints, entry_points
+from prefect.utilities.compat import EntryPoints, entry_points
 
 
 def safe_load_entrypoints(entrypoints: EntryPoints) -> Dict[str, Union[Exception, Any]]:
@@ -39,68 +38,6 @@ def safe_load_entrypoints(entrypoints: EntryPoints) -> Dict[str, Union[Exception
     return results
 
 
-def load_extra_entrypoints() -> Dict[str, Union[Exception, Any]]:
-    # Note: Return values are only exposed for testing.
-    results = {}
-
-    if not prefect.settings.PREFECT_EXTRA_ENTRYPOINTS.value():
-        return results
-
-    values = {
-        value.strip()
-        for value in prefect.settings.PREFECT_EXTRA_ENTRYPOINTS.value().split(",")
-    }
-
-    entrypoints = []
-    for value in values:
-        try:
-            entrypoint = EntryPoint(name=None, value=value, group="prefect-extra")
-        except Exception as exc:
-            print(
-                (
-                    f"Warning! Failed to parse extra entrypoint {value!r}:"
-                    f" {type(exc).__name__}: {exc}"
-                ),
-                file=sys.stderr,
-            )
-            results[value] = exc
-        else:
-            entrypoints.append(entrypoint)
-
-    for value, result in zip(
-        values, safe_load_entrypoints(EntryPoints(entrypoints)).values()
-    ):
-        results[value] = result
-
-        if isinstance(result, Exception):
-            print(
-                (
-                    f"Warning! Failed to load extra entrypoint {value!r}:"
-                    f" {type(result).__name__}: {result}"
-                ),
-                file=sys.stderr,
-            )
-        elif callable(result):
-            try:
-                results[value] = result()
-            except Exception as exc:
-                print(
-                    (
-                        f"Warning! Failed to run callable entrypoint {value!r}:"
-                        f" {type(exc).__name__}: {exc}"
-                    ),
-                    file=sys.stderr,
-                )
-                results[value] = exc
-        else:
-            if prefect.settings.PREFECT_DEBUG_MODE:
-                print(
-                    "Loaded extra entrypoint {value!r} successfully.", file=sys.stderr
-                )
-
-    return results
-
-
 def load_prefect_collections() -> Dict[str, ModuleType]:
     """
     Load all Prefect collections that define an entrypoint in the group
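
With `load_extra_entrypoints` removed, arbitrary modules can no longer be loaded via the `PREFECT_EXTRA_ENTRYPOINTS` setting; the `prefect.collections` entry-point group consumed by `load_prefect_collections` remains the supported hook. A hypothetical collection package would register itself like this (all package and module names are illustrative):

```python
# setup.py for a hypothetical collection package
from setuptools import setup

setup(
    name="prefect-my-collection",
    packages=["prefect_my_collection"],
    entry_points={
        # load_prefect_collections() imports every module registered here
        "prefect.collections": ["my_collection = prefect_my_collection"],
    },
)
```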
prefect/results.py CHANGED
@@ -1,4 +1,5 @@
 import abc
+import inspect
 import uuid
 from functools import partial
 from typing import (
@@ -142,38 +143,6 @@ def get_default_persist_setting() -> bool:
     return PREFECT_RESULTS_PERSIST_BY_DEFAULT.value()
 
 
-def flow_features_require_result_persistence(flow: "Flow") -> bool:
-    """
-    Returns `True` if the given flow uses features that require its result to be
-    persisted.
-    """
-    if not flow.cache_result_in_memory:
-        return True
-    return False
-
-
-def flow_features_require_child_result_persistence(flow: "Flow") -> bool:
-    """
-    Returns `True` if the given flow uses features that require child flow and task
-    runs to persist their results.
-    """
-    if flow and flow.retries:
-        return True
-    return False
-
-
-def task_features_require_result_persistence(task: "Task") -> bool:
-    """
-    Returns `True` if the given task uses features that require its result to be
-    persisted.
-    """
-    if task.cache_key_fn:
-        return True
-    if not task.cache_result_in_memory:
-        return True
-    return False
-
-
 def _format_user_supplied_storage_key(key: str) -> str:
     # Note here we are pinning to task runs since flow runs do not support storage keys
     # yet; we'll need to split logic in the future or have two separate functions
@@ -235,17 +204,7 @@ class ResultFactory(BaseModel):
                 result_storage=flow.result_storage or ctx.result_factory.storage_block,
                 result_serializer=flow.result_serializer
                 or ctx.result_factory.serializer,
-                persist_result=(
-                    flow.persist_result
-                    if flow.persist_result is not None
-                    # !! Child flows persist their result by default if the it or the
-                    # parent flow uses a feature that requires it
-                    else (
-                        flow_features_require_result_persistence(flow)
-                        or flow_features_require_child_result_persistence(ctx.flow)
-                        or get_default_persist_setting()
-                    )
-                ),
+                persist_result=flow.persist_result,
                 cache_result_in_memory=flow.cache_result_in_memory,
                 storage_key_fn=DEFAULT_STORAGE_KEY_FN,
                 client=client,
@@ -258,16 +217,7 @@
                 client=client,
                 result_storage=flow.result_storage,
                 result_serializer=flow.result_serializer,
-                persist_result=(
-                    flow.persist_result
-                    if flow.persist_result is not None
-                    # !! Flows persist their result by default if uses a feature that
-                    # requires it
-                    else (
-                        flow_features_require_result_persistence(flow)
-                        or get_default_persist_setting()
-                    )
-                ),
+                persist_result=flow.persist_result,
                 cache_result_in_memory=flow.cache_result_in_memory,
                 storage_key_fn=DEFAULT_STORAGE_KEY_FN,
             )
@@ -318,21 +268,7 @@
             if ctx and ctx.result_factory
             else get_default_result_serializer()
         )
-        persist_result = (
-            task.persist_result
-            if task.persist_result is not None
-            # !! Tasks persist their result by default if their parent flow uses a
-            # feature that requires it or the task uses a feature that requires it
-            else (
-                (
-                    flow_features_require_child_result_persistence(ctx.flow)
-                    if ctx
-                    else False
-                )
-                or task_features_require_result_persistence(task)
-                or get_default_persist_setting()
-            )
-        )
+        persist_result = task.persist_result
 
         cache_result_in_memory = task.cache_result_in_memory
 
@@ -355,11 +291,14 @@
         cls: Type[Self],
         result_storage: ResultStorage,
         result_serializer: ResultSerializer,
-        persist_result: bool,
+        persist_result: Optional[bool],
         cache_result_in_memory: bool,
         storage_key_fn: Callable[[], str],
         client: "PrefectClient",
     ) -> Self:
+        if persist_result is None:
+            persist_result = get_default_persist_setting()
+
         storage_block_id, storage_block = await cls.resolve_storage_block(
             result_storage, client=client, persist_result=persist_result
         )
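
The net effect of the hunks above: feature-based persistence inference (flow retries, task cache keys, disabled in-memory caching) is gone, and an unset `persist_result` now resolves to `get_default_persist_setting()` inside `default_factory`. Code that relied on the old inference may want to opt in explicitly; a hedged sketch:

```python
from prefect import task
from prefect.tasks import task_input_hash


# Previously, cache_key_fn alone forced result persistence; now an unset
# persist_result follows PREFECT_RESULTS_PERSIST_BY_DEFAULT, so opt in
# explicitly if the cache must survive the process.
@task(cache_key_fn=task_input_hash, persist_result=True)
def expensive(x: int) -> int:
    return x**2
```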
@@ -614,7 +553,6 @@ class PersistedResult(BaseResult):
         """
         Retrieve the data and deserialize it into the original object.
         """
-
         if self.has_cached_object():
             return self._cache
 
@@ -680,7 +618,46 @@
         # this could error if the serializer requires kwargs
         serializer = Serializer(type=self.serializer_type)
 
-        data = serializer.dumps(obj)
+        try:
+            data = serializer.dumps(obj)
+        except Exception as exc:
+            extra_info = (
+                'You can try a different serializer (e.g. result_serializer="json") '
+                "or disabling persistence (persist_result=False) for this flow or task."
+            )
+            # check if this is a known issue with cloudpickle and pydantic
+            # and add extra information to help the user recover
+
+            if (
+                isinstance(exc, TypeError)
+                and isinstance(obj, BaseModel)
+                and str(exc).startswith("cannot pickle")
+            ):
+                try:
+                    from IPython import get_ipython
+
+                    if get_ipython() is not None:
+                        extra_info = inspect.cleandoc(
+                            """
+                            This is a known issue in Pydantic that prevents
+                            locally-defined (non-imported) models from being
+                            serialized by cloudpickle in IPython/Jupyter
+                            environments. Please see
+                            https://github.com/pydantic/pydantic/issues/8232 for
+                            more information. To fix the issue, either: (1) move
+                            your Pydantic class definition to an importable
+                            location, (2) use the JSON serializer for your flow
+                            or task (`result_serializer="json"`), or (3)
+                            disable result persistence for your flow or task
+                            (`persist_result=False`).
+                            """
+                        ).replace("\n", " ")
+                except ImportError:
+                    pass
+            raise ValueError(
+                f"Failed to serialize object of type {type(obj).__name__!r} with "
+                f"serializer {serializer.type!r}. {extra_info}"
+            ) from exc
         blob = PersistedResultBlob(
             serializer=serializer, data=data, expiration=self.expiration
         )
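
The new error path turns an opaque pickling failure into a `ValueError` with recovery hints. One of the suggested workarounds, sketched for a locally defined Pydantic model (names are illustrative):

```python
from prefect import flow
from pydantic import BaseModel


class Point(BaseModel):  # defined locally, e.g. in a notebook cell
    x: int
    y: int


# The JSON serializer sidesteps the cloudpickle/Pydantic issue that the
# improved error message describes.
@flow(result_serializer="json", persist_result=True)
def make_point() -> Point:
    return Point(x=1, y=2)
```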
prefect/runner/storage.py CHANGED
@@ -559,8 +559,70 @@ class BlockStorageAdapter:
         return False
 
 
-def create_storage_from_url(
-    url: str, pull_interval: Optional[int] = 60
+class LocalStorage:
+    """
+    Sets the working directory in the local filesystem.
+    Parameters:
+        path: Local file path to set the working directory for the flow
+    Examples:
+        Sets the working directory for the local path to the flow:
+        ```python
+        from prefect.runner.storage import LocalStorage
+        storage = LocalStorage(
+            path="/path/to/local/flow_directory",
+        )
+        ```
+    """
+
+    def __init__(
+        self,
+        path: str,
+        pull_interval: Optional[int] = None,
+    ):
+        self._path = Path(path).resolve()
+        self._logger = get_logger("runner.storage.local-storage")
+        self._storage_base_path = Path.cwd()
+        self._pull_interval = pull_interval
+
+    @property
+    def destination(self) -> Path:
+        return self._path
+
+    def set_base_path(self, path: Path):
+        self._storage_base_path = path
+
+    @property
+    def pull_interval(self) -> Optional[int]:
+        return self._pull_interval
+
+    async def pull_code(self):
+        # Local storage assumes the code already exists on the local filesystem
+        # and does not need to be pulled from a remote location
+        pass
+
+    def to_pull_step(self) -> dict:
+        """
+        Returns a dictionary representation of the storage object that can be
+        used as a deployment pull step.
+        """
+        step = {
+            "prefect.deployments.steps.set_working_directory": {
+                "directory": str(self.destination)
+            }
+        }
+        return step
+
+    def __eq__(self, __value) -> bool:
+        if isinstance(__value, LocalStorage):
+            return self._path == __value._path
+        return False
+
+    def __repr__(self) -> str:
+        return f"LocalStorage(path={self._path!r})"
+
+
+def create_storage_from_source(
+    source: str, pull_interval: Optional[int] = 60
 ) -> RunnerStorage:
     """
     Creates a storage object from a URL.
@@ -574,11 +636,18 @@ def create_storage_from_url(
     Returns:
         RunnerStorage: A runner storage compatible object
     """
-    parsed_url = urlparse(url)
-    if parsed_url.scheme == "git" or parsed_url.path.endswith(".git"):
-        return GitRepository(url=url, pull_interval=pull_interval)
+    logger = get_logger("runner.storage")
+    parsed_source = urlparse(source)
+    if parsed_source.scheme == "git" or parsed_source.path.endswith(".git"):
+        return GitRepository(url=source, pull_interval=pull_interval)
+    elif parsed_source.scheme in ("file", "local"):
+        source_path = source.split("://", 1)[-1]
+        return LocalStorage(path=source_path, pull_interval=pull_interval)
+    elif parsed_source.scheme in fsspec.available_protocols():
+        return RemoteStorage(url=source, pull_interval=pull_interval)
     else:
-        return RemoteStorage(url=url, pull_interval=pull_interval)
+        logger.debug("No valid fsspec protocol found for URL, assuming local storage.")
+        return LocalStorage(path=source, pull_interval=pull_interval)
 
 
 def _format_token_from_credentials(netloc: str, credentials: dict) -> str:
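
A sketch of how the renamed `create_storage_from_source` now dispatches on the source string, assuming an `s3` fsspec protocol is available in the environment (all URLs are illustrative):

```python
from prefect.runner.storage import create_storage_from_source

create_storage_from_source("https://github.com/org/repo.git")  # GitRepository
create_storage_from_source("s3://my-bucket/flows")  # RemoteStorage (fsspec protocol)
create_storage_from_source("file:///opt/flows")  # LocalStorage ("file" scheme)
create_storage_from_source("/opt/flows")  # LocalStorage (fallback)
```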