opengris-parfun 7.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. opengris_parfun-7.3.0.dist-info/METADATA +165 -0
  2. opengris_parfun-7.3.0.dist-info/RECORD +43 -0
  3. opengris_parfun-7.3.0.dist-info/WHEEL +5 -0
  4. opengris_parfun-7.3.0.dist-info/licenses/LICENSE +201 -0
  5. opengris_parfun-7.3.0.dist-info/licenses/LICENSE.spdx +7 -0
  6. opengris_parfun-7.3.0.dist-info/licenses/NOTICE +7 -0
  7. opengris_parfun-7.3.0.dist-info/top_level.txt +1 -0
  8. parfun/__init__.py +26 -0
  9. parfun/about.py +1 -0
  10. parfun/backend/__init__.py +0 -0
  11. parfun/backend/dask.py +151 -0
  12. parfun/backend/local_multiprocessing.py +92 -0
  13. parfun/backend/local_single_process.py +47 -0
  14. parfun/backend/mixins.py +68 -0
  15. parfun/backend/profiled_future.py +50 -0
  16. parfun/backend/scaler.py +226 -0
  17. parfun/backend/utility.py +7 -0
  18. parfun/combine/__init__.py +0 -0
  19. parfun/combine/collection.py +13 -0
  20. parfun/combine/dataframe.py +13 -0
  21. parfun/dataframe.py +175 -0
  22. parfun/decorators.py +135 -0
  23. parfun/entry_point.py +180 -0
  24. parfun/functions.py +71 -0
  25. parfun/kernel/__init__.py +0 -0
  26. parfun/kernel/function_signature.py +197 -0
  27. parfun/kernel/parallel_function.py +262 -0
  28. parfun/object.py +7 -0
  29. parfun/partition/__init__.py +0 -0
  30. parfun/partition/api.py +136 -0
  31. parfun/partition/collection.py +13 -0
  32. parfun/partition/dataframe.py +16 -0
  33. parfun/partition/object.py +50 -0
  34. parfun/partition/primitives.py +317 -0
  35. parfun/partition/utility.py +54 -0
  36. parfun/partition_size_estimator/__init__.py +0 -0
  37. parfun/partition_size_estimator/linear_regression_estimator.py +189 -0
  38. parfun/partition_size_estimator/mixins.py +22 -0
  39. parfun/partition_size_estimator/object.py +19 -0
  40. parfun/profiler/__init__.py +0 -0
  41. parfun/profiler/functions.py +261 -0
  42. parfun/profiler/object.py +68 -0
  43. parfun/py_list.py +56 -0
parfun/decorators.py ADDED
@@ -0,0 +1,135 @@
+ """
+ A decorator that helps users run their functions in parallel.
+ """
+
+ import importlib
+ import warnings
+ from functools import wraps
+ from typing import Callable, Iterable, Optional, Tuple, Union
+
+ from parfun.kernel.function_signature import NamedArguments
+ from parfun.kernel.parallel_function import ParallelFunction
+ from parfun.object import FunctionInputType, FunctionOutputType
+ from parfun.partition.object import PartitionGenerator
+ from parfun.partition_size_estimator.linear_regression_estimator import LinearRegessionEstimator
+ from parfun.partition_size_estimator.mixins import PartitionSizeEstimator
+
+
+ def parallel(
+     split: Callable[[NamedArguments], Tuple[NamedArguments, PartitionGenerator[NamedArguments]]],
+     combine_with: Callable[[Iterable[FunctionOutputType]], FunctionOutputType],
+     initial_partition_size: Optional[Union[int, Callable[[FunctionInputType], int]]] = None,
+     fixed_partition_size: Optional[Union[int, Callable[[FunctionInputType], int]]] = None,
+     profile: bool = False,
+     trace_export: Optional[str] = None,
+     partition_size_estimator_factory: Callable[[], PartitionSizeEstimator] = LinearRegessionEstimator,
+ ) -> Callable:
+     """
+     Returns a function decorator that automatically parallelizes a function.
+
+     .. code:: python
+
+         @pf.parallel(
+             split=pf.per_argument(
+                 values=pf.py_list.by_chunk,
+             ),
+             combine_with=pf.py_list.concat
+         )
+         def multiply_by_constant(values: Iterable[int], constant: int):
+             return [v * constant for v in values]
+
+         # Calling the decorated function is then functionally equivalent to
+         # running the original function in a single sequential loop:
+
+         results = []
+         for partition in pf.py_list.by_chunk(values):
+             results.append(multiply_by_constant(partition, constant))
+
+         return combine_with(results)
+
+     :param split:
+         Partitions the input data using the provided partitioning function.
+
+         See :py:mod:`~parfun.partition.api` for the list of predefined partitioning functions.
+
+     :param combine_with: aggregates the partial results returned by the partitioned calls into a single value.
+     :type combine_with: Callable
+     :param initial_partition_size:
+         Overrides the first estimate of the partition size estimator.
+
+         If the value is a callable, the function will be provided with the input to be partitioned and shall
+         return the initial partition size to use.
+     :type initial_partition_size: int | Callable[[PartitionType], int] | None
+     :param fixed_partition_size:
+         Uses a constant partition size and does not run the partition size estimator.
+
+         If the value is a callable, the function will be provided with the input to be partitioned and shall
+         return the partition size to use.
+     :type fixed_partition_size: int | Callable[[PartitionType], int] | None
+     :param profile: if ``True``, prints additional debugging information about the parallelization overhead.
+     :type profile: bool
+     :param trace_export: if set, exports the execution timings to the provided CSV file path.
+     :type trace_export: str
+     :param partition_size_estimator_factory: the partition size estimator class to use.
+     :type partition_size_estimator_factory: Callable[[], PartitionSizeEstimator]
+
+     :return: a decorated function
+     :rtype: Callable
+     """
+
+     def decorator(function: Callable[[FunctionInputType], FunctionOutputType]):
+         # Initializes a ParallelFunction object that handles the parallel computation automatically.
+         parallel_function = ParallelFunction(
+             function=function,
+             function_name=function.__name__,
+             split=split,
+             combine_with=combine_with,
+             initial_partition_size=initial_partition_size,
+             fixed_partition_size=fixed_partition_size,
+             profile=profile,
+             trace_export=trace_export,
+             partition_size_estimator_factory=partition_size_estimator_factory,
+         )
+
+         @wraps(function)
+         def wrapped(*args, **kwargs):
+             # Remark: we cannot decorate `parallel_function` with `wraps` directly as it is not a regular function.
+             return parallel_function(*args, **kwargs)
+
+         # Renames the original function to "_{function_name}_sequential" and adds it to the same module.
+         # This is required because pickle requires all serialized functions to be accessible from a qualified
+         # module, which would not be the case for the original function as it gets overridden by the decorator.
+         if function.__module__ is not None:
+             module = importlib.import_module(function.__module__)
+             name = f"_{function.__name__}_sequential"
+             parent_qualname, parent_separator, old_qualname = function.__qualname__.rpartition(".")
+             qualname = f"{parent_qualname}{parent_separator}_{old_qualname}_sequential"
+             setattr(module, name, function)
+             getattr(module, name).__name__ = name
+             getattr(module, name).__qualname__ = qualname
+
+         return wrapped
+
+     return decorator
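A minimal sketch of the renaming side effect above (the `double` function is hypothetical; this assumes the decorator runs at module import time, and reuses the `pf` alias from the docstring example):

    import sys

    import parfun as pf

    @pf.parallel(split=pf.per_argument(values=pf.py_list.by_chunk), combine_with=pf.py_list.concat)
    def double(values):
        return [v * 2 for v in values]

    # The decorator re-exported the undecorated function under a pickle-friendly
    # qualified name in the same module.
    module = sys.modules[double.__module__]
    sequential = getattr(module, "_double_sequential")
    assert sequential([1, 2, 3]) == [2, 4, 6]  # Runs the original, sequential code.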
+
+
+ def parfun(
+     split: Callable[[NamedArguments], Tuple[NamedArguments, PartitionGenerator[NamedArguments]]],
+     combine_with: Callable[[Iterable[FunctionOutputType]], FunctionOutputType],
+     initial_partition_size: Optional[Union[int, Callable[[FunctionInputType], int]]] = None,
+     fixed_partition_size: Optional[Union[int, Callable[[FunctionInputType], int]]] = None,
+     profile: bool = False,
+     trace_export: Optional[str] = None,
+     partition_size_estimator_factory: Callable[[], PartitionSizeEstimator] = LinearRegessionEstimator,
+ ) -> Callable:
+     warnings.warn("parfun() is deprecated and will be removed in a future version.", DeprecationWarning)
+
+     return parallel(
+         split=split,
+         combine_with=combine_with,
+         initial_partition_size=initial_partition_size,
+         fixed_partition_size=fixed_partition_size,
+         profile=profile,
+         trace_export=trace_export,
+         partition_size_estimator_factory=partition_size_estimator_factory,
+     )
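Tying the decorator to a backend (see `parfun/entry_point.py` below), a hedged end-to-end sketch of this module's intended usage, reusing the docstring's own example function:

    import parfun as pf
    from parfun.entry_point import set_parallel_backend_context

    @pf.parallel(
        split=pf.per_argument(values=pf.py_list.by_chunk),
        combine_with=pf.py_list.concat,
    )
    def multiply_by_constant(values, constant):
        return [v * constant for v in values]

    # Partitions `values` into chunks, evaluates the chunks on multiprocessing
    # workers, then concatenates the partial lists into a single result.
    with set_parallel_backend_context("local_multiprocessing"):
        result = multiply_by_constant(list(range(8)), 3)

    assert result == [v * 3 for v in range(8)]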
parfun/entry_point.py ADDED
@@ -0,0 +1,180 @@
+ """
+ APIs to manage backends and integrate the toolkit with other projects.
+ """
+
+ import argparse
+ import atexit
+ import contextlib
+ import logging
+ import os
+ from contextvars import ContextVar, Token
+ from typing import Callable, Dict, Optional, Union
+
+ from parfun.backend.local_multiprocessing import LocalMultiprocessingBackend
+ from parfun.backend.local_single_process import LocalSingleProcessBackend
+ from parfun.backend.mixins import BackendEngine
+
+ _backend_engine: ContextVar[Optional[BackendEngine]] = ContextVar("_backend_engine", default=None)
+
+ BACKEND_REGISTRY: Dict[str, Callable] = {
+     "none": lambda *_args, **_kwargs: None,
+     "local_single_process": LocalSingleProcessBackend,
+     "local_multiprocessing": LocalMultiprocessingBackend,
+ }
+
+ try:
+     from parfun.backend.dask import DaskCurrentBackend, DaskLocalClusterBackend, DaskRemoteClusterBackend
+
+     BACKEND_REGISTRY["dask_local"] = DaskLocalClusterBackend
+     BACKEND_REGISTRY["dask_remote"] = DaskRemoteClusterBackend
+     BACKEND_REGISTRY["dask_current"] = DaskCurrentBackend
+ except ImportError:
+     logging.debug("Dask backends disabled. Use `pip install 'opengris-parfun[dask]'` to install Dask dependencies.")
+
+ try:
+     from parfun.backend.scaler import ScalerLocalBackend, ScalerRemoteBackend
+
+     BACKEND_REGISTRY["scaler_local"] = ScalerLocalBackend
+     BACKEND_REGISTRY["scaler_remote"] = ScalerRemoteBackend
+
+ except ImportError:
+     logging.debug(
+         "Scaler backends disabled. Use `pip install 'opengris-parfun[scaler]'` to install Scaler dependencies."
+     )
+
+
+ def set_parallel_backend(backend: Union[str, BackendEngine], *args, **kwargs) -> None:
+     """
+     Initializes and sets the current parfun backend.
+
+     .. code:: python
+
+         set_parallel_backend("local_multiprocessing", max_workers=4, is_process=False)
+
+     :param backend:
+         Supported backend options:
+
+         * ``"none"``: disables the current parallel backend.
+
+           Functions decorated with :py:func:`~parfun.decorators.parallel` will run sequentially as if not
+           decorated. Partitioning and combining functions will be ignored.
+
+         * ``"local_single_process"``: runs the tasks inside the calling Python process.
+
+           Functions decorated with :py:func:`~parfun.decorators.parallel` will partition the input data and run
+           the combining function on the output data, but will execute the function itself inside the calling
+           Python process. Mostly intended for debugging purposes.
+
+           See :py:class:`~parfun.backend.local_single_process.LocalSingleProcessBackend`.
+
+         * ``"local_multiprocessing"``: runs the tasks in parallel using Python ``multiprocessing`` processes.
+
+           See :py:class:`~parfun.backend.local_multiprocessing.LocalMultiprocessingBackend`.
+
+         * ``"scaler_local"``: runs the tasks in parallel using an internally managed Scaler cluster.
+
+           See :py:class:`~parfun.backend.scaler.ScalerLocalBackend`.
+
+         * ``"scaler_remote"``: runs the tasks in parallel using an externally managed Scaler cluster.
+
+           See :py:class:`~parfun.backend.scaler.ScalerRemoteBackend`.
+
+         * ``"dask_local"``: runs the tasks in parallel using an internally managed Dask cluster.
+
+           See :py:class:`~parfun.backend.dask.DaskLocalClusterBackend`.
+
+         * ``"dask_remote"``: runs the tasks in parallel using an externally managed Dask cluster.
+
+           See :py:class:`~parfun.backend.dask.DaskRemoteClusterBackend`.
+
+         * ``"dask_current"``: runs the tasks in parallel using the currently running Dask client
+           (:py:func:`~distributed.get_client`).
+
+           See :py:class:`~parfun.backend.dask.DaskCurrentBackend`.
+
+     :type backend: Union[str, BackendEngine]
+
+     :param args: additional positional parameters for the backend constructor.
+     :param kwargs: additional keyword parameters for the backend constructor.
+     :rtype: None
+     """
+     _cleanup_current_backend()
+     _set_parallel_backend(backend, *args, **kwargs)
+
+
+ @contextlib.contextmanager
+ def set_parallel_backend_context(backend: Union[str, BackendEngine], *args, **kwargs):
+     """
+     Sets a new parallel backend instance within a context manager's scope.
+
+     .. code:: python
+
+         with set_parallel_backend_context("local_single_process"):
+             some_parallel_computation()
+
+     :param backend: See :py:func:`set_parallel_backend`.
+     :type backend: Union[str, BackendEngine]
+     """
+     token = _set_parallel_backend(backend, *args, **kwargs)
+     try:
+         yield
+     finally:
+         _cleanup_current_backend()
+
+         _backend_engine.reset(token)
+
+
+ def get_parallel_backend() -> Optional[BackendEngine]:
+     """
+     :return: the current backend instance, or :py:obj:`None` if no backend is currently set.
+     :rtype: Optional[BackendEngine]
+     """
+     return _backend_engine.get()
+
+
+ def add_parallel_options(parser: argparse.ArgumentParser) -> None:
+     """
+     Adds the argparse options required to initialize this parallel toolkit.
+
+     :type parser: argparse.ArgumentParser
+     :rtype: None
+     """
+     group = parser.add_argument_group()
+     group.add_argument(
+         "--parallel-backend",
+         type=str,
+         choices=list(BACKEND_REGISTRY.keys()),
+         default="local_multiprocessing",
+         help="The backend engine selected to run code. If 'none', disables parallel computations.",
+     )
+
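For instance, a small sketch wiring these options into a CLI (the argument list passed to `parse_args` is illustrative):

    import argparse

    from parfun.entry_point import add_parallel_options, set_parallel_backend

    parser = argparse.ArgumentParser(description="Example CLI integrating parfun.")
    add_parallel_options(parser)
    args = parser.parse_args(["--parallel-backend", "local_single_process"])

    # argparse exposes `--parallel-backend` as the `parallel_backend` attribute.
    set_parallel_backend(args.parallel_backend)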
+
+ def _set_parallel_backend(backend: Union[str, BackendEngine], *args, **kwargs) -> Token:
+     if isinstance(backend, BackendEngine):
+         if len(args) > 0 or len(kwargs) > 0:
+             raise ValueError("Cannot pass additional arguments when passing a backend instance")
+
+         backend_instance = backend
+         backend_name = backend.__class__.__name__
+     elif backend in BACKEND_REGISTRY:
+         backend_instance = BACKEND_REGISTRY[backend](*args, **kwargs)
+         backend_name = backend
+     else:
+         raise ValueError(f"Supported parallel backends are: {set(BACKEND_REGISTRY.keys())}")
+
+     if backend != "none":
+         # Limits NumPy's OpenBLAS to a single thread so that each process only has one thread,
+         # making resources easier to manage.
+         os.environ["OPENBLAS_NUM_THREADS"] = "1"
+
+         logging.info(f"Set up parallel backend: {backend_name}")
+
+     return _backend_engine.set(backend_instance)
+
+
+ @atexit.register
+ def _cleanup_current_backend():
+     engine = _backend_engine.get()
+     if engine is not None:
+         engine.shutdown()
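Besides registry names, `_set_parallel_backend` also accepts an already-constructed `BackendEngine` instance, in which case no extra constructor arguments are allowed. A sketch (`max_workers` is taken from this module's own docstring example):

    from parfun.backend.local_multiprocessing import LocalMultiprocessingBackend
    from parfun.entry_point import set_parallel_backend

    # The instance is used as-is; its constructor arguments were already supplied here.
    backend = LocalMultiprocessingBackend(max_workers=4)
    set_parallel_backend(backend)

    # Passing extra arguments alongside an instance raises a ValueError:
    # set_parallel_backend(backend, 4)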
parfun/functions.py ADDED
@@ -0,0 +1,71 @@
+ import collections
+ import logging
+ from typing import Any, Callable, Deque, Iterable, Optional, Tuple
+
+ from parfun.backend.mixins import BackendSession, ProfiledFuture
+ from parfun.entry_point import get_parallel_backend
+
+
+ def parallel_map(func: Callable, *iterables, backend_session: Optional[BackendSession] = None) -> Iterable:
+     """
+     Similar to :py:meth:`concurrent.futures.Executor.map`, but lazily consumes and returns the iterators' content
+     as worker nodes become available.
+
+     .. code:: python
+
+         parallel_map(math.sqrt, [4, 9, 16, 25])  # [2.0, 3.0, 4.0, 5.0]
+
+         parallel_map(operator.add, [10, 7, 15], [12, 15, 5])  # [22, 22, 20]
+
+     :param backend_session: the parallel backend session. If `None`, creates a new session from the current
+         backend.
+     """
+
+     # Uses a generator function so that we can use deque.popleft() and thus discard the no longer required
+     # futures' references as we yield their results.
+     def result_generator(backend_session: BackendSession):
+         futures: Deque[ProfiledFuture] = collections.deque()
+         try:
+             for args in zip(*iterables):
+                 futures.append(backend_session.submit(func, *args))
+
+                 # Yields any finished future from the head of the queue.
+                 while len(futures) > 0 and futures[0].done():
+                     yield futures.popleft().result()
+
+             # Yields the remaining results.
+             while len(futures) > 0:
+                 yield futures.popleft().result()
+         finally:
+             # On failure, cancels all unfinished tasks.
+             for future in futures:
+                 future.cancel()
+
+     if backend_session is None:
+         current_backend = get_parallel_backend()
+
+         if current_backend is None:
+             logging.warning(f"no parallel backend engine set, run `{func.__name__}()` sequentially.")
+             return map(func, *iterables)
+
+         with current_backend.session() as current_backend_session:
+             return result_generator(current_backend_session)
+     else:
+         return result_generator(backend_session)
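A sketch of reusing one backend session across a call, so results stream back lazily while later tasks are still running (the backend name and inputs are illustrative):

    import math

    from parfun.entry_point import get_parallel_backend, set_parallel_backend

    set_parallel_backend("local_multiprocessing")
    backend = get_parallel_backend()

    # With an explicit session, the caller controls the session's lifetime and
    # can start consuming results before all tasks have completed.
    with backend.session() as session:
        for root in parallel_map(math.sqrt, [4, 9, 16, 25], backend_session=session):
            print(root)  # 2.0, 3.0, 4.0, 5.0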
+
+
+ def parallel_starmap(
+     func: Callable,
+     iterable: Iterable[Tuple[Any, ...]],
+     backend_session: Optional[BackendSession] = None
+ ) -> Iterable:
+     """
+     Similar to :py:func:`itertools.starmap`, but lazily consumes the argument tuples and evaluates them in
+     parallel as worker nodes become available.
+
+     .. code:: python
+
+         parallel_starmap(operator.add, [(10, 12), (7, 15), (15, 5)])  # [22, 22, 20]
+
+     """
+     yield from parallel_map(func, *zip(*iterable), backend_session=backend_session)
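The final ``zip(*iterable)`` call transposes the argument tuples into per-parameter iterables before delegating to ``parallel_map``; a small illustration:

    pairs = [(10, 12), (7, 15), (15, 5)]

    # zip(*pairs) turns rows of call arguments into columns of per-parameter
    # values: exactly the two iterables parallel_map(operator.add, ...) expects.
    firsts, seconds = zip(*pairs)
    assert firsts == (10, 7, 15)
    assert seconds == (12, 15, 5)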
parfun/kernel/function_signature.py ADDED
@@ -0,0 +1,197 @@
+ import collections
+ import inspect
+ from inspect import Parameter
+ from typing import Any, Callable, Dict, Optional, OrderedDict, Set, Tuple, Type
+
+ import attrs
+
+
+ @attrs.define(frozen=True)
+ class FunctionSignature:
+     """
+     Helper class to inspect a function's parameter and return types.
+
+     In Python 3.8+, whether an argument is positional only or keyword only can be specified using the ``/`` and
+     ``*`` syntax, respectively. As an example:
+
+         def f(pos1, pos2, /, pos_or_kwd, *, kwd1, kwd2):
+               -----------    ----------     ----------
+                    |              |              |
+                    |         Positional or      |
+                    |           keyword      Keyword only
+              Positional only
+
+     1. Everything before ``/`` is positional only;
+     2. everything after ``*`` is keyword only;
+     3. note that order matters: ``/`` must come before ``*``;
+     4. if positional only or keyword only is not explicitly specified through this syntax, all arguments are of
+        the positional or keyword kind.
+     """
+
+     args: OrderedDict[str, inspect.Parameter] = attrs.field()
+     kwargs: Dict[str, inspect.Parameter] = attrs.field()
+
+     has_var_arg: bool = attrs.field()
+     has_var_kwarg: bool = attrs.field()
+
+     return_type: Optional[Type] = attrs.field()
+
+     @classmethod
+     def from_function(cls, function: Callable) -> "FunctionSignature":
+         signature = inspect.signature(function)
+
+         if signature.return_annotation not in (inspect.Signature.empty, None):
+             return_type = signature.return_annotation
+         else:
+             return_type = None
+
+         parameters = list(signature.parameters.values())
+
+         args = collections.OrderedDict(
+             (p.name, p) for p in parameters if p.kind in [Parameter.POSITIONAL_OR_KEYWORD, Parameter.POSITIONAL_ONLY]
+         )
+         kwargs = {p.name: p for p in parameters if p.kind in [Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY]}
+
+         has_var_arg = any(p.kind == Parameter.VAR_POSITIONAL for p in parameters)
+         has_var_kwarg = any(p.kind == Parameter.VAR_KEYWORD for p in parameters)
+
+         return cls(
+             args=args, kwargs=kwargs, has_var_arg=has_var_arg, has_var_kwarg=has_var_kwarg, return_type=return_type
+         )
+
+     def assign(self, args, kwargs) -> "NamedArguments":
+         """
+         Categorizes and names the ``args`` and ``kwargs`` arguments based on the function signature.
+
+         Raises an exception if the arguments do not match the function's signature.
+
+         :returns: the assigned positional, keyword, and variadic parameters.
+         """
+
+         # Assigns positional arguments.
+
+         named_args = collections.OrderedDict(
+             (arg_type.name, arg_value) for arg_type, arg_value in zip(self.args.values(), args)
+         )
+
+         if len(args) > len(self.args):
+             if self.has_var_arg:
+                 var_args = tuple(args[len(named_args) :])
+             else:
+                 raise ValueError(f"expected {len(self.args)} arguments, got {len(args)}.")
+         else:
+             unassigned_args = [
+                 a
+                 for a in list(self.args.values())[len(args) :]
+                 if a.kind == Parameter.POSITIONAL_ONLY and a.default == Parameter.empty
+             ]
+             if len(unassigned_args) > 0:
+                 unassigned_arg_names = ", ".join(a.name for a in unassigned_args)
+                 raise ValueError(f"unassigned positional parameter(s): {unassigned_arg_names}.")
+
+             var_args = tuple()
+
+         # Assigns keyword arguments.
+
+         double_assigned_args = [a for a in kwargs.keys() if a in named_args]
+         if len(double_assigned_args) > 0:
+             double_assigned_arg_names = ", ".join(a for a in double_assigned_args)
+             raise ValueError(f"parameter(s) assigned twice: {double_assigned_arg_names}.")
+
+         if not self.has_var_kwarg:
+             invalid_kwargs = [a for a in kwargs.keys() if a not in self.kwargs]
+             if len(invalid_kwargs) > 0:
+                 invalid_kwarg_names = ", ".join(a for a in invalid_kwargs)
+                 raise ValueError(f"invalid keyword parameter(s): {invalid_kwarg_names}.")
+
+         unassigned_kwargs = [
+             a
+             for a in self.kwargs.values()
+             if a.default == Parameter.empty and a.name not in named_args and a.name not in kwargs
+         ]
+         if len(unassigned_kwargs) > 0:
+             unassigned_kwarg_names = ", ".join(a.name for a in unassigned_kwargs)
+             raise ValueError(f"unassigned keyword parameter(s): {unassigned_kwarg_names}.")
+
+         return NamedArguments(args=named_args, kwargs=kwargs, var_args=var_args)
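A short sketch of ``assign`` in action (the ``example`` function and its arguments are hypothetical):

    def example(a, b, *, c=0):
        return a + b + c

    signature = FunctionSignature.from_function(example)

    # Positional values are named after the signature's parameters; keyword
    # values are validated against it.
    named = signature.assign(args=(1, 2), kwargs={"c": 3})
    assert named["a"] == 1 and named["c"] == 3

    # The named arguments can be turned back into a call.
    var_args, call_kwargs = named.as_args_kwargs()
    assert example(*var_args, **call_kwargs) == 6

    # signature.assign(args=(1, 2, 3), kwargs={})  # ValueError: expected 2 arguments, got 3.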
+
+
+ @attrs.define(frozen=True)
+ class NamedArguments:
+     """Contains the argument values of a function call, associated with their respective names based on the
+     function's signature."""
+
+     args: OrderedDict[str, Any] = attrs.field(factory=OrderedDict)
+     kwargs: Dict[str, Any] = attrs.field(factory=dict)
+
+     var_args: Tuple = attrs.field(default=tuple())
+
+     def __getitem__(self, name: str) -> Any:
+         """Gets the value of an argument by name."""
+
+         if name in self.args:
+             return self.args[name]
+         elif name in self.kwargs:
+             return self.kwargs[name]
+         else:
+             raise KeyError(f"unknown argument name: {name}.")
+
+     def as_args_kwargs(self) -> Tuple[Tuple, Dict[str, Any]]:
+         """Returns a tuple of positional and keyword parameters that can be used to call the function."""
+
+         return self.var_args, {**self.args, **self.kwargs}
+
+     def keys(self) -> Set[str]:
+         """Returns all argument names."""
+
+         keys = set(self.args.keys())
+         keys.update(self.kwargs.keys())
+         return keys
+
+     def split(self, arg_names: Set[str]) -> Tuple["NamedArguments", "NamedArguments"]:
+         """Returns two subsets of the arguments: those that match the provided argument names, and those that
+         do not."""
+
+         includes = NamedArguments(
+             args=OrderedDict((name, value) for name, value in self.args.items() if name in arg_names),
+             kwargs={name: value for name, value in self.kwargs.items() if name in arg_names},
+             var_args=tuple(),
+         )
+         excludes = NamedArguments(
+             args=OrderedDict((name, value) for name, value in self.args.items() if name not in arg_names),
+             kwargs={name: value for name, value in self.kwargs.items() if name not in arg_names},
+             var_args=self.var_args,
+         )
+
+         return includes, excludes
+
+     def reassigned(self, **changes) -> "NamedArguments":
+         """Returns a new ``NamedArguments`` object with some of the values reassigned.
+
+         .. code:: python
+
+             named_args.reassigned(arg_1="new_value", arg_2="new_value")
+
+         """
+
+         args = self.args.copy()
+         kwargs = self.kwargs.copy()
+
+         for arg_name, arg_value in changes.items():
+             if arg_name in args:
+                 args[arg_name] = arg_value
+             elif arg_name in kwargs:
+                 kwargs[arg_name] = arg_value
+             else:
+                 raise ValueError(f"invalid argument key: `{arg_name}`.")
+
+         return attrs.evolve(self, args=args, kwargs=kwargs)
+
+     def merge(self, other: "NamedArguments") -> "NamedArguments":
+         """Returns a new ``NamedArguments`` object with the values of both objects merged."""
+
+         args = self.args.copy()
+         args.update(other.args)
+
+         return NamedArguments(
+             args=args, kwargs={**self.kwargs, **other.kwargs}, var_args=self.var_args + other.var_args
+         )
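As a usage sketch (the argument names are hypothetical), this is roughly how the kernel can separate the arguments to partition from the constant ones, rewrite a partition's value, and recombine them for a call:

    from collections import OrderedDict

    named = NamedArguments(args=OrderedDict(x=1, y=2), kwargs={"z": 3})

    # split() partitions the arguments by name.
    partitioned, constant = named.split({"x"})
    assert partitioned.keys() == {"x"} and constant.keys() == {"y", "z"}

    # reassigned() swaps in a new value (e.g. one partition of `x`), and merge()
    # recombines both halves before calling the target function.
    partition = partitioned.reassigned(x=42)
    merged = partition.merge(constant)
    assert merged["x"] == 42 and merged["y"] == 2 and merged["z"] == 3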