runnable 0.35.0__py3-none-any.whl → 0.36.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. extensions/job_executor/__init__.py +3 -4
  2. extensions/job_executor/emulate.py +106 -0
  3. extensions/job_executor/k8s.py +8 -8
  4. extensions/job_executor/local_container.py +13 -14
  5. extensions/nodes/__init__.py +0 -0
  6. extensions/nodes/conditional.py +7 -5
  7. extensions/nodes/fail.py +72 -0
  8. extensions/nodes/map.py +350 -0
  9. extensions/nodes/parallel.py +159 -0
  10. extensions/nodes/stub.py +89 -0
  11. extensions/nodes/success.py +72 -0
  12. extensions/nodes/task.py +92 -0
  13. extensions/pipeline_executor/__init__.py +24 -26
  14. extensions/pipeline_executor/argo.py +20 -20
  15. extensions/pipeline_executor/emulate.py +112 -0
  16. extensions/pipeline_executor/local.py +4 -4
  17. extensions/pipeline_executor/local_container.py +19 -79
  18. extensions/pipeline_executor/mocked.py +5 -9
  19. extensions/pipeline_executor/retry.py +6 -10
  20. runnable/__init__.py +0 -10
  21. runnable/catalog.py +1 -21
  22. runnable/cli.py +0 -59
  23. runnable/context.py +519 -28
  24. runnable/datastore.py +51 -54
  25. runnable/defaults.py +12 -34
  26. runnable/entrypoints.py +82 -440
  27. runnable/exceptions.py +35 -34
  28. runnable/executor.py +13 -20
  29. runnable/names.py +1 -1
  30. runnable/nodes.py +16 -15
  31. runnable/parameters.py +2 -2
  32. runnable/sdk.py +66 -205
  33. runnable/tasks.py +62 -81
  34. runnable/utils.py +6 -268
  35. {runnable-0.35.0.dist-info → runnable-0.36.1.dist-info}/METADATA +1 -4
  36. runnable-0.36.1.dist-info/RECORD +72 -0
  37. {runnable-0.35.0.dist-info → runnable-0.36.1.dist-info}/entry_points.txt +8 -7
  38. extensions/nodes/nodes.py +0 -778
  39. extensions/tasks/torch.py +0 -286
  40. extensions/tasks/torch_config.py +0 -76
  41. runnable-0.35.0.dist-info/RECORD +0 -66
  42. {runnable-0.35.0.dist-info → runnable-0.36.1.dist-info}/WHEEL +0 -0
  43. {runnable-0.35.0.dist-info → runnable-0.36.1.dist-info}/licenses/LICENSE +0 -0
extensions/tasks/torch.py DELETED
@@ -1,286 +0,0 @@
1
- import importlib
2
- import logging
3
- import os
4
- import random
5
- import string
6
- from datetime import datetime
7
- from pathlib import Path
8
- from typing import Any, Optional
9
-
10
- from pydantic import BaseModel, ConfigDict, Field, field_serializer, model_validator
11
- from ruamel.yaml import YAML
12
-
13
- import runnable.context as context
14
- from extensions.tasks.torch_config import EasyTorchConfig, TorchConfig
15
- from runnable import Catalog, defaults
16
- from runnable.datastore import StepAttempt
17
- from runnable.tasks import BaseTaskType
18
- from runnable.utils import get_module_and_attr_names
19
-
20
- logger = logging.getLogger(defaults.LOGGER_NAME)
21
-
22
- logger = logging.getLogger(defaults.LOGGER_NAME)
23
-
24
- try:
25
- from torch.distributed.elastic.multiprocessing.api import DefaultLogsSpecs, Std
26
- from torch.distributed.launcher.api import LaunchConfig, elastic_launch
27
-
28
- except ImportError as e:
29
- logger.exception("torch is not installed")
30
- raise Exception("torch is not installed") from e
31
-
32
-
33
- def get_min_max_nodes(nnodes: str) -> tuple[int, int]:
34
- min_nodes, max_nodes = (int(x) for x in nnodes.split(":"))
35
- return min_nodes, max_nodes
36
-
37
-
38
- class TorchTaskType(BaseTaskType, TorchConfig):
39
- task_type: str = Field(default="torch", serialization_alias="command_type")
40
- catalog: Optional[Catalog] = Field(default=None, alias="catalog")
41
- command: str
42
-
43
- @model_validator(mode="before")
44
- @classmethod
45
- def check_secrets_and_returns(cls, data: Any) -> Any:
46
- if isinstance(data, dict):
47
- if "secrets" in data and data["secrets"]:
48
- raise ValueError("'secrets' is not supported for torch")
49
- if "returns" in data and data["returns"]:
50
- raise ValueError("'secrets' is not supported for torch")
51
- return data
52
-
53
- def get_summary(self) -> dict[str, Any]:
54
- return self.model_dump(by_alias=True, exclude_none=True)
55
-
56
- @property
57
- def _context(self):
58
- return context.run_context
59
-
60
- def _get_launch_config(self) -> LaunchConfig:
61
- internal_log_spec = InternalLogSpecs(**self.model_dump(exclude_none=True))
62
- log_spec: DefaultLogsSpecs = DefaultLogsSpecs(
63
- **internal_log_spec.model_dump(exclude_none=True)
64
- )
65
- easy_torch_config = EasyTorchConfig(
66
- **self.model_dump(
67
- exclude_none=True,
68
- )
69
- )
70
- print("###", easy_torch_config)
71
- print("###", easy_torch_config)
72
- launch_config = LaunchConfig(
73
- **easy_torch_config.model_dump(
74
- exclude_none=True,
75
- ),
76
- logs_specs=log_spec,
77
- run_id=self._context.run_id,
78
- )
79
- logger.info(f"launch_config: {launch_config}")
80
- return launch_config
81
-
82
- def execute_command(
83
- self,
84
- map_variable: defaults.TypeMapVariable = None,
85
- ):
86
- assert map_variable is None, "map_variable is not supported for torch"
87
-
88
- # The below should happen only if we are in the node that we want to execute
89
- # For a single node, multi worker setup, this should be the entry point
90
- # For a multi-node, we need to:
91
- # - create a service config
92
- # - Create a stateful set with number of nodes
93
- # - Create a job to run the torch.distributed.launcher.api.elastic_launch on every node
94
- # - the entry point to runnable could be a way to trigger execution instead of scaling
95
- is_execute = os.environ.get("RUNNABLE_TORCH_EXECUTE", "true") == "true"
96
-
97
- _, max_nodes = get_min_max_nodes(self.nnodes)
98
-
99
- if max_nodes > 1 and not is_execute:
100
- executor = self._context.executor
101
- executor.scale_up(self)
102
- return StepAttempt(
103
- status=defaults.SUCCESS,
104
- start_time=str(datetime.now()),
105
- end_time=str(datetime.now()),
106
- attempt_number=1,
107
- message="Triggered a scale up",
108
- )
109
-
110
- # The below should happen only if we are in the node that we want to execute
111
- # For a single node, multi worker setup, this should be the entry point
112
- # For a multi-node, we need to:
113
- # - create a service config
114
- # - Create a stateful set with number of nodes
115
- # - Create a job to run the torch.distributed.launcher.api.elastic_launch on every node
116
- # - the entry point to runnable could be a way to trigger execution instead of scaling
117
- is_execute = os.environ.get("RUNNABLE_TORCH_EXECUTE", "true") == "true"
118
-
119
- _, max_nodes = get_min_max_nodes(self.nnodes)
120
-
121
- if max_nodes > 1 and not is_execute:
122
- executor = self._context.executor
123
- executor.scale_up(self)
124
- return StepAttempt(
125
- status=defaults.SUCCESS,
126
- start_time=str(datetime.now()),
127
- end_time=str(datetime.now()),
128
- attempt_number=1,
129
- message="Triggered a scale up",
130
- )
131
-
132
- launch_config = self._get_launch_config()
133
- print("###****", launch_config)
134
- print("###****", launch_config)
135
- logger.info(f"launch_config: {launch_config}")
136
-
137
- # ENV variables are shared with the subprocess, use that as communication
138
- os.environ["RUNNABLE_TORCH_COMMAND"] = self.command
139
- os.environ["RUNNABLE_TORCH_PARAMETERS_FILES"] = (
140
- self._context.parameters_file or ""
141
- )
142
- os.environ["RUNNABLE_TORCH_RUN_ID"] = self._context.run_id
143
-
144
- launcher = elastic_launch(
145
- launch_config,
146
- training_subprocess,
147
- )
148
- try:
149
- launcher()
150
- attempt_log = StepAttempt(
151
- status=defaults.SUCCESS,
152
- start_time=str(datetime.now()),
153
- end_time=str(datetime.now()),
154
- attempt_number=1,
155
- )
156
- except Exception as e:
157
- attempt_log = StepAttempt(
158
- status=defaults.FAIL,
159
- start_time=str(datetime.now()),
160
- end_time=str(datetime.now()),
161
- attempt_number=1,
162
- )
163
- logger.error(f"Error executing TorchNode: {e}")
164
- finally:
165
- # This can only come from the subprocess
166
- if Path("proc_logs").exists():
167
- # Move .catalog and torch_logs to the parent node's catalog location
168
- self._context.catalog_handler.put(
169
- "proc_logs/**/*", allow_file_not_found_exc=True
170
- )
171
-
172
- # TODO: This is not working!!
173
- if self.log_dir:
174
- self._context.catalog_handler.put(
175
- self.log_dir + "/**/*", allow_file_not_found_exc=True
176
- )
177
-
178
- delete_env_vars_with_prefix("RUNNABLE_TORCH")
179
- logger.info(f"attempt_log: {attempt_log}")
180
-
181
- return attempt_log
182
-
183
-
184
- # This internal model makes it easier to extract the required fields
185
- # of log specs from user specification.
186
- # https://github.com/pytorch/pytorch/blob/main/torch/distributed/elastic/multiprocessing/api.py#L243
187
- class InternalLogSpecs(BaseModel):
188
- log_dir: Optional[str] = Field(default="torch_logs")
189
- redirects: str = Field(default="0") # Std.NONE
190
- tee: str = Field(default="0") # Std.NONE
191
- local_ranks_filter: Optional[set[int]] = Field(default=None)
192
-
193
- model_config = ConfigDict(extra="ignore")
194
-
195
- @field_serializer("redirects")
196
- def convert_redirects(self, redirects: str) -> Std | dict[int, Std]:
197
- return Std.from_str(redirects)
198
-
199
- @field_serializer("tee")
200
- def convert_tee(self, tee: str) -> Std | dict[int, Std]:
201
- return Std.from_str(tee)
202
-
203
-
204
- def delete_env_vars_with_prefix(prefix):
205
- to_delete = [] # List to keep track of variables to delete
206
-
207
- # Iterate over a list of all environment variable keys
208
- for var in os.environ:
209
- if var.startswith(prefix):
210
- to_delete.append(var)
211
-
212
- # Delete each of the variables collected
213
- for var in to_delete:
214
- del os.environ[var]
215
-
216
-
217
- def training_subprocess():
218
- """
219
- This function is called by the torch.distributed.launcher.api.elastic_launch
220
- It happens in a subprocess and is responsible for executing the user's function
221
-
222
- It is unrelated to the actual node execution, so any cataloging, run_log_store should be
223
- handled to match to main process.
224
-
225
- We have these variables to use:
226
-
227
- os.environ["RUNNABLE_TORCH_COMMAND"] = self.executable.command
228
- os.environ["RUNNABLE_TORCH_PARAMETERS_FILES"] = (
229
- self._context.parameters_file or ""
230
- )
231
- os.environ["RUNNABLE_TORCH_RUN_ID"] = self._context.run_id
232
- os.environ["RUNNABLE_TORCH_TORCH_LOGS"] = self.log_dir or ""
233
-
234
- """
235
- from runnable import PythonJob # noqa: F401
236
-
237
- command = os.environ.get("RUNNABLE_TORCH_COMMAND")
238
- assert command, "Command is not provided"
239
-
240
- run_id = os.environ.get("RUNNABLE_TORCH_RUN_ID", "")
241
- parameters_files = os.environ.get("RUNNABLE_TORCH_PARAMETERS_FILES", "")
242
-
243
- process_run_id = (
244
- run_id
245
- + "-"
246
- + os.environ.get("RANK", "")
247
- + "-"
248
- + "".join(random.choices(string.ascii_lowercase, k=3))
249
- )
250
- os.environ["TORCH_DISTRIBUTED_DEBUG"] = "DETAIL"
251
-
252
- # In this subprocess there should not be any RUNNABLE environment variables
253
- delete_env_vars_with_prefix("RUNNABLE_")
254
-
255
- module_name, func_name = get_module_and_attr_names(command)
256
- module = importlib.import_module(module_name)
257
-
258
- callable_obj = getattr(module, func_name)
259
-
260
- # The job runs with the default configuration
261
- # ALl the execution logs are stored in .catalog
262
- job = PythonJob(function=callable_obj)
263
-
264
- config_content = {
265
- "catalog": {"type": "file-system", "config": {"catalog_location": "proc_logs"}}
266
- }
267
-
268
- temp_config_file = Path("runnable-config.yaml")
269
- with open(str(temp_config_file), "w", encoding="utf-8") as config_file:
270
- yaml = YAML(typ="safe", pure=True)
271
- yaml.dump(config_content, config_file)
272
-
273
- job.execute(
274
- parameters_file=parameters_files,
275
- job_id=process_run_id,
276
- )
277
-
278
- # delete the temp config file
279
- temp_config_file.unlink()
280
-
281
- from runnable.context import run_context
282
-
283
- job_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id)
284
-
285
- if job_log.status == defaults.FAIL:
286
- raise Exception(f"Job {process_run_id} failed")
@@ -1,76 +0,0 @@
1
- from enum import Enum
2
- from typing import Any, Optional
3
-
4
- from pydantic import BaseModel, ConfigDict, Field, computed_field
5
-
6
-
7
- class StartMethod(str, Enum):
8
- spawn = "spawn"
9
- fork = "fork"
10
- forkserver = "forkserver"
11
-
12
-
13
- ## The idea is the following:
14
- # Users can configure any of the options present in TorchConfig class.
15
- # The LaunchConfig class will be created from TorchConfig.
16
- # The LogSpecs is sent as a parameter to the launch config.
17
-
18
- ## NO idea of standalone and how to send it
19
-
20
-
21
- # The user sees this as part of the config of the node.
22
- # It is kept as similar as possible to torchrun
23
- class TorchConfig(BaseModel):
24
- model_config = ConfigDict(extra="forbid")
25
-
26
- # excluded as LaunchConfig requires min and max nodes
27
- nnodes: str = Field(default="1:1", exclude=True, description="min:max")
28
- nproc_per_node: int = Field(default=1, description="Number of processes per node")
29
-
30
- # will be used to create the log specs
31
- # But they are excluded from dump as logs specs is a class for LaunchConfig
32
- # from_str("0") -> Std.NONE
33
- # from_str("1") -> Std.OUT
34
- # from_str("0:3,1:0,2:1,3:2") -> {0: Std.ALL, 1: Std.NONE, 2: Std.OUT, 3: Std.ERR}
35
- log_dir: Optional[str] = Field(default="torch_logs", exclude=True)
36
- redirects: str = Field(default="0", exclude=True) # Std.NONE
37
- tee: str = Field(default="0", exclude=True) # Std.NONE
38
- local_ranks_filter: Optional[set[int]] = Field(default=None, exclude=True)
39
-
40
- role: str | None = Field(default=None)
41
-
42
- # run_id would be the run_id of the context
43
- # and sent at the creation of the LaunchConfig
44
-
45
- # This section is about the communication between nodes/processes
46
- rdzv_backend: str | None = Field(default="")
47
- rdzv_endpoint: str | None = Field(default="")
48
- rdzv_configs: dict[str, Any] = Field(default_factory=dict)
49
- rdzv_timeout: int | None = Field(default=None)
50
-
51
- max_restarts: int | None = Field(default=None)
52
- monitor_interval: float | None = Field(default=None)
53
- start_method: str | None = Field(default=StartMethod.spawn)
54
- log_line_prefix_template: str | None = Field(default=None)
55
- local_addr: Optional[str] = None
56
-
57
- # https://github.com/pytorch/pytorch/blob/main/torch/distributed/run.py#L753
58
- # master_addr: str | None = Field(default="localhost")
59
- # master_port: str | None = Field(default="29500")
60
- # training_script: str = Field(default="dummy_training_script")
61
- # training_script_args: str = Field(default="")
62
-
63
-
64
- class EasyTorchConfig(TorchConfig):
65
- model_config = ConfigDict(extra="ignore")
66
-
67
- # TODO: Validate min < max
68
- @computed_field # type: ignore
69
- @property
70
- def min_nodes(self) -> int:
71
- return int(self.nnodes.split(":")[0])
72
-
73
- @computed_field # type: ignore
74
- @property
75
- def max_nodes(self) -> int:
76
- return int(self.nnodes.split(":")[1])
@@ -1,66 +0,0 @@
1
- extensions/README.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
- extensions/catalog/README.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- extensions/catalog/any_path.py,sha256=atB5gWPRX6ptW6zwYeCVb_fh0qhs7WAFO9HIsnMZl98,7350
5
- extensions/catalog/file_system.py,sha256=T_qFPFfrmykoAMc1rjNi_DBb437me8WPRcFglwAK744,1767
6
- extensions/catalog/minio.py,sha256=R3GvfCxN1GTcs4bQIAWh79_GHDTVd14gnpKlzwFeKUI,2363
7
- extensions/catalog/pyproject.toml,sha256=lLNxY6v04c8I5QK_zKw_E6sJTArSJRA_V-79ktaA3Hk,279
8
- extensions/catalog/s3.py,sha256=Sw5t8_kVRprn3uGGJCiHn7M9zw1CLaCOFj6YErtfG0o,287
9
- extensions/job_executor/README.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
10
- extensions/job_executor/__init__.py,sha256=VeLuYCcShCIYT0TNtAXfUF9tOk4ZHoLzdTEvbsz0spM,5870
11
- extensions/job_executor/k8s.py,sha256=Jl0s3YryISx-SJIhDhyNskzlUlhy4ynBHEc9DfAXjAY,16394
12
- extensions/job_executor/k8s_job_spec.yaml,sha256=7aFpxHdO_p6Hkc3YxusUOuAQTD1Myu0yTPX9DrhxbOg,1158
13
- extensions/job_executor/local.py,sha256=3ZbCFXBvbLlMp10JTmQJJrjBKG2keHI6SH8hEvmHDkA,2230
14
- extensions/job_executor/local_container.py,sha256=1JcLJ0zrNSNHdubrSO9miN54iwvPLHqKMZ08aOC8WWo,6886
15
- extensions/job_executor/pyproject.toml,sha256=UIEgiCYHTXcRWSByNMFuKJFKgxTBpQqTqyUecIsb_Vc,286
16
- extensions/nodes/README.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17
- extensions/nodes/conditional.py,sha256=m4DGxjqWpjNd2KQPAdVSJ6ridt1BDx2Lt6kmEQa9ghY,8594
18
- extensions/nodes/nodes.py,sha256=s9ub1dqy4qHjRQG6YElCdL7rCOTYNs9RUIrStZ6tEB4,28256
19
- extensions/nodes/pyproject.toml,sha256=YTu-ETN3JNFSkMzzWeOwn4m-O2nbRH-PmiPBALDCUw4,278
20
- extensions/pipeline_executor/README.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
21
- extensions/pipeline_executor/__init__.py,sha256=wfigTL2T9OHrmE8b2Ydmb8h6hr-oF--Yc2FectC7WaY,24623
22
- extensions/pipeline_executor/argo.py,sha256=17hHj3L5oIkoOpCSSbZlliLnOUoN5_JpK_DY0ELWXac,38233
23
- extensions/pipeline_executor/local.py,sha256=6oWUJ6b6NvIkpeQJBoCT1hbfX4_6WCB4HzMgHZ4ik1A,1887
24
- extensions/pipeline_executor/local_container.py,sha256=3kZ2QCsrq_YjH9dcAz8v05knKShQ_JtbIU-IA_-G538,12724
25
- extensions/pipeline_executor/mocked.py,sha256=0sMmypuvstBIv9uQg-WAcPrF3oOFpeEXNi6N8Nzdnl0,5680
26
- extensions/pipeline_executor/pyproject.toml,sha256=ykTX7srR10PBYb8LsIwEj8vIPPIEZQ5V_R7VYbZ-ido,291
27
- extensions/pipeline_executor/retry.py,sha256=6ClFXJYtr0M6nWIZiI-mbUGshobOtVH_KADN8JCfvH0,6881
28
- extensions/run_log_store/README.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
29
- extensions/run_log_store/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
30
- extensions/run_log_store/any_path.py,sha256=0nN_LHbm2W6AHkerQmsVHq3EoybFQF8lxpCicacHo8Y,2861
31
- extensions/run_log_store/chunked_fs.py,sha256=wHMKcAx6uFI4OOTp7QWCdGq9WvEFesbLp9VxHZU28l0,3341
32
- extensions/run_log_store/chunked_minio.py,sha256=Itfkw4Ycf0uLCqxH3Uk_itmVgT7ipJp05yKfD22WBiY,4007
33
- extensions/run_log_store/file_system.py,sha256=hhrbhSnuzv8yzBr6DAu45NT8-sawPP86WA2-LY70vjw,2781
34
- extensions/run_log_store/generic_chunked.py,sha256=EnhRxlqm1jG-Tdxul4sY8OeCX5fK9FY2v8DZanX9-5o,20455
35
- extensions/run_log_store/minio.py,sha256=omrKDSdRzmnVBg9xXkkdQb-icBIgBDRdpmwGRlMyCGk,3453
36
- extensions/run_log_store/pyproject.toml,sha256=YnmXsFvFG9uv_c0spLYBsNI_1sbktqxtHsOuClyvZ3g,288
37
- extensions/run_log_store/db/implementation_FF.py,sha256=euTnh0xzNF0e_DyfHQ4W-kG1AwTr8u7OuO3_cZkR5bM,5237
38
- extensions/run_log_store/db/integration_FF.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
39
- extensions/secrets/README.md,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
40
- extensions/secrets/dotenv.py,sha256=nADHXI6KJ_LUYOIe5EbtYH-21OBebSNVr0Pjb1GlZ7w,1573
41
- extensions/secrets/pyproject.toml,sha256=mLJNImNcBlbLKHh-0ugVWT9V83R4RibyyYDtBCSqVF4,282
42
- extensions/tasks/torch.py,sha256=oeXRkmuttFIAuBwH7-h4SOVXMDOZXX5mvqI2aFrR3Vo,10283
43
- extensions/tasks/torch_config.py,sha256=UjfMitT-TXASRDGR30I2vDRnyk7JQnR-5CsOVidjpSY,2833
44
- runnable/__init__.py,sha256=eRXLgO-iiSUmNkjjzBjWdBP7Fp--I_vnImyhoGxZUek,709
45
- runnable/catalog.py,sha256=4msQxLhLKlsDDrHFnGauPYe-Or-q9g8_RYCn_4dpxaU,4466
46
- runnable/cli.py,sha256=3BiKSj95h2Drn__YlchMPZ5rBMafuRb2OGIsVpbsO5Y,8788
47
- runnable/context.py,sha256=by5uepmuCP0dmM9BmsliXihSes5QEFejwAsmekcqylE,1388
48
- runnable/datastore.py,sha256=ZobM1aVkgeUJ2fZYt63IFDsoNzObwc93hdByegS5YKQ,32396
49
- runnable/defaults.py,sha256=3o9IVGryyCE6PoQTOoaIaHHTbJGEzmdXMcwzOhwAYoI,3518
50
- runnable/entrypoints.py,sha256=1xCbWVUQLGmg5gkWnAVWFLAUf6j4avP9azX_vuGQUMY,18985
51
- runnable/exceptions.py,sha256=LFbp0-Qxg2PAMLEVt7w2whhBxSG-5pzUEv5qN-Rc4_c,3003
52
- runnable/executor.py,sha256=Jr9yJtSH7CzjXJLWx3VWIUAQblstuGqzpFtajv7d39M,15348
53
- runnable/graph.py,sha256=poQz5zcvq89ju_u5sYlunQLPbHnXTaUmjcvstPwvT4U,16536
54
- runnable/names.py,sha256=vn92Kv9ANROYSZX6Z4z1v_WA3WiEdIYmG6KEStBFZug,8134
55
- runnable/nodes.py,sha256=CWfKVuGNaKSQpvFYYE1gEiTNouG0xPaA8KKaOxFr8EI,16733
56
- runnable/parameters.py,sha256=u77CdqqDAbVdzNeBFPNUfGnWPy9-SpBVmwEJ56xmDm8,5289
57
- runnable/pickler.py,sha256=ydJ_eti_U1F4l-YacFp7BWm6g5vTn04UXye25S1HVok,2684
58
- runnable/sdk.py,sha256=1gerGsq6EMSbDh2-Ey1vk6e0Sls55t9R29KlblNahi0,36793
59
- runnable/secrets.py,sha256=4L_dBFxTgr8r_hHUD6RlZEtqaOHDRsFG5PXO5wlvMI0,2324
60
- runnable/tasks.py,sha256=lOtCninvosGI2bNIzblrzNa-lN7TMwel1KQ1g23M85A,32088
61
- runnable/utils.py,sha256=hBr7oGwGL2VgfITlQCTz-a1iwvvf7Mfl-HY8UdENZac,19929
62
- runnable-0.35.0.dist-info/METADATA,sha256=CgZbaiNCY_mUrcdyOGYV_6zkVwSrGMzqbUdrKQ-LL0U,10166
63
- runnable-0.35.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
64
- runnable-0.35.0.dist-info/entry_points.txt,sha256=bLH1QXcc-G8xgJTi4wf6SYQnsG_BxRRvobwa9dYm-js,1935
65
- runnable-0.35.0.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
66
- runnable-0.35.0.dist-info/RECORD,,