runnable 0.1.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- runnable/__init__.py +34 -0
- runnable/catalog.py +141 -0
- runnable/cli.py +272 -0
- runnable/context.py +34 -0
- runnable/datastore.py +687 -0
- runnable/defaults.py +182 -0
- runnable/entrypoints.py +448 -0
- runnable/exceptions.py +94 -0
- runnable/executor.py +421 -0
- runnable/experiment_tracker.py +139 -0
- runnable/extensions/catalog/__init__.py +21 -0
- runnable/extensions/catalog/file_system/__init__.py +0 -0
- runnable/extensions/catalog/file_system/implementation.py +227 -0
- runnable/extensions/catalog/k8s_pvc/__init__.py +0 -0
- runnable/extensions/catalog/k8s_pvc/implementation.py +16 -0
- runnable/extensions/catalog/k8s_pvc/integration.py +59 -0
- runnable/extensions/executor/__init__.py +725 -0
- runnable/extensions/executor/argo/__init__.py +0 -0
- runnable/extensions/executor/argo/implementation.py +1183 -0
- runnable/extensions/executor/argo/specification.yaml +51 -0
- runnable/extensions/executor/k8s_job/__init__.py +0 -0
- runnable/extensions/executor/k8s_job/implementation_FF.py +259 -0
- runnable/extensions/executor/k8s_job/integration_FF.py +69 -0
- runnable/extensions/executor/local/__init__.py +0 -0
- runnable/extensions/executor/local/implementation.py +70 -0
- runnable/extensions/executor/local_container/__init__.py +0 -0
- runnable/extensions/executor/local_container/implementation.py +361 -0
- runnable/extensions/executor/mocked/__init__.py +0 -0
- runnable/extensions/executor/mocked/implementation.py +189 -0
- runnable/extensions/experiment_tracker/__init__.py +0 -0
- runnable/extensions/experiment_tracker/mlflow/__init__.py +0 -0
- runnable/extensions/experiment_tracker/mlflow/implementation.py +94 -0
- runnable/extensions/nodes.py +655 -0
- runnable/extensions/run_log_store/__init__.py +0 -0
- runnable/extensions/run_log_store/chunked_file_system/__init__.py +0 -0
- runnable/extensions/run_log_store/chunked_file_system/implementation.py +106 -0
- runnable/extensions/run_log_store/chunked_k8s_pvc/__init__.py +0 -0
- runnable/extensions/run_log_store/chunked_k8s_pvc/implementation.py +21 -0
- runnable/extensions/run_log_store/chunked_k8s_pvc/integration.py +61 -0
- runnable/extensions/run_log_store/db/implementation_FF.py +157 -0
- runnable/extensions/run_log_store/db/integration_FF.py +0 -0
- runnable/extensions/run_log_store/file_system/__init__.py +0 -0
- runnable/extensions/run_log_store/file_system/implementation.py +136 -0
- runnable/extensions/run_log_store/generic_chunked.py +541 -0
- runnable/extensions/run_log_store/k8s_pvc/__init__.py +0 -0
- runnable/extensions/run_log_store/k8s_pvc/implementation.py +21 -0
- runnable/extensions/run_log_store/k8s_pvc/integration.py +56 -0
- runnable/extensions/secrets/__init__.py +0 -0
- runnable/extensions/secrets/dotenv/__init__.py +0 -0
- runnable/extensions/secrets/dotenv/implementation.py +100 -0
- runnable/extensions/secrets/env_secrets/__init__.py +0 -0
- runnable/extensions/secrets/env_secrets/implementation.py +42 -0
- runnable/graph.py +464 -0
- runnable/integration.py +205 -0
- runnable/interaction.py +404 -0
- runnable/names.py +546 -0
- runnable/nodes.py +501 -0
- runnable/parameters.py +183 -0
- runnable/pickler.py +102 -0
- runnable/sdk.py +472 -0
- runnable/secrets.py +95 -0
- runnable/tasks.py +395 -0
- runnable/utils.py +630 -0
- runnable-0.3.0.dist-info/METADATA +437 -0
- runnable-0.3.0.dist-info/RECORD +69 -0
- {runnable-0.1.0.dist-info → runnable-0.3.0.dist-info}/WHEEL +1 -1
- runnable-0.3.0.dist-info/entry_points.txt +44 -0
- runnable-0.1.0.dist-info/METADATA +0 -16
- runnable-0.1.0.dist-info/RECORD +0 -6
- /runnable/{.gitkeep → extensions/__init__.py} +0 -0
- {runnable-0.1.0.dist-info → runnable-0.3.0.dist-info}/LICENSE +0 -0
runnable/__init__.py
CHANGED
@@ -0,0 +1,34 @@
|
|
1
|
+
# ruff: noqa

# TODO: Might need to add Rich to pyinstaller part
import logging
from logging.config import dictConfig

from runnable import defaults

# Configure logging before the runnable.* imports below so that any
# module-level loggers created by those modules inherit this configuration.
dictConfig(defaults.LOGGING_CONFIG)
logger = logging.getLogger(defaults.LOGGER_NAME)

# Public interactive API, re-exported at package level.
from runnable.interaction import (
    end_interactive_session,
    get_experiment_tracker_context,
    get_from_catalog,
    get_object,
    get_parameter,
    get_run_id,
    get_run_log,
    get_secret,
    put_in_catalog,
    put_object,
    start_interactive_session,
    set_parameter,
    track_this,
)  # noqa

# Public SDK surface for defining pipelines programmatically.
from runnable.sdk import Stub, Pipeline, Task, Parallel, Map, Catalog, Success, Fail  # noqa


# TODO: Think of model registry as a central place to store models.
# TODO: Implement Sagemaker pipelines as a executor.


# TODO: Think of way of generating dag hash without executor configuration
|
runnable/catalog.py
ADDED
@@ -0,0 +1,141 @@
|
|
1
|
+
import logging
|
2
|
+
from abc import ABC, abstractmethod
|
3
|
+
from typing import List, Optional
|
4
|
+
|
5
|
+
from pydantic import BaseModel, ConfigDict
|
6
|
+
|
7
|
+
import runnable.context as context
|
8
|
+
from runnable import defaults
|
9
|
+
from runnable.datastore import DataCatalog
|
10
|
+
|
11
|
+
logger = logging.getLogger(defaults.LOGGER_NAME)
|
12
|
+
|
13
|
+
|
14
|
+
# --8<-- [start:docs]
|
15
|
+
|
16
|
+
|
17
|
+
class BaseCatalog(ABC, BaseModel):
    """
    The contract every catalog provider must satisfy.

    Concrete catalog handlers subclass this and implement get/put/sync
    behavior for their storage backend.
    """

    service_name: str = ""
    service_type: str = "catalog"
    model_config = ConfigDict(extra="forbid")

    @property
    def _context(self):
        # Resolved lazily so the handler always sees the active run context.
        return context.run_context

    @property
    def compute_data_folder(self) -> str:
        # Default local folder where catalog data is materialized.
        return defaults.COMPUTE_DATA_FOLDER

    @abstractmethod
    def get(self, name: str, run_id: str, compute_data_folder: str = "", **kwargs) -> List[DataCatalog]:
        """
        Retrieve the catalog item named ``name`` for ``run_id`` into the
        compute data folder.

        The catalog location is expected to exist before this is called.

        Args:
            name (str): The name of the catalog item.
            run_id (str): The run_id of the run.
            compute_data_folder (str, optional): The compute data folder. Defaults to runnable default (data/).

        Raises:
            NotImplementedError: Base class, hence not implemented.

        Returns:
            List[DataCatalog]: The catalog entries that were retrieved.
        """
        raise NotImplementedError

    @abstractmethod
    def put(
        self,
        name: str,
        run_id: str,
        compute_data_folder: str = "",
        synced_catalogs: Optional[List[DataCatalog]] = None,
        **kwargs,
    ) -> List[DataCatalog]:
        """
        Store the file named ``name`` from ``compute_data_folder`` into the
        catalog for ``run_id``.

        Implementations may skip files that were synced previously and are
        unchanged.

        Args:
            name (str): The name of the catalog item.
            run_id (str): The run_id of the run.
            compute_data_folder (str, optional): The compute data folder. Defaults to runnable default (data/).
            synced_catalogs (list, optional): Any previously synced catalogs. Defaults to None.

        Raises:
            NotImplementedError: Base class, hence not implemented.

        Returns:
            List[DataCatalog]: The catalog entries that were stored.
        """
        raise NotImplementedError

    @abstractmethod
    def sync_between_runs(self, previous_run_id: str, run_id: str):
        """
        Copy the cataloged artifacts of ``previous_run_id`` into the catalog
        of ``run_id``.

        Args:
            previous_run_id (str): The run id of the previous run.
            run_id (str): The run_id to which the data catalogs should be synced.

        Raises:
            NotImplementedError: Base class, hence not implemented.
        """
        raise NotImplementedError
|
97
|
+
|
98
|
+
|
99
|
+
# --8<-- [end:docs]
|
100
|
+
|
101
|
+
|
102
|
+
class DoNothingCatalog(BaseCatalog):
    """
    A no-op catalog handler.

    Example config:

    catalog:
      type: do-nothing

    """

    service_name: str = "do-nothing"

    def get(self, name: str, run_id: str, compute_data_folder: str = "", **kwargs) -> List[DataCatalog]:
        """Log and return an empty list; nothing is retrieved."""
        logger.info("Using a do-nothing catalog, doing nothing in get")
        return []

    def put(
        self,
        name: str,
        run_id: str,
        compute_data_folder: str = "",
        synced_catalogs: Optional[List[DataCatalog]] = None,
        **kwargs,
    ) -> List[DataCatalog]:
        """Log and return an empty list; nothing is stored."""
        logger.info("Using a do-nothing catalog, doing nothing in put")
        return []

    def sync_between_runs(self, previous_run_id: str, run_id: str):
        """Log only; no artifacts are copied between runs."""
        logger.info("Using a do-nothing catalog, doing nothing while sync between runs")
|
runnable/cli.py
ADDED
@@ -0,0 +1,272 @@
|
|
1
|
+
import logging
|
2
|
+
|
3
|
+
import click
|
4
|
+
from click_plugins import with_plugins
|
5
|
+
from pkg_resources import iter_entry_points
|
6
|
+
|
7
|
+
from runnable import defaults, entrypoints
|
8
|
+
|
9
|
+
logger = logging.getLogger(defaults.LOGGER_NAME)
|
10
|
+
|
11
|
+
|
12
|
+
# Root command group. Third-party packages can add subcommands by registering
# entry points under the "runnable.cli_plugins" group.
@with_plugins(iter_entry_points("runnable.cli_plugins"))
@click.group()
@click.version_option()
def cli():
    """
    Welcome to runnable. Please provide the command that you want to use.
    All commands have options that you can see by runnable <command> --help
    """
    # NOTE: the docstring above is surfaced verbatim as the CLI help text.
    pass
|
21
|
+
|
22
|
+
|
23
|
+
# User-facing entry point: run a pipeline defined in a YAML file.
@cli.command("execute", short_help="Execute/translate a pipeline")
@click.option("-f", "--file", default="pipeline.yaml", help="The pipeline definition file", show_default=True)
@click.option(
    "-c", "--config-file", default=None, help="config file, in yaml, to be used for the run", show_default=True
)
@click.option(
    "-p",
    "--parameters-file",
    default=None,
    help="Parameters, in yaml, accessible by the application",
    show_default=True,
)
@click.option(
    "--log-level",
    default=defaults.LOG_LEVEL,
    help="The log level",
    show_default=True,
    type=click.Choice(["INFO", "DEBUG", "WARNING", "ERROR", "FATAL"]),
)
@click.option("--tag", default="", help="A tag attached to the run")
@click.option("--run-id", help="An optional run_id, one would be generated if not provided")
@click.option("--use-cached", help="Provide the previous run_id to re-run.", show_default=True)
def execute(file, config_file, parameters_file, log_level, tag, run_id, use_cached):  # pragma: no cover
    """
    Execute a pipeline

    Usage: runnable execute [OPTIONS]

    Options:
      -f, --file TEXT             The pipeline definition file [default: pipeline.yaml]
      -c, --config-file TEXT      config file, in yaml, to be used for the run [default: None]
      -p, --parameters-file TEXT  Parameters, in yaml, accessible by the application [default: None]
      --log-level                 One of [INFO|DEBUG|WARNING|ERROR|FATAL]
                                  The log level
                                  [default: INFO]
      --tag TEXT                  A tag attached to the run
                                  [default: ]
      --run-id TEXT               An optional run_id, one would be generated if not
                                  provided
      --use-cached TEXT           Provide the previous run_id to re-run.
    """
    # Adjust verbosity before any work happens.
    logger.setLevel(log_level)
    # Delegate to the library entry point; all orchestration lives there.
    entrypoints.execute(
        configuration_file=config_file,
        pipeline_file=file,
        tag=tag,
        run_id=run_id,
        use_cached=use_cached,
        parameters_file=parameters_file,
    )
|
73
|
+
|
74
|
+
|
75
|
+
# Hidden internal command: remote executors (argo, containers, ...) shell out
# to this to run exactly one node of an already-registered run.
@cli.command("execute_single_node", short_help="Internal entry point to execute a single node", hidden=True)
@click.argument("run_id")
@click.argument("step_name")
@click.option("--map-variable", default="", help="The map variable dictionary in str", show_default=True)
@click.option("-f", "--file", default="", help="The pipeline definition file", show_default=True)
@click.option(
    "-c", "--config-file", default=None, help="config file, in yaml, to be used for the run", show_default=True
)
@click.option(
    "-p",
    "--parameters-file",
    default=None,
    help="Parameters, in yaml, accessible by the application",
    show_default=True,
)
@click.option(
    "--log-level",
    default=defaults.LOG_LEVEL,
    help="The log level",
    show_default=True,
    type=click.Choice(["INFO", "DEBUG", "WARNING", "ERROR", "FATAL"]),
)
@click.option("--tag", default="", help="A tag attached to the run")
def execute_single_node(run_id, step_name, map_variable, file, config_file, parameters_file, log_level, tag):
    """
    Internal entrypoint for runnable to execute a single node.

    Other than local executor, every other executor uses this entry point to execute a step in the context of runnable.
    Only chained executions should use this method. Unchained executions should use execute_
    """
    logger.setLevel(log_level)

    # Execute the node as part of the graph execution.
    entrypoints.execute_single_node(
        configuration_file=config_file,
        pipeline_file=file,
        step_name=step_name,
        map_variable=map_variable,
        run_id=run_id,
        tag=tag,
        parameters_file=parameters_file,
    )
|
117
|
+
|
118
|
+
|
119
|
+
# User-facing entry point: run a single Jupyter notebook as an unchained job.
@cli.command("execute_notebook", short_help="Entry point to execute a notebook")
@click.argument("filename")
@click.option("--entrypoint", default=defaults.ENTRYPOINT.USER.value, hidden=True)
@click.option(
    "-c", "--config-file", default=None, help="config file, in yaml, to be used for the run", show_default=True
)
@click.option(
    "-p",
    "--parameters-file",
    default=None,
    help="Parameters, in yaml, accessible by the application",
    show_default=True,
)
@click.option(
    "--log-level",
    default=defaults.LOG_LEVEL,
    help="The log level",
    show_default=True,
    type=click.Choice(["INFO", "DEBUG", "WARNING", "ERROR", "FATAL"]),
)
@click.option("--data-folder", "-d", default="data/", help="The catalog data folder")
@click.option("--put-in-catalog", "-put", default=None, multiple=True, help="The data to put from the catalog")
@click.option("--notebook-output-path", default="", help="The output path for the notebook")
@click.option("--tag", help="A tag attached to the run")
@click.option("--run-id", help="An optional run_id, one would be generated if not provided")
def execute_notebook(
    filename,
    entrypoint,
    config_file,
    parameters_file,
    log_level,
    data_folder,
    put_in_catalog,
    notebook_output_path,
    tag,
    run_id,
):
    """
    External entry point to execute a Jupyter notebook in isolation.

    The notebook would be executed in the environment defined by the config file or default if none.
    The execution plan is unchained.
    """
    logger.setLevel(log_level)
    # Bundle the catalog settings; "put" is None when no -put options were given.
    catalog_config = {"compute_data_folder": data_folder, "put": list(put_in_catalog) if put_in_catalog else None}
    if not filename.endswith(".ipynb"):
        # Bad user input: raise a specific exception type rather than a bare
        # Exception so callers can catch it precisely (still caught by any
        # existing `except Exception` handlers).
        raise ValueError("A notebook should always have ipynb as the extension")

    entrypoints.execute_notebook(
        entrypoint=entrypoint,
        notebook_file=filename,
        catalog_config=catalog_config,
        configuration_file=config_file,
        parameters_file=parameters_file,
        notebook_output_path=notebook_output_path,
        tag=tag,
        run_id=run_id,
    )
|
177
|
+
|
178
|
+
|
179
|
+
# User-facing entry point: run a single python function (dotted path in
# COMMAND) as an unchained job.
@cli.command("execute_function", short_help="Entry point to execute a python function")
@click.argument("command")
@click.option("--entrypoint", default=defaults.ENTRYPOINT.USER.value, hidden=True)
@click.option(
    "-c", "--config-file", default=None, help="config file, in yaml, to be used for the run", show_default=True
)
@click.option(
    "-p",
    "--parameters-file",
    default=None,
    help="Parameters, in yaml, accessible by the application",
    show_default=True,
)
@click.option(
    "--log-level",
    default=defaults.LOG_LEVEL,
    help="The log level",
    show_default=True,
    type=click.Choice(["INFO", "DEBUG", "WARNING", "ERROR", "FATAL"]),
)
@click.option("--data-folder", "-d", default="data/", help="The catalog data folder")
@click.option("--put-in-catalog", "-put", default=None, multiple=True, help="The data to put from the catalog")
@click.option("--tag", help="A tag attached to the run")
@click.option("--run-id", help="An optional run_id, one would be generated if not provided")
def execute_function(
    command, entrypoint, config_file, parameters_file, log_level, data_folder, put_in_catalog, tag, run_id
):
    """
    External entry point to execute a python function in isolation.

    The function would be executed in the environment defined by the config file or default if none.
    The execution plan is unchained.
    """
    logger.setLevel(log_level)
    # Bundle the catalog settings; "put" is None when no -put options were given.
    catalog_config = {"compute_data_folder": data_folder, "put": list(put_in_catalog) if put_in_catalog else None}
    entrypoints.execute_function(
        entrypoint=entrypoint,
        command=command,
        catalog_config=catalog_config,
        configuration_file=config_file,
        parameters_file=parameters_file,
        tag=tag,
        run_id=run_id,
    )
|
223
|
+
|
224
|
+
|
225
|
+
# Hidden internal command: orchestrators call this before (fan out) and after
# (fan in) the branches of a composite node (parallel/map) execute.
@cli.command("fan", short_help="Internal entry point to fan in or out a composite node", hidden=True)
@click.argument("run_id")
@click.argument("step_name")
@click.option("-m", "--mode", help="fan in or fan out", required=True, type=click.Choice(["in", "out"]))
@click.option("--map-variable", default="", help="The map variable dictionary in str", show_default=True)
@click.option("-f", "--file", default="", help="The pipeline definition file", show_default=True)
@click.option(
    "-c", "--config-file", default=None, help="config file, in yaml, to be used for the run", show_default=True
)
@click.option(
    "-p",
    "--parameters-file",
    default=None,
    help="Parameters, in yaml, accessible by the application",
    show_default=True,
)
@click.option(
    "--log-level",
    default=defaults.LOG_LEVEL,
    help="The log level",
    show_default=True,
    type=click.Choice(["INFO", "DEBUG", "WARNING", "ERROR", "FATAL"]),
)
@click.option("--tag", default="", help="A tag attached to the run")
def fan(run_id, step_name, mode, map_variable, file, config_file, parameters_file, log_level, tag):
    """
    Internal entrypoint for runnable to fan in or out a composite node.

    Only 3rd party orchestrators should use this entry point.
    """
    logger.setLevel(log_level)

    # Fan in or out
    entrypoints.fan(
        configuration_file=config_file,
        pipeline_file=file,
        step_name=step_name,
        mode=mode,
        map_variable=map_variable,
        run_id=run_id,
        tag=tag,
        parameters_file=parameters_file,
    )
|
268
|
+
|
269
|
+
|
270
|
+
# Needed for the binary creation
# (pyinstaller-style binaries invoke this module directly rather than via the
# console-script entry point).
if __name__ == "__main__":
    cli()
|
runnable/context.py
ADDED
@@ -0,0 +1,34 @@
|
|
1
|
+
from typing import Dict, Optional
|
2
|
+
|
3
|
+
from pydantic import BaseModel, SerializeAsAny
|
4
|
+
|
5
|
+
from runnable.catalog import BaseCatalog
|
6
|
+
from runnable.datastore import BaseRunLogStore
|
7
|
+
from runnable.executor import BaseExecutor
|
8
|
+
from runnable.experiment_tracker import BaseExperimentTracker
|
9
|
+
from runnable.graph import Graph
|
10
|
+
from runnable.secrets import BaseSecrets
|
11
|
+
|
12
|
+
|
13
|
+
class Context(BaseModel):
    # The state shared across one run: the pluggable services plus run
    # metadata. SerializeAsAny keeps subclass-specific fields when the
    # configured service implementations are serialized.
    executor: SerializeAsAny[BaseExecutor]
    run_log_store: SerializeAsAny[BaseRunLogStore]
    secrets_handler: SerializeAsAny[BaseSecrets]
    catalog_handler: SerializeAsAny[BaseCatalog]
    experiment_tracker: SerializeAsAny[BaseExperimentTracker]

    # Source files this run was configured from (empty string when unset).
    pipeline_file: Optional[str] = ""
    parameters_file: Optional[str] = ""
    configuration_file: Optional[str] = ""

    tag: str = ""
    run_id: str = ""
    # Template variables available during execution; pydantic copies this
    # default per instance, so the shared {} literal is safe here.
    variables: Dict[str, str] = {}
    # Re-run support: when use_cached is True, original_run_id points at the
    # run being re-used.
    use_cached: bool = False
    original_run_id: str = ""
    # The pipeline graph and its hash; dag is None for unchained executions.
    dag: Optional[Graph] = None
    dag_hash: str = ""
    execution_plan: str = ""


# Module-level singleton holding the active run's context; None until a run
# is set up by the entrypoints.
run_context = None  # type: Context # type: ignore
|