pypeline-runner 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,428 @@
+ #!/usr/bin/env python3
+ # This file was generated by yanga.
+ # VERSION: 0.6.1
+ import configparser
+ import hashlib
+ import json
+ import logging
+ import os
+ import re
+ import subprocess # nosec
+ import sys
+ import tempfile
+ import venv
+ from abc import ABC, abstractmethod
+ from dataclasses import dataclass
+ from enum import Enum
+ from pathlib import Path
+ from typing import List, Optional
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger("bootstrap")
+
+
+ this_dir = Path(__file__).parent
+ this_file = Path(__file__).name
+ bootstrap_json_path = Path(__file__).parent / "bootstrap.json"
+ if bootstrap_json_path.exists():
+     with bootstrap_json_path.open("r") as f:
+         config = json.load(f)
+         package_manager = config.get("python_package_manager", "poetry>=1.7.1")
+ else:
+     package_manager = "poetry>=1.7.1"
+
+
+ @dataclass
+ class PyPiSource:
+     name: str
+     url: str
+
+
+ @dataclass
+ class TomlSection:
+     name: str
+     content: str
+
+     def __str__(self) -> str:
+         return f"[{self.name}]\n{self.content}"
+
+
+ class PyPiSourceParser:
+     tool_poetry_source_section = "tool.poetry.source"
+
+     @staticmethod
+     def from_pyproject_toml(pyproject_toml: Path) -> Optional[PyPiSource]:
+         if not pyproject_toml.exists():
+             return None
+         return PyPiSourceParser.from_pyproject_toml_content(pyproject_toml.read_text())
+
+     @staticmethod
+     def from_pyproject_toml_content(content: str) -> Optional[PyPiSource]:
+         sections = PyPiSourceParser.get_toml_sections(content)
+         for section in sections:
+             if section.name == PyPiSourceParser.tool_poetry_source_section:
+                 try:
+                     parser = configparser.ConfigParser()
+                     parser.read_string(str(section))
+                     name = parser[section.name]["name"].strip('"')
+                     url = parser[section.name]["url"].strip('"')
+                     return PyPiSource(name, url)
+                 except KeyError as e:
+                     raise UserNotificationException(
+                         f"Could not parse PyPi source from pyproject.toml section {section.name}. "
+                         f"Please make sure the section has the following format:\n"
+                         f"[{PyPiSourceParser.tool_poetry_source_section}]\n"
+                         f'name = "name"\n'
+                         f'url = "https://url"\n'
+                         f"verify_ssl = true"
+                     ) from e
+         return None
+
+     @staticmethod
+     def get_toml_sections(toml_content: str) -> List[TomlSection]:
+         # Use a regular expression to find all sections with [ or [[ at the beginning of the line
+         raw_sections = re.findall(r"^\[+.*\]+\n(?:[^[]*\n)*", toml_content, re.MULTILINE)
+
+         # Process each section
+         sections = []
+         for section in raw_sections:
+             # Split the lines, from the first line extract the section name
+             # and merge all the other lines into the content
+             lines = section.splitlines()
+             name_match = re.match(r"^\[+([^]]*)\]+", lines[0])
+             if name_match:
+                 name = name_match.group(1).strip()
+                 content = "\n".join(lines[1:]).strip()
+                 sections.append(TomlSection(name, content))
+
+         return sections
+
+
+ class Runnable(ABC):
+     @abstractmethod
+     def run(self) -> int:
+         """Run stage"""
+
+     @abstractmethod
+     def get_name(self) -> str:
+         """Get stage name"""
+
+     @abstractmethod
+     def get_inputs(self) -> List[Path]:
+         """Get stage dependencies"""
+
+     @abstractmethod
+     def get_outputs(self) -> List[Path]:
+         """Get stage outputs"""
+
+
+ class RunInfoStatus(Enum):
+     MATCH = (False, "Nothing changed. Previous execution info matches.")
+     NO_INFO = (True, "No previous execution info found.")
+     FILE_NOT_FOUND = (True, "File not found.")
+     FILE_CHANGED = (True, "File has changed.")
+
+     def __init__(self, should_run: bool, message: str) -> None:
+         self.should_run = should_run
+         self.message = message
+
+
+ class Executor:
+     """
+     Accepts Runnable objects and executes them.
+     It creates a file with the same name as the runnable's name
+     and stores the inputs and outputs with their hashes.
+     If the file exists, it checks the hashes of the inputs and outputs
+     and if they match, it skips the execution.
+     """
+
+     RUN_INFO_FILE_EXTENSION = ".deps.json"
+
+     def __init__(self, cache_dir: Path) -> None:
+         self.cache_dir = cache_dir
+
+     @staticmethod
+     def get_file_hash(path: Path) -> str:
+         with open(path, "rb") as file:
+             bytes = file.read()
+             readable_hash = hashlib.sha256(bytes).hexdigest()
+             return readable_hash
+
+     def store_run_info(self, runnable: Runnable) -> None:
+         file_info = {
+             "inputs": {str(path): self.get_file_hash(path) for path in runnable.get_inputs()},
+             "outputs": {str(path): self.get_file_hash(path) for path in runnable.get_outputs()},
+         }
+
+         run_info_path = self.get_runnable_run_info_file(runnable)
+         run_info_path.parent.mkdir(parents=True, exist_ok=True)
+         with run_info_path.open("w") as f:
+             # pretty print the json file
+             json.dump(file_info, f, indent=4)
+
+     def get_runnable_run_info_file(self, runnable: Runnable) -> Path:
+         return self.cache_dir / f"{runnable.get_name()}{self.RUN_INFO_FILE_EXTENSION}"
+
+     def previous_run_info_matches(self, runnable: Runnable) -> RunInfoStatus:
+         run_info_path = self.get_runnable_run_info_file(runnable)
+         if not run_info_path.exists():
+             return RunInfoStatus.NO_INFO
+
+         with run_info_path.open() as f:
+             previous_info = json.load(f)
+
+         for file_type in ["inputs", "outputs"]:
+             for path_str, previous_hash in previous_info[file_type].items():
+                 path = Path(path_str)
+                 if not path.exists():
+                     return RunInfoStatus.FILE_NOT_FOUND
+                 elif self.get_file_hash(path) != previous_hash:
+                     return RunInfoStatus.FILE_CHANGED
+         return RunInfoStatus.MATCH
+
+     def execute(self, runnable: Runnable) -> int:
+         run_info_status = self.previous_run_info_matches(runnable)
+         if run_info_status.should_run:
+             logger.info(f"Runnable '{runnable.get_name()}' must run. {run_info_status.message}")
+             exit_code = runnable.run()
+             self.store_run_info(runnable)
+             return exit_code
+         logger.info(f"Runnable '{runnable.get_name()}' execution skipped. {run_info_status.message}")
+
+         return 0
+
+
+ class UserNotificationException(Exception):
+     pass
+
+
+ class SubprocessExecutor:
+     def __init__(
+         self,
+         command: List[str | Path],
+         cwd: Optional[Path] = None,
+         capture_output: bool = True,
+     ):
+         self.command = " ".join([str(cmd) for cmd in command])
+         self.current_working_directory = cwd
+         self.capture_output = capture_output
+
+     def execute(self) -> None:
+         result = None
+         try:
+             current_dir = (self.current_working_directory or Path.cwd()).as_posix()
+             logger.info(f"Running command: {self.command} in {current_dir}")
+             # print all environment variables for debugging
+             logger.debug(json.dumps(dict(os.environ), indent=4))
+             result = subprocess.run(
+                 self.command.split(), # noqa: S603
+                 cwd=current_dir,
+                 capture_output=self.capture_output,
+                 text=True, # to get stdout and stderr as strings instead of bytes
+             ) # nosec
+             result.check_returncode()
+         except subprocess.CalledProcessError as e:
+             raise UserNotificationException(
+                 f"Command '{self.command}' failed with:\n"
+                 f"{result.stdout if result else ''}\n"
+                 f"{result.stderr if result else e}"
+             ) from e
+
+
+ class VirtualEnvironment(ABC):
+     def __init__(self, venv_dir: Path) -> None:
+         self.venv_dir = venv_dir
+
+     def create(self, clear: bool = False) -> None:
+         """
+         Create a new virtual environment. This should configure the virtual environment such that
+         subsequent calls to `pip` and `run` operate within this environment.
+         """
+         try:
+             venv.create(self.venv_dir, with_pip=True, clear=clear)
+         except PermissionError as e:
+             if "python.exe" in str(e):
+                 raise UserNotificationException(
+                     f"Failed to create virtual environment in {self.venv_dir}.\n"
+                     f"Virtual environment python.exe is still running. Please kill all instances and run again.\n"
+                     f"Error: {e}"
+                 ) from e
+             raise UserNotificationException(
+                 f"Failed to create virtual environment in {self.venv_dir}.\n"
+                 f"Please make sure you have the necessary permissions.\n"
+                 f"Error: {e}"
+             ) from e
+
+     def pip_configure(self, index_url: str, verify_ssl: bool) -> None:
+         """
+         Configure pip to use the given index URL and SSL verification setting. This method should
+         behave as if the user had activated the virtual environment and run `pip config set
+         global.index-url <index_url>` and `pip config set global.cert <verify_ssl>` from the
+         command line.
+
+         Args:
+         ----
+             index_url: The index URL to use for pip.
+             verify_ssl: Whether to verify SSL certificates when using pip.
+
+         """
+         # The pip configuration file should be in the virtual environment directory %VIRTUAL_ENV%
+         pip_ini_path = self.pip_config_path()
+         with open(pip_ini_path, "w") as pip_ini_file:
+             match_host = re.match(r"https?://([^/]+)", index_url)
+             pip_ini_file.write(f"[global]\nindex-url = {index_url}\n")
+             if match_host:
+                 pip_ini_file.write(f"trusted-host = {match_host.group(1)}\n")
+             if not verify_ssl:
+                 pip_ini_file.write("cert = false\n")
+
+     def pip(self, args: List[str]) -> None:
+         SubprocessExecutor([self.pip_path().as_posix(), *args], this_dir).execute()
+
+     @abstractmethod
+     def pip_path(self) -> Path:
+         """
+         Get the path to the pip executable within the virtual environment.
+         """
+
+     @abstractmethod
+     def pip_config_path(self) -> Path:
+         """
+         Get the path to the pip configuration file within the virtual environment.
+         """
+
+     @abstractmethod
+     def run(self, args: List[str], capture_output: bool = True) -> None:
+         """
+         Run an arbitrary command within the virtual environment. This method should behave as if the
+         user had activated the virtual environment and run the given command from the command line.
+
+         Args:
+         ----
+             args: Command-line arguments given as a list. For example, `run(['python', 'setup.py', 'install'])`
+                 should behave similarly to `python setup.py install` at the command line.
+
+         """
+
+
+ class WindowsVirtualEnvironment(VirtualEnvironment):
+     def __init__(self, venv_dir: Path) -> None:
+         super().__init__(venv_dir)
+         self.activate_script = self.venv_dir.joinpath("Scripts/activate")
+
+     def pip_path(self) -> Path:
+         return self.venv_dir.joinpath("Scripts/pip")
+
+     def pip_config_path(self) -> Path:
+         return self.venv_dir.joinpath("pip.ini")
+
+     def run(self, args: List[str], capture_output: bool = True) -> None:
+         SubprocessExecutor(
+             [f"cmd /c {self.activate_script.as_posix()} && ", *args],
+             this_dir,
+             capture_output,
+         ).execute()
+
+
+ class UnixVirtualEnvironment(VirtualEnvironment):
+     def __init__(self, venv_dir: Path) -> None:
+         super().__init__(venv_dir)
+         self.activate_script = self.venv_dir.joinpath("bin/activate")
+
+     def pip_path(self) -> Path:
+         return self.venv_dir.joinpath("bin/pip")
+
+     def pip_config_path(self) -> Path:
+         return self.venv_dir.joinpath("pip.conf")
+
+     def run(self, args: List[str], capture_output: bool = True) -> None:
+         # Create a temporary shell script
+         with tempfile.NamedTemporaryFile("w", delete=False, suffix=".sh") as f:
+             f.write("#!/bin/bash\n") # Add a shebang line
+             f.write(f"source {self.activate_script.as_posix()}\n") # Write the activate command
+             f.write(" ".join(args)) # Write the provided command
+             temp_script_path = f.name # Get the path of the temporary script
+
+         # Make the temporary script executable
+         SubprocessExecutor(["chmod", "+x", temp_script_path]).execute()
+         # Run the temporary script
+         SubprocessExecutor([f"{Path(temp_script_path).as_posix()}"], this_dir, capture_output).execute()
+         # Delete the temporary script
+         os.remove(temp_script_path)
+
+
+ class CreateVirtualEnvironment(Runnable):
+     def __init__(
+         self,
+     ) -> None:
+         self.root_dir = this_dir
+         self.venv_dir = self.root_dir / ".venv"
+         self.virtual_env = self.instantiate_os_specific_venv(self.venv_dir)
+
+     @property
+     def package_manager_name(self) -> str:
+         match = re.match(r"^([a-zA-Z0-9_-]+)", package_manager)
+
+         if match:
+             return match.group(1)
+         else:
+             raise UserNotificationException(f"Could not extract the package manager name from {package_manager}")
+
+     def run(self) -> int:
+         logger.info("Running project build script")
+         self.virtual_env.create(clear=self.venv_dir.exists())
+         pypi_source = PyPiSourceParser.from_pyproject_toml(self.root_dir / "pyproject.toml")
+         if pypi_source:
+             self.virtual_env.pip_configure(index_url=pypi_source.url, verify_ssl=True)
+         self.virtual_env.pip(["install", package_manager])
+         self.virtual_env.run([self.package_manager_name, "install"])
+         return 0
+
+     @staticmethod
+     def instantiate_os_specific_venv(venv_dir: Path) -> VirtualEnvironment:
+         if sys.platform.startswith("win32"):
+             return WindowsVirtualEnvironment(venv_dir)
+         elif sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
+             return UnixVirtualEnvironment(venv_dir)
+         else:
+             raise UserNotificationException(f"Unsupported operating system: {sys.platform}")
+
+     def get_name(self) -> str:
+         return "create-virtual-environment"
+
+     def get_inputs(self) -> List[Path]:
+         bootstrap_files = list(self.root_dir.glob("bootstrap.*"))
+         venv_relevant_files = ["poetry.lock", "poetry.toml", "pyproject.toml"]
+         return [self.root_dir / file for file in venv_relevant_files] + bootstrap_files
+
+     def get_outputs(self) -> List[Path]:
+         return []
+
+
+ def print_environment_info() -> None:
+     str_bar = "".join(["-" for _ in range(80)])
+     logger.debug(str_bar)
+     logger.debug("Environment: \n" + json.dumps(dict(os.environ), indent=4))
+     logger.info(str_bar)
+     logger.info(f"Arguments: {sys.argv[1:]}")
+     logger.info(str_bar)
+
+
+ def main() -> int:
+     try:
+         # print_environment_info()
+         build = CreateVirtualEnvironment()
+         Executor(build.venv_dir).execute(build)
+     except UserNotificationException as e:
+         logger.error(e)
+         return 1
+     return 0
+
+
+ if __name__ == "__main__":
+     sys.exit(main())
+
+ if __name__ == "__test_main__":
+     """This is used to execute the build script from a test and
+     it shall not call sys.exit()"""
+     main()
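Note: the bootstrap script above skips work via the Runnable/Executor pair and the hash-based run-info files. The sketch below is not part of the package diff; it only illustrates that mechanism, assuming the bootstrap module's Executor and Runnable classes are in scope (the bootstrap file's module name is not shown in this diff), and TouchFile is a hypothetical step used purely for illustration.

from pathlib import Path
from typing import List


class TouchFile(Runnable):
    """Copies one file to another; re-runs only when a hash changes."""

    def __init__(self, input_file: Path, output_file: Path) -> None:
        self.input_file = input_file
        self.output_file = output_file

    def run(self) -> int:
        self.output_file.write_text(self.input_file.read_text())
        return 0

    def get_name(self) -> str:
        return "touch-file"

    def get_inputs(self) -> List[Path]:
        return [self.input_file]

    def get_outputs(self) -> List[Path]:
        return [self.output_file]


# The first call runs the step and writes .venv/touch-file.deps.json with the input/output hashes;
# a second call with unchanged files is skipped (RunInfoStatus.MATCH).
Executor(Path(".venv")).execute(TouchFile(Path("pyproject.toml"), Path("copy.toml")))

This mirrors how main() wires CreateVirtualEnvironment into Executor(build.venv_dir), so the virtual environment is only rebuilt when pyproject.toml, poetry.lock, poetry.toml, or the bootstrap files change.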
@@ -0,0 +1,2 @@
+ .venv
+ build/
@@ -0,0 +1,2 @@
+ [virtualenvs]
+ in-project = true
@@ -0,0 +1,10 @@
+ pipeline:
+   venv:
+     - step: CreateVEnv
+       module: pypeline.steps.create_venv
+   install:
+     - step: ScoopInstall
+       module: pypeline.steps.scoop_install
+   custom:
+     - step: MyStep
+       file: steps/my_step.py
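The pipeline configuration above arranges steps into named groups (here venv, install, custom); each entry names a step class plus either a module shipped with the package or a file containing a project-local step. The sketch below is only an assumption-labelled illustration of how such an entry could be resolved to a class; the real resolution is delegated to py_app_dev's pipeline loader, and resolve_step_class is a hypothetical helper, not part of the package.

import importlib.util
from importlib import import_module
from pathlib import Path
from typing import Optional


def resolve_step_class(step: str, module: Optional[str] = None, file: Optional[str] = None, project_root: Path = Path(".")) -> type:
    if module:
        # e.g. step="CreateVEnv", module="pypeline.steps.create_venv"
        return getattr(import_module(module), step)
    if file:
        # e.g. step="MyStep", file="steps/my_step.py" relative to the project root
        spec = importlib.util.spec_from_file_location(step, project_root / file)
        loaded = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(loaded)
        return getattr(loaded, step)
    raise ValueError("A step needs either a 'module' or a 'file' entry.")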
@@ -0,0 +1,8 @@
+ [tool.poetry]
+ name = "Hello Pypeline"
+ version = "0.0.1"
+ description = "A simple generated project to get you started"
+ authors = ["Your Name"]
+
+ [tool.poetry.dependencies]
+ python = ">=3.10,<3.13"
@@ -0,0 +1,14 @@
+ {
+     "buckets": [
+         {
+             "Name": "versions",
+             "Source": "https://github.com/ScoopInstaller/Versions"
+         }
+     ],
+     "apps": [
+         {
+             "Source": "versions",
+             "Name": "mingw-winlibs-llvm-ucrt"
+         }
+     ]
+ }
@@ -0,0 +1,25 @@
+ from pathlib import Path
+ from typing import List
+
+ from py_app_dev.core.logging import logger
+
+ from pypeline.domain.pipeline import PipelineStep
+
+
+ class MyStep(PipelineStep):
+     def run(self) -> None:
+         logger.info(f"Run {self.get_name()} found install dirs:")
+         for install_dir in self.execution_context.install_dirs:
+             logger.info(f" {install_dir}")
+
+     def get_inputs(self) -> List[Path]:
+         return []
+
+     def get_outputs(self) -> List[Path]:
+         return []
+
+     def get_name(self) -> str:
+         return self.__class__.__name__
+
+     def update_execution_context(self) -> None:
+         pass
pypeline/main.py ADDED
@@ -0,0 +1,101 @@
+ import sys
+ from pathlib import Path
+ from typing import List, Optional
+
+ import typer
+ from py_app_dev.core.exceptions import UserNotificationException
+ from py_app_dev.core.logging import logger, setup_logger, time_it
+
+ from pypeline import __version__
+ from pypeline.domain.project_slurper import ProjectSlurper
+ from pypeline.kickstart.create import KickstartProject
+ from pypeline.pypeline import PipelineScheduler, PipelineStepsExecutor
+
+ package_name = "pypeline"
+
+ app = typer.Typer(name=package_name, help="Configure and execute steps for developing a python package.", no_args_is_help=True, add_completion=False)
+
+
+ @app.callback(invoke_without_command=True)
+ def version(
+     version: bool = typer.Option(None, "--version", "-v", is_eager=True, help="Show version and exit."),
+ ) -> None:
+     if version:
+         typer.echo(f"{package_name} {__version__}")
+         raise typer.Exit()
+
+
+ @app.command()
+ @time_it("init")
+ def init(
+     project_dir: Path = typer.Option(Path.cwd().absolute(), help="The project directory"), # noqa: B008
+     bootstrap_only: bool = typer.Option(False, help="Initialize only the bootstrap files."),
+     force: bool = typer.Option(False, help="Force the initialization of the project even if the directory is not empty."),
+ ) -> None:
+     KickstartProject(project_dir, bootstrap_only, force).run()
+
+
+ @app.command()
+ @time_it("run")
+ def run(
+     project_dir: Path = typer.Option(Path.cwd().absolute(), help="The project directory"), # noqa: B008
+     step: Optional[str] = typer.Option(
+         None,
+         help="Name of the step to run (as written in the pipeline config).",
+     ),
+     single: bool = typer.Option(
+         False,
+         help="If provided, only the provided step will run, without running all previous steps in the pipeline.",
+         is_flag=True,
+     ),
+     print: bool = typer.Option(
+         False,
+         help="Print the pipeline steps.",
+         is_flag=True,
+     ),
+     force_run: bool = typer.Option(
+         False,
+         help="Force the execution of a step even if it is not dirty.",
+         is_flag=True,
+     ),
+ ) -> None:
+     project_slurper = ProjectSlurper(project_dir)
+     if print:
+         logger.warning("TODO: print pipeline steps")
+         logger.info("Pipeline steps:")
+         for group, step_configs in project_slurper.pipeline.items():
+             logger.info(f" Group: {group}")
+             for step_config in step_configs:
+                 logger.info(f" {step_config.step}")
+         return
+     if not project_slurper.pipeline:
+         raise UserNotificationException("No pipeline found in the configuration.")
+     # Schedule the steps to run
+     steps_references = PipelineScheduler(project_slurper.pipeline, project_dir).get_steps_to_run(step, single)
+     if not steps_references:
+         if step:
+             raise UserNotificationException(f"Step '{step}' not found in the pipeline.")
+         logger.info("No steps to run.")
+         return
+
+     PipelineStepsExecutor(
+         project_slurper.artifacts_locator,
+         steps_references,
+         force_run,
+     ).run()
+
+
+ def main(args: Optional[List[str]] = None) -> int:
+     try:
+         setup_logger()
+         if args is None:
+             args = sys.argv[1:]
+         app(args)
+         return 0
+     except UserNotificationException as e:
+         logger.error(f"{e}")
+         return 1
+
+
+ if __name__ == "__main__":
+     sys.exit(main())
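Because the commands are ordinary Typer commands, the CLI above can also be exercised in-process. A minimal sketch, assuming a project directory that already contains a pipeline configuration ("my_project" is a placeholder); the command and option names come from main.py as shown above.

from typer.testing import CliRunner

from pypeline.main import app

runner = CliRunner()
# Print the configured pipeline steps without executing them.
result = runner.invoke(app, ["run", "--project-dir", "my_project", "--print"])
print(result.exit_code, result.output)
# Run a single step by the name used in the pipeline config.
result = runner.invoke(app, ["run", "--project-dir", "my_project", "--step", "MyStep", "--single"])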
pypeline/py.typed ADDED
File without changes
pypeline/pypeline.py ADDED
@@ -0,0 +1,93 @@
+ from pathlib import Path
+ from typing import List, Optional
+
+ from py_app_dev.core.logging import logger
+ from py_app_dev.core.pipeline import PipelineConfig
+ from py_app_dev.core.pipeline import PipelineLoader as GenericPipelineLoader
+ from py_app_dev.core.runnable import Executor
+
+ from .domain.artifacts import ProjectArtifactsLocator
+ from .domain.execution_context import ExecutionContext
+ from .domain.pipeline import PipelineStep, PipelineStepReference
+
+
+ class PipelineLoader:
+     """
+     Loads pipeline steps from a pipeline configuration.
+
+     The steps are not instantiated, only the references are returned (lazy load).
+     The pipeline loader needs to know the project root directory to be able to find the
+     user custom local steps.
+     """
+
+     def __init__(self, pipeline_config: PipelineConfig, project_root_dir: Path) -> None:
+         self.pipeline_config = pipeline_config
+         self.project_root_dir = project_root_dir
+         self._loader = GenericPipelineLoader[PipelineStep](self.pipeline_config, self.project_root_dir)
+
+     def load_steps_references(self) -> List[PipelineStepReference]:
+         return [PipelineStepReference(step_reference.group_name, step_reference._class) for step_reference in self._loader.load_steps()]
+
+
+ class PipelineStepsExecutor:
+     """Executes a list of pipeline steps sequentially."""
+
+     def __init__(
+         self,
+         artifacts_locator: ProjectArtifactsLocator,
+         steps_references: List[PipelineStepReference],
+         force_run: bool = False,
+     ) -> None:
+         self.logger = logger.bind()
+         self.artifacts_locator = artifacts_locator
+         self.steps_references = steps_references
+         self.force_run = force_run
+
+     def run(self) -> None:
+         execution_context = ExecutionContext(project_root_dir=self.artifacts_locator.project_root_dir, install_dirs=[])
+         for step_reference in self.steps_references:
+             step_output_dir = self.artifacts_locator.build_dir / step_reference.group_name
+             # Create the step output directory, to make sure that files can be created.
+             step_output_dir.mkdir(parents=True, exist_ok=True)
+             step = step_reference._class(execution_context, step_output_dir)
+             # Execute the step only if necessary. If the step is not dirty, it will not be executed.
+             Executor(step.output_dir, self.force_run).execute(step)
+             # Independent of whether the step was executed or not, every step shall update the context.
+             step.update_execution_context()
+
+         return
+
+
+ class PipelineScheduler:
+     """
+     Schedules which steps must be executed based on the provided configuration.
+
+     * If a step name is provided and the single flag is set, only that step will be executed.
+     * If a step name is provided and the single flag is not set, all steps up to the provided step will be executed.
+     * In case a command is provided, only the steps up to that command will be executed.
+     * If no step name is provided, all steps will be executed.
+     """
+
+     def __init__(self, pipeline: PipelineConfig, project_root_dir: Path) -> None:
+         self.pipeline = pipeline
+         self.project_root_dir = project_root_dir
+         self.logger = logger.bind()
+
+     def get_steps_to_run(self, step_name: Optional[str] = None, single: bool = False) -> List[PipelineStepReference]:
+         pipeline_loader = PipelineLoader(self.pipeline, self.project_root_dir)
+         return self.filter_steps_references(pipeline_loader.load_steps_references(), step_name, single)
+
+     @staticmethod
+     def filter_steps_references(
+         steps_references: List[PipelineStepReference],
+         step_name: Optional[str],
+         single: Optional[bool],
+     ) -> List[PipelineStepReference]:
+         if step_name:
+             step_reference = next((step for step in steps_references if step.name == step_name), None)
+             if not step_reference:
+                 return []
+             if single:
+                 return [step_reference]
+             return [step for step in steps_references if steps_references.index(step) <= steps_references.index(step_reference)]
+         return steps_references
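PipelineScheduler.filter_steps_references implements the scheduling rules listed in the class docstring. A small worked sketch follows, assuming step references are built the same way as in load_steps_references above (group name plus step class) and that a reference's name resolves to its step class name; both are assumptions, since PipelineStepReference's definition is not part of this diff.

from pypeline.domain.pipeline import PipelineStepReference
from pypeline.pypeline import PipelineScheduler
from pypeline.steps.create_venv import CreateVEnv
from pypeline.steps.scoop_install import ScoopInstall

refs = [
    PipelineStepReference("venv", CreateVEnv),
    PipelineStepReference("install", ScoopInstall),
]

PipelineScheduler.filter_steps_references(refs, None, False)            # all steps
PipelineScheduler.filter_steps_references(refs, "ScoopInstall", False)  # CreateVEnv, then ScoopInstall (up to the named step)
PipelineScheduler.filter_steps_references(refs, "ScoopInstall", True)   # only ScoopInstall (--single)
PipelineScheduler.filter_steps_references(refs, "Unknown", False)       # [] -> the CLI reports the step as not found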