ocean-runner 0.2.18__tar.gz → 0.2.21__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ocean_runner-0.2.18 → ocean_runner-0.2.21}/PKG-INFO +5 -1
- {ocean_runner-0.2.18 → ocean_runner-0.2.21}/ocean_runner/__init__.py +1 -1
- ocean_runner-0.2.21/ocean_runner/config.py +71 -0
- ocean_runner-0.2.21/ocean_runner/runner.py +194 -0
- {ocean_runner-0.2.18 → ocean_runner-0.2.21}/pyproject.toml +17 -2
- ocean_runner-0.2.18/ocean_runner/config.py +0 -55
- ocean_runner-0.2.18/ocean_runner/runner.py +0 -190
- {ocean_runner-0.2.18 → ocean_runner-0.2.21}/.gitignore +0 -0
- {ocean_runner-0.2.18 → ocean_runner-0.2.21}/LICENSE +0 -0
- {ocean_runner-0.2.18 → ocean_runner-0.2.21}/README.md +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ocean-runner
-Version: 0.2.18
+Version: 0.2.21
 Summary: A fluent API for OceanProtocol algorithms
 Project-URL: Homepage, https://github.com/AgrospAI/ocean-runner
 Project-URL: Issues, https://github.com/AgrospAI/ocean-runner/issues
@@ -17,8 +17,12 @@ Classifier: License :: OSI Approved :: MIT License
 Classifier: Operating System :: OS Independent
 Classifier: Programming Language :: Python :: 3
 Requires-Python: >=3.10
+Requires-Dist: aiofiles>=25.1.0
 Requires-Dist: oceanprotocol-job-details>=0.2.8
+Requires-Dist: pydantic-settings>=2.12.0
+Requires-Dist: pydantic>=2.12.5
 Requires-Dist: pytest>=8.4.2
+Requires-Dist: types-aiofiles>=25.1.0.20251011
 Description-Content-Type: text/markdown
 
 # ocean-runner
@@ -0,0 +1,71 @@
+from enum import StrEnum, auto
+from logging import Logger
+from pathlib import Path
+from typing import Generic, Sequence, TypeVar
+
+from pydantic import BaseModel, ConfigDict, Field
+from pydantic_settings import BaseSettings
+
+InputT = TypeVar("InputT")
+
+DEFAULT = "DEFAULT"
+
+
+class Keys(StrEnum):
+    SECRET = auto()
+    BASE_DIR = auto()
+    TRANSFORMATION_DID = auto()
+    DIDS = auto()
+
+
+class Environment(BaseSettings):
+    """Environment configuration loaded from environment variables"""
+
+    base_dir: str | Path | None = Field(
+        default_factory=lambda: Path("/data"),
+        validation_alias=Keys.BASE_DIR.value,
+        description="Base data directory, defaults to '/data'",
+    )
+
+    dids: str | list[Path] | None = Field(
+        default=None,
+        validation_alias=Keys.DIDS.value,
+        description='Datasets DID\'s, format: ["XXXX"]',
+    )
+
+    transformation_did: str = Field(
+        default=DEFAULT,
+        validation_alias=Keys.TRANSFORMATION_DID.value,
+        description="Transformation (algorithm) DID",
+    )
+
+    secret: str = Field(
+        default=DEFAULT,
+        validation_alias=Keys.SECRET.value,
+        description="Super secret secret",
+    )
+
+
+class Config(BaseModel, Generic[InputT]):
+    """Algorithm overall configuration"""
+
+    model_config = ConfigDict(arbitrary_types_allowed=True)
+
+    custom_input: InputT | None = Field(
+        default=None,
+        description="Algorithm's custom input types, must be a dataclass_json",
+    )
+
+    logger: Logger | None = Field(
+        default=None,
+        description="Logger to use in the algorithm",
+    )
+
+    source_paths: Sequence[Path] = Field(
+        default_factory=lambda: [Path("/algorithm/src")],
+        description="Paths that should be included so the code executes correctly",
+    )
+
+    environment: Environment = Field(
+        default_factory=Environment, description="Environment configuration"
+    )
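The new config module swaps the old os.environ mock for validated pydantic models: Environment pulls its fields from environment variables via each field's validation_alias (matched case-insensitively under pydantic-settings defaults), and Config is now a generic BaseModel. A minimal usage sketch follows; it is not part of the package, and the DID values are made-up placeholders:

import os
from pathlib import Path

from ocean_runner.config import Config, Environment

# Env vars are matched against the aliases defined in Keys ("base_dir",
# "dids", ...); matching is case-insensitive, so upper-case names work.
os.environ["DIDS"] = '["did:op:1234"]'          # placeholder DID
os.environ["TRANSFORMATION_DID"] = "did:op:abcd"  # placeholder DID

env = Environment()  # reads the process environment at instantiation time
assert env.base_dir == Path("/data")  # default_factory applies; BASE_DIR unset

config = Config[dict](environment=env)  # Config is generic over the input type
print(config.environment.transformation_did)  # "did:op:abcd"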
@@ -0,0 +1,194 @@
+from __future__ import annotations
+
+import asyncio
+import inspect
+from dataclasses import InitVar, asdict, dataclass, field
+from logging import Logger
+from pathlib import Path
+from typing import Awaitable, Callable, Generic, TypeAlias, TypeVar
+
+from oceanprotocol_job_details import JobDetails  # type: ignore
+
+from ocean_runner.config import Config
+
+InputT = TypeVar("InputT")
+ResultT = TypeVar("ResultT")
+T = TypeVar("T")
+
+
+Algo: TypeAlias = "Algorithm[InputT, ResultT]"
+ValidateFuncT: TypeAlias = Callable[[Algo], None | Awaitable[None] | None]
+RunFuncT: TypeAlias = Callable[[Algo], ResultT | Awaitable[ResultT]]
+SaveFuncT: TypeAlias = Callable[[Algo, ResultT, Path], Awaitable[None] | None]
+ErrorFuncT: TypeAlias = Callable[[Algo, Exception], Awaitable[None] | None]
+
+
+def default_error_callback(algorithm: Algorithm, error: Exception) -> None:
+    algorithm.logger.exception("Error during algorithm execution")
+    raise error
+
+
+def default_validation(algorithm: Algorithm) -> None:
+    algorithm.logger.info("Validating input using default validation")
+    assert algorithm.job_details.ddos, "DDOs missing"
+    assert algorithm.job_details.files, "Files missing"
+
+
+async def default_save(algorithm: Algorithm, result: ResultT, base: Path) -> None:
+    import aiofiles
+
+    algorithm.logger.info("Saving results using default save")
+    async with aiofiles.open(base / "result.txt", "w+") as f:
+        await f.write(str(result))
+
+
+async def execute(
+    function: Callable[..., T | Awaitable[T]],
+    *args,
+    **kwargs,
+) -> T:
+    result = function(*args, **kwargs)
+
+    if inspect.isawaitable(result):
+        return await result
+
+    return result
+
+
+@dataclass(slots=True)
+class Functions(Generic[InputT, ResultT]):
+    validate: ValidateFuncT = field(default=default_validation, init=False)
+    run: RunFuncT | None = field(default=None, init=False)
+    save: SaveFuncT = field(default=default_save, init=False)
+    error: ErrorFuncT = field(default=default_error_callback, init=False)
+
+
+@dataclass
+class Algorithm(Generic[InputT, ResultT]):
+    """
+    A configurable algorithm runner that behaves like a FastAPI app:
+    - You register `validate`, `run`, and `save_results` via decorators.
+    - You execute the full pipeline by calling `app()`.
+    """
+
+    config: InitVar[Config[InputT] | None] = field(default=None)
+
+    logger: Logger = field(init=False, repr=False)
+
+    _job_details: JobDetails[InputT] = field(init=False)
+    _result: ResultT | None = field(default=None, init=False)
+    _functions: Functions[InputT, ResultT] = field(
+        default_factory=Functions, init=False, repr=False
+    )
+
+    def __post_init__(self, config: Config[InputT] | None) -> None:
+        configuration = config or Config()
+
+        # Configure logger
+        if configuration.logger:
+            self.logger = configuration.logger
+        else:
+            import logging
+
+            logging.basicConfig(
+                level=logging.DEBUG,
+                format="%(asctime)s | %(levelname)-8s | %(name)s | %(message)s",
+                datefmt="%Y-%m-%d %H:%M:%S",
+            )
+            self.logger = logging.getLogger(__name__)
+
+        # Normalize base_dir
+        if isinstance(configuration.environment.base_dir, str):
+            configuration.environment.base_dir = Path(
+                configuration.environment.base_dir
+            )
+
+        # Extend sys.path for custom imports
+        if configuration.source_paths:
+            import sys
+
+            sys.path.extend(
+                [str(path.absolute()) for path in configuration.source_paths]
+            )
+            self.logger.debug(
+                f"Added [{len(configuration.source_paths)}] entries to PATH"
+            )
+
+        self.configuration = configuration
+
+    class Error(RuntimeError): ...
+
+    @property
+    def job_details(self) -> JobDetails:
+        if not self._job_details:
+            raise Algorithm.Error("JobDetails not initialized or missing")
+        return self._job_details
+
+    @property
+    def result(self) -> ResultT:
+        if self._result is None:
+            raise Algorithm.Error("Result missing, run the algorithm first")
+        return self._result
+
+    # ---------------------------
+    # Decorators (FastAPI-style)
+    # ---------------------------
+
+    def validate(self, fn: ValidateFuncT) -> ValidateFuncT:
+        self._functions.validate = fn
+        return fn
+
+    def run(self, fn: RunFuncT) -> RunFuncT:
+        self._functions.run = fn
+        return fn
+
+    def save_results(self, fn: SaveFuncT) -> SaveFuncT:
+        self._functions.save = fn
+        return fn
+
+    def on_error(self, fn: ErrorFuncT) -> ErrorFuncT:
+        self._functions.error = fn
+        return fn
+
+    # ---------------------------
+    # Execution Pipeline
+    # ---------------------------
+
+    async def execute(self) -> ResultT | None:
+        # Load job details
+        self._job_details = JobDetails.load(
+            _type=self.configuration.custom_input,
+            base_dir=self.configuration.environment.base_dir,
+            dids=self.configuration.environment.dids,
+            transformation_did=self.configuration.environment.transformation_did,
+            secret=self.configuration.environment.secret,
+        )
+
+        self.logger.info("Loaded JobDetails")
+        self.logger.debug(asdict(self.job_details))
+
+        try:
+            await execute(self._functions.validate, self)
+
+            if self._functions.run:
+                self.logger.info("Running algorithm...")
+                self._result = await execute(self._functions.run, self)
+            else:
+                self.logger.error("No run() function defined. Skipping execution.")
+                self._result = None
+
+            await execute(
+                self._functions.save,
+                algorithm=self,
+                result=self._result,
+                base=self.job_details.paths.outputs,
+            )
+
+        except Exception as e:
+            await execute(self._functions.error, self, e)
+
+        return self._result
+
+    def __call__(self) -> ResultT | None:
+        """Executes the algorithm pipeline: validate → run → save_results."""
+        return asyncio.run(self.execute())
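The rewritten runner routes every callback through the small module-level execute() helper, which calls the function and awaits the result only when inspect.isawaitable() says it is one, so registered hooks may be plain functions or coroutines interchangeably. A hedged end-to-end sketch of the decorator API (assumptions: a compute-to-data layout exists under /data for JobDetails.load() to read, and the callback bodies are placeholders):

from pathlib import Path

from ocean_runner.runner import Algorithm

app = Algorithm[None, int]()  # InputT=None, ResultT=int

@app.validate
def check(algorithm: Algorithm) -> None:  # sync callbacks are accepted
    assert algorithm.job_details.files, "Files missing"

@app.run
async def compute(algorithm: Algorithm) -> int:  # so are async ones
    return 42

@app.save_results
async def save(algorithm: Algorithm, result: int, base: Path) -> None:
    (base / "result.txt").write_text(str(result))

result = app()  # validate -> run -> save_results, driven by asyncio.run()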
@@ -1,6 +1,6 @@
 [project]
 name = "ocean-runner"
-version = "0.2.18"
+version = "0.2.21"
 description = "A fluent API for OceanProtocol algorithms"
 authors = [
     { name = "AgrospAI", email = "agrospai@udl.cat" },
@@ -14,7 +14,14 @@ classifiers = [
     "Operating System :: OS Independent",
     "License :: OSI Approved :: MIT License",
 ]
-dependencies = ["oceanprotocol-job-details>=0.2.8", "pytest>=8.4.2"]
+dependencies = [
+    "aiofiles>=25.1.0",
+    "oceanprotocol-job-details>=0.2.8",
+    "pydantic>=2.12.5",
+    "pydantic-settings>=2.12.0",
+    "pytest>=8.4.2",
+    "types-aiofiles>=25.1.0.20251011",
+]
 
 [project.urls]
 Homepage = "https://github.com/AgrospAI/ocean-runner"
@@ -29,8 +36,16 @@ pythonpath = "ocean_runner"
 requires = ["hatchling"]
 build-backend = "hatchling.build"
 
+[dependency-groups]
+dev = [
+    "mypy>=1.19.1",
+]
+
 [tool.hatch.build.targets.sdist]
 include = ["ocean_runner"]
 
 [tool.hatch.build.targets.wheel]
 include = ["ocean_runner"]
+
+[tool.mypy]
+plugins = ['pydantic.mypy']
@@ -1,55 +0,0 @@
-import os
-from dataclasses import asdict, dataclass, field
-from logging import Logger
-from pathlib import Path
-from typing import Iterable, TypeVar
-
-T = TypeVar("T")
-
-DEFAULT = "DEFAULT"
-
-
-@dataclass
-class Environment:
-    """Environment variables mock"""
-
-    base_dir: str | None = field(
-        default_factory=lambda: os.environ.get("BASE_DIR", None),
-    )
-    """Base data directory, defaults to '/data'"""
-
-    dids: str = field(
-        default_factory=lambda: os.environ.get("DIDS", None),
-    )
-    """Datasets DID's, format: '["XXXX"]'"""
-
-    transformation_did: str = field(
-        default_factory=lambda: os.environ.get("TRANSFORMATION_DID", DEFAULT),
-    )
-    """Transformation (algorithm) DID"""
-
-    secret: str = field(
-        default_factory=lambda: os.environ.get("SECRET", DEFAULT),
-    )
-    """Super secret secret"""
-
-    dict = asdict
-
-
-@dataclass
-class Config:
-    """Algorithm overall configuration"""
-
-    custom_input: T | None = None
-    """Algorithm's custom input types, must be a dataclass_json"""
-
-    logger: Logger | None = None
-    """Logger to use in the algorithm"""
-
-    source_paths: Iterable[Path] = field(
-        default_factory=lambda: [Path("/algorithm/src")]
-    )
-    """Paths that should be included so the code executes correctly"""
-
-    environment: Environment = field(default_factory=lambda: Environment())
-    """Mock of environment data"""
@@ -1,190 +0,0 @@
-from __future__ import annotations
-
-from dataclasses import InitVar, asdict, dataclass, field
-from logging import Logger
-from pathlib import Path
-from typing import Callable, Generic, TypeVar
-
-from oceanprotocol_job_details import JobDetails
-
-from ocean_runner.config import Config
-
-JobDetailsT = TypeVar("JobDetailsT")
-ResultT = TypeVar("ResultT")
-
-
-def default_error_callback(algorithm: Algorithm, e: Exception) -> None:
-    algorithm.logger.exception("Error during algorithm execution")
-    raise e
-
-
-def default_validation(algorithm: Algorithm) -> None:
-    algorithm.logger.info("Validating input using default validation")
-    assert algorithm.job_details.ddos, "DDOs missing"
-    assert algorithm.job_details.files, "Files missing"
-
-
-def default_save(*, result: ResultT, base: Path, algorithm: Algorithm) -> None:
-    algorithm.logger.info("Saving results using default save")
-    with open(base / "result.txt", "w+") as f:
-        f.write(str(result))
-
-
-@dataclass
-class Algorithm(Generic[JobDetailsT, ResultT]):
-    """
-    A configurable algorithm runner that behaves like a FastAPI app:
-    - You register `validate`, `run`, and `save_results` via decorators.
-    - You execute the full pipeline by calling `app()`.
-    """
-
-    config: InitVar[Config | None] = None
-    logger: Logger = field(init=False)
-    _job_details: JobDetails[JobDetailsT] = field(init=False)
-    _result: ResultT | None = field(default=None, init=False)
-
-    # Decorator-registered callbacks
-    _validate_fn: Callable[[Algorithm], None] | None = field(
-        default=None,
-        init=False,
-        repr=False,
-    )
-
-    _run_fn: Callable[[Algorithm], ResultT] | None = field(
-        default=None,
-        init=False,
-        repr=False,
-    )
-
-    _save_fn: Callable[[ResultT, Path, Algorithm], None] | None = field(
-        default=None,
-        init=False,
-        repr=False,
-    )
-
-    _error_callback: Callable[[Algorithm, Exception], None] | None = field(
-        default=None,
-        init=False,
-        repr=False,
-    )
-
-    def __post_init__(self, config: Config | None) -> None:
-        config: Config = config or Config()
-
-        # Configure logger
-        if config.logger:
-            self.logger = config.logger
-        else:
-            import logging
-
-            logging.basicConfig(
-                level=logging.DEBUG,
-                format="%(asctime)s | %(levelname)-8s | %(name)s | %(message)s",
-                datefmt="%Y-%m-%d %H:%M:%S",
-            )
-            self.logger = logging.getLogger("ocean_runner")
-
-        # Normalize base_dir
-        if isinstance(config.environment.base_dir, str):
-            config.environment.base_dir = Path(config.environment.base_dir)
-
-        # Extend sys.path for custom imports
-        if config.source_paths:
-            import sys
-
-            sys.path.extend([str(path.absolute()) for path in config.source_paths])
-            self.logger.debug(f"Added [{len(config.source_paths)}] entries to PATH")
-
-        self.config = config
-
-    class Error(RuntimeError): ...
-
-    @property
-    def job_details(self) -> JobDetails:
-        if not self._job_details:
-            raise Algorithm.Error("JobDetails not initialized or missing")
-        return self._job_details
-
-    @property
-    def result(self) -> ResultT:
-        if self._result is None:
-            raise Algorithm.Error("Result missing, run the algorithm first")
-        return self._result
-
-    # ---------------------------
-    # Decorators (FastAPI-style)
-    # ---------------------------
-
-    def validate(self, fn: Callable[[], None]) -> Callable[[], None]:
-        self._validate_fn = fn
-        return fn
-
-    def run(self, fn: Callable[[], ResultT]) -> Callable[[], ResultT]:
-        self._run_fn = fn
-        return fn
-
-    def save_results(self, fn: Callable[[ResultT, Path], None]) -> Callable:
-        self._save_fn = fn
-        return fn
-
-    def on_error(self, fn: Callable[[Exception], None]) -> Callable:
-        self._error_callback = fn
-        return fn
-
-    # ---------------------------
-    # Execution Pipeline
-    # ---------------------------
-
-    def __call__(self) -> ResultT | None:
-        """Executes the algorithm pipeline: validate → run → save_results."""
-        # Load job details
-        self._job_details = JobDetails.load(
-            _type=self.config.custom_input,
-            base_dir=self.config.environment.base_dir,
-            dids=self.config.environment.dids,
-            transformation_did=self.config.environment.transformation_did,
-            secret=self.config.environment.secret,
-        )
-
-        self.logger.info("Loaded JobDetails")
-        self.logger.debug(asdict(self.job_details))
-
-        try:
-            # Validation step
-            if self._validate_fn:
-                self.logger.info("Running custom validation...")
-                self._validate_fn()
-            else:
-                self.logger.info("Running default validation...")
-                default_validation(self)
-
-            # Run step
-            if self._run_fn:
-                self.logger.info("Running algorithm...")
-                self._result = self._run_fn()
-            else:
-                self.logger.warning("No run() function defined. Skipping execution.")
-                self._result = None
-
-            # Save step
-            if self._save_fn:
-                self.logger.info("Saving results...")
-                self._save_fn(
-                    self._result,
-                    self.job_details.paths.outputs,
-                )
-            else:
-                self.logger.info("No save_results() defined. Using default.")
-                default_save(
-                    result=self._result,
-                    base=self.job_details.paths.outputs,
-                    algorithm=self,
-                )
-
-        except Exception as e:
-            if self._error_callback:
-                self._error_callback(e)
-            else:
-                default_error_callback(self, e)
-
-        return self._result
.gitignore: file without changes
LICENSE: file without changes
README.md: file without changes