climate-ref 0.5.0__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- climate_ref/cli/__init__.py +18 -8
- climate_ref/cli/datasets.py +31 -27
- climate_ref/cli/executions.py +1 -1
- climate_ref/cli/providers.py +2 -4
- climate_ref/cli/solve.py +1 -2
- climate_ref/config.py +5 -6
- climate_ref/constants.py +1 -1
- climate_ref/database.py +1 -0
- climate_ref/dataset_registry/sample_data.txt +14 -0
- climate_ref/datasets/base.py +43 -39
- climate_ref/executor/__init__.py +4 -262
- climate_ref/executor/local.py +170 -37
- climate_ref/executor/result_handling.py +231 -0
- climate_ref/executor/synchronous.py +62 -0
- climate_ref/migrations/env.py +1 -0
- climate_ref/migrations/versions/2025-05-02T1418_341a4aa2551e_regenerate.py +0 -21
- climate_ref/migrations/versions/2025-05-09T2032_03dbb4998e49_series_metric_value.py +57 -0
- climate_ref/models/__init__.py +3 -1
- climate_ref/models/base.py +2 -0
- climate_ref/models/metric_value.py +138 -13
- climate_ref/provider_registry.py +1 -1
- climate_ref/solver.py +18 -30
- climate_ref/testing.py +11 -7
- {climate_ref-0.5.0.dist-info → climate_ref-0.5.1.dist-info}/METADATA +3 -1
- climate_ref-0.5.1.dist-info/RECORD +47 -0
- climate_ref-0.5.0.dist-info/RECORD +0 -44
- {climate_ref-0.5.0.dist-info → climate_ref-0.5.1.dist-info}/WHEEL +0 -0
- {climate_ref-0.5.0.dist-info → climate_ref-0.5.1.dist-info}/entry_points.txt +0 -0
- {climate_ref-0.5.0.dist-info → climate_ref-0.5.1.dist-info}/licenses/LICENCE +0 -0
- {climate_ref-0.5.0.dist-info → climate_ref-0.5.1.dist-info}/licenses/NOTICE +0 -0
climate_ref/executor/local.py
CHANGED
@@ -1,42 +1,128 @@
+import concurrent.futures
+import time
+from concurrent.futures import Future, ProcessPoolExecutor
 from typing import Any
 
+from attrs import define
 from loguru import logger
+from tqdm import tqdm
 
 from climate_ref.config import Config
 from climate_ref.database import Database
-from climate_ref.executor import handle_execution_result
 from climate_ref.models import Execution
-from climate_ref_core.diagnostics import
-from climate_ref_core.
-from climate_ref_core.
+from climate_ref_core.diagnostics import ExecutionDefinition, ExecutionResult
+from climate_ref_core.exceptions import ExecutionError
+from climate_ref_core.executor import execute_locally
+from climate_ref_core.logging import add_log_handler
+
+from .result_handling import handle_execution_result
+
+
+def process_result(
+    config: Config, database: Database, result: ExecutionResult, execution: Execution | None
+) -> None:
+    """
+    Process the result of a diagnostic execution
+
+    Parameters
+    ----------
+    config
+        The configuration object
+    database
+        The database object
+    result
+        The result of the diagnostic execution.
+
+        This could have either been a success or a failure.
+    execution
+        A database model representing the execution of the diagnostic.
+    """
+    if not result.successful:
+        if execution is not None:  # pragma: no branch
+            info_msg = (
+                f"\nAdditional information about this execution can be viewed using: "
+                f"ref executions inspect {execution.execution_group_id}"
+            )
+        else:
+            info_msg = ""
+
+        logger.exception(f"Error running {result.definition.execution_slug()}. {info_msg}")
+
+    if execution:
+        handle_execution_result(config, database, execution, result)
+
+
+@define
+class ExecutionFuture:
+    """
+    A container to hold the future and execution definition
+    """
+
+    future: Future[ExecutionResult]
+    definition: ExecutionDefinition
+    execution_id: int | None = None
+
+
+def _process_initialiser() -> None:
+    # Set up the logging for the process
+    # This replaces the loguru default handler
+    try:
+        add_log_handler()
+    except Exception as e:
+        # Don't raise an exception here as that would kill the process pool
+        # We want to log the error and continue
+        logger.error(f"Failed to add log handler: {e}")
+
+
+def _process_run(definition: ExecutionDefinition, log_level: str) -> ExecutionResult:
+    # This is a catch-all for any exceptions that occur in the process
+    try:
+        return execute_locally(definition=definition, log_level=log_level)
+    except Exception:  # pragma: no cover
+        # This isn't expected, but if it happens we want to log the error before the process exits
+        logger.exception("Error running diagnostic")
+        # This will kill the process pool
+        raise
 
 
 class LocalExecutor:
     """
-    Run a diagnostic locally
+    Run a diagnostic locally using a process pool.
 
-    This
-    The
-
+    This performs the diagnostic executions in parallel using different processes.
+    The maximum number of processes is determined by the `n` parameter and defaults to the number of CPUs.
+
+    This executor is the default executor and is used when no other executor is specified.
     """
 
     name = "local"
 
     def __init__(
-        self,
+        self,
+        *,
+        database: Database | None = None,
+        config: Config | None = None,
+        n: int | None = None,
+        pool: concurrent.futures.Executor | None = None,
+        **kwargs: Any,
     ) -> None:
         if config is None:
             config = Config.default()
         if database is None:
             database = Database.from_config(config, run_migrations=False)
+        self.n = n
 
         self.database = database
         self.config = config
 
+        if pool is not None:
+            self.pool = pool
+        else:
+            self.pool = ProcessPoolExecutor(max_workers=n, initializer=_process_initialiser)
+        self._results: list[ExecutionFuture] = []
+
     def run(
         self,
-        provider: DiagnosticProvider,
-        diagnostic: Diagnostic,
         definition: ExecutionDefinition,
         execution: Execution | None = None,
     ) -> None:
@@ -45,45 +131,92 @@ class LocalExecutor:
 
         Parameters
         ----------
-        provider
-            The provider of the diagnostic
-        diagnostic
-            Diagnostic to run
         definition
             A description of the information needed for this execution of the diagnostic
         execution
             A database model representing the execution of the diagnostic.
             If provided, the result will be updated in the database when completed.
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        logger.exception(f"Error running diagnostic {diagnostic.slug}. {info_msg}")
-        result = ExecutionResult.build_from_failure(definition)
-
-        if execution:
-            handle_execution_result(self.config, self.database, execution, result)
+        # Submit the execution to the process pool
+        # and track the future so we can wait for it to complete
+        future = self.pool.submit(
+            _process_run,
+            definition=definition,
+            log_level=self.config.log_level,
+        )
+        self._results.append(
+            ExecutionFuture(
+                future=future,
+                definition=definition,
+                execution_id=execution.id if execution else None,
+            )
+        )
 
     def join(self, timeout: float) -> None:
         """
         Wait for all diagnostics to finish
 
-        This
+        This will block until all diagnostics have completed or the timeout is reached.
+        If the timeout is reached, a TimeoutError is raised.
 
         Parameters
         ----------
         timeout
-            Timeout in seconds
+            Timeout in seconds
+
+        Raises
+        ------
+        TimeoutError
+            If the timeout is reached
         """
-
+        start_time = time.time()
+        refresh_time = 0.5  # Time to wait between checking for completed tasks in seconds
+
+        results = self._results
+        t = tqdm(total=len(results), desc="Waiting for executions to complete", unit="execution")
+
+        try:
+            while results:
+                # Iterate over a copy of the list and remove finished tasks
+                for result in results[:]:
+                    if result.future.done():
+                        try:
+                            execution_result = result.future.result(timeout=0)
+                        except Exception as e:
+                            # Something went wrong when attempting to run the execution
+                            # This is likely a failure in the execution machinery, not the diagnostic
+                            raise ExecutionError(
+                                f"Failed to execute {result.definition.execution_slug()!r}"
+                            ) from e
+
+                        assert execution_result is not None, "Execution result should not be None"
+                        assert isinstance(execution_result, ExecutionResult), (
+                            "Execution result should be of type ExecutionResult"
+                        )
+
+                        # Process the result in the main process
+                        # The results should be committed after each execution
+                        with self.database.session.begin():
+                            execution = (
+                                self.database.session.get(Execution, result.execution_id)
+                                if result.execution_id
+                                else None
+                            )
+                            process_result(self.config, self.database, result.future.result(), execution)
+                        logger.debug(f"Execution completed: {result}")
+                        t.update(n=1)
+                        results.remove(result)
+
+                # Break early to avoid waiting for one more sleep cycle
+                if len(results) == 0:
+                    break
+
+                elapsed_time = time.time() - start_time
+
+                if elapsed_time > timeout:
+                    raise TimeoutError("Not all tasks completed within the specified timeout")
+
+                # Wait for a short time before checking for completed executions
+                time.sleep(refresh_time)
        finally:
            t.close()
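
Note: run() no longer executes the diagnostic inline; it submits _process_run to a
ProcessPoolExecutor, and join() polls the futures so each completed execution can be
committed to the database individually. A minimal standalone sketch of that
submit-then-poll pattern (the _square task and join_all helper below are toy
stand-ins, not part of climate_ref):

import time
from concurrent.futures import Future, ProcessPoolExecutor


def _square(x: int) -> int:
    # Toy stand-in for _process_run; any picklable callable works
    return x * x


def join_all(futures: list[Future[int]], timeout: float, refresh_time: float = 0.5) -> list[int]:
    # Mirrors the LocalExecutor.join loop: poll for completed futures,
    # harvest their results, and enforce an overall timeout
    start_time = time.time()
    completed: list[int] = []
    while futures:
        for future in futures[:]:
            if future.done():
                completed.append(future.result(timeout=0))
                futures.remove(future)
        if not futures:
            break
        if time.time() - start_time > timeout:
            raise TimeoutError("Not all tasks completed within the specified timeout")
        time.sleep(refresh_time)
    return completed


if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(_square, n) for n in range(4)]
        print(join_all(futures, timeout=30.0))  # e.g. [0, 1, 4, 9]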
climate_ref/executor/result_handling.py
ADDED
@@ -0,0 +1,231 @@
+"""
+Execute diagnostics in different environments
+
+We support running diagnostics in different environments, such as locally,
+in a separate process, or in a container.
+These environments are represented by `climate_ref.executor.Executor` classes.
+
+The simplest executor is the `LocalExecutor`, which runs the diagnostic in the same process.
+This is useful for local testing and debugging.
+"""
+
+import pathlib
+import shutil
+from typing import TYPE_CHECKING
+
+from loguru import logger
+from sqlalchemy import insert
+
+from climate_ref.database import Database
+from climate_ref.models import ScalarMetricValue
+from climate_ref.models.execution import Execution, ExecutionOutput, ResultOutputType
+from climate_ref_core.diagnostics import ExecutionResult, ensure_relative_path
+from climate_ref_core.exceptions import ResultValidationError
+from climate_ref_core.logging import EXECUTION_LOG_FILENAME
+from climate_ref_core.pycmec.controlled_vocabulary import CV
+from climate_ref_core.pycmec.metric import CMECMetric
+from climate_ref_core.pycmec.output import CMECOutput, OutputDict
+
+if TYPE_CHECKING:
+    from climate_ref.config import Config
+
+
+def _copy_file_to_results(
+    scratch_directory: pathlib.Path,
+    results_directory: pathlib.Path,
+    fragment: pathlib.Path | str,
+    filename: pathlib.Path | str,
+) -> None:
+    """
+    Copy a file from the scratch directory to the executions directory
+
+    Parameters
+    ----------
+    scratch_directory
+        The directory where the file is currently located
+    results_directory
+        The directory where the file should be copied to
+    fragment
+        The fragment of the executions directory where the file should be copied
+    filename
+        The name of the file to be copied
+    """
+    assert results_directory != scratch_directory
+    input_directory = scratch_directory / fragment
+    output_directory = results_directory / fragment
+
+    filename = ensure_relative_path(filename, input_directory)
+
+    if not (input_directory / filename).exists():
+        raise FileNotFoundError(f"Could not find {filename} in {input_directory}")
+
+    output_filename = output_directory / filename
+    output_filename.parent.mkdir(parents=True, exist_ok=True)
+
+    shutil.copy(input_directory / filename, output_filename)
+
+
+def handle_execution_result(
+    config: "Config",
+    database: Database,
+    execution: Execution,
+    result: "ExecutionResult",
+) -> None:
+    """
+    Handle the result of a diagnostic execution
+
+    This will update the diagnostic execution result with the output of the diagnostic execution.
+    The output will be copied from the scratch directory to the executions directory.
+
+    Parameters
+    ----------
+    config
+        The configuration to use
+    database
+        The active database session to use
+    execution
+        The diagnostic execution result DB object to update
+    result
+        The result of the diagnostic execution, either successful or failed
+    """
+    # Always copy log data
+    _copy_file_to_results(
+        config.paths.scratch,
+        config.paths.results,
+        execution.output_fragment,
+        EXECUTION_LOG_FILENAME,
+    )
+
+    if result.successful and result.metric_bundle_filename is not None:
+        logger.info(f"{execution} successful")
+
+        _copy_file_to_results(
+            config.paths.scratch,
+            config.paths.results,
+            execution.output_fragment,
+            result.metric_bundle_filename,
+        )
+        execution.mark_successful(result.as_relative_path(result.metric_bundle_filename))
+
+        if result.output_bundle_filename:
+            _copy_file_to_results(
+                config.paths.scratch,
+                config.paths.results,
+                execution.output_fragment,
+                result.output_bundle_filename,
+            )
+            _handle_output_bundle(
+                config,
+                database,
+                execution,
+                result.to_output_path(result.output_bundle_filename),
+            )
+
+        cmec_metric_bundle = CMECMetric.load_from_json(result.to_output_path(result.metric_bundle_filename))
+
+        # Check that the diagnostic values conform with the controlled vocabulary
+        try:
+            cv = CV.load_from_file(config.paths.dimensions_cv)
+            cv.validate_metrics(cmec_metric_bundle)
+        except (ResultValidationError, AssertionError):
+            logger.exception("Diagnostic values do not conform with the controlled vocabulary")
+            # TODO: Mark the diagnostic execution result as failed once the CV has stabilised
+            # execution.mark_failed()
+
+        # Perform a bulk insert of scalar values
+        # TODO: The section below will likely fail until we have agreed on a controlled vocabulary
+        # The current implementation will swallow the exception, but display a log message
+        try:
+            # Perform this in a nested transaction to (hopefully) gracefully roll back if something
+            # goes wrong
+            with database.session.begin_nested():
+                database.session.execute(
+                    insert(ScalarMetricValue),
+                    [
+                        {
+                            "execution_id": execution.id,
+                            "value": result.value,
+                            "attributes": result.attributes,
+                            **result.dimensions,
+                        }
+                        for result in cmec_metric_bundle.iter_results()
+                    ],
+                )
+        except Exception:
+            # TODO: Remove once we have settled on a controlled vocabulary
+            logger.exception("Something went wrong when ingesting diagnostic values")
+
+        # TODO: Ingest the series values
+
+        # TODO: This should check if the result is the most recent for the execution,
+        # if so then update the dirty fields
+        # i.e. if there are outstanding executions, don't mark as clean
+        execution.execution_group.dirty = False
+    else:
+        logger.error(f"{execution} failed")
+        execution.mark_failed()
+
+
+def _handle_output_bundle(
+    config: "Config",
+    database: Database,
+    execution: Execution,
+    cmec_output_bundle_filename: pathlib.Path,
+) -> None:
+    # Extract the registered outputs
+    # Copy the content to the output directory
+    # Track in the db
+    cmec_output_bundle = CMECOutput.load_from_json(cmec_output_bundle_filename)
+    _handle_outputs(
+        cmec_output_bundle.plots,
+        output_type=ResultOutputType.Plot,
+        config=config,
+        database=database,
+        execution=execution,
+    )
+    _handle_outputs(
+        cmec_output_bundle.data,
+        output_type=ResultOutputType.Data,
+        config=config,
+        database=database,
+        execution=execution,
+    )
+    _handle_outputs(
+        cmec_output_bundle.html,
+        output_type=ResultOutputType.HTML,
+        config=config,
+        database=database,
+        execution=execution,
+    )
+
+
+def _handle_outputs(
+    outputs: dict[str, OutputDict] | None,
+    output_type: ResultOutputType,
+    config: "Config",
+    database: Database,
+    execution: Execution,
+) -> None:
+    outputs = outputs or {}
+
+    for key, output_info in outputs.items():
+        filename = ensure_relative_path(
+            output_info.filename, config.paths.scratch / execution.output_fragment
+        )
+
+        _copy_file_to_results(
+            config.paths.scratch,
+            config.paths.results,
+            execution.output_fragment,
+            filename,
+        )
+        database.session.add(
+            ExecutionOutput(
+                execution_id=execution.id,
+                output_type=output_type,
+                filename=str(filename),
+                description=output_info.description,
+                short_name=key,
+                long_name=output_info.long_name,
+            )
+        )
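
Note: the scalar ingestion above relies on SQLAlchemy 2.0's executemany-style bulk
insert, wrapped in a SAVEPOINT via session.begin_nested() so that a failed insert
rolls back without aborting the surrounding transaction. A standalone sketch of
that pattern (the Value model and in-memory SQLite engine are illustrative
assumptions, not climate_ref code):

import sqlalchemy as sa
from sqlalchemy import insert
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    pass


class Value(Base):
    # Toy stand-in for ScalarMetricValue
    __tablename__ = "value"

    id: Mapped[int] = mapped_column(primary_key=True)
    value: Mapped[float]


engine = sa.create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    # begin_nested() opens a SAVEPOINT: if the bulk insert raises,
    # only this block is rolled back, not the outer transaction
    with session.begin_nested():
        session.execute(insert(Value), [{"value": v} for v in (1.0, 2.0, 3.0)])
    session.commit()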
climate_ref/executor/synchronous.py
ADDED
@@ -0,0 +1,62 @@
+from typing import Any
+
+from climate_ref.config import Config
+from climate_ref.database import Database
+from climate_ref.executor.local import process_result
+from climate_ref.models import Execution
+from climate_ref_core.diagnostics import ExecutionDefinition
+from climate_ref_core.executor import execute_locally
+
+
+class SynchronousExecutor:
+    """
+    Run a diagnostic synchronously, in-process.
+
+    This is mainly useful for debugging and testing.
+    [climate_ref.executor.LocalExecutor][] is a more general-purpose executor.
+    """
+
+    name = "synchronous"
+
+    def __init__(
+        self, *, database: Database | None = None, config: Config | None = None, **kwargs: Any
+    ) -> None:
+        if config is None:
+            config = Config.default()
+        if database is None:
+            database = Database.from_config(config, run_migrations=False)
+
+        self.database = database
+        self.config = config
+
+    def run(
+        self,
+        definition: ExecutionDefinition,
+        execution: Execution | None = None,
+    ) -> None:
+        """
+        Run a diagnostic in process
+
+        Parameters
+        ----------
+        definition
+            A description of the information needed for this execution of the diagnostic
+        execution
+            A database model representing the execution of the diagnostic.
+            If provided, the result will be updated in the database when completed.
+        """
+        result = execute_locally(definition, log_level=self.config.log_level)
+        process_result(self.config, self.database, result, execution)
+
+    def join(self, timeout: float) -> None:
+        """
+        Wait for all diagnostics to finish
+
+        This returns immediately because the executor runs diagnostics synchronously.
+
+        Parameters
+        ----------
+        timeout
+            Timeout in seconds (not used)
+        """
+        pass
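
Note: SynchronousExecutor deliberately mirrors LocalExecutor's call surface
(name, run(definition, execution=None), join(timeout)), so the two are
interchangeable wherever an executor is configured. A sketch of that implicit
interface as a typing.Protocol (illustrative only; climate_ref_core ships its
own executor abstractions, which this does not reproduce):

from typing import Protocol

from climate_ref.models import Execution
from climate_ref_core.diagnostics import ExecutionDefinition


class ExecutorLike(Protocol):
    # Structural interface shared by LocalExecutor and SynchronousExecutor
    name: str

    def run(self, definition: ExecutionDefinition, execution: Execution | None = None) -> None: ...

    def join(self, timeout: float) -> None: ...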
climate_ref/migrations/versions/2025-05-02T1418_341a4aa2551e_regenerate.py
CHANGED
@@ -235,38 +235,17 @@ def upgrade() -> None:
         sa.Column("attributes", sa.JSON(), nullable=False),
         sa.Column("created_at", sa.DateTime(), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=False),
         sa.Column("updated_at", sa.DateTime(), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=False),
-        sa.Column("model", sa.Text(), nullable=True),
-        sa.Column("source_id", sa.Text(), nullable=True),
-        sa.Column("variant_label", sa.Text(), nullable=True),
-        sa.Column("metric", sa.Text(), nullable=True),
-        sa.Column("region", sa.Text(), nullable=True),
-        sa.Column("statistic", sa.Text(), nullable=True),
         sa.ForeignKeyConstraint(
             ["execution_id"], ["execution.id"], name=op.f("fk_metric_value_execution_id_execution")
         ),
         sa.PrimaryKeyConstraint("id", name=op.f("pk_metric_value")),
     )
-    with op.batch_alter_table("metric_value", schema=None) as batch_op:
-        batch_op.create_index(batch_op.f("ix_metric_value_metric"), ["metric"], unique=False)
-        batch_op.create_index(batch_op.f("ix_metric_value_model"), ["model"], unique=False)
-        batch_op.create_index(batch_op.f("ix_metric_value_region"), ["region"], unique=False)
-        batch_op.create_index(batch_op.f("ix_metric_value_source_id"), ["source_id"], unique=False)
-        batch_op.create_index(batch_op.f("ix_metric_value_statistic"), ["statistic"], unique=False)
-        batch_op.create_index(batch_op.f("ix_metric_value_variant_label"), ["variant_label"], unique=False)
 
     # ### end Alembic commands ###
 
 
 def downgrade() -> None:
     # ### commands auto generated by Alembic - please adjust! ###
-    with op.batch_alter_table("metric_value", schema=None) as batch_op:
-        batch_op.drop_index(batch_op.f("ix_metric_value_variant_label"))
-        batch_op.drop_index(batch_op.f("ix_metric_value_statistic"))
-        batch_op.drop_index(batch_op.f("ix_metric_value_source_id"))
-        batch_op.drop_index(batch_op.f("ix_metric_value_region"))
-        batch_op.drop_index(batch_op.f("ix_metric_value_model"))
-        batch_op.drop_index(batch_op.f("ix_metric_value_metric"))
-
     op.drop_table("metric_value")
     with op.batch_alter_table("execution_output", schema=None) as batch_op:
         batch_op.drop_index(batch_op.f("ix_execution_output_output_type"))
climate_ref/migrations/versions/2025-05-09T2032_03dbb4998e49_series_metric_value.py
ADDED
@@ -0,0 +1,57 @@
+"""series-metric-value
+
+Revision ID: 03dbb4998e49
+Revises: 341a4aa2551e
+Create Date: 2025-05-09 20:32:08.664426
+
+"""
+
+from collections.abc import Sequence
+from typing import Union
+
+import sqlalchemy as sa
+from alembic import op
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision: str = "03dbb4998e49"
+down_revision: Union[str, None] = "341a4aa2551e"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("metric_value", schema=None) as batch_op:
+        batch_op.add_column(sa.Column("values", sa.JSON(), nullable=True))
+        batch_op.add_column(sa.Column("index", sa.JSON(), nullable=True))
+        batch_op.add_column(sa.Column("index_name", sa.String(), nullable=True))
+        batch_op.alter_column("value", existing_type=sa.FLOAT(), nullable=True)
+
+    if sa.inspect(op.get_bind()).dialect.name == "postgresql":
+        sa.Enum("SCALAR", "SERIES", name="metricvaluetype").create(op.get_bind())
+        op.add_column(
+            "metric_value",
+            sa.Column(
+                "type",
+                postgresql.ENUM("SCALAR", "SERIES", name="metricvaluetype", create_type=False),
+                nullable=False,
+            ),
+        )
+    else:
+        with op.batch_alter_table("metric_value", schema=None) as batch_op:
+            batch_op.add_column(
+                sa.Column("type", sa.Enum("SCALAR", "SERIES", name="metricvaluetype"), nullable=False)
+            )
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("metric_value", schema=None) as batch_op:
+        batch_op.alter_column("value", existing_type=sa.FLOAT(), nullable=False)
+        batch_op.drop_column("index_name")
+        batch_op.drop_column("index")
+        batch_op.drop_column("values")
+        batch_op.drop_column("type")
+
+    # ### end Alembic commands ###
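
Note: on PostgreSQL the enum type must be created explicitly before the column that
uses it is added (hence create_type=False), while the batch-mode branch covers
SQLite, where sa.Enum is emulated with a CHECK constraint. After the migration,
rows are discriminated by the new type column; the sketch below shows how the
columns might be populated (the values/index semantics are an assumption inferred
from the column names, not confirmed by this diff):

# Hypothetical metric_value rows after the 03dbb4998e49 migration (illustrative only)
scalar_row = {
    "type": "SCALAR",
    "value": 0.42,  # still populated for scalars; the column is now nullable
    "values": None,
    "index": None,
    "index_name": None,
}
series_row = {
    "type": "SERIES",
    "value": None,
    "values": [0.1, 0.2, 0.3],  # assumed: JSON-encoded series of points
    "index": [2000, 2001, 2002],  # assumed: JSON labels for each point
    "index_name": "year",
}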
climate_ref/models/__init__.py
CHANGED
@@ -14,7 +14,7 @@ from climate_ref.models.execution import (
     ExecutionGroup,
     ExecutionOutput,
 )
-from climate_ref.models.metric_value import MetricValue
+from climate_ref.models.metric_value import MetricValue, ScalarMetricValue, SeriesMetricValue
 from climate_ref.models.provider import Provider
 
 Table = TypeVar("Table", bound=Base)
@@ -29,5 +29,7 @@ __all__ = [
     "ExecutionOutput",
     "MetricValue",
     "Provider",
+    "ScalarMetricValue",
+    "SeriesMetricValue",
     "Table",
 ]
climate_ref/models/base.py
CHANGED