climate-ref 0.6.0__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
climate_ref/executor/__init__.py
@@ -9,8 +9,9 @@ The simplest executor is the `LocalExecutor`, which runs the diagnostic in the s
  This is useful for local testing and debugging.
  """

+ from .hpc import HPCExecutor
  from .local import LocalExecutor
  from .result_handling import handle_execution_result
  from .synchronous import SynchronousExecutor

- __all__ = ["LocalExecutor", "SynchronousExecutor", "handle_execution_result"]
+ __all__ = ["HPCExecutor", "LocalExecutor", "SynchronousExecutor", "handle_execution_result"]
climate_ref/executor/hpc.py ADDED
@@ -0,0 +1,308 @@
+ """
+ HPC-based executor that uses job schedulers.
+
+ Use this executor if you want to:
+
+ - run REF under HPC workflows
+ - run REF across multiple nodes
+ """
+
+ import os
+ import time
+ from typing import Any, cast
+
+ import parsl
+ from loguru import logger
+ from parsl import python_app
+ from parsl.config import Config as ParslConfig
+ from parsl.executors import HighThroughputExecutor
+ from parsl.launchers import SrunLauncher
+ from parsl.providers import SlurmProvider
+ from tqdm import tqdm
+
+ from climate_ref.config import Config
+ from climate_ref.database import Database
+ from climate_ref.models import Execution
+ from climate_ref.slurm import HAS_REAL_SLURM, SlurmChecker
+ from climate_ref_core.diagnostics import ExecutionDefinition, ExecutionResult
+ from climate_ref_core.exceptions import DiagnosticError, ExecutionError
+ from climate_ref_core.executor import execute_locally
+
+ from .local import ExecutionFuture, process_result
+
+
+ @python_app
+ def _process_run(definition: ExecutionDefinition, log_level: str) -> ExecutionResult:
+     """Run the function on compute nodes"""
+     # This is a catch-all for any exceptions that occur in the process and need to raise for
+     # parsl retries to work
+     try:
+         return execute_locally(definition=definition, log_level=log_level, raise_error=True)
+     except DiagnosticError as e:  # pragma: no cover
+         # any diagnostic error will be caught here
+         logger.exception("Error running diagnostic")
+         return cast(ExecutionResult, e.result)
+
+
+ def _to_float(x: Any) -> float | None:
+     if x is None:
+         return None
+     if isinstance(x, int | float):
+         return float(x)
+     try:
+         return float(x)
+     except (ValueError, TypeError):
+         return None
+
+
+ def _to_int(x: Any) -> int | None:
+     if x is None:
+         return None
+     if isinstance(x, int):
+         return x
+     try:
+         return int(float(x))  # Handles both "123" and "123.0"
+     except (ValueError, TypeError):
+         return None
+
+
+ class HPCExecutor:
+     """
+     Run diagnostics by submitting a job script to a scheduler
+     """
+
+     name = "hpc"
+
+     def __init__(
+         self,
+         *,
+         database: Database | None = None,
+         config: Config | None = None,
+         **executor_config: str | float | int,
+     ) -> None:
+         config = config or Config.default()
+         database = database or Database.from_config(config, run_migrations=False)
+
+         self.config = config
+         self.database = database
+
+         self.scheduler = executor_config.get("scheduler", "slurm")
+         self.account = str(executor_config.get("account", os.environ.get("USER")))
+         self.username = executor_config.get("username", os.environ.get("USER"))
+         self.partition = str(executor_config.get("partition")) if executor_config.get("partition") else None
+         self.qos = str(executor_config.get("qos")) if executor_config.get("qos") else None
+         self.req_nodes = int(executor_config.get("req_nodes", 1))
+         self.walltime = str(executor_config.get("walltime", "00:10:00"))
+         self.log_dir = str(executor_config.get("log_dir", "runinfo"))
+
+         self.cores_per_worker = _to_int(executor_config.get("cores_per_worker"))
+         self.mem_per_worker = _to_float(executor_config.get("mem_per_worker"))
+
+         hours, minutes, seconds = map(int, self.walltime.split(":"))
+         total_minutes = hours * 60 + minutes + seconds / 60
+         self.total_minutes = total_minutes
+
+         if executor_config.get("validation") and HAS_REAL_SLURM:
+             self._validate_slurm_params()
+
+         self._initialize_parsl()
+
+         self.parsl_results: list[ExecutionFuture] = []
+
+     def _validate_slurm_params(self) -> None:
+         """Validate the Slurm configuration using SlurmChecker.
+
+         Raises
+         ------
+         ValueError: If account, partition or QOS are invalid or inaccessible.
+         """
+         slurm_checker = SlurmChecker()
+         if self.account and not slurm_checker.get_account_info(self.account):
+             raise ValueError(f"Account: {self.account} not valid")
+
+         partition_limits = None
+         node_info = None
+
+         if self.partition:
+             if not slurm_checker.get_partition_info(self.partition):
+                 raise ValueError(f"Partition: {self.partition} not valid")
+
+             if not slurm_checker.can_account_use_partition(self.account, self.partition):
+                 raise ValueError(f"Account: {self.account} cannot access partition: {self.partition}")
+
+             partition_limits = slurm_checker.get_partition_limits(self.partition)
+             node_info = slurm_checker.get_node_from_partition(self.partition)
+
+         qos_limits = None
+         if self.qos:
+             if not slurm_checker.get_qos_info(self.qos):
+                 raise ValueError(f"QOS: {self.qos} not valid")
+
+             if not slurm_checker.can_account_use_qos(self.account, self.qos):
+                 raise ValueError(f"Account: {self.account} cannot access qos: {self.qos}")
+
+             qos_limits = slurm_checker.get_qos_limits(self.qos)
+
+         max_cores_per_node = int(node_info["cpus"]) if node_info else None
+         if max_cores_per_node and self.cores_per_worker:
+             if self.cores_per_worker > max_cores_per_node:
+                 raise ValueError(
+                     f"cores_per_worker: {self.cores_per_worker} is "
+                     f"larger than the maximum cores in a node ({max_cores_per_node})"
+                 )
+
+         max_mem_per_node = float(node_info["real_memory"]) if node_info else None
+         if max_mem_per_node and self.mem_per_worker:
+             if self.mem_per_worker > max_mem_per_node:
+                 raise ValueError(
+                     f"mem_per_worker: {self.mem_per_worker} is "
+                     f"larger than the maximum memory in a node ({max_mem_per_node})"
+                 )
+
+         max_walltime_partition = (
+             partition_limits["max_time_minutes"] if partition_limits else self.total_minutes
+         )
+         max_walltime_qos = qos_limits["max_time_minutes"] if qos_limits else self.total_minutes
+
+         max_walltime_minutes = min(float(max_walltime_partition), float(max_walltime_qos))
+
+         if self.total_minutes > float(max_walltime_minutes):
+             raise ValueError(
+                 f"Walltime: {self.walltime} exceeds the maximum time "
+                 f"{max_walltime_minutes} allowed by {self.partition} and {self.qos}"
+             )
+
+     def _initialize_parsl(self) -> None:
+         executor_config = self.config.executor.config
+
+         provider = SlurmProvider(
+             account=self.account,
+             partition=self.partition,
+             qos=self.qos,
+             nodes_per_block=self.req_nodes,
+             max_blocks=int(executor_config.get("max_blocks", 1)),
+             scheduler_options=executor_config.get("scheduler_options", "#SBATCH -C cpu"),
+             worker_init=executor_config.get("worker_init", "source .venv/bin/activate"),
+             launcher=SrunLauncher(
+                 debug=True,
+                 overrides=executor_config.get("overrides", ""),
+             ),
+             walltime=self.walltime,
+             cmd_timeout=int(executor_config.get("cmd_timeout", 120)),
+         )
+         executor = HighThroughputExecutor(
+             label="ref_hpc_executor",
+             cores_per_worker=self.cores_per_worker if self.cores_per_worker else 1,
+             mem_per_worker=self.mem_per_worker,
+             max_workers_per_node=_to_int(executor_config.get("max_workers_per_node", 16)),
+             cpu_affinity=str(executor_config.get("cpu_affinity")),
+             provider=provider,
+         )
+
+         hpc_config = ParslConfig(
+             run_dir=self.log_dir, executors=[executor], retries=int(executor_config.get("retries", 2))
+         )
+         parsl.load(hpc_config)
+
+     def run(
+         self,
+         definition: ExecutionDefinition,
+         execution: Execution | None = None,
+     ) -> None:
+         """
+         Run a diagnostic asynchronously via parsl
+
+         Parameters
+         ----------
+         definition
+             A description of the information needed for this execution of the diagnostic
+         execution
+             A database model representing the execution of the diagnostic.
+             If provided, the result will be updated in the database when completed.
+         """
+         # Submit the execution to the parsl executor
+         # and track the future so we can wait for it to complete
+         future = _process_run(
+             definition=definition,
+             log_level=self.config.log_level,
+         )
+
+         self.parsl_results.append(
+             ExecutionFuture(
+                 future=future,
+                 definition=definition,
+                 execution_id=execution.id if execution else None,
+             )
+         )
+
+     def join(self, timeout: float) -> None:
+         """
+         Wait for all diagnostics to finish
+
+         This will block until all diagnostics have completed; the ``timeout``
+         argument is accepted for interface compatibility but is not used.
+
+         Parameters
+         ----------
+         timeout
+             Timeout in seconds (not used by HPCExecutor)
+
+         Raises
+         ------
+         ExecutionError
+             If an execution failed to run
+         """
+         start_time = time.time()
+         refresh_time = 0.5
+
+         results = self.parsl_results
+         t = tqdm(total=len(results), desc="Waiting for executions to complete", unit="execution")
+
+         try:
+             while results:
+                 # Iterate over a copy of the list and remove finished tasks
+                 for result in results[:]:
+                     if result.future.done():
+                         try:
+                             execution_result = result.future.result(timeout=0)
+                         except Exception as e:
+                             # Something went wrong when attempting to run the execution
+                             # This is likely a failure in the execution itself, not the diagnostic
+                             raise ExecutionError(
+                                 f"Failed to execute {result.definition.execution_slug()!r}"
+                             ) from e
+
+                         assert execution_result is not None, "Execution result should not be None"
+                         assert isinstance(execution_result, ExecutionResult), (
+                             "Execution result should be of type ExecutionResult"
+                         )
+
+                         # Process the result in the main process
+                         # The results should be committed after each execution
+                         with self.database.session.begin():
+                             execution = (
+                                 self.database.session.get(Execution, result.execution_id)
+                                 if result.execution_id
+                                 else None
+                             )
+                             process_result(self.config, self.database, result.future.result(), execution)
+                         logger.debug(f"Execution completed: {result}")
+                         t.update(n=1)
+                         results.remove(result)
+
+                 # Break early to avoid waiting for one more sleep cycle
+                 if len(results) == 0:
+                     break
+
+                 elapsed_time = time.time() - start_time
+
+                 if elapsed_time > self.total_minutes * 60:
+                     logger.debug(f"Time elapsed {elapsed_time}s while waiting for results")
+
+                 # Wait for a short time before checking for completed executions
+                 time.sleep(refresh_time)
+         finally:
+             t.close()
+             if parsl.dfk():
+                 parsl.dfk().cleanup()
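
Putting `run` and `join` together, and continuing the constructor sketch above, a hypothetical driver loop might look like the following; `definitions` is assumed to be an iterable of `ExecutionDefinition` objects produced elsewhere by REF:

    # Hypothetical driver loop; `definitions` is an assumed input.
    for definition in definitions:
        executor.run(definition)  # submits one parsl app per diagnostic

    executor.join(timeout=0)  # blocks until all futures complete; timeout is unused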
climate_ref/executor/local.py
@@ -1,4 +1,5 @@
  import concurrent.futures
+ import multiprocessing
  import time
  from concurrent.futures import Future, ProcessPoolExecutor
  from typing import Any
@@ -124,7 +125,12 @@ class LocalExecutor:
          if pool is not None:
              self.pool = pool
          else:
-             self.pool = ProcessPoolExecutor(max_workers=n, initializer=_process_initialiser)
+             self.pool = ProcessPoolExecutor(
+                 max_workers=n,
+                 initializer=_process_initialiser,
+                 # Explicitly set the context to "spawn" to avoid issues with hanging on macOS
+                 mp_context=multiprocessing.get_context("spawn"),
+             )
          self._results: list[ExecutionFuture] = []

      def run(
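
The switch to an explicit "spawn" context sidesteps fork-related deadlocks on macOS. The effect is easy to demonstrate in isolation; this standalone sketch is illustrative only and not part of the package:

    import multiprocessing
    from concurrent.futures import ProcessPoolExecutor


    def square(x: int) -> int:
        return x * x


    if __name__ == "__main__":
        # "spawn" starts each worker in a fresh interpreter rather than fork()ing
        # the parent, avoiding inherited locks that can hang workers on macOS.
        ctx = multiprocessing.get_context("spawn")
        with ProcessPoolExecutor(max_workers=2, mp_context=ctx) as pool:
            print(list(pool.map(square, range(4))))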
climate_ref/slurm.py ADDED
@@ -0,0 +1,192 @@
+ import importlib.util
+ from typing import Any
+
+ HAS_REAL_SLURM = importlib.util.find_spec("pyslurm") is not None
+
+
+ class SlurmChecker:
+     """Check and get Slurm settings."""
+
+     def __init__(self, intest: bool = False) -> None:
+         if HAS_REAL_SLURM:
+             import pyslurm  # type: ignore
+
+             self.slurm_association: dict[int, Any] | None = pyslurm.db.Associations.load()
+             self.slurm_partition: dict[str, Any] | None = pyslurm.Partitions.load()
+             self.slurm_qos: dict[str, Any] | None = pyslurm.qos().get()
+             self.slurm_node: dict[str, Any] | None = pyslurm.Nodes.load()
+         elif intest:
+             import pyslurm
+
+             self.slurm_association = pyslurm.db.Associations.load()  # dict [num -> Association]
+             self.slurm_partition = pyslurm.Partitions.load()  # collection
+             self.slurm_qos = pyslurm.qos().get()  # dict
+             self.slurm_node = pyslurm.Nodes.load()  # dict
+         else:
+             print("Warning: pyslurm not found. Skipping HPCExecutor config validations")
+             self.slurm_association = None
+             self.slurm_partition = None
+             self.slurm_qos = None
+             self.slurm_node = None
+
+     def get_partition_info(self, partition_name: str) -> Any:
+         """Return the partition if it exists in the Slurm configuration."""
+         return self.slurm_partition.get(partition_name) if self.slurm_partition else None
+
+     def get_qos_info(self, qos_name: str) -> Any:
+         """Return the QOS if it exists in the Slurm configuration."""
+         return self.slurm_qos.get(qos_name) if self.slurm_qos else None
+
+     def get_account_info(self, account_name: str) -> list[Any]:
+         """Get all associations for an account"""
+         if self.slurm_association:
+             return [a for a in self.slurm_association.values() if a.account == account_name]
+         else:
+             return [None]
+
+     def can_account_use_partition(self, account_name: str, partition_name: str) -> bool:
+         """
+         Check if an account has access to a specific partition.
+
+         Returns
+         -------
+         bool: True if accessible, False if not accessible or an error occurred
+         """
+         account_info = self.get_account_info(account_name)
+         if not account_info:
+             return False
+
+         partition_info = self.get_partition_info(partition_name)
+
+         if not partition_info:
+             return False
+
+         allowed_partitions = account_info[0].partition
+         if allowed_partitions is None:
+             return True
+         else:
+             return partition_name in allowed_partitions
+
+     def can_account_use_qos(self, account_name: str, qos_name: str) -> bool:
+         """
+         Check if an account has access to a specific QOS.
+
+         Returns
+         -------
+         bool: True if accessible, False if not accessible or an error occurred
+         """
+         account_info = self.get_account_info(account_name)
+
+         if not account_info:
+             return False
+
+         qos_info = self.get_qos_info(qos_name)
+         if not qos_info:
+             return False
+
+         # Prefer the association belonging to a specific user when present
+         sample_acc = account_info[0]
+         for acc in account_info:
+             if acc.user == "minxu":
+                 sample_acc = acc
+                 break
+
+         allowed_qoss = sample_acc.qos
+         if allowed_qoss is None:
+             return True
+         else:
+             return qos_name in allowed_qoss
+
+     def get_partition_limits(self, partition_name: str) -> dict[str, str | int] | None:
+         """
+         Get time and size limits for a specific partition.
+
+         Returns
+         -------
+         Dict with 'max_time_minutes', 'default_time_minutes', 'max_nodes',
+         'total_nodes' and 'total_cpus', or None if the partition doesn't exist
+         """
+         partition_info = self.get_partition_info(partition_name)
+         if not partition_info:
+             return None
+
+         return {
+             "max_time_minutes": partition_info.to_dict().get("max_time", 0),  # in minutes
+             "default_time_minutes": partition_info.to_dict().get("default_time", 30),  # in minutes
+             "max_nodes": partition_info.to_dict().get("max_node", 1),
+             "total_nodes": partition_info.to_dict().get("total_nodes", 0),
+             "total_cpus": partition_info.to_dict().get("total_cpus", 0),
+         }
+
+     def get_node_from_partition(self, partition_name: str) -> dict[str, str | int] | None:
+         """
+         Get node information for a sample node in a specific partition.
+
+         Returns
+         -------
+         Dict with CPU, memory and node-name details,
+         or None if the partition doesn't exist
+         """
+         partition_info = self.get_partition_info(partition_name)
+         if not partition_info:
+             return None
+
+         sample_node = None
+
+         if self.slurm_node:
+             for node in self.slurm_node.values():
+                 if partition_name in node.partitions and "cpu" in node.available_features:
+                     sample_node = node
+                     break
+
+         return {
+             "cpus": int(sample_node.total_cpus) if sample_node is not None else 1,
+             "cores_per_socket": int(sample_node.cores_per_socket) if sample_node is not None else 1,
+             "sockets": int(sample_node.sockets) if sample_node is not None else 1,
+             "threads_per_core": int(sample_node.threads_per_core) if sample_node is not None else 1,
+             "real_memory": int(sample_node.real_memory) if sample_node is not None else 215,
+             "node_names": sample_node.name if sample_node is not None else "unknown",
+         }
+
+     def get_qos_limits(self, qos_name: str) -> dict[str, str | int]:
+         """
+         Get time and job limits for a specific QOS.
+
+         Returns
+         -------
+         Dict with 'max_time_minutes', 'max_jobs_pu', 'max_submit_jobs_pu',
+         'max_tres_pj' and 'default_time_minutes'
+         """
+         qos_info = self.get_qos_info(qos_name)
+
+         return {
+             "max_time_minutes": qos_info.get("max_wall_pj", 1.0e6),
+             "max_jobs_pu": qos_info.get("max_jobs_pu", 1.0e6),
+             "max_submit_jobs_pu": qos_info.get("max_submit_jobs_pu", 1.0e6),
+             "max_tres_pj": qos_info.get("max_tres_pj").split("=")[0],
+             "default_time_minutes": 120,
+         }
+
+     def check_account_partition_access_with_limits(
+         self, account_name: str, partition_name: str
+     ) -> dict[str, Any]:
+         """
+         Comprehensive check of account access and partition limits.
+
+         Returns a dictionary with all relevant information.
+         """
+         result = {
+             "account_exists": True if self.get_account_info(account_name) else False,
+             "partition_exists": True if self.get_partition_info(partition_name) else False,
+             "has_access": False,
+             "time_limits": None,
+             "error": "none",
+         }
+
+         try:
+             if result["account_exists"] and result["partition_exists"]:
+                 result["has_access"] = self.can_account_use_partition(account_name, partition_name)
+                 if result["has_access"]:
+                     result["time_limits"] = self.get_partition_info(partition_name).to_dict().get("max_time")
+         except Exception as e:
+             result["error"] = str(e)
+
+         return result
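
A brief sketch of how `SlurmChecker` might be used ahead of submission, based on the API above; the account and partition names are placeholders:

    # Illustrative only; "my_account" and "cpu" are placeholder names.
    from climate_ref.slurm import HAS_REAL_SLURM, SlurmChecker

    if HAS_REAL_SLURM:
        checker = SlurmChecker()
        if not checker.can_account_use_partition("my_account", "cpu"):
            raise SystemExit("Account cannot submit to the requested partition")
        limits = checker.get_partition_limits("cpu")
        print(limits["max_time_minutes"] if limits else "partition not found")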
climate_ref-0.6.0.dist-info/METADATA → climate_ref-0.6.1.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: climate-ref
- Version: 0.6.0
+ Version: 0.6.1
  Summary: Application which runs the CMIP Rapid Evaluation Framework
  Author-email: Jared Lewis <jared.lewis@climate-resource.com>, Mika Pflueger <mika.pflueger@climate-resource.com>, Bouwe Andela <b.andela@esciencecenter.nl>, Jiwoo Lee <lee1043@llnl.gov>, Min Xu <xum1@ornl.gov>, Nathan Collier <collierno@ornl.gov>, Dora Hegedus <dora.hegedus@stfc.ac.uk>
  License-Expression: Apache-2.0
@@ -25,6 +25,7 @@ Requires-Dist: climate-ref-core
  Requires-Dist: ecgtools>=2024.7.31
  Requires-Dist: environs>=11.0.0
  Requires-Dist: loguru>=0.7.2
+ Requires-Dist: parsl>=2025.5.19
  Requires-Dist: platformdirs>=4.3.6
  Requires-Dist: sqlalchemy>=2.0.36
  Requires-Dist: tomlkit>=0.13.2
climate_ref-0.6.0.dist-info/RECORD → climate_ref-0.6.1.dist-info/RECORD
@@ -6,6 +6,7 @@ climate_ref/constants.py,sha256=9RaNLgUSuQva7ki4eRW3TjOKeVP6T81QNiu0veB1zVk,111
  climate_ref/database.py,sha256=b_6XHdr78Mo7KeLqQJ5DjLsySHPdQE83P8dRpdMfzfM,8661
  climate_ref/provider_registry.py,sha256=dyfj4vU6unKHNXtT03HafQtAi3LilL37uvu3paCnmNY,4159
  climate_ref/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ climate_ref/slurm.py,sha256=20of7zMEeCg8BVQmy_IjSgZ8AJlYJbb8MK2Rjuzig-U,7215
  climate_ref/solver.py,sha256=T5sQjweSvpUMG4q8MfbGjljxa5kBgKxNotT78PwyxqU,16804
  climate_ref/testing.py,sha256=1b9lVCJlKxjJ7JGq6zDD2gK3BEM9ZVv1dbA-j6yb4Yk,4256
  climate_ref/cli/__init__.py,sha256=q-JAiRmwTXqapJGwtfuZ2P-L1a4XAmWj3CoZKLWlP3A,4357
@@ -23,8 +24,9 @@ climate_ref/datasets/cmip6.py,sha256=3MVJ1kPdw6f6V3G4gdHIiqDGUyMqPs-_wttkw2YKAH0
  climate_ref/datasets/obs4mips.py,sha256=CmMm4kopfb0yFsMSgUlHUm8clGJImBaodSkh6lAv_Ug,5926
  climate_ref/datasets/pmp_climatology.py,sha256=goHDc_3B2Wdiy_hmpERNvWDdDYZACPOyFDt3Du6nGc0,534
  climate_ref/datasets/utils.py,sha256=iLJO7h4G3DWsRe9hIC4qkIyi5_zIW1ZMw-FDASLujtM,359
- climate_ref/executor/__init__.py,sha256=DooN4jQudmLHyw24IfqNfWynfa1vEolLs-mZ7uY8O0k,604
- climate_ref/executor/local.py,sha256=P_nGD4blrLavk-ISj73cakAQCeELM_hNIhs8yVWWSAQ,8353
+ climate_ref/executor/__init__.py,sha256=PYtJs3oBS_GiUHbt8BF-6wJibpF6_vREm1Cg9TxVbLI,648
+ climate_ref/executor/hpc.py,sha256=2uZkSXww-oMqmzrYmh21hwJ7Mjjpojg-AhxX8eX_8Qo,11677
+ climate_ref/executor/local.py,sha256=65LUl41YtURFb87YTWZQHjDpIRlIKJ5Ny51c9DZjy0s,8582
  climate_ref/executor/result_handling.py,sha256=i7ZMX5vvyPY5gW-WWd-JHLi1BLviB9FXhn4FE8C9d4w,7787
  climate_ref/executor/synchronous.py,sha256=o4TndsoKMu9AzJYLkusU9lRkgHCy6HcCP46tEs6o86U,1895
  climate_ref/migrations/README,sha256=xM5osYbyEbEFA2eh5kwary_oh-5VFWtDubA-vgWwvlE,935
@@ -39,9 +41,9 @@ climate_ref/models/diagnostic.py,sha256=YB6xzbEXdpz2j-Ddf19RV8mAiWBrkmtRmiAEUV3t
  climate_ref/models/execution.py,sha256=lRCpaKLSR7rZbuoL94GW76tm9wLMsSDoIOA7bIa6xgY,9848
  climate_ref/models/metric_value.py,sha256=44OLcZz-qLx-p_9w7YWDKpD5S7Y9HyTKKsvSb77RBro,10190
  climate_ref/models/provider.py,sha256=RAE2qAAxwObu-72CdK4kt5ACMmKYEn07WJm7DU9hF28,990
- climate_ref-0.6.0.dist-info/METADATA,sha256=Ov6ZLG2A0D78je48OtG4TOhaCczrwoknsHSSt0rwutE,4399
- climate_ref-0.6.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- climate_ref-0.6.0.dist-info/entry_points.txt,sha256=IaggEJlDIhoYWXdXJafacWbWtCcoEqUKceP1qD7_7vU,44
- climate_ref-0.6.0.dist-info/licenses/LICENCE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- climate_ref-0.6.0.dist-info/licenses/NOTICE,sha256=4qTlax9aX2-mswYJuVrLqJ9jK1IkN5kSBqfVvYLF3Ws,128
- climate_ref-0.6.0.dist-info/RECORD,,
+ climate_ref-0.6.1.dist-info/METADATA,sha256=X-O5GFGtgPisrdsh0VZp4v5JgqOXF9e5Kqx1jYkxbkI,4431
+ climate_ref-0.6.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ climate_ref-0.6.1.dist-info/entry_points.txt,sha256=IaggEJlDIhoYWXdXJafacWbWtCcoEqUKceP1qD7_7vU,44
+ climate_ref-0.6.1.dist-info/licenses/LICENCE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ climate_ref-0.6.1.dist-info/licenses/NOTICE,sha256=4qTlax9aX2-mswYJuVrLqJ9jK1IkN5kSBqfVvYLF3Ws,128
+ climate_ref-0.6.1.dist-info/RECORD,,