climate-ref-core 0.6.4__py3-none-any.whl → 0.6.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,7 +6,7 @@ import sys
6
6
  import warnings
7
7
  from collections import defaultdict
8
8
  from collections.abc import Mapping
9
- from typing import Protocol, runtime_checkable
9
+ from typing import Literal, Protocol, runtime_checkable
10
10
 
11
11
  if sys.version_info < (3, 11):
12
12
  from typing_extensions import Self
@@ -148,6 +148,7 @@ class RequireFacets:
148
148
 
149
149
  dimension: str
150
150
  required_facets: tuple[str, ...]
151
+ operator: Literal["all", "any"] = "all"
151
152
 
152
153
  def validate(self, group: pd.DataFrame) -> bool:
153
154
  """
@@ -156,7 +157,8 @@ class RequireFacets:
156
157
  if self.dimension not in group:
157
158
  logger.warning(f"Dimension {self.dimension} not present in group {group}")
158
159
  return False
159
- return all(value in group[self.dimension].values for value in self.required_facets)
160
+ op = all if self.operator == "all" else any
161
+ return op(value in group[self.dimension].values for value in self.required_facets)
160
162
 
161
163
 
162
164
  @frozen
@@ -200,17 +202,27 @@ class AddSupplementaryDataset:
200
202
  for facet, values in supplementary_facets.items():
201
203
  mask = supplementary_group[facet].isin(values)
202
204
  supplementary_group = supplementary_group[mask]
203
-
204
- if not supplementary_group.empty and self.optional_matching_facets:
205
- facets = list(self.matching_facets + self.optional_matching_facets)
205
+ if not supplementary_group.empty:
206
+ matching_facets = list(self.matching_facets)
207
+ facets = matching_facets + list(self.optional_matching_facets)
206
208
  datasets = group[facets].drop_duplicates()
207
209
  indices = set()
208
210
  for i in range(len(datasets)):
209
- scores = (supplementary_group[facets] == datasets.iloc[i]).sum(axis=1)
210
- matches = supplementary_group[scores == scores.max()]
211
- # Select the latest version if there are multiple matches
212
- matches = matches[matches["version"] == matches["version"].max()]
213
- indices.add(matches.index[0])
211
+ dataset = datasets.iloc[i]
212
+ # Restrict the supplementary datasets to those that match the main dataset.
213
+ supplementaries = supplementary_group[
214
+ (supplementary_group[matching_facets] == dataset[matching_facets]).all(1)
215
+ ]
216
+ if not supplementaries.empty:
217
+ # Select the best matching supplementary dataset based on the optional matching facets.
218
+ scores = (supplementaries[facets] == dataset).sum(axis=1)
219
+ matches = supplementaries[scores == scores.max()]
220
+ if "version" in facets:
221
+ # Select the latest version if there are multiple matches
222
+ matches = matches[matches["version"] == matches["version"].max()]
223
+ # Select one match per dataset
224
+ indices.add(matches.index[0])
225
+
214
226
  supplementary_group = supplementary_group.loc[list(indices)].drop_duplicates()
215
227
 
216
228
  return pd.concat([group, supplementary_group])
@@ -5,7 +5,7 @@ Dataset management and filtering
5
5
  import enum
6
6
  import functools
7
7
  import hashlib
8
- from collections.abc import Collection, Iterable
8
+ from collections.abc import Collection, Iterable, Iterator
9
9
  from typing import Any, Self
10
10
 
11
11
  import pandas as pd
@@ -172,9 +172,24 @@ class ExecutionDatasetCollection:
172
172
  def __hash__(self) -> int:
173
173
  return hash(self.hash)
174
174
 
175
+ def __iter__(self) -> Iterator[SourceDatasetType]:
176
+ return iter(self._collection)
177
+
178
+ def keys(self) -> Iterable[SourceDatasetType]:
179
+ """
180
+ Iterate over the source types in the collection.
181
+ """
182
+ return self._collection.keys()
183
+
184
+ def values(self) -> Iterable[DatasetCollection]:
185
+ """
186
+ Iterate over the datasets in the collection.
187
+ """
188
+ return self._collection.values()
189
+
175
190
  def items(self) -> Iterable[tuple[SourceDatasetType, DatasetCollection]]:
176
191
  """
177
- Iterate over the datasets in the collection
192
+ Iterate over the items in the collection.
178
193
  """
179
194
  return self._collection.items()
180
195
 
@@ -14,6 +14,7 @@ from attrs import field, frozen
14
14
  from climate_ref_core.constraints import GroupConstraint
15
15
  from climate_ref_core.datasets import ExecutionDatasetCollection, FacetFilter, SourceDatasetType
16
16
  from climate_ref_core.metric_values import SeriesMetricValue
17
+ from climate_ref_core.metric_values.typing import SeriesDefinition
17
18
  from climate_ref_core.pycmec.metric import CMECMetric
18
19
  from climate_ref_core.pycmec.output import CMECOutput
19
20
 
@@ -182,9 +183,11 @@ class ExecutionResult:
182
183
  Whether the diagnostic execution ran successfully.
183
184
  """
184
185
 
185
- series: Sequence[SeriesMetricValue] = field(factory=tuple)
186
+ series_filename: pathlib.Path | None = None
186
187
  """
187
188
  A collection of series metric values that were extracted from the execution.
189
+
190
+ These are written to a CSV file in the output directory.
188
191
  """
189
192
 
190
193
  @staticmethod
@@ -193,6 +196,7 @@ class ExecutionResult:
193
196
  *,
194
197
  cmec_output_bundle: CMECOutput | dict[str, Any],
195
198
  cmec_metric_bundle: CMECMetric | dict[str, Any],
199
+ series: Sequence[SeriesMetricValue] = tuple(),
196
200
  ) -> ExecutionResult:
197
201
  """
198
202
  Build a ExecutionResult from a CMEC output bundle.
@@ -205,6 +209,8 @@ class ExecutionResult:
205
209
  An output bundle in the CMEC format.
206
210
  cmec_metric_bundle
207
211
  A diagnostic bundle in the CMEC format.
212
+ series
213
+ Series metric values extracted from the execution.
208
214
 
209
215
  Returns
210
216
  -------
@@ -223,17 +229,21 @@ class ExecutionResult:
223
229
  cmec_metric = cmec_metric_bundle
224
230
 
225
231
  definition.to_output_path(filename=None).mkdir(parents=True, exist_ok=True)
226
- bundle_path = definition.to_output_path("output.json")
227
- cmec_output.dump_to_json(bundle_path)
228
232
 
229
- definition.to_output_path(filename=None).mkdir(parents=True, exist_ok=True)
230
- bundle_path = definition.to_output_path("diagnostic.json")
231
- cmec_metric.dump_to_json(bundle_path)
233
+ output_filename = "output.json"
234
+ metric_filename = "diagnostic.json"
235
+ series_filename = "series.json"
232
236
 
237
+ cmec_output.dump_to_json(definition.to_output_path(output_filename))
238
+ cmec_metric.dump_to_json(definition.to_output_path(metric_filename))
239
+ SeriesMetricValue.dump_to_json(definition.to_output_path(series_filename), series)
240
+
241
+ # We are using relative paths for the output files for portability of the results
233
242
  return ExecutionResult(
234
243
  definition=definition,
235
- output_bundle_filename=pathlib.Path("output.json"),
236
- metric_bundle_filename=pathlib.Path("diagnostic.json"),
244
+ output_bundle_filename=pathlib.Path(output_filename),
245
+ metric_bundle_filename=pathlib.Path(metric_filename),
246
+ series_filename=pathlib.Path(series_filename),
237
247
  successful=True,
238
248
  )
239
249
 
@@ -432,6 +442,11 @@ class AbstractDiagnostic(Protocol):
432
442
  is raised.
433
443
  """
434
444
 
445
+ series: Sequence[SeriesDefinition]
446
+ """
447
+ Definition of the series that are produced by the diagnostic.
448
+ """
449
+
435
450
  provider: DiagnosticProvider
436
451
  """
437
452
  The provider that provides the diagnostic.
@@ -493,6 +508,8 @@ class Diagnostic(AbstractDiagnostic):
493
508
  See (climate_ref_example.example.ExampleDiagnostic)[] for an example implementation.
494
509
  """
495
510
 
511
+ series: Sequence[SeriesDefinition] = tuple()
512
+
496
513
  def __init__(self) -> None:
497
514
  super().__init__()
498
515
  self._provider: DiagnosticProvider | None = None
@@ -160,12 +160,15 @@ def import_executor_cls(fqn: str) -> type[Executor]:
160
160
  imp = importlib.import_module(module)
161
161
  executor: type[Executor] = getattr(imp, attribute_name)
162
162
 
163
+ if isinstance(executor, Exception):
164
+ raise executor
165
+
163
166
  # We can't really check if the executor is a subclass of Executor here
164
167
  # Protocols can't be used with issubclass if they have non-method members
165
168
  # We have to check this at class instantiation time
166
169
 
167
170
  return executor
168
- except ModuleNotFoundError:
171
+ except (ModuleNotFoundError, ImportError):
169
172
  logger.error(f"Package '{fqn}' not found")
170
173
  raise InvalidExecutorException(fqn, f"Module '{module}' not found")
171
174
  except AttributeError:
@@ -1,11 +1,37 @@
1
+ import json
1
2
  from collections.abc import Sequence
2
- from typing import Self
3
+ from pathlib import Path
4
+ from typing import Any, Self
3
5
 
4
6
  from pydantic import BaseModel, model_validator
5
7
 
6
8
  Value = float | int
7
9
 
8
10
 
11
+ class SeriesDefinition(BaseModel):
12
+ """
13
+ A definition of a 1-d array with an associated index and additional dimensions.
14
+ """
15
+
16
+ file_pattern: str
17
+ """A glob pattern to match files that contain the series values."""
18
+
19
+ sel: dict[str, Any] | None = None
20
+ """A dictionary of selection criteria to apply with :meth:`xarray.Dataset.sel` after loading the file."""
21
+
22
+ dimensions: dict[str, str]
23
+ """Key, value pairs that identify the dimensions of the metric."""
24
+
25
+ values_name: str
26
+ """The name of the variable in the file that contains the values of the series."""
27
+
28
+ index_name: str
29
+ """The name of the variable in the file that contains the index of the series."""
30
+
31
+ attributes: Sequence[str]
32
+ """A list of attributes that should be extracted from the file and included in the series metadata."""
33
+
34
+
9
35
  class SeriesMetricValue(BaseModel):
10
36
  """
11
37
  A 1-d array with an associated index and additional dimensions
@@ -52,6 +78,45 @@ class SeriesMetricValue(BaseModel):
52
78
  )
53
79
  return self
54
80
 
81
+ @classmethod
82
+ def dump_to_json(cls, path: Path, series: Sequence["SeriesMetricValue"]) -> None:
83
+ """
84
+ Dump a sequence of SeriesMetricValue to a JSON file.
85
+
86
+ Parameters
87
+ ----------
88
+ path
89
+ The path to the JSON file.
90
+
91
+ The directory containing this file must already exist.
92
+ This file will be overwritten if it already exists.
93
+ series
94
+ The series values to dump.
95
+ """
96
+ with open(path, "w") as f:
97
+ json.dump([s.model_dump() for s in series], f, indent=2)
98
+
99
+ @classmethod
100
+ def load_from_json(
101
+ cls,
102
+ path: Path,
103
+ ) -> list["SeriesMetricValue"]:
104
+ """
105
+ Dump a sequence of SeriesMetricValue to a JSON file.
106
+
107
+ Parameters
108
+ ----------
109
+ path
110
+ The path to the JSON file.
111
+ """
112
+ with open(path) as f:
113
+ data = json.load(f)
114
+
115
+ if not isinstance(data, list):
116
+ raise ValueError(f"Expected a list of series values, got {type(data)}")
117
+
118
+ return [cls.model_validate(s) for s in data]
119
+
55
120
 
56
121
  class ScalarMetricValue(BaseModel):
57
122
  """
@@ -232,6 +232,27 @@ def _get_micromamba_url() -> str:
232
232
  class CondaDiagnosticProvider(CommandLineDiagnosticProvider):
233
233
  """
234
234
  A provider for diagnostics that can be run from the command line in a conda environment.
235
+
236
+ Parameters
237
+ ----------
238
+ name
239
+ The name of the provider.
240
+ version
241
+ The version of the provider.
242
+ slug
243
+ A slugified version of the name.
244
+ repo
245
+ URL of the git repository to install a development version of the package from.
246
+ tag_or_commit
247
+ Tag or commit to install from the `repo` repository.
248
+
249
+ Attributes
250
+ ----------
251
+ env_vars
252
+ Environment variables to set when running commands in the conda environment.
253
+ url
254
+ URL to install a development version of the package from.
255
+
235
256
  """
236
257
 
237
258
  def __init__(
@@ -246,6 +267,7 @@ class CondaDiagnosticProvider(CommandLineDiagnosticProvider):
246
267
  self._conda_exe: Path | None = None
247
268
  self._prefix: Path | None = None
248
269
  self.url = f"git+{repo}@{tag_or_commit}" if repo and tag_or_commit else None
270
+ self.env_vars: dict[str, str] = {}
249
271
 
250
272
  @property
251
273
  def prefix(self) -> Path:
@@ -404,6 +426,8 @@ class CondaDiagnosticProvider(CommandLineDiagnosticProvider):
404
426
  *cmd,
405
427
  ]
406
428
  logger.info(f"Running '{' '.join(cmd)}'")
429
+ env_vars = os.environ.copy()
430
+ env_vars.update(self.env_vars)
407
431
  try:
408
432
  # This captures the log output until the execution is complete
409
433
  # We could poll using `subprocess.Popen` if we want something more responsive
@@ -413,6 +437,7 @@ class CondaDiagnosticProvider(CommandLineDiagnosticProvider):
413
437
  stdout=subprocess.PIPE,
414
438
  stderr=subprocess.STDOUT,
415
439
  text=True,
440
+ env=env_vars,
416
441
  )
417
442
  logger.info("Command output: \n" + res.stdout)
418
443
  logger.info("Command execution successful")
@@ -1,4 +1,5 @@
1
1
  import pathlib
2
+ from collections.abc import Iterable, Sequence
2
3
  from typing import Any
3
4
 
4
5
  from attrs import field, frozen, validators
@@ -7,6 +8,7 @@ from loguru import logger
7
8
  from yaml import safe_load
8
9
 
9
10
  from climate_ref_core.exceptions import ResultValidationError
11
+ from climate_ref_core.metric_values import ScalarMetricValue, SeriesMetricValue
10
12
  from climate_ref_core.pycmec.metric import CMECMetric
11
13
 
12
14
  RESERVED_DIMENSION_NAMES = {"attributes", "json_structure", "created_at", "updated_at", "value", "id"}
@@ -122,33 +124,49 @@ class CV:
122
124
  return dim
123
125
  raise KeyError(f"Dimension {name} not found")
124
126
 
125
- def validate_metrics(self, metric_bundle: CMECMetric) -> None:
127
+ def _validate_value(self, metric_value: ScalarMetricValue | SeriesMetricValue) -> None:
126
128
  """
127
- Validate a diagnostic bundle against a CV
129
+ Validate a single metric value against the CV
130
+ """
131
+ for k, v in metric_value.dimensions.items():
132
+ try:
133
+ dimension = self.get_dimension_by_name(k)
134
+ except KeyError:
135
+ raise ResultValidationError(f"Unknown dimension: {k!r}")
136
+ if not dimension.allow_extra_values:
137
+ if v not in [dv.name for dv in dimension.values]:
138
+ raise ResultValidationError(f"Unknown value {v!r} for dimension {k!r}")
139
+
140
+ if hasattr(metric_value, "value") and not isinstance(metric_value.value, float): # pragma: no cover
141
+ # This may not be possible with the current CMECMetric implementation
142
+ raise ResultValidationError(f"Unexpected value: {metric_value.value!r}")
143
+
144
+ def validate_metrics(self, metric_value_collection: CMECMetric | Sequence[SeriesMetricValue]) -> None:
145
+ """
146
+ Validate a set of metric values (either scalar or series) against a CV
128
147
 
129
148
  The CV describes the accepted dimensions and values within a bundle
130
149
 
131
150
  Parameters
132
151
  ----------
133
- metric_bundle
152
+ metric_value_collection
153
+ A collection of metric values to validate.
154
+
155
+ This can be a CMECMetric instance or a sequence of SeriesMetricValue instances.
134
156
 
135
157
  Raises
136
158
  ------
137
159
  ResultValidationError
138
160
  If the validation of the dimensions or values fails
139
161
  """
140
- for result in metric_bundle.iter_results():
141
- for k, v in result.dimensions.items():
142
- try:
143
- dimension = self.get_dimension_by_name(k)
144
- except KeyError:
145
- raise ResultValidationError(f"Unknown dimension: {k!r}")
146
- if not dimension.allow_extra_values:
147
- if v not in [dv.name for dv in dimension.values]:
148
- raise ResultValidationError(f"Unknown value {v!r} for dimension {k!r}")
149
- if not isinstance(result.value, float): # pragma: no cover
150
- # This may not be possible with the current CMECMetric implementation
151
- raise ResultValidationError(f"Unexpected value: {result.value!r}")
162
+ generator: Iterable[SeriesMetricValue | ScalarMetricValue]
163
+ if isinstance(metric_value_collection, CMECMetric):
164
+ generator = metric_value_collection.iter_results()
165
+ else:
166
+ generator = iter(metric_value_collection)
167
+
168
+ for result in generator:
169
+ self._validate_value(result)
152
170
 
153
171
  @staticmethod
154
172
  def load_from_file(filename: pathlib.Path | str) -> "CV":
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: climate-ref-core
3
- Version: 0.6.4
3
+ Version: 0.6.6
4
4
  Summary: Core library for the CMIP Rapid Evaluation Framework
5
5
  Author-email: Jared Lewis <jared.lewis@climate-resource.com>, Mika Pflueger <mika.pflueger@climate-resource.com>, Bouwe Andela <b.andela@esciencecenter.nl>, Jiwoo Lee <lee1043@llnl.gov>, Min Xu <xum1@ornl.gov>, Nathan Collier <collierno@ornl.gov>, Dora Hegedus <dora.hegedus@stfc.ac.uk>
6
6
  License-Expression: Apache-2.0
@@ -29,7 +29,7 @@ Requires-Dist: pydantic>=2.10.6
29
29
  Requires-Dist: pyyaml>=6.0.2
30
30
  Requires-Dist: requests
31
31
  Requires-Dist: rich
32
- Requires-Dist: setuptools>=75.8.0
32
+ Requires-Dist: setuptools<81
33
33
  Requires-Dist: typing-extensions
34
34
  Description-Content-Type: text/markdown
35
35
 
@@ -1,24 +1,24 @@
1
1
  climate_ref_core/__init__.py,sha256=MtmPThF2F9_2UODEN6rt1x30LDxrHIZ0wyRN_wsHx5I,127
2
- climate_ref_core/constraints.py,sha256=QOqMh5jDBxdWTnQw2HNBizJQDF6Uu97rfJp9WudQWHc,11819
2
+ climate_ref_core/constraints.py,sha256=9dZPlBAPHwv8A4ZCK7pKj3EnP6SB5baASjVNW7nNrOY,12503
3
3
  climate_ref_core/dataset_registry.py,sha256=sQp2VT9xSVAaWsf0tF4E_VQxuEsvIxU2MZm5uNX1ynw,7172
4
- climate_ref_core/datasets.py,sha256=TK50WQwTfbase26s8wPEGEN1BwcedrOd8nk6IlEf3Ww,6124
5
- climate_ref_core/diagnostics.py,sha256=5KCtHuhToSpATqjW4HBi56PsOxT5WX4VkqoZPUvYR60,18769
4
+ climate_ref_core/datasets.py,sha256=-Rto5NNx0WCHnJL4OSqB0-DnTgUrjpoovBC86OsEfbw,6570
5
+ climate_ref_core/diagnostics.py,sha256=hhWik9WBjTMbQ-XW6AYPfBBZblP0LeTpffXIeqZ_yJA,19401
6
6
  climate_ref_core/env.py,sha256=Ph2dejVxTELfP3bL0xES086WLGvV5H6KvsOwCkL6m-k,753
7
7
  climate_ref_core/exceptions.py,sha256=7Mkz22P-kbiL-ZevAhlOuQaaeTio6zpwE9YA45OTGvs,1909
8
- climate_ref_core/executor.py,sha256=QiVOca-d9JxKIktQIinQQYZGr3ecV5mL3nvUwCdMiJQ,5372
8
+ climate_ref_core/executor.py,sha256=9mKVkm0S7ikub3_FP7CrgdC4Qj9ynOi0r_DIfzCDS-0,5459
9
9
  climate_ref_core/logging.py,sha256=cg6CK2DHGjyLaoRJm75p-Ja82hnVhBBQ4riOKk3l9XY,7063
10
- climate_ref_core/providers.py,sha256=HZTpz1VFa9yZLPr1Cwofag2TCuwn6P4VPOrBaXbhn0A,12746
10
+ climate_ref_core/providers.py,sha256=z5oD7EErIWprH5zv8I5yuU0IavEiSsi6SFkYMtiqE7g,13448
11
11
  climate_ref_core/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
12
12
  climate_ref_core/metric_values/__init__.py,sha256=aHfwRrqzLOmmaBKf1-4q97DnHb8KwmW0Dhwd79ZQiNQ,634
13
- climate_ref_core/metric_values/typing.py,sha256=2DpzmjqQ7tqOPAyjthZ_O14c0-MhiYt-A_n9p6-bOao,1903
13
+ climate_ref_core/metric_values/typing.py,sha256=4Qmr1LJQxQd2qkndwOzKTHq-hihiIBfWDZ_vzRWNbsI,3880
14
14
  climate_ref_core/pycmec/README.md,sha256=PzkovlPpsXqFopsYzz5GRvCAipNRGO1Wo-0gc17qr2Y,36
15
15
  climate_ref_core/pycmec/__init__.py,sha256=hXvKGEJQWyAp1i-ndr3D4zuYxkRhcR2LfXgFXlhYOk4,28
16
- climate_ref_core/pycmec/controlled_vocabulary.py,sha256=_GZ2Y6KAYF12I3IS27g8YdFBAcAONyGAhpucQ4oEcOA,5128
16
+ climate_ref_core/pycmec/controlled_vocabulary.py,sha256=kgMEvQ1P6EwXC7sFgdC77IQDo8I0DnnQ2CPXXQaavjE,5944
17
17
  climate_ref_core/pycmec/cv_cmip7_aft.yaml,sha256=gx5QyW88pZQVUfiYXmsJtJO6AJg6NbIZgdU4vDIa3fE,4390
18
18
  climate_ref_core/pycmec/metric.py,sha256=zymXoutnjbdcxvG_fMJugFLLcBrfSPG0XoV-2tA0ujA,18499
19
19
  climate_ref_core/pycmec/output.py,sha256=4-RQ439sfgNLeQZVDPB1pewF_kTwX7nCK0Z4U6bvbd0,5709
20
- climate_ref_core-0.6.4.dist-info/METADATA,sha256=oYXP9COnofum1dw4zCrilxEK0AAC33wr032prB-z2-U,2930
21
- climate_ref_core-0.6.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
22
- climate_ref_core-0.6.4.dist-info/licenses/LICENCE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
23
- climate_ref_core-0.6.4.dist-info/licenses/NOTICE,sha256=4qTlax9aX2-mswYJuVrLqJ9jK1IkN5kSBqfVvYLF3Ws,128
24
- climate_ref_core-0.6.4.dist-info/RECORD,,
20
+ climate_ref_core-0.6.6.dist-info/METADATA,sha256=9vrBrbYGYhrERHV7pE1MmwsEPeEqrX83C9oLtaXUcBo,2925
21
+ climate_ref_core-0.6.6.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
22
+ climate_ref_core-0.6.6.dist-info/licenses/LICENCE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
23
+ climate_ref_core-0.6.6.dist-info/licenses/NOTICE,sha256=4qTlax9aX2-mswYJuVrLqJ9jK1IkN5kSBqfVvYLF3Ws,128
24
+ climate_ref_core-0.6.6.dist-info/RECORD,,