climate-ref-core 0.5.0__tar.gz → 0.5.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/.gitignore +0 -1
  2. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/PKG-INFO +1 -1
  3. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/pyproject.toml +1 -1
  4. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/datasets.py +62 -1
  5. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/diagnostics.py +18 -2
  6. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/exceptions.py +7 -0
  7. climate_ref_core-0.5.1/src/climate_ref_core/executor.py +167 -0
  8. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/logging.py +16 -5
  9. climate_ref_core-0.5.1/src/climate_ref_core/metric_values/__init__.py +16 -0
  10. climate_ref_core-0.5.1/src/climate_ref_core/metric_values/typing.py +74 -0
  11. climate_ref_core-0.5.1/src/climate_ref_core/pycmec/cv_cmip7_aft.yaml +95 -0
  12. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/pycmec/metric.py +141 -19
  13. climate_ref_core-0.5.1/tests/unit/metric_values/test_typing.py +67 -0
  14. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/pycmec/cmec_testdata/test_metric_json_schema.yml +1 -5
  15. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/pycmec/test_cmec_metric.py +136 -1
  16. climate_ref_core-0.5.1/tests/unit/test_datasets/dataset_collection_hash.yml +2 -0
  17. climate_ref_core-0.5.1/tests/unit/test_datasets/metric_dataset_hash.yml +2 -0
  18. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/test_datasets.py +32 -0
  19. climate_ref_core-0.5.1/tests/unit/test_executor.py +27 -0
  20. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/test_logging.py +1 -1
  21. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/test_metrics.py +7 -2
  22. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/test_providers.py +1 -1
  23. climate_ref_core-0.5.0/src/climate_ref_core/executor.py +0 -96
  24. climate_ref_core-0.5.0/src/climate_ref_core/pycmec/cv_cmip7_aft.yaml +0 -44
  25. climate_ref_core-0.5.0/tests/unit/test_datasets/dataset_collection_hash.yml +0 -2
  26. climate_ref_core-0.5.0/tests/unit/test_datasets/metric_dataset_hash.yml +0 -2
  27. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/LICENCE +0 -0
  28. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/NOTICE +0 -0
  29. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/README.md +0 -0
  30. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/__init__.py +0 -0
  31. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/constraints.py +0 -0
  32. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/dataset_registry.py +0 -0
  33. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/env.py +0 -0
  34. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/providers.py +0 -0
  35. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/py.typed +0 -0
  36. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/pycmec/README.md +0 -0
  37. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/pycmec/__init__.py +0 -0
  38. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/pycmec/controlled_vocabulary.py +0 -0
  39. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/src/climate_ref_core/pycmec/output.py +0 -0
  40. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/pycmec/cmec_testdata/cmec_metric_sample.json +0 -0
  41. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/pycmec/cmec_testdata/cmec_output_sample.json +0 -0
  42. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/pycmec/cmec_testdata/cv_sample.yaml +0 -0
  43. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/pycmec/cmec_testdata/test_output_json_schema.yml +0 -0
  44. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/pycmec/conftest.py +0 -0
  45. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/pycmec/test_cmec_output.py +0 -0
  46. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/pycmec/test_controlled_vocabulary.py +0 -0
  47. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/test_constraints.py +0 -0
  48. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/test_dataset_registry/test_dataset_registry.py +0 -0
  49. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/test_datasets/dataset_collection_obs4mips_hash.yml +0 -0
  50. {climate_ref_core-0.5.0 → climate_ref_core-0.5.1}/tests/unit/test_exceptions.py +0 -0
--- climate_ref_core-0.5.0/.gitignore
+++ climate_ref_core-0.5.1/.gitignore
@@ -74,7 +74,6 @@ coverage.xml
 *.pot
 
 # Django stuff:
-*.log
 local_settings.py
 db.sqlite3
 db.sqlite3-journal
--- climate_ref_core-0.5.0/PKG-INFO
+++ climate_ref_core-0.5.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: climate-ref-core
-Version: 0.5.0
+Version: 0.5.1
 Summary: Core library for the CMIP Rapid Evaluation Framework
 Author-email: Jared Lewis <jared.lewis@climate-resource.com>, Mika Pflueger <mika.pflueger@climate-resource.com>, Bouwe Andela <b.andela@esciencecenter.nl>, Jiwoo Lee <lee1043@llnl.gov>, Min Xu <xum1@ornl.gov>, Nathan Collier <collierno@ornl.gov>, Dora Hegedus <dora.hegedus@stfc.ac.uk>
 License: Apache-2.0
--- climate_ref_core-0.5.0/pyproject.toml
+++ climate_ref_core-0.5.1/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "climate-ref-core"
-version = "0.5.0"
+version = "0.5.1"
 description = "Core library for the CMIP Rapid Evaluation Framework"
 readme = "README.md"
 authors = [
--- climate_ref_core-0.5.0/src/climate_ref_core/datasets.py
+++ climate_ref_core-0.5.1/src/climate_ref_core/datasets.py
@@ -11,6 +11,16 @@ from typing import Any, Self
 import pandas as pd
 from attrs import field, frozen
 
+Selector = tuple[tuple[str, str], ...]
+"""
+Type describing the key used to identify a group of datasets
+
+This is a tuple of tuples, where each inner tuple contains a metadata dimension and the value
+that was used to group the datasets together.
+
+This type must be hashable, as it is used as a key in a dictionary.
+"""
+
 
 class SourceDatasetType(enum.Enum):
     """
@@ -76,6 +86,23 @@ class FacetFilter:
     """
 
 
+def sort_selector(inp: Selector) -> Selector:
+    """
+    Sort the selector by key
+
+    Parameters
+    ----------
+    inp
+        Selector to sort
+
+    Returns
+    -------
+    :
+        Sorted selector
+    """
+    return tuple(sorted(inp, key=lambda x: x[0]))
+
+
 @frozen
 class DatasetCollection:
     """
@@ -83,15 +110,33 @@ class DatasetCollection:
     """
 
     datasets: pd.DataFrame
+    """
+    DataFrame containing the datasets that were selected for the execution.
+
+    The columns in this dataframe depend on the source dataset type, but always include:
+    * path
+    * [slug_column]
+    """
     slug_column: str
     """
     Column in datasets that contains the unique identifier for the dataset
     """
-    selector: tuple[tuple[str, str], ...] = ()
+    selector: Selector = field(converter=sort_selector, factory=tuple)
     """
     Unique key, value pairs that were selected during the initial groupby
     """
 
+    def selector_dict(self) -> dict[str, str]:
+        """
+        Convert the selector to a dictionary
+
+        Returns
+        -------
+        :
+            Dictionary of the selector
+        """
+        return {key: value for key, value in self.selector}
+
     def __getattr__(self, item: str) -> Any:
         return getattr(self.datasets, item)
 
@@ -155,3 +200,19 @@ class ExecutionDatasetCollection:
         hash_sum = sum(hash(item) for item in self._collection.values())
         hash_bytes = hash_sum.to_bytes(16, "little", signed=True)
         return hashlib.sha1(hash_bytes).hexdigest()  # noqa: S324
+
+    @property
+    def selectors(self) -> dict[str, Selector]:
+        """
+        Collection of selectors used to identify the datasets
+
+        These are the key, value pairs that were selected during the initial group-by,
+        for each data requirement.
+        """
+        # The "value" of SourceType is used here so this can be stored in the db
+        s = {}
+        for source_type in SourceDatasetType.ordered():
+            if source_type not in self._collection:
+                continue
+            s[source_type.value] = self._collection[source_type].selector
+        return s
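Because `selector` now runs through the converter on construction, a `DatasetCollection` built from unsorted pairs stores a canonical, hashable key, and `selector_dict` exposes the same information as a plain mapping. A minimal sketch (the DataFrame contents are hypothetical):

```python
import pandas as pd

from climate_ref_core.datasets import DatasetCollection

collection = DatasetCollection(
    datasets=pd.DataFrame({"path": ["tas_Amon.nc"], "instance_id": ["CMIP6.tas"]}),
    slug_column="instance_id",
    # Passed unsorted; stored sorted by key via the sort_selector converter
    selector=(("variable_id", "tas"), ("experiment_id", "historical")),
)
assert collection.selector == (("experiment_id", "historical"), ("variable_id", "tas"))
assert collection.selector_dict() == {"experiment_id": "historical", "variable_id": "tas"}
```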
--- climate_ref_core-0.5.0/src/climate_ref_core/diagnostics.py
+++ climate_ref_core-0.5.1/src/climate_ref_core/diagnostics.py
@@ -14,6 +14,7 @@ from attrs import field, frozen
 
 from climate_ref_core.constraints import GroupConstraint
 from climate_ref_core.datasets import ExecutionDatasetCollection, FacetFilter, SourceDatasetType
+from climate_ref_core.metric_values import SeriesMetricValue
 from climate_ref_core.pycmec.metric import CMECMetric
 from climate_ref_core.pycmec.output import CMECOutput
 
@@ -61,6 +62,11 @@ class ExecutionDefinition:
     for a specific set of datasets fulfilling the requirements.
     """
 
+    diagnostic: Diagnostic
+    """
+    The diagnostic that is being executed
+    """
+
     key: str
     """
     The unique identifier for the datasets in the diagnostic execution group.
@@ -85,6 +91,12 @@ class ExecutionDefinition:
     Root directory for storing the output of the diagnostic execution
     """
 
+    def execution_slug(self) -> str:
+        """
+        Get a slug for the execution
+        """
+        return f"{self.diagnostic.full_slug()}/{self.key}"
+
     def to_output_path(self, filename: pathlib.Path | str | None) -> pathlib.Path:
         """
         Get the absolute path for a file in the output directory
@@ -170,7 +182,11 @@ class ExecutionResult:
     """
     Whether the diagnostic execution ran successfully.
     """
-    # Log info is in the output bundle file already, but is definitely useful
+
+    series: Sequence[SeriesMetricValue] = field(factory=tuple)
+    """
+    A collection of series metric values that were extracted from the execution.
+    """
 
     @staticmethod
     def build_from_output_bundle(
@@ -426,7 +442,7 @@ class AbstractDiagnostic(Protocol):
         """
         Run the diagnostic on the given configuration.
 
-        The implementation of this method method is left to the diagnostic providers.
+        The implementation of this method is left to the diagnostic providers.
 
 
         Parameters
--- climate_ref_core-0.5.0/src/climate_ref_core/exceptions.py
+++ climate_ref_core-0.5.1/src/climate_ref_core/exceptions.py
@@ -46,3 +46,10 @@ class ConstraintNotSatisfied(RefException):
 
 class ResultValidationError(RefException):
     """Exception raised when the executions from a diagnostic are invalid"""
+
+
+class ExecutionError(RefException):
+    """Exception raised when an execution fails"""
+
+    def __init__(self, message: str) -> None:
+        super().__init__(message)
--- /dev/null
+++ climate_ref_core-0.5.1/src/climate_ref_core/executor.py
@@ -0,0 +1,167 @@
+"""
+Executor interface for running diagnostics
+"""
+
+import importlib
+import shutil
+from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
+
+from loguru import logger
+
+from climate_ref_core.diagnostics import ExecutionDefinition, ExecutionResult
+from climate_ref_core.exceptions import InvalidExecutorException
+from climate_ref_core.logging import redirect_logs
+
+if TYPE_CHECKING:
+    # TODO: break this import cycle and move it into the execution definition
+    from climate_ref.models import Execution
+
+
+def execute_locally(
+    definition: ExecutionDefinition,
+    log_level: str,
+) -> ExecutionResult:
+    """
+    Run a diagnostic execution
+
+    This is the chunk of work that should be executed by an executor.
+
+    Parameters
+    ----------
+    definition
+        A description of the information needed for this execution of the diagnostic
+    log_level
+        The log level to use for the execution
+    """
+    logger.info(f"Executing {definition.execution_slug()!r}")
+
+    try:
+        if definition.output_directory.exists():
+            logger.warning(
+                f"Output directory {definition.output_directory} already exists. "
+                f"Removing the existing directory."
+            )
+            shutil.rmtree(definition.output_directory)
+        definition.output_directory.mkdir(parents=True, exist_ok=True)
+
+        with redirect_logs(definition, log_level):
+            return definition.diagnostic.run(definition=definition)
+    except Exception:
+        # If the diagnostic fails, log the error and return a failure result
+        logger.exception(f"Error running {definition.execution_slug()!r}")
+        return ExecutionResult.build_from_failure(definition)
+
+
+@runtime_checkable
+class Executor(Protocol):
+    """
+    An executor is responsible for running a diagnostic asynchronously
+
+    The diagnostic may be run locally in the same process or in a separate process or container.
+
+    Notes
+    -----
+    This is an extremely basic interface and will be expanded in the future, as we figure out
+    our requirements.
+    """
+
+    name: str
+
+    def __init__(self, **kwargs: Any) -> None: ...
+
+    def run(
+        self,
+        definition: ExecutionDefinition,
+        execution: "Execution | None" = None,
+    ) -> None:
+        """
+        Execute a diagnostic with a given definition
+
+        No results are returned from this method,
+        as the execution may be performed asynchronously,
+        so results may not be immediately available.
+
+        /// admonition | Note
+        In future, we may return a `Future` object that can be used to retrieve the result,
+        but that requires some additional work to implement.
+        ///
+
+        Parameters
+        ----------
+        definition
+            Definition of the information needed to execute a diagnostic
+
+            This definition describes which datasets are required to run the diagnostic and where
+            the output should be stored.
+        execution
+            The database object to update with the results of the execution.
+
+            If provided, it will be updated with the results of the execution.
+            This may happen asynchronously, so the results may not be immediately available.
+        """
+        ...
+
+    def join(self, timeout: float) -> None:
+        """
+        Wait for all executions to finish
+
+        If the timeout is reached, a `TimeoutError` is raised.
+
+        Parameters
+        ----------
+        timeout
+            Maximum time to wait for all executions to finish, in seconds
+
+        Raises
+        ------
+        TimeoutError
+            If the timeout is reached
+        """
+
+
+def import_executor_cls(fqn: str) -> type[Executor]:
+    """
+    Import an executor using a fully qualified module path
+
+    Parameters
+    ----------
+    fqn
+        Full package and attribute name of the executor to import
+
+        For example: `climate_ref_example.executor` will use the `executor` attribute from the
+        `climate_ref_example` package.
+
+    Raises
+    ------
+    InvalidExecutorException
+        If the executor cannot be imported,
+        or if the attribute isn't a valid `Executor`.
+
+    Returns
+    -------
+    :
+        Executor class
+    """
+    module, attribute_name = fqn.rsplit(".", 1)
+
+    try:
+        imp = importlib.import_module(module)
+        executor: type[Executor] = getattr(imp, attribute_name)
+
+        # We can't really check if the executor is a subclass of Executor here.
+        # Protocols can't be used with issubclass if they have non-method members,
+        # so we have to check this at class instantiation time.
+
+        return executor
+    except ModuleNotFoundError:
+        logger.error(f"Package '{fqn}' not found")
+        raise InvalidExecutorException(fqn, f"Module '{module}' not found")
+    except AttributeError:
+        logger.error(f"Executor '{fqn}' not found")
+        raise InvalidExecutorException(fqn, f"Executor '{attribute_name}' not found in {module}")
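For orientation, a minimal sketch of a class satisfying this `Protocol`, delegating the actual work to `execute_locally`. The class and module names are hypothetical, not part of the package:

```python
from typing import Any

from climate_ref_core.diagnostics import ExecutionDefinition
from climate_ref_core.executor import execute_locally


class InlineExecutor:
    """Hypothetical executor that runs each execution synchronously, in-process."""

    name = "inline"

    def __init__(self, **kwargs: Any) -> None:
        self.log_level = kwargs.get("log_level", "INFO")

    def run(self, definition: ExecutionDefinition, execution: Any = None) -> None:
        # Delegate to the shared helper; a real executor would persist the
        # result onto the `execution` database object if one was provided.
        execute_locally(definition, log_level=self.log_level)

    def join(self, timeout: float) -> None:
        # Everything already ran synchronously, so there is nothing to wait for.
        return None


# Such a class is then resolvable by fully qualified name, e.g.
# executor_cls = import_executor_cls("my_package.InlineExecutor")
```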
--- climate_ref_core-0.5.0/src/climate_ref_core/logging.py
+++ climate_ref_core-0.5.1/src/climate_ref_core/logging.py
@@ -1,7 +1,7 @@
 """
 Logging utilities
 
-The REF uses [loguru](https://loguru.readthedocs.io/en/stable/), a simple logging framework
+The REF uses [loguru](https://loguru.readthedocs.io/en/stable/), a simple logging framework.
 """
 
 import contextlib
@@ -16,7 +16,13 @@ from loguru import logger
 from rich.pretty import pretty_repr
 
 from climate_ref_core.diagnostics import ExecutionDefinition
-from climate_ref_core.executor import EXECUTION_LOG_FILENAME
+
+EXECUTION_LOG_FILENAME = "out.log"
+"""
+Filename for the execution log.
+
+This file is written via [climate_ref_core.logging.redirect_logs][].
+"""
 
 
 class _InterceptHandler(logging.Handler):
@@ -72,7 +78,7 @@ def add_log_handler(**kwargs: Any) -> None:
 
     # Track the current handler via custom attributes on the logger
     # This is a bit of a workaround because of loguru's super slim API that doesn't allow for
-    # modificiation of existing handlers.
+    # modification of existing handlers.
     logger.default_handler_id = handled_id  # type: ignore[attr-defined]
     logger.default_handler_kwargs = kwargs  # type: ignore[attr-defined]
 
@@ -88,7 +94,12 @@ def remove_log_handler() -> None:
     logger should be readded later
     """
     if hasattr(logger, "default_handler_id"):
-        logger.remove(logger.default_handler_id)
+        try:
+            logger.remove(logger.default_handler_id)
+        except ValueError:
+            # This can happen if the handler has already been removed
+            # or if the logger was never configured
+            pass
         del logger.default_handler_id
     else:
         raise AssertionError("No default log handler to remove.")
@@ -143,4 +154,4 @@ def redirect_logs(definition: ExecutionDefinition, log_level: str) -> Generator[
     add_log_handler(**logger.default_handler_kwargs)  # type: ignore[attr-defined]
 
 
-__all__ = ["add_log_handler", "capture_logging", "logger", "redirect_logs"]
+__all__ = ["EXECUTION_LOG_FILENAME", "add_log_handler", "capture_logging", "logger", "redirect_logs"]
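With the constant now living in the logging module, the location of an execution's log can be derived without importing `climate_ref_core.executor` (sidestepping the old import cycle). A sketch with a hypothetical output directory:

```python
import pathlib

from climate_ref_core.logging import EXECUTION_LOG_FILENAME

# Hypothetical output directory for a single execution
output_directory = pathlib.Path("output/provider/diagnostic/key")
log_path = output_directory / EXECUTION_LOG_FILENAME
print(log_path)  # output/provider/diagnostic/key/out.log
```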
--- /dev/null
+++ climate_ref_core-0.5.1/src/climate_ref_core/metric_values/__init__.py
@@ -0,0 +1,16 @@
+"""
+Metric Values
+
+A metric is a single statistical evaluation contained within a diagnostic.
+A diagnostic may consist of more than one metric.
+
+Examples include bias, root mean squared error (RMSE), Earth Mover's Distance,
+phase/timing of the seasonal cycle, amplitude of the seasonal cycle, spatial or temporal correlations,
+and interannual variability.
+Not all metrics are useful for all variables or should be used with every observationally constrained dataset.
+Each metric may be converted into a performance score.
+"""
+
+from .typing import ScalarMetricValue, SeriesMetricValue
+
+__all__ = ["ScalarMetricValue", "SeriesMetricValue"]
--- /dev/null
+++ climate_ref_core-0.5.1/src/climate_ref_core/metric_values/typing.py
@@ -0,0 +1,74 @@
+from collections.abc import Sequence
+from typing import Self
+
+from pydantic import BaseModel, model_validator
+
+Value = float | int
+
+
+class SeriesMetricValue(BaseModel):
+    """
+    A 1-d array with an associated index and additional dimensions
+
+    These values are typically sourced from the CMEC metrics bundle
+    """
+
+    dimensions: dict[str, str]
+    """
+    Key, value pairs that identify the dimensions of the metric
+
+    These values are used for a faceted search of the metric values.
+    """
+    values: Sequence[Value]
+    """
+    A 1-d array of values
+    """
+    index: Sequence[str | Value]
+    """
+    A 1-d array of index values
+
+    Values must be strings or numbers and have the same length as `values`.
+    Non-unique index values are not allowed.
+    """
+
+    index_name: str
+    """
+    The name of the index.
+
+    This is used for presentation purposes and is not used in the controlled vocabulary.
+    """
+
+    attributes: dict[str, str | Value] | None = None
+    """
+    Additional unstructured attributes associated with the metric value
+    """
+
+    @model_validator(mode="after")
+    def validate_index_length(self) -> Self:
+        """Validate that the index has the same length as the values"""
+        if len(self.index) != len(self.values):
+            raise ValueError(
+                f"Index length ({len(self.index)}) must match values length ({len(self.values)})"
+            )
+        return self
+
+
+class ScalarMetricValue(BaseModel):
+    """
+    A scalar value with associated dimensions
+    """
+
+    dimensions: dict[str, str]
+    """
+    Key, value pairs that identify the dimensions of the metric
+
+    These values are used for a faceted search of the metric values.
+    """
+    value: Value
+    """
+    A scalar value
+    """
+    attributes: dict[str, str | Value] | None = None
+    """
+    Additional unstructured attributes associated with the metric value
+    """
--- /dev/null
+++ climate_ref_core-0.5.1/src/climate_ref_core/pycmec/cv_cmip7_aft.yaml
@@ -0,0 +1,95 @@
+dimensions:
+  - name: source_id
+    long_name: Source ID
+    description: "Source ID (e.g., GFDL-CM4)"
+    allow_extra_values: true
+    required: false
+  - name: reference_source_id
+    long_name: Reference Source ID
+    description: "Source ID of the reference dataset (e.g., HadISST)"
+    allow_extra_values: true
+    required: false
+  - name: experiment_id
+    long_name: Experiment ID
+    description: "Experiment ID (e.g., historical, ssp585)"
+    allow_extra_values: true
+    required: false
+  - name: variable_id
+    long_name: Variable
+    description: "Variable ID (e.g., tas, pr, etc.)"
+    allow_extra_values: true
+    required: false
+  - name: reference_variable_id
+    long_name: Reference Variable
+    description: "Variable ID for the reference dataset (e.g., tas, pr, etc.)"
+    allow_extra_values: true
+    required: false
+  - name: member_id
+    long_name: Member ID
+    description: "Unique identifier for each ensemble member, includes the variant label and sub-experiment if present"
+    allow_extra_values: true
+    required: false
+  - name: variant_label
+    long_name: Variant Label
+    description: "Ensemble member (constructed from realization, initialization, physics, and forcing indices)"
+    allow_extra_values: true
+    required: false
+  - name: metric
+    long_name: Metric
+    description: ""
+    required: true
+    allow_extra_values: true
+  - name: region
+    long_name: Region
+    description: "Part of the world from which the metric values are calculated."
+    required: true
+    allow_extra_values: true
+    values:
+      - name: global
+        long_name: Global
+        description: "Global aggregate"
+        units: dimensionless
+  - name: season
+    long_name: Season
+    description: "Parts of the year from which the metric values are calculated"
+    required: true
+    allow_extra_values: true
+    values:
+      - name: ann
+        long_name: Annual
+        description: ""
+        units: dimensionless
+      - name: djf
+        long_name: Dec,Jan,Feb
+        description: "December, January, February"
+        units: dimensionless
+      - name: mam
+        long_name: Mar,Apr,May
+        description: "March, April, May"
+        units: dimensionless
+      - name: jja
+        long_name: Jun,Jul,Aug
+        description: "June, July, August"
+        units: dimensionless
+      - name: son
+        long_name: Sep,Oct,Nov
+        description: "September, October, November"
+        units: dimensionless
+  - name: statistic
+    long_name: Statistic
+    description: ""
+    required: true
+    allow_extra_values: true
+    values:
+      - name: rmse
+        long_name: Root Mean Square Error
+        description: ""
+        units: dimensionless
+      - name: overall score
+        long_name: Overall Score
+        description: ""
+        units: dimensionless
+      - name: bias
+        long_name: Bias
+        description: ""
+        units: dimensionless
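For orientation, a sketch of checking a set of metric dimensions against this vocabulary using plain pyyaml (not the package's own controlled_vocabulary loader; the dimension values are hypothetical):

```python
import yaml

with open("cv_cmip7_aft.yaml") as fh:
    cv = yaml.safe_load(fh)

required = {dim["name"] for dim in cv["dimensions"] if dim["required"]}
# -> {"metric", "region", "season", "statistic"}

# Every metric value must cover the required dimensions
dimensions = {"metric": "tas-bias", "region": "global", "season": "ann", "statistic": "rmse"}
missing = required - dimensions.keys()
assert not missing, f"Missing required dimensions: {missing}"
```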