climate-ref 0.6.6__py3-none-any.whl → 0.7.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. climate_ref/cli/__init__.py +12 -3
  2. climate_ref/cli/_utils.py +56 -2
  3. climate_ref/cli/datasets.py +48 -9
  4. climate_ref/cli/executions.py +333 -24
  5. climate_ref/cli/providers.py +1 -2
  6. climate_ref/config.py +4 -4
  7. climate_ref/database.py +62 -4
  8. climate_ref/dataset_registry/obs4ref_reference.txt +0 -9
  9. climate_ref/dataset_registry/sample_data.txt +10 -19
  10. climate_ref/datasets/__init__.py +3 -3
  11. climate_ref/datasets/base.py +121 -20
  12. climate_ref/datasets/cmip6.py +2 -0
  13. climate_ref/datasets/obs4mips.py +26 -15
  14. climate_ref/executor/result_handling.py +4 -1
  15. climate_ref/migrations/env.py +12 -10
  16. climate_ref/migrations/versions/2025-09-10T1358_2f6e36738e06_use_version_as_version_facet_for_.py +35 -0
  17. climate_ref/migrations/versions/2025-09-22T2359_20cd136a5b04_add_pmp_version.py +35 -0
  18. climate_ref/models/__init__.py +1 -6
  19. climate_ref/models/base.py +4 -20
  20. climate_ref/models/dataset.py +2 -0
  21. climate_ref/models/diagnostic.py +2 -1
  22. climate_ref/models/execution.py +219 -7
  23. climate_ref/models/metric_value.py +25 -110
  24. climate_ref/models/mixins.py +144 -0
  25. climate_ref/models/provider.py +2 -1
  26. climate_ref/provider_registry.py +4 -4
  27. climate_ref/slurm.py +2 -2
  28. climate_ref/testing.py +1 -1
  29. {climate_ref-0.6.6.dist-info → climate_ref-0.7.0.dist-info}/METADATA +1 -1
  30. climate_ref-0.7.0.dist-info/RECORD +58 -0
  31. climate_ref-0.6.6.dist-info/RECORD +0 -55
  32. {climate_ref-0.6.6.dist-info → climate_ref-0.7.0.dist-info}/WHEEL +0 -0
  33. {climate_ref-0.6.6.dist-info → climate_ref-0.7.0.dist-info}/entry_points.txt +0 -0
  34. {climate_ref-0.6.6.dist-info → climate_ref-0.7.0.dist-info}/licenses/LICENCE +0 -0
  35. {climate_ref-0.6.6.dist-info → climate_ref-0.7.0.dist-info}/licenses/NOTICE +0 -0
climate_ref/migrations/versions/2025-09-10T1358_2f6e36738e06_use_version_as_version_facet_for_.py
@@ -0,0 +1,35 @@
+"""use 'version' as version facet for obs4MIPs
+
+Revision ID: 2f6e36738e06
+Revises: 8d28e5e0f9c3
+Create Date: 2025-09-10 13:58:40.660076
+
+"""
+
+from collections.abc import Sequence
+from typing import Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "2f6e36738e06"
+down_revision: Union[str, None] = "8d28e5e0f9c3"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("obs4mips_dataset", schema=None) as batch_op:
+        batch_op.add_column(sa.Column("version", sa.String(), nullable=False))
+
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("obs4mips_dataset", schema=None) as batch_op:
+        batch_op.drop_column("version")
+
+    # ### end Alembic commands ###
climate_ref/migrations/versions/2025-09-22T2359_20cd136a5b04_add_pmp_version.py
@@ -0,0 +1,35 @@
+"""add pmp version
+
+Revision ID: 20cd136a5b04
+Revises: 2f6e36738e06
+Create Date: 2025-09-22 23:59:42.724007
+
+"""
+
+from collections.abc import Sequence
+from typing import Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = "20cd136a5b04"
+down_revision: Union[str, None] = "2f6e36738e06"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("pmp_climatology_dataset", schema=None) as batch_op:
+        batch_op.add_column(sa.Column("version", sa.String(), nullable=False))
+
+    # ### end Alembic commands ###
+
+
+def downgrade() -> None:
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table("pmp_climatology_dataset", schema=None) as batch_op:
+        batch_op.drop_column("version")
+
+    # ### end Alembic commands ###
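Both new migrations follow the same Alembic pattern: op.batch_alter_table recreates the table behind the scenes, which is what lets ALTER-style changes work on SQLite as well as other backends. A minimal sketch of the pattern with a hypothetical table name; note that adding a nullable=False column to a table that already holds rows generally needs a server_default or a backfill step, so these migrations implicitly assume existing rows can be given a value:

import sqlalchemy as sa
from alembic import op


def upgrade() -> None:
    # batch_alter_table copies the table, applies the change, and swaps it in,
    # so the same migration runs on SQLite and PostgreSQL alike.
    with op.batch_alter_table("example_dataset", schema=None) as batch_op:
        # A server_default (or a prior backfill) gives pre-existing rows a
        # value for the new NOT NULL column; "unknown" here is illustrative.
        batch_op.add_column(sa.Column("version", sa.String(), nullable=False, server_default="unknown"))


def downgrade() -> None:
    with op.batch_alter_table("example_dataset", schema=None) as batch_op:
        batch_op.drop_column("version")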
climate_ref/models/__init__.py
@@ -4,9 +4,7 @@ Declaration of the models used by the REF.
 These models are used to represent the data that is stored in the database.
 """
 
-from typing import TypeVar
-
-from climate_ref.models.base import Base
+from climate_ref.models.base import Base, Table
 from climate_ref.models.dataset import Dataset
 from climate_ref.models.diagnostic import Diagnostic
 from climate_ref.models.execution import (
@@ -17,9 +15,6 @@ from climate_ref.models.execution import (
 from climate_ref.models.metric_value import MetricValue, ScalarMetricValue, SeriesMetricValue
 from climate_ref.models.provider import Provider
 
-Table = TypeVar("Table", bound=Base)
-
-
 __all__ = [
     "Base",
     "Dataset",
climate_ref/models/base.py
@@ -1,8 +1,7 @@
-import datetime
-from typing import Any
+from typing import Any, TypeVar
 
-from sqlalchemy import JSON, MetaData, func
-from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
+from sqlalchemy import JSON, MetaData
+from sqlalchemy.orm import DeclarativeBase
 
 
 class Base(DeclarativeBase):
@@ -28,19 +27,4 @@ class Base(DeclarativeBase):
     )
 
 
-class CreatedUpdatedMixin:
-    """
-    Mixin for models that have a created_at and updated_at fields
-    """
-
-    created_at: Mapped[datetime.datetime] = mapped_column(server_default=func.now())
-    """
-    When the dataset was added to the database
-    """
-
-    updated_at: Mapped[datetime.datetime] = mapped_column(
-        server_default=func.now(), onupdate=func.now(), index=True
-    )
-    """
-    When the dataset was updated.
-    """
+Table = TypeVar("Table", bound=Base)
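The Table TypeVar now lives alongside Base and is re-exported from climate_ref.models. Because it is bound to Base, it lets helper functions be generic over any ORM model class. A minimal sketch of the kind of signature this enables; get_or_create is an illustrative helper, not part of the package:

from sqlalchemy.orm import Session

from climate_ref.models import Table


def get_or_create(session: Session, model: type[Table], **kwargs: str) -> Table:
    """Fetch a row matching the keyword filters, creating it if absent."""
    # Since Table is bound to Base, the return type matches the class passed in:
    # get_or_create(session, Dataset, slug="...") is typed as returning Dataset.
    instance = session.query(model).filter_by(**kwargs).one_or_none()
    if instance is None:
        instance = model(**kwargs)
        session.add(instance)
    return instance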
climate_ref/models/dataset.py
@@ -172,6 +172,7 @@ class Obs4MIPsDataset(Dataset):
     units: Mapped[str] = mapped_column()
     variable_id: Mapped[str] = mapped_column()
     variant_label: Mapped[str] = mapped_column()
+    version: Mapped[str] = mapped_column()
     vertical_levels: Mapped[int] = mapped_column()
     source_version_number: Mapped[str] = mapped_column()
 
@@ -206,6 +207,7 @@ class PMPClimatologyDataset(Dataset):
     units: Mapped[str] = mapped_column()
     variable_id: Mapped[str] = mapped_column()
     variant_label: Mapped[str] = mapped_column()
+    version: Mapped[str] = mapped_column()
     vertical_levels: Mapped[int] = mapped_column()
     source_version_number: Mapped[str] = mapped_column()
 
climate_ref/models/diagnostic.py
@@ -3,7 +3,8 @@ from typing import TYPE_CHECKING
 from sqlalchemy import ForeignKey, UniqueConstraint
 from sqlalchemy.orm import Mapped, mapped_column, relationship
 
-from climate_ref.models.base import Base, CreatedUpdatedMixin
+from climate_ref.models.base import Base
+from climate_ref.models.mixins import CreatedUpdatedMixin
 
 if TYPE_CHECKING:
     from climate_ref.models.execution import ExecutionGroup
climate_ref/models/execution.py
@@ -1,19 +1,22 @@
 import enum
 import pathlib
-from typing import TYPE_CHECKING, Any
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, Any, ClassVar
 
 from loguru import logger
-from sqlalchemy import Column, ForeignKey, Table, UniqueConstraint, func
+from sqlalchemy import Column, ForeignKey, Table, UniqueConstraint, func, or_
 from sqlalchemy.orm import Mapped, Session, mapped_column, relationship
 from sqlalchemy.orm.query import RowReturningQuery
 
-from climate_ref.models import Dataset
-from climate_ref.models.base import Base, CreatedUpdatedMixin
+from climate_ref.models.base import Base
+from climate_ref.models.dataset import Dataset
+from climate_ref.models.diagnostic import Diagnostic
+from climate_ref.models.mixins import CreatedUpdatedMixin, DimensionMixin
+from climate_ref.models.provider import Provider
 from climate_ref_core.datasets import ExecutionDatasetCollection
 
 if TYPE_CHECKING:
     from climate_ref.database import Database
-    from climate_ref.models.diagnostic import Diagnostic
     from climate_ref.models.metric_value import MetricValue
 
 
@@ -217,16 +220,21 @@ class ResultOutputType(enum.Enum):
     HTML = "html"
 
 
-class ExecutionOutput(CreatedUpdatedMixin, Base):
+class ExecutionOutput(DimensionMixin, CreatedUpdatedMixin, Base):
     """
     An output generated as part of an execution.
 
     This output may be a plot, data file or HTML file.
-    These outputs are defined in the CMEC output bundle
+    These outputs are defined in the CMEC output bundle.
+
+    Outputs can be tagged with dimensions from the controlled vocabulary
+    to enable filtering and organization.
     """
 
     __tablename__ = "execution_output"
 
+    _cv_dimensions: ClassVar[list[str]] = []
+
     id: Mapped[int] = mapped_column(primary_key=True)
 
     execution_id: Mapped[int] = mapped_column(ForeignKey("execution.id"), index=True)
@@ -264,6 +272,65 @@ class ExecutionOutput(CreatedUpdatedMixin, Base):
 
     execution: Mapped["Execution"] = relationship(back_populates="outputs")
 
+    @classmethod
+    def build(  # noqa: PLR0913
+        cls,
+        *,
+        execution_id: int,
+        output_type: ResultOutputType,
+        dimensions: dict[str, str],
+        filename: str | None = None,
+        short_name: str | None = None,
+        long_name: str | None = None,
+        description: str | None = None,
+    ) -> "ExecutionOutput":
+        """
+        Build an ExecutionOutput from dimensions and metadata
+
+        This is a helper method that validates the dimensions supplied.
+
+        Parameters
+        ----------
+        execution_id
+            Execution that created the output
+        output_type
+            Type of the output
+        dimensions
+            Dimensions that describe the output
+        filename
+            Path to the output
+        short_name
+            Short key of the output
+        long_name
+            Human readable name
+        description
+            Long description
+
+        Raises
+        ------
+        KeyError
+            If an unknown dimension was supplied.
+
+            Dimensions must exist in the controlled vocabulary.
+
+        Returns
+        -------
+        Newly created ExecutionOutput
+        """
+        for k in dimensions:
+            if k not in cls._cv_dimensions:
+                raise KeyError(f"Unknown dimension column '{k}'")
+
+        return ExecutionOutput(
+            execution_id=execution_id,
+            output_type=output_type,
+            filename=filename,
+            short_name=short_name,
+            long_name=long_name,
+            description=description,
+            **dimensions,
+        )
+
 
 def get_execution_group_and_latest(
     session: Session,
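The validation loop in the new build helper rejects any dimension key that has not been registered from the controlled vocabulary, then forwards the remaining keys as column values via **dimensions. A hedged usage sketch; the dimension names and values are illustrative and must exist in the configured CV, and session and execution are assumed to be in scope:

output = ExecutionOutput.build(
    execution_id=execution.id,
    output_type=ResultOutputType.HTML,
    # Each key must be a registered CV dimension, otherwise build() raises KeyError
    dimensions={"region": "global", "statistic": "rmse"},
    filename="index.html",
    short_name="overview",
    long_name="Overview page",
    description="Landing page summarising the diagnostic results",
)
session.add(output)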
@@ -305,3 +372,148 @@ def get_execution_group_and_latest(
     )
 
     return query  # type: ignore
+
+
+def _filter_executions_by_facets(
+    results: Sequence[tuple[ExecutionGroup, Execution | None]],
+    facet_filters: dict[str, str],
+) -> list[tuple[ExecutionGroup, Execution | None]]:
+    """
+    Filter execution groups and their latest executions based on facet key-value pairs.
+
+    This is a relatively expensive operation as it requires iterating over all results.
+    This should be replaced once we have normalised the selectors into a separate table.
+
+
+    Parameters
+    ----------
+    results
+        List of tuples containing ExecutionGroup and its latest Execution (or None)
+    facet_filters
+        Dictionary of facet key-value pairs to filter by (AND logic, exact match)
+
+    Returns
+    -------
+    Filtered list of tuples containing ExecutionGroup and its latest Execution (or None)
+
+    Notes
+    -----
+    - Facet filters can either be key=value (searches all dataset types)
+      or dataset_type.key=value (searches specific dataset type)
+    - Key=value filters search across all dataset types
+    - dataset_type.key=value filters only search within the specified dataset type
+    - Multiple values within same filter type use OR logic
+    - All specified facets must match for an execution group to be included (AND logic)
+    """
+    filtered_results = []
+    for eg, execution in results:
+        all_filters_match = True
+        for facet_key, facet_value in facet_filters.items():
+            filter_match = False
+            if "." in facet_key:
+                # Handle dataset_type.key=value format
+                dataset_type, key = facet_key.split(".", 1)
+                if dataset_type in eg.selectors:
+                    if [key, facet_value] in eg.selectors[dataset_type]:
+                        filter_match = True
+                        break
+            else:
+                # Handle key=value format (search across all dataset types)
+                for ds_type_selectors in eg.selectors.values():
+                    if [facet_key, facet_value] in ds_type_selectors:
+                        filter_match = True
+                        break
+
+            if not filter_match:
+                all_filters_match = False
+                break
+        if all_filters_match:
+            filtered_results.append((eg, execution))
+    return filtered_results
+
+
+def get_execution_group_and_latest_filtered(  # noqa: PLR0913
+    session: Session,
+    diagnostic_filters: list[str] | None = None,
+    provider_filters: list[str] | None = None,
+    facet_filters: dict[str, str] | None = None,
+    dirty: bool | None = None,
+    successful: bool | None = None,
+) -> list[tuple[ExecutionGroup, Execution | None]]:
+    """
+    Query execution groups with filtering capabilities.
+
+    Parameters
+    ----------
+    session
+        Database session
+    diagnostic_filters
+        List of diagnostic slug substrings (OR logic, case-insensitive)
+    provider_filters
+        List of provider slug substrings (OR logic, case-insensitive)
+    facet_filters
+        Dictionary of facet key-value pairs (AND logic, exact match)
+    dirty
+        If True, only return dirty execution groups.
+        If False, only return clean execution groups.
+        If None, do not filter by dirty status.
+    successful
+        If True, only return execution groups whose latest execution was successful.
+        If False, only return execution groups whose latest execution was unsuccessful or has no executions.
+        If None, do not filter by execution success.
+
+    Returns
+    -------
+    Query returning tuples of (ExecutionGroup, latest Execution or None)
+
+    Notes
+    -----
+    - Diagnostic and provider filters use substring matching (case-insensitive)
+    - Multiple values within same filter type use OR logic
+    - Different filter types use AND logic
+    - Facet filters can either be key=value (searches all dataset types)
+      or dataset_type.key=value (searches specific dataset type)
+    """
+    # Start with base query
+    query = get_execution_group_and_latest(session)
+
+    if diagnostic_filters or provider_filters:
+        # Join through to the Diagnostic table
+        query = query.join(Diagnostic, ExecutionGroup.diagnostic_id == Diagnostic.id)
+
+    # Apply diagnostic filter (OR logic for multiple values)
+    if diagnostic_filters:
+        diagnostic_conditions = [
+            Diagnostic.slug.ilike(f"%{filter_value.lower()}%") for filter_value in diagnostic_filters
+        ]
+        query = query.filter(or_(*diagnostic_conditions))
+
+    # Apply provider filter (OR logic for multiple values)
+    if provider_filters:
+        # Need to join through Diagnostic to Provider
+        query = query.join(Provider, Diagnostic.provider_id == Provider.id)
+
+        provider_conditions = [
+            Provider.slug.ilike(f"%{filter_value.lower()}%") for filter_value in provider_filters
+        ]
+        query = query.filter(or_(*provider_conditions))
+
+    if successful is not None:
+        if successful:
+            query = query.filter(Execution.successful.is_(True))
+        else:
+            query = query.filter(or_(Execution.successful.is_(False), Execution.successful.is_(None)))
+
+    if dirty is not None:
+        if dirty:
+            query = query.filter(ExecutionGroup.dirty.is_(True))
+        else:
+            query = query.filter(or_(ExecutionGroup.dirty.is_(False), ExecutionGroup.dirty.is_(None)))
+
+    if facet_filters:
+        # Load all results into memory for Python-based filtering
+        # TODO: Update once we have normalised the selector
+        results = [r._tuple() for r in query.all()]
+        return _filter_executions_by_facets(results, facet_filters)
+    else:
+        return [r._tuple() for r in query.all()]
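A usage sketch of the new query helper, with illustrative filter values. Substring filters within a list are ORed together, the different filter kinds are ANDed, and facet filters fall back to the in-memory _filter_executions_by_facets pass added above:

results = get_execution_group_and_latest_filtered(
    session,
    diagnostic_filters=["enso", "monsoon"],  # slug contains "enso" OR "monsoon"
    provider_filters=["pmp"],                # ANDed with the diagnostic filter
    facet_filters={"source_id": "ACCESS-ESM1-5"},  # exact match across all dataset types
    successful=True,                         # only groups whose latest execution succeeded
)
for execution_group, latest_execution in results:
    print(execution_group.id, latest_execution)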
climate_ref/models/metric_value.py
@@ -2,12 +2,11 @@ import enum
 from collections.abc import Mapping
 from typing import TYPE_CHECKING, Any, ClassVar
 
-from loguru import logger
-from sqlalchemy import Column, ForeignKey, Text, event
+from sqlalchemy import ForeignKey, event
 from sqlalchemy.orm import Mapped, mapped_column, relationship
 
-from climate_ref.models.base import Base, CreatedUpdatedMixin
-from climate_ref_core.pycmec.controlled_vocabulary import CV, Dimension
+from climate_ref.models.base import Base
+from climate_ref.models.mixins import CreatedUpdatedMixin, DimensionMixin
 
 if TYPE_CHECKING:
     from climate_ref.models.execution import Execution
@@ -27,11 +26,14 @@ class MetricValueType(enum.Enum):
     SERIES = "series"
 
 
-class MetricValue(CreatedUpdatedMixin, Base):
+class MetricValue(DimensionMixin, CreatedUpdatedMixin, Base):
     """
     Represents a single metric value
 
-    This value has a number of dimensions which are used to query the diagnostic value.
+    This is a base class for different types of metric values (e.g. scalar, series) which
+    are stored in a single table using single table inheritance.
+
+    This value has a number of dimensions which are used to query the diagnostic values.
     These dimensions describe aspects such as the type of statistic being measured,
     the region of interest or the model from which the statistic is being measured.
 
@@ -46,6 +48,8 @@ class MetricValue(CreatedUpdatedMixin, Base):
         "polymorphic_on": "type",
     }
 
+    _cv_dimensions: ClassVar[list[str]] = []
+
     id: Mapped[int] = mapped_column(primary_key=True)
     execution_id: Mapped[int] = mapped_column(ForeignKey("execution.id"), index=True)
 
@@ -60,111 +64,9 @@ class MetricValue(CreatedUpdatedMixin, Base):
     This value is used to determine how the metric value should be interpreted.
     """
 
-    _cv_dimensions: ClassVar[list[str]] = []
-
-    @property
-    def dimensions(self) -> dict[str, str]:
-        """
-        Get the non-null dimensions and their values
-
-        Any changes to the resulting dictionary are not reflected in the object
-
-        Returns
-        -------
-        Collection of dimensions names and their values
-        """
-        dims = {}
-        for key in self._cv_dimensions:
-            value = getattr(self, key)
-            if value is not None:
-                dims[key] = value
-        return dims
-
     def __repr__(self) -> str:
         return f"<MetricValue id={self.id} execution={self.execution} dimensions={self.dimensions}>"
 
-    @staticmethod
-    def build_dimension_column(dimension: Dimension) -> Column[str]:
-        """
-        Create a column representing a CV dimension
-
-        These columns are not automatically generated with alembic revisions.
-        Any changes to this functionality likely require a manual database migration
-        of the existing columns.
-
-        Parameters
-        ----------
-        dimension
-            Dimension definition to create the column for.
-
-            Currently only the "name" field is being used.
-
-        Returns
-        -------
-        An instance of a sqlalchemy Column
-
-            This doesn't create the column in the database,
-            but enables the ORM to access it.
-
-        """
-        return Column(
-            dimension.name,
-            Text,
-            index=True,
-            nullable=True,
-            info={"skip_autogenerate": True},
-        )
-
-    @classmethod
-    def register_cv_dimensions(cls, cv: CV) -> None:
-        """
-        Register the dimensions supplied in the controlled vocabulary
-
-        This has to be done at run-time to support custom CVs.
-        Any extra columns already in the database, but not in the CV are ignored.
-
-        Parameters
-        ----------
-        cv
-            Controlled vocabulary being used by the application.
-            This controlled vocabulary contains the definitions of the dimensions that can be used.
-        """
-        for dimension in cv.dimensions:
-            target_attribute = dimension.name
-            if target_attribute in cls._cv_dimensions:
-                continue
-
-            cls._cv_dimensions.append(target_attribute)
-            logger.debug(f"Registered MetricValue dimension: {target_attribute}")
-
-            if hasattr(cls, target_attribute):
-                # This should only occur in test suite as we don't support removing dimensions at runtime
-                logger.warning("Column attribute already exists on MetricValue. Ignoring")
-            else:
-                setattr(cls, target_attribute, cls.build_dimension_column(dimension))
-
-        # TODO: Check if the underlying table already contains columns
-
-    @classmethod
-    def _reset_cv_dimensions(cls) -> None:
-        """
-        Remove any previously registered dimensions
-
-        Used by the test suite and should not be called at runtime.
-
-        This doesn't remove any previous column definitions due to a limitation that columns in
-        declarative classes cannot be removed.
-        This means that `hasattr(MetricValue, "old_attribute")`
-        will still return True after resetting, but the values will not be included in any executions.
-        """
-        logger.warning(f"Removing MetricValue dimensions: {cls._cv_dimensions}")
-
-        keys = list(cls._cv_dimensions)
-        for key in keys:
-            cls._cv_dimensions.remove(key)
-
-        assert not len(cls._cv_dimensions)
-
 
 class ScalarMetricValue(MetricValue):
     """
@@ -180,6 +82,12 @@ class ScalarMetricValue(MetricValue):
     # This is a scalar value
     value: Mapped[float] = mapped_column(nullable=True)
 
+    def __repr__(self) -> str:
+        return (
+            f"<ScalarMetricValue "
+            f"id={self.id} execution={self.execution} dimensions={self.dimensions} value={self.value}>"
+        )
+
     @classmethod
     def build(
         cls,
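The dimension machinery removed from MetricValue above (the dimensions property, build_dimension_column, register_cv_dimensions and _reset_cv_dimensions) is not gone: it moves into the new DimensionMixin in climate_ref/models/mixins.py (file 24 in the list above), so that MetricValue and ExecutionOutput share one implementation while keeping separate _cv_dimensions registries. Assuming the classmethod names carry over to the mixin unchanged, startup registration would look roughly like:

from climate_ref.models.execution import ExecutionOutput
from climate_ref.models.metric_value import MetricValue

# Register the controlled-vocabulary dimensions on both models so that each
# dimension becomes a nullable, indexed Text column accessible via the ORM.
# `cv` is assumed to be the application's loaded controlled vocabulary.
MetricValue.register_cv_dimensions(cv)
ExecutionOutput.register_cv_dimensions(cv)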
@@ -232,9 +140,10 @@ class ScalarMetricValue(MetricValue):
 
 class SeriesMetricValue(MetricValue):
     """
-    A scalar value with an associated dimensions
+    A 1d series with associated dimensions
 
-    This is a subclass of MetricValue that is used to represent a scalar value.
+    This is a subclass of MetricValue that is used to represent a series.
+    This can be used to represent time series, vertical profiles or other 1d data.
     """
 
     __mapper_args__: ClassVar[Mapping[str, Any]] = {  # type: ignore
@@ -246,6 +155,12 @@ class SeriesMetricValue(MetricValue):
     index: Mapped[list[float | int | str]] = mapped_column(nullable=True)
     index_name: Mapped[str] = mapped_column(nullable=True)
 
+    def __repr__(self) -> str:
+        return (
+            f"<SeriesMetricValue id={self.id} execution={self.execution} "
+            f"dimensions={self.dimensions} index_name={self.index_name}>"
+        )
+
     @classmethod
     def build(  # noqa: PLR0913
         cls,