dataeval 0.69.3__py3-none-any.whl → 0.70.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. dataeval/__init__.py +3 -3
  2. dataeval/_internal/datasets.py +300 -0
  3. dataeval/_internal/detectors/drift/base.py +5 -6
  4. dataeval/_internal/detectors/drift/mmd.py +3 -3
  5. dataeval/_internal/detectors/duplicates.py +62 -45
  6. dataeval/_internal/detectors/merged_stats.py +23 -54
  7. dataeval/_internal/detectors/ood/ae.py +3 -3
  8. dataeval/_internal/detectors/outliers.py +133 -61
  9. dataeval/_internal/interop.py +11 -7
  10. dataeval/_internal/metrics/balance.py +9 -9
  11. dataeval/_internal/metrics/ber.py +3 -3
  12. dataeval/_internal/metrics/divergence.py +3 -3
  13. dataeval/_internal/metrics/diversity.py +6 -6
  14. dataeval/_internal/metrics/parity.py +24 -16
  15. dataeval/_internal/metrics/stats/base.py +231 -0
  16. dataeval/_internal/metrics/stats/boxratiostats.py +159 -0
  17. dataeval/_internal/metrics/stats/datasetstats.py +97 -0
  18. dataeval/_internal/metrics/stats/dimensionstats.py +111 -0
  19. dataeval/_internal/metrics/stats/hashstats.py +73 -0
  20. dataeval/_internal/metrics/stats/labelstats.py +125 -0
  21. dataeval/_internal/metrics/stats/pixelstats.py +117 -0
  22. dataeval/_internal/metrics/stats/visualstats.py +122 -0
  23. dataeval/_internal/metrics/uap.py +2 -2
  24. dataeval/_internal/metrics/utils.py +28 -13
  25. dataeval/_internal/output.py +3 -18
  26. dataeval/_internal/workflows/sufficiency.py +123 -133
  27. dataeval/metrics/stats/__init__.py +14 -3
  28. dataeval/workflows/__init__.py +2 -2
  29. {dataeval-0.69.3.dist-info → dataeval-0.70.0.dist-info}/METADATA +3 -2
  30. {dataeval-0.69.3.dist-info → dataeval-0.70.0.dist-info}/RECORD +32 -26
  31. {dataeval-0.69.3.dist-info → dataeval-0.70.0.dist-info}/WHEEL +1 -1
  32. dataeval/_internal/flags.py +0 -77
  33. dataeval/_internal/metrics/stats.py +0 -397
  34. dataeval/flags/__init__.py +0 -3
  35. {dataeval-0.69.3.dist-info → dataeval-0.70.0.dist-info}/LICENSE.txt +0 -0
dataeval/_internal/metrics/stats/labelstats.py
@@ -0,0 +1,125 @@
+ from __future__ import annotations
+
+ from collections import Counter, defaultdict
+ from dataclasses import dataclass
+ from typing import Any, Iterable, Mapping, TypeVar
+
+ from numpy.typing import ArrayLike
+
+ from dataeval._internal.interop import to_numpy
+ from dataeval._internal.output import OutputMetadata, set_metadata
+
+
+ @dataclass(frozen=True)
+ class LabelStatsOutput(OutputMetadata):
+     """
+     Output class for `labelstats` metrics function
+
+     Attributes
+     ----------
+     label_counts_per_class : dict[str | int, int]
+         Dictionary whose keys are the different label classes and
+         values are total counts of each class
+     label_counts_per_image : list[int]
+         Number of labels per image
+     image_counts_per_label : dict[str | int, int]
+         Dictionary whose keys are the different label classes and
+         values are total counts of each image the class is present in
+     image_indices_per_label : dict[str | int, list]
+         Dictionary whose keys are the different label classes and
+         values are lists containing the images that have that label
+     image_count : int
+         Total number of images present
+     class_count : int
+         Total number of classes present
+     label_count : int
+         Total number of labels present
+     """
+
+     label_counts_per_class: dict[str | int, int]
+     label_counts_per_image: list[int]
+     image_counts_per_label: dict[str | int, int]
+     image_indices_per_label: dict[str | int, list[int]]
+     image_count: int
+     class_count: int
+     label_count: int
+
+
+ TKey = TypeVar("TKey", int, str)
+
+
+ def sort(d: Mapping[TKey, Any]) -> dict[TKey, Any]:
+     """
+     Sort mappings by key in increasing order
+     """
+     return dict(sorted(d.items(), key=lambda x: x[0]))
+
+
+ @set_metadata("dataeval.metrics")
+ def labelstats(
+     labels: Iterable[ArrayLike],
+ ) -> LabelStatsOutput:
+     """
+     Calculates statistics for data labels
+
+     This function computes counting metrics (e.g., total per class, total per image)
+     on the labels.
+
+     Parameters
+     ----------
+     labels : ArrayLike, shape - [label] | [[label]] or (N,M) | (N,)
+         Lists or numpy array of labels.
+         A set of lists where each list contains all labels per image -
+         (e.g. [[label1, label2], [label2], [label1, label3]] or [label1, label2, label1, label3]).
+         If a numpy array, N is the number of images, M is the number of labels per image.
+
+     Returns
+     -------
+     LabelStatsOutput
+         A dictionary-like object containing the computed counting metrics for the labels.
+
+     Examples
+     --------
+     Calculating the statistics on labels for a set of data
+
+     >>> stats = labelstats(labels)
+     >>> stats.label_counts_per_class
+     {'chicken': 3, 'cow': 8, 'horse': 9, 'pig': 7, 'sheep': 7}
+     >>> stats.label_counts_per_image
+     [3, 2, 3, 4, 1, 5, 4, 4, 4, 4]
+     >>> stats.image_counts_per_label
+     {'chicken': 2, 'cow': 6, 'horse': 7, 'pig': 5, 'sheep': 7}
+     >>> (stats.image_count, stats.class_count, stats.label_count)
+     (10, 5, 34)
+     """
+     label_counts = Counter()
+     image_counts = Counter()
+     index_location = defaultdict(list[int])
+     label_per_image: list[int] = []
+
+     for i, group in enumerate(labels):
+         # Count occurrences of each label in all sublists
+         group = to_numpy(group)
+
+         label_counts.update(group)
+
+         # Get the number of labels per image
+         label_per_image.append(len(group))
+
+         # Create a set of unique items in the current sublist
+         unique_items: set[int] = set(group)
+
+         # Update image counts and index locations
+         image_counts.update(unique_items)
+         for item in unique_items:
+             index_location[item].append(i)
+
+     return LabelStatsOutput(
+         label_counts_per_class=sort(label_counts),
+         label_counts_per_image=label_per_image,
+         image_counts_per_label=sort(image_counts),
+         image_indices_per_label=sort(index_location),
+         image_count=len(label_per_image),
+         class_count=len(label_counts),
+         label_count=sum(label_counts.values()),
+     )
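As a quick orientation for this new module, here is a minimal usage sketch. It assumes `labelstats` is re-exported from `dataeval.metrics.stats` (the `stats/__init__.py` change listed above); the label values are illustrative only, not taken from the package's own examples.

from dataeval.metrics.stats import labelstats  # assumed public import path

# One list of labels per image; string or integer labels both work
labels = [["cat", "dog"], ["dog"], ["cat", "bird", "dog"]]

stats = labelstats(labels)
stats.label_counts_per_class    # counts per class, e.g. {'bird': 1, 'cat': 2, 'dog': 3}
stats.image_indices_per_label   # which image indices contain each class, e.g. {'bird': [2], ...}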
dataeval/_internal/metrics/stats/pixelstats.py
@@ -0,0 +1,117 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import Iterable
+
+ import numpy as np
+ from numpy.typing import ArrayLike, NDArray
+ from scipy.stats import entropy, kurtosis, skew
+
+ from dataeval._internal.metrics.stats.base import BaseStatsOutput, StatsProcessor, run_stats
+ from dataeval._internal.output import set_metadata
+
+
+ class PixelStatsProcessor(StatsProcessor):
+     cache_keys = ["histogram"]
+     image_function_map = {
+         "mean": lambda self: np.mean(self.scaled),
+         "std": lambda x: np.std(x.scaled),
+         "var": lambda x: np.var(x.scaled),
+         "skew": lambda x: np.nan_to_num(skew(x.scaled.ravel())),
+         "kurtosis": lambda x: np.nan_to_num(kurtosis(x.scaled.ravel())),
+         "histogram": lambda x: np.histogram(x.scaled, 256, (0, 1))[0],
+         "entropy": lambda x: entropy(x.get("histogram")),
+     }
+     channel_function_map = {
+         "mean": lambda x: np.mean(x.scaled, axis=1),
+         "std": lambda x: np.std(x.scaled, axis=1),
+         "var": lambda x: np.var(x.scaled, axis=1),
+         "skew": lambda x: np.nan_to_num(skew(x.scaled, axis=1)),
+         "kurtosis": lambda x: np.nan_to_num(kurtosis(x.scaled, axis=1)),
+         "histogram": lambda x: np.apply_along_axis(lambda y: np.histogram(y, 256, (0, 1))[0], 1, x.scaled),
+         "entropy": lambda x: entropy(x.get("histogram"), axis=1),
+     }
+
+
+ @dataclass(frozen=True)
+ class PixelStatsOutput(BaseStatsOutput):
+     """
+     Attributes
+     ----------
+     mean : NDArray[np.float16]
+         Mean of the pixel values of the images
+     std : NDArray[np.float16]
+         Standard deviation of the pixel values of the images
+     var : NDArray[np.float16]
+         Variance of the pixel values of the images
+     skew : NDArray[np.float16]
+         Skew of the pixel values of the images
+     kurtosis : NDArray[np.float16]
+         Kurtosis of the pixel values of the images
+     histogram : NDArray[np.uint32]
+         Histogram of the pixel values of the images across 256 bins scaled between 0 and 1
+     entropy : NDArray[np.float16]
+         Entropy of the pixel values of the images
+     """
+
+     mean: NDArray[np.float16]
+     std: NDArray[np.float16]
+     var: NDArray[np.float16]
+     skew: NDArray[np.float16]
+     kurtosis: NDArray[np.float16]
+     histogram: NDArray[np.uint32]
+     entropy: NDArray[np.float16]
+
+
+ @set_metadata("dataeval.metrics")
+ def pixelstats(
+     images: Iterable[ArrayLike],
+     bboxes: Iterable[ArrayLike] | None = None,
+     per_channel: bool = False,
+ ) -> PixelStatsOutput:
+     """
+     Calculates pixel statistics for each image
+
+     This function computes various statistical metrics (e.g., mean, standard deviation, entropy)
+     on the images as a whole.
+
+     Parameters
+     ----------
+     images : Iterable[ArrayLike]
+         Images to perform calculations on
+     bboxes : Iterable[ArrayLike] or None
+         Bounding boxes in `xyxy` format for each image to perform calculations on
+
+     Returns
+     -------
+     PixelStatsOutput
+         A dictionary-like object containing the computed statistics for each image. The keys correspond
+         to the names of the statistics (e.g., 'mean', 'std'), and the values are lists of results for
+         each image or numpy arrays when the results are multi-dimensional.
+
+     See Also
+     --------
+     dimensionstats, visualstats, Outliers
+
+     Notes
+     -----
+     - All metrics are scaled based on the perceived bit depth (which is derived from the largest pixel value)
+       to allow for better comparison between images stored in different formats and different resolutions.
+
+     Examples
+     --------
+     Calculating the statistics on the images, whose shape is (C, H, W)
+
+     >>> results = pixelstats(images)
+     >>> print(results.mean)
+     [0.04828 0.562 0.06726 0.09937 0.1315 0.1636 0.1957 0.2278 0.26
+      0.292 0.3242 0.3562 0.3884 0.4204 0.4526 0.4846 0.5166 0.549
+      0.581 0.6133 0.6455 0.6772 0.7095 0.7417 0.774 0.8057 0.838
+      0.87 0.9023 0.934 ]
+     >>> print(results.entropy)
+     [3.238 3.303 0.8125 1.028 0.8223 1.046 0.8247 1.041 0.8203 1.012
+      0.812 0.9883 0.795 0.9243 0.9243 0.795 0.9907 0.8125 1.028 0.8223
+      1.046 0.8247 1.041 0.8203 1.012 0.812 0.9883 0.795 0.9243 0.9243]
+     """
+     output = run_stats(images, bboxes, per_channel, PixelStatsProcessor, PixelStatsOutput)
+     return PixelStatsOutput(**output)
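A short sketch of how the `per_channel` flag in the signature above might be used; the import path and the shape of the per-channel results are assumptions rather than facts stated in this diff.

import numpy as np
from dataeval.metrics.stats import pixelstats  # assumed public import path

rng = np.random.default_rng(0)
images = [rng.integers(0, 256, size=(3, 64, 64), dtype=np.uint8) for _ in range(4)]

per_image = pixelstats(images)                      # one statistic per image
per_channel = pixelstats(images, per_channel=True)  # presumably one statistic per image channel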
dataeval/_internal/metrics/stats/visualstats.py
@@ -0,0 +1,122 @@
+ from __future__ import annotations
+
+ from dataclasses import dataclass
+ from typing import Iterable
+
+ import numpy as np
+ from numpy.typing import ArrayLike, NDArray
+
+ from dataeval._internal.metrics.stats.base import BaseStatsOutput, StatsProcessor, run_stats
+ from dataeval._internal.metrics.utils import edge_filter
+ from dataeval._internal.output import set_metadata
+
+ QUARTILES = (0, 25, 50, 75, 100)
+
+
+ class VisualStatsProcessor(StatsProcessor):
+     cache_keys = ["percentiles"]
+     image_function_map = {
+         "brightness": lambda x: x.get("percentiles")[-2],
+         "blurriness": lambda x: np.std(edge_filter(np.mean(x.image, axis=0))),
+         "contrast": lambda x: np.nan_to_num(
+             (np.max(x.get("percentiles")) - np.min(x.get("percentiles"))) / np.mean(x.get("percentiles"))
+         ),
+         "darkness": lambda x: x.get("percentiles")[1],
+         "missing": lambda x: np.sum(np.isnan(x.image)) / np.prod(x.shape[-2:]),
+         "zeros": lambda x: np.count_nonzero(x.image == 0) / np.prod(x.shape[-2:]),
+         "percentiles": lambda x: np.nanpercentile(x.scaled, q=QUARTILES),
+     }
+     channel_function_map = {
+         "brightness": lambda x: x.get("percentiles")[:, -2],
+         "blurriness": lambda x: np.std(np.vectorize(edge_filter, signature="(m,n)->(m,n)")(x.image), axis=(1, 2)),
+         "contrast": lambda x: np.nan_to_num(
+             (np.max(x.get("percentiles"), axis=1) - np.min(x.get("percentiles"), axis=1))
+             / np.mean(x.get("percentiles"), axis=1)
+         ),
+         "darkness": lambda x: x.get("percentiles")[:, 1],
+         "missing": lambda x: np.sum(np.isnan(x.image), axis=(1, 2)) / np.prod(x.shape[-2:]),
+         "zeros": lambda x: np.count_nonzero(x.image == 0, axis=(1, 2)) / np.prod(x.shape[-2:]),
+         "percentiles": lambda x: np.nanpercentile(x.scaled, q=QUARTILES, axis=1).T,
+     }
+
+
+ @dataclass(frozen=True)
+ class VisualStatsOutput(BaseStatsOutput):
+     """
+     Attributes
+     ----------
+     brightness : NDArray[np.float16]
+         Brightness of the images
+     blurriness : NDArray[np.float16]
+         Blurriness of the images
+     contrast : NDArray[np.float16]
+         Image contrast ratio
+     darkness : NDArray[np.float16]
+         Darkness of the images
+     missing : NDArray[np.float16]
+         Percentage of the images with missing pixels
+     zeros : NDArray[np.float16]
+         Percentage of the images with zero value pixels
+     percentiles : NDArray[np.float16]
+         Percentiles of the pixel values of the images with quartiles of (0, 25, 50, 75, 100)
+     """
+
+     brightness: NDArray[np.float16]
+     blurriness: NDArray[np.float16]
+     contrast: NDArray[np.float16]
+     darkness: NDArray[np.float16]
+     missing: NDArray[np.float16]
+     zeros: NDArray[np.float16]
+     percentiles: NDArray[np.float16]
+
+
+ @set_metadata("dataeval.metrics")
+ def visualstats(
+     images: Iterable[ArrayLike],
+     bboxes: Iterable[ArrayLike] | None = None,
+     per_channel: bool = False,
+ ) -> VisualStatsOutput:
+     """
+     Calculates visual statistics for each image
+
+     This function computes various visual metrics (e.g., brightness, darkness, contrast, blurriness)
+     on the images as a whole.
+
+     Parameters
+     ----------
+     images : Iterable[ArrayLike]
+         Images to perform calculations on
+     bboxes : Iterable[ArrayLike] or None
+         Bounding boxes in `xyxy` format for each image to perform calculations on
+
+     Returns
+     -------
+     VisualStatsOutput
+         A dictionary-like object containing the computed visual statistics for each image. The keys correspond
+         to the names of the statistics (e.g., 'brightness', 'blurriness'), and the values are lists of results for
+         each image or numpy arrays when the results are multi-dimensional.
+
+     See Also
+     --------
+     dimensionstats, pixelstats, Outliers
+
+     Notes
+     -----
+     - `zeros` and `missing` are presented as a percentage of total pixel counts
+
+     Examples
+     --------
+     Calculating the statistics on the images, whose shape is (C, H, W)
+
+     >>> results = visualstats(images)
+     >>> print(results.brightness)
+     [0.0737 0.607 0.0713 0.1046 0.138 0.1713 0.2046 0.2379 0.2712 0.3047
+      0.338 0.3713 0.4045 0.438 0.4712 0.5044 0.538 0.5713 0.6045 0.638
+      0.6714 0.7046 0.738 0.7715 0.8047 0.838 0.871 0.905 0.938 0.971 ]
+     >>> print(results.contrast)
+     [2.041 1.332 1.293 1.279 1.272 1.268 1.265 1.263 1.261 1.26 1.259 1.258
+      1.258 1.257 1.257 1.256 1.256 1.255 1.255 1.255 1.255 1.254 1.254 1.254
+      1.254 1.254 1.254 1.253 1.253 1.253]
+     """
+     output = run_stats(images, bboxes, per_channel, VisualStatsProcessor, VisualStatsOutput)
+     return VisualStatsOutput(**output)
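For the `bboxes` parameter shared by these new stats functions, here is a hedged sketch of the expected call shape: one array of `xyxy` boxes per image, per the docstring. That the statistics are then reported per box region, and the import path, are assumptions.

import numpy as np
from dataeval.metrics.stats import visualstats  # assumed public import path

rng = np.random.default_rng(0)
images = [rng.integers(0, 256, size=(3, 32, 32), dtype=np.uint8) for _ in range(2)]
bboxes = [
    np.array([[0, 0, 16, 16], [8, 8, 32, 32]]),  # two boxes for the first image
    np.array([[4, 4, 20, 20]]),                  # one box for the second image
]

results = visualstats(images, bboxes=bboxes)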
dataeval/_internal/metrics/uap.py
@@ -9,7 +9,7 @@ from dataclasses import dataclass
  from numpy.typing import ArrayLike
  from sklearn.metrics import average_precision_score
 
- from dataeval._internal.interop import to_numpy
+ from dataeval._internal.interop import as_numpy
  from dataeval._internal.output import OutputMetadata, set_metadata
 
 
@@ -75,5 +75,5 @@ def uap(labels: ArrayLike, scores: ArrayLike) -> UAPOutput:
      UAPOutput(uap=0.7777777777777777)
      """
 
-     precision = float(average_precision_score(to_numpy(labels), to_numpy(scores), average="weighted"))
+     precision = float(average_precision_score(as_numpy(labels), as_numpy(scores), average="weighted"))
      return UAPOutput(precision)
dataeval/_internal/metrics/utils.py
@@ -1,10 +1,10 @@
  from __future__ import annotations
 
- from typing import Any, Callable, Literal, NamedTuple, Sequence
+ from typing import Any, Callable, Literal, Mapping, NamedTuple
 
  import numpy as np
  import xxhash as xxh
- from numpy.typing import NDArray
+ from numpy.typing import ArrayLike, NDArray
  from PIL import Image
  from scipy.fftpack import dct
  from scipy.signal import convolve2d
@@ -14,6 +14,8 @@ from scipy.spatial.distance import pdist, squareform
  from scipy.stats import entropy as sp_entropy
  from sklearn.neighbors import NearestNeighbors
 
+ from dataeval._internal.interop import to_numpy
+
  EPSILON = 1e-5
  EDGE_KERNEL = np.array([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]], dtype=np.int8)
  BIT_DEPTH = (1, 8, 12, 16, 32)
@@ -162,26 +164,26 @@ def infer_categorical(X: NDArray, threshold: float = 0.2) -> NDArray:
 
 
  def preprocess_metadata(
-     class_labels: Sequence[int], metadata: list[dict], cat_thresh: float = 0.2
+     class_labels: ArrayLike, metadata: Mapping[str, ArrayLike], cat_thresh: float = 0.2
  ) -> tuple[NDArray, list[str], list[bool]]:
-     # convert class_labels and list of metadata dicts to dict of ndarrays
-     metadata_dict: dict[str, NDArray] = {
-         "class_label": np.asarray(class_labels, dtype=int),
-         **{k: np.array([d[k] for d in metadata]) for k in metadata[0]},
-     }
+     # convert class_labels and dict of lists to matrix of metadata values
+     preprocessed_metadata = {"class_label": np.asarray(class_labels, dtype=int)}
 
      # map columns of dict that are not numeric (e.g. string) to numeric values
      # that mutual information and diversity functions can accommodate. Each
      # unique string receives a unique integer value.
-     for k, v in metadata_dict.items():
+     for k, v in metadata.items():
          # if not numeric
+         v = to_numpy(v)
          if not np.issubdtype(v.dtype, np.number):
              _, mapped_vals = np.unique(v, return_inverse=True)
-             metadata_dict[k] = mapped_vals
+             preprocessed_metadata[k] = mapped_vals
+         else:
+             preprocessed_metadata[k] = v
 
-     data = np.stack(list(metadata_dict.values()), axis=-1)
-     names = list(metadata_dict.keys())
-     is_categorical = [infer_categorical(metadata_dict[var], cat_thresh)[0] for var in names]
+     data = np.stack(list(preprocessed_metadata.values()), axis=-1)
+     names = list(preprocessed_metadata.keys())
+     is_categorical = [infer_categorical(preprocessed_metadata[var], cat_thresh)[0] for var in names]
 
      return data, names, is_categorical
 
@@ -350,6 +352,19 @@ def normalize_image_shape(image: NDArray) -> NDArray:
          raise ValueError("Images must have 2 or more dimensions.")
 
 
+ def normalize_box_shape(bounding_box: NDArray) -> NDArray:
+     """
+     Normalizes the bounding box shape into (N,4).
+     """
+     ndim = bounding_box.ndim
+     if ndim == 1:
+         return np.expand_dims(bounding_box, axis=0)
+     elif ndim > 2:
+         raise ValueError("Bounding boxes must have 2 dimensions: (# of boxes in an image, [X,Y,W,H]) -> (N,4)")
+     else:
+         return bounding_box
+
+
  def edge_filter(image: NDArray, offset: float = 0.5) -> NDArray:
      """
      Returns the image filtered using a 3x3 edge detection kernel:
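The `preprocess_metadata` change above is a small but breaking interface shift: metadata moves from a per-sample list of dicts to a mapping of per-factor arrays, which is then stacked column-wise into a matrix. A sketch of the two layouts (factor names and values are illustrative):

# 0.69.3-style argument: one dict per sample
metadata_old = [
    {"time": 1.2, "weather": "rain"},
    {"time": 3.4, "weather": "sun"},
]

# 0.70.0-style argument: one array-like per factor, aligned by sample index
metadata_new = {
    "time": [1.2, 3.4],
    "weather": ["rain", "sun"],
}
class_labels = [0, 1]
# preprocess_metadata(class_labels, metadata_new) converts non-numeric factors to
# integer codes and stacks the columns into a single (samples x factors) matrix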
dataeval/_internal/output.py
@@ -3,6 +3,7 @@ from __future__ import annotations
  import inspect
  from datetime import datetime, timezone
  from functools import wraps
+ from typing import Any
 
  import numpy as np
 
@@ -17,10 +18,10 @@ class OutputMetadata:
      _state: dict[str, str]
      _version: str
 
-     def dict(self) -> dict:
+     def dict(self) -> dict[str, Any]:
          return {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
 
-     def meta(self) -> dict:
+     def meta(self) -> dict[str, Any]:
          return {k.removeprefix("_"): v for k, v in self.__dict__.items() if k.startswith("_")}
 
 
@@ -67,19 +68,3 @@ def set_metadata(module_name: str = "", state_attr: list[str] | None = None):
          return wrapper
 
      return decorator
-
-
- def populate_defaults(d: dict, c: type) -> dict:
-     def default(t):
-         t = (
-             t if isinstance(t, str) else t._name if hasattr(t, "_name") else t.__name__
-         ).lower() # py3.9 : _name, py3.10 : __name__
-         if t.startswith("dict"):
-             return {}
-         if t.startswith("list"):
-             return []
-         if t.startswith("ndarray"):
-             return np.array([])
-         raise TypeError("Unrecognized annotation type")
-
-     return {k: d[k] if k in d else default(t) for k, t in c.__annotations__.items()}
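Finally, a small sketch of the two `OutputMetadata` helpers whose annotations were tightened above, reusing the `labelstats` output from the first sketch; the exact metadata keys beyond `state` and `version` are not shown in this diff and are assumptions.

stats = labelstats(labels)
stats.dict()  # public result fields only, e.g. label_counts_per_class, image_count, ...
stats.meta()  # fields captured by @set_metadata with the leading underscore stripped, e.g. "state", "version"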