ocf-data-sampler 0.2.38__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of ocf-data-sampler might be problematic.
- ocf_data_sampler/config/model.py +33 -4
- ocf_data_sampler/load/load_dataset.py +1 -1
- ocf_data_sampler/load/nwp/providers/cloudcasting.py +1 -1
- ocf_data_sampler/load/nwp/providers/ecmwf.py +1 -1
- ocf_data_sampler/load/nwp/providers/gfs.py +6 -1
- ocf_data_sampler/load/nwp/providers/icon.py +1 -1
- ocf_data_sampler/load/nwp/providers/ukv.py +1 -1
- ocf_data_sampler/load/nwp/providers/utils.py +43 -3
- ocf_data_sampler/load/open_tensorstore_zarrs.py +92 -0
- ocf_data_sampler/load/satellite.py +6 -40
- ocf_data_sampler/load/utils.py +1 -1
- ocf_data_sampler/select/dropout.py +33 -19
- ocf_data_sampler/torch_datasets/datasets/pvnet_uk.py +2 -0
- {ocf_data_sampler-0.2.38.dist-info → ocf_data_sampler-0.3.1.dist-info}/METADATA +2 -1
- {ocf_data_sampler-0.2.38.dist-info → ocf_data_sampler-0.3.1.dist-info}/RECORD +17 -16
- {ocf_data_sampler-0.2.38.dist-info → ocf_data_sampler-0.3.1.dist-info}/WHEEL +0 -0
- {ocf_data_sampler-0.2.38.dist-info → ocf_data_sampler-0.3.1.dist-info}/top_level.txt +0 -0
ocf_data_sampler/config/model.py
CHANGED
@@ -90,11 +90,10 @@ class DropoutMixin(Base):
         "negative or zero.",
     )
 
-    dropout_fraction: float = Field(
+    dropout_fraction: float|list[float] = Field(
         default=0,
-        description="Chance of dropout being applied to each sample",
-        ge=0,
-        le=1,
+        description="Either a float(Chance of dropout being applied to each sample) or a list of "
+        "floats (probability that dropout of the corresponding timedelta is applied)",
     )
 
     @field_validator("dropout_timedeltas_minutes")
@@ -105,6 +104,36 @@ class DropoutMixin(Base):
                 raise ValueError("Dropout timedeltas must be negative")
         return v
 
+
+    @field_validator("dropout_fraction")
+    def dropout_fractions(cls, dropout_frac: float|list[float]) -> float|list[float]:
+        """Validate 'dropout_frac'."""
+        from math import isclose
+        if isinstance(dropout_frac, float):
+            if not (dropout_frac <= 1):
+                raise ValueError("Input should be less than or equal to 1")
+            elif not (dropout_frac >= 0):
+                raise ValueError("Input should be greater than or equal to 0")
+
+        elif isinstance(dropout_frac, list):
+            if not dropout_frac:
+                raise ValueError("List cannot be empty")
+
+            if not all(isinstance(i, float) for i in dropout_frac):
+                raise ValueError("All elements in the list must be floats")
+
+            if not all(0 <= i <= 1 for i in dropout_frac):
+                raise ValueError("Each float in the list must be between 0 and 1")
+
+            if not isclose(sum(dropout_frac), 1.0, rel_tol=1e-9):
+                raise ValueError("Sum of all floats in the list must be 1.0")
+
+
+        else:
+            raise TypeError("Must be either a float or a list of floats")
+        return dropout_frac
+
+
     @model_validator(mode="after")
     def dropout_instructions_consistent(self) -> "DropoutMixin":
         """Validator for dropout instructions."""
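As a quick illustration of what the new validator accepts, here is a standalone sketch that mirrors the rules above; it is not code from the package itself:

from math import isclose

def check_dropout_fraction(value):
    """Mirror of the validator above: a float in [0, 1], or a non-empty
    list of floats in [0, 1] whose values sum to 1.0."""
    if isinstance(value, float):
        return 0 <= value <= 1
    if isinstance(value, list):
        return (
            bool(value)
            and all(isinstance(v, float) for v in value)
            and all(0 <= v <= 1 for v in value)
            and isclose(sum(value), 1.0, rel_tol=1e-9)
        )
    return False

assert check_dropout_fraction(0.1)              # single chance of any dropout
assert check_dropout_fraction([0.5, 0.3, 0.2])  # per-timedelta probabilities summing to 1
assert not check_dropout_fraction([0.5, 0.3])   # rejected: does not sum to 1.0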
ocf_data_sampler/load/nwp/providers/cloudcasting.py
CHANGED

@@ -28,7 +28,7 @@ def open_cloudcasting(zarr_path: str | list[str]) -> xr.DataArray:
     [3] https://github.com/openclimatefix/sat_pred
     """
     # Open the data
-    ds = open_zarr_paths(zarr_path)
+    ds = open_zarr_paths(zarr_path, backend="tensorstore")
 
     # Rename
     ds = ds.rename(
ocf_data_sampler/load/nwp/providers/ecmwf.py
CHANGED

@@ -19,7 +19,7 @@ def open_ifs(zarr_path: str | list[str]) -> xr.DataArray:
     Returns:
         Xarray DataArray of the NWP data
     """
-    ds = open_zarr_paths(zarr_path)
+    ds = open_zarr_paths(zarr_path, backend="tensorstore")
 
     # LEGACY SUPPORT - rename variable to channel if it exists
     ds = ds.rename({"init_time": "init_time_utc", "variable": "channel"})
ocf_data_sampler/load/nwp/providers/gfs.py
CHANGED

@@ -23,7 +23,12 @@ def open_gfs(zarr_path: str | list[str], public: bool = False) -> xr.DataArray:
     _log.info("Loading NWP GFS data")
 
     # Open data
-    gfs: xr.Dataset = open_zarr_paths(zarr_path, time_dim="init_time_utc", public=public)
+    gfs: xr.Dataset = open_zarr_paths(
+        zarr_path,
+        time_dim="init_time_utc",
+        public=public,
+        backend="dask",
+    )
     nwp: xr.DataArray = gfs.to_array(dim="channel")
 
     del gfs
ocf_data_sampler/load/nwp/providers/icon.py
CHANGED

@@ -20,7 +20,7 @@ def open_icon_eu(zarr_path: str | list[str]) -> xr.DataArray:
         Xarray DataArray of the NWP data
     """
     # Open and check initially
-    ds = open_zarr_paths(zarr_path, time_dim="init_time_utc")
+    ds = open_zarr_paths(zarr_path, time_dim="init_time_utc", backend="dask")
 
     if "icon_eu_data" in ds.data_vars:
         nwp = ds["icon_eu_data"]
ocf_data_sampler/load/nwp/providers/utils.py
CHANGED

@@ -1,21 +1,48 @@
 """Utility functions for the NWP data processing."""
 
+from glob import glob
+
 import xarray as xr
+from xarray_tensorstore import open_zarr
+
+from ocf_data_sampler.load.open_tensorstore_zarrs import open_zarrs
 
 
 def open_zarr_paths(
-    zarr_path: str | list[str], time_dim: str = "init_time", public: bool = False,
+    zarr_path: str | list[str],
+    time_dim: str = "init_time",
+    public: bool = False,
+    backend: str = "dask",
 ) -> xr.Dataset:
     """Opens the NWP data.
 
     Args:
         zarr_path: Path to the zarr(s) to open
         time_dim: Name of the time dimension
-        public: Whether the data is public or private
+        public: Whether the data is public or private. Only available for the dask backend.
+        backend: The xarray backend to use.
 
     Returns:
         The opened Xarray Dataset
     """
+    if backend not in ["dask", "tensorstore"]:
+        raise ValueError(
+            f"Unsupported backend: {backend}. Supported backends are 'dask' and 'tensorstore'.",
+        )
+
+    if public and backend == "tensorstore":
+        raise ValueError("Public data is only supported with the 'dask' backend.")
+
+    if backend == "tensorstore":
+        ds = _tensostore_open_zarr_paths(zarr_path, time_dim)
+
+    elif backend == "dask":
+        ds = _dask_open_zarr_paths(zarr_path, time_dim, public)
+
+    return ds
+
+
+def _dask_open_zarr_paths(zarr_path: str | list[str], time_dim: str, public: bool) -> xr.Dataset:
     general_kwargs = {
         "engine": "zarr",
         "chunks": "auto",
@@ -26,7 +53,7 @@ def open_zarr_paths(
         # note this only works for s3 zarr paths at the moment
         general_kwargs["storage_options"] = {"anon": True}
 
-    if
+    if isinstance(zarr_path, list | tuple) or "*" in str(zarr_path):  # Multi-file dataset
         ds = xr.open_mfdataset(
             zarr_path,
             concat_dim=time_dim,
@@ -41,3 +68,16 @@ def open_zarr_paths(
         **general_kwargs,
     )
     return ds
+
+
+def _tensostore_open_zarr_paths(zarr_path: str | list[str], time_dim: str) -> xr.Dataset:
+
+    if "*" in str(zarr_path):
+        zarr_path = sorted(glob(zarr_path))
+
+    if isinstance(zarr_path, list | tuple):
+        ds = open_zarrs(zarr_path, concat_dim=time_dim).sortby(time_dim)
+    else:
+        ds = open_zarr(zarr_path)
+    return ds
+
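For orientation, here is how the two backends might be selected by a caller; this is an illustrative sketch and the zarr path is a placeholder:

from ocf_data_sampler.load.nwp.providers.utils import open_zarr_paths

# Dask-chunked opening, as before; the only backend that supports public=True
ds_dask = open_zarr_paths("nwp.zarr", time_dim="init_time_utc", backend="dask")

# Lazily-indexed TensorStore opening, now used by the cloudcasting and ECMWF loaders
ds_ts = open_zarr_paths("nwp.zarr", time_dim="init_time_utc", backend="tensorstore")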
ocf_data_sampler/load/open_tensorstore_zarrs.py
ADDED

@@ -0,0 +1,92 @@
+"""Open multiple zarrs with TensorStore.
+
+This extendds the functionality of xarray_tensorstore to open multiple zarr stores
+"""
+
+import os
+
+import tensorstore as ts
+import xarray as xr
+from xarray_tensorstore import (
+    _raise_if_mask_and_scale_used_for_data_vars,
+    _TensorStoreAdapter,
+    _zarr_spec_from_path,
+)
+
+
+def tensorstore_open_multi_zarrs(
+    paths: list[str],
+    data_vars: list[str],
+    concat_axes: list[int],
+    context: ts.Context,
+    write: bool,
+) -> dict[str, ts.TensorStore]:
+    """Open multiple zarrs with TensorStore.
+
+    Args:
+        paths: List of paths to zarr stores.
+        data_vars: List of data variable names to open.
+        concat_axes: List of axes along which to concatenate the data variables.
+        context: TensorStore context.
+        write: Whether to open the stores for writing.
+    """
+    arrays_list = []
+    for path in paths:
+        specs = {k: _zarr_spec_from_path(os.path.join(path, k)) for k in data_vars}
+        array_futures = {
+            k: ts.open(spec, read=True, write=write, context=context)
+            for k, spec in specs.items()
+        }
+        arrays_list.append({k: v.result() for k, v in array_futures.items()})
+
+    arrays = {}
+    for k, axis in zip(data_vars, concat_axes, strict=False):
+        datasets = [d[k] for d in arrays_list]
+        arrays[k] = ts.concat(datasets, axis=axis)
+
+    return arrays
+
+
+def open_zarrs(
+    paths: list[str],
+    concat_dim: str,
+    *,
+    context: ts.Context | None = None,
+    mask_and_scale: bool = True,
+    write: bool = False,
+) -> xr.Dataset:
+    """Open multiple zarrs with TensorStore.
+
+    Args:
+        paths: List of paths to zarr stores.
+        concat_dim: Dimension along which to concatenate the data variables.
+        context: TensorStore context.
+        mask_and_scale: Whether to mask and scale the data.
+        write: Whether to open the stores for writing.
+    """
+    if context is None:
+        context = ts.Context()
+
+    ds = xr.open_mfdataset(
+        paths,
+        concat_dim=concat_dim,
+        combine="nested",
+        mask_and_scale=mask_and_scale,
+        decode_timedelta=True,
+    )
+
+    if mask_and_scale:
+        # Data variables get replaced below with _TensorStoreAdapter arrays, which
+        # don't get masked or scaled. Raising an error avoids surprising users with
+        # incorrect data values.
+        _raise_if_mask_and_scale_used_for_data_vars(ds)
+
+    data_vars = list(ds.data_vars)
+
+    concat_axes = [ds[v].dims.index(concat_dim) for v in data_vars]
+
+    arrays = tensorstore_open_multi_zarrs(paths, data_vars, concat_axes, context, write)
+
+    new_data = {k: _TensorStoreAdapter(v) for k, v in arrays.items()}
+
+    return ds.copy(data=new_data)
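A hypothetical usage of the new helper; the paths are placeholders, and the stores are assumed to share the concatenation dimension:

from ocf_data_sampler.load.open_tensorstore_zarrs import open_zarrs

# Concatenate two zarr stores along "time"; the returned Dataset keeps its
# metadata from xr.open_mfdataset, while data variables are read via TensorStore.
ds = open_zarrs(["sat_2023.zarr", "sat_2024.zarr"], concat_dim="time")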
ocf_data_sampler/load/satellite.py
CHANGED

@@ -1,6 +1,7 @@
 """Satellite loader."""
 import numpy as np
 import xarray as xr
+from xarray_tensorstore import open_zarr
 
 from ocf_data_sampler.load.utils import (
     check_time_unique_increasing,
@@ -8,39 +9,7 @@ from ocf_data_sampler.load.utils import (
     make_spatial_coords_increasing,
 )
 
-
-def get_single_sat_data(zarr_path: str) -> xr.Dataset:
-    """Helper function to open a zarr from either a local or GCP path.
-
-    Args:
-        zarr_path: path to a zarr file. Wildcards (*) are supported only for local paths
-            GCS paths (gs://) do not support wildcards
-
-    Returns:
-        An xarray Dataset containing satellite data
-
-    Raises:
-        ValueError: If a wildcard (*) is used in a GCS (gs://) path
-    """
-    # Raise an error if a wildcard is used in a GCP path
-    if "gs://" in str(zarr_path) and "*" in str(zarr_path):
-        raise ValueError("Wildcard (*) paths are not supported for GCP (gs://) URLs")
-
-    # Handle multi-file dataset for local paths
-    if "*" in str(zarr_path):
-        ds = xr.open_mfdataset(
-            zarr_path,
-            engine="zarr",
-            concat_dim="time",
-            combine="nested",
-            chunks="auto",
-            join="override",
-        )
-        check_time_unique_increasing(ds.time)
-    else:
-        ds = xr.open_dataset(zarr_path, engine="zarr", chunks="auto")
-
-    return ds
+from .open_tensorstore_zarrs import open_zarrs
 
 
 def open_sat_data(zarr_path: str | list[str]) -> xr.DataArray:
@@ -52,14 +21,11 @@ def open_sat_data(zarr_path: str | list[str]) -> xr.DataArray:
     """
     # Open the data
     if isinstance(zarr_path, list | tuple):
-        ds = xr.combine_nested(
-            [get_single_sat_data(path) for path in zarr_path],
-            concat_dim="time",
-            combine_attrs="override",
-            join="override",
-        )
+        ds = open_zarrs(zarr_path, concat_dim="time")
     else:
-        ds = get_single_sat_data(zarr_path)
+        ds = open_zarr(zarr_path)
+
+    check_time_unique_increasing(ds.time)
 
     ds = ds.rename(
         {
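From the caller's side nothing changes; an illustrative call with placeholder paths:

from ocf_data_sampler.load.satellite import open_sat_data

da_single = open_sat_data("satellite.zarr")                   # via xarray_tensorstore.open_zarr
da_multi = open_sat_data(["sat_2023.zarr", "sat_2024.zarr"])  # via the new open_zarrs helper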
ocf_data_sampler/load/utils.py
CHANGED
@@ -47,7 +47,7 @@ def get_xr_data_array_from_xr_dataset(ds: xr.Dataset) -> xr.DataArray:
     Args:
         ds: xr.Dataset to extract xr.DataArray from
     """
-    datavars = list(ds.
+    datavars = list(ds.data_vars)
     if len(datavars) != 1:
         raise ValueError("Cannot open as xr.DataArray: dataset contains multiple variables")
     return ds[datavars[0]]
ocf_data_sampler/select/dropout.py
CHANGED

@@ -12,7 +12,7 @@ import xarray as xr
 def apply_sampled_dropout_time(
     t0: pd.Timestamp,
     dropout_timedeltas: list[pd.Timedelta],
-    dropout_frac: float,
+    dropout_frac: float|list[float],
     da: xr.DataArray,
 ) -> xr.DataArray:
     """Randomly pick a dropout time from a list of timedeltas and apply dropout time to the data.
@@ -20,28 +20,42 @@ def apply_sampled_dropout_time(
     Args:
         t0: The forecast init-time
         dropout_timedeltas: List of timedeltas relative to t0 to pick from
-        dropout_frac: Probability that dropout will be applied.
-            This should be between 0 and 1 inclusive
+        dropout_frac: Either a probability that dropout will be applied.
+            This should be between 0 and 1 inclusive.
+            Or a list of probabilities for each of the corresponding timedeltas
         da: Xarray DataArray with 'time_utc' coordinate
     """
-
-    if dropout_frac > 0 and len(dropout_timedeltas) == 0:
-        raise ValueError("To apply dropout, dropout_timedeltas must be provided")
+    if isinstance(dropout_frac, list):
+        # checking if len match
+        if len(dropout_frac) != len(dropout_timedeltas):
+            raise ValueError("Lengths of dropout_frac and dropout_timedeltas should match")
 
-    for t in dropout_timedeltas:
-        if t > pd.Timedelta("0min"):
-            raise ValueError("Dropout timedeltas must be negative")
 
-    if not (0 <= dropout_frac <= 1):
-        raise ValueError("dropout_frac must be between 0 and 1 inclusive")
 
-    if (len(dropout_timedeltas) == 0) or (np.random.uniform() >= dropout_frac):
-        dropout_time = None
+
+        dropout_time = t0 + np.random.choice(dropout_timedeltas,p=dropout_frac)
+
+        return da.where(da.time_utc <= dropout_time)
+
+
+
+    # old logic
     else:
-        dropout_time = t0 + np.random.choice(dropout_timedeltas)
+        # sample dropout time
+        if dropout_frac > 0 and len(dropout_timedeltas) == 0:
+            raise ValueError("To apply dropout, dropout_timedeltas must be provided")
+
+
+        if not (0 <= dropout_frac <= 1):
+            raise ValueError("dropout_frac must be between 0 and 1 inclusive")
+
+        if (len(dropout_timedeltas) == 0) or (np.random.uniform() >= dropout_frac):
+            dropout_time = None
+        else:
+            dropout_time = t0 + np.random.choice(dropout_timedeltas)
 
-    if dropout_time is None:
-        return da
-
-    # This replaces the times after the dropout with NaNs
-    return da.where(da.time_utc <= dropout_time)
+    # apply dropout time
+    if dropout_time is None:
+        return da
+    # This replaces the times after the dropout with NaNs
+    return da.where(da.time_utc <= dropout_time)
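With the list form, dropout always fires and the timedelta is drawn with the given weights. A small self-contained sketch; the data and values are illustrative:

import numpy as np
import pandas as pd
import xarray as xr

from ocf_data_sampler.select.dropout import apply_sampled_dropout_time

da = xr.DataArray(
    np.random.rand(4),
    dims="time_utc",
    coords={"time_utc": pd.date_range("2024-01-01 00:00", periods=4, freq="30min")},
)
# -30min is drawn twice as often as -60min; steps after the drawn time become NaN
da_dropped = apply_sampled_dropout_time(
    t0=pd.Timestamp("2024-01-01 01:30"),
    dropout_timedeltas=[pd.Timedelta("-30min"), pd.Timedelta("-60min")],
    dropout_frac=[2 / 3, 1 / 3],
    da=da,
)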
ocf_data_sampler/torch_datasets/datasets/pvnet_uk.py
CHANGED

@@ -270,6 +270,8 @@ class PVNetUKRegionalDataset(AbstractPVNetUKDataset):
     def __getitem__(self, idx: int) -> NumpySample:
         # Get the coordinates of the sample
 
+        idx = int(idx)
+
         if idx >= len(self):
             raise ValueError(f"Index {idx} out of range for dataset of length {len(self)}")
 
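A plausible motivation for the added coercion, not stated in the diff: PyTorch samplers often hand __getitem__ a numpy integer rather than a Python int, and int(idx) normalises it before the bounds check:

import numpy as np

idx = np.int64(7)  # the kind of index a DataLoader sampler may supply
idx = int(idx)     # plain Python int, as the dataset now guarantees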
{ocf_data_sampler-0.2.38.dist-info → ocf_data_sampler-0.3.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ocf-data-sampler
-Version: 0.2.38
+Version: 0.3.1
 Author: James Fulton, Peter Dudfield
 Author-email: Open Climate Fix team <info@openclimatefix.org>
 License: MIT License
@@ -44,6 +44,7 @@ Requires-Dist: pyproj
 Requires-Dist: pyaml_env
 Requires-Dist: pyresample
 Requires-Dist: h5netcdf
+Requires-Dist: xarray-tensorstore==0.1.5
 
 # ocf-data-sampler
 
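Besides the version bump, the only metadata change is the new pinned dependency. After upgrading, the pin can be sanity-checked like so (illustrative):

from importlib.metadata import version

assert version("xarray-tensorstore") == "0.1.5"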
{ocf_data_sampler-0.2.38.dist-info → ocf_data_sampler-0.3.1.dist-info}/RECORD
CHANGED

@@ -2,25 +2,26 @@ ocf_data_sampler/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,
 ocf_data_sampler/utils.py,sha256=2NEl70ySdTpr0pbLRk4LGklvXe1Nv1hun9XKcDw7-44,610
 ocf_data_sampler/config/__init__.py,sha256=O29mbH0XG2gIY1g3BaveGCnpBO2SFqdu-qzJ7a6evl0,223
 ocf_data_sampler/config/load.py,sha256=LL-7wemI8o4KPkx35j-wQ3HjsMvDgqXr7G46IcASfnU,632
-ocf_data_sampler/config/model.py,sha256=
+ocf_data_sampler/config/model.py,sha256=Jss8UDJAaQIBDr9megX2pERoT0ocFmwLNFC8pCWN6VA,12386
 ocf_data_sampler/config/save.py,sha256=m8SPw5rXjkMm1rByjh3pK5StdBi4e8ysnn3jQopdRaI,1064
 ocf_data_sampler/data/uk_gsp_locations_20220314.csv,sha256=RSh7DRh55E3n8lVAaWXGTaXXHevZZtI58td4d4DhGos,10415772
 ocf_data_sampler/data/uk_gsp_locations_20250109.csv,sha256=XZISFatnbpO9j8LwaxNKFzQSjs6hcHFsV8a9uDDpy2E,9055334
 ocf_data_sampler/load/__init__.py,sha256=-vQP9g0UOWdVbjEGyVX_ipa7R1btmiETIKAf6aw4d78,201
 ocf_data_sampler/load/gsp.py,sha256=IrTA6z9quN08imKGHJLf8gRktarxn1-utNMNFD0zWQs,2944
-ocf_data_sampler/load/load_dataset.py,sha256=
-ocf_data_sampler/load/satellite.py,sha256=
+ocf_data_sampler/load/load_dataset.py,sha256=K8rWykjII-3g127If7WRRFivzHNx3SshCvZj4uQlf28,2089
+ocf_data_sampler/load/open_tensorstore_zarrs.py,sha256=_RHWe0GmrBSA9s1TH5I9VCMPpeZEsuRuhDt5Vyyx5Fo,2725
+ocf_data_sampler/load/satellite.py,sha256=RylkJz8avxdM5pK_liaTlD1DTboyPMgykXJ4_Ek9WBA,1840
 ocf_data_sampler/load/site.py,sha256=WtOy20VMHJIY0IwEemCdcecSDUGcVaLUown-4ixJw90,2147
-ocf_data_sampler/load/utils.py,sha256=
+ocf_data_sampler/load/utils.py,sha256=AGL0aOOQPrgqNBTjlBtR7Qg1PyQov3DFJo-y198u8pY,2044
 ocf_data_sampler/load/nwp/__init__.py,sha256=SmcrnbygO5xtCKmGR4wtHrj-HI7nOAvnAtfuvRufBGQ,25
 ocf_data_sampler/load/nwp/nwp.py,sha256=0E9shei3Mq1N7F-fBlEKY5Hm0_kI7ysY_rffnWIshvk,3612
 ocf_data_sampler/load/nwp/providers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-ocf_data_sampler/load/nwp/providers/cloudcasting.py,sha256=
-ocf_data_sampler/load/nwp/providers/ecmwf.py,sha256=
-ocf_data_sampler/load/nwp/providers/gfs.py,sha256=
-ocf_data_sampler/load/nwp/providers/icon.py,sha256=
-ocf_data_sampler/load/nwp/providers/ukv.py,sha256=
-ocf_data_sampler/load/nwp/providers/utils.py,sha256=
+ocf_data_sampler/load/nwp/providers/cloudcasting.py,sha256=EbhJO1QNtTtumuyh3I7HQa7xZ50WeD-SQe7AsMxZC84,1544
+ocf_data_sampler/load/nwp/providers/ecmwf.py,sha256=P7JqfssmQq8eHKKXaBexsxts325AnK-JF3bl5ymVCjY,1020
+ocf_data_sampler/load/nwp/providers/gfs.py,sha256=h6vm-Rfz1JGOE4P_fP1_XQJ3bugNbeNAIyt56N8B1Dc,1066
+ocf_data_sampler/load/nwp/providers/icon.py,sha256=iVZwLKRr_D74_kAu5MHir6pRKEfbTmIxFRZAxzmiYdI,1257
+ocf_data_sampler/load/nwp/providers/ukv.py,sha256=2i32VM9gnmWUpbL0qBSp_AKzuyKucXZPS8yklbcGlbc,1039
+ocf_data_sampler/load/nwp/providers/utils.py,sha256=cVwCiC8FqNpkZFSUGv1CRqIQlKdjx1sIsb2SIUlvWV8,2333
 ocf_data_sampler/numpy_sample/__init__.py,sha256=nY5C6CcuxiWZ_jrXRzWtN7WyKXhJImSiVTIG6Rz4B_4,401
 ocf_data_sampler/numpy_sample/collate.py,sha256=hoxIc5SoHoIs3Nx37aRZzWChpswjy9lHUgaKgHIoo80,2039
 ocf_data_sampler/numpy_sample/common_types.py,sha256=9CjYHkUTx0ObduWh43fhsybZCTXvexql7qC2ptMDoek,377
@@ -31,7 +32,7 @@ ocf_data_sampler/numpy_sample/satellite.py,sha256=RaYzYIcB1AmDrKeiqSpn4QVfBH-QMe
 ocf_data_sampler/numpy_sample/site.py,sha256=zfYBjK3CJrIaKH1QdKXU7gwOxTqONt527y3nJ9TRnwc,1325
 ocf_data_sampler/numpy_sample/sun_position.py,sha256=5tt-zNm6aRuZMsxZPaAxyg7HeikswfZCeHWXTHuO2K0,1555
 ocf_data_sampler/select/__init__.py,sha256=mK7Wu_-j9IXGTYrOuDf5yDDuU5a306b0iGKTAooNg_s,210
-ocf_data_sampler/select/dropout.py,sha256=
+ocf_data_sampler/select/dropout.py,sha256=BYpv8L771faPOyN7SdIJ5cwkpDve-ohClj95jjsHmjg,1973
 ocf_data_sampler/select/fill_time_periods.py,sha256=TlGxp1xiAqnhdWfLy0pv3FuZc00dtimjWdLzr4JoTGA,865
 ocf_data_sampler/select/find_contiguous_time_periods.py,sha256=etkr6LuB7zxkfzWJ6SgHiULdRuFzFlq5bOUNd257Qx4,11545
 ocf_data_sampler/select/geospatial.py,sha256=CDExkl36eZOKmdJPzUr_K0Wn3axHqv5nYo-EkSiINcc,5032
@@ -39,7 +40,7 @@ ocf_data_sampler/select/location.py,sha256=AZvGR8y62opiW7zACGXjoOtBEWRfSLOZIA73O
 ocf_data_sampler/select/select_spatial_slice.py,sha256=Hd4jGRUfIZRoWCirOQZeoLpaUnStB6KyFSTPX69wZLw,8790
 ocf_data_sampler/select/select_time_slice.py,sha256=HeHbwZ0CP03x0-LaJtpbSdtpLufwVTR73p6wH6O_PS8,5513
 ocf_data_sampler/torch_datasets/datasets/__init__.py,sha256=jfJSFcR0eO1AqeH7S3KnGjsBqVZT5w3oyi784PUR6Q0,146
-ocf_data_sampler/torch_datasets/datasets/pvnet_uk.py,sha256=
+ocf_data_sampler/torch_datasets/datasets/pvnet_uk.py,sha256=v63goKEMI6UgBPnQCnIbxhFFdwuP_sxgcPYY6iNfGkc,12257
 ocf_data_sampler/torch_datasets/datasets/site.py,sha256=R9sYZz3e1zr8NAtlYQp8_DgI3wIfC7Zvaeo_73TyiA8,24936
 ocf_data_sampler/torch_datasets/sample/__init__.py,sha256=GL84vdZl_SjHDGVyh9Uekx2XhPYuZ0dnO3l6f6KXnHI,100
 ocf_data_sampler/torch_datasets/sample/base.py,sha256=cQ1oIyhdmlotejZK8B3Cw6MNvpdnBPD8G_o2h7Ye4Vc,2206
@@ -55,7 +56,7 @@ ocf_data_sampler/torch_datasets/utils/validation_utils.py,sha256=YqmT-lExWlI8_ul
 scripts/download_gsp_location_data.py,sha256=rRDXMoqX-RYY4jPdxhdlxJGhWdl6r245F5UARgKV6P4,3121
 scripts/refactor_site.py,sha256=skzvsPP0Cn9yTKndzkilyNcGz4DZ88ctvCJ0XrBdc2A,3135
 utils/compute_icon_mean_stddev.py,sha256=a1oWMRMnny39rV-dvu8rcx85sb4bXzPFrR1gkUr4Jpg,2296
-ocf_data_sampler-0.2.38.dist-info/METADATA,sha256=
-ocf_data_sampler-0.2.38.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ocf_data_sampler-0.2.38.dist-info/top_level.txt,sha256=LEFU4Uk-PEo72QGLAfnVZIUEm37Q8mKuMeg_Xk-p33g,31
-ocf_data_sampler-0.2.38.dist-info/RECORD,,
+ocf_data_sampler-0.3.1.dist-info/METADATA,sha256=pQpPqmpTlUiZnPY1Q_xZr1Z-GrKSATG_P77YYHpWm6Y,12224
+ocf_data_sampler-0.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ocf_data_sampler-0.3.1.dist-info/top_level.txt,sha256=LEFU4Uk-PEo72QGLAfnVZIUEm37Q8mKuMeg_Xk-p33g,31
+ocf_data_sampler-0.3.1.dist-info/RECORD,,
{ocf_data_sampler-0.2.38.dist-info → ocf_data_sampler-0.3.1.dist-info}/WHEEL
File without changes

{ocf_data_sampler-0.2.38.dist-info → ocf_data_sampler-0.3.1.dist-info}/top_level.txt
File without changes