rslearn 0.0.12__py3-none-any.whl → 0.0.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rslearn/config/dataset.py +23 -4
- rslearn/data_sources/planetary_computer.py +52 -0
- rslearn/dataset/handler_summaries.py +1 -0
- rslearn/dataset/manage.py +16 -2
- rslearn/models/olmoearth_pretrain/model.py +1 -0
- rslearn/train/prediction_writer.py +25 -8
- rslearn/train/tasks/embedding.py +116 -0
- rslearn/utils/raster_format.py +38 -0
- {rslearn-0.0.12.dist-info → rslearn-0.0.13.dist-info}/METADATA +2 -2
- {rslearn-0.0.12.dist-info → rslearn-0.0.13.dist-info}/RECORD +15 -14
- {rslearn-0.0.12.dist-info → rslearn-0.0.13.dist-info}/WHEEL +0 -0
- {rslearn-0.0.12.dist-info → rslearn-0.0.13.dist-info}/entry_points.txt +0 -0
- {rslearn-0.0.12.dist-info → rslearn-0.0.13.dist-info}/licenses/LICENSE +0 -0
- {rslearn-0.0.12.dist-info → rslearn-0.0.13.dist-info}/licenses/NOTICE +0 -0
- {rslearn-0.0.12.dist-info → rslearn-0.0.13.dist-info}/top_level.txt +0 -0
rslearn/config/dataset.py
CHANGED

```diff
@@ -125,7 +125,8 @@ class BandSetConfig:
         self,
         config_dict: dict[str, Any],
         dtype: DType,
-        bands: list[str],
+        bands: list[str] | None = None,
+        num_bands: int | None = None,
         format: dict[str, Any] | None = None,
         zoom_offset: int = 0,
         remap: dict[str, Any] | None = None,
@@ -137,7 +138,10 @@ class BandSetConfig:
         Args:
             config_dict: the config dict used to configure this BandSetConfig
             dtype: the pixel value type to store tiles in
-            bands: list of band names in this BandSetConfig
+            bands: list of band names in this BandSetConfig. One of bands or num_bands
+                must be set.
+            num_bands: the number of bands in this band set. The bands will be named
+                B00, B01, B02, etc.
             format: the format to store tiles in, defaults to geotiff
             zoom_offset: store images at a resolution higher or lower than the window
                 resolution. This enables keeping source data at its native resolution,
@@ -155,6 +159,14 @@ class BandSetConfig:
             materialization when creating mosaics, to determine which parts of the
             source images should be copied.
         """
+        if (bands is None and num_bands is None) or (
+            bands is not None and num_bands is not None
+        ):
+            raise ValueError("exactly one of bands and num_bands must be set")
+        if bands is None:
+            assert num_bands is not None
+            bands = [f"B{idx}" for idx in range(num_bands)]
+
         if class_names is not None and len(bands) != len(class_names):
             raise ValueError(
                 f"the number of class lists ({len(class_names)}) does not match the number of bands ({len(bands)})"
@@ -187,9 +199,16 @@ class BandSetConfig:
         kwargs = dict(
             config_dict=config,
             dtype=DType(config["dtype"]),
-            bands=config["bands"],
         )
-        for k in [
+        for k in [
+            "bands",
+            "num_bands",
+            "format",
+            "zoom_offset",
+            "remap",
+            "class_names",
+            "nodata_vals",
+        ]:
             if k in config:
                 kwargs[k] = config[k]
         return BandSetConfig(**kwargs)  # type: ignore
```
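For illustration only (not taken from the diff), a minimal sketch of a band set entry using the new option; the surrounding dataset config schema is assumed and only the key names mirror the constructor arguments above:

```python
# Hypothetical band set entry: per the new validation, exactly one of
# "bands" / "num_bands" may be set.
band_set = {
    "dtype": "uint8",
    "num_bands": 4,  # expands to ["B0", "B1", "B2", "B3"] via f"B{idx}"
}
```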
rslearn/data_sources/planetary_computer.py
CHANGED

```diff
@@ -827,3 +827,55 @@ class Sentinel1(PlanetaryComputer):
                 kwargs[k] = d[k]
 
         return Sentinel1(**kwargs)
+
+
+class Naip(PlanetaryComputer):
+    """A data source for NAIP data on Microsoft Planetary Computer.
+
+    See https://planetarycomputer.microsoft.com/dataset/naip.
+    """
+
+    COLLECTION_NAME = "naip"
+    ASSET_BANDS = {"image": ["R", "G", "B", "NIR"]}
+
+    def __init__(
+        self,
+        **kwargs: Any,
+    ):
+        """Initialize a new Naip instance.
+
+        Args:
+            band_names: list of bands to try to ingest.
+            kwargs: additional arguments to pass to PlanetaryComputer.
+        """
+        super().__init__(
+            collection_name=self.COLLECTION_NAME,
+            asset_bands=self.ASSET_BANDS,
+            **kwargs,
+        )
+
+    @staticmethod
+    def from_config(config: RasterLayerConfig, ds_path: UPath) -> "Naip":
+        """Creates a new Naip instance from a configuration dictionary."""
+        if config.data_source is None:
+            raise ValueError("config.data_source is required")
+        d = config.data_source.config_dict
+        kwargs = {}
+
+        if "timeout_seconds" in d:
+            kwargs["timeout"] = timedelta(seconds=d["timeout_seconds"])
+
+        if "cache_dir" in d:
+            kwargs["cache_dir"] = join_upath(ds_path, d["cache_dir"])
+
+        simple_optionals = [
+            "query",
+            "sort_by",
+            "sort_ascending",
+            "max_items_per_client",
+        ]
+        for k in simple_optionals:
+            if k in d:
+                kwargs[k] = d[k]
+
+        return Naip(**kwargs)
```
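As a rough sketch of how the new data source might be referenced from a dataset configuration; the exact schema and the class path string are assumptions, and only the option names come from from_config above:

```python
# Hypothetical data_source entry for a raster layer using the new Naip class.
data_source = {
    "name": "rslearn.data_sources.planetary_computer.Naip",  # assumed class path
    "timeout_seconds": 60,     # converted to a timedelta by from_config
    "sort_ascending": False,   # one of the simple optional keys passed through
}
```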
rslearn/dataset/manage.py
CHANGED

```diff
@@ -118,6 +118,7 @@ def prepare_dataset_windows(
                     duration_seconds=time.monotonic() - layer_start_time,
                     windows_prepared=0,
                     windows_skipped=len(windows),
+                    windows_rejected=0,
                     get_items_attempts=0,
                 )
             )
@@ -141,6 +142,7 @@ def prepare_dataset_windows(
                     duration_seconds=time.monotonic() - layer_start_time,
                     windows_prepared=0,
                     windows_skipped=len(windows),
+                    windows_rejected=0,
                     get_items_attempts=0,
                 )
             )
@@ -181,6 +183,9 @@ def prepare_dataset_windows(
            attempts_counter=attempts_counter,
        )
 
+        windows_prepared = 0
+        windows_rejected = 0
+        min_matches = data_source_cfg.query_config.min_matches
        for window, result in zip(needed_windows, results):
            layer_datas = window.load_layer_datas()
            layer_datas[layer_name] = WindowLayerData(
@@ -191,13 +196,22 @@ def prepare_dataset_windows(
            )
            window.save_layer_datas(layer_datas)
 
+            # If result is empty and min_matches > 0, window was rejected due to min_matches
+            if len(result) == 0 and min_matches > 0:
+                windows_rejected += 1
+            else:
+                windows_prepared += 1
+
+        windows_skipped = len(windows) - len(needed_windows)
+
        layer_summaries.append(
            LayerPrepareSummary(
                layer_name=layer_name,
                data_source_name=data_source_cfg.name,
                duration_seconds=time.monotonic() - layer_start_time,
-                windows_prepared=
-                windows_skipped=
+                windows_prepared=windows_prepared,
+                windows_skipped=windows_skipped,
+                windows_rejected=windows_rejected,
                get_items_attempts=attempts_counter.value,
            )
        )
```
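The new bookkeeping can be condensed as follows; this is a restatement of the loop added above, not additional behavior:

```python
# A window whose prepare result is empty while min_matches > 0 is counted as
# rejected; every other needed window is counted as prepared. Windows that
# already had layer data are skipped.
windows_prepared = sum(
    1 for result in results if len(result) > 0 or min_matches == 0
)
windows_rejected = len(results) - windows_prepared
windows_skipped = len(windows) - len(needed_windows)
```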
rslearn/train/prediction_writer.py
CHANGED

```diff
@@ -22,7 +22,11 @@ from rslearn.log_utils import get_logger
 from rslearn.utils.array import copy_spatial_array
 from rslearn.utils.feature import Feature
 from rslearn.utils.geometry import PixelBounds
-from rslearn.utils.raster_format import
+from rslearn.utils.raster_format import (
+    RasterFormat,
+    adjust_projection_and_bounds_for_array,
+    load_raster_format,
+)
 from rslearn.utils.vector_format import VectorFormat, load_vector_format
 
 from .lightning_module import RslearnLightningModule
@@ -68,15 +72,18 @@ class VectorMerger(PatchPredictionMerger):
 class RasterMerger(PatchPredictionMerger):
     """Merger for raster data that copies the rasters to the output."""
 
-    def __init__(self, padding: int | None = None):
+    def __init__(self, padding: int | None = None, downsample_factor: int = 1):
         """Create a new RasterMerger.
 
         Args:
             padding: the padding around the individual patch outputs to remove. This is
                 typically used when leveraging overlapping patches. Portions of outputs
                 at the border of the window will still be retained.
+            downsample_factor: the factor by which the rasters output by the task are
+                lower in resolution relative to the window resolution.
         """
         self.padding = padding
+        self.downsample_factor = downsample_factor
 
     def merge(
         self, window: Window, outputs: Sequence[PendingPatchOutput]
@@ -87,8 +94,8 @@ class RasterMerger(PatchPredictionMerger):
         merged_image = np.zeros(
             (
                 num_channels,
-                window.bounds[3] - window.bounds[1],
-                window.bounds[2] - window.bounds[0],
+                (window.bounds[3] - window.bounds[1]) // self.downsample_factor,
+                (window.bounds[2] - window.bounds[0]) // self.downsample_factor,
             ),
             dtype=dtype,
         )
@@ -104,7 +111,10 @@ class RasterMerger(PatchPredictionMerger):
             # If the output is not on the left or top boundary, then we should apply
             # the padding (if set).
             src = output.output
-            src_offset = (
+            src_offset = (
+                output.bounds[0] // self.downsample_factor,
+                output.bounds[1] // self.downsample_factor,
+            )
             if self.padding is not None and output.bounds[0] != window.bounds[0]:
                 src = src[:, :, self.padding :]
                 src_offset = (src_offset[0] + self.padding, src_offset[1])
@@ -116,7 +126,10 @@ class RasterMerger(PatchPredictionMerger):
                 src=src,
                 dst=merged_image,
                 src_offset=src_offset,
-                dst_offset=(
+                dst_offset=(
+                    window.bounds[0] // self.downsample_factor,
+                    window.bounds[1] // self.downsample_factor,
+                ),
             )
 
         return merged_image
@@ -330,9 +343,13 @@ class RslearnWriter(BasePredictionWriter):
                 self.output_layer, self.layer_config.band_sets[0].bands
             )
             assert isinstance(self.format, RasterFormat)
-
-
+
+            # In case the merged_output is at a different resolution than the window,
+            # get adjusted projection and bounds for writing it.
+            projection, bounds = adjust_projection_and_bounds_for_array(
+                window.projection, window.bounds, merged_output
             )
+            self.format.encode_raster(raster_dir, projection, bounds, merged_output)
 
         elif self.layer_config.layer_type == LayerType.VECTOR:
             layer_dir = window.get_layer_dir(self.output_layer)
```
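A small sketch of the arithmetic introduced by downsample_factor (the values are illustrative):

```python
# With downsample_factor=4, a 512x512 window is merged into a 128x128 raster,
# and patch offsets are divided by the same factor before pasting.
downsample_factor = 4
window_bounds = (0, 0, 512, 512)  # (x1, y1, x2, y2) in window pixels
merged_height = (window_bounds[3] - window_bounds[1]) // downsample_factor  # 128
merged_width = (window_bounds[2] - window_bounds[0]) // downsample_factor   # 128
patch_bounds = (256, 256, 384, 384)
src_offset = (
    patch_bounds[0] // downsample_factor,
    patch_bounds[1] // downsample_factor,
)  # (64, 64)
```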
rslearn/train/tasks/embedding.py
ADDED

```diff
@@ -0,0 +1,116 @@
+"""Embedding task."""
+
+from typing import Any
+
+import numpy.typing as npt
+import torch
+from torchmetrics import MetricCollection
+
+from rslearn.utils import Feature
+
+from .task import Task
+
+
+class EmbeddingTask(Task):
+    """A dummy task for computing embeddings.
+
+    This task does not compute any targets or loss. Instead, it is just set up for
+    inference, to save embeddings from the configured model.
+    """
+
+    def process_inputs(
+        self,
+        raw_inputs: dict[str, torch.Tensor],
+        metadata: dict[str, Any],
+        load_targets: bool = True,
+    ) -> tuple[dict[str, Any], dict[str, Any]]:
+        """Processes the data into targets.
+
+        Args:
+            raw_inputs: raster or vector data to process
+            metadata: metadata about the patch being read
+            load_targets: whether to load the targets or only inputs
+
+        Returns:
+            tuple (input_dict, target_dict) containing the processed inputs and targets
+                that are compatible with both metrics and loss functions
+        """
+        return {}, {}
+
+    def process_output(
+        self, raw_output: Any, metadata: dict[str, Any]
+    ) -> npt.NDArray[Any] | list[Feature]:
+        """Processes an output into raster or vector data.
+
+        Args:
+            raw_output: the output from prediction head.
+            metadata: metadata about the patch being read
+
+        Returns:
+            either raster or vector data.
+        """
+        # Just convert the raw output to numpy array that can be saved to GeoTIFF.
+        return raw_output.cpu().numpy()
+
+    def visualize(
+        self,
+        input_dict: dict[str, Any],
+        target_dict: dict[str, Any] | None,
+        output: Any,
+    ) -> dict[str, npt.NDArray[Any]]:
+        """Visualize the outputs and targets.
+
+        Args:
+            input_dict: the input dict from process_inputs
+            target_dict: the target dict from process_inputs
+            output: the prediction
+
+        Returns:
+            a dictionary mapping image name to visualization image
+        """
+        # EmbeddingTask is only set up to support `model predict`.
+        raise NotImplementedError
+
+    def get_metrics(self) -> MetricCollection:
+        """Get the metrics for this task."""
+        return MetricCollection({})
+
+
+class EmbeddingHead(torch.nn.Module):
+    """Head for embedding task.
+
+    This picks one feature map from the input list of feature maps to output. It also
+    returns a dummy loss.
+    """
+
+    def __init__(self, feature_map_index: int | None = 0):
+        """Create a new EmbeddingHead.
+
+        Args:
+            feature_map_index: the index of the feature map to choose from the input
+                list of multi-scale feature maps (default 0). If the input is already
+                a single feature map, then set to None.
+        """
+        super().__init__()
+        self.feature_map_index = feature_map_index
+
+    def forward(
+        self,
+        features: torch.Tensor,
+        inputs: list[dict[str, Any]],
+        targets: list[dict[str, Any]] | None = None,
+    ) -> tuple[torch.Tensor, dict[str, Any]]:
+        """Select the desired feature map and return it along with a dummy loss.
+
+        Args:
+            features: list of BCHW feature maps (or one feature map, if feature_map_index is None).
+            inputs: original inputs (ignored).
+            targets: should contain classes key that stores the per-pixel class labels.
+
+        Returns:
+            tuple of outputs and loss dict
+        """
+        if self.feature_map_index is not None:
+            features = features[self.feature_map_index]
+
+        return features, {"loss": 0}
```
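A minimal usage sketch of the new head; the feature map shape is illustrative, and how the encoder and task are wired into a model config is not shown in the diff:

```python
import torch

from rslearn.train.tasks.embedding import EmbeddingHead

# The head selects one feature map from a list and returns a zero loss, so
# `model predict` ends up writing the raw embeddings through the prediction
# writer as a raster.
head = EmbeddingHead(feature_map_index=0)
features = [torch.randn(1, 768, 16, 16)]  # list of BCHW feature maps (illustrative)
embeddings, loss_dict = head(features, inputs=[{}])
assert embeddings.shape == (1, 768, 16, 16)
assert loss_dict["loss"] == 0
```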
rslearn/utils/raster_format.py
CHANGED

```diff
@@ -123,6 +123,44 @@ def get_transform_from_projection_and_bounds(
     )
 
 
+def adjust_projection_and_bounds_for_array(
+    projection: Projection, bounds: PixelBounds, array: npt.NDArray
+) -> tuple[Projection, PixelBounds]:
+    """Adjust the projection and bounds to correspond to the resolution of the array.
+
+    The returned projection and bounds cover the same spatial extent as the inputs, but
+    are updated so that the width and height match that of the array.
+
+    Args:
+        projection: the original projection.
+        bounds: the original bounds.
+        array: the CHW array for which to compute an updated projection and bounds. The
+            returned bounds will have the same width and height as this array.
+
+    Returns:
+        a tuple of adjusted (projection, bounds)
+    """
+    if array.shape[2] == (bounds[2] - bounds[0]) and array.shape[1] == (
+        bounds[3] - bounds[1]
+    ):
+        return (projection, bounds)
+
+    x_factor = array.shape[2] / (bounds[2] - bounds[0])
+    y_factor = array.shape[1] / (bounds[3] - bounds[1])
+    adjusted_projection = Projection(
+        projection.crs,
+        projection.x_resolution / x_factor,
+        projection.y_resolution / y_factor,
+    )
+    adjusted_bounds = (
+        round(bounds[0] * x_factor),
+        round(bounds[1] * y_factor),
+        round(bounds[0] * x_factor) + array.shape[2],
+        round(bounds[1] * y_factor) + array.shape[1],
+    )
+    return (adjusted_projection, adjusted_bounds)
+
+
 class RasterFormat:
     """An abstract class for writing raster data.
 
```
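A quick sketch of the adjustment; the Projection constructor arguments mirror those used inside the function, while the import paths, CRS, and resolutions are assumptions made for illustration:

```python
import numpy as np
from rasterio.crs import CRS

from rslearn.utils.geometry import Projection  # assumed import path
from rslearn.utils.raster_format import adjust_projection_and_bounds_for_array

# A 64x64 CHW array over a 128x128 window: x_factor = y_factor = 0.5, so the
# per-pixel resolution doubles and the bounds shrink to match the array.
projection = Projection(CRS.from_epsg(32610), 10, -10)
bounds = (0, 0, 128, 128)
array = np.zeros((768, 64, 64), dtype=np.float32)
adj_projection, adj_bounds = adjust_projection_and_bounds_for_array(
    projection, bounds, array
)
# adj_bounds == (0, 0, 64, 64); adj_projection has resolution (20, -20).
```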
{rslearn-0.0.12.dist-info → rslearn-0.0.13.dist-info}/METADATA
CHANGED

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: rslearn
-Version: 0.0.
+Version: 0.0.13
 Summary: A library for developing remote sensing datasets and models
 Author: OlmoEarth Team
 License: Apache License
@@ -214,7 +214,7 @@ License-File: LICENSE
 License-File: NOTICE
 Requires-Dist: boto3>=1.39
 Requires-Dist: fiona>=1.10
-Requires-Dist: fsspec>=2025.
+Requires-Dist: fsspec>=2025.10.0
 Requires-Dist: jsonargparse>=4.35.0
 Requires-Dist: lightning>=2.5.1.post0
 Requires-Dist: Pillow>=11.3
```
{rslearn-0.0.12.dist-info → rslearn-0.0.13.dist-info}/RECORD
CHANGED

```diff
@@ -6,7 +6,7 @@ rslearn/main.py,sha256=fLYmm2ZsUTCaJBKZvxu3pc4fB2thaf-p2Qv0AifDlXM,31292
 rslearn/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 rslearn/template_params.py,sha256=Vop0Ha-S44ctCa9lvSZRjrMETznJZlR5y_gJrVIwrPg,791
 rslearn/config/__init__.py,sha256=Bhf2VVncdMYRC8Wfb4GsJJ13OAJYNCO_ODLSNTmBOHM,638
-rslearn/config/dataset.py,sha256=
+rslearn/config/dataset.py,sha256=VpXUGKCr45kzE-W27rgF4tPQuyICfwQkJTxb2z9aXQM,21685
 rslearn/data_sources/__init__.py,sha256=8_7Pi3agKsatNoxXw74-U5G-QAP-rbdfcH8EkZfJbH4,1449
 rslearn/data_sources/aws_landsat.py,sha256=GA9H04KagBDm-N37jFdh_aHCX2ZneVdnqT1SNOyAwTs,20829
 rslearn/data_sources/aws_open_data.py,sha256=nU_D5cqc-wibxq4uyUNb0z-XD0Puf1gZ8v5FMiMAN5w,30258
@@ -24,7 +24,7 @@ rslearn/data_sources/local_files.py,sha256=d08m6IzrUN_80VfvgpHahMJrv-n6_CI6EIocp
 rslearn/data_sources/openstreetmap.py,sha256=qUSMFiIA_laJkO3meBXf9TmSI7OBD-o3i4JxqllUv3Q,19232
 rslearn/data_sources/planet.py,sha256=F2JoLaQ5Cb3k1cTm0hwSWTL2TPfbaAUMXZ8q4Dy7UlA,10109
 rslearn/data_sources/planet_basemap.py,sha256=wuWM9dHSJMdINfyWb78Zk9i-KvJHTrf9J0Q2gyEyiiA,10450
-rslearn/data_sources/planetary_computer.py,sha256=
+rslearn/data_sources/planetary_computer.py,sha256=Vi-aBHQe-BA8NjRyPMgurMAdo3sK6PJteCK5MwXygJo,31869
 rslearn/data_sources/raster_source.py,sha256=b8wo55GhVLxXwx1WYLzeRAlzD_ZkE_P9tnvUOdnsfQE,689
 rslearn/data_sources/usda_cdl.py,sha256=2_V11AhPRgLEGd4U5Pmx3UvE2HWBPbsFXhUIQVRVFeE,7138
 rslearn/data_sources/usgs_landsat.py,sha256=31GmOUfmxwTE6MTiVI4psb-ciVmunuA8cfvqDuvTHPE,19312
@@ -37,9 +37,9 @@ rslearn/data_sources/xyz_tiles.py,sha256=SJV8TB6WUP6DTPr2d3LXRKVjFxda7bdR9IM84Vv
 rslearn/dataset/__init__.py,sha256=bHtBlEEBCekO-gaJqiww0-VjvZTE5ahx0llleo8bfP8,289
 rslearn/dataset/add_windows.py,sha256=pwCEvwLE1jQCoqQxw6CJ-sP46ayWppFa2hGYIB6VVkc,8494
 rslearn/dataset/dataset.py,sha256=bjf9nI55j-MF0bIQWSNPjNbpfqnLK4jy-96TAcwO0MM,5214
-rslearn/dataset/handler_summaries.py,sha256=
+rslearn/dataset/handler_summaries.py,sha256=wI99RDk5erCWkzl1A7Uc4chatQ9KWIr4F_0Hxr9Co6s,2607
 rslearn/dataset/index.py,sha256=Wni5m6h4gisRB54fPLnCfUrRTEsJ5EvwS0fs9sYc2wg,6025
-rslearn/dataset/manage.py,sha256=
+rslearn/dataset/manage.py,sha256=owelBiBqvoIQYLhFMDK4ULzcoGBNE27JV8kl68jf3wg,18563
 rslearn/dataset/materialize.py,sha256=-z47svc_JqGhzkp8kq5Hd9fykWNqFEUCQezo887TWBw,22056
 rslearn/dataset/remap.py,sha256=6MaImsY02GNACpvRM81RvWmjZWRfAHxo_R3Ox6XLF6A,2723
 rslearn/dataset/window.py,sha256=I5RqZ12jlIXhohw4qews1x_I4tSDpml709DZRtLiN24,12546
@@ -86,7 +86,7 @@ rslearn/models/galileo/__init__.py,sha256=QQa0C29nuPRva0KtGiMHQ2ZB02n9SSwj_wqTKP
 rslearn/models/galileo/galileo.py,sha256=jUHA64YvVC3Fz5fevc_9dFJfZaINODRDrhSGLIiOZcw,21115
 rslearn/models/galileo/single_file_galileo.py,sha256=l5tlmmdr2eieHNH-M7rVIvcptkv0Fuk3vKXFW691ezA,56143
 rslearn/models/olmoearth_pretrain/__init__.py,sha256=AjRvbjBdadCdPh-EdvySH76sVAQ8NGQaJt11Tsn1D5I,36
-rslearn/models/olmoearth_pretrain/model.py,sha256=
+rslearn/models/olmoearth_pretrain/model.py,sha256=ZJgoyy7vwB0PUMJtHF-sdJ-uSBqnUXMDBco0Dx4cAes,10670
 rslearn/models/olmoearth_pretrain/norm.py,sha256=rHjFyWkpNLYMx9Ow7TsU-jGm9Sjx7FVf0p4R__ohx2c,3266
 rslearn/models/panopticon_data/sensors/drone.yaml,sha256=xqWS-_QMtJyRoWXJm-igoSur9hAmCFdqkPin8DT5qpw,431
 rslearn/models/panopticon_data/sensors/enmap.yaml,sha256=b2j6bSgYR2yKR9DRm3SPIzSVYlHf51ny_p-1B4B9sB4,13431
@@ -111,7 +111,7 @@ rslearn/train/data_module.py,sha256=K-nQgnOZn-KGq_G2pVOQFtWRrlWih0212i_bkXZ2bEE,
 rslearn/train/dataset.py,sha256=YiskNlYYcKqZxyw0Xzop1RGLbjMc-oK_rmhrSMVbTQg,51857
 rslearn/train/lightning_module.py,sha256=ZLBiId3secUlVs2yzkN-mwVv4rMdh5TkdZYl4vv_Cw0,14466
 rslearn/train/optimizer.py,sha256=EKSqkmERalDA0bF32Gey7n6z69KLyaUWKlRsGJfKBmE,927
-rslearn/train/prediction_writer.py,sha256=
+rslearn/train/prediction_writer.py,sha256=mDvREwEB5k5_tNuBnYIvAGnxS3sYFWQYvV07V3UEe2k,14106
 rslearn/train/scheduler.py,sha256=wFbmycMHgL6nRYeYalDjb0G8YVo8VD3T3sABS61jJ7c,2318
 rslearn/train/callbacks/__init__.py,sha256=VNV0ArZyYMvl3dGK2wl6F046khYJ1dEBlJS6G_SYNm0,47
 rslearn/train/callbacks/adapters.py,sha256=yfv8nyCj3jmo2_dNkFrjukKxh0MHsf2xKqWwMF0QUtY,1869
@@ -121,6 +121,7 @@ rslearn/train/callbacks/peft.py,sha256=wEOKsS3RhsRaZTXn_Kz2wdsZdIiIaZPdCJWtdJBur
 rslearn/train/tasks/__init__.py,sha256=dag1u72x1-me6y0YcOubUo5MYZ0Tjf6-dOir9UeFNMs,75
 rslearn/train/tasks/classification.py,sha256=kahVdXPU6fDwDCdqlrjZGb9uA-PYG74DbQQ0kJUt-Eg,13186
 rslearn/train/tasks/detection.py,sha256=9j9webusrjGexvUmZ7gl3NTBS63Qq511VFlB2WbLi5Y,22302
+rslearn/train/tasks/embedding.py,sha256=DK3l1aQ3d5gQUT1h3cD6vcUaNKvSsH26RHx2Bbzutbg,3667
 rslearn/train/tasks/multi_task.py,sha256=dBWsnbvQ0CReNsbDHmZ_-sXjUE0H4S2OPcbJwMquG9g,6016
 rslearn/train/tasks/per_pixel_regression.py,sha256=W8dbLyIiPgFI3gA_aZQX0pSFRWLP2v6tthsFbKhcDVg,8783
 rslearn/train/tasks/regression.py,sha256=zZhrrZ1qxjrdLjKWC9McRivDXCcKiYfdLC-kaMeVkDc,11547
@@ -145,16 +146,16 @@ rslearn/utils/get_utm_ups_crs.py,sha256=kUrcyjCK7KWvuP1XR-nURPeRqYeRO-3L8QUJ1QTF
 rslearn/utils/grid_index.py,sha256=hRmrtgpqN1pLa-djnZtgSXqKJlbgGyttGnCEmPLD0zo,2347
 rslearn/utils/jsonargparse.py,sha256=JcTKQoZ6jgwag-kSeTIEVBO9AsRj0X1oEJBsoaCazH4,658
 rslearn/utils/mp.py,sha256=XYmVckI5TOQuCKc49NJyirDJyFgvb4AI-gGypG2j680,1399
-rslearn/utils/raster_format.py,sha256=
+rslearn/utils/raster_format.py,sha256=RDzDPnWUJunqcj-F4oXKBl-rKFBUpRjvq7mMYhid3iU,27413
 rslearn/utils/rtree_index.py,sha256=j0Zwrq3pXuAJ-hKpiRFQ7VNtvO3fZYk-Em2uBPAqfx4,6460
 rslearn/utils/spatial_index.py,sha256=eomJAUgzmjir8j9HZnSgQoJHwN9H0wGTjmJkMkLLfsU,762
 rslearn/utils/sqlite_index.py,sha256=YGOJi66544e6JNtfSft6YIlHklFdSJO2duxQ4TJ2iu4,2920
 rslearn/utils/time.py,sha256=2ilSLG94_sxLP3y5RSV5L5CG8CoND_dbdzYEHVtN-I8,387
 rslearn/utils/vector_format.py,sha256=EIChYCL6GLOILS2TO2JBkca1TuaWsSubWv6iRS3P2ds,16139
-rslearn-0.0.
-rslearn-0.0.
-rslearn-0.0.
-rslearn-0.0.
-rslearn-0.0.
-rslearn-0.0.
-rslearn-0.0.
+rslearn-0.0.13.dist-info/licenses/LICENSE,sha256=_99ZWPoLdlUbqZoSC5DF4ihiNwl5rTEmBaq2fACecdg,11352
+rslearn-0.0.13.dist-info/licenses/NOTICE,sha256=wLPr6rwV_jCg-xEknNGwhnkfRfuoOE9MZ-lru2yZyLI,5070
+rslearn-0.0.13.dist-info/METADATA,sha256=44oDmbvkIrjJ0unVNaYeO5OypD6RavmG7l5HUz9Re48,36319
+rslearn-0.0.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+rslearn-0.0.13.dist-info/entry_points.txt,sha256=doTBQ57NT7nq-dgYGgTTw6mafcGWb_4PWYtYR4rGm50,46
+rslearn-0.0.13.dist-info/top_level.txt,sha256=XDKo90WBH8P9RQumHxo0giLJsoufT4r9odv-WE6Ahk4,8
+rslearn-0.0.13.dist-info/RECORD,,
```

Files without changes: {rslearn-0.0.12.dist-info → rslearn-0.0.13.dist-info}/WHEEL, entry_points.txt, licenses/LICENSE, licenses/NOTICE, top_level.txt