rslearn 0.0.6__tar.gz → 0.0.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {rslearn-0.0.6/rslearn.egg-info → rslearn-0.0.8}/PKG-INFO +144 -15
- {rslearn-0.0.6 → rslearn-0.0.8}/README.md +139 -11
- {rslearn-0.0.6 → rslearn-0.0.8}/pyproject.toml +9 -6
- rslearn-0.0.8/rslearn/dataset/handler_summaries.py +130 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/dataset/manage.py +157 -22
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/main.py +60 -8
- rslearn-0.0.8/rslearn/models/anysat.py +207 -0
- rslearn-0.0.8/rslearn/models/clay/clay.py +219 -0
- rslearn-0.0.8/rslearn/models/clay/configs/metadata.yaml +295 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/copernicusfm.py +37 -25
- rslearn-0.0.8/rslearn/models/dinov3.py +165 -0
- rslearn-0.0.8/rslearn/models/galileo/__init__.py +5 -0
- rslearn-0.0.8/rslearn/models/galileo/galileo.py +517 -0
- rslearn-0.0.8/rslearn/models/galileo/single_file_galileo.py +1672 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/drone.yaml +32 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/enmap.yaml +904 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/goes.yaml +9 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/himawari.yaml +9 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/intuition.yaml +606 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/landsat8.yaml +84 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/modis_terra.yaml +99 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/qb2_ge1.yaml +34 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/sentinel1.yaml +85 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/sentinel2.yaml +97 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/superdove.yaml +60 -0
- rslearn-0.0.8/rslearn/models/panopticon_data/sensors/wv23.yaml +63 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/presto/presto.py +10 -7
- rslearn-0.0.8/rslearn/models/prithvi.py +1122 -0
- rslearn-0.0.8/rslearn/models/resize_features.py +45 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/simple_time_series.py +65 -10
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/unet.py +17 -11
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/upsample.py +2 -2
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/tile_stores/default.py +31 -6
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/transforms/normalize.py +34 -5
- rslearn-0.0.8/rslearn/train/transforms/select_bands.py +67 -0
- rslearn-0.0.8/rslearn/train/transforms/sentinel1.py +60 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/geometry.py +61 -1
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/raster_format.py +7 -1
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/vector_format.py +13 -10
- {rslearn-0.0.6 → rslearn-0.0.8/rslearn.egg-info}/PKG-INFO +144 -15
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn.egg-info/SOURCES.txt +24 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn.egg-info/requires.txt +4 -3
- {rslearn-0.0.6 → rslearn-0.0.8}/LICENSE +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/arg_parser.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/config/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/config/dataset.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/const.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/aws_landsat.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/aws_open_data.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/aws_sentinel1.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/climate_data_store.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/copernicus.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/data_source.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/earthdaily.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/earthdata_srtm.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/eurocrops.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/gcp_public_data.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/geotiff.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/google_earth_engine.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/local_files.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/openstreetmap.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/planet.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/planet_basemap.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/planetary_computer.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/raster_source.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/usda_cdl.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/usgs_landsat.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/utils.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/vector_source.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/worldcereal.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/worldcover.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/worldpop.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/data_sources/xyz_tiles.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/dataset/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/dataset/add_windows.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/dataset/dataset.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/dataset/index.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/dataset/materialize.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/dataset/remap.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/dataset/window.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/log_utils.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/clip.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/conv.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/copernicusfm_src/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/copernicusfm_src/aurora/area.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/copernicusfm_src/aurora/fourier.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/copernicusfm_src/dynamic_hypernetwork.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/copernicusfm_src/flexivit/patch_embed.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/copernicusfm_src/flexivit/utils.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/copernicusfm_src/model_vit.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/copernicusfm_src/util/pos_embed.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/croma.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/detr/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/detr/box_ops.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/detr/detr.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/detr/matcher.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/detr/position_encoding.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/detr/transformer.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/detr/util.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/faster_rcnn.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/fpn.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/module_wrapper.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/molmo.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/multitask.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/panopticon.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/pick_features.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/pooling_decoder.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/presto/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/presto/single_file_presto.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/registry.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/sam2_enc.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/satlaspretrain.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/singletask.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/ssl4eo_s12.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/swin.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/task_embedding.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/terramind.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/trunk.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/models/use_croma.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/py.typed +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/template_params.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/tile_stores/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/tile_stores/tile_store.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/callbacks/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/callbacks/adapters.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/callbacks/freeze_unfreeze.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/callbacks/gradients.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/callbacks/peft.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/data_module.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/dataset.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/lightning_module.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/optimizer.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/prediction_writer.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/scheduler.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/tasks/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/tasks/classification.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/tasks/detection.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/tasks/multi_task.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/tasks/per_pixel_regression.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/tasks/regression.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/tasks/segmentation.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/tasks/task.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/transforms/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/transforms/concatenate.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/transforms/crop.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/transforms/flip.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/transforms/mask.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/transforms/pad.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/train/transforms/transform.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/__init__.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/array.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/feature.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/fsspec.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/get_utm_ups_crs.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/grid_index.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/jsonargparse.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/mp.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/rtree_index.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/spatial_index.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/sqlite_index.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn/utils/time.py +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn.egg-info/dependency_links.txt +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn.egg-info/entry_points.txt +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/rslearn.egg-info/top_level.txt +0 -0
- {rslearn-0.0.6 → rslearn-0.0.8}/setup.cfg +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: rslearn
|
|
3
|
-
Version: 0.0.
|
|
3
|
+
Version: 0.0.8
|
|
4
4
|
Summary: A library for developing remote sensing datasets and models
|
|
5
5
|
Author: OlmoEarth Team
|
|
6
6
|
License: Apache License
|
|
@@ -214,7 +214,7 @@ License-File: LICENSE
|
|
|
214
214
|
Requires-Dist: boto3>=1.39
|
|
215
215
|
Requires-Dist: class_registry>=2.1
|
|
216
216
|
Requires-Dist: fiona>=1.10
|
|
217
|
-
Requires-Dist: fsspec
|
|
217
|
+
Requires-Dist: fsspec>=2025.9.0
|
|
218
218
|
Requires-Dist: jsonargparse>=4.35.0
|
|
219
219
|
Requires-Dist: lightning>=2.5.1.post0
|
|
220
220
|
Requires-Dist: Pillow>=11.3
|
|
@@ -233,9 +233,10 @@ Requires-Dist: cdsapi>=0.7.6; extra == "extra"
|
|
|
233
233
|
Requires-Dist: earthdaily[platform]>=1.0.7; extra == "extra"
|
|
234
234
|
Requires-Dist: earthengine-api>=1.6.3; extra == "extra"
|
|
235
235
|
Requires-Dist: einops>=0.8; extra == "extra"
|
|
236
|
-
Requires-Dist:
|
|
236
|
+
Requires-Dist: fsspec[gcs,s3]; extra == "extra"
|
|
237
237
|
Requires-Dist: google-cloud-bigquery>=3.35; extra == "extra"
|
|
238
238
|
Requires-Dist: google-cloud-storage>=2.18; extra == "extra"
|
|
239
|
+
Requires-Dist: huggingface_hub>=0.34.4; extra == "extra"
|
|
239
240
|
Requires-Dist: netCDF4>=1.7.2; extra == "extra"
|
|
240
241
|
Requires-Dist: osmium>=4.0.2; extra == "extra"
|
|
241
242
|
Requires-Dist: planet>=3.1; extra == "extra"
|
|
@@ -243,12 +244,12 @@ Requires-Dist: planetary_computer>=1.0; extra == "extra"
|
|
|
243
244
|
Requires-Dist: pycocotools>=2.0; extra == "extra"
|
|
244
245
|
Requires-Dist: pystac_client>=0.9; extra == "extra"
|
|
245
246
|
Requires-Dist: rtree>=1.4; extra == "extra"
|
|
246
|
-
Requires-Dist: s3fs==2025.3.0; extra == "extra"
|
|
247
247
|
Requires-Dist: satlaspretrain_models>=0.3; extra == "extra"
|
|
248
248
|
Requires-Dist: scipy>=1.16; extra == "extra"
|
|
249
249
|
Requires-Dist: terratorch>=1.0.2; extra == "extra"
|
|
250
250
|
Requires-Dist: transformers>=4.55; extra == "extra"
|
|
251
251
|
Requires-Dist: wandb>=0.21; extra == "extra"
|
|
252
|
+
Requires-Dist: timm>=0.9.7; extra == "extra"
|
|
252
253
|
Provides-Extra: dev
|
|
253
254
|
Requires-Dist: interrogate>=1.7.0; extra == "dev"
|
|
254
255
|
Requires-Dist: mypy<2,>=1.17.1; extra == "dev"
|
|
@@ -437,10 +438,10 @@ that they align with the windows we have previously defined (and the Sentinel-2
|
|
|
437
438
|
we have already ingested). We can use the LocalFiles data source to have rslearn
|
|
438
439
|
automate this process. Update the dataset `config.json` with a new layer:
|
|
439
440
|
|
|
440
|
-
```
|
|
441
|
+
```jsonc
|
|
441
442
|
"layers": {
|
|
442
443
|
"sentinel2": {
|
|
443
|
-
...
|
|
444
|
+
# ...
|
|
444
445
|
},
|
|
445
446
|
"worldcover": {
|
|
446
447
|
"type": "raster",
|
|
@@ -455,7 +456,7 @@ automate this process. Update the dataset `config.json` with a new layer:
|
|
|
455
456
|
}
|
|
456
457
|
}
|
|
457
458
|
},
|
|
458
|
-
...
|
|
459
|
+
# ...
|
|
459
460
|
```
|
|
460
461
|
|
|
461
462
|
Repeat the materialize process so we populate the data for this new layer:
|
|
@@ -577,6 +578,7 @@ trainer:
|
|
|
577
578
|
save_last: true
|
|
578
579
|
monitor: val_accuracy
|
|
579
580
|
mode: max
|
|
581
|
+
dirpath: ./land_cover_model_checkpoints/
|
|
580
582
|
```
|
|
581
583
|
|
|
582
584
|
Now we can train the model:
|
|
@@ -621,13 +623,13 @@ windows in the "predict" group, which is where we added the Portland window.
|
|
|
621
623
|
And it will be written in a new output_layer called "output". But we have to update the
|
|
622
624
|
dataset configuration so it specifies the layer:
|
|
623
625
|
|
|
624
|
-
```
|
|
626
|
+
```jsonc
|
|
625
627
|
"layers": {
|
|
626
628
|
"sentinel2": {
|
|
627
|
-
...
|
|
629
|
+
# ...
|
|
628
630
|
},
|
|
629
631
|
"worldcover": {
|
|
630
|
-
...
|
|
632
|
+
# ...
|
|
631
633
|
},
|
|
632
634
|
"output": {
|
|
633
635
|
"type": "raster",
|
|
@@ -644,7 +646,7 @@ Now we can apply the model:
|
|
|
644
646
|
```
|
|
645
647
|
# Find model checkpoint in lightning_logs dir.
|
|
646
648
|
ls lightning_logs/*/checkpoints/last.ckpt
|
|
647
|
-
rslearn model predict --config land_cover_model.yaml --ckpt_path
|
|
649
|
+
rslearn model predict --config land_cover_model.yaml --ckpt_path land_cover_model_checkpoints/last.ckpt
|
|
648
650
|
```
|
|
649
651
|
|
|
650
652
|
And visualize the Sentinel-2 image and output in qgis:
|
|
@@ -751,17 +753,144 @@ got 585 examples in split val
|
|
|
751
753
|
|
|
752
754
|
### Visualizing with `model test`
|
|
753
755
|
|
|
754
|
-
|
|
756
|
+
We can visualize the ground truth labels and model predictions in the test set using
|
|
757
|
+
the `model test` command:
|
|
758
|
+
|
|
759
|
+
```
|
|
760
|
+
mkdir ./vis
|
|
761
|
+
rslearn model test --config land_cover_model.yaml --ckpt_path land_cover_model_checkpoints/last.ckpt --model.init_args.visualize_dir=./vis/
|
|
762
|
+
```
|
|
763
|
+
|
|
764
|
+
This will produce PNGs in the vis directory. The visualizations are produced by the
|
|
765
|
+
`Task.visualize` function, so we could customize the visualization by subclassing
|
|
766
|
+
SegmentationTask and overriding the visualize function.
|
|
767
|
+
|
|
768
|
+
|
|
769
|
+
### Logging to Weights & Biases
|
|
770
|
+
|
|
771
|
+
We can log to W&B by setting the logger under trainer in the model configuration file:
|
|
772
|
+
|
|
773
|
+
```yaml
|
|
774
|
+
trainer:
|
|
775
|
+
# ...
|
|
776
|
+
logger:
|
|
777
|
+
class_path: lightning.pytorch.loggers.WandbLogger
|
|
778
|
+
init_args:
|
|
779
|
+
project: land_cover_model
|
|
780
|
+
name: version_00
|
|
781
|
+
```
|
|
782
|
+
|
|
783
|
+
Now, runs with this model configuration should show on W&B. For `model fit` runs,
|
|
784
|
+
the training and validation loss and accuracy metric will be logged. The accuracy
|
|
785
|
+
metric is provided by SegmentationTask, and additional metrics can be enabled by
|
|
786
|
+
passing the relevant init_args to the task, e.g. mean IoU and F1:
|
|
787
|
+
|
|
788
|
+
```yaml
|
|
789
|
+
class_path: rslearn.train.tasks.segmentation.SegmentationTask
|
|
790
|
+
init_args:
|
|
791
|
+
num_classes: 101
|
|
792
|
+
remap_values: [[0, 1], [0, 255]]
|
|
793
|
+
enable_miou_metric: true
|
|
794
|
+
enable_f1_metric: true
|
|
795
|
+
```
|
|
755
796
|
|
|
756
797
|
|
|
757
798
|
### Inputting Multiple Sentinel-2 Images
|
|
758
799
|
|
|
759
|
-
|
|
800
|
+
Currently our model inputs a single Sentinel-2 image. However, for most tasks where
|
|
801
|
+
labels are not expected to change from week to week, we find that accuracy can be
|
|
802
|
+
significantly improved by inputting multiple images, regardless of the pre-trained
|
|
803
|
+
model used. Multiple images makes the model more resilient to clouds and image
|
|
804
|
+
artifacts, and allows the model to synthesize information across different views that
|
|
805
|
+
may come from different seasons or weather conditions.
|
|
760
806
|
|
|
807
|
+
We first update our dataset configuration to obtain three images, by customizing the
|
|
808
|
+
query_config section. This can replace the sentinel2 layer:
|
|
761
809
|
|
|
762
|
-
|
|
810
|
+
```jsonc
|
|
811
|
+
"layers": {
|
|
812
|
+
"sentinel2_multi": {
|
|
813
|
+
"type": "raster",
|
|
814
|
+
"band_sets": [{
|
|
815
|
+
"dtype": "uint8",
|
|
816
|
+
"bands": ["R", "G", "B"]
|
|
817
|
+
}],
|
|
818
|
+
"data_source": {
|
|
819
|
+
"name": "rslearn.data_sources.gcp_public_data.Sentinel2",
|
|
820
|
+
"index_cache_dir": "cache/sentinel2/",
|
|
821
|
+
"sort_by": "cloud_cover",
|
|
822
|
+
"use_rtree_index": false,
|
|
823
|
+
"query_config": {
|
|
824
|
+
"max_matches": 3
|
|
825
|
+
}
|
|
826
|
+
}
|
|
827
|
+
},
|
|
828
|
+
"worldcover": {
|
|
829
|
+
# ...
|
|
830
|
+
},
|
|
831
|
+
"output": {
|
|
832
|
+
# ...
|
|
833
|
+
}
|
|
834
|
+
}
|
|
835
|
+
```
|
|
836
|
+
|
|
837
|
+
Repeat the steps from earlier to prepare, ingest, and materialize the dataset.
|
|
838
|
+
|
|
839
|
+
Now we update our model configuration file. First, we modify the model architecture to
|
|
840
|
+
be able to input an image time series. We use the SimpleTimeSeries model, which takes
|
|
841
|
+
an encoder that expects a single-image input, and applies that encoder on each image in
|
|
842
|
+
the time series. It then applies max temporal pooling to combine the per-image feature
|
|
843
|
+
maps extracted by the encoder.
|
|
844
|
+
|
|
845
|
+
Image time series in rslearn are currently stored as [T*C, H, W] tensors. So we pass
|
|
846
|
+
the `image_channels` to SimpleTimeSeries so it knows how to slice up the tensor to
|
|
847
|
+
recover the per-timestep images.
|
|
848
|
+
|
|
849
|
+
```yaml
|
|
850
|
+
model:
|
|
851
|
+
class_path: rslearn.train.lightning_module.RslearnLightningModule
|
|
852
|
+
init_args:
|
|
853
|
+
model:
|
|
854
|
+
class_path: rslearn.models.singletask.SingleTaskModel
|
|
855
|
+
init_args:
|
|
856
|
+
encoder:
|
|
857
|
+
- class_path: rslearn.models.simple_time_series.SimpleTimeSeries
|
|
858
|
+
init_args:
|
|
859
|
+
encoder:
|
|
860
|
+
class_path: rslearn.models.satlaspretrain.SatlasPretrain
|
|
861
|
+
init_args:
|
|
862
|
+
model_identifier: "Sentinel2_SwinB_SI_RGB"
|
|
863
|
+
image_channels: 3
|
|
864
|
+
decoder:
|
|
865
|
+
# ...
|
|
866
|
+
```
|
|
763
867
|
|
|
764
|
-
|
|
868
|
+
Next, we update the data module section so that the dataset loads the image time series
|
|
869
|
+
rather than a single image. The `load_all_layers` option tells the dataset to stack the
|
|
870
|
+
rasters from all of the layers specified, and also to ignore windows where any of those
|
|
871
|
+
layers are missing.
|
|
872
|
+
|
|
873
|
+
```yaml
|
|
874
|
+
data:
|
|
875
|
+
class_path: rslearn.train.data_module.RslearnDataModule
|
|
876
|
+
init_args:
|
|
877
|
+
path: # ...
|
|
878
|
+
inputs:
|
|
879
|
+
image:
|
|
880
|
+
data_type: "raster"
|
|
881
|
+
layers: ["sentinel2_multi", "sentinel2_multi.1", "sentinel2_multi.2"]
|
|
882
|
+
bands: ["R", "G", "B"]
|
|
883
|
+
passthrough: true
|
|
884
|
+
load_all_layers: true
|
|
885
|
+
targets:
|
|
886
|
+
# ...
|
|
887
|
+
```
|
|
888
|
+
|
|
889
|
+
Now we can train an updated model:
|
|
890
|
+
|
|
891
|
+
```
|
|
892
|
+
rslearn model fit --config land_cover_model.yaml
|
|
893
|
+
```
|
|
765
894
|
|
|
766
895
|
|
|
767
896
|
Contact
|
|
@@ -175,10 +175,10 @@ that they align with the windows we have previously defined (and the Sentinel-2
|
|
|
175
175
|
we have already ingested). We can use the LocalFiles data source to have rslearn
|
|
176
176
|
automate this process. Update the dataset `config.json` with a new layer:
|
|
177
177
|
|
|
178
|
-
```
|
|
178
|
+
```jsonc
|
|
179
179
|
"layers": {
|
|
180
180
|
"sentinel2": {
|
|
181
|
-
...
|
|
181
|
+
# ...
|
|
182
182
|
},
|
|
183
183
|
"worldcover": {
|
|
184
184
|
"type": "raster",
|
|
@@ -193,7 +193,7 @@ automate this process. Update the dataset `config.json` with a new layer:
|
|
|
193
193
|
}
|
|
194
194
|
}
|
|
195
195
|
},
|
|
196
|
-
...
|
|
196
|
+
# ...
|
|
197
197
|
```
|
|
198
198
|
|
|
199
199
|
Repeat the materialize process so we populate the data for this new layer:
|
|
@@ -315,6 +315,7 @@ trainer:
|
|
|
315
315
|
save_last: true
|
|
316
316
|
monitor: val_accuracy
|
|
317
317
|
mode: max
|
|
318
|
+
dirpath: ./land_cover_model_checkpoints/
|
|
318
319
|
```
|
|
319
320
|
|
|
320
321
|
Now we can train the model:
|
|
@@ -359,13 +360,13 @@ windows in the "predict" group, which is where we added the Portland window.
|
|
|
359
360
|
And it will be written in a new output_layer called "output". But we have to update the
|
|
360
361
|
dataset configuration so it specifies the layer:
|
|
361
362
|
|
|
362
|
-
```
|
|
363
|
+
```jsonc
|
|
363
364
|
"layers": {
|
|
364
365
|
"sentinel2": {
|
|
365
|
-
...
|
|
366
|
+
# ...
|
|
366
367
|
},
|
|
367
368
|
"worldcover": {
|
|
368
|
-
...
|
|
369
|
+
# ...
|
|
369
370
|
},
|
|
370
371
|
"output": {
|
|
371
372
|
"type": "raster",
|
|
@@ -382,7 +383,7 @@ Now we can apply the model:
|
|
|
382
383
|
```
|
|
383
384
|
# Find model checkpoint in lightning_logs dir.
|
|
384
385
|
ls lightning_logs/*/checkpoints/last.ckpt
|
|
385
|
-
rslearn model predict --config land_cover_model.yaml --ckpt_path
|
|
386
|
+
rslearn model predict --config land_cover_model.yaml --ckpt_path land_cover_model_checkpoints/last.ckpt
|
|
386
387
|
```
|
|
387
388
|
|
|
388
389
|
And visualize the Sentinel-2 image and output in qgis:
|
|
@@ -489,17 +490,144 @@ got 585 examples in split val
|
|
|
489
490
|
|
|
490
491
|
### Visualizing with `model test`
|
|
491
492
|
|
|
492
|
-
|
|
493
|
+
We can visualize the ground truth labels and model predictions in the test set using
|
|
494
|
+
the `model test` command:
|
|
495
|
+
|
|
496
|
+
```
|
|
497
|
+
mkdir ./vis
|
|
498
|
+
rslearn model test --config land_cover_model.yaml --ckpt_path land_cover_model_checkpoints/last.ckpt --model.init_args.visualize_dir=./vis/
|
|
499
|
+
```
|
|
500
|
+
|
|
501
|
+
This will produce PNGs in the vis directory. The visualizations are produced by the
|
|
502
|
+
`Task.visualize` function, so we could customize the visualization by subclassing
|
|
503
|
+
SegmentationTask and overriding the visualize function.
|
|
504
|
+
|
|
505
|
+
|
|
506
|
+
### Logging to Weights & Biases
|
|
507
|
+
|
|
508
|
+
We can log to W&B by setting the logger under trainer in the model configuration file:
|
|
509
|
+
|
|
510
|
+
```yaml
|
|
511
|
+
trainer:
|
|
512
|
+
# ...
|
|
513
|
+
logger:
|
|
514
|
+
class_path: lightning.pytorch.loggers.WandbLogger
|
|
515
|
+
init_args:
|
|
516
|
+
project: land_cover_model
|
|
517
|
+
name: version_00
|
|
518
|
+
```
|
|
519
|
+
|
|
520
|
+
Now, runs with this model configuration should show on W&B. For `model fit` runs,
|
|
521
|
+
the training and validation loss and accuracy metric will be logged. The accuracy
|
|
522
|
+
metric is provided by SegmentationTask, and additional metrics can be enabled by
|
|
523
|
+
passing the relevant init_args to the task, e.g. mean IoU and F1:
|
|
524
|
+
|
|
525
|
+
```yaml
|
|
526
|
+
class_path: rslearn.train.tasks.segmentation.SegmentationTask
|
|
527
|
+
init_args:
|
|
528
|
+
num_classes: 101
|
|
529
|
+
remap_values: [[0, 1], [0, 255]]
|
|
530
|
+
enable_miou_metric: true
|
|
531
|
+
enable_f1_metric: true
|
|
532
|
+
```
|
|
493
533
|
|
|
494
534
|
|
|
495
535
|
### Inputting Multiple Sentinel-2 Images
|
|
496
536
|
|
|
497
|
-
|
|
537
|
+
Currently our model inputs a single Sentinel-2 image. However, for most tasks where
|
|
538
|
+
labels are not expected to change from week to week, we find that accuracy can be
|
|
539
|
+
significantly improved by inputting multiple images, regardless of the pre-trained
|
|
540
|
+
model used. Multiple images makes the model more resilient to clouds and image
|
|
541
|
+
artifacts, and allows the model to synthesize information across different views that
|
|
542
|
+
may come from different seasons or weather conditions.
|
|
498
543
|
|
|
544
|
+
We first update our dataset configuration to obtain three images, by customizing the
|
|
545
|
+
query_config section. This can replace the sentinel2 layer:
|
|
499
546
|
|
|
500
|
-
|
|
547
|
+
```jsonc
|
|
548
|
+
"layers": {
|
|
549
|
+
"sentinel2_multi": {
|
|
550
|
+
"type": "raster",
|
|
551
|
+
"band_sets": [{
|
|
552
|
+
"dtype": "uint8",
|
|
553
|
+
"bands": ["R", "G", "B"]
|
|
554
|
+
}],
|
|
555
|
+
"data_source": {
|
|
556
|
+
"name": "rslearn.data_sources.gcp_public_data.Sentinel2",
|
|
557
|
+
"index_cache_dir": "cache/sentinel2/",
|
|
558
|
+
"sort_by": "cloud_cover",
|
|
559
|
+
"use_rtree_index": false,
|
|
560
|
+
"query_config": {
|
|
561
|
+
"max_matches": 3
|
|
562
|
+
}
|
|
563
|
+
}
|
|
564
|
+
},
|
|
565
|
+
"worldcover": {
|
|
566
|
+
# ...
|
|
567
|
+
},
|
|
568
|
+
"output": {
|
|
569
|
+
# ...
|
|
570
|
+
}
|
|
571
|
+
}
|
|
572
|
+
```
|
|
573
|
+
|
|
574
|
+
Repeat the steps from earlier to prepare, ingest, and materialize the dataset.
|
|
575
|
+
|
|
576
|
+
Now we update our model configuration file. First, we modify the model architecture to
|
|
577
|
+
be able to input an image time series. We use the SimpleTimeSeries model, which takes
|
|
578
|
+
an encoder that expects a single-image input, and applies that encoder on each image in
|
|
579
|
+
the time series. It then applies max temporal pooling to combine the per-image feature
|
|
580
|
+
maps extracted by the encoder.
|
|
581
|
+
|
|
582
|
+
Image time series in rslearn are currently stored as [T*C, H, W] tensors. So we pass
|
|
583
|
+
the `image_channels` to SimpleTimeSeries so it knows how to slice up the tensor to
|
|
584
|
+
recover the per-timestep images.
|
|
585
|
+
|
|
586
|
+
```yaml
|
|
587
|
+
model:
|
|
588
|
+
class_path: rslearn.train.lightning_module.RslearnLightningModule
|
|
589
|
+
init_args:
|
|
590
|
+
model:
|
|
591
|
+
class_path: rslearn.models.singletask.SingleTaskModel
|
|
592
|
+
init_args:
|
|
593
|
+
encoder:
|
|
594
|
+
- class_path: rslearn.models.simple_time_series.SimpleTimeSeries
|
|
595
|
+
init_args:
|
|
596
|
+
encoder:
|
|
597
|
+
class_path: rslearn.models.satlaspretrain.SatlasPretrain
|
|
598
|
+
init_args:
|
|
599
|
+
model_identifier: "Sentinel2_SwinB_SI_RGB"
|
|
600
|
+
image_channels: 3
|
|
601
|
+
decoder:
|
|
602
|
+
# ...
|
|
603
|
+
```
|
|
501
604
|
|
|
502
|
-
|
|
605
|
+
Next, we update the data module section so that the dataset loads the image time series
|
|
606
|
+
rather than a single image. The `load_all_layers` option tells the dataset to stack the
|
|
607
|
+
rasters from all of the layers specified, and also to ignore windows where any of those
|
|
608
|
+
layers are missing.
|
|
609
|
+
|
|
610
|
+
```yaml
|
|
611
|
+
data:
|
|
612
|
+
class_path: rslearn.train.data_module.RslearnDataModule
|
|
613
|
+
init_args:
|
|
614
|
+
path: # ...
|
|
615
|
+
inputs:
|
|
616
|
+
image:
|
|
617
|
+
data_type: "raster"
|
|
618
|
+
layers: ["sentinel2_multi", "sentinel2_multi.1", "sentinel2_multi.2"]
|
|
619
|
+
bands: ["R", "G", "B"]
|
|
620
|
+
passthrough: true
|
|
621
|
+
load_all_layers: true
|
|
622
|
+
targets:
|
|
623
|
+
# ...
|
|
624
|
+
```
|
|
625
|
+
|
|
626
|
+
Now we can train an updated model:
|
|
627
|
+
|
|
628
|
+
```
|
|
629
|
+
rslearn model fit --config land_cover_model.yaml
|
|
630
|
+
```
|
|
503
631
|
|
|
504
632
|
|
|
505
633
|
Contact
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "rslearn"
|
|
3
|
-
version = "0.0.
|
|
3
|
+
version = "0.0.8"
|
|
4
4
|
description = "A library for developing remote sensing datasets and models"
|
|
5
5
|
authors = [
|
|
6
6
|
{ name = "OlmoEarth Team" },
|
|
@@ -12,9 +12,7 @@ dependencies = [
|
|
|
12
12
|
"boto3>=1.39",
|
|
13
13
|
"class_registry>=2.1",
|
|
14
14
|
"fiona>=1.10",
|
|
15
|
-
#
|
|
16
|
-
# this issue: https://github.com/fsspec/gcsfs/issues/696
|
|
17
|
-
"fsspec==2025.3.0",
|
|
15
|
+
"fsspec>=2025.9.0", # this is used both directly and indirectly (via universal_pathlib) in our code
|
|
18
16
|
"jsonargparse>=4.35.0",
|
|
19
17
|
"lightning>=2.5.1.post0",
|
|
20
18
|
"Pillow>=11.3",
|
|
@@ -37,9 +35,12 @@ extra = [
|
|
|
37
35
|
"earthdaily[platform]>=1.0.7",
|
|
38
36
|
"earthengine-api>=1.6.3",
|
|
39
37
|
"einops>=0.8",
|
|
40
|
-
|
|
38
|
+
# https://github.com/fsspec/universal_pathlib?tab=readme-ov-file#adding-universal_pathlib-to-your-project
|
|
39
|
+
# https://github.com/fsspec/filesystem_spec?tab=readme-ov-file#install
|
|
40
|
+
"fsspec[gcs, s3]", # for both direct use via fsspec and indirect use via universal_pathlib, docs suggest enabling specific backends like this
|
|
41
41
|
"google-cloud-bigquery>=3.35",
|
|
42
42
|
"google-cloud-storage>=2.18",
|
|
43
|
+
"huggingface_hub>=0.34.4",
|
|
43
44
|
"netCDF4>=1.7.2",
|
|
44
45
|
"osmium>=4.0.2",
|
|
45
46
|
"planet>=3.1",
|
|
@@ -47,12 +48,12 @@ extra = [
|
|
|
47
48
|
"pycocotools>=2.0",
|
|
48
49
|
"pystac_client>=0.9",
|
|
49
50
|
"rtree>=1.4",
|
|
50
|
-
"s3fs==2025.3.0",
|
|
51
51
|
"satlaspretrain_models>=0.3",
|
|
52
52
|
"scipy>=1.16",
|
|
53
53
|
"terratorch>=1.0.2",
|
|
54
54
|
"transformers>=4.55",
|
|
55
55
|
"wandb>=0.21",
|
|
56
|
+
"timm>=0.9.7",
|
|
56
57
|
]
|
|
57
58
|
|
|
58
59
|
dev = [
|
|
@@ -83,6 +84,8 @@ include = ["rslearn*"]
|
|
|
83
84
|
|
|
84
85
|
[tool.setuptools.package-data]
|
|
85
86
|
rslearn = ["py.typed"]
|
|
87
|
+
"rslearn.models.clay.configs" = ["metadata.yaml"]
|
|
88
|
+
"rslearn.models.panopticon_data.sensors" = ["*.yaml"]
|
|
86
89
|
|
|
87
90
|
[tool.ruff]
|
|
88
91
|
fix = true
|
|
@@ -0,0 +1,130 @@
|
|
|
1
|
+
"""This module contains dataclasses for summarizing the results of dataset operations.
|
|
2
|
+
|
|
3
|
+
They can be used by callers to emit telemetry / logs, or discarded.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
@dataclass
class LayerPrepareSummary:
    """Results for preparing a single layer."""

    # Identity
    layer_name: str  # name of the dataset layer that was prepared
    data_source_name: str  # name of the data source backing the layer

    # Timing
    duration_seconds: float  # time spent preparing this layer, in seconds

    # Counts
    windows_prepared: int  # windows prepared during this call
    windows_skipped: int  # windows skipped (presumably already prepared — confirm against manage.py)
    get_items_attempts: int  # attempts made at the data source's item lookup — TODO confirm semantics
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
@dataclass
class PrepareDatasetWindowsSummary:
    """Results from prepare_dataset_windows operation for telemetry purposes."""

    # Timing
    duration_seconds: float  # total duration of the prepare operation, in seconds

    # Counts
    total_windows_requested: int  # number of windows the caller asked to prepare

    # Per-layer summaries
    layer_summaries: list[LayerPrepareSummary]  # one summary per layer processed
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
@dataclass
class IngestCounts:
    """Known ingestion counts.

    Used when ingestion completed without partial failure, so exact counts
    are available (contrast with UnknownIngestCounts).
    """

    items_ingested: int  # items ingested
    geometries_ingested: int  # geometries ingested
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@dataclass
class UnknownIngestCounts:
    """Indicates ingestion counts are unknown due to partial failure.

    Only the number of items/geometries attempted is known; how many of
    those actually succeeded is not.
    """

    items_attempted: int  # items that ingestion was attempted for
    geometries_attempted: int  # geometries that ingestion was attempted for
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@dataclass
class LayerIngestSummary:
    """Results for ingesting a single layer."""

    # Identity
    layer_name: str  # name of the dataset layer that was ingested
    data_source_name: str  # name of the data source backing the layer

    # Timing
    duration_seconds: float  # time spent ingesting this layer, in seconds

    # Counts - either known or unknown
    # IngestCounts when ingestion fully succeeded; UnknownIngestCounts when a
    # partial failure means only the attempted counts are known.
    ingest_counts: IngestCounts | UnknownIngestCounts
    ingest_attempts: int  # number of ingest attempts made for this layer
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
@dataclass
class IngestDatasetJobsSummary:
    """Results from ingesting a set of jobs; for telemetry purposes."""

    # Timing
    duration_seconds: float  # total duration across all jobs, in seconds

    # Counts
    num_jobs: int  # number of ingest jobs processed

    # Per-layer summaries
    layer_summaries: list[LayerIngestSummary]  # one summary per layer ingested
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
@dataclass
class MaterializeWindowLayerSummary:
    """Results for materializing a single window layer."""

    # True if materialization for this window layer was skipped
    # (presumably because it was already materialized — confirm in manage.py).
    skipped: bool
    materialize_attempts: int  # number of materialize attempts for this window layer
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
@dataclass
class MaterializeWindowLayersSummary:
    """Results for materializing a given layer for all windows in a materialize call."""

    # Identity
    layer_name: str  # name of the dataset layer that was materialized
    data_source_name: str  # name of the data source backing the layer

    # Timing
    duration_seconds: float  # time spent materializing this layer, in seconds

    # Counts
    total_windows_requested: int  # windows requested for this layer
    num_windows_materialized: int  # windows actually materialized (excludes skips)
    materialize_attempts: int  # total materialize attempts across windows
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
@dataclass
class MaterializeDatasetWindowsSummary:
    """Results from materialize_dataset_windows operation for telemetry purposes."""

    # Timing
    duration_seconds: float  # total duration of the materialize operation, in seconds

    # Counts
    total_windows_requested: int  # number of windows the caller asked to materialize

    # Per-layer summaries
    layer_summaries: list[MaterializeWindowLayersSummary]  # one summary per layer processed
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
@dataclass
class ErrorOutcome:
    """Outcome recorded when an operation fails.

    The full contents are still TBD; currently only the elapsed time is
    recorded.
    """

    # Timing
    duration_seconds: float  # time elapsed before the failure, in seconds — TODO confirm with callers
|