rslearn 0.0.6__tar.gz → 0.0.7__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164) hide show
  1. {rslearn-0.0.6/rslearn.egg-info → rslearn-0.0.7}/PKG-INFO +145 -15
  2. {rslearn-0.0.6 → rslearn-0.0.7}/README.md +139 -11
  3. {rslearn-0.0.6 → rslearn-0.0.7}/pyproject.toml +8 -6
  4. rslearn-0.0.7/rslearn/models/anysat.py +207 -0
  5. rslearn-0.0.7/rslearn/models/clay/clay.py +204 -0
  6. rslearn-0.0.7/rslearn/models/clay/configs/metadata.yaml +295 -0
  7. rslearn-0.0.7/rslearn/models/galileo/__init__.py +5 -0
  8. rslearn-0.0.7/rslearn/models/galileo/galileo.py +517 -0
  9. rslearn-0.0.7/rslearn/models/galileo/single_file_galileo.py +1672 -0
  10. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/drone.yaml +32 -0
  11. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/enmap.yaml +904 -0
  12. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/goes.yaml +9 -0
  13. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/himawari.yaml +9 -0
  14. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/intuition.yaml +606 -0
  15. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/landsat8.yaml +84 -0
  16. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/modis_terra.yaml +99 -0
  17. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/qb2_ge1.yaml +34 -0
  18. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/sentinel1.yaml +85 -0
  19. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/sentinel2.yaml +97 -0
  20. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/superdove.yaml +60 -0
  21. rslearn-0.0.7/rslearn/models/panopticon_data/sensors/wv23.yaml +63 -0
  22. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/presto/presto.py +10 -7
  23. rslearn-0.0.7/rslearn/models/prithvi.py +1046 -0
  24. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/unet.py +17 -11
  25. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/geometry.py +61 -1
  26. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/vector_format.py +13 -10
  27. {rslearn-0.0.6 → rslearn-0.0.7/rslearn.egg-info}/PKG-INFO +145 -15
  28. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn.egg-info/SOURCES.txt +19 -0
  29. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn.egg-info/requires.txt +5 -3
  30. {rslearn-0.0.6 → rslearn-0.0.7}/LICENSE +0 -0
  31. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/__init__.py +0 -0
  32. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/arg_parser.py +0 -0
  33. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/config/__init__.py +0 -0
  34. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/config/dataset.py +0 -0
  35. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/const.py +0 -0
  36. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/__init__.py +0 -0
  37. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/aws_landsat.py +0 -0
  38. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/aws_open_data.py +0 -0
  39. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/aws_sentinel1.py +0 -0
  40. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/climate_data_store.py +0 -0
  41. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/copernicus.py +0 -0
  42. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/data_source.py +0 -0
  43. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/earthdaily.py +0 -0
  44. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/earthdata_srtm.py +0 -0
  45. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/eurocrops.py +0 -0
  46. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/gcp_public_data.py +0 -0
  47. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/geotiff.py +0 -0
  48. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/google_earth_engine.py +0 -0
  49. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/local_files.py +0 -0
  50. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/openstreetmap.py +0 -0
  51. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/planet.py +0 -0
  52. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/planet_basemap.py +0 -0
  53. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/planetary_computer.py +0 -0
  54. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/raster_source.py +0 -0
  55. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/usda_cdl.py +0 -0
  56. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/usgs_landsat.py +0 -0
  57. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/utils.py +0 -0
  58. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/vector_source.py +0 -0
  59. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/worldcereal.py +0 -0
  60. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/worldcover.py +0 -0
  61. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/worldpop.py +0 -0
  62. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/data_sources/xyz_tiles.py +0 -0
  63. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/dataset/__init__.py +0 -0
  64. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/dataset/add_windows.py +0 -0
  65. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/dataset/dataset.py +0 -0
  66. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/dataset/index.py +0 -0
  67. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/dataset/manage.py +0 -0
  68. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/dataset/materialize.py +0 -0
  69. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/dataset/remap.py +0 -0
  70. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/dataset/window.py +0 -0
  71. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/log_utils.py +0 -0
  72. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/main.py +0 -0
  73. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/__init__.py +0 -0
  74. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/clip.py +0 -0
  75. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/conv.py +0 -0
  76. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/copernicusfm.py +0 -0
  77. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/copernicusfm_src/__init__.py +0 -0
  78. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/copernicusfm_src/aurora/area.py +0 -0
  79. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/copernicusfm_src/aurora/fourier.py +0 -0
  80. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/copernicusfm_src/dynamic_hypernetwork.py +0 -0
  81. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/copernicusfm_src/flexivit/patch_embed.py +0 -0
  82. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/copernicusfm_src/flexivit/utils.py +0 -0
  83. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/copernicusfm_src/model_vit.py +0 -0
  84. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/copernicusfm_src/util/pos_embed.py +0 -0
  85. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/croma.py +0 -0
  86. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/detr/__init__.py +0 -0
  87. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/detr/box_ops.py +0 -0
  88. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/detr/detr.py +0 -0
  89. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/detr/matcher.py +0 -0
  90. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/detr/position_encoding.py +0 -0
  91. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/detr/transformer.py +0 -0
  92. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/detr/util.py +0 -0
  93. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/faster_rcnn.py +0 -0
  94. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/fpn.py +0 -0
  95. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/module_wrapper.py +0 -0
  96. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/molmo.py +0 -0
  97. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/multitask.py +0 -0
  98. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/panopticon.py +0 -0
  99. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/pick_features.py +0 -0
  100. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/pooling_decoder.py +0 -0
  101. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/presto/__init__.py +0 -0
  102. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/presto/single_file_presto.py +0 -0
  103. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/registry.py +0 -0
  104. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/sam2_enc.py +0 -0
  105. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/satlaspretrain.py +0 -0
  106. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/simple_time_series.py +0 -0
  107. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/singletask.py +0 -0
  108. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/ssl4eo_s12.py +0 -0
  109. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/swin.py +0 -0
  110. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/task_embedding.py +0 -0
  111. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/terramind.py +0 -0
  112. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/trunk.py +0 -0
  113. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/upsample.py +0 -0
  114. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/models/use_croma.py +0 -0
  115. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/py.typed +0 -0
  116. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/template_params.py +0 -0
  117. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/tile_stores/__init__.py +0 -0
  118. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/tile_stores/default.py +0 -0
  119. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/tile_stores/tile_store.py +0 -0
  120. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/__init__.py +0 -0
  121. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/callbacks/__init__.py +0 -0
  122. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/callbacks/adapters.py +0 -0
  123. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/callbacks/freeze_unfreeze.py +0 -0
  124. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/callbacks/gradients.py +0 -0
  125. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/callbacks/peft.py +0 -0
  126. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/data_module.py +0 -0
  127. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/dataset.py +0 -0
  128. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/lightning_module.py +0 -0
  129. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/optimizer.py +0 -0
  130. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/prediction_writer.py +0 -0
  131. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/scheduler.py +0 -0
  132. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/tasks/__init__.py +0 -0
  133. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/tasks/classification.py +0 -0
  134. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/tasks/detection.py +0 -0
  135. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/tasks/multi_task.py +0 -0
  136. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/tasks/per_pixel_regression.py +0 -0
  137. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/tasks/regression.py +0 -0
  138. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/tasks/segmentation.py +0 -0
  139. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/tasks/task.py +0 -0
  140. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/transforms/__init__.py +0 -0
  141. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/transforms/concatenate.py +0 -0
  142. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/transforms/crop.py +0 -0
  143. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/transforms/flip.py +0 -0
  144. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/transforms/mask.py +0 -0
  145. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/transforms/normalize.py +0 -0
  146. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/transforms/pad.py +0 -0
  147. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/train/transforms/transform.py +0 -0
  148. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/__init__.py +0 -0
  149. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/array.py +0 -0
  150. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/feature.py +0 -0
  151. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/fsspec.py +0 -0
  152. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/get_utm_ups_crs.py +0 -0
  153. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/grid_index.py +0 -0
  154. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/jsonargparse.py +0 -0
  155. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/mp.py +0 -0
  156. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/raster_format.py +0 -0
  157. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/rtree_index.py +0 -0
  158. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/spatial_index.py +0 -0
  159. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/sqlite_index.py +0 -0
  160. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn/utils/time.py +0 -0
  161. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn.egg-info/dependency_links.txt +0 -0
  162. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn.egg-info/entry_points.txt +0 -0
  163. {rslearn-0.0.6 → rslearn-0.0.7}/rslearn.egg-info/top_level.txt +0 -0
  164. {rslearn-0.0.6 → rslearn-0.0.7}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: rslearn
3
- Version: 0.0.6
3
+ Version: 0.0.7
4
4
  Summary: A library for developing remote sensing datasets and models
5
5
  Author: OlmoEarth Team
6
6
  License: Apache License
@@ -214,7 +214,7 @@ License-File: LICENSE
214
214
  Requires-Dist: boto3>=1.39
215
215
  Requires-Dist: class_registry>=2.1
216
216
  Requires-Dist: fiona>=1.10
217
- Requires-Dist: fsspec==2025.3.0
217
+ Requires-Dist: fsspec>=2025.9.0
218
218
  Requires-Dist: jsonargparse>=4.35.0
219
219
  Requires-Dist: lightning>=2.5.1.post0
220
220
  Requires-Dist: Pillow>=11.3
@@ -233,9 +233,10 @@ Requires-Dist: cdsapi>=0.7.6; extra == "extra"
233
233
  Requires-Dist: earthdaily[platform]>=1.0.7; extra == "extra"
234
234
  Requires-Dist: earthengine-api>=1.6.3; extra == "extra"
235
235
  Requires-Dist: einops>=0.8; extra == "extra"
236
- Requires-Dist: gcsfs==2025.3.0; extra == "extra"
236
+ Requires-Dist: gcsfs>=2025.9.0; extra == "extra"
237
237
  Requires-Dist: google-cloud-bigquery>=3.35; extra == "extra"
238
238
  Requires-Dist: google-cloud-storage>=2.18; extra == "extra"
239
+ Requires-Dist: huggingface_hub>=0.34.4; extra == "extra"
239
240
  Requires-Dist: netCDF4>=1.7.2; extra == "extra"
240
241
  Requires-Dist: osmium>=4.0.2; extra == "extra"
241
242
  Requires-Dist: planet>=3.1; extra == "extra"
@@ -243,12 +244,13 @@ Requires-Dist: planetary_computer>=1.0; extra == "extra"
243
244
  Requires-Dist: pycocotools>=2.0; extra == "extra"
244
245
  Requires-Dist: pystac_client>=0.9; extra == "extra"
245
246
  Requires-Dist: rtree>=1.4; extra == "extra"
246
- Requires-Dist: s3fs==2025.3.0; extra == "extra"
247
+ Requires-Dist: s3fs>=2025.9.0; extra == "extra"
247
248
  Requires-Dist: satlaspretrain_models>=0.3; extra == "extra"
248
249
  Requires-Dist: scipy>=1.16; extra == "extra"
249
250
  Requires-Dist: terratorch>=1.0.2; extra == "extra"
250
251
  Requires-Dist: transformers>=4.55; extra == "extra"
251
252
  Requires-Dist: wandb>=0.21; extra == "extra"
253
+ Requires-Dist: timm>=0.9.7; extra == "extra"
252
254
  Provides-Extra: dev
253
255
  Requires-Dist: interrogate>=1.7.0; extra == "dev"
254
256
  Requires-Dist: mypy<2,>=1.17.1; extra == "dev"
@@ -437,10 +439,10 @@ that they align with the windows we have previously defined (and the Sentinel-2
437
439
  we have already ingested). We can use the LocalFiles data source to have rslearn
438
440
  automate this process. Update the dataset `config.json` with a new layer:
439
441
 
440
- ```json
442
+ ```jsonc
441
443
  "layers": {
442
444
  "sentinel2": {
443
- ...
445
+ # ...
444
446
  },
445
447
  "worldcover": {
446
448
  "type": "raster",
@@ -455,7 +457,7 @@ automate this process. Update the dataset `config.json` with a new layer:
455
457
  }
456
458
  }
457
459
  },
458
- ...
460
+ # ...
459
461
  ```
460
462
 
461
463
  Repeat the materialize process so we populate the data for this new layer:
@@ -577,6 +579,7 @@ trainer:
577
579
  save_last: true
578
580
  monitor: val_accuracy
579
581
  mode: max
582
+ dirpath: ./land_cover_model_checkpoints/
580
583
  ```
581
584
 
582
585
  Now we can train the model:
@@ -621,13 +624,13 @@ windows in the "predict" group, which is where we added the Portland window.
621
624
  And it will be written in a new output_layer called "output". But we have to update the
622
625
  dataset configuration so it specifies the layer:
623
626
 
624
- ```json
627
+ ```jsonc
625
628
  "layers": {
626
629
  "sentinel2": {
627
- ...
630
+ # ...
628
631
  },
629
632
  "worldcover": {
630
- ...
633
+ # ...
631
634
  },
632
635
  "output": {
633
636
  "type": "raster",
@@ -644,7 +647,7 @@ Now we can apply the model:
644
647
  ```
645
648
  # Find model checkpoint in lightning_logs dir.
646
649
  ls lightning_logs/*/checkpoints/last.ckpt
647
- rslearn model predict --config land_cover_model.yaml --ckpt_path lightning_logs/version_0/checkpoints/last.ckpt
650
+ rslearn model predict --config land_cover_model.yaml --ckpt_path land_cover_model_checkpoints/last.ckpt
648
651
  ```
649
652
 
650
653
  And visualize the Sentinel-2 image and output in qgis:
@@ -751,17 +754,144 @@ got 585 examples in split val
751
754
 
752
755
  ### Visualizing with `model test`
753
756
 
754
- Coming soon
757
+ We can visualize the ground truth labels and model predictions in the test set using
758
+ the `model test` command:
759
+
760
+ ```
761
+ mkdir ./vis
762
+ rslearn model test --config land_cover_model.yaml --ckpt_path land_cover_model_checkpoints/last.ckpt --model.init_args.visualize_dir=./vis/
763
+ ```
764
+
765
+ This will produce PNGs in the vis directory. The visualizations are produced by the
766
+ `Task.visualize` function, so we could customize the visualization by subclassing
767
+ SegmentationTask and overriding the visualize function.
768
+
769
+
770
+ ### Logging to Weights & Biases
771
+
772
+ We can log to W&B by setting the logger under trainer in the model configuration file:
773
+
774
+ ```yaml
775
+ trainer:
776
+ # ...
777
+ logger:
778
+ class_path: lightning.pytorch.loggers.WandbLogger
779
+ init_args:
780
+ project: land_cover_model
781
+ name: version_00
782
+ ```
783
+
784
+ Now, runs with this model configuration should show on W&B. For `model fit` runs,
785
+ the training and validation loss and accuracy metric will be logged. The accuracy
786
+ metric is provided by SegmentationTask, and additional metrics can be enabled by
787
+ passing the relevant init_args to the task, e.g. mean IoU and F1:
788
+
789
+ ```yaml
790
+ class_path: rslearn.train.tasks.segmentation.SegmentationTask
791
+ init_args:
792
+ num_classes: 101
793
+ remap_values: [[0, 1], [0, 255]]
794
+ enable_miou_metric: true
795
+ enable_f1_metric: true
796
+ ```
755
797
 
756
798
 
757
799
  ### Inputting Multiple Sentinel-2 Images
758
800
 
759
- Coming soon
801
+ Currently our model inputs a single Sentinel-2 image. However, for most tasks where
802
+ labels are not expected to change from week to week, we find that accuracy can be
803
+ significantly improved by inputting multiple images, regardless of the pre-trained
804
+ model used. Multiple images make the model more resilient to clouds and image
805
+ artifacts, and allows the model to synthesize information across different views that
806
+ may come from different seasons or weather conditions.
760
807
 
808
+ We first update our dataset configuration to obtain three images, by customizing the
809
+ query_config section. This can replace the sentinel2 layer:
761
810
 
762
- ### Logging to Weights & Biases
811
+ ```jsonc
812
+ "layers": {
813
+ "sentinel2_multi": {
814
+ "type": "raster",
815
+ "band_sets": [{
816
+ "dtype": "uint8",
817
+ "bands": ["R", "G", "B"]
818
+ }],
819
+ "data_source": {
820
+ "name": "rslearn.data_sources.gcp_public_data.Sentinel2",
821
+ "index_cache_dir": "cache/sentinel2/",
822
+ "sort_by": "cloud_cover",
823
+ "use_rtree_index": false,
824
+ "query_config": {
825
+ "max_matches": 3
826
+ }
827
+ }
828
+ },
829
+ "worldcover": {
830
+ # ...
831
+ },
832
+ "output": {
833
+ # ...
834
+ }
835
+ }
836
+ ```
837
+
838
+ Repeat the steps from earlier to prepare, ingest, and materialize the dataset.
839
+
840
+ Now we update our model configuration file. First, we modify the model architecture to
841
+ be able to input an image time series. We use the SimpleTimeSeries model, which takes
842
+ an encoder that expects a single-image input, and applies that encoder on each image in
843
+ the time series. It then applies max temporal pooling to combine the per-image feature
844
+ maps extracted by the encoder.
845
+
846
+ Image time series in rslearn are currently stored as [T*C, H, W] tensors. So we pass
847
+ the `image_channels` to SimpleTimeSeries so it knows how to slice up the tensor to
848
+ recover the per-timestep images.
849
+
850
+ ```yaml
851
+ model:
852
+ class_path: rslearn.train.lightning_module.RslearnLightningModule
853
+ init_args:
854
+ model:
855
+ class_path: rslearn.models.singletask.SingleTaskModel
856
+ init_args:
857
+ encoder:
858
+ - class_path: rslearn.models.simple_time_series.SimpleTimeSeries
859
+ init_args:
860
+ encoder:
861
+ class_path: rslearn.models.satlaspretrain.SatlasPretrain
862
+ init_args:
863
+ model_identifier: "Sentinel2_SwinB_SI_RGB"
864
+ image_channels: 3
865
+ decoder:
866
+ # ...
867
+ ```
763
868
 
764
- Coming soon
869
+ Next, we update the data module section so that the dataset loads the image time series
870
+ rather than a single image. The `load_all_layers` option tells the dataset to stack the
871
+ rasters from all of the layers specified, and also to ignore windows where any of those
872
+ layers are missing.
873
+
874
+ ```yaml
875
+ data:
876
+ class_path: rslearn.train.data_module.RslearnDataModule
877
+ init_args:
878
+ path: # ...
879
+ inputs:
880
+ image:
881
+ data_type: "raster"
882
+ layers: ["sentinel2_multi", "sentinel2_multi.1", "sentinel2_multi.2"]
883
+ bands: ["R", "G", "B"]
884
+ passthrough: true
885
+ load_all_layers: true
886
+ targets:
887
+ # ...
888
+ ```
889
+
890
+ Now we can train an updated model:
891
+
892
+ ```
893
+ rslearn model fit --config land_cover_model.yaml
894
+ ```
765
895
 
766
896
 
767
897
  Contact
@@ -175,10 +175,10 @@ that they align with the windows we have previously defined (and the Sentinel-2
175
175
  we have already ingested). We can use the LocalFiles data source to have rslearn
176
176
  automate this process. Update the dataset `config.json` with a new layer:
177
177
 
178
- ```json
178
+ ```jsonc
179
179
  "layers": {
180
180
  "sentinel2": {
181
- ...
181
+ # ...
182
182
  },
183
183
  "worldcover": {
184
184
  "type": "raster",
@@ -193,7 +193,7 @@ automate this process. Update the dataset `config.json` with a new layer:
193
193
  }
194
194
  }
195
195
  },
196
- ...
196
+ # ...
197
197
  ```
198
198
 
199
199
  Repeat the materialize process so we populate the data for this new layer:
@@ -315,6 +315,7 @@ trainer:
315
315
  save_last: true
316
316
  monitor: val_accuracy
317
317
  mode: max
318
+ dirpath: ./land_cover_model_checkpoints/
318
319
  ```
319
320
 
320
321
  Now we can train the model:
@@ -359,13 +360,13 @@ windows in the "predict" group, which is where we added the Portland window.
359
360
  And it will be written in a new output_layer called "output". But we have to update the
360
361
  dataset configuration so it specifies the layer:
361
362
 
362
- ```json
363
+ ```jsonc
363
364
  "layers": {
364
365
  "sentinel2": {
365
- ...
366
+ # ...
366
367
  },
367
368
  "worldcover": {
368
- ...
369
+ # ...
369
370
  },
370
371
  "output": {
371
372
  "type": "raster",
@@ -382,7 +383,7 @@ Now we can apply the model:
382
383
  ```
383
384
  # Find model checkpoint in lightning_logs dir.
384
385
  ls lightning_logs/*/checkpoints/last.ckpt
385
- rslearn model predict --config land_cover_model.yaml --ckpt_path lightning_logs/version_0/checkpoints/last.ckpt
386
+ rslearn model predict --config land_cover_model.yaml --ckpt_path land_cover_model_checkpoints/last.ckpt
386
387
  ```
387
388
 
388
389
  And visualize the Sentinel-2 image and output in qgis:
@@ -489,17 +490,144 @@ got 585 examples in split val
489
490
 
490
491
  ### Visualizing with `model test`
491
492
 
492
- Coming soon
493
+ We can visualize the ground truth labels and model predictions in the test set using
494
+ the `model test` command:
495
+
496
+ ```
497
+ mkdir ./vis
498
+ rslearn model test --config land_cover_model.yaml --ckpt_path land_cover_model_checkpoints/last.ckpt --model.init_args.visualize_dir=./vis/
499
+ ```
500
+
501
+ This will produce PNGs in the vis directory. The visualizations are produced by the
502
+ `Task.visualize` function, so we could customize the visualization by subclassing
503
+ SegmentationTask and overriding the visualize function.
504
+
505
+
506
+ ### Logging to Weights & Biases
507
+
508
+ We can log to W&B by setting the logger under trainer in the model configuration file:
509
+
510
+ ```yaml
511
+ trainer:
512
+ # ...
513
+ logger:
514
+ class_path: lightning.pytorch.loggers.WandbLogger
515
+ init_args:
516
+ project: land_cover_model
517
+ name: version_00
518
+ ```
519
+
520
+ Now, runs with this model configuration should show on W&B. For `model fit` runs,
521
+ the training and validation loss and accuracy metric will be logged. The accuracy
522
+ metric is provided by SegmentationTask, and additional metrics can be enabled by
523
+ passing the relevant init_args to the task, e.g. mean IoU and F1:
524
+
525
+ ```yaml
526
+ class_path: rslearn.train.tasks.segmentation.SegmentationTask
527
+ init_args:
528
+ num_classes: 101
529
+ remap_values: [[0, 1], [0, 255]]
530
+ enable_miou_metric: true
531
+ enable_f1_metric: true
532
+ ```
493
533
 
494
534
 
495
535
  ### Inputting Multiple Sentinel-2 Images
496
536
 
497
- Coming soon
537
+ Currently our model inputs a single Sentinel-2 image. However, for most tasks where
538
+ labels are not expected to change from week to week, we find that accuracy can be
539
+ significantly improved by inputting multiple images, regardless of the pre-trained
540
+ model used. Multiple images make the model more resilient to clouds and image
541
+ artifacts, and allows the model to synthesize information across different views that
542
+ may come from different seasons or weather conditions.
498
543
 
544
+ We first update our dataset configuration to obtain three images, by customizing the
545
+ query_config section. This can replace the sentinel2 layer:
499
546
 
500
- ### Logging to Weights & Biases
547
+ ```jsonc
548
+ "layers": {
549
+ "sentinel2_multi": {
550
+ "type": "raster",
551
+ "band_sets": [{
552
+ "dtype": "uint8",
553
+ "bands": ["R", "G", "B"]
554
+ }],
555
+ "data_source": {
556
+ "name": "rslearn.data_sources.gcp_public_data.Sentinel2",
557
+ "index_cache_dir": "cache/sentinel2/",
558
+ "sort_by": "cloud_cover",
559
+ "use_rtree_index": false,
560
+ "query_config": {
561
+ "max_matches": 3
562
+ }
563
+ }
564
+ },
565
+ "worldcover": {
566
+ # ...
567
+ },
568
+ "output": {
569
+ # ...
570
+ }
571
+ }
572
+ ```
573
+
574
+ Repeat the steps from earlier to prepare, ingest, and materialize the dataset.
575
+
576
+ Now we update our model configuration file. First, we modify the model architecture to
577
+ be able to input an image time series. We use the SimpleTimeSeries model, which takes
578
+ an encoder that expects a single-image input, and applies that encoder on each image in
579
+ the time series. It then applies max temporal pooling to combine the per-image feature
580
+ maps extracted by the encoder.
581
+
582
+ Image time series in rslearn are currently stored as [T*C, H, W] tensors. So we pass
583
+ the `image_channels` to SimpleTimeSeries so it knows how to slice up the tensor to
584
+ recover the per-timestep images.
585
+
586
+ ```yaml
587
+ model:
588
+ class_path: rslearn.train.lightning_module.RslearnLightningModule
589
+ init_args:
590
+ model:
591
+ class_path: rslearn.models.singletask.SingleTaskModel
592
+ init_args:
593
+ encoder:
594
+ - class_path: rslearn.models.simple_time_series.SimpleTimeSeries
595
+ init_args:
596
+ encoder:
597
+ class_path: rslearn.models.satlaspretrain.SatlasPretrain
598
+ init_args:
599
+ model_identifier: "Sentinel2_SwinB_SI_RGB"
600
+ image_channels: 3
601
+ decoder:
602
+ # ...
603
+ ```
501
604
 
502
- Coming soon
605
+ Next, we update the data module section so that the dataset loads the image time series
606
+ rather than a single image. The `load_all_layers` option tells the dataset to stack the
607
+ rasters from all of the layers specified, and also to ignore windows where any of those
608
+ layers are missing.
609
+
610
+ ```yaml
611
+ data:
612
+ class_path: rslearn.train.data_module.RslearnDataModule
613
+ init_args:
614
+ path: # ...
615
+ inputs:
616
+ image:
617
+ data_type: "raster"
618
+ layers: ["sentinel2_multi", "sentinel2_multi.1", "sentinel2_multi.2"]
619
+ bands: ["R", "G", "B"]
620
+ passthrough: true
621
+ load_all_layers: true
622
+ targets:
623
+ # ...
624
+ ```
625
+
626
+ Now we can train an updated model:
627
+
628
+ ```
629
+ rslearn model fit --config land_cover_model.yaml
630
+ ```
503
631
 
504
632
 
505
633
  Contact
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "rslearn"
3
- version = "0.0.6"
3
+ version = "0.0.7"
4
4
  description = "A library for developing remote sensing datasets and models"
5
5
  authors = [
6
6
  { name = "OlmoEarth Team" },
@@ -12,9 +12,7 @@ dependencies = [
12
12
  "boto3>=1.39",
13
13
  "class_registry>=2.1",
14
14
  "fiona>=1.10",
15
- # Need this pin since 2025.7.0 has slow performance for exists/ls operations, see
16
- # this issue: https://github.com/fsspec/gcsfs/issues/696
17
- "fsspec==2025.3.0",
15
+ "fsspec>=2025.9.0",
18
16
  "jsonargparse>=4.35.0",
19
17
  "lightning>=2.5.1.post0",
20
18
  "Pillow>=11.3",
@@ -37,9 +35,10 @@ extra = [
37
35
  "earthdaily[platform]>=1.0.7",
38
36
  "earthengine-api>=1.6.3",
39
37
  "einops>=0.8",
40
- "gcsfs==2025.3.0",
38
+ "gcsfs>=2025.9.0",
41
39
  "google-cloud-bigquery>=3.35",
42
40
  "google-cloud-storage>=2.18",
41
+ "huggingface_hub>=0.34.4",
43
42
  "netCDF4>=1.7.2",
44
43
  "osmium>=4.0.2",
45
44
  "planet>=3.1",
@@ -47,12 +46,13 @@ extra = [
47
46
  "pycocotools>=2.0",
48
47
  "pystac_client>=0.9",
49
48
  "rtree>=1.4",
50
- "s3fs==2025.3.0",
49
+ "s3fs>=2025.9.0",
51
50
  "satlaspretrain_models>=0.3",
52
51
  "scipy>=1.16",
53
52
  "terratorch>=1.0.2",
54
53
  "transformers>=4.55",
55
54
  "wandb>=0.21",
55
+ "timm>=0.9.7",
56
56
  ]
57
57
 
58
58
  dev = [
@@ -83,6 +83,8 @@ include = ["rslearn*"]
83
83
 
84
84
  [tool.setuptools.package-data]
85
85
  rslearn = ["py.typed"]
86
+ "rslearn.models.clay.configs" = ["metadata.yaml"]
87
+ "rslearn.models.panopticon_data.sensors" = ["*.yaml"]
86
88
 
87
89
  [tool.ruff]
88
90
  fix = true