rslearn-0.0.11-py3-none-any.whl → rslearn-0.0.12-py3-none-any.whl

rslearn/models/anysat.py CHANGED
@@ -1,4 +1,8 @@
- """AnySat model."""
+ """AnySat model.
+
+ This code loads the AnySat model from torch hub. See
+ https://github.com/gastruc/AnySat for applicable license and copyright information.
+ """
 
  from typing import Any
 
rslearn/models/dinov3.py CHANGED
@@ -1,4 +1,9 @@
- """DinoV3 model."""
+ """DinoV3 model.
+
+ This code loads the DINOv3 model. You must obtain the model separately from Meta to use
+ it. See https://github.com/facebookresearch/dinov3 for applicable license and copyright
+ information.
+ """
 
  from enum import StrEnum
  from pathlib import Path
rslearn/models/feature_center_crop.py ADDED
@@ -0,0 +1,50 @@
+ """Apply center cropping on a feature map."""
+
+ from typing import Any
+
+ import torch
+
+
+ class FeatureCenterCrop(torch.nn.Module):
+     """Apply center cropping on the input feature maps."""
+
+     def __init__(
+         self,
+         sizes: list[tuple[int, int]],
+     ) -> None:
+         """Create a new FeatureCenterCrop.
+
+         Only the center of each feature map will be retained and passed to the next
+         module.
+
+         Args:
+             sizes: a list of (height, width) tuples, with one tuple for each input
+                 feature map.
+         """
+         super().__init__()
+         self.sizes = sizes
+
+     def forward(
+         self, features: list[torch.Tensor], inputs: list[dict[str, Any]]
+     ) -> list[torch.Tensor]:
+         """Apply center cropping on the feature maps.
+
+         Args:
+             features: list of feature maps at different resolutions.
+             inputs: original inputs (ignored).
+
+         Returns:
+             center cropped feature maps.
+         """
+         new_features = []
+         for i, feat in enumerate(features):
+             height, width = self.sizes[i]
+             if feat.shape[2] < height or feat.shape[3] < width:
+                 raise ValueError(
+                     "feature map is smaller than the desired height and width"
+                 )
+             start_h = feat.shape[2] // 2 - height // 2
+             start_w = feat.shape[3] // 2 - width // 2
+             feat = feat[:, :, start_h : start_h + height, start_w : start_w + width]
+             new_features.append(feat)
+         return new_features
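
A minimal usage sketch for this new module (the import path comes from this wheel's RECORD; the tensor shapes are illustrative):

```python
import torch

from rslearn.models.feature_center_crop import FeatureCenterCrop

# Two feature maps at different resolutions: (batch, channels, height, width).
features = [torch.randn(1, 64, 32, 32), torch.randn(1, 128, 16, 16)]

# Keep the central 16x16 of the first map and the central 8x8 of the second.
crop = FeatureCenterCrop(sizes=[(16, 16), (8, 8)])
cropped = crop(features, inputs=[])

assert cropped[0].shape == (1, 64, 16, 16)
assert cropped[1].shape == (1, 128, 8, 8)
```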
rslearn/models/olmoearth_pretrain/model.py CHANGED
@@ -9,6 +9,11 @@ from einops import rearrange
  from olmo_core.config import Config
  from olmo_core.distributed.checkpoint import load_model_and_optim_state
  from olmoearth_pretrain.data.constants import Modality
+ from olmoearth_pretrain.model_loader import (
+     ModelID,
+     load_model_from_id,
+     load_model_from_path,
+ )
  from olmoearth_pretrain.nn.flexihelios import Encoder, TokensAndMasks
  from olmoearth_pretrain.train.masking import MaskedOlmoEarthSample, MaskValue
  from upath import UPath
@@ -31,54 +36,114 @@ AUTOCAST_DTYPE_MAP = {
      "float32": torch.float32,
  }
 
+ EMBEDDING_SIZES = {
+     ModelID.OLMOEARTH_V1_NANO: 128,
+     ModelID.OLMOEARTH_V1_TINY: 192,
+     ModelID.OLMOEARTH_V1_BASE: 768,
+ }
+
 
  class OlmoEarth(torch.nn.Module):
      """A wrapper to support the OlmoEarth model."""
 
      def __init__(
          self,
-         # TODO: we should accept model ID instead of checkpoint_path once we are closer
-         # to being ready for release.
-         checkpoint_path: str,
-         selector: list[str | int] = [],
+         patch_size: int,
+         model_id: ModelID | None = None,
+         model_path: str | None = None,
+         checkpoint_path: str | None = None,
+         selector: list[str | int] = ["encoder"],
          forward_kwargs: dict[str, Any] = {},
          random_initialization: bool = False,
          embedding_size: int | None = None,
-         patch_size: int | None = None,
          autocast_dtype: str | None = "bfloat16",
      ):
          """Create a new OlmoEarth model.
 
          Args:
-             checkpoint_path: the checkpoint directory to load. It should contain
-                 config.json file as well as model_and_optim folder.
+             patch_size: token spatial patch size to use.
+             model_id: the model ID to load. Exactly one of model_id, model_path, or
+                 checkpoint_path must be set.
+             model_path: the path to load the model from. Same structure as the
+                 HF-hosted model_id models: a bundle with a config.json and
+                 weights.pth.
+             checkpoint_path: the checkpoint directory to load from, if model_id and
+                 model_path are not set. It should contain a distributed checkpoint
+                 with a config.json file as well as a model_and_optim folder.
              selector: an optional sequence of attribute names or list indices to select
-                 the sub-module that should be applied on the input images.
+                 the sub-module that should be applied on the input images. Defaults to
+                 ["encoder"] to select only the transformer encoder.
              forward_kwargs: additional arguments to pass to forward pass besides the
                  MaskedOlmoEarthSample.
              random_initialization: whether to skip loading the checkpoint so the
                  weights are randomly initialized. In this case, the checkpoint is only
                  used to define the model architecture.
              embedding_size: optional embedding size to report via
-                 get_backbone_channels.
-             patch_size: optional patch size to report via get_backbone_channels.
+                 get_backbone_channels (if model_id is not set).
              autocast_dtype: which dtype to use for autocasting, or set None to disable.
          """
+         if (
+             sum(
+                 [
+                     model_id is not None,
+                     model_path is not None,
+                     checkpoint_path is not None,
+                 ]
+             )
+             != 1
+         ):
+             raise ValueError(
+                 "exactly one of model_id, model_path, or checkpoint_path must be set"
+             )
+
          super().__init__()
-         _checkpoint_path = UPath(checkpoint_path)
+         self.patch_size = patch_size
          self.forward_kwargs = forward_kwargs
          self.embedding_size = embedding_size
-         self.patch_size = patch_size
 
          if autocast_dtype is not None:
              self.autocast_dtype = AUTOCAST_DTYPE_MAP[autocast_dtype]
          else:
              self.autocast_dtype = None
 
+         if model_id is not None:
+             # Load from Hugging Face.
+             model = load_model_from_id(model_id, load_weights=not random_initialization)
+             if self.embedding_size is None and model_id in EMBEDDING_SIZES:
+                 self.embedding_size = EMBEDDING_SIZES[model_id]
+
+         elif model_path is not None:
+             # Load from path.
+             model = load_model_from_path(
+                 UPath(model_path), load_weights=not random_initialization
+             )
+
+         else:
+             # Load the distributed model checkpoint by path through Olmo Core.
+             model = self._load_model_from_checkpoint(
+                 UPath(checkpoint_path), random_initialization
+             )
+
+         # Select just the portion of the model that we actually want to use.
+         for part in selector:
+             if isinstance(part, str):
+                 model = getattr(model, part)
+             else:
+                 model = model[part]
+         self.model = model
+
+     def _load_model_from_checkpoint(
+         self, checkpoint_upath: UPath, random_initialization: bool
+     ) -> torch.nn.Module:
+         """Load the OlmoEarth pre-trained model from a distributed checkpoint folder.
+
+         The folder should contain config.json as well as the model_and_optim folder
+         that contains the distributed checkpoint. This is the format produced by
+         pre-training runs in olmoearth_pretrain.
+         """
          # Load the model config and initialize it.
          # We avoid loading the train module here because it depends on running within
          # olmo_core.
-         with (_checkpoint_path / "config.json").open() as f:
+         with (checkpoint_upath / "config.json").open() as f:
              config_dict = json.load(f)
          model_config = Config.from_dict(config_dict["model"])
@@ -86,22 +151,14 @@ class OlmoEarth(torch.nn.Module):
 
          # Load the checkpoint.
          if not random_initialization:
-             train_module_dir = _checkpoint_path / "model_and_optim"
+             train_module_dir = checkpoint_upath / "model_and_optim"
              if train_module_dir.exists():
                  load_model_and_optim_state(str(train_module_dir), model)
                  logger.info(f"loaded OlmoEarth encoder from {train_module_dir}")
              else:
                  logger.info(f"could not find OlmoEarth encoder at {train_module_dir}")
-         else:
-             logger.info("skipping loading OlmoEarth encoder")
 
-         # Select just the portion of the model that we actually want to use.
-         for part in selector:
-             if isinstance(part, str):
-                 model = getattr(model, part)
-             else:
-                 model = model[part]
-         self.model = model
+         return model
 
      def forward(self, inputs: list[dict[str, Any]]) -> list[torch.Tensor]:
          """Compute feature maps from the OlmoEarth backbone.
@@ -167,13 +224,16 @@ class OlmoEarth(torch.nn.Module):
          if isinstance(self.model, Encoder):
              # Encoder has a fast_pass argument to indicate mask is not needed.
              tokens_and_masks = self.model(
-                 sample, fast_pass=True, **self.forward_kwargs
+                 sample,
+                 fast_pass=True,
+                 patch_size=self.patch_size,
+                 **self.forward_kwargs,
              )["tokens_and_masks"]
          else:
              # Other models like STEncoder do not have this option supported.
-             tokens_and_masks = self.model(sample, **self.forward_kwargs)[
-                 "tokens_and_masks"
-             ]
+             tokens_and_masks = self.model(
+                 sample, patch_size=self.patch_size, **self.forward_kwargs
+             )["tokens_and_masks"]
 
          # Apply temporal/modality pooling so we just have one feature per patch.
          features = []
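
The constructor change above replaces the single checkpoint_path argument with an exactly-one-of contract across model_id, model_path, and checkpoint_path. A sketch of the two common calls, assuming OlmoEarth is imported from the module path listed in RECORD and that patch_size=4 is a valid patch size for these checkpoints:

```python
from olmoearth_pretrain.model_loader import ModelID

from rslearn.models.olmoearth_pretrain.model import OlmoEarth

# Load a released model by ID; embedding_size is filled in from EMBEDDING_SIZES.
model = OlmoEarth(patch_size=4, model_id=ModelID.OLMOEARTH_V1_BASE)

# Or load a distributed pre-training checkpoint by path instead. The directory
# should contain config.json and a model_and_optim folder.
model = OlmoEarth(patch_size=4, checkpoint_path="/path/to/checkpoint")

# Setting none, or more than one, of model_id/model_path/checkpoint_path
# raises ValueError.
```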
rslearn/models/prithvi.py CHANGED
@@ -1,4 +1,12 @@
- """Prithvi V2."""
+ """Prithvi V2.
+
+ This code is adapted from https://github.com/NASA-IMPACT/Prithvi-WxC
+
+ The code is released under:
+
+ MIT License
+ Copyright (c) 2024 Inter Agency Implementation and Advanced Concepts
+ """
 
  import json
  import logging
rslearn/train/lightning_module.py CHANGED
@@ -94,7 +94,6 @@ class RslearnLightningModule(L.LightningModule):
          restore_config: RestoreConfig | None = None,
          print_parameters: bool = False,
          print_model: bool = False,
-         strict_loading: bool = True,
          # Deprecated options.
          lr: float = 1e-3,
          plateau: bool = False,
@@ -118,7 +117,6 @@ class RslearnLightningModule(L.LightningModule):
              print_parameters: whether to print the list of model parameters after model
                  initialization
              print_model: whether to print the model after model initialization
-             strict_loading: whether to strictly load the model parameters.
              lr: deprecated.
              plateau: deprecated.
              plateau_factor: deprecated.
@@ -132,7 +130,6 @@ class RslearnLightningModule(L.LightningModule):
          self.visualize_dir = visualize_dir
          self.metrics_file = metrics_file
          self.restore_config = restore_config
-         self.strict_loading = strict_loading
 
          self.scheduler_factory: SchedulerFactory | None = None
          if scheduler:
rslearn/train/tasks/classification.py CHANGED
@@ -49,8 +49,8 @@ class ClassificationTask(BasicTask):
                  features with matching properties.
              read_class_id: whether to read an integer class ID instead of the class
                  name.
-             allow_invalid: instead of throwing error when no regression label is found
-                 at a window, simply mark the example invalid for this task
+             allow_invalid: instead of throwing error when no classification label is
+                 found at a window, simply mark the example invalid for this task
              skip_unknown_categories: whether to skip examples with categories that are
                  not passed via classes, instead of throwing error
              prob_property: when predicting, write probabilities in addition to class ID
rslearn/train/tasks/detection.py CHANGED
@@ -72,11 +72,11 @@ class DetectionTask(BasicTask):
          f1_metric_kwargs: dict[str, Any] = {},
          **kwargs: Any,
      ) -> None:
-         """Initialize a new SegmentationTask.
+         """Initialize a new DetectionTask.
 
          Args:
-             property_name: the property from which to extract the class name. The class
-                 is read from the first matching feature.
+             property_name: the property from which to extract the class name. Features
+                 without this property name are ignored.
              classes: a list of class names.
              filters: optional list of (property_name, property_value) to only consider
                  features with matching properties.
@@ -86,8 +86,8 @@ class DetectionTask(BasicTask):
                  not passed via classes, instead of throwing error
              skip_empty_examples: whether to skip examples with zero labels.
              colors: optional colors for each class
-             box_size: force all boxes to be this size, centered at the centroid of the
-                 geometry. Required for Point geometries.
+             box_size: force all boxes to be two times this size, centered at the
+                 centroid of the geometry. Required for Point geometries.
              clip_boxes: whether to clip boxes to the image bounds.
              exclude_by_center: before optionally clipping boxes, exclude boxes if the
                  center is outside the image bounds.
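
The corrected box_size wording means each output box has a side length of two times box_size. An illustrative computation (not the package's code):

```python
# Extend box_size pixels in every direction from the point centroid.
def box_from_centroid(
    cx: float, cy: float, box_size: int
) -> tuple[float, float, float, float]:
    return (cx - box_size, cy - box_size, cx + box_size, cy + box_size)


x0, y0, x1, y1 = box_from_centroid(50.0, 40.0, box_size=8)
assert (x1 - x0, y1 - y0) == (16.0, 16.0)  # side length is 2 * box_size
```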
rslearn/train/tasks/per_pixel_regression.py CHANGED
@@ -26,10 +26,11 @@ class PerPixelRegressionTask(BasicTask):
          """Initialize a new PerPixelRegressionTask.
 
          Args:
-             scale_factor: multiply the label value by this factor before using it for
+             scale_factor: multiply ground truth values by this factor before using them for
                  training.
-             metric_mode: what metric to use, either mse or l1
-             nodata_value: optional value to treat as invalid
+             metric_mode: what metric to use, either "mse" (default) or "l1"
+             nodata_value: optional value to treat as invalid. The loss will be masked
+                 at pixels where the ground truth value is equal to nodata_value.
              kwargs: other arguments to pass to BasicTask
          """
          super().__init__(**kwargs)
@@ -141,7 +142,7 @@ class PerPixelRegressionHead(torch.nn.Module):
          """Initialize a new RegressionHead.
 
          Args:
-             loss_mode: the loss function to use, either "mse" or "l1".
+             loss_mode: the loss function to use, either "mse" (default) or "l1".
              use_sigmoid: whether to apply a sigmoid activation on the output. This
                  requires targets to be between 0-1.
          """
rslearn/train/tasks/regression.py CHANGED
@@ -33,14 +33,14 @@ class RegressionTask(BasicTask):
          """Initialize a new RegressionTask.
 
          Args:
-             property_name: the property from which to extract the regression value. The
-                 value is read from the first matching feature.
+             property_name: the property from which to extract the ground truth
+                 regression value. The value is read from the first matching feature.
              filters: optional list of (property_name, property_value) to only consider
                  features with matching properties.
              allow_invalid: instead of throwing error when no regression label is found
                  at a window, simply mark the example invalid for this task
-             scale_factor: multiply the label value by this factor
-             metric_mode: what metric to use, either mse or l1
+             scale_factor: multiply the label value by this factor for training
+             metric_mode: what metric to use, either "mse" (default) or "l1"
              use_accuracy_metric: include metric that reports percentage of
                  examples where output is within a factor of the ground truth.
              within_factor: the factor for accuracy metric. If it's 0.2, and ground
@@ -189,7 +189,7 @@ class RegressionHead(torch.nn.Module):
          """Initialize a new RegressionHead.
 
          Args:
-             loss_mode: the loss function to use, either "mse" or "l1".
+             loss_mode: the loss function to use, either "mse" (default) or "l1".
              use_sigmoid: whether to apply a sigmoid activation on the output. This
                  requires targets to be between 0-1.
          """
rslearn/train/transforms/pad.py CHANGED
@@ -25,8 +25,8 @@ class Pad(Transform):
          Args:
              size: the size to pad to, or a min/max range of pad sizes. If the image is
                  larger than this size, then it is cropped instead.
-             mode: "center" (default) to apply padding equally on all sides, or
-                 "topleft" to only apply it on the bottom and right.
+             mode: "topleft" (default) to only apply padding on the bottom and right
+                 sides, or "center" to apply padding equally on all sides.
              image_selectors: image items to transform.
              box_selectors: boxes items to transform.
          """
@@ -64,7 +64,7 @@ class Pad(Transform):
      ) -> torch.Tensor:
          # Before/after must either be both non-negative or both negative.
          # >=0 indicates padding while <0 indicates cropping.
-         assert (before < 0 and after < 0) or (before >= 0 and after >= 0)
+         assert (before < 0 and after <= 0) or (before >= 0 and after >= 0)
          if before > 0:
              # Padding.
              if horizontal:
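
The Pad docstring fix swaps which mode is the default. The difference between the two modes can be sketched with plain torch padding (how "center" splits an odd padding amount is an assumption here; the package's exact split is not shown in this diff):

```python
import torch
import torch.nn.functional as F

img = torch.randn(1, 3, 5, 5)
pad_total = 8 - 5  # pad a 5x5 image up to 8x8

# "topleft": all padding goes on the bottom and right sides.
topleft = F.pad(img, (0, pad_total, 0, pad_total))

# "center": padding is split across both sides of each dimension.
before = pad_total // 2
after = pad_total - before
center = F.pad(img, (before, after, before, after))

assert topleft.shape == center.shape == (1, 3, 8, 8)
```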
rslearn-0.0.12.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: rslearn
- Version: 0.0.11
+ Version: 0.0.12
  Summary: A library for developing remote sensing datasets and models
  Author: OlmoEarth Team
  License: Apache License
@@ -211,6 +211,7 @@ Project-URL: repository, https://github.com/allenai/rslearn
  Requires-Python: >=3.11
  Description-Content-Type: text/markdown
  License-File: LICENSE
+ License-File: NOTICE
  Requires-Dist: boto3>=1.39
  Requires-Dist: fiona>=1.10
  Requires-Dist: fsspec>=2025.9.0
rslearn-0.0.12.dist-info/RECORD CHANGED
@@ -44,13 +44,13 @@ rslearn/dataset/materialize.py,sha256=-z47svc_JqGhzkp8kq5Hd9fykWNqFEUCQezo887TWB
  rslearn/dataset/remap.py,sha256=6MaImsY02GNACpvRM81RvWmjZWRfAHxo_R3Ox6XLF6A,2723
  rslearn/dataset/window.py,sha256=I5RqZ12jlIXhohw4qews1x_I4tSDpml709DZRtLiN24,12546
  rslearn/models/__init__.py,sha256=_vWoF9d2Slah8-6XhYhdU4SRsy_CNxXjCGQTD2yvu3Q,22
- rslearn/models/anysat.py,sha256=3BnaiS1sYB4SnV6qRjHksiz_r9vUuZeGPUO2XUziFA0,7810
+ rslearn/models/anysat.py,sha256=3Oh2gWxicVdUzOjevBEZf0PuolmCy0KC5Ad7JY-0Plc,7949
  rslearn/models/clip.py,sha256=u5aqYnVB4Jag7o1h8EzPDAc1t2BAHeALA9FcUwP5tfo,2238
  rslearn/models/conv.py,sha256=fWyByeswIOKKzyPmP3erYUlZaKEV0huWHA4CyKTBbfY,1703
- rslearn/models/copernicusfm.py,sha256=3AiORuUre9sZYwydbrDgShwKtxeTLmExp7WQmJtBylg,7842
  rslearn/models/croma.py,sha256=cOazTp3l2PNJltKrmPqD5Gy4pi3CI03-X9G4T10cX2k,9529
- rslearn/models/dinov3.py,sha256=GKk5qXZPCEporATJdjaSWsDTfWDlAGRWBplFUJN5nRM,6146
+ rslearn/models/dinov3.py,sha256=9k9kNlXCorQQwKjLGptooANd48TUBsITQ1e4fUomlM4,6337
  rslearn/models/faster_rcnn.py,sha256=uaxX6-E1f0BibaA9sorEg3be83C7kTdTc39pC5jRqwE,8286
+ rslearn/models/feature_center_crop.py,sha256=24eOrvLEGGVWPw7kPHyUes5HtYNAX7GZ_NpqDGMILEY,1553
  rslearn/models/fpn.py,sha256=s3cz29I14FaSuvBvLOcwCrqVsaRBxG5GjLlqap4WgPc,1603
  rslearn/models/module_wrapper.py,sha256=H2zb-8Au4t31kawW_4JEKHsaXFjpYDawb31ZEauKcxU,2728
  rslearn/models/molmo.py,sha256=mVrARBhZciMzOgOOjGB5AHlPIf2iO9IBSJmdyKSl1L8,2061
@@ -58,7 +58,7 @@ rslearn/models/multitask.py,sha256=j2Kiwj_dUiUp_CIUr25bS8HiyeoFlr1PGqjTfpgIGLc,1
  rslearn/models/panopticon.py,sha256=woNEs53wVc5D-NxbSDEPRZ_mYe8vllnuldmADjvhfDQ,5806
  rslearn/models/pick_features.py,sha256=y8e4tJFhyG7ZuVSElWhQ5-Aer4ZKJCEH9wLGJU7WqGI,1551
  rslearn/models/pooling_decoder.py,sha256=unr2fSE_QmJHPi3dKtopqMtb1Kn-2h94LgwwAVP9vZg,4437
- rslearn/models/prithvi.py,sha256=SVM3ypJlVTkXQ69pPhB4UeJr87VnmADTCuyV365dbkU,39961
+ rslearn/models/prithvi.py,sha256=AIzcO5xk1ggR0MjbfhIzqPVgUKFN7odxygmgyAelfW8,40143
  rslearn/models/registry.py,sha256=yCcrOvLkbn07Xtln1j7hAB_kmGw0MGsiR2TloJq9Bmk,504
  rslearn/models/resize_features.py,sha256=asKXWrLHIBrU6GaAV0Ory9YuK7IK104XjhkB4ljzI3A,1289
  rslearn/models/sam2_enc.py,sha256=gNlPokr7eNxO2KvnzDMXNxYM2WRO0YkQPjR4110n6cw,3508
@@ -75,14 +75,6 @@ rslearn/models/upsample.py,sha256=3kWbyWZIk56JJxj8en9pieitbrk3XnbIsTKlEkiDQQY,93
  rslearn/models/use_croma.py,sha256=OSBqMuLp-pDtqPNWAVBfmX4wckmyYCKtUDdGCjJk_K8,17966
  rslearn/models/clay/clay.py,sha256=5RO5H8EM0tKjCwWMQ4xDkKkUCwKpm2K_Yw1alnhvVhU,7773
  rslearn/models/clay/configs/metadata.yaml,sha256=rZTFh4Yb9htEfbQNOPl4HTbFogEhzwIRqFzG-1uT01Y,4652
- rslearn/models/copernicusfm_src/__init__.py,sha256=8QLhisbHub6VJl6egijnrOPKK5QNAe5FJhfcxEelj4Y,22
- rslearn/models/copernicusfm_src/dynamic_hypernetwork.py,sha256=aWH5_PgmS8umIwRbGA42RuEx-stb13z1nBjyUhBtaN4,18218
- rslearn/models/copernicusfm_src/model_vit.py,sha256=3coM_xYILlFY2TJiACmQBSe2z16jSG80SVEad_3uB3Q,11396
- rslearn/models/copernicusfm_src/aurora/area.py,sha256=ssg9aXgoZktOsFcEXDEY9670aPUN_PHfCOfDMtpsz1s,1711
- rslearn/models/copernicusfm_src/aurora/fourier.py,sha256=bmoNV3P6CH8R6W2GFuVW8zT_frQVaL-PAgpN3aFS5fA,4414
- rslearn/models/copernicusfm_src/flexivit/patch_embed.py,sha256=EQgbsHBXDq0dTM9kApmmIqd5ZV2X9CPuA_AytbE51uM,9363
- rslearn/models/copernicusfm_src/flexivit/utils.py,sha256=tLBlzgT5bpwMSvyir46bPRWsMmRKh8s7VwMNuvSatGo,2192
- rslearn/models/copernicusfm_src/util/pos_embed.py,sha256=dUYuM_Nch2LB8jQ7UDTmFj36KWe4mM9bsY6dv5m_yZI,8511
  rslearn/models/detr/__init__.py,sha256=GGAnTIhyuvl34IRrJ_4gXjm_01OlM5rbQQ3c3TGfbK8,84
  rslearn/models/detr/box_ops.py,sha256=ORCF6EwMpMBB_VgQT05SjR47dCR2rN2gPhL_gsuUWJs,3236
  rslearn/models/detr/detr.py,sha256=otLmmyUm05e4MUyvQBoqo-RKnx3hbodTXvfPQWvuTEI,18737
@@ -94,7 +86,7 @@ rslearn/models/galileo/__init__.py,sha256=QQa0C29nuPRva0KtGiMHQ2ZB02n9SSwj_wqTKP
  rslearn/models/galileo/galileo.py,sha256=jUHA64YvVC3Fz5fevc_9dFJfZaINODRDrhSGLIiOZcw,21115
  rslearn/models/galileo/single_file_galileo.py,sha256=l5tlmmdr2eieHNH-M7rVIvcptkv0Fuk3vKXFW691ezA,56143
  rslearn/models/olmoearth_pretrain/__init__.py,sha256=AjRvbjBdadCdPh-EdvySH76sVAQ8NGQaJt11Tsn1D5I,36
- rslearn/models/olmoearth_pretrain/model.py,sha256=F-B1ym9UZuTPJ0OY15Jwb1TkNtr_EtAUlqI-tr_Z2uo,8352
+ rslearn/models/olmoearth_pretrain/model.py,sha256=I_RWFbwzO5yCWpEcEQP8PeiD8M1QpeMtVrjl15evIHU,10632
  rslearn/models/olmoearth_pretrain/norm.py,sha256=rHjFyWkpNLYMx9Ow7TsU-jGm9Sjx7FVf0p4R__ohx2c,3266
  rslearn/models/panopticon_data/sensors/drone.yaml,sha256=xqWS-_QMtJyRoWXJm-igoSur9hAmCFdqkPin8DT5qpw,431
  rslearn/models/panopticon_data/sensors/enmap.yaml,sha256=b2j6bSgYR2yKR9DRm3SPIzSVYlHf51ny_p-1B4B9sB4,13431
@@ -117,7 +109,7 @@ rslearn/tile_stores/tile_store.py,sha256=9AeYduDYPp_Ia2NMlq6osptpz_AFGIOQcLJrqZ_
  rslearn/train/__init__.py,sha256=fnJyY4aHs5zQqbDKSfXsJZXY_M9fbTsf7dRYaPwZr2M,30
  rslearn/train/data_module.py,sha256=K-nQgnOZn-KGq_G2pVOQFtWRrlWih0212i_bkXZ2bEE,23515
  rslearn/train/dataset.py,sha256=YiskNlYYcKqZxyw0Xzop1RGLbjMc-oK_rmhrSMVbTQg,51857
- rslearn/train/lightning_module.py,sha256=ge2z8trU7cMvxBeqUXC1tB44pftzitw7DRsIa6asBS4,14623
+ rslearn/train/lightning_module.py,sha256=ZLBiId3secUlVs2yzkN-mwVv4rMdh5TkdZYl4vv_Cw0,14466
  rslearn/train/optimizer.py,sha256=EKSqkmERalDA0bF32Gey7n6z69KLyaUWKlRsGJfKBmE,927
  rslearn/train/prediction_writer.py,sha256=YNs92QqPrqbREZXoE-aPa_oKQW0C9LvZAY129vyvI08,13288
  rslearn/train/scheduler.py,sha256=wFbmycMHgL6nRYeYalDjb0G8YVo8VD3T3sABS61jJ7c,2318
@@ -127,11 +119,11 @@ rslearn/train/callbacks/freeze_unfreeze.py,sha256=8fIzBMhCKKjpTffIeAdhdSjsBd8NjT
  rslearn/train/callbacks/gradients.py,sha256=4YqCf0tBb6E5FnyFYbveXfQFlgNPyxIXb2FCWX4-6qs,5075
  rslearn/train/callbacks/peft.py,sha256=wEOKsS3RhsRaZTXn_Kz2wdsZdIiIaZPdCJWtdJBurT8,4156
  rslearn/train/tasks/__init__.py,sha256=dag1u72x1-me6y0YcOubUo5MYZ0Tjf6-dOir9UeFNMs,75
- rslearn/train/tasks/classification.py,sha256=DI0_Wzs-9rNPWokvfxi1BIA6QyqNee42SpptQx82WHM,13182
- rslearn/train/tasks/detection.py,sha256=OoZzC8ZbmhyZ30tD-4cB-3Jj0AN6Y7hg0wk27rDguCE,22297
+ rslearn/train/tasks/classification.py,sha256=kahVdXPU6fDwDCdqlrjZGb9uA-PYG74DbQQ0kJUt-Eg,13186
+ rslearn/train/tasks/detection.py,sha256=9j9webusrjGexvUmZ7gl3NTBS63Qq511VFlB2WbLi5Y,22302
  rslearn/train/tasks/multi_task.py,sha256=dBWsnbvQ0CReNsbDHmZ_-sXjUE0H4S2OPcbJwMquG9g,6016
- rslearn/train/tasks/per_pixel_regression.py,sha256=tkVntKFzPlWFxdupPlMfhIRWlJ0UCgxg_FGhcA2-wjE,8649
- rslearn/train/tasks/regression.py,sha256=_PoxOfWNseujD4IWsuTL82fAAXgtco4WdfkNXQ68Nbg,11497
+ rslearn/train/tasks/per_pixel_regression.py,sha256=W8dbLyIiPgFI3gA_aZQX0pSFRWLP2v6tthsFbKhcDVg,8783
+ rslearn/train/tasks/regression.py,sha256=zZhrrZ1qxjrdLjKWC9McRivDXCcKiYfdLC-kaMeVkDc,11547
  rslearn/train/tasks/segmentation.py,sha256=xEni3CLDyetviv84XrpJg5xeJU87WHGFKTVfIeemGIY,21868
  rslearn/train/tasks/task.py,sha256=4w2xKL_U5JAtdj2dYoVv82h6xTtgUsA3IvIOcXyZecs,3887
  rslearn/train/transforms/__init__.py,sha256=BkCAzm4f-8TEhPIuyvCj7eJGh36aMkZFYlq-H_jkSvY,778
@@ -140,7 +132,7 @@ rslearn/train/transforms/crop.py,sha256=4jA3JJsC0ghicPHbfsNJ0d3WpChyvftY73ONiwQa
  rslearn/train/transforms/flip.py,sha256=lkTeje3T8gNn2gt6957morXq1fGNho-apSpCvNp0_9o,3480
  rslearn/train/transforms/mask.py,sha256=pwt33XXWLwldLiar-PgVgBQzQd1qfL18SPz3LYQMoYM,2111
  rslearn/train/transforms/normalize.py,sha256=uyv2hE5hw5B2kCRHa4JIx0tfowm-C7bgumwINvvfyts,5014
- rslearn/train/transforms/pad.py,sha256=EDswS9KYRSloM3DQlbCz6S0WYqFQJvI433qMqTtqrZw,4686
+ rslearn/train/transforms/pad.py,sha256=pj4Ql8GSRrhg8KOZTNPB40Qq8CoCCHdGo04uficik84,4698
  rslearn/train/transforms/select_bands.py,sha256=uDfD9G8Z4VTt88QZsjj1FB20QEmzSefhKf7uDXYn77M,2441
  rslearn/train/transforms/sentinel1.py,sha256=FrLaYZs2AjqWQCun8DTFtgo1l0xLxqaFKtDNIehtpDg,1913
  rslearn/train/transforms/transform.py,sha256=n1Qzqix2dVvej-Q7iPzHeOQbqH79IBlvqPoymxhNVpE,4446
@@ -159,9 +151,10 @@ rslearn/utils/spatial_index.py,sha256=eomJAUgzmjir8j9HZnSgQoJHwN9H0wGTjmJkMkLLfs
  rslearn/utils/sqlite_index.py,sha256=YGOJi66544e6JNtfSft6YIlHklFdSJO2duxQ4TJ2iu4,2920
  rslearn/utils/time.py,sha256=2ilSLG94_sxLP3y5RSV5L5CG8CoND_dbdzYEHVtN-I8,387
  rslearn/utils/vector_format.py,sha256=EIChYCL6GLOILS2TO2JBkca1TuaWsSubWv6iRS3P2ds,16139
- rslearn-0.0.11.dist-info/licenses/LICENSE,sha256=_99ZWPoLdlUbqZoSC5DF4ihiNwl5rTEmBaq2fACecdg,11352
- rslearn-0.0.11.dist-info/METADATA,sha256=jwB0ZZ-oLa1Y_1iuZRKCQoB4i3kOFDJ0xSeMTJP7zww,36297
- rslearn-0.0.11.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- rslearn-0.0.11.dist-info/entry_points.txt,sha256=doTBQ57NT7nq-dgYGgTTw6mafcGWb_4PWYtYR4rGm50,46
- rslearn-0.0.11.dist-info/top_level.txt,sha256=XDKo90WBH8P9RQumHxo0giLJsoufT4r9odv-WE6Ahk4,8
- rslearn-0.0.11.dist-info/RECORD,,
+ rslearn-0.0.12.dist-info/licenses/LICENSE,sha256=_99ZWPoLdlUbqZoSC5DF4ihiNwl5rTEmBaq2fACecdg,11352
+ rslearn-0.0.12.dist-info/licenses/NOTICE,sha256=wLPr6rwV_jCg-xEknNGwhnkfRfuoOE9MZ-lru2yZyLI,5070
+ rslearn-0.0.12.dist-info/METADATA,sha256=0jHeiz1QCT56zOws1CGGFVM9TotMOWIboQmGASdZAwY,36318
+ rslearn-0.0.12.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ rslearn-0.0.12.dist-info/entry_points.txt,sha256=doTBQ57NT7nq-dgYGgTTw6mafcGWb_4PWYtYR4rGm50,46
+ rslearn-0.0.12.dist-info/top_level.txt,sha256=XDKo90WBH8P9RQumHxo0giLJsoufT4r9odv-WE6Ahk4,8
+ rslearn-0.0.12.dist-info/RECORD,,
rslearn-0.0.12.dist-info/licenses/NOTICE ADDED
@@ -0,0 +1,115 @@
+ rslearn is released under Apache License 2.0
+ Copyright 2025 Allen Institute for AI
+
+ The following third party code is included in this repository.
+
+ ====================
+
+ rslearn.models.detr is adapted from https://github.com/facebookresearch/detr which is
+ released under Apache License 2.0.
+
+ Copyright 2020 - present, Facebook, Inc
+
+ ====================
+
+ rslearn.models.use_croma is copied from https://github.com/antofuller/CROMA
+
+ MIT License
+
+ Copyright (c) 2023 Anthony Fuller
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+ ====================
+
+ rslearn.models.galileo is adapted from https://github.com/nasaharvest/galileo
+
+ MIT License
+
+ Copyright (c) 2024 Presto Authors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+ ====================
+
+ rslearn.models.presto is adapted from https://github.com/nasaharvest/presto
+
+ MIT License
+
+ Copyright (c) 2024 Presto Authors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+
+ ====================
+
+ rslearn.models.prithvi includes code adapted from https://github.com/NASA-IMPACT/Prithvi-WxC
+
+ MIT License
+
+ Copyright (c) 2024 Inter Agency Implementation and Advanced Concepts
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.