RadGEEToolbox 1.7.3__py3-none-any.whl → 1.7.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- RadGEEToolbox/CollectionStitch.py +16 -3
- RadGEEToolbox/Export.py +16 -0
- RadGEEToolbox/GenericCollection.py +698 -202
- RadGEEToolbox/LandsatCollection.py +818 -218
- RadGEEToolbox/Sentinel1Collection.py +734 -204
- RadGEEToolbox/Sentinel2Collection.py +771 -219
- RadGEEToolbox/__init__.py +4 -4
- {radgeetoolbox-1.7.3.dist-info → radgeetoolbox-1.7.5.dist-info}/METADATA +6 -6
- radgeetoolbox-1.7.5.dist-info/RECORD +14 -0
- {radgeetoolbox-1.7.3.dist-info → radgeetoolbox-1.7.5.dist-info}/WHEEL +1 -1
- radgeetoolbox-1.7.3.dist-info/RECORD +0 -14
- {radgeetoolbox-1.7.3.dist-info → radgeetoolbox-1.7.5.dist-info}/licenses/LICENSE.txt +0 -0
- {radgeetoolbox-1.7.3.dist-info → radgeetoolbox-1.7.5.dist-info}/top_level.txt +0 -0
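The bulk of this diff is an API rename inside LandsatCollection.py (mirrored in the other collection modules): PascalCase methods and properties such as MaskWaterLandsat, MaskToWaterLandsat, PixelAreaSum, PixelAreaSumCollection, ExportProperties, CollectionStitch, and MosaicByDate gain camelCase replacements (maskWater, maskToWater, pixelAreaSum, pixelAreaSumCollection, exportProperties, collectionStitch, mosaicByDate), while the old names remain as thin wrappers that emit a DeprecationWarning and delegate to the new implementations. A minimal migration sketch, assuming an authenticated Earth Engine session and an existing LandsatCollection instance named `col` (the instance name is illustrative, not part of the diff):

```python
import warnings

# Promote the new DeprecationWarnings to errors while migrating, so any
# remaining PascalCase calls fail loudly in a test run.
warnings.simplefilter("error", DeprecationWarning)

water_only = col.collection.map(LandsatCollection.maskToWater)   # new name, silent
# col.collection.map(LandsatCollection.MaskToWaterLandsat)       # old name: warns (or raises under the filter above)
```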
@@ -1,6 +1,7 @@
import ee
import pandas as pd
import numpy as np
+import warnings


# ---- Reflectance scaling for Landsat Collection 2 SR ----

@@ -186,6 +187,14 @@ class LandsatCollection:
        self._PixelAreaSumCollection = None
        self._Reflectance = None

+    def __call__(self):
+        """
+        Allows the object to be called as a function, returning itself.
+        This enables property-like methods to be accessed with or without parentheses
+        (e.g., .mosaicByDate or .mosaicByDate()).
+        """
+        return self
+
    @staticmethod
    def image_dater(image):
        """
@@ -772,7 +781,7 @@ class LandsatCollection:
        return image.addBands(anomaly_image, overwrite=True).copyProperties(image)

    @staticmethod
-    def
+    def maskWater(image):
        """
        Masks water pixels based on Landsat image QA band.


@@ -787,9 +796,19 @@ class LandsatCollection:
        water_extract = qa.bitwiseAnd(WaterBitMask).eq(0)
        masked_image = image.updateMask(water_extract).copyProperties(image).set('system:time_start', image.get('system:time_start'))
        return masked_image
+
+    @staticmethod
+    def MaskWaterLandsat(image):
+        warnings.warn(
+            "MaskWaterLandsat is deprecated and will be removed in a future release. "
+            "Please use maskWater instead.",
+            DeprecationWarning,
+            stacklevel=2
+        )
+        return LandsatCollection.maskWater(image)

    @staticmethod
-    def
+    def maskWaterByNDWI(image, threshold, ng_threshold=None):
        """
        Masks water pixels (mask land and cloud pixels) for all bands based on NDWI and a set threshold where
        all pixels less than NDWI threshold are masked out. Can specify separate thresholds for Landsat 5 vs 8&9 images, where the threshold

@@ -825,9 +844,19 @@ class LandsatCollection:
            "threshold", threshold, 'system:time_start', image.get('system:time_start')
        )
        return water
-
+
    @staticmethod
-    def
+    def MaskWaterLandsatByNDWI(image, threshold, ng_threshold=None):
+        warnings.warn(
+            "MaskWaterLandsatByNDWI is deprecated and will be removed in a future release. "
+            "Please use maskWaterByNDWI instead.",
+            DeprecationWarning,
+            stacklevel=2
+        )
+        return LandsatCollection.maskWaterByNDWI(image, threshold, ng_threshold=ng_threshold)
+
+    @staticmethod
+    def maskToWater(image):
        """
        Masks image to water pixels by masking land and cloud pixels based on Landsat image QA band.


@@ -842,9 +871,19 @@ class LandsatCollection:
        water_extract = qa.bitwiseAnd(WaterBitMask).neq(0)
        masked_image = image.updateMask(water_extract).copyProperties(image).set('system:time_start', image.get('system:time_start'))
        return masked_image
+
+    @staticmethod
+    def MaskToWaterLandsat(image):
+        warnings.warn(
+            "MaskToWaterLandsat is deprecated and will be removed in a future release. "
+            "Please use maskToWater instead.",
+            DeprecationWarning,
+            stacklevel=2
+        )
+        return LandsatCollection.maskToWater(image)

    @staticmethod
-    def
+    def maskToWaterByNDWI(image, threshold, ng_threshold=None):
        """
        Masks water pixels using NDWI based on threshold. Can specify separate thresholds for Landsat 5 vs 8&9 images, where the threshold
        argument applies to Landsat 5 and the ng_threshold argument applies to Landsat 8&9

@@ -879,6 +918,50 @@ class LandsatCollection:
            "threshold", threshold, 'system:time_start', image.get('system:time_start')
        )
        return water
+
+    @staticmethod
+    def MaskToWaterLandsatByNDWI(image, threshold, ng_threshold=None):
+        warnings.warn(
+            "MaskToWaterLandsatNDWI is deprecated and will be removed in a future release. "
+            "Please use maskToWaterByNDWI instead.",
+            DeprecationWarning,
+            stacklevel=2
+        )
+        return LandsatCollection.maskToWaterByNDWI(image, threshold, ng_threshold=ng_threshold)
+
+    # @staticmethod
+    # def maskClouds(image):
+    #     """
+    #     Masks clouds pixels based on Landsat image QA band.
+
+    #     Args:
+    #         image (ee.Image): input ee.Image
+
+    #     Returns:
+    #         ee.Image: ee.Image with cloud pixels masked.
+    #     """
+    #     CloudBitMask = ee.Number(2).pow(3).int()
+    #     qa = image.select("QA_PIXEL")
+    #     cloud_extract = qa.bitwiseAnd(CloudBitMask).eq(0)
+    #     masked_image = image.updateMask(cloud_extract).copyProperties(image).set('system:time_start', image.get('system:time_start'))
+    #     return masked_image
+
+    # @staticmethod
+    # def maskShadows(image):
+    #     """
+    #     Masks shadows pixels based on Landsat image QA band.
+
+    #     Args:
+    #         image (ee.Image): input ee.Image
+
+    #     Returns:
+    #         ee.Image: ee.Image with cloud pixels masked.
+    #     """
+    #     ShadowBitMask = ee.Number(2).pow(4).int()
+    #     qa = image.select("QA_PIXEL")
+    #     shadow_extract = qa.bitwiseAnd(ShadowBitMask).eq(0)
+    #     masked_image = image.updateMask(shadow_extract).copyProperties(image).set('system:time_start', image.get('system:time_start'))
+    #     return masked_image

    @staticmethod
    def mask_via_band_fn(image, band_to_mask, band_for_mask, threshold, mask_above=False, add_band_to_original_image=False):
@@ -1026,7 +1109,7 @@ class LandsatCollection:
        return mask

    @staticmethod
-    def
+    def maskClouds(image):
        """
        Masks clouds baseed on Landsat 8 QA band.


@@ -1042,9 +1125,17 @@ class LandsatCollection:
        cloud_mask = qa.bitwiseAnd(cloudBitMask).eq(0)
        cirrus_mask = qa.bitwiseAnd(CirrusBitMask).eq(0)
        return image.updateMask(cloud_mask).updateMask(cirrus_mask)
+
+    @staticmethod
+    def maskL8clouds(image):
+        warnings.warn(
+            "maskL8clouds is deprecated and will be removed in a future release. Please use maskClouds instead.",
+            DeprecationWarning,
+            stacklevel=2)
+        return LandsatCollection.maskClouds(image)

    @staticmethod
-    def
+    def maskShadows(image):
        """
        Masks cloud shadows based on Landsat 8 QA band.


@@ -1058,6 +1149,14 @@ class LandsatCollection:
        qa = image.select("QA_PIXEL")
        shadow_mask = qa.bitwiseAnd(shadowBitMask).eq(0)
        return image.updateMask(shadow_mask)
+
+    @staticmethod
+    def maskL8shadows(image):
+        warnings.warn(
+            "maskL8shadows is deprecated and will be removed in a future release. Please use maskShadows instead.",
+            DeprecationWarning,
+            stacklevel=2)
+        return LandsatCollection.maskShadows(image)

    @staticmethod
    def temperature_bands(img):

@@ -1172,7 +1271,7 @@ class LandsatCollection:
        return out.copyProperties(img)

    @staticmethod
-    def
+    def pixelAreaSum(
        image, band_name, geometry, threshold=-1, scale=30, maxPixels=1e12
    ):
        """
@@ -1231,8 +1330,18 @@ class LandsatCollection:
        # Call to iterate the calculate_and_set_area function over the list of bands, starting with the original image
        final_image = ee.Image(bands.iterate(calculate_and_set_area, image))
        return final_image
+
+    @staticmethod
+    def PixelAreaSum(image, band_name, geometry, threshold=-1, scale=30, maxPixels=1e12):
+        warnings.warn(
+            "PixelAreaSum is deprecated and will be removed in a future release. "
+            "Please use pixelAreaSum instead.",
+            DeprecationWarning,
+            stacklevel=2
+        )
+        return LandsatCollection.pixelAreaSum(image, band_name, geometry, threshold, scale, maxPixels)

-    def
+    def pixelAreaSumCollection(
        self, band_name, geometry, threshold=-1, scale=30, maxPixels=1e12, output_type='ImageCollection', area_data_export_path=None):
        """
        Calculates the geodesic summation of area for pixels of interest (above a specific threshold)

@@ -1258,7 +1367,7 @@ class LandsatCollection:
        collection = self.collection
        # Area calculation for each image in the collection, using the PixelAreaSum function
        AreaCollection = collection.map(
-            lambda image: LandsatCollection.
+            lambda image: LandsatCollection.pixelAreaSum(
                image,
                band_name=band_name,
                geometry=geometry,

@@ -1274,17 +1383,27 @@ class LandsatCollection:

        # If an export path is provided, the area data will be exported to a CSV file
        if area_data_export_path:
-            LandsatCollection(collection=self._PixelAreaSumCollection).
+            LandsatCollection(collection=self._PixelAreaSumCollection).exportProperties(property_names=prop_names, file_path=area_data_export_path+'.csv')
        # Returning the result in the desired format based on output_type argument or raising an error for invalid input
        if output_type == 'ImageCollection' or output_type == 'ee.ImageCollection':
            return self._PixelAreaSumCollection
        elif output_type == 'LandsatCollection':
            return LandsatCollection(collection=self._PixelAreaSumCollection)
        elif output_type == 'DataFrame' or output_type == 'Pandas' or output_type == 'pd' or output_type == 'dataframe' or output_type == 'df':
-            return LandsatCollection(collection=self._PixelAreaSumCollection).
+            return LandsatCollection(collection=self._PixelAreaSumCollection).exportProperties(property_names=prop_names)
        else:
            raise ValueError("Incorrect `output_type`. The `output_type` argument must be one of the following: 'ImageCollection', 'ee.ImageCollection', 'LandsatCollection', 'DataFrame', 'Pandas', 'pd', 'dataframe', or 'df'.")

+    def PixelAreaSumCollection(
+        self, band_name, geometry, threshold=-1, scale=30, maxPixels=1e12, output_type='ImageCollection', area_data_export_path=None):
+        warnings.warn(
+            "PixelAreaSumCollection is deprecated and will be removed in a future release. "
+            "Please use pixelAreaSumCollection instead.",
+            DeprecationWarning,
+            stacklevel=2
+        )
+        return self.pixelAreaSumCollection(band_name, geometry, threshold, scale, maxPixels, output_type, area_data_export_path)
+
    @staticmethod
    def add_month_property_fn(image):
        """
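For orientation, the renamed area-summation entry point keeps the old signature, so migrating callers only change the casing. A usage sketch (the band name 'ndwi' and the geometry `aoi` are illustrative placeholders, not values taken from this diff):

```python
# Per-image summed pixel area above a threshold, returned as a pandas DataFrame.
area_df = col.pixelAreaSumCollection(
    band_name='ndwi',        # placeholder band name
    geometry=aoi,            # placeholder ee.Geometry
    threshold=0,
    scale=30,
    output_type='DataFrame',
)
```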
@@ -1388,7 +1507,12 @@ class LandsatCollection:
            return LandsatCollection(collection=ee.ImageCollection(paired.map(_pair_two)))

        # Preferred path: merge many singleband products into the parent
-        if not isinstance(collections, list) or len(collections) == 0:
+        # if not isinstance(collections, list) or len(collections) == 0:
+        #     raise ValueError("Provide a non-empty list of LandsatCollection objects in `collections`.")
+        if not isinstance(collections, list):
+            collections = [collections]
+
+        if len(collections) == 0:
            raise ValueError("Provide a non-empty list of LandsatCollection objects in `collections`.")

        result = self.collection

@@ -1515,7 +1639,7 @@ class LandsatCollection:
            self._dates = dates
        return self._dates

-    def
+    def exportProperties(self, property_names, file_path=None):
        """
        Fetches and returns specified properties from each image in the collection as a list, and returns a pandas DataFrame and optionally saves the results to a csv file.

@@ -1570,6 +1694,15 @@ class LandsatCollection:
            print(f"Properties saved to {file_path}")

        return df
+
+    def ExportProperties(self, property_names, file_path=None):
+        warnings.warn(
+            "ExportProperties is deprecated and will be removed in a future release. "
+            "Please use exportProperties instead.",
+            DeprecationWarning,
+            stacklevel=2
+        )
+        return self.exportProperties(property_names, file_path)

    def get_filtered_collection(self):
        """
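The property-export helper follows the same rename (ExportProperties → exportProperties) and still accepts a list of property names plus an optional CSV path. A usage sketch (the property names shown appear elsewhere in this module; the output path is illustrative):

```python
props_df = col.exportProperties(
    property_names=['Date_Filter', 'CLOUD_COVER'],
    file_path='landsat_properties.csv',  # optional; omit to just get the DataFrame
)
```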
@@ -3089,7 +3222,7 @@ class LandsatCollection:
            LandsatCollection: LandsatCollection image collection
        """
        if self._masked_water_collection is None:
-            col = self.collection.map(LandsatCollection.
+            col = self.collection.map(LandsatCollection.maskWater)
            self._masked_water_collection = LandsatCollection(collection=col)
        return self._masked_water_collection


@@ -3104,7 +3237,7 @@ class LandsatCollection:
            LandsatCollection: LandsatCollection image collection
        """
        col = self.collection.map(
-            lambda image: LandsatCollection.
+            lambda image: LandsatCollection.maskWaterByNDWI(
                image, threshold=threshold
            )
        )

@@ -3119,7 +3252,7 @@ class LandsatCollection:
            LandsatCollection: LandsatCollection image collection
        """
        if self._masked_to_water_collection is None:
-            col = self.collection.map(LandsatCollection.
+            col = self.collection.map(LandsatCollection.maskToWater)
            self._masked_to_water_collection = LandsatCollection(collection=col)
        return self._masked_to_water_collection


@@ -3134,7 +3267,7 @@ class LandsatCollection:
            LandsatCollection: LandsatCollection image collection
        """
        col = self.collection.map(
-            lambda image: LandsatCollection.
+            lambda image: LandsatCollection.maskToWaterByNDWI(
                image, threshold=threshold
            )
        )

@@ -3149,7 +3282,7 @@ class LandsatCollection:
            LandsatCollection: LandsatCollection image collection
        """
        if self._masked_clouds_collection is None:
-            col = self.collection.map(LandsatCollection.
+            col = self.collection.map(LandsatCollection.maskClouds)
            self._masked_clouds_collection = LandsatCollection(collection=col)
        return self._masked_clouds_collection


@@ -3162,7 +3295,7 @@ class LandsatCollection:
            LandsatCollection: LandsatCollection image collection
        """
        if self._masked_shadows_collection is None:
-            col = self.collection.map(LandsatCollection.
+            col = self.collection.map(LandsatCollection.maskShadows)
            self._masked_shadows_collection = LandsatCollection(collection=col)
        return self._masked_shadows_collection

@@ -3231,20 +3364,14 @@ class LandsatCollection:
            LandsatCollection: masked LandsatCollection image collection

        """
-
-
-        mask = ee.Image.constant(1).clip(polygon)
+        # Convert the polygon to a mask
+        mask = ee.Image.constant(1).clip(polygon)

-
-
+        # Update the mask of each image in the collection
+        masked_collection = self.collection.map(lambda img: img.updateMask(mask)\
+            .copyProperties(img).set('system:time_start', img.get('system:time_start')))

-
-        self._geometry_masked_collection = LandsatCollection(
-            collection=masked_collection
-        )
-
-        # Return the updated object
-        return self._geometry_masked_collection
+        return LandsatCollection(collection=masked_collection)

    def mask_out_polygon(self, polygon):
        """

@@ -3257,23 +3384,18 @@ class LandsatCollection:
            LandsatCollection: masked LandsatCollection image collection

        """
-
-
-        full_mask = ee.Image.constant(1)
+        # Convert the polygon to a mask
+        full_mask = ee.Image.constant(1)

-
-
+        # Use paint to set pixels inside polygon as 0
+        area = full_mask.paint(polygon, 0)

-
-
-
-        # Update the internal collection state
-        self._geometry_masked_out_collection = LandsatCollection(
-            collection=masked_collection
-        )
+        # Update the mask of each image in the collection
+        masked_collection = self.collection.map(lambda img: img.updateMask(area)\
+            .copyProperties(img).set('system:time_start', img.get('system:time_start')))

        # Return the updated object
-        return
+        return LandsatCollection(collection=masked_collection)

    def mask_halite(self, threshold, ng_threshold=None):
        """
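Both polygon-masking methods now build their masks inline and return a fresh LandsatCollection rather than caching the result on the instance. The underlying Earth Engine pattern, condensed for reference (`polygon` is a placeholder ee.Geometry, `col` a placeholder LandsatCollection):

```python
# mask_to_polygon: keep only pixels inside the polygon.
inside_mask = ee.Image.constant(1).clip(polygon)

# mask_out_polygon: paint the polygon to 0 so updateMask() drops pixels inside it.
outside_mask = ee.Image.constant(1).paint(polygon, 0)

kept = col.collection.map(lambda img: img.updateMask(inside_mask))
```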
@@ -3473,6 +3595,8 @@ class LandsatCollection:
                rightField='Date_Filter')
        else:
            raise ValueError(f'The chosen `join_method`: {join_method} does not match the options of "system:time_start" or "Date_Filter".')
+
+        native_projection = image_collection.first().select(target_band).projection()

        # for any matches during a join, set image as a property key called 'future_image'
        join = ee.Join.saveAll(matchesKey='future_image')

@@ -3516,7 +3640,7 @@ class LandsatCollection:
        # convert the image collection to an image of s_statistic values per pixel
        # where the s_statistic is the sum of partial s values
        # renaming the band as 's_statistic' for later usage
-        final_s_image = partial_s_col.sum().rename('s_statistic')
+        final_s_image = partial_s_col.sum().rename('s_statistic').setDefaultProjection(native_projection)


        ########## PART 2 - VARIANCE and Z-SCORE ##########

@@ -3579,7 +3703,7 @@ class LandsatCollection:
            mask = ee.Image(1).clip(geometry)
            final_image = final_image.updateMask(mask)

-        return final_image
+        return final_image.setDefaultProjection(native_projection)

    def sens_slope_trend(self, target_band=None, join_method='system:time_start', geometry=None):
        """

@@ -3615,6 +3739,8 @@ class LandsatCollection:
        if geometry is not None and not isinstance(geometry, ee.Geometry):
            raise ValueError(f'The chosen `geometry`: {geometry} is not a valid ee.Geometry object.')

+        native_projection = image_collection.first().select(target_band).projection()
+
        # Add Year Band (Time X-Axis)
        def add_year_band(image):
            # Handle user-defined date strings vs system time

@@ -3642,7 +3768,7 @@ class LandsatCollection:
            mask = ee.Image(1).clip(geometry)
            slope_band = slope_band.updateMask(mask)

-        return slope_band
+        return slope_band.setDefaultProjection(native_projection)

    def mask_via_band(self, band_to_mask, band_for_mask, threshold=-1, mask_above=True, add_band_to_original_image=False):
        """
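The trend methods now capture the native projection of the target band and reattach it to their outputs. This matters because Earth Engine composites and summed collections otherwise fall back to the default projection (WGS84 at a 1-degree nominal scale), which can make later reduceRegion or export calls operate at the wrong resolution. The pattern, as it appears in the diff:

```python
native_projection = image_collection.first().select(target_band).projection()

# Reattach the source projection so downstream reductions/exports use the right scale.
final_s_image = partial_s_col.sum().rename('s_statistic').setDefaultProjection(native_projection)
```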
@@ -3809,7 +3935,7 @@ class LandsatCollection:
        new_col = self.collection.filter(ee.Filter.eq("Date_Filter", img_date))
        return new_col.first()

-    def
+    def collectionStitch(self, img_col2):
        """
        Function to mosaic two LandsatCollection objects which share image dates.
        Mosaics are only formed for dates where both image collections have images.

@@ -3861,9 +3987,15 @@ class LandsatCollection:

        # Return a LandsatCollection instance
        return LandsatCollection(collection=new_col)
+
+    def CollectionStitch(self, img_col2):
+        warnings.warn(
+            "CollectionStitch is deprecated and will be removed in future versions. Please use the 'collectionStitch' property instead.",
+            DeprecationWarning, stacklevel=2)
+        return self.collectionStitch(img_col2)

    @property
-    def
+    def mosaicByDateDepr(self):
        """
        Property attribute function to mosaic collection images that share the same date.

@@ -3928,6 +4060,73 @@ class LandsatCollection:

        # Convert the list of mosaics to an ImageCollection
        return self._MosaicByDate
+
+    @property
+    def mosaicByDate(self):
+        """
+        Property attribute function to mosaic collection images that share the same date.
+
+        The property CLOUD_COVER for each image is used to calculate an overall mean,
+        which replaces the CLOUD_COVER property for each mosaiced image.
+        Server-side friendly.
+
+        NOTE: if images are removed from the collection from cloud filtering, you may have mosaics composed of only one image.
+
+        Returns:
+            LandsatCollection: LandsatCollection image collection with mosaiced imagery and mean CLOUD_COVER as a property
+        """
+        if self._MosaicByDate is None:
+            distinct_dates = self.collection.distinct("Date_Filter")
+
+            # Define a join to link images by Date_Filter
+            filter_date = ee.Filter.equals(leftField="Date_Filter", rightField="Date_Filter")
+            join = ee.Join.saveAll(matchesKey="date_matches")
+
+            # Apply the join
+            # Primary: Distinct dates collection
+            # Secondary: The full original collection
+            joined_col = ee.ImageCollection(join.apply(distinct_dates, self.collection, filter_date))
+
+            # Define the mosaicking function
+            def _mosaic_day(img):
+                # Recover the list of images for this day
+                daily_list = ee.List(img.get("date_matches"))
+                daily_col = ee.ImageCollection.fromImages(daily_list)
+
+                # Create the mosaic
+                mosaic = daily_col.mosaic().setDefaultProjection(img.projection())
+
+                # Calculate mean metadata properties
+                cloud_percentage = daily_col.aggregate_mean("CLOUD_COVER")
+
+                # Properties to preserve from the representative image
+                props_of_interest = [
+                    "SPACECRAFT_ID",
+                    "SENSOR_ID",
+                    "PROCESSING_LEVEL",
+                    "ACQUISITION_DATE",
+                    "system:time_start",
+                    "Date_Filter"
+                ]
+
+                # Return mosaic with properties set
+                return mosaic.copyProperties(img, props_of_interest).set({
+                    "CLOUD_COVER": cloud_percentage
+                })
+
+            # 5. Map the function and wrap the result
+            mosaiced_col = joined_col.map(_mosaic_day)
+            self._MosaicByDate = LandsatCollection(collection=mosaiced_col)
+
+        # Convert the list of mosaics to an ImageCollection
+        return self._MosaicByDate
+
+    @property
+    def MosaicByDate(self):
+        warnings.warn(
+            "MosaicByDate is deprecated and will be removed in future versions. Please use the 'mosaicByDate' property instead.",
+            DeprecationWarning, stacklevel=2)
+        return self.mosaicByDate

    @staticmethod
    def ee_to_df(
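Together with the new __call__ (which simply returns self), the renamed property can be read with or without parentheses, while the old property name warns and forwards. A quick usage sketch (`col` is an illustrative LandsatCollection instance):

```python
daily = col.mosaicByDate        # preferred camelCase property
daily_too = col.mosaicByDate()  # also works: the returned object's __call__ returns itself
legacy = col.MosaicByDate       # still works, but emits a DeprecationWarning
```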
@@ -4148,200 +4347,197 @@ class LandsatCollection:
        lines,
        line_names,
        reducer="mean",
-        dist_interval=
+        dist_interval=90,
        n_segments=None,
        scale=30,
        processing_mode='aggregated',
        save_folder_path=None,
        sampling_method='line',
-        point_buffer_radius=15
+        point_buffer_radius=15,
+        batch_size=10
    ):
        """
-        Computes and returns pixel values along transects
-
-
-
-        for maximum flexibility and performance.
-
-        There are two processing modes available, aggregated and iterative:
-        - 'aggregated' (default; suggested): Fast, server-side processing. Fetches all results
-          in a single request. Highly recommended. Returns a dictionary of pandas DataFrames.
-        - 'iterative': Slower, client-side loop that processes one image at a time.
-          Kept for backward compatibility (effectively depreciated). Returns None and saves individual CSVs.
-          This method is not recommended unless absolutely necessary, as it is less efficient and may be subject to client-side timeouts.
-
+        Computes and returns pixel values along transects. Provide a list of ee.Geometry.LineString objects and corresponding names, and the function will compute the specified reducer value
+        at regular intervals along each line for all images in the collection. Use `dist_interval` or `n_segments` to control sampling resolution. The user can choose between 'aggregated' mode (returns a dictionary of DataFrames) or 'iterative' mode (saves individual CSVs for each transect).
+        Alter `sampling_method` to sample directly along the line or via buffered points along the line. Buffered points can help capture more representative pixel values in heterogeneous landscapes, and the buffer radius can be adjusted via `point_buffer_radius`.
+
        Args:
-            lines (list):
-
-
-
-
-
-
-
-
-
-
-                each transect line into for sampling. This parameter overrides `dist_interval`.
-                Defaults to None.
-            scale (int, optional): The nominal scale in meters for the reduction,
-                which should typically match the pixel resolution of the imagery.
-                Defaults to 30.
-            processing_mode (str, optional): The method for processing the collection.
-                - 'aggregated' (default): Fast, server-side processing. Fetches all
-                  results in a single request. Highly recommended. Returns a dictionary
-                  of pandas DataFrames.
-                - 'iterative': Slower, client-side loop that processes one image at a
-                  time. Kept for backward compatibility. Returns None and saves
-                  individual CSVs.
-            save_folder_path (str, optional): If provided, the function will save the
-                resulting transect data to CSV files. The behavior depends on the
-                `processing_mode`:
-                - In 'aggregated' mode, one CSV is saved for each transect,
-                  containing all dates. (e.g., 'MyTransect_transects.csv').
-                - In 'iterative' mode, one CSV is saved for each date,
-                  containing all transects. (e.g., '2022-06-15_transects.csv').
-            sampling_method (str, optional): The geometric method used for sampling.
-                - 'line' (default): Reduces all pixels intersecting each small line
-                  segment. This can be unreliable and produce blank rows if
-                  `dist_interval` is too small relative to the `scale`.
-                - 'buffered_point': Reduces all pixels within a buffer around the
-                  midpoint of each line segment. This method is more robust and
-                  reliably avoids blank rows, but may not reduce all pixels along a line segment.
-            point_buffer_radius (int, optional): The radius in meters for the buffer
-                when `sampling_method` is 'buffered_point'. Defaults to 15.
+            lines (list): List of ee.Geometry.LineString objects.
+            line_names (list): List of string names for each transect.
+            reducer (str, optional): Reducer name. Defaults to 'mean'.
+            dist_interval (float, optional): Distance interval in meters. Defaults to 90.
+            n_segments (int, optional): Number of segments (overrides dist_interval).
+            scale (int, optional): Scale in meters. Defaults to 30.
+            processing_mode (str, optional): 'aggregated' or 'iterative'.
+            save_folder_path (str, optional): Path to save CSVs.
+            sampling_method (str, optional): 'line' or 'buffered_point'.
+            point_buffer_radius (int, optional): Buffer radius if using 'buffered_point'.
+            batch_size (int, optional): Images per request in 'aggregated' mode. Defaults to 10. Lower the value if you encounter a 'Too many aggregations' error.

        Returns:
-            dict or None:
-            - If `processing_mode` is 'aggregated', returns a dictionary where each
-              key is a transect name and each value is a pandas DataFrame. In the
-              DataFrame, the index is the distance along the transect and each
-              column represents an image date. Optionally saves CSV files if
-              `save_folder_path` is provided.
-            - If `processing_mode` is 'iterative', returns None as it saves
-              files directly.
-
-        Raises:
-            ValueError: If `lines` and `line_names` have different lengths, or if
-              an unknown reducer or processing mode is specified.
+            dict or None: Dictionary of DataFrames (aggregated) or None (iterative).
        """
-        # Validating inputs
        if len(lines) != len(line_names):
            raise ValueError("'lines' and 'line_names' must have the same number of elements.")
-
+
+        first_img = self.collection.first()
+        bands = first_img.bandNames().getInfo()
+        is_multiband = len(bands) > 1
+
+        # Setup robust dictionary for handling masked/zero values
+        default_val = -9999
+        dummy_dict = ee.Dictionary.fromLists(bands, ee.List.repeat(default_val, len(bands)))
+
+        if is_multiband:
+            reducer_cols = [f"{b}_{reducer}" for b in bands]
+            clean_names = bands
+            rename_keys = bands
+            rename_vals = reducer_cols
+        else:
+            reducer_cols = [reducer]
+            clean_names = [bands[0]]
+            rename_keys = bands
+            rename_vals = reducer_cols
+
+        print("Pre-computing transect geometries from input LineString(s)...")
+
+        master_transect_fc = ee.FeatureCollection([])
+        geom_error = 1.0
+
+        for i, line in enumerate(lines):
+            line_name = line_names[i]
+            length = line.length(geom_error)
+
+            eff_interval = length.divide(n_segments) if n_segments else dist_interval
+
+            distances = ee.List.sequence(0, length, eff_interval)
+            cut_lines = line.cutLines(distances, geom_error).geometries()
+
+            def create_feature(l):
+                geom = ee.Geometry(ee.List(l).get(0))
+                dist = ee.Number(ee.List(l).get(1))
+
+                final_geom = ee.Algorithms.If(
+                    ee.String(sampling_method).equals('buffered_point'),
+                    geom.centroid(geom_error).buffer(point_buffer_radius),
+                    geom
+                )
+
+                return ee.Feature(ee.Geometry(final_geom), {
+                    'transect_name': line_name,
+                    'distance': dist
+                })
+
+            line_fc = ee.FeatureCollection(cut_lines.zip(distances).map(create_feature))
+            master_transect_fc = master_transect_fc.merge(line_fc)
+
+        try:
+            ee_reducer = getattr(ee.Reducer, reducer)()
+        except AttributeError:
+            raise ValueError(f"Unknown reducer: '{reducer}'.")
+
+        def process_image(image):
+            date_val = image.get('Date_Filter')
+
+            # Map over points (Slower but Robust)
+            def reduce_point(f):
+                stats = image.reduceRegion(
+                    reducer=ee_reducer,
+                    geometry=f.geometry(),
+                    scale=scale,
+                    maxPixels=1e13
+                )
+                # Combine with defaults (preserves 0, handles masked)
+                safe_stats = dummy_dict.combine(stats, overwrite=True)
+                # Rename keys to match expected outputs (e.g. 'ndvi' -> 'ndvi_mean')
+                final_stats = safe_stats.rename(rename_keys, rename_vals)
+
+                return f.set(final_stats).set({'image_date': date_val})
+
+            return master_transect_fc.map(reduce_point)
+
+        export_cols = ['transect_name', 'distance', 'image_date'] + reducer_cols
+
        if processing_mode == 'aggregated':
-
-
-
-
-
-
-
-
-
-
-            #
-
-
-
-
-
-
-
-            # Determine effective distance interval based on n_segments or dist_interval
-            effective_dist_interval = ee.Algorithms.If(
-                n_segments,
-                length.divide(n_segments),
-                dist_interval or 30  # Defaults to 30 if both are None
-            )
-            # Generate distances along the line(s) for segmentation
-            distances = ee.List.sequence(0, length, effective_dist_interval)
-            # Segmenting the line into smaller lines at the specified distances
-            cut_lines_geoms = line.cutLines(distances, maxError).geometries()
-            # Function to create features with distance attributes
-            # Adjusted to ensure consistent return types
-            def set_dist_attr(l):
-                # l is a list: [geometry, distance]
-                # Extracting geometry portion of line
-                geom_segment = ee.Geometry(ee.List(l).get(0))
-                # Extracting distance value for attribute
-                distance = ee.Number(ee.List(l).get(1))
-                ### Determine final geometry based on sampling method
-                # If the sampling method is 'buffered_point',
-                # create a buffered point feature at the centroid of each segment,
-                # otherwise create a line feature
-                final_feature = ee.Algorithms.If(
-                    ee.String(sampling_method).equals('buffered_point'),
-                    # True Case: Create the buffered point feature
-                    ee.Feature(
-                        geom_segment.centroid(maxError).buffer(point_buffer_radius),
-                        {'distance': distance}
-                    ),
-                    # False Case: Create the line segment feature
-                    ee.Feature(geom_segment, {'distance': distance})
-                )
-                # Return either the line segment feature or the buffered point feature
-                return final_feature
-            # Creating a FeatureCollection of the cut lines with distance attributes
-            # Using map to apply the set_dist_attr function to each cut line geometry
-            line_features = ee.FeatureCollection(cut_lines_geoms.zip(distances).map(set_dist_attr))
-            # Reducing the image over the line features to get transect values
-            transect_fc = image.reduceRegions(
-                collection=line_features, reducer=ee_reducer, scale=scale
-            )
-            # Adding image date and line name properties to each feature
-            def set_props(feature):
-                return feature.set({'image_date': image_date, 'transect_name': line_name})
-            # Append to the list of all transects for this image
-            all_transects_for_image = all_transects_for_image.add(transect_fc.map(set_props))
-            # Combine all transect FeatureCollections into a single FeatureCollection and flatten
-            # Flatten is used to merge the list of FeatureCollections into one
-            return ee.FeatureCollection(all_transects_for_image).flatten()
-            # Map the function over the entire image collection and flatten the results
-            results_fc = ee.FeatureCollection(self.collection.map(get_transects_for_image)).flatten()
-            # Convert the results to a pandas DataFrame
-            df = LandsatCollection.ee_to_df(results_fc, remove_geom=True)
-            # Check if the DataFrame is empty
-            if df.empty:
-                print("Warning: No transect data was generated.")
+            collection_size = self.collection.size().getInfo()
+            print(f"Starting batch process of {collection_size} images...")
+
+            dfs = []
+            for i in range(0, collection_size, batch_size):
+                print(f" Processing image {i} to {min(i + batch_size, collection_size)}...")
+
+                batch_col = ee.ImageCollection(self.collection.toList(batch_size, i))
+                results_fc = batch_col.map(process_image).flatten()
+
+                # Dynamic Class Call for ee_to_df
+                df_batch = self.__class__.ee_to_df(results_fc, columns=export_cols, remove_geom=True)
+
+                if not df_batch.empty:
+                    dfs.append(df_batch)
+
+            if not dfs:
+                print("Warning: No transect data generated.")
                return {}
-
+
+            df = pd.concat(dfs, ignore_index=True)
+
+            # Post-Process & Split
            output_dfs = {}
-
+            for col in reducer_cols:
+                df[col] = pd.to_numeric(df[col], errors='coerce')
+                df[col] = df[col].replace(-9999, np.nan)
+
            for name in sorted(df['transect_name'].unique()):
-
-
-
-
-
-
-
-
-
-
-
-
+                line_df = df[df['transect_name'] == name]
+
+                for raw_col, band_name in zip(reducer_cols, clean_names):
+                    try:
+                        # Safety drop for duplicates
+                        line_df_clean = line_df.drop_duplicates(subset=['distance', 'image_date'])
+
+                        pivot = line_df_clean.pivot(index='distance', columns='image_date', values=raw_col)
+                        pivot.columns.name = 'Date'
+                        key = f"{name}_{band_name}"
+                        output_dfs[key] = pivot
+
+                        if save_folder_path:
+                            safe_key = "".join(x for x in key if x.isalnum() or x in "._-")
+                            fname = f"{save_folder_path}{safe_key}_transects.csv"
+                            pivot.to_csv(fname)
+                            print(f"Saved: {fname}")
+                    except Exception as e:
+                        print(f"Skipping pivot for {name}/{band_name}: {e}")
+
            return output_dfs

-        ### old, depreciated iterative client-side processing method ###
        elif processing_mode == 'iterative':
            if not save_folder_path:
-                raise ValueError("
+                raise ValueError("save_folder_path is required for iterative mode.")

            image_collection_dates = self.dates
            for i, date in enumerate(image_collection_dates):
                try:
                    print(f"Processing image {i+1}/{len(image_collection_dates)}: {date}")
-
-
-
-                    )
-
-
+                    image_list = self.collection.toList(self.collection.size())
+                    image = ee.Image(image_list.get(i))
+
+                    fc_result = process_image(image)
+                    df = self.__class__.ee_to_df(fc_result, columns=export_cols, remove_geom=True)
+
+                    if not df.empty:
+                        for col in reducer_cols:
+                            df[col] = pd.to_numeric(df[col], errors='coerce')
+                            df[col] = df[col].replace(-9999, np.nan)
+
+                        fname = f"{save_folder_path}{date}_transects.csv"
+                        df.to_csv(fname, index=False)
+                        print(f"Saved: {fname}")
+                    else:
+                        print(f"Skipping {date}: No data.")
                except Exception as e:
-                    print(f"
+                    print(f"Error processing {date}: {e}")
        else:
-            raise ValueError("
+            raise ValueError("processing_mode must be 'iterative' or 'aggregated'.")

    @staticmethod
    def extract_zonal_stats_from_buffer(
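The rewritten 'aggregated' transect mode replaces one monolithic request with fixed-size batches: the collection is sliced with toList before the per-image reducer is mapped and the features are pulled client-side, which is what the new batch_size argument throttles. The core loop, reduced to its essentials (collection, batch_size, and process_image refer to the names used in the diff):

```python
collection_size = collection.size().getInfo()
dfs = []
for i in range(0, collection_size, batch_size):
    # Take at most batch_size images starting at offset i, then reduce server-side.
    batch_col = ee.ImageCollection(collection.toList(batch_size, i))
    results_fc = batch_col.map(process_image).flatten()
    dfs.append(LandsatCollection.ee_to_df(results_fc, remove_geom=True))
```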
@@ -4445,7 +4641,8 @@ class LandsatCollection:
        buffer_size=1,
        tileScale=1,
        dates=None,
-        file_path=None
+        file_path=None,
+        unweighted=False
    ):
        """
        Iterates over a collection of images and extracts spatial statistics (defaults to mean) for a given list of geometries or coordinates. Individual statistics are calculated for each geometry or coordinate provided.

@@ -4464,6 +4661,7 @@ class LandsatCollection:
            tileScale (int, optional): A scaling factor to reduce aggregation tile size. Defaults to 1.
            dates (list, optional): A list of date strings ('YYYY-MM-DD') for filtering the collection, such that only images from these dates are included for zonal statistic retrieval. Defaults to None, which uses all dates in the collection.
            file_path (str, optional): File path to save the output CSV.
+            unweighted (bool, optional): Whether to use unweighted reducer. Defaults to False.

        Returns:
            pd.DataFrame or None: A pandas DataFrame with dates as the index and coordinate names
@@ -4570,6 +4768,9 @@ class LandsatCollection:
            reducer = getattr(ee.Reducer, reducer_type)()
        except AttributeError:
            raise ValueError(f"Unknown reducer_type: '{reducer_type}'.")
+
+        if unweighted:
+            reducer = reducer.unweighted()

        # Define the function to map over the image collection
        def calculate_stats_for_image(image):
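The new unweighted flag changes how zonal statistics treat partially covered pixels: Earth Engine's reducers weight pixels by their fractional overlap with the geometry by default, and calling unweighted() makes every intersecting pixel count equally. The toggle itself is a one-liner, as used in the diff:

```python
reducer = getattr(ee.Reducer, reducer_type)()
if unweighted:
    # Count every intersecting pixel equally instead of weighting by coverage fraction.
    reducer = reducer.unweighted()
```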
@@ -4631,6 +4832,394 @@ class LandsatCollection:
|
|
|
4631
4832
|
print(f"Zonal stats saved to {file_path}.csv")
|
|
4632
4833
|
return
|
|
4633
4834
|
return pivot_df
|
|
4835
|
+
|
|
4836
|
+
def multiband_zonal_stats(
|
|
4837
|
+
self,
|
|
4838
|
+
geometry,
|
|
4839
|
+
bands,
|
|
4840
|
+
reducer_types,
|
|
4841
|
+
scale=30,
|
|
4842
|
+
geometry_name='geom',
|
|
4843
|
+
dates=None,
|
|
4844
|
+
include_area=False,
|
|
4845
|
+
file_path=None,
|
|
4846
|
+
unweighted=False
|
|
4847
|
+
):
|
|
4848
|
+
"""
|
|
4849
|
+
Calculates zonal statistics for multiple bands over a single geometry for each image in the collection.
|
|
4850
|
+
Allows for specifying different reducers for different bands. Optionally includes the geometry area.
|
|
4851
|
+
|
|
4852
|
+
Args:
|
|
4853
|
+
geometry (ee.Geometry or ee.Feature): The single geometry to calculate statistics for.
|
|
4854
|
+
bands (list of str): A list of band names to include in the analysis.
|
|
4855
|
+
reducer_types (str or list of str): A single reducer name (e.g., 'mean') to apply to all bands,
|
|
4856
|
+
or a list of reducer names matching the length of the 'bands' list to apply specific reducers
|
|
4857
|
+
to specific bands.
|
|
4858
|
+
scale (int, optional): The scale in meters for the reduction. Defaults to 30.
|
|
4859
|
+
geometry_name (str, optional): A name for the geometry, used in column naming. Defaults to 'geom'.
|
|
4860
|
+
dates (list of str, optional): A list of date strings ('YYYY-MM-DD') to filter the collection.
|
|
4861
|
+
Defaults to None (processes all images).
|
|
4862
|
+
include_area (bool, optional): If True, adds a column with the area of the geometry in square meters.
|
|
4863
|
+
Defaults to False.
|
|
4864
|
+
file_path (str, optional): If provided, saves the resulting DataFrame to a CSV file at this path.
|
|
4865
|
+
unweighted (bool, optional): Whether to use unweighted reducers. Defaults to False.
|
|
4866
|
+
|
|
4867
|
+
Returns:
|
|
4868
|
+
pd.DataFrame: A pandas DataFrame indexed by Date, with columns named as '{band}_{geometry_name}_{reducer}'.
|
|
4869
|
+
"""
|
|
4870
|
+
# 1. Input Validation and Setup
|
|
4871
|
+
if not isinstance(geometry, (ee.Geometry, ee.Feature)):
|
|
4872
|
+
raise ValueError("The `geometry` argument must be an ee.Geometry or ee.Feature.")
|
|
4873
|
+
|
|
4874
|
+
region = geometry.geometry() if isinstance(geometry, ee.Feature) else geometry
|
|
4875
|
+
|
|
4876
|
+
if isinstance(bands, str):
|
|
4877
|
+
bands = [bands]
|
|
4878
|
+
if not isinstance(bands, list):
|
|
4879
|
+
raise ValueError("The `bands` argument must be a string or a list of strings.")
|
|
4880
|
+
|
|
4881
|
+
# Handle reducer_types (str vs list)
|
|
4882
|
+
if isinstance(reducer_types, str):
|
|
4883
|
+
reducers_list = [reducer_types] * len(bands)
|
|
4884
|
+
elif isinstance(reducer_types, list):
|
|
4885
|
+
if len(reducer_types) != len(bands):
|
|
4886
|
+
raise ValueError("If `reducer_types` is a list, it must have the same length as `bands`.")
|
|
4887
|
+
reducers_list = reducer_types
|
|
4888
|
+
else:
|
|
4889
|
+
raise ValueError("`reducer_types` must be a string or a list of strings.")
|
|
4890
|
+
|
|
4891
|
+
# 2. Filter Collection
|
|
4892
|
+
processing_col = self.collection
|
|
4893
|
+
|
|
4894
|
+
if dates:
|
|
4895
|
+
processing_col = processing_col.filter(ee.Filter.inList('Date_Filter', dates))
|
|
4896
|
+
|
|
4897
|
+
processing_col = processing_col.select(bands)
|
|
4898
|
+
|
|
4899
|
+
# 3. Pre-calculate Area (if requested)
|
|
4900
|
+
area_val = None
|
|
4901
|
+
area_col_name = f"{geometry_name}_area_m2"
|
|
4902
|
+
if include_area:
|
|
4903
|
+
# Calculate geodesic area in square meters with maxError of 1m
|
|
4904
|
+
area_val = region.area(1)
|
|
4905
|
+
|
|
4906
|
+
# 4. Define the Reduction Logic
|
|
4907
|
+
def calculate_multiband_stats(image):
|
|
4908
|
+
# Base feature with date property
|
|
4909
|
+
date_val = image.get('Date_Filter')
|
|
4910
|
+
feature = ee.Feature(None, {'Date': date_val})
|
|
4911
|
+
|
|
4912
|
+
# If requested, add the static area value to every feature
|
|
4913
|
+
if include_area:
|
|
4914
|
+
feature = feature.set(area_col_name, area_val)
|
|
4915
|
+
|
|
4916
|
+
unique_reducers = list(set(reducers_list))
|
|
4917
|
+
|
|
4918
|
+
# OPTIMIZED PATH: Single reducer type for all bands
|
|
4919
|
+
if len(unique_reducers) == 1:
|
|
4920
|
+
r_type = unique_reducers[0]
|
|
4921
|
+
try:
|
|
4922
|
+
reducer = getattr(ee.Reducer, r_type)()
|
|
4923
|
+
except AttributeError:
|
|
4924
|
+
reducer = ee.Reducer.mean()
|
|
4925
|
+
|
|
4926
|
+
if unweighted:
|
|
4927
|
+
reducer = reducer.unweighted()
|
|
4928
|
+
|
|
4929
|
+
stats = image.reduceRegion(
|
|
4930
|
+
reducer=reducer,
|
|
4931
|
+
geometry=region,
|
|
4932
|
+
scale=scale,
|
|
4933
|
+
maxPixels=1e13
|
|
4934
|
+
)
|
|
4935
|
+
|
|
4936
|
+
for band in bands:
|
|
4937
|
+
col_name = f"{band}_{geometry_name}_{r_type}"
|
|
4938
|
+
val = stats.get(band)
|
|
4939
|
+
feature = feature.set(col_name, val)
|
|
4940
|
+
|
|
4941
|
+
# ITERATIVE PATH: Different reducers for different bands
|
|
4942
|
+
else:
|
|
4943
|
+
for band, r_type in zip(bands, reducers_list):
|
|
4944
|
+
try:
|
|
4945
|
+
reducer = getattr(ee.Reducer, r_type)()
|
|
4946
|
+
except AttributeError:
|
|
4947
|
+
reducer = ee.Reducer.mean()
|
|
4948
|
+
|
|
4949
|
+
if unweighted:
|
|
4950
|
+
reducer = reducer.unweighted()
|
|
4951
|
+
|
|
4952
|
+
stats = image.select(band).reduceRegion(
|
|
4953
|
+
reducer=reducer,
|
|
4954
|
+
geometry=region,
|
|
4955
|
+
scale=scale,
|
|
4956
|
+
maxPixels=1e13
|
|
4957
|
+
)
|
|
4958
|
+
|
|
4959
|
+
val = stats.get(band)
|
|
4960
|
+
col_name = f"{band}_{geometry_name}_{r_type}"
|
|
4961
|
+
feature = feature.set(col_name, val)
|
|
4962
|
+
|
|
4963
|
+
return feature
|
|
4964
|
+
|
|
4965
|
+
# 5. Execute Server-Side Mapping (with explicit Cast)
|
|
4966
|
+
results_fc = ee.FeatureCollection(processing_col.map(calculate_multiband_stats))
|
|
4967
|
+
|
|
4968
|
+
# 6. Client-Side Conversion
|
|
4969
|
+
try:
|
|
4970
|
+
df = LandsatCollection.ee_to_df(results_fc, remove_geom=True)
|
|
4971
|
+
except Exception as e:
|
|
4972
|
+
raise RuntimeError(f"Failed to convert Earth Engine results to DataFrame. Error: {e}")
|
|
4973
|
+
|
|
4974
|
+
if df.empty:
|
|
4975
|
+
print("Warning: No results returned. Check if the geometry intersects the imagery or if dates are valid.")
|
|
4976
|
+
return pd.DataFrame()
|
|
4977
|
+
|
|
4978
|
+
# 7. Formatting & Reordering
|
|
4979
|
+
if 'Date' in df.columns:
|
|
4980
|
+
df['Date'] = pd.to_datetime(df['Date'])
|
|
4981
|
+
df = df.sort_values('Date').set_index('Date')
|
|
4982
|
+
|
|
4983
|
+
# Construct the expected column names in the exact order of the input lists
|
|
4984
|
+
expected_order = [f"{band}_{geometry_name}_{r_type}" for band, r_type in zip(bands, reducers_list)]
|
|
4985
|
+
|
|
4986
|
+
# If area was included, append it to the END of the list
|
|
4987
|
+
if include_area:
|
|
4988
|
+
expected_order.append(area_col_name)
|
|
4989
|
+
|
|
4990
|
+
# Reindex the DataFrame to match this order.
|
|
4991
|
+
existing_cols = [c for c in expected_order if c in df.columns]
|
|
4992
|
+
df = df[existing_cols]
|
|
4993
|
+
|
|
4994
|
+
# 8. Export (Optional)
|
|
4995
|
+
if file_path:
|
|
4996
|
+
if not file_path.lower().endswith('.csv'):
|
|
4997
|
+
file_path += '.csv'
|
|
4998
|
+
try:
|
|
4999
|
+
df.to_csv(file_path)
|
|
5000
|
+
print(f"Multiband zonal stats saved to {file_path}")
|
|
5001
|
+
except Exception as e:
|
+                print(f"Error saving file to {file_path}: {e}")
+
+        return df
+
+    def sample(
+        self,
+        locations,
+        band=None,
+        scale=None,
+        location_names=None,
+        dates=None,
+        file_path=None,
+        tileScale=1
+    ):
+        """
+        Extracts time-series pixel values for a list of locations.
+
+
+        Args:
+            locations (list, tuple, ee.Geometry, or ee.FeatureCollection): Input points.
+            band (str, optional): The name of the band to sample. Defaults to the first band.
+            scale (int, optional): Scale in meters. Defaults to 30 if None.
+            location_names (list of str, optional): Custom names for locations.
+            dates (list, optional): Date filter ['YYYY-MM-DD'].
+            file_path (str, optional): CSV export path.
+            tileScale (int, optional): Aggregation tile scale. Defaults to 1.
+
+        Returns:
+            pd.DataFrame (or CSV if file_path is provided): DataFrame indexed by Date, columns by Location.
+        """
+        col = self.collection
+        if dates:
+            col = col.filter(ee.Filter.inList('Date_Filter', dates))
+
+        first_img = col.first()
+        available_bands = first_img.bandNames().getInfo()
+
+        if band:
+            if band not in available_bands:
+                raise ValueError(f"Band '{band}' not found. Available: {available_bands}")
+            target_band = band
+        else:
+            target_band = available_bands[0]
+
+        processing_col = col.select([target_band])
+
+        def set_name(f):
+            name = ee.Algorithms.If(
+                f.get('geo_name'), f.get('geo_name'),
+                ee.Algorithms.If(f.get('name'), f.get('name'),
+                ee.Algorithms.If(f.get('system:index'), f.get('system:index'), 'unnamed'))
+            )
+            return f.set('geo_name', name)
+
+        if isinstance(locations, (ee.FeatureCollection, ee.Feature)):
+            features = ee.FeatureCollection(locations)
+        elif isinstance(locations, ee.Geometry):
+            lbl = location_names[0] if (location_names and location_names[0]) else 'Point_1'
+            features = ee.FeatureCollection([ee.Feature(locations).set('geo_name', lbl)])
+        elif isinstance(locations, tuple) and len(locations) == 2:
+            lbl = location_names[0] if location_names else 'Location_1'
+            features = ee.FeatureCollection([ee.Feature(ee.Geometry.Point(locations), {'geo_name': lbl})])
+        elif isinstance(locations, list):
+            if all(isinstance(i, tuple) for i in locations):
+                names = location_names if location_names else [f"Loc_{i+1}" for i in range(len(locations))]
+                features = ee.FeatureCollection([
+                    ee.Feature(ee.Geometry.Point(p), {'geo_name': str(n)}) for p, n in zip(locations, names)
+                ])
+            elif all(isinstance(i, ee.Geometry) for i in locations):
+                names = location_names if location_names else [f"Geom_{i+1}" for i in range(len(locations))]
+                features = ee.FeatureCollection([
+                    ee.Feature(g, {'geo_name': str(n)}) for g, n in zip(locations, names)
+                ])
+            else:
+                raise ValueError("List must contain (lon, lat) tuples or ee.Geometry objects.")
+        else:
+            raise TypeError("Invalid locations input.")
+
+        features = features.map(set_name)
+
+
+        def sample_image(img):
+            date = img.get('Date_Filter')
+            use_scale = scale if scale is not None else 30
+
+
+            default_dict = ee.Dictionary({target_band: -9999})
+
+            def extract_point(f):
+                stats = img.reduceRegion(
+                    reducer=ee.Reducer.first(),
+                    geometry=f.geometry(),
+                    scale=use_scale,
+                    tileScale=tileScale
+                )
+
+                # Combine dictionaries.
+                # If stats has 'target_band' (even if 0), it overwrites -9999.
+                # If stats is empty (masked), -9999 remains.
+                safe_stats = default_dict.combine(stats, overwrite=True)
+                val = safe_stats.get(target_band)
+
+                return f.set({
+                    target_band: val,
+                    'image_date': date
+                })
+
+            return features.map(extract_point)
+
+        # Flatten the results
+        flat_results = processing_col.map(sample_image).flatten()
+
+        df = LandsatCollection.ee_to_df(
+            flat_results,
+            columns=['image_date', 'geo_name', target_band],
+            remove_geom=True
+        )
+
+        if df.empty:
+            print("Warning: No data returned.")
+            return pd.DataFrame()
+
+        # 6. Clean and Pivot
+        df[target_band] = pd.to_numeric(df[target_band], errors='coerce')
+
+        # Filter out ONLY the sentinel value (-9999), preserving 0.
+        df = df[df[target_band] != -9999]
+
+        if df.empty:
+            print(f"Warning: All data points were masked (NoData) for band '{target_band}'.")
+            return pd.DataFrame()
+
+        pivot_df = df.pivot(index='image_date', columns='geo_name', values=target_band)
+        pivot_df.index.name = 'Date'
+        pivot_df.columns.name = None
+        pivot_df = pivot_df.reset_index()
+
+        if file_path:
+            if not file_path.lower().endswith('.csv'):
+                file_path += '.csv'
+            pivot_df.to_csv(file_path, index=False)
+            print(f"Sampled data saved to {file_path}")
+            return None
+
+        return pivot_df
+
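A minimal usage sketch for the new sample() method follows; the collection object, coordinates, labels, and dates are illustrative assumptions, not values taken from the diff:

    # Assumes `collection` is an already-constructed LandsatCollection whose images
    # cover the (illustrative) points and dates below.
    points = [(-112.25, 41.10), (-112.40, 41.22)]      # (lon, lat) tuples
    ts = collection.sample(
        locations=points,
        location_names=['SiteA', 'SiteB'],             # hypothetical labels
        scale=30,                                      # meters
        dates=['2023-06-01', '2023-07-03'],            # optional 'Date_Filter' values
    )
    # ts contains a 'Date' column plus one column per location; fully masked pixels
    # are dropped via the -9999 sentinel. Pass file_path='ts.csv' to write a CSV instead.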
+    def multiband_sample(
+        self,
+        location,
+        scale=30,
+        file_path=None
+    ):
+        """
+        Extracts ALL band values for a SINGLE location across the entire collection.
+
+        Args:
+            location (tuple or ee.Geometry): A single (lon, lat) tuple OR ee.Geometry.
+            scale (int, optional): Scale in meters. Defaults to 30.
+            file_path (str, optional): Path to save CSV.
+
+        Returns:
+            pd.DataFrame: DataFrame indexed by Date, with columns for each Band.
+        """
+        if isinstance(location, tuple) and len(location) == 2:
+            geom = ee.Geometry.Point(location)
+        elif isinstance(location, ee.Geometry):
+            geom = location
+        else:
+            raise ValueError("Location must be a single (lon, lat) tuple or ee.Geometry.")
+
+        first_img = self.collection.first()
+        band_names = first_img.bandNames()
+
+        # Create a dictionary of {band_name: -9999}
+        # fill missing values so the Feature structure is consistent
+        dummy_values = ee.List.repeat(-9999, band_names.length())
+        default_dict = ee.Dictionary.fromLists(band_names, dummy_values)
+
+        def get_all_bands(img):
+            date = img.get('Date_Filter')
+
+            # reduceRegion returns a Dictionary.
+            # If a pixel is masked, that band key is missing from 'stats'.
+            stats = img.reduceRegion(
+                reducer=ee.Reducer.first(),
+                geometry=geom,
+                scale=scale,
+                maxPixels=1e13
+            )
+
+            # Combine stats with defaults.
+            # overwrite=True means real data (stats) overwrites the -9999 defaults.
+            complete_stats = default_dict.combine(stats, overwrite=True)
+
+            return ee.Feature(None, complete_stats).set('Date', date)
+
+        fc = ee.FeatureCollection(self.collection.map(get_all_bands))
+
+        df = LandsatCollection.ee_to_df(fc, remove_geom=True)
+
+        if df.empty:
+            print("Warning: No data found.")
+            return pd.DataFrame()
+
+        # 6. Cleanup
+        if 'Date' in df.columns:
+            df['Date'] = pd.to_datetime(df['Date'])
+            df = df.set_index('Date').sort_index()
+
+        # Replace our sentinel -9999 with proper NaNs
+        df = df.replace(-9999, np.nan)
+
+        # 7. Export
+        if file_path:
+            if not file_path.lower().endswith('.csv'):
+                file_path += '.csv'
+            df.to_csv(file_path)
+            print(f"Multiband sample saved to {file_path}")
+            return None
+
+        return df

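Similarly, a hedged sketch of multiband_sample(); the point and output path are placeholders, not part of the diff:

    # Assumes `collection` is an existing LandsatCollection covering this (illustrative) point.
    spectra = collection.multiband_sample(location=(-112.25, 41.10), scale=30)
    # spectra is indexed by Date with one column per band; masked pixels appear as NaN
    # because the -9999 sentinel is replaced above. Supplying file_path (e.g.
    # file_path='spectra.csv') writes the table to disk and returns None.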
    def export_to_asset_collection(
        self,
@@ -4641,7 +5230,8 @@ class LandsatCollection:
        filename_prefix="",
        crs=None,
        max_pixels=int(1e13),
-        description_prefix="export"
+        description_prefix="export",
+        overwrite=False
    ):
        """
        Exports an image collection to a Google Earth Engine asset collection. The asset collection will be created if it does not already exist,
@@ -4656,10 +5246,12 @@ class LandsatCollection:
            crs (str, optional): The coordinate reference system. Defaults to None, which will use the image's CRS.
            max_pixels (int, optional): The maximum number of pixels. Defaults to int(1e13).
            description_prefix (str, optional): The description prefix. Defaults to "export".
+            overwrite (bool, optional): Whether to overwrite existing assets. Defaults to False.

        Returns:
            None: (queues export tasks)
        """
+        overwrite = overwrite
        ic = self.collection
        if dates is None:
            dates = self.dates
@@ -4673,6 +5265,14 @@ class LandsatCollection:
            asset_id = asset_collection_path + "/" + filename_prefix + date_str
            desc = description_prefix + "_" + filename_prefix + date_str

+            if overwrite:
+                try:
+                    ee.data.deleteAsset(asset_id)
+                    print(f"Overwriting: Deleted existing asset {asset_id}")
+                except ee.EEException:
+                    # Asset does not exist, so nothing to delete. Proceed safely.
+                    pass
+
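A brief sketch of how the new overwrite flag might be invoked; the asset path value and prefix are placeholders, and asset_collection_path is assumed to be the path argument used in the loop body above:

    # Assumes `collection` is an existing LandsatCollection and the target asset
    # folder already exists; the project path below is a placeholder.
    collection.export_to_asset_collection(
        asset_collection_path='projects/your-project/assets/landsat_exports',
        filename_prefix='scene_',
        overwrite=True,   # deletes any same-named asset before queuing the export task
    )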
            params = {
                'image': img,
                'description': desc,