geebeam-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
geebeam-0.1.0/.gitignore ADDED
@@ -0,0 +1,207 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+ #poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ #pdm.lock
+ #pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ #pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
geebeam-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Kylen Solvik
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
geebeam-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,158 @@
+ Metadata-Version: 2.4
+ Name: geebeam
+ Version: 0.1.0
+ Summary: Earth Engine + Apache Beam
+ License-Expression: MIT
+ License-File: LICENSE
+ Requires-Python: >=3.10
+ Requires-Dist: apache-beam[gcp]>=2.71.0
+ Requires-Dist: dill>=0.4.1
+ Requires-Dist: earthengine-api>=1.7.10
+ Requires-Dist: geopandas>=1.0.1
+ Requires-Dist: grpcio>=1.62.3
+ Requires-Dist: tensorflow-data-validation>=1.16
+ Requires-Dist: tensorflow-metadata>=1.16
+ Requires-Dist: tensorflow>=2.16
+ Requires-Dist: tfx-bsl>=1.16
+ Description-Content-Type: text/markdown
+
+ # GeeBeam
+
+ [![Testing + Linting](https://github.com/kysolvik/geebeam/actions/workflows/pytest-lint.yml/badge.svg)](https://github.com/kysolvik/geebeam/actions/workflows/pytest-lint.yml)
+
+ Google Earth Engine + Apache Beam for building geospatial training datasets
+
+ ## Purpose:
+
+ GeeBeam is a lightweight library for building and executing Apache Beam pipelines that download data "chips" from Google Earth Engine and write them to TensorFlow records for model training.
+
+ The user defines the Earth Engine images they want to download chips from using the Python earthengine-api. geebeam then serializes the graph definitions of the images so they can be passed to the Beam workers.
+
+ The pipelines can be run locally or on Google Cloud Dataflow. (Note: local jobs are currently limited to short-running tasks due to a gRPC "Deadline Exceeded" error.)
+
+
+ ## Install:
+
+ ```bash
+ pip install geebeam
+ ```
+
+ ## Examples:
+
+ Here we'll create a burned area mask for 2024 using the MCD64A1 product.
+ For example, this could be the target variable for a burn risk model.
+
+ ```python
+ import ee
+ import geebeam
+ import google.auth
+
+ # Get the default project ID from the environment (or set PROJECT_ID manually)
+ PROJECT_ID = google.auth.default()[1]
+
+ # Initialize the ee client with your GCP project ID
+ ee.Initialize(project=PROJECT_ID)
+
+ # Build image for download
+ burned_2024 = (ee.ImageCollection('MODIS/061/MCD64A1')
+                .select('BurnDate')
+                .filter(ee.Filter.calendarRange(2024, 2024, 'year'))
+                .min()
+                .gt(0)
+                .rename(['Burn'])
+                )
+
+ # Building and triggering the pipeline is done with a single command:
+ geebeam.run(
+     image_list=[burned_2024],
+     project=PROJECT_ID,
+     patch_size=128,  # Pixel dimensions in each direction
+     scale=500,  # Final export resolution in meters
+     n_sample=10,  # Number of tiles to sample
+     validation_ratio=0.2,  # Fraction to select as validation data
+     output_path='./test/',
+     sampling_region=ee.Geometry.Rectangle(-63.0, -9.0, -56.0, -4.0),
+     num_workers=2
+ )
+ ```
+
+ Now let's add another dataset: MapBiomas Amazonia forest fraction.
+ ```python
+ # MapBiomas land-use/land-cover forest fraction
+ # Note that LULC codes less than 10 are forest in MapBiomas Amazon Collection 6
+ mb_amz_lulc = (
+     ee.Image('projects/mapbiomas-public/assets/amazon/lulc/collection6/mapbiomas_collection60_integration_v1')
+     .lt(10)
+     .reduceResolution(ee.Reducer.mean(), maxPixels=500)
+ )
+
+ # Exporting both together is as simple as this:
+ geebeam.run(
+     image_list=[burned_2024, mb_amz_lulc],
+     project=PROJECT_ID,
+     patch_size=128,
+     scale=500,
+     n_sample=10,
+     validation_ratio=0.2,
+     output_path='./test/',
+     sampling_region=ee.Geometry.Rectangle(-63.0, -9.0, -56.0, -4.0),
+     num_workers=2
+ )
+
+ ```
+
+ ### Dataflow:
+
+ The export process can be scaled to many workers via Google Cloud Dataflow:
+
+ ```bash
+ python examples/geebeam_run.py \
+     --region us-east1 \
+     --worker_zone us-east1-b \
+     --runner DataflowRunner \
+     --max_num_workers=8 \
+     --num_workers=1 \
+     --experiments=use_runner_v2 \
+     --machine_type=n2-highmem-2
+ ```
+
+ Note that in this case the `output_path` defined in `geebeam_run.py` should be a Google Cloud Storage path (a `gs://` URI).
+
+ #### Artifact Registry setup:
+
+ To speed up starting and running jobs on Dataflow, you can build a Docker image containing `geebeam` and its dependencies. The steps below follow the Google Cloud instructions.
+
+ First, you'll need Docker installed on your computer; either [Docker Engine](https://docs.docker.com/engine/) alone or the full [Docker Desktop](https://docs.docker.com/desktop/) will do.
+
+ Next, you'll need to create a Google Artifact Registry repository and configure Docker to authenticate requests to Artifact Registry. [The Google Cloud Dataflow documentation has step-by-step instructions](https://docs.cloud.google.com/dataflow/docs/guides/build-container-image#before_you_begin).
+
+ Now you can pre-build the container at the start of your Dataflow job:
+
+ ```bash
+ python examples/geebeam_run.py \
+     --region us-east1 \
+     --worker_zone us-east1-b \
+     --runner DataflowRunner \
+     --max_num_workers=8 \
+     --num_workers=1 \
+     --experiments=use_runner_v2 \
+     --machine_type=n2-highmem-2 \
+     --prebuild_sdk_container_engine=local_docker \
+     --docker_registry_push_url=us-east1-docker.pkg.dev/[PROJECT_ID]/[REPO_NAME]/[IMAGE_NAME] \
+     --setup_file=./setup.py
+ ```
+
+ Next time, you can reuse the existing image by passing:
+
+ ```bash
+ --sdk_container_image=us-east1-docker.pkg.dev/[PROJECT_ID]/[REPO_NAME]/[IMAGE_NAME]:[IMAGE_TAG]
+ ```
+
+
+ ## Alternatives:
+
+ - [GeeFlow](https://github.com/google-deepmind/geeflow): Google DeepMind's GeeFlow serves a similar purpose. It is more flexible, allowing more user control over data processing, reprojection, and writing, but it is slower and no longer actively maintained. With the goal of meeting *most* users' needs, GeeBeam is designed to be easier and quicker to use, but allows for a more limited set of data transformations.
+ - Export training data to Google Cloud Storage, then download chips from there: This works, but if you need data from many different datasets, exporting it all to Cloud Storage is slow, and storing it there can be expensive if you don't delete it quickly. It also uses a lot of Earth Engine compute hours, which are now subject to stricter monthly limits.
+ - [Xee](https://github.com/google/Xee): Xee allows accessing Earth Engine objects as xarray.Datasets. You could use it to define an xarray.Dataset and download "chips" from it, but geebeam interfaces with Beam to automatically parallelize this task.
geebeam-0.1.0/README.md ADDED
@@ -0,0 +1,140 @@
+ # GeeBeam
+
+ [![Testing + Linting](https://github.com/kysolvik/geebeam/actions/workflows/pytest-lint.yml/badge.svg)](https://github.com/kysolvik/geebeam/actions/workflows/pytest-lint.yml)
+
+ Google Earth Engine + Apache Beam for building geospatial training datasets
+
+ ## Purpose:
+
+ GeeBeam is a lightweight library for building and executing Apache Beam pipelines that download data "chips" from Google Earth Engine and write them to TensorFlow records for model training.
+
+ The user defines the Earth Engine images they want to download chips from using the Python earthengine-api. geebeam then serializes the graph definitions of the images so they can be passed to the Beam workers.
+
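+ To make this concrete, the round trip looks roughly like the sketch below, using the `ee.serializer`/`ee.deserializer` helpers from the earthengine-api (geebeam's own `ee_utils` module, shown later in this diff, wraps exactly these calls):
+
+ ```python
+ import ee
+
+ ee.Initialize()  # assumes you have already authenticated to Earth Engine
+
+ # Client side: build an image and serialize its graph definition to JSON
+ image = ee.ImageCollection('MODIS/061/MCD64A1').select('BurnDate').min()
+ image_json = ee.serializer.toJSON(image)  # a plain string, safe to ship to Beam workers
+
+ # Worker side: rebuild the same ee.Image from the JSON DAG
+ rebuilt = ee.deserializer.fromJSON(image_json)
+ ```
+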
+ The pipelines can be run locally or on Google Cloud Dataflow. (Note: local jobs are currently limited to short-running tasks due to a gRPC "Deadline Exceeded" error.)
+
+
+ ## Install:
+
+ ```bash
+ pip install geebeam
+ ```
+
+ ## Examples:
+
+ Here we'll create a burned area mask for 2024 using the MCD64A1 product.
+ For example, this could be the target variable for a burn risk model.
+
+ ```python
+ import ee
+ import geebeam
+ import google.auth
+
+ # Get the default project ID from the environment (or set PROJECT_ID manually)
+ PROJECT_ID = google.auth.default()[1]
+
+ # Initialize the ee client with your GCP project ID
+ ee.Initialize(project=PROJECT_ID)
+
+ # Build image for download
+ burned_2024 = (ee.ImageCollection('MODIS/061/MCD64A1')
+                .select('BurnDate')
+                .filter(ee.Filter.calendarRange(2024, 2024, 'year'))
+                .min()
+                .gt(0)
+                .rename(['Burn'])
+                )
+
+ # Building and triggering the pipeline is done with a single command:
+ geebeam.run(
+     image_list=[burned_2024],
+     project=PROJECT_ID,
+     patch_size=128,  # Pixel dimensions in each direction
+     scale=500,  # Final export resolution in meters
+     n_sample=10,  # Number of tiles to sample
+     validation_ratio=0.2,  # Fraction to select as validation data
+     output_path='./test/',
+     sampling_region=ee.Geometry.Rectangle(-63.0, -9.0, -56.0, -4.0),
+     num_workers=2
+ )
+ ```
+
+ Now let's add another dataset: MapBiomas Amazonia forest fraction.
+ ```python
+ # MapBiomas land-use/land-cover forest fraction
+ # Note that LULC codes less than 10 are forest in MapBiomas Amazon Collection 6
+ mb_amz_lulc = (
+     ee.Image('projects/mapbiomas-public/assets/amazon/lulc/collection6/mapbiomas_collection60_integration_v1')
+     .lt(10)
+     .reduceResolution(ee.Reducer.mean(), maxPixels=500)
+ )
+
+ # Exporting both together is as simple as this:
+ geebeam.run(
+     image_list=[burned_2024, mb_amz_lulc],
+     project=PROJECT_ID,
+     patch_size=128,
+     scale=500,
+     n_sample=10,
+     validation_ratio=0.2,
+     output_path='./test/',
+     sampling_region=ee.Geometry.Rectangle(-63.0, -9.0, -56.0, -4.0),
+     num_workers=2
+ )
+
+ ```
+
+ ### Dataflow:
+
+ The export process can be scaled to many workers via Google Cloud Dataflow:
+
+ ```bash
+ python examples/geebeam_run.py \
+     --region us-east1 \
+     --worker_zone us-east1-b \
+     --runner DataflowRunner \
+     --max_num_workers=8 \
+     --num_workers=1 \
+     --experiments=use_runner_v2 \
+     --machine_type=n2-highmem-2
+ ```
+
+ Note that in this case the `output_path` defined in `geebeam_run.py` should be a Google Cloud Storage path (a `gs://` URI).
+
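+ For reference, here is a sketch of what the `geebeam.run` call inside `geebeam_run.py` might look like for a Dataflow run. The script's actual contents are not reproduced here; `burned_2024` and `PROJECT_ID` are defined as in the example above, and the bucket path is a placeholder:
+
+ ```python
+ # Hypothetical excerpt from examples/geebeam_run.py. The Dataflow flags shown above are
+ # assumed to be read from the command line via Beam's usual PipelineOptions mechanism.
+ geebeam.run(
+     image_list=[burned_2024],
+     project=PROJECT_ID,
+     patch_size=128,
+     scale=500,
+     n_sample=10,
+     validation_ratio=0.2,
+     output_path='gs://your-bucket/geebeam-chips/',  # placeholder Cloud Storage path
+     sampling_region=ee.Geometry.Rectangle(-63.0, -9.0, -56.0, -4.0),
+     num_workers=2,
+ )
+ ```
+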
+ #### Artifact Registry setup:
+
+ To speed up starting and running jobs on Dataflow, you can build a Docker image containing `geebeam` and its dependencies. The steps below follow the Google Cloud instructions.
+
+ First, you'll need Docker installed on your computer; either [Docker Engine](https://docs.docker.com/engine/) alone or the full [Docker Desktop](https://docs.docker.com/desktop/) will do.
+
+ Next, you'll need to create a Google Artifact Registry repository and configure Docker to authenticate requests to Artifact Registry. [The Google Cloud Dataflow documentation has step-by-step instructions](https://docs.cloud.google.com/dataflow/docs/guides/build-container-image#before_you_begin).
+
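+ For reference, both steps can be done with `gcloud`; the repository name below is a placeholder and the region should match your Dataflow region:
+
+ ```bash
+ # Create a Docker-format repository in Artifact Registry
+ gcloud artifacts repositories create [REPO_NAME] \
+     --repository-format=docker \
+     --location=us-east1
+
+ # Configure Docker to authenticate to that region's Artifact Registry host
+ gcloud auth configure-docker us-east1-docker.pkg.dev
+ ```
+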
+ Now you can pre-build the container at the start of your Dataflow job:
+
+ ```bash
+ python examples/geebeam_run.py \
+     --region us-east1 \
+     --worker_zone us-east1-b \
+     --runner DataflowRunner \
+     --max_num_workers=8 \
+     --num_workers=1 \
+     --experiments=use_runner_v2 \
+     --machine_type=n2-highmem-2 \
+     --prebuild_sdk_container_engine=local_docker \
+     --docker_registry_push_url=us-east1-docker.pkg.dev/[PROJECT_ID]/[REPO_NAME]/[IMAGE_NAME] \
+     --setup_file=./setup.py
+ ```
+
+ Next time, you can reuse the existing image by passing:
+
+ ```bash
+ --sdk_container_image=us-east1-docker.pkg.dev/[PROJECT_ID]/[REPO_NAME]/[IMAGE_NAME]:[IMAGE_TAG]
+ ```
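+
+ For example, a later run might drop the prebuild flags and point at the prebuilt image instead (the image reference is a placeholder):
+
+ ```bash
+ python examples/geebeam_run.py \
+     --region us-east1 \
+     --worker_zone us-east1-b \
+     --runner DataflowRunner \
+     --max_num_workers=8 \
+     --num_workers=1 \
+     --experiments=use_runner_v2 \
+     --machine_type=n2-highmem-2 \
+     --sdk_container_image=us-east1-docker.pkg.dev/[PROJECT_ID]/[REPO_NAME]/[IMAGE_NAME]:[IMAGE_TAG]
+ ```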
+
+
+ ## Alternatives:
+
+ - [GeeFlow](https://github.com/google-deepmind/geeflow): Google DeepMind's GeeFlow serves a similar purpose. It is more flexible, allowing more user control over data processing, reprojection, and writing, but it is slower and no longer actively maintained. With the goal of meeting *most* users' needs, GeeBeam is designed to be easier and quicker to use, but allows for a more limited set of data transformations.
+ - Export training data to Google Cloud Storage, then download chips from there: This works, but if you need data from many different datasets, exporting it all to Cloud Storage is slow, and storing it there can be expensive if you don't delete it quickly. It also uses a lot of Earth Engine compute hours, which are now subject to stricter monthly limits.
+ - [Xee](https://github.com/google/Xee): Xee allows accessing Earth Engine objects as xarray.Datasets. You could use it to define an xarray.Dataset and download "chips" from it, but geebeam interfaces with Beam to automatically parallelize this task.
geebeam-0.1.0/pyproject.toml ADDED
@@ -0,0 +1,32 @@
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [project]
+ name = "geebeam"
+ version = "0.1.0"
+ license = "MIT"
+ description = "Earth Engine + Apache Beam"
+ readme = "README.md"
+ requires-python = ">=3.10"
+ dependencies = [
+     "apache-beam[gcp]>=2.71.0",
+     "dill>=0.4.1",
+     "earthengine-api>=1.7.10",
+     "geopandas>=1.0.1",
+     "tensorflow>=2.16",
+     "tensorflow-data-validation>=1.16",
+     "tensorflow-metadata>=1.16",
+     "tfx-bsl>=1.16",
+     "grpcio>=1.62.3",
+ ]
+
+ [tool.hatch.build.targets.sdist]
+ include = [
+     "src",
+     "tests",
+     "README.md",
+     "LICENSE",
+     "pyproject.toml",
+     "setup.py"
+ ]
geebeam-0.1.0/src/geebeam/__init__.py ADDED
@@ -0,0 +1,15 @@
+ """Runners
+
+ Beam and Earth Engine helpers for running data pipelines
+ """
+
+ from . import runner, sampler, transforms, ee_utils, climate_indices
+
+
+ __all__ = [
+     "ee_utils",
+     "runner",
+     "sampler",
+     "transforms",
+     "climate_indices",
+ ]
geebeam-0.1.0/src/geebeam/climate_indices.py ADDED
@@ -0,0 +1,37 @@
+ """Helpers to download non-spatial climate indices"""
+
+ import pandas as pd
+
+
+ def download_clim_indices(
+         index_name: str,
+         year_start: int,
+         year_end: int
+ ) -> pd.DataFrame:
+     """Download a monthly climate index as a DataFrame indexed by date.
+
+     Returns a single 'metric' column restricted to [year_start, year_end].
+     """
+     clim_registry = {
+         'amo': 'https://www.ncei.noaa.gov/pub/data/cmb/ersst/v5/index/ersst.v5.amo.dat',
+         'soi': 'https://psl.noaa.gov/data/timeseries/month/data/soi.long.csv',
+         'oni': 'https://psl.noaa.gov/data/correlation/oni.csv',
+         'mei': 'https://psl.noaa.gov/data/correlation/meiv2.csv',
+         'tna': 'https://psl.noaa.gov/data/correlation/tna.csv'
+     }
+
+     try:
+         download_url = clim_registry[index_name]
+     except KeyError as exc:
+         raise ValueError(
+             f'{index_name} not found. Current options are {list(clim_registry.keys())}'
+         ) from exc
+
+     if index_name == 'amo':
+         # The AMO index is a whitespace-delimited table with separate Year/month columns
+         df = pd.read_csv(download_url, skiprows=1, sep=r'\s+')
+         df['Date'] = df['Year'].astype(str) + '-' + df['month'].astype(str) + '-01'
+         df = df.drop(columns=['Year', 'month'])[['Date', 'SSTA']]
+     else:
+         df = pd.read_csv(download_url)
+
+     df['Date'] = pd.to_datetime(df['Date'])
+     df.columns = ['Date', 'metric']
+
+     df = df.set_index('Date')
+     indexer = (df.index.year >= year_start) & (df.index.year <= year_end)
+     return df.loc[indexer]
+
geebeam-0.1.0/src/geebeam/ee_utils.py ADDED
@@ -0,0 +1,40 @@
+ """
+ Utilities for cleaning, combining, and serializing/deserializing EE objects.
+ """
+ import ee
+
+
+ def deserialize(obj_json):
+     """Deserialize Earth Engine JSON DAG"""
+     return ee.deserializer.fromJSON(obj_json)
+
+
+ def serialize(obj_ee):
+     """Serialize Earth Engine object to JSON for Dataflow workers"""
+     return ee.serializer.toJSON(obj_ee)
+
+
+ def get_band_names(input_list):
+     """Get simplified band names for output (without prefixed image_id)
+
+     TODO: Add year to distinguish multiple years?
+     """
+     return [
+         image.bandNames()
+         for image in input_list
+     ]
+
+
+ def build_prepped_image(input_list, split_processing=False):
+     """Combine a list of EE images into a single image"""
+     band_names = get_band_names(input_list)
+     band_names_flat = ee.List(band_names).flatten()
+
+     if split_processing:
+         band_groups = band_names
+     else:
+         band_groups = [band_names_flat]
+     # Final prepped image
+     prepped_im = ee.ImageCollection(input_list).toBands().rename(band_names_flat)
+     return prepped_im, band_groups
+
+
+ def list_to_im(input_list):
+     """Combine a list of EE images into a single prepped image (band groups discarded)"""
+     prepped_im, _ = build_prepped_image(input_list)
+     return prepped_im