paddle 1.1.1.tar.gz → 1.1.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,12 @@
+ [bumpversion]
+ current_version = 1.1.4
+ commit = True
+ tag = True
+
+ [bumpversion:file:src/paddle/__init__.py]
+ search = __version__ = "{current_version}"
+ replace = __version__ = "{new_version}"
+
+ [bumpversion:file:pyproject.toml]
+ search = version = "{current_version}"
+ replace = version = "{new_version}"
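The config above drives a bump2version-style search-and-replace in each listed file. As a rough illustration only (not the tool's actual implementation), the substitution amounts to rendering the `search`/`replace` templates with the old and new version strings and swapping them in place; the helper name and the example versions below are made up for this sketch.

```python
# Minimal sketch of the per-file substitution a bumpversion-style tool performs.
# "bump_file" and the example values are illustrative, not part of the package.
from pathlib import Path

def bump_file(path: str, search_tmpl: str, replace_tmpl: str,
              current_version: str, new_version: str) -> None:
    """Render the search/replace templates and swap them in the target file."""
    search = search_tmpl.format(current_version=current_version,
                                new_version=new_version)
    replace = replace_tmpl.format(current_version=current_version,
                                  new_version=new_version)
    text = Path(path).read_text()
    Path(path).write_text(text.replace(search, replace))

# Mirroring the [bumpversion:file:pyproject.toml] section above:
# bump_file("pyproject.toml", 'version = "{current_version}"',
#           'version = "{new_version}"', "1.1.1", "1.1.4")
```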
@@ -0,0 +1,78 @@
+ name: Bump Version and Tag
+
+ on:
+ pull_request:
+ types: [closed]
+ branches: [main]
+
+ env:
+ PYTHON_VERSION: "3.11"
+
+ jobs:
+ bump-version:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: write
+ pull-requests: write
+
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ with:
+ fetch-depth: 0
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+
+ - name: Configure Git
+ run: |
+ git config user.name "github-actions[bot]"
+ git config user.email "github-actions[bot]@users.noreply.github.com"
+
+ - name: Bump version and push tag
+ id: bump_version
+ uses: jasonamyers/github-bumpversion-action@v1.0.5
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Push changes
+ uses: ad-m/github-push-action@master
+ with:
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ branch: ${{ github.ref }}
+ tags: true
+
+ - name: Generate Release Notes
+ uses: octokit/request-action@v2.x
+ id: get_release_notes
+ with:
+ route: POST /repos/${{ github.repository }}/releases/generate-notes
+ tag_name: v${{ steps.bump_version.outputs.new_ver }}
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ - name: Create GitHub release
+ id: create_release
+ uses: actions/create-release@v1.1.4
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ tag_name: v${{ steps.bump_version.outputs.new_ver }}
+ release_name: Release v${{ steps.bump_version.outputs.new_ver }}
+ draft: false
+ prerelease: false
+ body: ${{ fromJson(steps.get_release_notes.outputs.data).body }}
+
+ - name: Post comment on PR with release notes
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const notes = `${{ fromJson(steps.get_release_notes.outputs.data).body }}`;
+ github.rest.issues.createComment({
+ issue_number: context.payload.pull_request.number,
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ body: `🎉 Released v${{ steps.bump_version.outputs.new_ver }}!\n\n${notes}`
+ })
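The "Generate Release Notes" step above calls GitHub's `POST /repos/{owner}/{repo}/releases/generate-notes` REST endpoint through octokit/request-action. A hedged sketch of making the same call directly with `requests` outside of Actions; the repository slug, tag, and token here are placeholders, not values taken from this package.

```python
# Sketch: call the same REST endpoint the workflow uses to draft release notes.
# The repo slug, tag, and token below are placeholders.
import os
import requests

def generate_release_notes(repo: str, tag: str, token: str) -> str:
    url = f"https://api.github.com/repos/{repo}/releases/generate-notes"
    resp = requests.post(
        url,
        headers={
            "Authorization": f"Bearer {token}",
            "Accept": "application/vnd.github+json",
        },
        json={"tag_name": tag},
        timeout=30,
    )
    resp.raise_for_status()
    # The response body field is the markdown used in the release/comment steps above.
    return resp.json()["body"]

# Example with hypothetical values:
# notes = generate_release_notes("elijah-mullens/paddle", "v1.1.4", os.environ["GITHUB_TOKEN"])
```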
@@ -0,0 +1,55 @@
+ name: Deploy to PyPI
+
+ on:
+ push:
+ branches: [main]
+ tags: [v*]
+
+ jobs:
+ build:
+ name: Build distribution artifacts
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Setup Host Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+ cache: pip
+
+ - name: Install build backend
+ run: |
+ python -m pip install --upgrade pip
+ pip install build
+
+ - name: Build sdist and wheel
+ run: python -m build
+
+ - name: Upload artifacts
+ uses: actions/upload-artifact@v4
+ with:
+ name: dist
+ path: dist/*
+
+ publish-pypi:
+ name: Publish to PyPI
+ needs: build
+ runs-on: ubuntu-latest
+ permissions:
+ id-token: write
+ contents: read
+ steps:
+ - name: Download wheels
+ uses: actions/download-artifact@v4
+ with:
+ name: dist
+ path: dist
+
+ - name: Publish to PyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
+ with:
+ user: __token__
+ password: ${{ secrets.PYPI_API_TOKEN }}
+ verbose: true
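The build job simply runs `python -m build`. For local reproduction, a sketch using the `build` package's Python API is below (assuming `pip install build`); note that, unlike the CLI, `ProjectBuilder` builds in the current environment without isolation, so the hatchling backend must already be installed. The expected artifact names are illustrative.

```python
# Sketch: reproduce the "Build sdist and wheel" step programmatically with the
# `build` package. Unlike `python -m build`, this skips build isolation.
from build import ProjectBuilder

def build_distributions(source_dir: str = ".", out_dir: str = "dist") -> list[str]:
    builder = ProjectBuilder(source_dir)
    sdist = builder.build("sdist", out_dir)   # e.g. dist/paddle-1.1.4.tar.gz
    wheel = builder.build("wheel", out_dir)   # e.g. dist/paddle-1.1.4-py3-none-any.whl
    return [sdist, wheel]

if __name__ == "__main__":
    print(build_distributions())
```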
@@ -0,0 +1,60 @@
+ name: Continuous Integration
+
+ on:
+ pull_request:
+ branches: [main]
+
+ push:
+ branches: [main]
+ tags: [v*]
+
+ env:
+ PYTHON_VERSION: "3.11"
+ BUILD_TYPE: Release
+
+ jobs:
+ pre-commit:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+
+ - name: Install style checkers
+ run: pip install --user cpplint cppcheck clang-format==20.1.4
+
+ - name: Cache pre-commit
+ uses: actions/cache@v3
+ with:
+ path: ~/.cache/pre-commit
+ key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}
+ restore-keys: |
+ ${{ runner.os }}-pre-commit-
+
+ - name: Run pre-commit
+ uses: pre-commit/action@v3.0.1
+
+ build-and-test:
+ needs: pre-commit
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: true
+ matrix:
+ os: [ubuntu-latest, macOS-latest]
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+ cache: "pip"
+
+ - name: Install Python dependencies
+ run: |
+ pip install numpy pytest 'pyharp>=1.7.1' 'torch==2.7.1'
@@ -0,0 +1,207 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[codz]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py.cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # UV
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ #uv.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+ #poetry.toml
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
+ #pdm.lock
+ #pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # pixi
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
+ #pixi.lock
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
+ # in the .venv directory. It is recommended not to include this directory in version control.
+ .pixi
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .envrc
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ # Abstra
+ # Abstra is an AI-powered process automation framework.
+ # Ignore directories containing user credentials, local state, and settings.
+ # Learn more at https://abstra.io/docs
+ .abstra/
+
+ # Visual Studio Code
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
+ # you could uncomment the following to ignore the entire vscode folder
+ # .vscode/
+
+ # Ruff stuff:
+ .ruff_cache/
+
+ # PyPI configuration file
+ .pypirc
+
+ # Cursor
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
+ # refer to https://docs.cursor.com/context/ignore-files
+ .cursorignore
+ .cursorindexingignore
+
+ # Marimo
+ marimo/_static/
+ marimo/_lsp/
+ __marimo__/
@@ -0,0 +1,17 @@
+ repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.0.1
+ hooks:
+ - id: requirements-txt-fixer
+ - id: trailing-whitespace
+ exclude: ^(data|patches)/.*$
+ - id: end-of-file-fixer
+ exclude: ^(data|patches)/.*$
+ - id: check-yaml
+ exclude: ^data/
+
+ - repo: https://github.com/psf/black
+ rev: 24.10.0 # Use the latest stable version
+ hooks:
+ - id: black
+ args: [--line-length=88]
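Most of the Python source changes further down in this diff are the quote-style and line-wrapping normalizations that black produces with this hook. A small hedged sketch of applying the same formatting to a snippet programmatically (assumes `pip install black`; the snippet is illustrative):

```python
# Sketch: apply black (line length 88, as configured above) to a code string.
import black

src = "if __name__ == '__main__':\n    pass\n"
formatted = black.format_str(src, mode=black.Mode(line_length=88))
print(formatted)  # single quotes are normalized to double quotes, as in the diffs below
```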
paddle-1.1.4/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Elijah Mullens
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
@@ -1,11 +1,12 @@
  Metadata-Version: 2.4
  Name: paddle
- Version: 1.1.1
- Summary: Canoe's utility subroutines
- Project-URL: Homepage, https://github.com/chengcli/paddle
- Project-URL: Repository, https://github.com/chengcli/paddle
- Project-URL: Issues, https://github.com/chengcli/paddle/issues
- Author-email: Cheng Li <chengcli@umich.edu>
+ Version: 1.1.4
+ Summary: Python Atmospheric Dynamics: Discovery and Learning about Exoplanets. An open-source, user-friendly python frontend of canoe
+ Project-URL: Homepage, https://github.com/elijah-mullens/paddle
+ Project-URL: Repository, https://github.com/elijah-mullens/paddle
+ Project-URL: Issues, https://github.com/elijah-mullens/paddle/issues
+ Author-email: Elijah Mullens <eem85@cornell.edu>, Cheng Li <chengcli@umich.edu>
+ License-File: LICENSE
  Classifier: Development Status :: 3 - Alpha
  Classifier: Intended Audience :: Developers
  Classifier: Intended Audience :: Science/Research
@@ -29,10 +30,4 @@ Requires-Dist: pytest>=7; extra == 'dev'
  Description-Content-Type: text/markdown

  # paddle
-
- A minimal, utility subroutines for canoe
-
- ## Install
-
- ```bash
- pip install paddle
+ Python Atmospheric Dynamics: Discovering and Learning about Exoplanets. An open-source, user-friendly python version of canoe.
paddle-1.1.4/README.md ADDED
@@ -0,0 +1,2 @@
+ # paddle
+ Python Atmospheric Dynamics: Discovering and Learning about Exoplanets. An open-source, user-friendly python version of canoe.
@@ -4,11 +4,14 @@ build-backend = "hatchling.build"

  [project]
  name = "paddle"
- version = "1.1.1"
- description = "Canoe's utility subroutines"
+ version = "1.1.4"
+ description = "Python Atmospheric Dynamics: Discovery and Learning about Exoplanets. An open-source, user-friendly python frontend of canoe"
  readme = "README.md"
  requires-python = ">=3.9"
- authors = [{ name = "Cheng Li", email = "chengcli@umich.edu" }]
+ authors = [
+ { name = "Elijah Mullens", email = "eem85@cornell.edu" },
+ { name = "Cheng Li", email = "chengcli@umich.edu" },
+ ]
  keywords = []
  classifiers = [
  "Development Status :: 3 - Alpha",
@@ -39,9 +42,9 @@ dev = [
  ]

  [project.urls]
- Homepage = "https://github.com/chengcli/paddle"
- Repository = "https://github.com/chengcli/paddle"
- Issues = "https://github.com/chengcli/paddle/issues"
+ Homepage = "https://github.com/elijah-mullens/paddle"
+ Repository = "https://github.com/elijah-mullens/paddle"
+ Issues = "https://github.com/elijah-mullens/paddle/issues"

  [project.scripts]
  paddle = "paddle.__main__:main"
@@ -3,8 +3,5 @@ from .write_profile import write_profile
  from .find_init_params import find_init_params
  from .evolve_kinetics import evolve_kinetics

- __all__ = ["setup_profile",
- "write_profile",
- "find_init_params",
- "evolve_kinetics"]
- __version__ = "1.1.1"
+ __all__ = ["setup_profile", "write_profile", "find_init_params", "evolve_kinetics"]
+ __version__ = "1.1.4"
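After this change the package re-exports exactly four helpers plus `__version__`. A quick interactive check (assuming paddle 1.1.4 and its snapy/kintera dependencies are installed):

```python
# Verify the public API re-exported by paddle/__init__.py.
import paddle

print(paddle.__version__)  # "1.1.4"
print(paddle.__all__)      # ["setup_profile", "write_profile", "find_init_params", "evolve_kinetics"]

from paddle import setup_profile, write_profile, find_init_params, evolve_kinetics
```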
@@ -4,20 +4,20 @@ import time
  import kintera
  import numpy as np
  from snapy import (
- index,
- MeshBlockOptions,
- MeshBlock,
- OutputOptions,
- NetcdfOutput,
- )
+ index,
+ MeshBlockOptions,
+ MeshBlock,
+ OutputOptions,
+ NetcdfOutput,
+ )
  from kintera import (
- ThermoOptions,
- ThermoX,
- KineticsOptions,
- Kinetics,
- )
+ ThermoOptions,
+ ThermoX,
+ KineticsOptions,
+ Kinetics,
+ )

- if __name__ == '__main__':
+ if __name__ == "__main__":
  # input file
  infile = "earth.yaml"
  device = "cpu"
@@ -46,7 +46,7 @@ if __name__ == '__main__':

  # set up initial condition
  w = setup_initial_condition(block, thermo_x)
- print("w = ", w[:,0,0,:])
+ print("w = ", w[:, 0, 0, :])

  # integration
  current_time = 0.0
@@ -70,7 +70,7 @@ if __name__ == '__main__':
  block.forward(dt, stage)

  # evolve kinetics
- u[index.icy:] += evolve_kinetics(block, kinet, thermo_x)
+ u[index.icy :] += evolve_kinetics(block, kinet, thermo_x)

  current_time += dt
  count += 1
@@ -2,12 +2,14 @@ import torch
  import snapy
  import kintera

+
  def evolve_kinetics(
  hydro_w: torch.Tensor,
- block: snapy.MeshBlock,
- kinet: kintera.Kinetics,
+ block: snapy.MeshBlock,
+ kinet: kintera.Kinetics,
  thermo_x: kintera.ThermoX,
- dt) -> torch.Tensor:
+ dt,
+ ) -> torch.Tensor:
  """
  Evolve the chemical kinetics for one time step using implicit method.

@@ -26,7 +28,7 @@ def evolve_kinetics(

  temp = eos.compute("W->T", (hydro_w,))
  pres = hydro_w[snapy.index.ipr]
- xfrac = thermo_y.compute("Y->X", (hydro_w[snapy.index.icy:],))
+ xfrac = thermo_y.compute("Y->X", (hydro_w[snapy.index.icy :],))
  conc = thermo_x.compute("TPX->V", (temp, pres, xfrac))
  cp_vol = thermo_x.compute("TV->cp", (temp, conc))

@@ -4,16 +4,18 @@ import numpy as np

  from .setup_profile import setup_profile

+
  def find_init_params(
- block: snapy.MeshBlock,
- param: dict[str, float],
- *,
- target_T: float=300.,
- target_P: float=1.e5,
- method: str="moist-adiabat",
- max_iter: int=50,
- ftol: float=1.e-2,
- verbose: bool=True):
+ block: snapy.MeshBlock,
+ param: dict[str, float],
+ *,
+ target_T: float = 300.0,
+ target_P: float = 1.0e5,
+ method: str = "moist-adiabat",
+ max_iter: int = 50,
+ ftol: float = 1.0e-2,
+ verbose: bool = True,
+ ):
  """Find initial parameters that yield desired T and P

  Args:
@@ -44,14 +46,15 @@ def find_init_params(
  temp = eos.compute("W->T", (w,)).squeeze()

  # calculate 1D pressure
- pres = w[snapy.index.ipr,...].squeeze()
+ pres = w[snapy.index.ipr, ...].squeeze()

  # temperature function
  t_func = interp1d(
  pres.log().cpu().numpy(),
  temp.log().cpu().numpy(),
  kind="linear",
- fill_value="extrapolate")
+ fill_value="extrapolate",
+ )

  temp1 = np.exp(t_func(np.log(target_P)))
  if verbose:
@@ -69,4 +72,3 @@ def find_init_params(
  count += 1

  raise RuntimeError("Failed to converge within the maximum number of iterations.")
-
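The reformatted signature keeps the same keyword interface. A hedged call sketch mirroring the usage in the package's own test further down in this diff; "my_config.yaml" is a placeholder input file, not shipped with the package:

```python
# Sketch of calling find_init_params with the keyword interface shown above,
# mirroring tests/test_saturn_profile.py later in this diff.
from paddle import find_init_params, setup_profile
from snapy import MeshBlock, MeshBlockOptions

block = MeshBlock(MeshBlockOptions.from_yaml("my_config.yaml"))  # placeholder path

param = find_init_params(
    block,
    {"Ts": 600.0, "Ps": 100.0e5, "grav": 10.44},
    target_T=134.0,
    target_P=1.0e5,
    method="dry-adiabat",
    max_iter=50,
    ftol=1.0e-2,
    verbose=True,
)
w = setup_profile(block, param, method="dry-adiabat")
```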
@@ -4,18 +4,19 @@ import torch
  import snapy
  import kintera

+
  def integrate_neutral(
- thermo_x: kintera.ThermoX,
- temp: torch.Tensor,
- pres: torch.Tensor,
- xfrac: torch.Tensor,
- grav: float,
- dz: float,
- max_iter: int = 100
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ thermo_x: kintera.ThermoX,
+ temp: torch.Tensor,
+ pres: torch.Tensor,
+ xfrac: torch.Tensor,
+ grav: float,
+ dz: float,
+ max_iter: int = 100,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
  """
  A neutral density profile assumes no cloud and:
-
+
  (1) dP/dz = -rho*g
  (2) d(rho)/dz = ...

@@ -68,15 +69,16 @@ def integrate_neutral(

  return temp2, pres2, xfrac2

+
  def integrate_dry_adiabat(
- thermo_x: kintera.ThermoX,
- temp: torch.Tensor,
- pres: torch.Tensor,
- xfrac: torch.Tensor,
- grav: float,
- dz: float,
- max_iter: int = 100
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ thermo_x: kintera.ThermoX,
+ temp: torch.Tensor,
+ pres: torch.Tensor,
+ xfrac: torch.Tensor,
+ grav: float,
+ dz: float,
+ max_iter: int = 100,
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
  """
  A dry adiabatic profile assumes no cloud and:

@@ -84,12 +86,12 @@ def integrate_dry_adiabat(
  (2) dP/dz = -rho*g

  In discretized form:
-
+
  cp_bar = 0.5 * (cp(T_old) + cp(T_new))
  T_new = T_old - g/bar * dz
  rho_bar = 0.5 * (rho_old + rho_new)
  P_new = P_old - rho_bar * g * dz
-
+
  """
  conc1 = thermo_x.compute("TPX->V", [temp, pres, xfrac])
  cp1 = thermo_x.compute("TV->cp", [temp, conc1]) / conc1.sum(-1)
@@ -136,11 +138,10 @@ def integrate_dry_adiabat(

  return temp2, pres2, xfrac2

+
  def setup_profile(
- block: snapy.MeshBlock,
- param: dict[str, float] = {},
- method: str = "moist-adiabat"
- ) -> torch.Tensor:
+ block: snapy.MeshBlock, param: dict[str, float] = {}, method: str = "moist-adiabat"
+ ) -> torch.Tensor:
  """
  Set up an adiabatic initial condition for the mesh block.

@@ -175,16 +176,16 @@ def setup_profile(
  "moist-adiabat",
  "isothermal",
  "pseudo-adiabat",
- "neutral"
+ "neutral",
  ]

  if method not in valid_methods:
  raise ValueError(f"Invalid method '{method}'. Choose from {valid_methods}.")

- Ts = param.get("Ts", 300.)
- Ps = param.get("Ps", 1.e5)
+ Ts = param.get("Ts", 300.0)
+ Ps = param.get("Ps", 1.0e5)
  grav = param.get("grav", 9.8)
- Tmin = param.get("Tmin", 0.)
+ Tmin = param.get("Tmin", 0.0)

  # get handles to modules
  coord = block.module("hydro.coord")
@@ -204,8 +205,7 @@ def setup_profile(
  ny = len(thermo_y.options.species()) - 1
  nvar = 5 + ny

- w = torch.zeros((nvar, nc3, nc2, nc1),
- dtype=x1v.dtype, device=x1v.device)
+ w = torch.zeros((nvar, nc3, nc2, nc1), dtype=x1v.dtype, device=x1v.device)

  temp = Ts * torch.ones((nc3, nc2), dtype=w.dtype, device=w.device)
  pres = Ps * torch.ones((nc3, nc2), dtype=w.dtype, device=w.device)
@@ -216,7 +216,7 @@ def setup_profile(
  xfrac[..., index] = param.get(f"x{name}", 0.0)

  # dry air mole fraction
- xfrac[..., 0] = 1. - xfrac[..., 1:].sum(dim=-1)
+ xfrac[..., 0] = 1.0 - xfrac[..., 1:].sum(dim=-1)

  # start and end indices for the vertical direction
  # excluding ghost cells
@@ -227,7 +227,7 @@ def setup_profile(
  dz = coord.buffer("dx1f")[ifirst]

  # half a grid to cell center
- thermo_x.extrapolate_ad(temp, pres, xfrac, grav, dz / 2.);
+ thermo_x.extrapolate_ad(temp, pres, xfrac, grav, dz / 2.0)

  # adiabatic extrapolation
  if method == "isothermal":
@@ -245,17 +245,19 @@ def setup_profile(
  xfrac /= xfrac.sum(dim=-1, keepdim=True)
  conc = thermo_x.compute("TPX->V", [temp, pres, xfrac])

- w[snapy.index.ipr, ..., i] = pres;
+ w[snapy.index.ipr, ..., i] = pres
  w[snapy.index.idn, ..., i] = thermo_x.compute("V->D", [conc])
- w[snapy.index.icy:, ...,i] = thermo_x.compute("X->Y", [xfrac])
+ w[snapy.index.icy :, ..., i] = thermo_x.compute("X->Y", [xfrac])

  dz = coord.buffer("dx1f")[i]
  if method.split("-")[0] == "dry":
- temp, pres, xfrac = integrate_dry_adiabat(thermo_x, temp, pres, xfrac, grav, dz);
+ temp, pres, xfrac = integrate_dry_adiabat(
+ thermo_x, temp, pres, xfrac, grav, dz
+ )
  elif method.split("-")[0] == "neutral":
- temp, pres, xfrac = integrate_neutral(thermo_x, temp, pres, xfrac, grav, dz);
+ temp, pres, xfrac = integrate_neutral(thermo_x, temp, pres, xfrac, grav, dz)
  else:
- thermo_x.extrapolate_ad(temp, pres, xfrac, grav, dz);
+ thermo_x.extrapolate_ad(temp, pres, xfrac, grav, dz)

  if torch.any(temp < Tmin):
  i_isothermal = i + 1
@@ -276,5 +278,5 @@ def setup_profile(
  conc = thermo_x.compute("TPX->V", [temp, pres, xfrac])
  w[snapy.index.ipr, ..., i] = pres
  w[snapy.index.idn, ..., i] = thermo_x.compute("V->D", [conc])
- w[snapy.index.icy:, ..., i] = thermo_x.compute("X->Y", [xfrac])
+ w[snapy.index.icy :, ..., i] = thermo_x.compute("X->Y", [xfrac])
  return w
@@ -7,11 +7,12 @@ import torch
  import kintera
  import snapy

+
  def write_profile(
  filename: str,
  hydro_w: torch.Tensor,
  block: snapy.MeshBlock,
- ref_pressure: float = 1.e5,
+ ref_pressure: float = 1.0e5,
  comment: Optional[str] = None,
  ) -> None:
  """
@@ -46,27 +47,28 @@ def write_profile(
  raise ValueError("hydro_w must have shape (N, 1, 1, L).")

  # calculate a height grid
- pres = hydro_w[snapy.index.ipr,...].squeeze() / 1.e5 # Pa -> bar
+ pres = hydro_w[snapy.index.ipr, ...].squeeze() / 1.0e5 # Pa -> bar
  zlev_func = interp1d(
  pres.log().cpu().numpy(),
  coord.buffer("x1v").cpu().numpy(),
  kind="linear",
- fill_value="extrapolate")
+ fill_value="extrapolate",
+ )
- zref = zlev_func(np.log(ref_pressure / 1.e5))
- zlev = (coord.buffer("x1v") - zref) / 1.e3 # m -> km
+ zref = zlev_func(np.log(ref_pressure / 1.0e5))
+ zlev = (coord.buffer("x1v") - zref) / 1.0e3 # m -> km

  # calculate temperature
  temp = eos.compute("W->T", (hydro_w,)).squeeze()

  # calculate mole fractions
- xfrac = thermo_y.compute("Y->X", (hydro_w[snapy.index.icy:,...],)).squeeze()
+ xfrac = thermo_y.compute("Y->X", (hydro_w[snapy.index.icy :, ...],)).squeeze()

  # calculate heat capacity
- conc = thermo_x.compute("TPX->V", (temp, pres * 1.e5, xfrac))
+ conc = thermo_x.compute("TPX->V", (temp, pres * 1.0e5, xfrac))
  cpx = thermo_x.compute("TV->cp", (temp, conc)) / conc.sum(-1)

  # calculate entropy
- ens = thermo_x.compute("TPV->S", (temp, pres * 1.e5, conc)) / conc.sum(-1)
+ ens = thermo_x.compute("TPV->S", (temp, pres * 1.0e5, conc)) / conc.sum(-1)

  with open(filename, "w") as f:
  # write comments
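`write_profile` takes the primitive-variable tensor produced by `setup_profile` and writes a text profile whose height grid is referenced to `ref_pressure`. A short hedged continuation of the call sketch shown after the find_init_params diff above; the filename and comment are illustrative:

```python
# Continuing the earlier sketch: dump the 1D profile to a text file, referenced to 1 bar.
from paddle import write_profile

write_profile(
    "profile.txt",
    w,                    # hydro_w tensor with shape (N, 1, 1, L)
    block,
    ref_pressure=1.0e5,   # Pa
    comment="dry-adiabat profile generated with paddle 1.1.4",
)
```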
@@ -1,5 +1,5 @@
  # Saturn Reference Atmosphere Model
- #
+ #
  # Solar abundances relative to H2, enrichments
  #
  # X_He = 0.195, 0.6955
@@ -1,45 +1,48 @@
  from importlib import resources
  from paddle import (
- setup_profile,
- write_profile,
- find_init_params,
- )
+ setup_profile,
+ write_profile,
+ find_init_params,
+ )
  from snapy import (
- MeshBlockOptions,
- MeshBlock,
- )
+ MeshBlockOptions,
+ MeshBlock,
+ )
  from kintera import ThermoX

+
  def setup_saturn_profile():
- path = resources.files("paddle") / "data" / "saturn1d.yaml"
+ # path = resources.files("paddle") / "data" / "saturn1d.yaml"
+ path = "data" / "saturn1d.yaml"
  print(f"Reading input file: {path}")

  op_block = MeshBlockOptions.from_yaml(str(path))
  block = MeshBlock(op_block)

  param = {
- "Ts": 600.,
- "Ps": 100.e5,
- "Tmin": 85.,
+ "Ts": 600.0,
+ "Ps": 100.0e5,
+ "Tmin": 85.0,
  "xH2O": 8.91e-3,
  "xNH3": 3.52e-4,
  "xH2S": 8.08e-5,
  "grav": 10.44,
  }

- #method = "pseudo-adiabat"
- #method = "moist-adiabat"
+ # method = "pseudo-adiabat"
+ # method = "moist-adiabat"
  method = "dry-adiabat"

  param = find_init_params(
- block,
- param,
- target_T=134.,
- target_P=1.e5,
- method=method,
- max_iter=50,
- ftol=1.e-2,
- verbose=True)
+ block,
+ param,
+ target_T=134.0,
+ target_P=1.0e5,
+ method=method,
+ max_iter=50,
+ ftol=1.0e-2,
+ verbose=True,
+ )

  w = setup_profile(block, param, method=method)

@@ -50,5 +53,6 @@ def setup_saturn_profile():
  write_profile("saturn_profile.txt", w, block)
  return w

+
  if __name__ == "__main__":
  w = setup_saturn_profile()
paddle-1.1.1/.gitignore DELETED
@@ -1,14 +0,0 @@
- # Python
- __pycache__/
- *.py[cod]
- *.egg-info/
- .build/
- dist/
- build/
- .coverage
- htmlcov/
- .pytest_cache/
- .venv/
- .env
- *.log
-
paddle-1.1.1/README.md DELETED
@@ -1,8 +0,0 @@
- # paddle
-
- A minimal, utility subroutines for canoe
-
- ## Install
-
- ```bash
- pip install paddle