ladim 1.3.3-py3-none-any.whl → 2.0.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ladim/__init__.py +1 -1
- ladim/config.py +125 -0
- ladim/forcing.py +95 -0
- ladim/grid.py +79 -0
- ladim/gridforce/ROMS.py +32 -5
- ladim/gridforce/__init__.py +0 -1
- ladim/ibms/__init__.py +19 -4
- ladim/main.py +1 -1
- ladim/model.py +64 -29
- ladim/output.py +246 -0
- ladim/plugins/__init__.py +0 -0
- ladim/release.py +234 -0
- ladim/sample.py +3 -0
- ladim/{timestepper.py → solver.py} +5 -5
- ladim/state.py +142 -0
- ladim/tracker.py +165 -0
- ladim/utilities.py +5 -0
- {ladim-1.3.3.dist-info → ladim-2.0.0.dist-info}/METADATA +1 -1
- ladim-2.0.0.dist-info/RECORD +31 -0
- {ladim-1.3.3.dist-info → ladim-2.0.0.dist-info}/WHEEL +1 -1
- ladim/configuration/__init__.py +0 -1
- ladim/configuration/legacy.py +0 -425
- ladim/configuration/modularized.py +0 -22
- ladim/gridforce/legacy.py +0 -103
- ladim/ibms/legacy.py +0 -34
- ladim/output/__init__.py +0 -1
- ladim/output/legacy.py +0 -247
- ladim/release/__init__.py +0 -1
- ladim/release/legacy.py +0 -316
- ladim/state/__init__.py +0 -1
- ladim/state/legacy.py +0 -126
- ladim/tracker/__init__.py +0 -1
- ladim/tracker/legacy.py +0 -225
- ladim-1.3.3.dist-info/RECORD +0 -36
- {ladim-1.3.3.dist-info → ladim-2.0.0.dist-info}/LICENSE +0 -0
- {ladim-1.3.3.dist-info → ladim-2.0.0.dist-info}/entry_points.txt +0 -0
- {ladim-1.3.3.dist-info → ladim-2.0.0.dist-info}/top_level.txt +0 -0
ladim/tracker.py
ADDED
@@ -0,0 +1,165 @@
+from .model import Model, Module
+import numpy as np
+
+
+class Tracker(Module):
+    def __init__(self, model: Model):
+        super().__init__(model)
+
+
+class HorizontalTracker:
+    """The physical particle tracking kernel"""
+
+    def __init__(self, model: Model, method, diffusion) -> None:
+        self.model = model
+
+        if not diffusion:
+            method += "_nodiff"
+        self.integrator = StochasticDifferentialEquationIntegrator.from_keyword(method)
+        self.D = diffusion # [m2.s-1]
+
+    def update(self):
+        state = self.model.state
+        grid = self.model.grid
+        forcing = self.model.forcing
+
+        t0 = self.model.solver.time
+        dt = self.model.solver.step
+
+        act = state['active']
+        X, Y, Z = state['X'][act], state['Y'][act], state['Z'][act]
+        dx, dy = grid.sample_metric(X, Y)
+        r0 = np.stack([X, Y])
+
+        # Set diffusion function
+        def mixing(t, r):
+            _ = t
+            stddev = (2 * self.D) ** 0.5
+            u_diff = stddev / dx
+            return np.broadcast_to(u_diff, r.shape)
+
+        # Set advection function
+        def velocity(t, r):
+            x, y = r.reshape([2, -1])
+            u, v = forcing.velocity(x, y, Z, tstep=(t - t0) / dt)
+            return np.concatenate([u / dx, v / dy]).reshape(r.shape)
+
+        X1, Y1 = self.integrator(velocity, mixing, t0, r0, dt)
+
+        # Land, boundary treatment. Do not move the particles
+        # Consider a sequence of different actions
+        # I = (grid.ingrid(X1, Y1)) & (grid.atsea(X1, Y1))
+        should_move = grid.atsea(X1, Y1)
+        # I = True
+        X[should_move] = X1[should_move]
+        Y[should_move] = Y1[should_move]
+
+        state['X'][act] = X
+        state['Y'][act] = Y
+
+
+class StochasticDifferentialEquationIntegrator:
+    @staticmethod
+    def from_keyword(kw):
+        integrators = {
+            "RK4": RK4Integrator,
+            "EF": EFIntegrator,
+            "RK4_nodiff": RK4NodiffIntegrator,
+            "EF_nodiff": EFNodiffIntegrator,
+        }
+        if kw not in integrators:
+            raise NotImplementedError(f"Unknown integration method: {kw}")
+        return integrators[kw]()
+
+    def __call__(self, vel, mix, t0, r0, dt):
+        """
+        Integrate a stochastic differential equation, one time step
+
+        For both ``vel`` and ``mix``, the calling signature is fun(t, r) where t is
+        scalar time and r is an array of particle positions. The function should
+        return an array of the same shape as r.
+
+        Both ``vel`` and ``mix`` should assume the same coordinate system and units as
+        the input particle positions and time. For instance, if time is given in
+        seconds and positions are given in grid coordinates, velocity should have units
+        of grid cells per second and mixing should have units of grid cells squared per
+        second.
+
+        :param vel: Velocity function. Calling signature is fun(t, r) where t is scalar
+            and r is an array of particle positions. Should return an array of the
+            same shape as r.
+
+        :param mix: Mixing function. Calling signature is fun(t, r) where t is scalar
+            and r is an array of particle positions. Should return an array of the
+            same shape as r.
+
+        :param t0: Initial time
+
+        :param r0: Initial positions
+
+        :param dt: Time step size
+
+        :return: Final particle positions (same shape as r0)
+        """
+
+        return r0
+
+
+class RK4Integrator(StochasticDifferentialEquationIntegrator):
+    def __call__(self, velocity, mixing, t0, r0, dt):
+        u1 = velocity(t0, r0)
+        r1 = r0 + 0.5 * u1 * dt
+
+        u2 = velocity(t0 + 0.5 * dt, r1)
+        r2 = r0 + 0.5 * u2 * dt
+
+        u3 = velocity(t0 + 0.5 * dt, r2)
+        r3 = r0 + u3 * dt
+
+        u4 = velocity(t0 + dt, r3)
+
+        u_adv = (u1 + 2 * u2 + 2 * u3 + u4) / 6.0
+        r_adv = r0 + u_adv * dt
+
+        # Diffusive velocity
+        u_diff = mixing(t0, r_adv)
+        dw = np.random.normal(size=np.size(r0)).reshape(r0.shape) * np.sqrt(dt)
+
+        return r_adv + u_diff * dw
+
+
+class EFIntegrator(StochasticDifferentialEquationIntegrator):
+    def __call__(self, velocity, mixing, t0, r0, dt):
+        u1 = velocity(t0, r0)
+        r_adv = r0 + u1 * dt
+
+        # Diffusive velocity
+        u_diff = mixing(t0, r_adv)
+        dw = np.random.normal(size=np.size(r0)).reshape(r0.shape) * np.sqrt(dt)
+
+        return r_adv + u_diff * dw
+
+
+class RK4NodiffIntegrator(StochasticDifferentialEquationIntegrator):
+    def __call__(self, velocity, mixing, t0, r0, dt):
+        u1 = velocity(t0, r0)
+        r1 = r0 + 0.5 * u1 * dt
+
+        u2 = velocity(t0 + 0.5 * dt, r1)
+        r2 = r0 + 0.5 * u2 * dt
+
+        u3 = velocity(t0 + 0.5 * dt, r2)
+        r3 = r0 + u3 * dt
+
+        u4 = velocity(t0 + dt, r3)
+
+        u_adv = (u1 + 2 * u2 + 2 * u3 + u4) / 6.0
+        r_adv = r0 + u_adv * dt
+
+        return r_adv
+
+
+class EFNodiffIntegrator(StochasticDifferentialEquationIntegrator):
+    def __call__(self, velocity, mixing, t0, r0, dt):
+        u1 = velocity(t0, r0)
+        return r0 + u1 * dt
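Note: the integrator classes above take plain fun(t, r) callables, so they can be exercised outside the model loop. The following is a minimal sketch (not part of the package diff) that calls the new RK4Integrator with a hypothetical solid-body-rotation velocity field and zero mixing; the positions, time step and rotation rate are made-up illustration values.

import numpy as np
from ladim.tracker import RK4Integrator

def velocity(t, r):
    # Hypothetical solid-body rotation about the origin; r has shape (2, nparticles)
    x, y = r
    omega = 1e-3
    return np.stack([-omega * y, omega * x])

def mixing(t, r):
    # Zero diffusive velocity, so the stochastic term vanishes
    return np.zeros_like(r)

r0 = np.stack([np.array([10.0, 20.0]), np.array([5.0, 5.0])])  # rows: X, Y for two particles
X1, Y1 = RK4Integrator()(velocity, mixing, t0=0.0, r0=r0, dt=600.0)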
ladim/utilities.py
CHANGED
@@ -25,3 +25,8 @@ def ingrid(x: float, y: float, subgrid: List[int]) -> bool:
     """Check if position (x, y) is in a subgrid"""
     i0, i1, j0, j1 = subgrid
     return (i0 <= x) & (x <= i1 - 1) & (j0 <= y) & (y <= j1 - 1)
+
+
+def read_timedelta(conf) -> np.timedelta64:
+    time_value, time_unit = conf
+    return np.timedelta64(time_value, time_unit)
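Note: the new read_timedelta helper unpacks a (value, unit) pair and converts it to a numpy.timedelta64. A quick usage sketch with assumed configuration values:

from ladim.utilities import read_timedelta

dt = read_timedelta([600, "s"])           # numpy.timedelta64(600, 's')
outper = read_timedelta((1, "h"))         # numpy.timedelta64(1, 'h')
seconds = dt.astype("m8[s]").astype(int)  # 600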
ladim-2.0.0.dist-info/RECORD
ADDED
@@ -0,0 +1,31 @@
+ladim/__init__.py,sha256=Ns1alQ3-vgq1Hh0sgRGMsYsmH5LnEReoG-M-ajzLg04,51
+ladim/config.py,sha256=6b0ikBrBnq_sSElgO2YcVtJHjQU0nyZsVyPV3q9fy5I,4233
+ladim/forcing.py,sha256=f4PpSwyilSScXeNyorTWLMgVTiat9htSLkCwAkRlJVM,3048
+ladim/grid.py,sha256=m6bQrGJ3cux7rqC8pbRXD86cOI-VQKF-XjP9m1jCIcY,2221
+ladim/main.py,sha256=wO91-nLd1gvF3V20XK5qRvvOIV4xoTOKiWFcbwV2oag,2812
+ladim/model.py,sha256=jpjq_ZSh7ULpwi3_RqDb-p5SG8WcdgCPaBkSpnNWblU,3137
+ladim/output.py,sha256=Rz7iujvS7Z3LoABiJduQqyb3zPswNqhhFsywr3MLsBY,8373
+ladim/release.py,sha256=5QD0hxQNmkxGmtrQX72T5QumHeWLQMFHysJD6Sc0BUE,8067
+ladim/sample.py,sha256=n8wRGd_VsW_qyQe1ZoTpmfZcdcwB929vsM8PoKG6JTs,8292
+ladim/solver.py,sha256=sZvYgOxzJ-EItI-IB2y8_z8Tf-SJAQSrmydlhDRa7ZQ,755
+ladim/state.py,sha256=5ICIiujsV3KOAUYagGLK7YdmhcItgJmtntZeR11nIpw,3781
+ladim/tracker.py,sha256=VVX6T5CqiU6nGSCgLlSCC8w0UYhW273OGFE7ApPjdyI,5091
+ladim/utilities.py,sha256=r7-zShqJhh0cBctDUmtfw-GBOk1eTTYR4S72b0ouiSQ,994
+ladim/gridforce/ROMS.py,sha256=yrMr1GiDgr7VG9V630nHFdeajPY4WHLu653ZRwAk1aw,26888
+ladim/gridforce/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ladim/gridforce/analytical.py,sha256=qI-1LJdjmnwwanzOdrsDZqwGgo73bT75CB7pMaxbHKM,1094
+ladim/gridforce/zROMS.py,sha256=MVA6PQuY1ukvs2E20sWY4kr5-QieeQHTrA5ruxCqbUM,22826
+ladim/ibms/__init__.py,sha256=GOG75jZDmNEiLr8brxrKqIlqVj-pNR7pnPP8FUKE6hU,565
+ladim/ibms/light.py,sha256=POltHmKkX8-q3t9wXyfcseCKEq9Bq-kX1WEJYsr1lNQ,2737
+ladim/plugins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+postladim/__init__.py,sha256=ND-wa5_GNg01Tp0p_1zA1VD804zbfP3o4Cmd8pWU7OE,113
+postladim/cellcount.py,sha256=nCFu9iJmprubn4YmPB4W0VO02GfEb90Iif7D49w1Kss,2054
+postladim/kde_plot.py,sha256=GvMWzT6VxIeXKh1cnqaGzR-4jGG_WIHGMLPpRMXIpo4,1628
+postladim/particlefile.py,sha256=0aif9wYUJ-VrpQKeCef8wB5VCiBB-gWY6sxNCUYviTA,4889
+postladim/variable.py,sha256=-2aihoppYMMmpSpCqaF31XvpinTMaH3Y01-USDIkbBc,6587
+ladim-2.0.0.dist-info/LICENSE,sha256=BgtXyjNr6Ly9nQ7ZLXKpV3r5kWRLnh5MiN0dxp0Bvfc,1085
+ladim-2.0.0.dist-info/METADATA,sha256=ap0yaeqHIhnmQiMwuH8gaCT__YNxESVoudzgFuko9Zs,1841
+ladim-2.0.0.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ladim-2.0.0.dist-info/entry_points.txt,sha256=JDlNJo87GJaOkH0-BpAzTPLCrZcuPSdSlHNQ4XmnoRg,41
+ladim-2.0.0.dist-info/top_level.txt,sha256=TK8Gl7d6MsrAQvqKG4b6YJCbB4UL46Se3SzsI-sJAuc,16
+ladim-2.0.0.dist-info/RECORD,,
ladim/configuration/__init__.py
DELETED
@@ -1 +0,0 @@
-from .modularized import configure
ladim/configuration/legacy.py
DELETED
@@ -1,425 +0,0 @@
-"""
-Configuration module for ladim
-"""
-
-# ----------------------------------
-# Bjørn Ådlandsvik <bjorn@imr.no>
-# Institute of Marine Research
-# 2017-01-17
-# ----------------------------------
-
-# import datetime
-import logging
-from typing import Dict, Any
-import numpy as np
-import yaml
-import yaml.parser
-from netCDF4 import Dataset, num2date
-
-Config = Dict[str, Any] # type of the config dictionary
-
-
-def configure(conf):
-    config = read_configuration(conf)
-    config = to_modularized_conf(config)
-    return config
-
-def configure_ibm(conf: Dict[str, Any]) -> Config:
-    """Configure the IBM module
-
-    Input: Raw conf dictionary from configuration file
-
-    Return: Dictionary with IBM configuration
-
-    If an IBM is used, check that module name is present
-    Special treatment for the variables item
-    Other items are stored for the IBM module
-    """
-
-    logging.info("Configuration: IBM")
-    if conf is None: # No ibm section
-        return {}
-    D = conf.get("ibm") # Empty ibm section
-    if D is None:
-        return {}
-
-    # Mandatory: module name (or obsolete ibm_module)
-    if "module" not in D:
-        if "ibm_module" in D:
-            D["module"] = D.pop("ibm_module")
-        else:
-            logging.error("No IBM module specified")
-            raise SystemExit(1)
-    logging.info(f' {"module":15s}: {D["module"]}')
-
-    # The variables item
-    if "variables" not in D:
-        if "ibm_variables" in D:
-            D["variables"] = D.pop("ibm_variables")
-        # ibm_variables may live under state (obsolete)
-        elif "state" in conf and conf["state"] is not None:
-            if "ibm_variables" in conf.get("state", dict()):
-                D["variables"] = conf["state"]["ibm_variables"]
-        else:
-            D["variables"] = []
-
-    for key in D:
-        if key != "module":
-            logging.info(f" {key:15s}: {D[key]}")
-
-    return D
-
-
-def configure_gridforce(conf: Dict[str, Any]) -> Config:
-    """Parse gridforce related info and pass on
-
-    Input: raw conf dictionary from configuration file
-
-    Return: dictionary with gridforce configuration
-
-    """
-    logging.info("Configuration: gridforce")
-    if conf is None:
-        logging.error("No gridforce section in configuration file")
-        raise SystemExit(1)
-    D = conf.get("gridforce")
-    if D is None:
-        logging.error("Empty gridforce section in configuration file")
-        raise SystemExit(1)
-
-    # module is the only mandatory field
-    if "module" not in D:
-        logging.error("No gridforce module specified")
-        raise SystemExit(1)
-    logging.info(f' {"module":15s}: {D["module"]}')
-
-    # Backwards compability (for ROMS.py)
-    if "files" in conf and conf["files"] is not None:
-        if "grid_file" in conf["files"]:
-            # Give grid_file under gridforce highest priority
-            if "grid_file" not in D:
-                D["grid_file"] = conf["files"]["grid_file"]
-        if "input_file" in conf["files"]:
-            # Give input_file under gridforce highest priority
-            if "input_file" not in D:
-                D["input_file"] = conf["files"]["input_file"]
-
-    for key in D:
-        if key != "module":
-            logging.info(f" {key:15s}: {D[key]}")
-
-    return D
-
-
-# ---------------------------------------
-
-
-def read_configuration(conf) -> Config:
-    """The main configuration handling function
-
-    Input: Name of configuration file in yaml format
-
-    Returns: Configuration dictionary
-
-    """
-
-    config: Config = dict()
-
-
-    # ----------------
-    # Time control
-    # ----------------
-    logging.info("Configuration: Time Control")
-    for name in ["start_time", "stop_time"]:
-        config[name] = np.datetime64(conf["time_control"][name]).astype("M8[s]")
-        logging.info(f" {name.replace('_', ' '):15s}: {config[name]}")
-    # reference_time, default = start_time
-    config["reference_time"] = np.datetime64(
-        conf["time_control"].get("reference_time", config["start_time"])
-    ).astype("M8[s]")
-    logging.info(f' {"reference time":15s}: {config["reference_time"]}')
-
-    # -------------
-    # Files
-    # -------------
-    logging.info("Configuration: Files")
-    #logging.info(f' {"config_stream":15s}: {config_stream}')
-    for name in ["particle_release_file", "output_file"]:
-        config[name] = conf["files"][name]
-        logging.info(f" {name:15s}: {config[name]}")
-
-    try:
-        config["warm_start_file"] = conf["files"]["warm_start_file"]
-        config["start"] = "warm"
-        logging.info(f' {"Warm start from":15s}: {config["warm_start_file"]}')
-    except KeyError:
-        config["start"] = "cold"
-        config["warm_start_file"] = ""
-
-    # Override start time for warm start
-    if config["start"] == "warm":
-        try:
-            nc = Dataset(config["warm_start_file"])
-        except (FileNotFoundError, OSError):
-            logging.error(f"Could not open warm start file,{config['warm_start_file']}")
-            raise SystemExit(1)
-        tvar = nc.variables["time"]
-        # Use last record in restart file
-        warm_start_time = np.datetime64(num2date(tvar[-1], tvar.units))
-        warm_start_time = warm_start_time.astype("M8[s]")
-        config["start_time"] = warm_start_time
-        logging.info(f" Warm start at {warm_start_time}")
-
-    # Variables needed by restart, might be changed
-    # default should be instance variables among release variables
-    try:
-        warm_start_variables = conf["state"]["warm_start_variables"]
-    except KeyError:
-        warm_start_variables = ["X", "Y", "Z"]
-    config["warm_start_variables"] = warm_start_variables
-
-    # --- Time stepping ---
-    logging.info("Configuration: Time Stepping")
-    # Read time step and convert to seconds
-    dt = np.timedelta64(*tuple(conf["numerics"]["dt"]))
-    config["dt"] = int(dt.astype("m8[s]").astype("int"))
-    config["simulation_time"] = np.timedelta64(
-        config["stop_time"] - config["start_time"], "s"
-    ).astype("int")
-    config["numsteps"] = config["simulation_time"] // config["dt"]
-    logging.info(f' {"dt":15s}: {config["dt"]} seconds')
-    logging.info(
-        f' {"simulation time":15s}: {config["simulation_time"] // 3600} hours'
-    )
-    logging.info(f' {"number of time steps":15s}: {config["numsteps"]}')
-
-    # --- Grid ---
-    config["gridforce"] = configure_gridforce(conf)
-
-    # --- Forcing ---
-    try:
-        config["ibm_forcing"] = conf["gridforce"]["ibm_forcing"]
-    except (KeyError, TypeError):
-        config["ibm_forcing"] = []
-    # ibm_forcing used to be a dictionary
-    if isinstance(config["ibm_forcing"], dict):
-        config["ibm_forcing"] = list(config["ibm_forcing"].keys())
-    logging.info(f' {"ibm_forcing":15s}: {config["ibm_forcing"]}')
-
-    # --- IBM ---
-
-    config["ibm"] = configure_ibm(conf)
-    # Make obsolete
-    config["ibm_variables"] = config["ibm"].get("variables", [])
-    config["ibm_module"] = config["ibm"].get("module")
-
-    # --- Particle release ---
-    logging.info("Configuration: Particle Releaser")
-    prelease = conf["particle_release"]
-    try:
-        config["release_type"] = prelease["release_type"]
-    except KeyError:
-        config["release_type"] = "discrete"
-    logging.info(f' {"release_type":15s}: {config["release_type"]}')
-    if config["release_type"] == "continuous":
-        config["release_frequency"] = np.timedelta64(
-            *tuple(prelease["release_frequency"])
-        )
-        logging.info(
-            f' {"release_frequency":11s}: {str(config["release_frequency"])}'
-        )
-    config["release_format"] = conf["particle_release"]["variables"]
-    config["release_dtype"] = dict()
-    # Map from str to converter
-    type_mapping = dict(int=int, float=float, time=np.datetime64, str=str)
-    for name in config["release_format"]:
-        config["release_dtype"][name] = type_mapping[
-            conf["particle_release"].get(name, "float")
-        ]
-        logging.info(f' {name:15s}: {config["release_dtype"][name]}')
-    config["particle_variables"] = prelease["particle_variables"]
-
-    # --- Model state ---
-    # logging.info("Configuration: Model State Variables")
-
-    # -----------------
-    # Output control
-    # -----------------
-    logging.info("Configuration: Output Control")
-    try:
-        output_format = conf["output_variables"]["format"]
-    except KeyError:
-        output_format = "NETCDF3_64BIT_OFFSET"
-    config["output_format"] = output_format
-    logging.info(f' {"output_format":15s}: {config["output_format"]}')
-
-    # Skip output of initial state, useful for restart
-    # with cold start the default is False
-    # with warm start, the default is true
-    try:
-        skip_initial = conf["output_variables"]["skip_initial_output"]
-    except KeyError:
-        skip_initial = config["start"] == "warm"
-    config["skip_initial"] = skip_initial
-    logging.info(f" {'Skip inital output':15s}: {skip_initial}")
-
-    try:
-        numrec = conf["output_variables"]["numrec"]
-    except KeyError:
-        numrec = 0
-    config["output_numrec"] = numrec
-    logging.info(f' {"output_numrec":15s}: {config["output_numrec"]}')
-
-    outper = np.timedelta64(*tuple(conf["output_variables"]["outper"]))
-    outper = outper.astype("m8[s]").astype("int") // config["dt"]
-    config["output_period"] = outper
-    logging.info(f' {"output_period":15s}: {config["output_period"]} timesteps')
-    config["num_output"] = 1 + config["numsteps"] // config["output_period"]
-    logging.info(f' {"numsteps":15s}: {config["numsteps"]}')
-    config["output_particle"] = conf["output_variables"]["particle"]
-    config["output_instance"] = conf["output_variables"]["instance"]
-    config["nc_attributes"] = dict()
-    for name in config["output_particle"] + config["output_instance"]:
-        value = conf["output_variables"][name]
-        if "units" in value:
-            if value["units"] == "seconds since reference_time":
-                timeref = str(config["reference_time"]).replace("T", " ")
-                value["units"] = f"seconds since {timeref}"
-        config["nc_attributes"][name] = conf["output_variables"][name]
-    logging.info(" particle variables")
-    for name in config["output_particle"]:
-        logging.info(8 * " " + name)
-        for item in config["nc_attributes"][name].items():
-            logging.info(12 * " " + "{:11s}: {:s}".format(*item))
-    logging.info(" particle instance variables")
-    for name in config["output_instance"]:
-        logging.info(8 * " " + name)
-        for item in config["nc_attributes"][name].items():
-            logging.info(12 * " " + "{:11s}: {:s}".format(*item))
-
-    # --- Numerics ---
-
-    # dt belongs here, but is already read
-    logging.info("Configuration: Numerics")
-    try:
-        config["advection"] = conf["numerics"]["advection"]
-    except KeyError:
-        config["advection"] = "RK4"
-    logging.info(f' {"advection":15s}: {config["advection"]}')
-    try:
-        diffusion = conf["numerics"]["diffusion"]
-    except KeyError:
-        diffusion = 0.0
-    if diffusion > 0:
-        config["diffusion"] = True
-        config["diffusion_coefficient"] = diffusion
-        logging.info(
-            f' {"diffusion coefficient":15s}: {config["diffusion_coefficient"]}'
-        )
-    else:
-        config["diffusion"] = False
-        logging.info(" no diffusion")
-    config["seed"] = conf['numerics'].get('seed', None)
-
-    return config
-
-
-def to_modularized_conf(c):
-    mconf = dict(
-        release=dict(
-            module='ladim.release.legacy.ParticleReleaser',
-            release_type=c['release_type'],
-            release_format=c['release_format'],
-            release_dtype=c['release_dtype'],
-            start_time=c['start_time'],
-            stop_time=c['stop_time'],
-            particle_release_file=c['particle_release_file'],
-            start=c['start'],
-            dt=c['dt'],
-            warm_start_file=c['warm_start_file'],
-            particle_variables=c['particle_variables'],
-            reference_time=c['reference_time'],
-            release_frequency=c.get("release_frequency", None),
-        ),
-        state=dict(
-            module='ladim.state.legacy.State',
-            particle_variables=c['particle_variables'],
-            start_time=c['start_time'],
-            dt=c['dt'],
-            ibm=dict(variables=c['ibm'].get('variables', [])),
-            release_dtype=c['release_dtype'],
-            warm_start_file=c['warm_start_file'],
-            ibm_forcing=c['ibm_forcing'],
-        ),
-        grid={
-            **c['gridforce'],
-            **dict(
-                start_time=c['start_time'],
-                legacy_module=c['gridforce']['module'],
-                module='ladim.gridforce.legacy.Grid',
-            ),
-        },
-        forcing={
-            **c['gridforce'],
-            **dict(
-                start_time=c['start_time'],
-                stop_time=c['stop_time'],
-                dt=c['dt'],
-                legacy_module=c['gridforce']['module'],
-                module='ladim.gridforce.legacy.Forcing',
-            ),
-        },
-        output=dict(
-            module='ladim.output.legacy.OutPut',
-            output_format=c['output_format'],
-            skip_initial=c['skip_initial'],
-            output_numrec=c['output_numrec'],
-            output_period=c['output_period'],
-            num_output=c['num_output'],
-            output_particle=c['output_particle'],
-            output_instance=c['output_instance'],
-            nc_attributes=c['nc_attributes'],
-            reference_time=c['reference_time'],
-            output_file=c['output_file'],
-            dt=c['dt'],
-        ),
-        timestepper=dict(
-            start=c['start_time'],
-            stop=c['stop_time'],
-            step=c['dt'],
-            order=('release', 'forcing', 'output', 'tracker', 'ibm', 'state'),
-            seed=c.get("seed", None),
-        ),
-        tracker=dict(
-            module='ladim.tracker.legacy.Tracker',
-            advection=c['advection'],
-            diffusion=c['diffusion'],
-            dt=c['dt'],
-            ibm_variables=c['ibm_variables'],
-            diffusion_coefficient=c.get('diffusion_coefficient', 0),
-        ),
-        ibm={
-            **c['ibm'],
-            **dict(
-                module='ladim.ibms.legacy.Legacy_IBM',
-                dt=c['dt'],
-                start_time=c['start_time'],
-                nc_attributes=c['nc_attributes'],
-                output_instance=c['output_instance'],
-                legacy_module=c['ibm'].get('module', None),
-            ),
-        },
-    )
-    return mconf
-
-
-def strdict(d, ind=0):
-    s = ""
-    for k, v in d.items():
-        if isinstance(v, dict):
-            s += (" " * ind) + f"{k}:\n"
-            s += strdict(v, ind + 2)
-        else:
-            s += (" " * ind) + f"{k}: {v}\n"
-
-    return s
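Note: the deleted read_configuration() above walked a fixed set of configuration sections (time_control, files, gridforce, particle_release, output_variables, numerics, plus optional ibm and state), and to_modularized_conf() regrouped the result into per-module sections (release, state, grid, forcing, output, timestepper, tracker, ibm). Below is a minimal sketch of the raw dictionary shape it expected; all file names, dates and values are illustrative assumptions, not taken from the package.

# Assumed values throughout; only the key layout is taken from the legacy reader above.
legacy_conf = {
    "time_control": {"start_time": "2015-04-01 00:00", "stop_time": "2015-04-02 00:00"},
    "files": {"particle_release_file": "release.rls", "output_file": "out.nc"},
    "gridforce": {"module": "ladim.gridforce.ROMS", "input_file": "ocean_avg_*.nc"},
    "particle_release": {
        "variables": ["release_time", "X", "Y", "Z"],
        "release_time": "time",                # dtype keyword per release variable
        "particle_variables": ["release_time"],
    },
    "output_variables": {
        "outper": [1, "h"],
        "particle": ["release_time"],
        "instance": ["pid", "X", "Y", "Z"],
        "release_time": {"units": "seconds since reference_time"},
        "pid": {"long_name": "particle identifier"},
        "X": {"long_name": "particle X-coordinate"},
        "Y": {"long_name": "particle Y-coordinate"},
        "Z": {"long_name": "particle depth", "units": "m"},
    },
    "numerics": {"dt": [600, "s"], "advection": "RK4", "diffusion": 0.1},
}

# In ladim 1.3.3 this dict went through ladim.configuration.configure(legacy_conf),
# which returned the per-module sections listed above.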
ladim/configuration/modularized.py
DELETED
@@ -1,22 +0,0 @@
-def configure(module_conf):
-    import yaml
-
-    # Handle variations of input config type
-    if isinstance(module_conf, dict):
-        config_dict = module_conf
-    else:
-        config_dict = yaml.safe_load(module_conf)
-
-    if 'version' in config_dict:
-        return _versioned_configure(config_dict)
-    else:
-        return _legacy_configure(config_dict)
-
-
-def _versioned_configure(config_dict):
-    return config_dict
-
-
-def _legacy_configure(config_dict):
-    from .legacy import configure as legacy_configure
-    return legacy_configure(config_dict)
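Note: the deleted dispatcher above treated any configuration with a top-level version key as already modularized and returned it unchanged, while anything else (a dict or a YAML stream) was routed through the legacy reader shown in the previous hunk. A behaviour sketch against the 1.3.3 layout:

from ladim.configuration import configure  # 1.3.3 only; removed in 2.0.0

conf = {"version": 2}            # any top-level 'version' key marks the new-style config
assert configure(conf) is conf   # _versioned_configure returns the dict unchanged

# A dict or YAML stream without 'version' is passed to _legacy_configure(),
# i.e. parsed by the legacy reader and converted with to_modularized_conf().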