ladim 1.3.4-py3-none-any.whl → 2.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ladim/release/legacy.py DELETED
@@ -1,316 +0,0 @@
- # Particle release class
-
- # -------------------
- # release.py
- # part of LADiM
- # --------------------
-
- # ----------------------------------
- # Bjørn Ådlandsvik <bjorn@imr.no>
- # Institute of Marine Research
- # Bergen, Norway
- # ----------------------------------
-
- import logging
- import numpy as np
- import pandas as pd
- from typing import Iterator, List
-
- from netCDF4 import Dataset
-
- from ladim.utilities import ingrid
-
-
- # from .gridforce import Grid
-
-
- def mylen(df: pd.DataFrame) -> int:
-     """Number of rows in a DataFrame.
-
-     A workaround for len(), which does not
-     have the expected behaviour when itemizing.
-     """
-     return df.shape[0] if df.ndim > 1 else 1
-     # if df.ndim == 1:
-     #     return 1
-     # else:
-     #     return df.shape[0]
-
-
- class ParticleReleaser(Iterator):
-     """Particle Release Class"""
-
-     def __init__(self, modules, **config) -> None:
-         self.modules = modules
-
-         start_time = pd.to_datetime(config["start_time"])
-         stop_time = pd.to_datetime(config["stop_time"])
-
-         logging.info("Initializing the particle releaser")
-
-         # Read the particle release file
-         A = pd.read_csv(
-             config["particle_release_file"],
-             names=config["release_format"],
-             converters=config["release_dtype"],
-             sep='\\s+',
-         )
-
-         # If no mult column, add a column of ones
-         if "mult" not in config["release_format"]:
-             A["mult"] = 1
-
-         # Use release_time as index
-         A.index = A["release_time"]
-
-         # Conversion from longitude, latitude to grid coordinates
-         if "X" not in A.columns or "Y" not in A.columns:
-             if "lon" not in A.columns or "lat" not in A.columns:
-                 logging.critical("Particle release must have position")
-                 raise SystemExit(3)
-             # else
-             X, Y = modules['grid'].ll2xy(A["lon"], A["lat"])
-             A["lon"] = X
-             A["lat"] = Y
-             A.rename(columns={"lon": "X", "lat": "Y"}, inplace=True)
-
-         # Remove everything after simulation stop time
-         # A = A[A['release_time'] <= stop_time]  # Use < ?
-         A = A[A.index <= stop_time]  # Use < ?
-         if len(A) == 0:  # All releases after simulation stop
-             logging.critical("All particles released after simulation stop")
-             raise SystemExit(3)
-
-         # Optionally, remove everything outside a subgrid
-         try:
-             subgrid: List[int] = config["grid_args"]["subgrid"]
-         except KeyError:
-             subgrid = []
-         if subgrid:
-             lenA = len(A)
-             A = A[ingrid(A["X"], A["Y"], subgrid)]
-             if len(A) < lenA:
-                 logging.warning("Ignoring particle release outside subgrid")
-
-         # file_times = A['release_time'].unique()
-
-         # TODO: Make a function of continuous -> discrete
-         # Fill out if continuous release
-         if config["release_type"] == "continuous":
-
-             # Find first effective release time,
-             # i.e. the last time <= start_time,
-             # and remove too early releases
-             # Can be moved out of if-block?
-             n = np.sum(A.index <= start_time)
-             if n == 0:
-                 logging.warning("No particles released at simulation start")
-                 n = 1
-             # First effective release:
-             # release_time0 = A.iloc[n-1].release_time
-
-             # TODO: Check pandas, better way to delete rows?
-             times = A["release_time"]
-             try:
-                 release_time0 = times[times <= pd.to_datetime(start_time)][-1]
-             except IndexError:
-                 release_time0 = times[0]
-             A = A[A.index >= release_time0]
-
-             # time0 = file_times[0]
-             # time1 = max(file_times[-1], stop_time)
-             time0 = A.index[0]
-             time1 = max(A.index[-1], pd.Timestamp(stop_time))
-             # time1 = max(A['release_time'][-1], stop_time)
-             times = np.arange(time0, time1, config["release_frequency"])
-             # A = A.reindex(times, method='pad')
-             # A['release_time'] = A.index
-             # Reindex does not work with non-unique index
-             I = A.index.unique()
-             J = pd.Series(I, index=I).reindex(times, method="pad")
-             M = {i: mylen(A.loc[i]) for i in I}
-             A = A.loc[J]
-             # Correct time index
-             S: List[int] = []
-             for t in times:
-                 S.extend(M[J[t]] * [t])
-             A["release_time"] = S
-             A.index = S
-
-             # Remove any new instances before start time
-             # n = np.sum(A['release_time'] <= start_time)
-             # if n == 0:
-             #     n = 1
-             # A = A.iloc[n-1:]
-
-             # If first release is early, set it to start time
-             # A['release_time'] = np.maximum(A['release_time'], start_time)
-
-         # If discrete, it is an error to have only early releases
-         # OK if warm start
-         else:  # Discrete
-             if A.index[-1] < start_time and config["start"] == "cold":
-                 logging.error("All particles released before simulation start")
-                 raise SystemExit
-
-         # We are now discrete,
-         # remove everything before start time
-         A = A[A.index >= start_time]
-
-         # If warm start, no new release at start time (already accounted for)
-         if config["start"] == "warm":
-             A = A[A.index > start_time]
-
-         # Compute at which timestep each release should happen
-         timediff = A["release_time"] - config['start_time']
-         dt = np.timedelta64(config["dt"], 's')
-         rel_tstep = np.int32(timediff / dt)
-
-         # Release times, rounded to nearest time step
-         self.times = np.unique(config['start_time'] + rel_tstep * dt)
-
-         logging.info("Number of release times = {}".format(len(self.times)))
-
-         # Compute the release time steps
-         rel_time = self.times - config["start_time"]
-         rel_time = rel_time.astype("m8[s]").astype("int")
-         self.steps = rel_time // config["dt"]
-
-         # Make dataframes for each timeframe
-         # self._B = [x[1] for x in A.groupby('release_time')]
-         self._B = [x[1] for x in A.groupby(rel_tstep)]
-
-         # Read the particle variables
-         self._index = 0  # Index of next release
-         self._particle_count = 0  # Particle counter
-
-         # Handle the particle variables initially
-         # TODO: Need a test to check that this is working properly
-         pvars = dict()
-         for name in config["particle_variables"]:
-             dtype = config["release_dtype"][name]
-             if dtype == np.datetime64:
-                 dtype = np.float64
-             pvars[name] = np.array([], dtype=dtype)
-
-         # Get particle data from warm start
-         if config["start"] == "warm":
-             with Dataset(config["warm_start_file"]) as f:
-                 # warm_particle_count = len(f.dimensions['particle'])
-                 warm_particle_count = np.max(f.variables["pid"][:]) + 1
-                 for name in config["particle_variables"]:
-                     pvars[name] = f.variables[name][:warm_particle_count]
-         else:
-             warm_particle_count = 0
-
-         # Initial number of particles
-         if config["start"] == "warm":
-             init_released = warm_particle_count
-         else:
-             init_released = 0
-         particles_released = [init_released] + [df['mult'].sum() for df in self._B]
-
-         # Loop through the releases, collect particle variable data
-         mult = A['mult'].values
-         for name in config['particle_variables']:
-             val = np.repeat(A[name].values, mult)
-             if config['release_dtype'][name] == np.datetime64:
-                 val = (val - config["reference_time"]) / np.timedelta64(1, 's')
-             pvars[name] = np.concatenate((pvars[name], val))
-
-         self.total_particle_count = warm_particle_count + np.sum(mult)
-         self.particle_variables = pvars
-         logging.info("Total particle count = {}".format(self.total_particle_count))
-         self.particles_released = particles_released
-
-         # Export total particle count
-         # Ugly to write back to config
-         # Easier way to give this piece of information to the
-         # output initialization?
-         config["total_particle_count"] = self.total_particle_count
-
-         # Reset the counter after the particle counting
-         self._index = 0  # Index of next release
-         self._particle_count = warm_particle_count
-
-     def update(self):
-         step = self.modules['state'].timestep
-         grid = self.modules['grid']
-         state = self.modules['state']
-
-         # Extension, allow inactive particles (not moved next time)
-         if "active" in state.ibm_variables:
-             pass
-             # self.active = self.ibm_variables['active']
-         else:  # Default = active
-             state.active = np.ones_like(state.pid)
-
-         # Surface/bottom boundary conditions
-         # Reflective at surface
-         I = state.Z < 0
-         state.Z[I] = -state.Z[I]
-         # Keep just above bottom
-         H = grid.sample_depth(state.X, state.Y)
-         I = state.Z > H
-         state.Z[I] = 0.99 * H[I]
-
-         # Compactify by removing dead particles
-         # Could have a switch to avoid this if no deaths
-         state.pid = state.pid[state.alive]
-         for key in state.instance_variables:
-             state[key] = state[key][state.alive]
-
-         if step in self.steps:
-             V = next(self)
-             self.modules['state'].append(V, self.modules['forcing'])
-
-         # From physics all particles are alive
-         # self.alive = np.ones(len(self), dtype="bool")
-         state.alive = grid.ingrid(state.X, state.Y)
-
-     def __next__(self) -> pd.DataFrame:
-         """Perform the next particle release
-
-         Return a DataFrame with the release info,
-         repeated mult times
-
-         """
-
-         # This should not happen
-         if self._index >= len(self.times):
-             raise StopIteration
-
-         # Skip first release if warm start (should be present in start file)
-         # Not always, make better test
-         # Moving test to state.py
-         # if self._index == 0 and self._particle_count > 0:  # Warm start
-         #     return
-
-         # rel_time = self.times[self._index]
-         # file_time = self._file_times[self._file_index]
-
-         V = self._B[self._index]
-         nnew = V.mult.sum()
-         # Workaround, missing repeat method for pandas DataFrame
-         V0 = V.to_records(index=False)
-         V0 = V0.repeat(V.mult)
-         V = pd.DataFrame(V0)
-         # Do not need the mult column any more
-         V.drop("mult", axis=1, inplace=True)
-         # Buffer the new values
-         # self.V = V
-         # self._file_index += 1
-
-         # Add the new pids
-         nnew = len(V)
-
-         pids = pd.Series(
-             range(self._particle_count, self._particle_count + nnew), name="pid"
-         )
-         V = V.join(pids)
-
-         # Update the counters
-         self._index += 1
-         self._particle_count += len(V)
-
-         return V
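
Note: the removed `__next__` method expands each release row `mult` times by round-tripping through a NumPy record array, since pandas DataFrames have no row-wise `repeat`. A minimal sketch of the same trick (column values are made up for illustration):

    import numpy as np
    import pandas as pd

    # Toy release table: the first row is to be released in triplicate
    df = pd.DataFrame({"mult": [3, 1], "X": [10.0, 20.0], "Y": [5.0, 6.0]})

    # DataFrame -> record array -> repeat each row 'mult' times -> DataFrame
    records = df.to_records(index=False)
    expanded = pd.DataFrame(records.repeat(df["mult"]))
    expanded = expanded.drop("mult", axis=1)  # mult is no longer needed

    print(expanded)  # four rows: (10.0, 5.0) three times, then (20.0, 6.0)
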
ladim/state/__init__.py DELETED
@@ -1 +0,0 @@
- from .legacy import State
ladim/state/legacy.py DELETED
@@ -1,126 +0,0 @@
- """
- Class for the state of the model
- """
-
- import sys
- import os
- import importlib
- import logging
- from typing import Any, Dict, Sized  # mypy
-
- import numpy as np
- from netCDF4 import Dataset, num2date
-
- from ladim.tracker.legacy import Tracker
- from ladim.gridforce.legacy import Grid, Forcing
-
- # ------------------------
-
- Config = Dict[str, Any]
-
-
- class State(Sized):
-     """The model variables at a given time"""
-
-     def __init__(self, modules, **config):
-         self.modules = modules
-
-         logging.info("Initializing the model state")
-
-         self.timestep = 0
-         self.timestamp = config["start_time"].astype("datetime64[s]")
-         self.dt = np.timedelta64(config["dt"], "s")
-         self.position_variables = ["X", "Y", "Z"]
-         if "ibm" in config and "variables" in config["ibm"]:
-             self.ibm_variables = config["ibm"]["variables"]
-         else:
-             self.ibm_variables = config.get("ibm_variables", [])
-         self.ibm_forcing = config["ibm_forcing"]
-         self.particle_variables = config["particle_variables"]
-         self.instance_variables = self.position_variables + [
-             var for var in self.ibm_variables if var not in self.particle_variables
-         ]
-
-         self.pid = np.array([], dtype=int)
-         for name in self.instance_variables:
-             setattr(self, name, np.array([], dtype=float))
-
-         for name in self.particle_variables:
-             setattr(self, name, np.array([], dtype=config["release_dtype"][name]))
-
-         self.dt = config["dt"]
-         self.alive = []
-
-         # self.num_particles = len(self.X)
-         self.nnew = 0  # Modify with warm start?
-
-         if config["warm_start_file"]:
-             self.warm_start(config, self.modules['grid'])
-
-     def __getitem__(self, name: str) -> Any:
-         return getattr(self, name)
-
-     def __setitem__(self, name: str, value: Any) -> None:
-         return setattr(self, name, value)
-
-     def __len__(self) -> int:
-         return len(getattr(self, "X"))
-
-     def append(self, new: Dict[str, Any], forcing: Forcing) -> None:
-         """Append new particles to the model state"""
-         nnew = len(new["pid"])
-         self.pid = np.concatenate((self.pid, new["pid"]))
-         for name in self.instance_variables:
-             if name in new:
-                 self[name] = np.concatenate((self[name], new[name]))
-             elif name in self.ibm_forcing:
-                 # Take .values, as Z must be a numpy array
-                 self[name] = np.concatenate(
-                     (self[name], forcing.field(new["X"], new["Y"], new["Z"].values, name))
-                 )
-             else:  # Initialize to zero
-                 self[name] = np.concatenate((self[name], np.zeros(nnew)))
-         self.nnew = nnew
-
-     def update(self):
-         """Update the model state to the next timestep"""
-         self.timestep += 1
-         self.timestamp += np.timedelta64(self.dt, "s")
-
-         if self.timestamp.astype("int") % 3600 == 0:  # New hour
-             logging.info("Model time = {}".format(self.timestamp.astype("M8[h]")))
-
-     def warm_start(self, config: Config, grid: Grid) -> None:
-         """Perform a warm (re)start"""
-
-         warm_start_file = config["warm_start_file"]
-         try:
-             f = Dataset(warm_start_file)
-         except FileNotFoundError:
-             logging.critical(f"Cannot open warm start file: {warm_start_file}")
-             raise SystemExit(1)
-
-         logging.info("Reading warm start file")
-         # Using last record in file
-         tvar = f.variables["time"]
-         warm_start_time = np.datetime64(num2date(tvar[-1], tvar.units))
-         # Not needed anymore, explicitly set in configuration
-         # if warm_start_time != config['start_time']:
-         #     print("warm start time = ", warm_start_time)
-         #     print("start time = ", config['start_time'])
-         #     logging.error("Warm start time and start time differ")
-         #     raise SystemExit(1)
-
-         pstart = f.variables["particle_count"][:-1].sum()
-         pcount = f.variables["particle_count"][-1]
-         self.pid = f.variables["pid"][pstart : pstart + pcount]
-         # Give error if variable not in restart file
-         for var in config["warm_start_variables"]:
-             logging.debug(f"Reading {var} from warm start file")
-             self[var] = f.variables[var][pstart : pstart + pcount]
-
-         # Remove particles near edge of grid
-         I = grid.ingrid(self["X"], self["Y"])
-         self.pid = self.pid[I]
-         for var in config["warm_start_variables"]:
-             self[var] = self[var][I]
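
Note: the removed `warm_start` reads only the last time frame of the restart file: summing all but the last entry of `particle_count` gives that frame's offset along the flat particle-instance axis. The same indexing with plain NumPy arrays (toy data, not a real LADiM output file):

    import numpy as np

    # particle_count[k] = number of particle instances written at time frame k
    particle_count = np.array([2, 3, 4])
    pid = np.array([0, 1,  0, 1, 2,  0, 1, 2, 3])  # flat particle-instance axis

    pstart = particle_count[:-1].sum()  # 5 instances precede the last frame
    pcount = particle_count[-1]         # 4 instances in the last frame
    print(pid[pstart:pstart + pcount])  # [0 1 2 3]
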
ladim/tracker/__init__.py DELETED
@@ -1 +0,0 @@
- from .legacy import Tracker
ladim/tracker/legacy.py DELETED
@@ -1,225 +0,0 @@
- # ------------------------------------
- # tracker.py
- # Part of the LADiM Model
- #
- # Bjørn Ådlandsvik, <bjorn@imr.no>
- # Institute of Marine Research
- #
- # Licensed under the MIT license
- # ------------------------------------
-
- import logging
- from typing import Any, Tuple
- import numpy as np
-
- from ladim.gridforce.legacy import Grid, Forcing
-
- # from .state import State  # Circular import
-
- Velocity = Tuple[np.ndarray, np.ndarray]
- State = Any  # Could not find anything better
-
-
- class Tracker:
-     """The physical particle tracking kernel"""
-
-     def __init__(self, modules, **config) -> None:
-         self.modules = modules
-
-         logging.info("Initiating the particle tracking")
-         self.dt = config["dt"]
-         if config["advection"]:
-             self.advect = getattr(self, config["advection"])
-         else:
-             self.advect = None
-         # Read from config:
-         self.diffusion = config["diffusion"]
-         if self.diffusion:
-             self.D = config["diffusion_coefficient"]  # [m2.s-1]
-         self.active_check = 'active' in config['ibm_variables']
-
-     def update(self):
-         self.move_particles(
-             self.modules['grid'],
-             self.modules['forcing'],
-             self.modules['state'],
-         )
-
-     def move_particles(self, grid: Grid, forcing: Forcing, state: State) -> None:
-         """Move the particles"""
-
-         X, Y = state.X, state.Y
-         dx, dy = grid.sample_metric(X, Y)
-         self.dx, self.dy = dx, dy
-         dt = self.dt
-         self.num_particles = len(X)
-         # Make more elegant, need not do every time
-         # Works for C-grid
-         self.xmin = grid.xmin + 0.01
-         self.xmax = grid.xmax - 0.01
-         self.ymin = grid.ymin + 0.01
-         self.ymax = grid.ymax - 0.01
-
-         U = np.zeros(self.num_particles, dtype=float)
-         V = np.zeros(self.num_particles, dtype=float)
-
-         # --- Advection ---
-         if self.advect:
-             Uadv, Vadv = self.advect(forcing, state)
-             U += Uadv
-             V += Vadv
-
-         # --- Diffusion ---
-         if self.diffusion:
-             Udiff, Vdiff = self.diffuse()
-             U += Udiff
-             V += Vdiff
-
-         # --- Move the particles
-
-         # New position, if OK
-         X1 = X + U * dt / dx
-         Y1 = Y + V * dt / dy
-
-         # Do not move out of grid
-         I = ~grid.ingrid(X1, Y1)
-         X1[I] = X[I]
-         Y1[I] = Y[I]
-         # Kill particles trying to move out of the grid
-         state.alive[I] = False
-
-         if self.active_check:
-             # Do not move inactive particles
-             I = state.active < 1
-             X1[I] = X[I]
-             Y1[I] = Y[I]
-
-         # Land, boundary treatment. Do not move the particles
-         # Consider a sequence of different actions
-         # I = (grid.ingrid(X1, Y1)) & (grid.atsea(X1, Y1))
-         I = grid.atsea(X1, Y1)
-         # I = True
-         X[I] = X1[I]
-         Y[I] = Y1[I]
-
-         state.X = X
-         state.Y = Y
-
-     def EF(self, forcing: Forcing, state: State) -> Velocity:
-         """Euler-Forward advection"""
-
-         X, Y, Z = state["X"], state["Y"], state["Z"]
-         # dt = self.dt
-         # pm, pn = grid.sample_metric(X, Y)
-
-         U, V = forcing.velocity(X, Y, Z)
-
-         return U, V
-
-     def RK2a(self, forcing: Forcing, state: State) -> Velocity:
-         """Runge-Kutta second order (explicit midpoint scheme)"""
-
-         X, Y, Z = state["X"], state["Y"], state["Z"]
-         dt = self.dt
-
-         U, V = forcing.velocity(X, Y, Z)
-         X1 = X + 0.5 * U * dt / self.dx
-         Y1 = Y + 0.5 * V * dt / self.dy
-
-         U, V = forcing.velocity(X1, Y1, Z, tstep=0.5)
-         return U, V
-
-     def RK2b(self, forcing: Forcing, state: State) -> Velocity:
-         """Runge-Kutta second order (explicit midpoint scheme)
-
-         This version does not sample velocities outside the grid
-         """
-
-         X, Y, Z = state["X"], state["Y"], state["Z"]
-         dt = self.dt
-
-         U, V = forcing.velocity(X, Y, Z)
-         X1 = X + 0.5 * U * dt / self.dx
-         Y1 = Y + 0.5 * V * dt / self.dy
-         X1.clip(self.xmin, self.xmax, out=X1)
-         Y1.clip(self.ymin, self.ymax, out=Y1)
-
-         U, V = forcing.velocity(X1, Y1, Z, tstep=0.5)
-         return U, V
-
-     RK2 = RK2b
-
-     def RK4a(self, forcing: Forcing, state: State) -> Velocity:
-         """Runge-Kutta fourth order advection"""
-
-         X, Y, Z = state["X"], state["Y"], state["Z"]
-         dt = self.dt
-         dx, dy = self.dx, self.dy
-
-         U1, V1 = forcing.velocity(X, Y, Z, tstep=0.0)
-         X1 = X + 0.5 * U1 * dt / dx
-         Y1 = Y + 0.5 * V1 * dt / dy
-
-         U2, V2 = forcing.velocity(X1, Y1, Z, tstep=0.5)
-         X2 = X + 0.5 * U2 * dt / dx
-         Y2 = Y + 0.5 * V2 * dt / dy
-
-         U3, V3 = forcing.velocity(X2, Y2, Z, tstep=0.5)
-         X3 = X + U3 * dt / dx
-         Y3 = Y + V3 * dt / dy
-
-         U4, V4 = forcing.velocity(X3, Y3, Z, tstep=1.0)
-
-         U = (U1 + 2 * U2 + 2 * U3 + U4) / 6.0
-         V = (V1 + 2 * V2 + 2 * V3 + V4) / 6.0
-
-         return U, V
-
-     def RK4b(self, forcing: Forcing, state: State) -> Velocity:
-         """Runge-Kutta fourth order advection
-
-         This version does not sample velocities outside the grid
-
-         """
-
-         X, Y, Z = state["X"], state["Y"], state["Z"]
-         dt = self.dt
-         dx, dy = self.dx, self.dy
-         xmin, xmax, ymin, ymax = self.xmin, self.xmax, self.ymin, self.ymax
-
-         U1, V1 = forcing.velocity(X, Y, Z, tstep=0.0)
-         X1 = X + 0.5 * U1 * dt / dx
-         Y1 = Y + 0.5 * V1 * dt / dy
-         X1.clip(xmin, xmax, out=X1)
-         Y1.clip(ymin, ymax, out=Y1)
-
-         U2, V2 = forcing.velocity(X1, Y1, Z, tstep=0.5)
-         X2 = X + 0.5 * U2 * dt / dx
-         Y2 = Y + 0.5 * V2 * dt / dy
-         X2.clip(xmin, xmax, out=X2)
-         Y2.clip(ymin, ymax, out=Y2)
-
-         U3, V3 = forcing.velocity(X2, Y2, Z, tstep=0.5)
-         X3 = X + U3 * dt / dx
-         Y3 = Y + V3 * dt / dy
-         X3.clip(xmin, xmax, out=X3)
-         Y3.clip(ymin, ymax, out=Y3)
-
-         U4, V4 = forcing.velocity(X3, Y3, Z, tstep=1.0)
-
-         U = (U1 + 2 * U2 + 2 * U3 + U4) / 6.0
-         V = (V1 + 2 * V2 + 2 * V3 + V4) / 6.0
-
-         return U, V
-
-     RK4 = RK4b
-
-     def diffuse(self) -> Velocity:
-         """Random walk diffusion"""
-
-         # Diffusive velocity
-         stddev = (2 * self.D / self.dt) ** 0.5
-         U = stddev * np.random.normal(size=self.num_particles)
-         V = stddev * np.random.normal(size=self.num_particles)
-
-         return U, V
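
Note: the removed `diffuse` method scales a standard normal draw by sqrt(2D/dt), so the displacement U*dt over one time step has variance 2*D*dt, as expected of a random walk with horizontal diffusivity D. A quick numerical check (arbitrary values):

    import numpy as np

    D, dt = 0.1, 600.0            # diffusivity [m2.s-1], time step [s]
    rng = np.random.default_rng(42)

    stddev = (2 * D / dt) ** 0.5  # diffusive velocity scale, as in diffuse()
    dx = stddev * rng.normal(size=1_000_000) * dt  # one-step displacements

    print(dx.var())               # close to 2 * D * dt = 120 [m2]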