ladim 2.1.5__tar.gz → 2.1.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. {ladim-2.1.5 → ladim-2.1.6}/PKG-INFO +3 -1
  2. {ladim-2.1.5 → ladim-2.1.6}/ladim/__init__.py +1 -1
  3. ladim-2.1.6/ladim/forcing.py +548 -0
  4. {ladim-2.1.5 → ladim-2.1.6}/ladim.egg-info/PKG-INFO +3 -1
  5. {ladim-2.1.5 → ladim-2.1.6}/ladim.egg-info/requires.txt +2 -0
  6. {ladim-2.1.5 → ladim-2.1.6}/setup.cfg +2 -0
  7. ladim-2.1.6/tests/test_forcing.py +167 -0
  8. ladim-2.1.5/ladim/forcing.py +0 -100
  9. ladim-2.1.5/tests/test_forcing.py +0 -10
  10. {ladim-2.1.5 → ladim-2.1.6}/LICENSE +0 -0
  11. {ladim-2.1.5 → ladim-2.1.6}/README.md +0 -0
  12. {ladim-2.1.5 → ladim-2.1.6}/ladim/__main__.py +0 -0
  13. {ladim-2.1.5 → ladim-2.1.6}/ladim/config.py +0 -0
  14. {ladim-2.1.5 → ladim-2.1.6}/ladim/grid.py +0 -0
  15. {ladim-2.1.5 → ladim-2.1.6}/ladim/gridforce/ROMS.py +0 -0
  16. {ladim-2.1.5 → ladim-2.1.6}/ladim/gridforce/__init__.py +0 -0
  17. {ladim-2.1.5 → ladim-2.1.6}/ladim/gridforce/analytical.py +0 -0
  18. {ladim-2.1.5 → ladim-2.1.6}/ladim/gridforce/zROMS.py +0 -0
  19. {ladim-2.1.5 → ladim-2.1.6}/ladim/ibms/__init__.py +0 -0
  20. {ladim-2.1.5 → ladim-2.1.6}/ladim/ibms/light.py +0 -0
  21. {ladim-2.1.5 → ladim-2.1.6}/ladim/main.py +0 -0
  22. {ladim-2.1.5 → ladim-2.1.6}/ladim/model.py +0 -0
  23. {ladim-2.1.5 → ladim-2.1.6}/ladim/output.py +0 -0
  24. {ladim-2.1.5 → ladim-2.1.6}/ladim/plugins/__init__.py +0 -0
  25. {ladim-2.1.5 → ladim-2.1.6}/ladim/release.py +0 -0
  26. {ladim-2.1.5 → ladim-2.1.6}/ladim/sample.py +0 -0
  27. {ladim-2.1.5 → ladim-2.1.6}/ladim/solver.py +0 -0
  28. {ladim-2.1.5 → ladim-2.1.6}/ladim/state.py +0 -0
  29. {ladim-2.1.5 → ladim-2.1.6}/ladim/tracker.py +0 -0
  30. {ladim-2.1.5 → ladim-2.1.6}/ladim/utilities.py +0 -0
  31. {ladim-2.1.5 → ladim-2.1.6}/ladim.egg-info/SOURCES.txt +0 -0
  32. {ladim-2.1.5 → ladim-2.1.6}/ladim.egg-info/dependency_links.txt +0 -0
  33. {ladim-2.1.5 → ladim-2.1.6}/ladim.egg-info/entry_points.txt +0 -0
  34. {ladim-2.1.5 → ladim-2.1.6}/ladim.egg-info/top_level.txt +0 -0
  35. {ladim-2.1.5 → ladim-2.1.6}/postladim/__init__.py +0 -0
  36. {ladim-2.1.5 → ladim-2.1.6}/postladim/cellcount.py +0 -0
  37. {ladim-2.1.5 → ladim-2.1.6}/postladim/kde_plot.py +0 -0
  38. {ladim-2.1.5 → ladim-2.1.6}/postladim/particlefile.py +0 -0
  39. {ladim-2.1.5 → ladim-2.1.6}/postladim/variable.py +0 -0
  40. {ladim-2.1.5 → ladim-2.1.6}/pyproject.toml +0 -0
  41. {ladim-2.1.5 → ladim-2.1.6}/tests/test_config.py +0 -0
  42. {ladim-2.1.5 → ladim-2.1.6}/tests/test_grid.py +0 -0
  43. {ladim-2.1.5 → ladim-2.1.6}/tests/test_ladim.py +0 -0
  44. {ladim-2.1.5 → ladim-2.1.6}/tests/test_output.py +0 -0
  45. {ladim-2.1.5 → ladim-2.1.6}/tests/test_release.py +0 -0
  46. {ladim-2.1.5 → ladim-2.1.6}/tests/test_solver.py +0 -0
  47. {ladim-2.1.5 → ladim-2.1.6}/tests/test_utilities.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ladim
3
- Version: 2.1.5
3
+ Version: 2.1.6
4
4
  Summary: Lagrangian Advection and Diffusion Model
5
5
  Home-page: https://github.com/pnsaevik/ladim
6
6
  Author: Bjørn Ådlandsvik
@@ -18,6 +18,8 @@ Requires-Python: >=3.7
18
18
  Description-Content-Type: text/markdown
19
19
  License-File: LICENSE
20
20
  Requires-Dist: netCDF4
21
+ Requires-Dist: numba
22
+ Requires-Dist: numexpr
21
23
  Requires-Dist: numpy
22
24
  Requires-Dist: pandas
23
25
  Requires-Dist: pyarrow
@@ -1,3 +1,3 @@
1
- __version__ = '2.1.5'
1
+ __version__ = '2.1.6'
2
2
 
3
3
  from .main import main, run
@@ -0,0 +1,548 @@
1
+ import typing
2
+ if typing.TYPE_CHECKING:
3
+ from ladim.model import Model
4
+ import numexpr
5
+ import string
6
+ import numpy as np
7
+ from numba import njit
8
+
9
+
10
class Forcing:
    """Abstract base class for forcing modules.

    Concrete implementations supply ocean fields (currents, tracers) to the
    particle-tracking core. The class doubles as a factory namespace.
    """

    @staticmethod
    def from_roms(**conf):
        """Factory: build the ROMS-based forcing implementation."""
        return RomsForcing(**conf)

    def velocity(self, X, Y, Z, tstep=0.0):
        """Return particle velocities at the given positions; subclasses implement."""
        raise NotImplementedError

    def update(self, model: "Model"):
        """Advance forcing fields to the model's current time; subclasses implement."""
        raise NotImplementedError
20
+
21
+
22
class RomsForcing(Forcing):
    def __init__(self, file, variables=None, **conf):
        """
        Forcing module which uses output data from the ROMS ocean model

        :param file: Glob pattern for the input files
        :param variables: A mapping of variable names to interpolation
            specifications. Each interpolation specification consists of 0-4
            of the letters "xyzt". Coordinates that are listed in the string are
            interpolated linearly, while the remaining ones use nearest-neighbor
            interpolation. Some default configurations are defined:

            .. code-block:: json

                {
                    "temp": "xyzt",
                    "salt": "xyzt",
                    "u": "xt",
                    "v": "yt",
                    "w": "zt",
                }


        :param conf: Legacy config dict
        """
        # Apply default interpolation configs; user-supplied entries override
        variables = variables or dict()
        default_vars = dict(u="xt", v="yt", w="zt", temp="xyzt", salt="xyzt")
        self.variables = {**default_vars, **variables}

        # Translate the modern config keys into the legacy config layout
        # expected by the old gridforce Forcing classes.
        grid_ref = GridReference()
        legacy_conf = dict(
            gridforce=dict(input_file=file, **conf),
            ibm_forcing=conf.get('ibm_forcing', []),
            start_time=conf.get('start_time', None),
            stop_time=conf.get('stop_time', None),
            dt=conf.get('dt', None),
        )
        if conf.get('subgrid', None) is not None:
            legacy_conf['gridforce']['subgrid'] = conf['subgrid']

        from .utilities import load_class
        LegacyForcing = load_class(conf.get('legacy_module', 'ladim.gridforce.ROMS.Forcing'))

        # Allow gridforce module in current directory
        import sys
        import os
        sys.path.insert(0, os.getcwd())
        # Import correct gridforce_module
        self.forcing = LegacyForcing(legacy_conf, grid_ref)
        # self.steps = self.forcing.steps
        # self.U = self.forcing.U
        # self.V = self.forcing.V

    def update(self, model: "Model"):
        """Advance the legacy forcing to the model's current time step and
        sample the configured fields into matching state variables."""
        # Whole time steps elapsed since simulation start
        elapsed = model.solver.time - model.solver.start
        t = elapsed // model.solver.step

        # Give the legacy grid access to the live model before updating
        # noinspection PyProtectedMember
        self.forcing._grid.modules = model
        self.forcing.update(t)

        # Update state variables by sampling the field
        # (only for variables that actually exist in the particle state)
        x, y, z = model.state['X'], model.state['Y'], model.state['Z']
        for v in self.variables:
            if v in model.state:
                model.state[v] = self.field(x, y, z, v)

    def velocity(self, X, Y, Z, tstep=0.0):
        """Delegate velocity sampling to the legacy forcing."""
        return self.forcing.velocity(X, Y, Z, tstep=tstep)

    def field(self, X, Y, Z, name):
        """Delegate scalar field sampling to the legacy forcing."""
        return self.forcing.field(X, Y, Z, name)

    def close(self):
        """Close the legacy forcing and release its resources."""
        return self.forcing.close()
97
+
98
+
99
class GridReference:
    """Late-binding proxy for the model's grid.

    The legacy gridforce classes receive this object at construction time,
    before the model exists; ``modules`` is assigned later (see
    ``RomsForcing.update``). Attribute access is forwarded to
    ``modules.grid.grid``.
    """

    def __init__(self):
        # Filled in with the live model before each forcing update
        self.modules = None

    def __getattr__(self, item):
        # Only called when normal lookup fails, i.e. for delegated names
        return getattr(self.modules.grid.grid, item)
105
+
106
+
107
def load_netcdf_chunk(url, varname, subset):
    """
    Download, unzip and decode a netcdf chunk from file or url
    """
    import xarray as xr
    with xr.open_dataset(url) as dset:
        chunk = dset.variables[varname][subset].values
    # Velocity fields replace missing values (land) with zero
    if varname in ['u', 'v', 'w']:
        chunk = np.nan_to_num(chunk)
    return chunk
117
+
118
+
119
class ChunkCache:
    """
    A cache for storing and sharing chunks of data using shared memory.

    This class manages a memory block divided into a header, an LRU table,
    an index, and a data section. It is designed for efficient inter-process
    communication of chunked data arrays: any process that knows the name of
    the shared memory block can attach to the same cache.

    :ivar mem: SharedMemory object representing the memory block.
    :ivar num_chunks: Number of slots/chunks in the cache (read-only).
    :ivar chunksize: Size of each chunk (read-only).
    :ivar datatype: Data type of the stored chunks (read-only).
    :ivar itemsize: Size in bytes of each data item (read-only).
    :ivar lru: Slot numbers ordered from most to least recently used.
    :ivar chunk_id: Array of chunk IDs for tracking which data is stored in
        each slot (-1 marks a free slot).
    :ivar data: 2D array holding the actual chunked data.
    """
    def __init__(self, name: str):
        """
        Attach to an existing shared memory block and map the cache structure.

        :param name: The name of the shared memory block to attach to.
        """
        from multiprocessing.shared_memory import SharedMemory
        mem = SharedMemory(name=name, create=False)
        self.mem = mem

        # Header block: four consecutive 8-byte fields, exposed as read-only
        # zero-dimensional arrays backed directly by the shared buffer.
        self.num_chunks = np.ndarray(shape=(), dtype=np.int64, buffer=mem.buf[0:8])
        self.chunksize = np.ndarray(shape=(), dtype=np.int64, buffer=mem.buf[8:16])
        self.datatype = np.ndarray(shape=(), dtype='S8', buffer=mem.buf[16:24])
        self.itemsize = np.ndarray(shape=(), dtype=np.int64, buffer=mem.buf[24:32])
        self.num_chunks.setflags(write=False)
        self.chunksize.setflags(write=False)
        self.datatype.setflags(write=False)
        self.itemsize.setflags(write=False)

        # LRU block: one int16 slot number per chunk, most recent first
        lru_start = 32
        lru_stop = lru_start + 2*self.num_chunks
        self.lru = np.ndarray(
            shape=(self.num_chunks,),
            dtype=np.int16,
            buffer=mem.buf[lru_start:lru_stop])

        # Index block: one int64 chunk id per slot (-1 means "free")
        idx_start = lru_stop
        idx_stop = idx_start + 8*self.num_chunks
        self.chunk_id = np.ndarray(
            shape=(self.num_chunks, ),
            dtype=np.int64,
            buffer=mem.buf[idx_start:idx_stop])

        # Data block: num_chunks rows of chunksize items each
        dat_start = idx_stop
        dat_stop = dat_start + self.num_chunks * self.chunksize * self.itemsize
        self.data = np.ndarray(
            shape=(self.num_chunks, self.chunksize),
            dtype=self.datatype.item().decode('ascii'),
            buffer=mem.buf[dat_start:dat_stop])

    def _update_lru(self, slot: int) -> None:
        """
        Move the given slot to the front (most recently used) in the LRU table.
        """
        update_lru(self.lru, slot)

    def read(self, slot: int) -> np.ndarray:
        """
        Read data from the given slot and update the LRU table.

        :param slot: The slot index to read
        :return: The data in the slot
        """
        self._update_lru(slot)
        return self.data[slot, :]

    def write(self, data: np.ndarray, slot: int) -> None:
        """
        Overwrite the data in the given slot and update the LRU table.

        :param data: 1D numpy array of length self.chunksize and dtype self.datatype
        :param slot: The slot index to overwrite
        """
        self._update_lru(slot)
        self.data[slot, :] = data

    def __enter__(self) -> "ChunkCache":
        """
        Enter the runtime context related to this object.
        Returns self for use in 'with' statements.

        :return: self
        """
        return self

    def __exit__(self, type: type, value: Exception, tb: object) -> None:
        """
        Exit the runtime context and close the shared memory.

        :param type: Exception type
        :param value: Exception value
        :param tb: Traceback object
        """
        self.close()

    def __setattr__(self, name: str, value: object) -> None:
        """
        Prevent reassignment of attributes after initialization.
        Raises AttributeError if an attribute is already set.

        :param name: Attribute name
        :param value: Attribute value
        :raises AttributeError: If attribute is already set
        """
        # The memory-mapped views must never be rebound by accident;
        # first assignment (during __init__) is allowed, later ones are not.
        if hasattr(self, name):
            raise AttributeError(f"Cannot reassign attribute '{name}'")
        super().__setattr__(name, value)

    @staticmethod
    def create(slots: int, chunksize: int, datatype: str = 'f4') -> "ChunkCache":
        """
        Create a new shared memory block and initialize a ChunkCache.

        :param slots: Number of slots/chunks in the cache.
        :param chunksize: Size of each chunk.
        :param datatype: Numpy dtype string for the data (default 'f4').
        :return: An instance attached to the new shared memory block.
        :raises ValueError: If the canonical dtype name is longer than the
            8 bytes reserved for it in the header.
        """
        from multiprocessing.shared_memory import SharedMemory

        test_item = np.empty((), dtype=datatype)
        str_dtype = str(test_item.dtype)
        if len(str_dtype) > 8:
            # The header stores the dtype name in a fixed 8-byte field
            raise ValueError(f'Unsupported data type: {str_dtype}')

        # Reserve memory space for the cache block
        size_header_block = 32
        size_lru_block = 2 * slots  # int16
        size_index_block = 8 * slots  # int64
        size_data_block = slots * chunksize * test_item.itemsize
        total_bytes = size_header_block + size_lru_block + size_index_block + size_data_block
        mem = SharedMemory(create=True, size=total_bytes)

        # Write some header information
        mem_slots = np.ndarray(shape=(), dtype=np.int64, buffer=mem.buf[0:8])
        mem_slots[...] = slots
        mem_chunksize = np.ndarray(shape=(), dtype=np.int64, buffer=mem.buf[8:16])
        mem_chunksize[...] = chunksize
        mem_datatype = np.ndarray(shape=(), dtype='S8', buffer=mem.buf[16:24])
        mem_datatype[...] = str_dtype
        mem_itemsize = np.ndarray(shape=(), dtype=np.int64, buffer=mem.buf[24:32])
        mem_itemsize[...] = test_item.itemsize

        # LRU block: initially the identity ordering 0..slots-1
        lru_start = size_header_block
        mem_lru = np.ndarray(
            shape=(slots,),
            dtype=np.int16,
            buffer=mem.buf[lru_start:lru_start + size_lru_block])
        mem_lru[...] = np.arange(slots, dtype=np.int16)

        # Index block: all slots start out free (-1)
        index_start = lru_start + size_lru_block
        mem_chunkid = np.ndarray(
            shape=(slots, ),
            dtype=np.int64,
            buffer=mem.buf[index_start:index_start + size_index_block])
        mem_chunkid[...] = -1

        # Data block
        # (no need to initialize, will be written on use)
        # NOTE(review): the local SharedMemory handle created here is never
        # closed or unlinked; the returned instance attaches by name. Confirm
        # that cleanup responsibility lies with the cache owner.
        return ChunkCache(mem.name)

    def close(self) -> None:
        """
        Close the shared memory block.
        """
        self.mem.close()

    def contains(self, id: int) -> bool:
        """
        Check if the cache contains a chunk with the given id.

        :param id: The chunk id to check
        :return: True if the chunk is in the cache, False otherwise
        """
        return indexof(self.chunk_id, id) >= 0

    def push(self, data: np.ndarray, id: int) -> None:
        """
        Push a chunk of data into the cache with the given id.

        :param data: 1D numpy array of length self.chunksize and dtype self.datatype
        :param id: The chunk id to associate with this data
        :note: If no free slots are available, evict the least recently used slot.
        """
        free_slots = np.where(self.chunk_id == -1)[0]
        if len(free_slots) > 0:
            slot = free_slots[0]
        else:
            # Evict the least recently used slot (last in lru)
            slot = self.lru[-1]
        self.write(data, slot)
        self.chunk_id[slot] = id

    def pull(self, id: int) -> np.ndarray:
        """
        Retrieve the data for the given chunk id and update the LRU table.

        :param id: The chunk id to retrieve
        :return: The data array for the chunk
        :raises KeyError: If the chunk id is not found in the cache
        """
        slot = indexof(self.chunk_id, id)
        if slot < 0:
            raise KeyError(f"Chunk id {id} not found in cache")
        return self.read(slot)
336
+
337
+
338
def timestring_formatter(pattern, time):
    """
    Format a time string

    :param pattern: f-string style formatting pattern
    :param time: Numpy convertible time specification
    :returns: A formatted time string
    """
    # Represent the time as integer seconds since the epoch so that the
    # replacement fields can contain arithmetic (e.g. "{time - 3600:...}")
    epoch_seconds = np.datetime64(time, 's').astype(int)

    class EpochFormatter(string.Formatter):
        def get_value(self, key, args, kwargs):
            # The field name is an arithmetic expression over the supplied
            # keyword values, evaluated by numexpr (no globals exposed)
            return numexpr.evaluate(
                key, local_dict=kwargs, global_dict=dict())

        def format_field(self, value, format_spec):
            # Convert the posix value back to datetime and apply strftime
            moment = np.int64(value).astype('datetime64[s]').astype(object)
            return moment.strftime(format_spec)

    formatter = EpochFormatter()
    return formatter.format(pattern, time=epoch_seconds)
359
+
360
+
361
@njit
def update_lru(lru: np.ndarray, slot: int) -> None:
    """
    Mark *slot* as most recently used.

    Entries are shifted one position towards the back until the previous
    position of *slot* is reached; *slot* itself ends up at the front.

    :param lru: The LRU array
    :param slot: The slot index to move to the front
    """
    carry = slot
    for pos in range(len(lru)):
        displaced = lru[pos]
        lru[pos] = carry
        if displaced == slot:
            break
        carry = displaced
376
+
377
+
378
@njit
def indexof(array: np.ndarray, value: int) -> int:
    """
    Find the index of the first occurrence of a value in an array.

    :param array: The input array
    :param value: The value to find
    :return: The index of the first occurrence, or -1 if not found
    """
    n = len(array)
    i = 0
    while i < n:
        if array[i] == value:
            return i
        i += 1
    return -1
391
+
392
+
393
@njit(inline="always", fastmath=True)
def get_chunk_id(i, j, l, size_x, size_y, num_x, num_y):
    """
    Calculate the chunk ID based on the indices and sizes.

    We assume that the array is chunked in the x and y dimensions,
    but not in the z dimension. The t dimension is assumed to be
    chunked with size_t = 1.

    For instance, if the chunk size is x:10, y:5 and the number of chunks
    in the x and y dimensions is 6 and 7 respectively, then the chunk
    ID for the coordinates (31, 14, 0) would be calculated as follows:

        chunk id in x direction: 31 // 10 = 3
        chunk id in y direction: 14 // 5 = 2
        chunk id in t direction: 0 // 1 = 0
        chunk id = 3 + 6*2 + 6*7*0 = 15

    This means that the chunk ID is a unique identifier for the chunk
    containing the coordinates (31, 14, 0) in the array.

    :param i: Integer x coordinate (global index)
    :param j: Integer y coordinate (global index)
    :param l: Integer t coordinate (global index)
    :param size_x: Chunk size in x dimension
    :param size_y: Chunk size in y dimension
    :param num_x: Number of chunks in x dimension
    :param num_y: Number of chunks in y dimension
    :return: Chunk ID
    """
    # Per-dimension chunk coordinates, flattened row-major into one integer
    cx = i // size_x
    cy = j // size_y
    return cx + num_x * (cy + num_y * l)
428
+
429
+
430
@njit(inline="always", fastmath=True)
def interp_xyzt(x, y, z, t, v, ncx, ncy, ncz, nct, cache, ids):
    """
    Interpolate the data in the x, y, z, and t dimensions.

    The cache axes are (slot, variable, t, z, y, x). The 16 corner values
    are labelled ``u{t}{z}{y}{x}`` and paired with weights ``w{t}{z}{y}{x}``
    built from the fractional parts along (t, z, y, x). Chunk ids depend on
    the x, y and t coordinates only; chunks span the full z range.

    Bug fix relative to the original revision: the corner reads mixed up the
    z/y/t digits (e.g. the (t0,z0,y1,x0) corner was read with local indices
    (t0,z1,y0,x0)), so any fractional y, z or t coordinate produced wrong
    values. Each corner now reads local indices matching its own digits.

    :param x: x coordinate (global, fractional index)
    :param y: y coordinate (global, fractional index)
    :param z: z coordinate (global, fractional index)
    :param t: t coordinate (global, fractional index)
    :param v: Variable index (second axis of the cache)
    :param ncx: Number of chunks in x dimension
    :param ncy: Number of chunks in y dimension
    :param ncz: Number of chunks in z dimension
    :param nct: Number of chunks in t dimension
    :param cache: Chunk cache array, shape (slots, nvar, st, sz, sy, sx)
    :param ids: Array of chunk ids for each slot in the cache
    :return: Interpolated value, or nan if a required chunk is not cached
    """
    _, _, st, sz, sy, sx = cache.shape

    max_x = ncx * sx
    max_y = ncy * sy
    max_z = ncz * sz
    max_t = nct * st

    # Lower corner indices, clipped so the upper neighbor stays in range
    i0 = max(0, min(max_x - 2, np.int32(x)))
    j0 = max(0, min(max_y - 2, np.int32(y)))
    k0 = max(0, min(max_z - 2, np.int32(z)))
    l0 = max(0, min(max_t - 2, np.int32(t)))

    i1 = i0 + 1
    j1 = j0 + 1
    k1 = k0 + 1
    l1 = l0 + 1

    # Chunk ID (chid) for the surrounding points; digits are {t}{z}{y}{x}
    # with the z digit always 0 since chunks span the full z range.
    # NOTE(review): get_chunk_id treats the raw t index as the t-chunk index
    # (i.e. assumes chunk size 1 in time), while the local index below uses
    # t % st; these agree only when st == 1 — confirm intended t-chunking.
    chid0000 = get_chunk_id(i0, j0, l0, sx, sy, ncx, ncy)
    chid0001 = get_chunk_id(i1, j0, l0, sx, sy, ncx, ncy)
    chid0010 = get_chunk_id(i0, j1, l0, sx, sy, ncx, ncy)
    chid0011 = get_chunk_id(i1, j1, l0, sx, sy, ncx, ncy)
    chid1000 = get_chunk_id(i0, j0, l1, sx, sy, ncx, ncy)
    chid1001 = get_chunk_id(i1, j0, l1, sx, sy, ncx, ncy)
    chid1010 = get_chunk_id(i0, j1, l1, sx, sy, ncx, ncy)
    chid1011 = get_chunk_id(i1, j1, l1, sx, sy, ncx, ncy)

    # Memory offset into cache for each chunk
    slot0000 = indexof(ids, chid0000)
    slot0001 = indexof(ids, chid0001)
    slot0010 = indexof(ids, chid0010)
    slot0011 = indexof(ids, chid0011)
    slot1000 = indexof(ids, chid1000)
    slot1001 = indexof(ids, chid1001)
    slot1010 = indexof(ids, chid1010)
    slot1011 = indexof(ids, chid1011)

    # Return nan if any of the slots are not found
    if (slot0000 < 0 or slot0001 < 0 or slot0010 < 0 or slot0011 < 0 or
            slot1000 < 0 or slot1001 < 0 or slot1010 < 0 or slot1011 < 0):
        return np.nan

    # Within-chunk indices
    ii0 = i0 % sx
    ii1 = i1 % sx
    jj0 = j0 % sy
    jj1 = j1 % sy
    kk0 = k0 % sz
    kk1 = k1 % sz
    ll0 = l0 % st
    ll1 = l1 % st

    # Memory extraction: corner u{t}{z}{y}{x} is read from the chunk holding
    # (x-digit, y-digit, t-digit) with matching local indices.
    u0000 = cache[slot0000, v, ll0, kk0, jj0, ii0]
    u0001 = cache[slot0001, v, ll0, kk0, jj0, ii1]
    u0010 = cache[slot0010, v, ll0, kk0, jj1, ii0]
    u0011 = cache[slot0011, v, ll0, kk0, jj1, ii1]
    u0100 = cache[slot0000, v, ll0, kk1, jj0, ii0]
    u0101 = cache[slot0001, v, ll0, kk1, jj0, ii1]
    u0110 = cache[slot0010, v, ll0, kk1, jj1, ii0]
    u0111 = cache[slot0011, v, ll0, kk1, jj1, ii1]
    u1000 = cache[slot1000, v, ll1, kk0, jj0, ii0]
    u1001 = cache[slot1001, v, ll1, kk0, jj0, ii1]
    u1010 = cache[slot1010, v, ll1, kk0, jj1, ii0]
    u1011 = cache[slot1011, v, ll1, kk0, jj1, ii1]
    u1100 = cache[slot1000, v, ll1, kk1, jj0, ii0]
    u1101 = cache[slot1001, v, ll1, kk1, jj0, ii1]
    u1110 = cache[slot1010, v, ll1, kk1, jj1, ii0]
    u1111 = cache[slot1011, v, ll1, kk1, jj1, ii1]

    # Interpolation weights
    # The weights are calculated as the distance from the lower bound
    p = x - i0
    q = y - j0
    r = z - k0
    s = t - l0

    w0000 = (1 - s) * (1 - r) * (1 - q) * (1 - p)
    w0001 = (1 - s) * (1 - r) * (1 - q) * p
    w0010 = (1 - s) * (1 - r) * q * (1 - p)
    w0011 = (1 - s) * (1 - r) * q * p
    w0100 = (1 - s) * r * (1 - q) * (1 - p)
    w0101 = (1 - s) * r * (1 - q) * p
    w0110 = (1 - s) * r * q * (1 - p)
    w0111 = (1 - s) * r * q * p
    w1000 = s * (1 - r) * (1 - q) * (1 - p)
    w1001 = s * (1 - r) * (1 - q) * p
    w1010 = s * (1 - r) * q * (1 - p)
    w1011 = s * (1 - r) * q * p
    w1100 = s * r * (1 - q) * (1 - p)
    w1101 = s * r * (1 - q) * p
    w1110 = s * r * q * (1 - p)
    w1111 = s * r * q * p

    # Interpolation
    result = (w0000 * u0000 + w0001 * u0001 + w0010 * u0010 + w0011 * u0011 +
              w0100 * u0100 + w0101 * u0101 + w0110 * u0110 + w0111 * u0111 +
              w1000 * u1000 + w1001 * u1001 + w1010 * u1010 + w1011 * u1011 +
              w1100 * u1100 + w1101 * u1101 + w1110 * u1110 + w1111 * u1111)

    return result
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ladim
3
- Version: 2.1.5
3
+ Version: 2.1.6
4
4
  Summary: Lagrangian Advection and Diffusion Model
5
5
  Home-page: https://github.com/pnsaevik/ladim
6
6
  Author: Bjørn Ådlandsvik
@@ -18,6 +18,8 @@ Requires-Python: >=3.7
18
18
  Description-Content-Type: text/markdown
19
19
  License-File: LICENSE
20
20
  Requires-Dist: netCDF4
21
+ Requires-Dist: numba
22
+ Requires-Dist: numexpr
21
23
  Requires-Dist: numpy
22
24
  Requires-Dist: pandas
23
25
  Requires-Dist: pyarrow
@@ -1,4 +1,6 @@
1
1
  netCDF4
2
+ numba
3
+ numexpr
2
4
  numpy
3
5
  pandas
4
6
  pyarrow
@@ -22,6 +22,8 @@ classifiers =
22
22
  packages = find:
23
23
  install_requires =
24
24
  netCDF4
25
+ numba
26
+ numexpr
25
27
  numpy
26
28
  pandas
27
29
  pyarrow
@@ -0,0 +1,167 @@
1
+ from ladim.gridforce import ROMS
2
+ import numpy as np
3
+ from ladim import forcing
4
+ import pytest
5
+
6
+
7
class Test_z2s:
    # Regression test for the ROMS vertical-coordinate lookup helper
    def test_returns_interpolated_s_level(self):
        # Four s-levels at depths 5, 4, 3, 2 (negative z = below surface)
        zrho = np.array([-5, -4, -3, -2]).reshape((4, 1, 1))
        k, a = ROMS.z2s(zrho, np.zeros(5), np.zeros(5), np.array([6, 5, 3.5, 2, 0]))
        # k: index of the s-level below each depth (clipped at both ends);
        # a: linear interpolation weight towards the level below
        assert k.tolist() == [1, 1, 2, 3, 3]
        assert a.tolist() == [1.0, 1.0, 0.5, 0.0, 0.0]
13
+
14
+
15
class Test_timestring_formatter:
    # The formatter fills strftime-style fields; the field name may be an
    # arithmetic expression over the posix timestamp (evaluated by numexpr)
    def test_can_format_simple_date(self):
        result = forcing.timestring_formatter(
            pattern="My time: {time:%Y-%m-%d %H:%M:%S}",
            time="2012-12-31T23:58:59",
        )
        assert result == "My time: 2012-12-31 23:58:59"

    def test_can_format_shifted_dates(self):
        # "time - 3600" shifts the timestamp one hour backwards
        result = forcing.timestring_formatter(
            pattern="My time: {time - 3600:%Y-%m-%d %H:%M:%S}",
            time="2012-12-31T23:58:59",
        )
        assert result == "My time: 2012-12-31 22:58:59"
29
+
30
+
31
class Test_ChunkCache:
    # Exercises the shared-memory cache: header layout, LRU eviction,
    # and the write-once attribute guard
    def test_creates_correct_header(self):
        with forcing.ChunkCache.create(slots=3, chunksize=10) as c:
            assert c.mem.size > 0
            assert c.num_chunks == 3
            assert c.chunksize == 10
            # dtype name is stored as a fixed-width 8-byte field
            assert c.datatype == b'float32'
            assert c.itemsize == 4
            assert len(c.chunk_id) == 3
            assert c.data.shape == (3, 10)
            assert str(c.data.dtype) == 'float32'

    def test_can_push_pull_data(self):
        with forcing.ChunkCache.create(slots=3, chunksize=10) as c:
            c.push(data=np.arange(10), id=12345)
            c.push(data=np.arange(10, 20), id=12346)
            # Pull order is independent of push order
            assert c.pull(12346).tolist() == [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
            assert c.pull(12345).tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

    def test_can_evict_older_chunks(self):
        with forcing.ChunkCache.create(slots=3, chunksize=10) as c:
            # Push single chunk, the data is there
            c.push(data=np.arange(10), id=12345)
            assert c.contains(12345)

            # Push three more chunks, the first one is evicted in the last step
            c.push(data=np.arange(10)+1, id=12346)
            assert c.contains(12345)
            c.push(data=np.arange(10)+2, id=12347)
            assert c.contains(12345)
            c.push(data=np.arange(10)+3, id=12348)
            assert not c.contains(12345)

    def test_no_accidental_reassigning(self):
        # The memory-mapped attributes may only be assigned once (in __init__)
        with forcing.ChunkCache.create(slots=3, chunksize=10) as c:
            with pytest.raises(AttributeError):
                c.chunk_id = [0, 1, 2]
68
+
69
+
70
class Test_update_lru:
    # The LRU table stores slot numbers, most recently used first
    def test_update_lru_moves_slot_to_front(self):
        # Test moving middle element
        lru = np.array([0, 1, 2, 3, 4], dtype=np.int16)
        forcing.update_lru(lru, 2)
        assert lru.tolist() == [2, 0, 1, 3, 4]

        # Test moving the first element (should be no change)
        lru = np.array([0, 1, 2, 3, 4], dtype=np.int16)
        forcing.update_lru(lru, 0)
        assert lru.tolist() == [0, 1, 2, 3, 4]

        # Test moving the last element
        lru = np.array([0, 1, 2, 3, 4], dtype=np.int16)
        forcing.update_lru(lru, 4)
        assert lru.tolist() == [4, 0, 1, 2, 3]

        # Test moving an element that is not in the array (wrong behavior, but no error or infinite loop)
        lru = np.array([0, 1, 2, 3, 4], dtype=np.int16)
        forcing.update_lru(lru, 5)
        assert lru.tolist() == [5, 0, 1, 2, 3]
91
+
92
+
93
class Test_get_chunk_id:
    # Chunk ids are row-major over (t, y, x) chunk coordinates; the t
    # coordinate is used directly (chunk size 1 in time)
    def test_get_chunk_id(self):
        # Coordinates are within first chunk
        chunk_id = forcing.get_chunk_id(
            i=1, j=2, l=0, size_x=10, size_y=5, num_x=2, num_y=3)
        assert chunk_id == 0

        # Coordinates are within chunk (0, 0, 1)
        chunk_id = forcing.get_chunk_id(
            i=11, j=2, l=0, size_x=10, size_y=5, num_x=2, num_y=3)
        assert chunk_id == 1

        # Coordinates are within chunk (0, 1, 0)
        chunk_id = forcing.get_chunk_id(
            i=1, j=6, l=0, size_x=10, size_y=5, num_x=2, num_y=3)
        assert chunk_id == 2

        # Coordinates are within chunk (1, 0, 0)
        chunk_id = forcing.get_chunk_id(
            i=1, j=4, l=1, size_x=10, size_y=5, num_x=2, num_y=3)
        assert chunk_id == 6

        # Coordinates are within chunk (1, 1, 1)
        chunk_id = forcing.get_chunk_id(
            i=11, j=6, l=1, size_x=10, size_y=5, num_x=2, num_y=3)
        assert chunk_id == 9
119
+
120
+
121
class Test_interp_xyzt:
    # NOTE(review): every sampled point has integer y, z and t, so only the
    # x-interpolation weights are exercised; consider adding fractional
    # y/z/t cases to cover the remaining 14 corner reads.
    def test_interp_xyzt(self):
        # Chunk size
        sx = 7
        sy = 6
        sz = 5
        st = 4
        sv = 3

        # Number of chunks
        ncx = 6
        ncy = 5
        ncz = 1
        nct = 3
        ncv = 2

        # Full data array
        nx = ncx * sx
        ny = ncy * sy
        nz = ncz * sz
        nt = nct * st
        nv = ncv * sv
        full_data = np.arange(nx * ny * nz * nt * nv, dtype='f4').reshape((nv, nt, nz, ny, nx))

        # Create a chunk cache
        # (only a subset of the chunks is loaded: i < 3, j < 2, k < 2)
        import itertools
        data = np.empty((12, sv, st, sz, sy, sx), dtype='f4')
        ids = np.empty((12, ), dtype=np.int16)
        for idx, (k, j, i) in enumerate(itertools.product(range(2), range(2), range(3))):
            data[idx] = full_data[:sv, st*k:st*(k+1), :sz, sy*j:sy*(j+1), sx*i:sx*(i+1)]
            ids[idx] = i + j * ncx + k * ncy * ncx * ncz

        # Interpolate, first chunk
        # (values equal the flat index since full_data is an arange)
        assert 0 == forcing.interp_xyzt(0, 0, 0, 0, 0, ncx, ncy, ncz, nct, data, ids)
        assert 1 == forcing.interp_xyzt(1, 0, 0, 0, 0, ncx, ncy, ncz, nct, data, ids)
        assert 0.5 == forcing.interp_xyzt(0.5, 0, 0, 0, 0, ncx, ncy, ncz, nct, data, ids)
        assert nx == forcing.interp_xyzt(0, 1, 0, 0, 0, ncx, ncy, ncz, nct, data, ids)
        assert nx+1 == forcing.interp_xyzt(1, 1, 0, 0, 0, ncx, ncy, ncz, nct, data, ids)

        # Interpolate, second chunk
        assert sx == forcing.interp_xyzt(sx, 0, 0, 0, 0, ncx, ncy, ncz, nct, data, ids)
        assert sx + 0.5 == forcing.interp_xyzt(sx + 0.5, 0, 0, 0, 0, ncx, ncy, ncz, nct, data, ids)

        # Interpolate, chunk not in cache
        r = forcing.interp_xyzt(sx, sy, sz-1, st, sv-1, ncx, ncy, ncz, nct, data, ids)
        assert np.isnan(r)
167
+
@@ -1,100 +0,0 @@
1
- import typing
2
- if typing.TYPE_CHECKING:
3
- from ladim.model import Model
4
-
5
-
6
- class Forcing:
7
- @staticmethod
8
- def from_roms(**conf):
9
- return RomsForcing(**conf)
10
-
11
- def velocity(self, X, Y, Z, tstep=0.0):
12
- raise NotImplementedError
13
-
14
- def update(self, model: "Model"):
15
- raise NotImplementedError
16
-
17
-
18
- class RomsForcing(Forcing):
19
- def __init__(self, file, variables=None, **conf):
20
- """
21
- Forcing module which uses output data from the ROMS ocean model
22
-
23
- :param file: Glob pattern for the input files
24
- :param variables: A mapping of variable names to interpolation
25
- specifications. Each interpolaction specification consists of 0-4
26
- of the letters "xyzt". Coordinates that are listed in the string are
27
- interpolated linearly, while the remaining ones use nearest-neighbor
28
- interpolation. Some default configurations are defined:
29
-
30
- .. code-block:: json
31
- {
32
- "temp": "xyzt",
33
- "salt": "xyzt",
34
- "u": "xt",
35
- "v": "yt",
36
- "w": "zt",
37
- }
38
-
39
-
40
- :param conf: Legacy config dict
41
- """
42
- # Apply default interpolation configs
43
- variables = variables or dict()
44
- default_vars = dict(u="xt", v="yt", w="zt", temp="xyzt", salt="xyzt")
45
- self.variables = {**default_vars, **variables}
46
-
47
- grid_ref = GridReference()
48
- legacy_conf = dict(
49
- gridforce=dict(input_file=file, **conf),
50
- ibm_forcing=conf.get('ibm_forcing', []),
51
- start_time=conf.get('start_time', None),
52
- stop_time=conf.get('stop_time', None),
53
- dt=conf.get('dt', None),
54
- )
55
- if conf.get('subgrid', None) is not None:
56
- legacy_conf['gridforce']['subgrid'] = conf['subgrid']
57
-
58
- from .utilities import load_class
59
- LegacyForcing = load_class(conf.get('legacy_module', 'ladim.gridforce.ROMS.Forcing'))
60
-
61
- # Allow gridforce module in current directory
62
- import sys
63
- import os
64
- sys.path.insert(0, os.getcwd())
65
- # Import correct gridforce_module
66
- self.forcing = LegacyForcing(legacy_conf, grid_ref)
67
- # self.steps = self.forcing.steps
68
- # self.U = self.forcing.U
69
- # self.V = self.forcing.V
70
-
71
- def update(self, model: "Model"):
72
- elapsed = model.solver.time - model.solver.start
73
- t = elapsed // model.solver.step
74
-
75
- # noinspection PyProtectedMember
76
- self.forcing._grid.modules = model
77
- self.forcing.update(t)
78
-
79
- # Update state variables by sampling the field
80
- x, y, z = model.state['X'], model.state['Y'], model.state['Z']
81
- for v in self.variables:
82
- if v in model.state:
83
- model.state[v] = self.field(x, y, z, v)
84
-
85
- def velocity(self, X, Y, Z, tstep=0.0):
86
- return self.forcing.velocity(X, Y, Z, tstep=tstep)
87
-
88
- def field(self, X, Y, Z, name):
89
- return self.forcing.field(X, Y, Z, name)
90
-
91
- def close(self):
92
- return self.forcing.close()
93
-
94
-
95
- class GridReference:
96
- def __init__(self):
97
- self.modules = None
98
-
99
- def __getattr__(self, item):
100
- return getattr(self.modules.grid.grid, item)
@@ -1,10 +0,0 @@
1
- from ladim.gridforce import ROMS
2
- import numpy as np
3
-
4
-
5
- class Test_z2s:
6
- def test_returns_interpolated_s_level(self):
7
- zrho = np.array([-5, -4, -3, -2]).reshape((4, 1, 1))
8
- k, a = ROMS.z2s(zrho, np.zeros(5), np.zeros(5), np.array([6, 5, 3.5, 2, 0]))
9
- assert k.tolist() == [1, 1, 2, 3, 3]
10
- assert a.tolist() == [1.0, 1.0, 0.5, 0.0, 0.0]
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes