segyio 1.9.13__cp312-cp312-macosx_10_13_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of segyio might be problematic. Click here for more details.
- segyio/__init__.py +99 -0
- segyio/_segyio.cpython-312-darwin.so +0 -0
- segyio/binfield.py +89 -0
- segyio/create.py +257 -0
- segyio/depth.py +180 -0
- segyio/field.py +546 -0
- segyio/gather.py +444 -0
- segyio/line.py +498 -0
- segyio/open.py +192 -0
- segyio/segy.py +1010 -0
- segyio/segysampleformat.py +19 -0
- segyio/su/__init__.py +2 -0
- segyio/su/file.py +118 -0
- segyio/su/words.py +284 -0
- segyio/tools.py +731 -0
- segyio/trace.py +967 -0
- segyio/tracefield.py +195 -0
- segyio/tracesortingformat.py +6 -0
- segyio/utils.py +28 -0
- segyio-1.9.13.dist-info/METADATA +79 -0
- segyio-1.9.13.dist-info/RECORD +23 -0
- segyio-1.9.13.dist-info/WHEEL +5 -0
- segyio-1.9.13.dist-info/top_level.txt +1 -0
segyio/gather.py
ADDED
|
@@ -0,0 +1,444 @@
|
|
|
1
|
+
import collections
|
|
2
|
+
try:
|
|
3
|
+
from collections.abc import Mapping
|
|
4
|
+
except ImportError:
|
|
5
|
+
from collections import Mapping # noqa
|
|
6
|
+
|
|
7
|
+
import itertools
|
|
8
|
+
import numpy as np
|
|
9
|
+
|
|
10
|
+
try: from future_builtins import zip
|
|
11
|
+
except ImportError: pass
|
|
12
|
+
|
|
13
|
+
import segyio.tools as tools
|
|
14
|
+
|
|
15
|
+
from .line import sanitize_slice
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class Gather(object):
    """
    A gather is in this context the intersection of lines in a cube, i.e. all
    the offsets at some inline/crossline intersection. The primary data type is
    numpy.ndarray, with dimensions depending on the range of offsets specified.

    Implements a dict_like lookup with the line and offset numbers (labels),
    not 0-based indices.

    Notes
    -----
    .. versionadded:: 1.1
    """

    def __init__(self, trace, iline, xline, offsets):
        # cache constructed modes for performance
        self.trace = trace      # trace mode, used for raw trace reads
        self.iline = iline      # inline mode; provides .heads, .offsets, .keys()
        self.xline = xline      # crossline mode; provides .heads, .keys()
        # offset labels present in the file; offsets[-1] + 1 is used as a
        # range bound below, so this is assumed ascending
        self.offsets = offsets

    def __getitem__(self, index):
        """gather[i, x, o], gather[:,:,:]

        Get the gather or range of gathers, defined as offsets intersection
        between an in- and a crossline. Also works on post-stack files (with
        only 1 offset), although it is less useful in those cases.

        If offsets are omitted, the default is all offsets.

        A group of offsets is always returned as an offset-by-samples
        numpy.ndarray. If either inline, crossline, or both, are slices, a
        generator of such ndarrays are returned.

        If the slice of offsets misses all offsets, a special, empty ndarray is
        returned.

        Parameters
        ----------
        i : int or slice
            inline
        x : int or slice
            crossline
        o : int or slice
            offsets (default is :)

        Returns
        -------
        gather : numpy.ndarray or generator of numpy.ndarray

        Notes
        -----
        .. versionadded:: 1.1

        Examples
        --------
        Read one offset at an intersection:

        >>> gather[200, 241, 25] # returns same shape as trace

        Read all offsets at an intersection:

        >>> gather[200, 241, :] # returns offsets x samples ndarray
        >>> # If no offset is specified, this is implicitly (:)
        >>> gather[200, 241, :] == gather[200, 241]

        All offsets for a set of ilines, intersecting one crossline:

        >>> gather[200:300, 241, :] == gather[200:300, 241]

        Some offsets for a set of ilines, intersecting one crossline:

        >>> gather[200:300, 241, 10:25:5]

        Some offsets for a set of ilines and xlines. This effectively yields a
        subcube:

        >>> f.gather[200:300, 241:248, 1:10]
        """
        # NOTE(review): assumes index is a sized tuple of at least (il, xl);
        # a bare int index (gather[200]) raises TypeError on len() here
        if len(index) < 3:
            index = (index[0], index[1], None)

        il, xl, off = index

        # post-stack convenience: with a single offset, resolve it eagerly so
        # the scalar fast-path below can be taken
        if off is None and len(self.offsets) == 1:
            off = self.offsets[0]

        # if offset isn't specified, default to all, [:]
        # NOTE(review): `or` treats a falsy offset as unspecified, so an
        # explicit offset label of 0 would be widened to all offsets — confirm
        # 0 is never a valid offset label
        off = off or slice(None)

        def isslice(x): return isinstance(x, slice)

        # gather[int,int,int]
        # scalar case: a single trace, addressed by summing the iline head,
        # xline head, and offset displacements into a flat trace index
        if not any(map(isslice, [il, xl, off])):
            o = self.iline.offsets[off]
            i = o + self.iline.heads[il] + self.xline.heads[xl]
            return self.trace[i]

        # normalise the offset dimension to a slice with explicit start/stop
        if isslice(off):
            offs = sanitize_slice(off, self.offsets)
        else:
            offs = slice(off, off + 1, 1)

        # xs: the offset labels actually present in the file that the slice
        # hits; empty when the slice misses all offsets
        xs = list(filter(self.offsets.__contains__,
                    range(*offs.indices(self.offsets[-1]+1))))

        empty = np.empty(0, dtype = self.trace.dtype)
        # gather[int,int,:]
        # single intersection, multiple offsets: collect into one ndarray
        if not any(map(isslice, [il, xl])):
            if len(xs) == 0: return empty
            i = self.iline.heads[il] + self.xline.heads[xl]
            return tools.collect(self.trace[i + self.iline.offsets[x]] for x in xs)

        # gather[:,:,:], gather[int,:,:], gather[:,int,:]
        # gather[:,:,int] etc
        # at least one of il/xl is a slice: produce a generator of ndarrays
        def gen():
            # precomputed xline number -> xline offset into the iline
            xlinds = { xlno: i for i, xlno in enumerate(self.xline.keys()) }

            # ranges over gathers are done on a by-line basis so lines can be
            # buffered, and traces can be read from the iline. This is the
            # least efficient when there are very few traces read per inline,
            # but huge savings with larger subcubes
            last_il = self.iline.keys()[-1] + 1
            last_xl = self.xline.keys()[-1] + 1

            # promote scalar line labels to one-element slices so the two
            # cases share the same iteration machinery
            il_slice = il if isslice(il) else slice(il, il+1)
            xl_slice = xl if isslice(xl) else slice(xl, xl+1)

            # the slice could still be defaulted (:), in which case this starts
            # at zero. the lookups are guarded by try-excepts so it won't fail,
            # but it's unnecessary to check all keys up until the first xline
            # because that will never be a hit anyway
            if il_slice.start is None:
                start = self.iline.keys()[0]
                il_slice = slice(start, il_slice.stop, il_slice.step)

            if xl_slice.start is None:
                start = self.xline.keys()[0]
                xl_slice = slice(start, xl_slice.stop, xl_slice.step)

            il_range = range(*il_slice.indices(last_il))
            xl_range = range(*xl_slice.indices(last_xl))

            # the try-except-else is essentially a filter on in/xl keys, but
            # delegates the work (and decision) to the iline and xline modes
            if not isslice(off):
                # single offset: each yielded item is one trace of the iline
                for iline in self.iline[il_slice, off]:
                    for xlno in xl_range:
                        try:
                            xind = xlinds[xlno]
                        except KeyError:
                            pass
                        else:
                            yield iline[xind]

                return

            # offset slice hit nothing: yield the sentinel empty array once
            # per requested (il, xl) intersection
            if len(xs) == 0:
                for _, _ in itertools.product(il_range, xl_range): yield empty
                return

            # general case: buffer one whole iline (all requested offsets) at
            # a time, then slice out each requested crossline column
            for ilno in il_range:
                iline = tools.collect(self.iline[ilno, off])
                for x in xl_range:
                    try:
                        xind = xlinds[x]
                    except KeyError:
                        pass
                    else:
                        yield iline[:, xind]

        return gen()
|
|
191
|
+
|
|
192
|
+
class Group(object):
    """
    The inner representation of the Groups abstraction provided by Group.

    A collection of trace indices that have identical `key`.

    Notes
    -----
    .. versionadded:: 1.9
    """
    def __init__(self, key, parent, index):
        # key: the fingerprint shared by every trace in this group
        # parent: the owning object, consulted for .header and .trace
        # index: trace indices into parent, in iteration order
        self.key = key
        self.parent = parent
        self.index = index

    @property
    def header(self):
        """
        A generator of the the read-only headers in this group

        Returns
        -------
        headers : iterator of Header

        Notes
        -----
        The generator respects the order of the index - to iterate over headers
        in a different order, the index attribute can be re-organised.

        .. versionadded:: 1.9
        """
        headers = self.parent.header
        return (headers[i] for i in self.index)

    @property
    def trace(self):
        """
        A generator of the the read-only traces in this group

        Returns
        -------
        traces : iterator of Trace

        Notes
        -----
        The generator respects the order of the index - to iterate over headers
        in a different order, the index attribute can be re-organised.

        .. versionadded:: 1.9
        """
        traces = self.parent.trace
        return (traces[i] for i in self.index)

    def sort(self, fields):
        """
        Sort the traces in the group, obeying the `fields` order of
        most-to-least significant word.
        """
        # TODO: examples

        # pair every trace index with a plain-dict copy of its header, so
        # header words can be used as sort keys
        pairs = [(dict(self.parent.header[i]), i) for i in self.index]

        # list.sort is stable, so sorting once per field, least significant
        # first, yields the most-to-least significant ordering
        for word in reversed(fields):
            pairs.sort(key = lambda pair: pair[0][word])

        # keep only the reordered trace indices
        self.index = [tix for _, tix in pairs]
|
|
264
|
+
|
|
265
|
+
class Groups(Mapping):
    """
    The Groups implements the dict interface, grouping all traces that match a
    given `fingerprint`. The fingerprint is a signature derived from a set of
    trace header words, called a `key`.

    Consider a file with five traces, and some selected header words::

        0: {offset: 1, fldr: 1}
        1: {offset: 1, fldr: 2}
        2: {offset: 1, fldr: 1}
        3: {offset: 2, fldr: 1}
        4: {offset: 1, fldr: 2}

    With key = (offset, fldr), there are 3 groups::

        {offset: 1, fldr: 1 }: [0, 2]
        {offset: 1, fldr: 2 }: [1, 4]
        {offset: 2, fldr: 1 }: [3]

    With a key = offset, there are 2 groups::

        {offset: 1}: [0, 1, 2, 4]
        {offset: 2}: [3]

    The Groups class is intended to easily process files without the rigid
    in/crossline structure of iline/xline/gather, but where there is sufficient
    structure in the headers. This is common for some types of pre-stack data,
    shot gather data etc.

    Notes
    -----
    .. versionadded:: 1.9
    """
    # TODO: only group in range of traces?
    # TODO: cache header dicts?
    def __init__(self, trace, header, key):
        # bucket trace indices by the fingerprint of their key-words, in
        # first-seen order (OrderedDict keeps insertion order on all
        # supported Python versions)
        bins = collections.OrderedDict()
        for i, h in enumerate(header[:]):
            bins.setdefault(self.fingerprint(h[key]), []).append(i)

        self.trace = trace
        self.header = header
        self.key = key
        self.bins = bins

    @staticmethod
    def normalize_keys(items):
        """
        Normalize the key representation to integers, so that they're hashable,
        even when a key is built with enumerators.

        This function is intended for internal use, and provides the mapping
        from accepted key representation to a canonical key.

        Parameters
        ----------
        items : iterator of (int_like, array_like)

        Returns
        -------
        items : generator of (int, array_like)

        Warnings
        --------
        This function provides no guarantees for value and type compatibility,
        even between minor versions.

        Notes
        -----
        .. versionadded:: 1.9
        """
        return ((int(k), v) for k, v in items)

    @staticmethod
    def fingerprint(key):
        """
        Compute a hashable fingerprint for a key. This function is intended for
        internal use. Relies on normalize_keys for transforming keys to
        canonical form. The output of this function is used for the group ->
        index mapping.

        Parameters
        ----------
        key : int_like or dict of {int_like: int} or iterable of (int_like,int)

        Returns
        -------
        key
            A normalized canonical representation of key

        Warnings
        --------
        This function provides no guarantees for value and type compatibility,
        even between minor versions.

        Notes
        -----
        .. versionadded:: 1.9
        """
        # scalar key: canonical form is a plain int
        try:
            return int(key)
        except TypeError:
            pass

        # composite key: accept both dict-likes and (word, value) iterables
        try:
            items = key.items()
        except AttributeError:
            items = iter(key)

        # map k -> tracefield -> int
        return frozenset(Groups.normalize_keys(items))

    def __len__(self):
        """x.__len__() <==> len(x)"""
        return len(self.bins)

    def __contains__(self, key):
        """x.__contains__(key) <==> key in x"""
        return self.fingerprint(key) in self.bins

    def __getitem__(self, key):
        """g[key]

        Read the group associated with key.

        Key can be any informal mapping between a header word (TraceField, su
        header words, or raw integers) and a value.

        Parameters
        ----------
        key

        Returns
        -------
        group : Group

        Notes
        -----
        .. versionadded:: 1.9

        Examples
        --------

        Group on FieldRecord, and get the group FieldRecord == 5:

        >>> fr = segyio.TraceField.FieldRecord
        >>> records = f.group(fr)
        >>> record5 = records[5]
        """
        key = self.fingerprint(key)
        # raises KeyError for unknown fingerprints, like a dict
        return Group(key, self, self.bins[key])

    def values(self):
        """A generator of the Group for every bin, in insertion order"""
        for key, index in self.bins.items():
            yield Group(key, self, index)

    def items(self):
        """A generator of (fingerprint, Group) pairs, in insertion order"""
        for key, index in self.bins.items():
            yield key, Group(key, self, index)

    def __iter__(self):
        """x.__iter__() <==> iter(x)"""
        # bug fix: __iter__ must return an iterator. The previous
        # `return self.bins.keys()` returned a dict_keys *view*, which has no
        # __next__, so iter(groups) / list(groups) / the inherited
        # Mapping.keys() all raised TypeError.
        return iter(self.bins)

    def sort(self, fields):
        """
        Reorganise the indices in all groups by fields
        """
        bins = collections.OrderedDict()

        # delegate the per-group sort to Group.sort, and keep only the
        # reordered index lists
        for key, index in self.bins.items():
            g = Group(key, self, index)
            g.sort(fields)
            bins[key] = g.index

        self.bins = bins
|