segyio 1.9.13__cp313-cp313-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of segyio might be problematic. Click here for more details.
- segyio/__init__.py +99 -0
- segyio/_segyio.cp313-win_amd64.pyd +0 -0
- segyio/binfield.py +89 -0
- segyio/create.py +257 -0
- segyio/depth.py +180 -0
- segyio/field.py +546 -0
- segyio/gather.py +444 -0
- segyio/line.py +498 -0
- segyio/open.py +192 -0
- segyio/segy.py +1010 -0
- segyio/segysampleformat.py +19 -0
- segyio/su/__init__.py +2 -0
- segyio/su/file.py +118 -0
- segyio/su/words.py +284 -0
- segyio/tools.py +731 -0
- segyio/trace.py +967 -0
- segyio/tracefield.py +195 -0
- segyio/tracesortingformat.py +6 -0
- segyio/utils.py +28 -0
- segyio-1.9.13.dist-info/METADATA +79 -0
- segyio-1.9.13.dist-info/RECORD +23 -0
- segyio-1.9.13.dist-info/WHEEL +5 -0
- segyio-1.9.13.dist-info/top_level.txt +1 -0
segyio/segy.py
ADDED
|
@@ -0,0 +1,1010 @@
|
|
|
1
|
+
import warnings
|
|
2
|
+
try:
|
|
3
|
+
from future_builtins import zip
|
|
4
|
+
range = xrange
|
|
5
|
+
except (NameError, ImportError): pass
|
|
6
|
+
|
|
7
|
+
import numpy as np
|
|
8
|
+
|
|
9
|
+
from .gather import Gather, Groups
|
|
10
|
+
from .line import Line
|
|
11
|
+
from .trace import Trace, Header, Attributes, Text
|
|
12
|
+
from .field import Field
|
|
13
|
+
|
|
14
|
+
from .tracesortingformat import TraceSortingFormat
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class SegyFile(object):
|
|
19
|
+
"""
|
|
20
|
+
This class is not meant to be instantiated directly, but rather obtained
|
|
21
|
+
from ``segyio.open`` or ``segyio.create``.
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
_unstructured_errmsg = "File opened in unstructured mode."
|
|
25
|
+
|
|
26
|
+
    def __init__(self, fd, filename, mode, iline=189,
                 xline=193,
                 endian='big',
                 ):
        """Internal constructor; use ``segyio.open``/``segyio.create`` instead.

        ``fd`` is an already-opened low-level file handle (it must expose
        ``metrics()``, ``flush()``, ``close()``, ``mmap()``, ...).
        ``iline``/``xline`` are header-word byte offsets used for geometry.
        """

        self._filename = filename
        self._mode = mode
        self._il = iline
        self._xl = xline

        # property value holders
        self._ilines = None
        self._xlines = None
        self._offsets = None
        self._samples = None
        self._sorting = None

        # private values
        self._iline_length = None
        self._iline_stride = None
        self._xline_length = None
        self._xline_stride = None

        self.xfd = fd
        metrics = self.xfd.metrics()
        self._fmt = metrics['format']
        self._tracecount = metrics['tracecount']
        self._ext_headers = metrics['ext_headers']

        try:
            # map the SEG-Y binary-header format code onto the numpy dtype
            # traces are exchanged in; code 1 (IBM float) is represented as
            # float32 on the Python side
            self._dtype = np.dtype({
                -1: np.float32,
                1: np.float32,
                2: np.int32,
                3: np.int16,
                5: np.float32,
                6: np.float64,
                8: np.int8,
                9: np.int64,
                10: np.uint32,
                11: np.uint16,
                12: np.uint64,
                16: np.uint8,
            }[self._fmt])
        except KeyError:
            # unknown format code: warn and fall back to format 1 (IBM float)
            problem = 'Unknown trace value format {}'.format(self._fmt)
            solution = 'falling back to ibm float'
            warnings.warn(', '.join((problem, solution)))
            self._fmt = 1
            self._dtype = np.dtype(np.float32)

        self._trace = Trace(self.xfd,
                            self.dtype,
                            self.tracecount,
                            metrics['samplecount'],
                            self.readonly,
                            )
        self._header = Header(self)
        # addressing-mode helpers are created lazily by their properties
        self._iline = None
        self._xline = None
        self._gather = None
        self.depth = None
        self.endian = endian

        super(SegyFile, self).__init__()
|
|
91
|
+
|
|
92
|
+
    def __str__(self):
        """Human-readable multi-line summary of the file layout."""
        f = "SegyFile {}:".format(self._filename)

        if self.unstructured:
            # no geometry was inferred, so there are no line/offset indices
            il = " inlines: None"
            xl = " crosslines: None"
            of = " offsets: None"
        else:
            il = " inlines: {} [{}, {}]".format(len(self.ilines), self.ilines[0], self.ilines[-1])
            xl = " crosslines: {} [{}, {}]".format(len(self.xlines), self.xlines[0], self.xlines[-1])
            of = " offsets: {} [{}, {}]".format(len(self.offsets), self.offsets[0], self.offsets[-1])

        tr = " traces: {}".format(self.tracecount)
        sm = " samples: {}".format(self.samples)
        fmt = " float representation: {}".format(self.format)

        props = [f, il, xl, tr, sm]

        # offsets are only shown for pre-stack files (more than one offset)
        if self.offsets is not None and len(self.offsets) > 1:
            props.append(of)

        props.append(fmt)
        return '\n'.join(props)
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def __repr__(self):
|
|
118
|
+
return "SegyFile('{}', '{}', iline = {}, xline = {})".format(
|
|
119
|
+
self._filename, self._mode, self._il, self._xl)
|
|
120
|
+
|
|
121
|
+
    def __enter__(self):
        """Context-manager entry; the underlying fd is already open."""
        return self
|
|
123
|
+
|
|
124
|
+
    def __exit__(self, type, value, traceback):
        """Context-manager exit: always close, even when an exception is
        propagating (exceptions are not suppressed)."""
        self.close()
|
|
126
|
+
|
|
127
|
+
    def flush(self):
        """Flush the file

        Write the library buffers to disk, like C's ``fflush``. This method is
        mostly useful for testing.

        It is not necessary to call this method unless you want to observe your
        changes on-disk while the file is still open. The file will
        automatically be flushed for you if you use the `with` statement when
        your routine is completed.

        Notes
        -----

        .. versionadded:: 1.1

        .. warning::
            This is not guaranteed to actually write changes to disk, it only
            flushes the library buffers. Your kernel is free to defer writing
            changes to disk until a later time.

        Examples
        --------

        Flush:

        >>> with segyio.open(path) as f:
        ...     # write something to f
        ...     f.flush()

        """
        # delegate to the low-level extension handle
        self.xfd.flush()
|
|
159
|
+
|
|
160
|
+
    def close(self):
        """Close the file

        This method is mostly useful for testing.

        It is not necessary to call this method if you're using the `with`
        statement, which will close the file for you. Calling methods on a
        previously-closed file will raise `IOError`.

        Notes
        -----

        .. versionadded:: 1.1

        """
        # delegate to the low-level extension handle
        self.xfd.close()
|
|
177
|
+
|
|
178
|
+
    def mmap(self):
        """Memory map the file

        Memory map the file. This is an advanced feature for speed and
        optimization; however, it is no silver bullet. If your file is smaller
        than the memory available on your system this will likely result in
        faster reads and writes, especially for line modes. However, if the
        file is very large, or memory is very pressured, this optimization
        might cause overall system slowdowns. However, if you're opening the
        same file from many different instances of segyio then memory mapping
        may significantly reduce the memory pressure.

        If this call returns true, the file is memory mapped. If memory mapping
        was build-time disabled or is not available for your platform this call
        always return false. If the memory mapping is unsuccessful you can keep
        using segyio - reading and writing falls back on non-memory mapped
        features.

        Returns
        -------

        success : bool
            Returns True if the file was successfully memory mapped, False if
            not

        Notes
        -----

        .. versionadded:: 1.1


        Examples
        --------

        Memory map:

        >>> mapped = f.mmap()
        >>> if mapped: print( "File is memory mapped!" )
        File is memory mapped!
        >>> pass # keep using segyio as per usual
        >>> print( f.trace[10][7] )
        1.02548

        """
        # the extension decides whether mapping is possible on this platform
        return self.xfd.mmap()
|
|
223
|
+
|
|
224
|
+
    @property
    def dtype(self):
        """
        The data type object of the traces. This is the format most accurate
        and efficient to exchange with the underlying file, and the data type
        you will find the data traces in.

        Returns
        -------

        dtype : numpy.dtype

        Notes
        -----

        .. versionadded:: 1.6

        """
        return self._dtype
|
|
244
|
+
|
|
245
|
+
    @property
    def sorting(self):
        """
        Inline or crossline sorting, or Falsey (None or 0) if unstructured.

        Returns
        -------

        sorting : int

        """
        return self._sorting
|
|
257
|
+
|
|
258
|
+
    @property
    def tracecount(self):
        """Number of traces in this file

        Equivalent to ``len(f.trace)``

        Returns
        -------

        count : int
            Number of traces in this file

        """
        return self._tracecount
|
|
272
|
+
|
|
273
|
+
    @property
    def samples(self):
        """
        Return the array of samples with appropriate intervals.

        Returns
        -------

        samples : numpy.ndarray of int

        Notes
        -----

        It holds that ``len(f.samples) == len(f.trace[0])``

        """

        return self._samples
|
|
291
|
+
|
|
292
|
+
    @property
    def offsets(self):
        """

        Return the array of offset names. For post-stack data, this array has a
        length of 1

        Returns
        -------

        offsets : numpy.ndarray of int

        """
        return self._offsets
|
|
306
|
+
|
|
307
|
+
    @property
    def ext_headers(self):
        """Extra text headers

        The number of extra text headers, given by the ``ExtendedHeaders``
        field in the binary header.

        Returns
        -------

        headers : int
            Number of extra text headers

        """

        return self._ext_headers
|
|
323
|
+
|
|
324
|
+
    @property
    def unstructured(self):
        """
        If the file is unstructured, sophisticated addressing modes that
        require the file to represent a proper cube won't work, and only raw
        data reading and writing is supported.

        Returns
        -------

        unstructured : bool
            ``True`` if this file is unstructured, ``False`` if not

        """
        # a missing inline index is what defines "unstructured";
        # interpret() populates _ilines when structure is established
        return self.ilines is None
|
|
339
|
+
|
|
340
|
+
    @property
    def header(self):
        """
        Interact with segy in header mode

        Returns
        -------
        header : Header

        Notes
        -----
        .. versionadded:: 1.1
        """
        return self._header
|
|
354
|
+
|
|
355
|
+
    @header.setter
    def header(self, val):
        """headers macro assignment

        A convenient way for operating on all headers of a file is to use the
        default full-file range. It will write headers 0, 1, ..., n, but uses
        the iteration specified by the right-hand side (i.e. can skip headers
        etc).

        If the right-hand-side headers are exhausted before all the destination
        file headers the writing will stop, i.e. not all headers in the
        destination file will be written to.

        Examples
        --------
        Copy headers from file g to file f:

        >>> f.header = g.header

        Set offset field:

        >>> f.header = { TraceField.offset: 5 }

        Copy every 12th header from the file g to f's 0, 1, 2...:

        >>> f.header = g.header[::12]
        >>> f.header[0] == g.header[0]
        True
        >>> f.header[1] == g.header[12]
        True
        >>> f.header[2] == g.header[2]
        False
        >>> f.header[2] == g.header[24]
        True
        """
        # delegate to the Header mode object's full-range __setitem__
        self.header[:] = val
|
|
391
|
+
|
|
392
|
+
    def attributes(self, field):
        """File-wide attribute (header word) reading

        Lazily gather a single header word for every trace in the file. The
        array can be sliced, supports index lookup, and numpy-style
        list-of-indices.

        Parameters
        ----------

        field : int or segyio.TraceField
            field

        Returns
        -------

        attrs : Attributes
            A sliceable array_like of header words

        Notes
        -----

        .. versionadded:: 1.1

        """
        # a fresh Attributes view is built per call; nothing is cached
        return Attributes(field, self.xfd, self.tracecount)
|
|
418
|
+
|
|
419
|
+
    @property
    def trace(self):
        """
        Interact with segy in trace mode.

        Returns
        -------
        trace : Trace

        Notes
        -----
        .. versionadded:: 1.1

        """

        return self._trace
|
|
435
|
+
|
|
436
|
+
    @trace.setter
    def trace(self, val):
        """traces macro assignment

        Convenient way for setting traces from 0, 1, ... n, based on the
        iterable set of traces on the right-hand-side.

        If the right-hand-side traces are exhausted before all the destination
        file traces the writing will stop, i.e. not all traces in the
        destination file will be written.

        Notes
        -----
        .. versionadded:: 1.1

        Examples
        --------
        Copy traces from file g to file f:

        >>> f.trace = g.trace

        Copy first half of the traces from g to f:

        >>> f.trace = g.trace[:len(g.trace)/2]

        Fill the file with one trace (filled with zeros):

        >>> tr = np.zeros(f.samples)
        >>> f.trace = itertools.repeat(tr)

        For advanced users: sometimes you want to load the entire segy file
        to memory and apply your own structural manipulations or operations
        on it. Some segy files are very large and may not fit, in which
        case this feature will break down. This is an optimisation feature;
        using it should generally be driven by measurements.

        Read the first 10 traces:

        >>> f.trace.raw[0:10]

        Read *all* traces to memory:

        >>> f.trace.raw[:]

        Read every other trace to memory:

        >>> f.trace.raw[::2]
        """
        # delegate to the Trace mode object's full-range __setitem__
        self.trace[:] = val
|
|
485
|
+
|
|
486
|
+
    @property
    def ilines(self):
        """Inline labels

        The inline labels in this file, if structured, else None

        Returns
        -------

        inlines : array_like of int or None

        """
        return self._ilines
|
|
499
|
+
|
|
500
|
+
@property
|
|
501
|
+
def iline(self):
|
|
502
|
+
"""
|
|
503
|
+
Interact with segy in inline mode
|
|
504
|
+
|
|
505
|
+
Returns
|
|
506
|
+
-------
|
|
507
|
+
iline : Line or None
|
|
508
|
+
|
|
509
|
+
Raises
|
|
510
|
+
------
|
|
511
|
+
ValueError
|
|
512
|
+
If the file is unstructured
|
|
513
|
+
|
|
514
|
+
Notes
|
|
515
|
+
-----
|
|
516
|
+
.. versionadded:: 1.1
|
|
517
|
+
"""
|
|
518
|
+
|
|
519
|
+
if self.unstructured:
|
|
520
|
+
raise ValueError(self._unstructured_errmsg)
|
|
521
|
+
|
|
522
|
+
if self._iline is not None:
|
|
523
|
+
return self._iline
|
|
524
|
+
|
|
525
|
+
self._iline = Line(self,
|
|
526
|
+
self.ilines,
|
|
527
|
+
self._iline_length,
|
|
528
|
+
self._iline_stride,
|
|
529
|
+
self.offsets,
|
|
530
|
+
'inline',
|
|
531
|
+
)
|
|
532
|
+
return self._iline
|
|
533
|
+
|
|
534
|
+
    @iline.setter
    def iline(self, value):
        """inlines macro assignment

        Convenient way for setting inlines, from left-to-right as the inline
        numbers are specified in the file.ilines property, from an iterable
        set on the right-hand-side.

        If the right-hand-side inlines are exhausted before all the destination
        file inlines the writing will stop, i.e. not all inlines in the
        destination file will be written.

        Notes
        -----
        .. versionadded:: 1.1

        Examples
        --------
        Copy inlines from file g to file f:

        >>> f.iline = g.iline
        """
        # delegate to the Line mode object's full-range __setitem__
        self.iline[:] = value
|
|
557
|
+
|
|
558
|
+
    @property
    def xlines(self):
        """Crossline labels

        The crossline labels in this file, if structured, else None

        Returns
        -------

        crosslines : array_like of int or None

        """
        return self._xlines
|
|
571
|
+
|
|
572
|
+
@property
|
|
573
|
+
def xline(self):
|
|
574
|
+
"""
|
|
575
|
+
Interact with segy in crossline mode
|
|
576
|
+
|
|
577
|
+
Returns
|
|
578
|
+
-------
|
|
579
|
+
xline : Line or None
|
|
580
|
+
|
|
581
|
+
Raises
|
|
582
|
+
------
|
|
583
|
+
ValueError
|
|
584
|
+
If the file is unstructured
|
|
585
|
+
|
|
586
|
+
Notes
|
|
587
|
+
-----
|
|
588
|
+
.. versionadded:: 1.1
|
|
589
|
+
"""
|
|
590
|
+
if self.unstructured:
|
|
591
|
+
raise ValueError(self._unstructured_errmsg)
|
|
592
|
+
|
|
593
|
+
if self._xline is not None:
|
|
594
|
+
return self._xline
|
|
595
|
+
|
|
596
|
+
self._xline = Line(self,
|
|
597
|
+
self.xlines,
|
|
598
|
+
self._xline_length,
|
|
599
|
+
self._xline_stride,
|
|
600
|
+
self.offsets,
|
|
601
|
+
'crossline',
|
|
602
|
+
)
|
|
603
|
+
return self._xline
|
|
604
|
+
|
|
605
|
+
    @xline.setter
    def xline(self, value):
        """crosslines macro assignment

        Convenient way for setting crosslines, from left-to-right as the
        crossline numbers are specified in the file.xlines property, from an
        iterable set on the right-hand-side.

        If the right-hand-side crosslines are exhausted before all the
        destination file crosslines the writing will stop, i.e. not all
        crosslines in the destination file will be written.

        Notes
        -----
        .. versionadded:: 1.1

        Examples
        --------
        Copy crosslines from file g to file f:

        >>> f.xline = g.xline
        """
        # delegate to the Line mode object's full-range __setitem__
        self.xline[:] = value
|
|
628
|
+
|
|
629
|
+
@property
|
|
630
|
+
def fast(self):
|
|
631
|
+
"""Access the 'fast' dimension
|
|
632
|
+
|
|
633
|
+
This mode yields iline or xline mode, depending on which one is laid
|
|
634
|
+
out `faster`, i.e. the line with linear disk layout. Use this mode if
|
|
635
|
+
the inline/crossline distinction isn't as interesting as traversing in
|
|
636
|
+
a fast manner (typically when you want to apply a function to the whole
|
|
637
|
+
file, line-by-line).
|
|
638
|
+
|
|
639
|
+
Returns
|
|
640
|
+
-------
|
|
641
|
+
fast : Line
|
|
642
|
+
line addressing mode
|
|
643
|
+
|
|
644
|
+
Notes
|
|
645
|
+
-----
|
|
646
|
+
.. versionadded:: 1.1
|
|
647
|
+
"""
|
|
648
|
+
if self.sorting == TraceSortingFormat.INLINE_SORTING:
|
|
649
|
+
return self.iline
|
|
650
|
+
elif self.sorting == TraceSortingFormat.CROSSLINE_SORTING:
|
|
651
|
+
return self.xline
|
|
652
|
+
else:
|
|
653
|
+
raise RuntimeError("Unknown sorting.")
|
|
654
|
+
|
|
655
|
+
@property
|
|
656
|
+
def slow(self):
|
|
657
|
+
"""Access the 'slow' dimension
|
|
658
|
+
|
|
659
|
+
This mode yields iline or xline mode, depending on which one is laid
|
|
660
|
+
out `slower`, i.e. the line with strided disk layout. Use this mode if
|
|
661
|
+
the inline/crossline distinction isn't as interesting as traversing in
|
|
662
|
+
the slower direction.
|
|
663
|
+
|
|
664
|
+
Returns
|
|
665
|
+
-------
|
|
666
|
+
slow : Line
|
|
667
|
+
line addressing mode
|
|
668
|
+
|
|
669
|
+
Notes
|
|
670
|
+
-----
|
|
671
|
+
.. versionadded:: 1.1
|
|
672
|
+
"""
|
|
673
|
+
if self.sorting == TraceSortingFormat.INLINE_SORTING:
|
|
674
|
+
return self.xline
|
|
675
|
+
elif self.sorting == TraceSortingFormat.CROSSLINE_SORTING:
|
|
676
|
+
return self.iline
|
|
677
|
+
else:
|
|
678
|
+
raise RuntimeError("Unknown sorting.")
|
|
679
|
+
|
|
680
|
+
@property
|
|
681
|
+
def depth_slice(self):
|
|
682
|
+
"""
|
|
683
|
+
Interact with segy in depth slice mode (fixed z-coordinate)
|
|
684
|
+
|
|
685
|
+
Returns
|
|
686
|
+
-------
|
|
687
|
+
depth : Depth
|
|
688
|
+
|
|
689
|
+
Notes
|
|
690
|
+
-----
|
|
691
|
+
.. versionadded:: 1.1
|
|
692
|
+
|
|
693
|
+
.. versionchanged:: 1.7.1
|
|
694
|
+
enabled for unstructured files
|
|
695
|
+
"""
|
|
696
|
+
|
|
697
|
+
if self.depth is not None:
|
|
698
|
+
return self.depth
|
|
699
|
+
|
|
700
|
+
from .depth import Depth
|
|
701
|
+
self.depth = Depth(self)
|
|
702
|
+
return self.depth
|
|
703
|
+
|
|
704
|
+
    @depth_slice.setter
    def depth_slice(self, value):
        """depth macro assignment

        Convenient way for setting depth slices, from left-to-right as the
        depth slice numbers are specified in the file.depth_slice property,
        from an iterable set on the right-hand-side.

        If the right-hand-side depth slices are exhausted before all the
        destination file depth slices the writing will stop, i.e. not all
        depth slices in the destination file will be written.

        Examples
        --------
        Copy depth slices from file g to file f:

        >>> f.depth_slice = g.depth_slice

        Copy first half of the depth slices from g to f:

        >>> f.depth_slice = g.depth_slice[:g.samples/2]
        """
        # delegate to the Depth mode object's full-range __setitem__
        self.depth_slice[:] = value
|
|
727
|
+
|
|
728
|
+
@property
|
|
729
|
+
def gather(self):
|
|
730
|
+
"""
|
|
731
|
+
Interact with segy in gather mode
|
|
732
|
+
|
|
733
|
+
Returns
|
|
734
|
+
-------
|
|
735
|
+
gather : Gather
|
|
736
|
+
"""
|
|
737
|
+
if self.unstructured:
|
|
738
|
+
raise ValueError(self._unstructured_errmsg)
|
|
739
|
+
|
|
740
|
+
if self._gather is not None:
|
|
741
|
+
return self._gather
|
|
742
|
+
|
|
743
|
+
self._gather = Gather(self.trace, self.iline, self.xline, self.offsets)
|
|
744
|
+
return self._gather
|
|
745
|
+
|
|
746
|
+
    @property
    def text(self):
        """Interact with segy in text mode

        This mode gives access to reading and writing functionality for textual
        headers.

        The primary data type is the python string. Reading textual headers is
        done with ``[]``, and writing is done via assignment. No additional
        structure is built around the textual header, so everything is treated
        as one long string without line breaks.

        Returns
        -------
        text : Text

        See also
        --------
        segyio.tools.wrap : line-wrap a text header

        Notes
        -----
        .. versionadded:: 1.1
        """
        # 1 mandatory textual header plus the extended ones
        return Text(self.xfd, self._ext_headers + 1)
|
|
771
|
+
|
|
772
|
+
    @property
    def bin(self):
        """
        Interact with segy in binary mode

        This mode gives access to reading and writing functionality for the
        binary header. Please note that using numeric binary offsets uses the
        offset numbers from the specification, i.e. the first field of the
        binary header starts at 3201, not 1. If you're using the enumerations
        this is handled for you.

        Returns
        -------
        binary : Field

        Notes
        -----
        .. versionadded:: 1.1
        """

        return Field.binary(self)
|
|
793
|
+
|
|
794
|
+
    @bin.setter
    def bin(self, value):
        """Update binary header

        Update a value or replace the binary header

        Parameters
        ----------

        value : dict_like
            dict_like, keys of int or segyio.BinField or segyio.su

        """
        # read-modify-write through the Field returned by the getter
        self.bin.update(value)
|
|
808
|
+
|
|
809
|
+
@property
|
|
810
|
+
def format(self):
|
|
811
|
+
d = {
|
|
812
|
+
-2: "4-byte native big-endian float",
|
|
813
|
+
-1: "4-byte native little-endian float",
|
|
814
|
+
1: "4-byte IBM float",
|
|
815
|
+
2: "4-byte signed integer",
|
|
816
|
+
3: "2-byte signed integer",
|
|
817
|
+
4: "4-byte fixed point with gain",
|
|
818
|
+
5: "4-byte IEEE float",
|
|
819
|
+
6: "8-byte IEEE float",
|
|
820
|
+
7: "3-byte signed integer",
|
|
821
|
+
8: "1-byte signed char",
|
|
822
|
+
9: "8-byte signed integer",
|
|
823
|
+
10: "4-byte unsigned integer",
|
|
824
|
+
11: "2-byte unsigned integer",
|
|
825
|
+
12: "8-byte unsigned integer",
|
|
826
|
+
15: "3-byte unsigned integer",
|
|
827
|
+
16: "1-byte unsigned char"
|
|
828
|
+
}
|
|
829
|
+
|
|
830
|
+
class fmt:
|
|
831
|
+
def __int__(inner):
|
|
832
|
+
return self._fmt
|
|
833
|
+
|
|
834
|
+
def __str__(inner):
|
|
835
|
+
if not self._fmt in d:
|
|
836
|
+
return "Unknown format"
|
|
837
|
+
|
|
838
|
+
return d[self._fmt]
|
|
839
|
+
|
|
840
|
+
return fmt()
|
|
841
|
+
|
|
842
|
+
    @property
    def readonly(self):
        """File is read-only

        Returns
        -------
        readonly : bool
            True if this file is read-only

        Notes
        -----
        .. versionadded:: 1.6
        """

        # any mode containing '+' ('r+', 'w+', ...) permits writing
        return '+' not in self._mode
|
|
857
|
+
|
|
858
|
+
|
|
859
|
+
def interpret(self, ilines, xlines, offsets=None, sorting=TraceSortingFormat.INLINE_SORTING):
|
|
860
|
+
|
|
861
|
+
""" (Re-)interpret structure on top of a file
|
|
862
|
+
|
|
863
|
+
(Re-)interpret the structure of the file given the new sorting, ilines,
|
|
864
|
+
xlines and offset indices. Note that file itself is not changed in any
|
|
865
|
+
way, it is only segyio's interpretation of the file that changes. It's
|
|
866
|
+
a way of telling segyio that a file is laid out in a particular way,
|
|
867
|
+
even though the header fields say otherwise.
|
|
868
|
+
|
|
869
|
+
`interpret` expect that the ilines-, xlines- and offsets-indices are
|
|
870
|
+
unique. It also expect the dimensions of ilines, xlines and offset to
|
|
871
|
+
match the tracecount.
|
|
872
|
+
|
|
873
|
+
Parameters
|
|
874
|
+
----------
|
|
875
|
+
f : SegyFile
|
|
876
|
+
ilines : array_like
|
|
877
|
+
ilines indices in new structure
|
|
878
|
+
xlines : array_like
|
|
879
|
+
xlines indices in new structure
|
|
880
|
+
offsets : array_like
|
|
881
|
+
offset indices in new structure
|
|
882
|
+
sorting : int, string or TraceSortingFormat
|
|
883
|
+
Sorting in new structure
|
|
884
|
+
|
|
885
|
+
Notes
|
|
886
|
+
-----
|
|
887
|
+
|
|
888
|
+
.. versionadded:: 1.8
|
|
889
|
+
|
|
890
|
+
Examples
|
|
891
|
+
--------
|
|
892
|
+
(Re)interpret the structure of the file:
|
|
893
|
+
|
|
894
|
+
>>> ilines = [10, 11, 12, 13]
|
|
895
|
+
>>> xlines = [20, 21, 22, 23, 24]
|
|
896
|
+
>>> with segyio.open(file, ignore_geometry=True) as f:
|
|
897
|
+
... f.interpret(ilines, xlines)
|
|
898
|
+
"""
|
|
899
|
+
|
|
900
|
+
valid_sortings = {
|
|
901
|
+
1 : TraceSortingFormat.CROSSLINE_SORTING,
|
|
902
|
+
2 : TraceSortingFormat.INLINE_SORTING,
|
|
903
|
+
'iline' : TraceSortingFormat.INLINE_SORTING,
|
|
904
|
+
'inline' : TraceSortingFormat.INLINE_SORTING,
|
|
905
|
+
'xl' : TraceSortingFormat.CROSSLINE_SORTING,
|
|
906
|
+
'crossline' : TraceSortingFormat.CROSSLINE_SORTING,
|
|
907
|
+
TraceSortingFormat.INLINE_SORTING : TraceSortingFormat.INLINE_SORTING,
|
|
908
|
+
TraceSortingFormat.CROSSLINE_SORTING : TraceSortingFormat.CROSSLINE_SORTING,
|
|
909
|
+
}
|
|
910
|
+
|
|
911
|
+
if sorting not in valid_sortings:
|
|
912
|
+
error = "Invalid sorting"
|
|
913
|
+
solution = "valid sorting options are: {}".format(valid_sortings.keys())
|
|
914
|
+
raise ValueError('{}, {}'.format(error, solution))
|
|
915
|
+
|
|
916
|
+
if offsets is None:
|
|
917
|
+
offsets = np.arange(1)
|
|
918
|
+
|
|
919
|
+
ilines = np.copy(np.asarray(ilines, dtype=np.intc))
|
|
920
|
+
xlines = np.copy(np.asarray(xlines, dtype=np.intc))
|
|
921
|
+
offsets = np.copy(np.asarray(offsets, dtype=np.intc))
|
|
922
|
+
|
|
923
|
+
if np.unique(ilines).size != ilines.size:
|
|
924
|
+
error = "Inlines inconsistent"
|
|
925
|
+
solution = "expect all inlines to be unique"
|
|
926
|
+
raise ValueError("{}, {}".format(error, solution))
|
|
927
|
+
|
|
928
|
+
if np.unique(xlines).size != xlines.size:
|
|
929
|
+
error = "Crosslines inconsistent"
|
|
930
|
+
solution = "expect all crosslines to be unique"
|
|
931
|
+
raise ValueError("{}, {}".format(error, solution))
|
|
932
|
+
|
|
933
|
+
if np.unique(offsets).size != offsets.size:
|
|
934
|
+
error = "Offsets inconsistent"
|
|
935
|
+
solution = "expect all offsets to be unique"
|
|
936
|
+
raise ValueError("{}, {}".format(error, solution))
|
|
937
|
+
|
|
938
|
+
if ilines.size * xlines.size * offsets.size != self.tracecount:
|
|
939
|
+
error = ("Invalid dimensions, ilines ({}) * xlines ({}) * offsets "
|
|
940
|
+
"({}) should match the number of traces ({})").format(ilines.size,
|
|
941
|
+
xlines.size,
|
|
942
|
+
offsets.size,
|
|
943
|
+
self.tracecount)
|
|
944
|
+
raise ValueError(error)
|
|
945
|
+
|
|
946
|
+
from . import _segyio
|
|
947
|
+
|
|
948
|
+
line_metrics = _segyio.line_metrics(sorting,
|
|
949
|
+
self.tracecount,
|
|
950
|
+
ilines.size,
|
|
951
|
+
xlines.size,
|
|
952
|
+
offsets.size)
|
|
953
|
+
|
|
954
|
+
self._iline_length = line_metrics['iline_length']
|
|
955
|
+
self._iline_stride = line_metrics['iline_stride']
|
|
956
|
+
|
|
957
|
+
self._xline_length = line_metrics['xline_length']
|
|
958
|
+
self._xline_stride = line_metrics['xline_stride']
|
|
959
|
+
|
|
960
|
+
self._sorting = sorting
|
|
961
|
+
self._offsets = offsets
|
|
962
|
+
self._ilines = ilines
|
|
963
|
+
self._xlines = xlines
|
|
964
|
+
|
|
965
|
+
return self
|
|
966
|
+
|
|
967
|
+
    def group(self, word):
        """Get groups of traces matching a key

        This feature is **experimental**, and there are no guarantees code
        using this will work in the future.

        Walks the headers and groups traces into buckets, where all traces in a
        bucket have the same value for the given set of words. It is
        particularly useful for pre-stack files, gathering traces belonging to
        the same gather or shot.

        Parameters
        ----------
        word : segyio.TraceField or iterable of segyio.TraceField
            The set of words belonging to the same bucket

        Returns
        -------
        groups : segyio.gather.Groups

        Notes
        -----
        This feature is **experimental**, but you are encouraged to try it out.
        Bug reports and suggestions for improvement are welcomed, but no
        guarantees are made that the interface will remain as it is in the
        future.
        """
        # FutureWarning flags the experimental status to downstream callers
        msg = 'group is experimental and may change in the future'
        warnings.warn(msg, FutureWarning)
        return Groups(self.trace, self.header, word)
|
|
997
|
+
|
|
998
|
+
|
|
999
|
+
class spec(object):
    """Structure description record

    A plain mutable value object holding geometry and format parameters
    for a SEG-Y file; presumably consumed by ``segyio.create`` - confirm
    against create.py. The ``iline``/``xline``/``endian`` defaults mirror
    ``SegyFile.__init__``.
    """
    def __init__(self):
        self.iline = 189        # inline header-word byte offset
        self.ilines = None      # inline indices (iterable), or None
        self.xline = 193        # crossline header-word byte offset
        self.xlines = None      # crossline indices (iterable), or None
        self.offsets = [1]      # offset indices; length 1 => post-stack
        self.samples = None     # sample labels, one per trace sample
        self.ext_headers = 0    # number of extended textual headers
        self.format = None      # trace value format code (binary header)
        self.sorting = None     # TraceSortingFormat, or None
        self.endian = 'big'     # byte order of the file
|