segyio 1.9.13__cp313-cp313-win32.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of segyio might be problematic. Click here for more details.
- segyio/__init__.py +99 -0
- segyio/_segyio.cp313-win32.pyd +0 -0
- segyio/binfield.py +89 -0
- segyio/create.py +257 -0
- segyio/depth.py +180 -0
- segyio/field.py +546 -0
- segyio/gather.py +444 -0
- segyio/line.py +498 -0
- segyio/open.py +192 -0
- segyio/segy.py +1010 -0
- segyio/segysampleformat.py +19 -0
- segyio/su/__init__.py +2 -0
- segyio/su/file.py +118 -0
- segyio/su/words.py +284 -0
- segyio/tools.py +731 -0
- segyio/trace.py +967 -0
- segyio/tracefield.py +195 -0
- segyio/tracesortingformat.py +6 -0
- segyio/utils.py +28 -0
- segyio-1.9.13.dist-info/METADATA +79 -0
- segyio-1.9.13.dist-info/RECORD +23 -0
- segyio-1.9.13.dist-info/WHEEL +5 -0
- segyio-1.9.13.dist-info/top_level.txt +1 -0
segyio/__init__.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
"""segyio
|
|
2
|
+
|
|
3
|
+
Welcome to segyio. For help, examples and reference, type ``help(function)`` in
|
|
4
|
+
your favourite python interpreter, or ``pydoc function`` in the unix console.
|
|
5
|
+
|
|
6
|
+
The segyio library attempts to be easy to use efficiently for prototyping and
|
|
7
|
+
interaction with possibly large segy files. File reading and writing is
|
|
8
|
+
streaming, with large file support out of the box and without hassle. For a
|
|
9
|
+
quick start on reading files, type ``help(segyio.open)``.
|
|
10
|
+
|
|
11
|
+
An open segy file is interacted with in modes. For a reference with examples,
|
|
12
|
+
please type ``help(segyio.segy)``, look at the online documentation at
|
|
13
|
+
segyio.readthedocs.io, or run ``help()`` on the object returned by
|
|
14
|
+
``segyio.open``. For documentation on individual modes, please
|
|
15
|
+
refer to the individual modes with ``help(f.[mode])``, where ``f`` is an open
|
|
16
|
+
file handle.
|
|
17
|
+
|
|
18
|
+
The available modes are:
|
|
19
|
+
* text, for textual headers including extended headers
|
|
20
|
+
* bin, for the binary header
|
|
21
|
+
* header, for the trace headers
|
|
22
|
+
* trace, for trace data
|
|
23
|
+
* iline, for inline biased operations
|
|
24
|
+
* xline, for crossline biased operations
|
|
25
|
+
* depth_slice, for depth biased operations
|
|
26
|
+
* gather, for gather/intersection biased operations
|
|
27
|
+
|
|
28
|
+
The primary data type is the numpy.ndarray. All examples use ``np`` for the
|
|
29
|
+
numpy namespace. That means that any function that returns a trace, a set of
|
|
30
|
+
samples or even full lines, returns a numpy.ndarray. This enables quick and
|
|
31
|
+
easy mathematical operations on the data you care about.
|
|
32
|
+
|
|
33
|
+
Segyio is designed to blend into regular python code, so python concepts that
|
|
34
|
+
map to segy operations are written to behave similarly. That means that
|
|
35
|
+
sequences of data support list lookup, slicing (``f.trace[0:10:2]``), ``for x
|
|
36
|
+
in`` etc. Please refer to the individual modes' documentation for a more
|
|
37
|
+
extensive set of examples.
|
|
38
|
+
|
|
39
|
+
For all slicing operations that segyio provides the underlying buffer is
|
|
40
|
+
reused, so if you want to keep the data between iterations it is necessary to
|
|
41
|
+
manually copy the data, e.g. ``numpy.copy()``. Please refer to the examples.
|
|
42
|
+
"""
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class Enum(object):
    """Base class for segyio's integer-backed enumerations.

    Wraps a single integer value and compares equal to anything that
    converts cleanly to the same integer. Subclasses declare their members
    as plain ``int`` class attributes (see e.g. BinField, TraceField).
    """

    def __init__(self, enum_value):
        super(Enum, self).__init__()
        # store the canonical integer representation
        self._value = int(enum_value)

    def __int__(self):
        return int(self._value)

    def __str__(self):
        # reverse-lookup the member name among the subclass' int attributes
        for k, v in self.__class__.__dict__.items():
            if isinstance(v, int) and self._value == v:
                return k
        return "Unknown Enum"

    def __repr__(self):
        return str(self)

    def __hash__(self):
        # hash like the underlying int, consistent with __eq__
        return hash(self._value)

    def __eq__(self, other):
        try:
            o = int(other)
        except (ValueError, TypeError):
            # not int-convertible (e.g. None, arbitrary objects). int() raises
            # TypeError for non-numeric types and ValueError for bad strings;
            # catching both means comparison degrades to identity instead of
            # raising (previously `Enum(1) == None` raised TypeError).
            return super(Enum, self).__eq__(other)
        else:
            return self._value == o

    def __ne__(self, other):
        return not self == other

    @classmethod
    def enums(cls):
        """Return every member of this enum, sorted by integer value.

        Returns
        -------
        members : list of cls
        """
        result = []
        for k, v in cls.__dict__.items():
            # only public int attributes are enum members
            if isinstance(v, int) and not k.startswith("_"):
                result.append(cls(v))

        return sorted(result, key=int)
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
from .binfield import BinField
|
|
87
|
+
from .segysampleformat import SegySampleFormat
|
|
88
|
+
from .tracesortingformat import TraceSortingFormat
|
|
89
|
+
from .tracefield import TraceField
|
|
90
|
+
from . import su
|
|
91
|
+
from .open import open
|
|
92
|
+
from .create import create
|
|
93
|
+
from .segy import SegyFile, spec
|
|
94
|
+
from .tools import dt, sample_indexes, create_text_header, native
|
|
95
|
+
from .tools import collect, cube
|
|
96
|
+
|
|
97
|
+
# Package metadata
__copyright__ = 'Copyright 2016, Statoil ASA'
__license__ = 'GNU Lesser General Public License version 3'
__status__ = 'Production'
|
|
Binary file
|
segyio/binfield.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
from . import Enum
|
|
2
|
+
|
|
3
|
+
class BinField(Enum):
    """Binary file header field enumerator.

    Members are byte offsets into the SEG-Y binary file header (the 400-byte
    header following the textual header, bytes 3201-3600).

    See also
    -------
    segyio.su : Seismic unix aliases for header fields
    """

    JobID = 3201
    LineNumber = 3205
    ReelNumber = 3209
    Traces = 3213
    AuxTraces = 3215
    Interval = 3217
    IntervalOriginal = 3219
    Samples = 3221
    SamplesOriginal = 3223
    Format = 3225
    EnsembleFold = 3227
    SortingCode = 3229
    VerticalSum = 3231
    SweepFrequencyStart = 3233
    SweepFrequencyEnd = 3235
    SweepLength = 3237
    Sweep = 3239
    SweepChannel = 3241
    SweepTaperStart = 3243
    SweepTaperEnd = 3245
    Taper = 3247
    CorrelatedTraces = 3249
    BinaryGainRecovery = 3251
    AmplitudeRecovery = 3253
    MeasurementSystem = 3255
    ImpulseSignalPolarity = 3257
    VibratoryPolarity = 3259
    ExtTraces = 3261
    ExtAuxTraces = 3265
    ExtSamples = 3269
    ExtSamplesOriginal = 3289
    ExtEnsembleFold = 3293
    # NOTE(review): 3261 duplicates ExtTraces, while the `keys` dict below
    # maps 'Unassigned1' to 3301 -- confirm which offset is intended before
    # relying on either. Values left untouched to preserve behavior.
    Unassigned1 = 3261
    SEGYRevision = 3501
    SEGYRevisionMinor = 3502
    TraceFlag = 3503
    ExtendedHeaders = 3505
    Unassigned2 = 3507

    # name -> byte-offset lookup table; mirrors the members above (but see
    # the Unassigned1 note)
    keys = {
        'JobID'                 :   3201,
        'LineNumber'            :   3205,
        'ReelNumber'            :   3209,
        'Traces'                :   3213,
        'AuxTraces'             :   3215,
        'Interval'              :   3217,
        'IntervalOriginal'      :   3219,
        'Samples'               :   3221,
        'SamplesOriginal'       :   3223,
        'Format'                :   3225,
        'EnsembleFold'          :   3227,
        'SortingCode'           :   3229,
        'VerticalSum'           :   3231,
        'SweepFrequencyStart'   :   3233,
        'SweepFrequencyEnd'     :   3235,
        'SweepLength'           :   3237,
        'Sweep'                 :   3239,
        'SweepChannel'          :   3241,
        'SweepTaperStart'       :   3243,
        'SweepTaperEnd'         :   3245,
        'Taper'                 :   3247,
        'CorrelatedTraces'      :   3249,
        'BinaryGainRecovery'    :   3251,
        'AmplitudeRecovery'     :   3253,
        'MeasurementSystem'     :   3255,
        'ImpulseSignalPolarity' :   3257,
        'VibratoryPolarity'     :   3259,
        'ExtTraces'             :   3261,
        'ExtAuxTraces'          :   3265,
        'ExtSamples'            :   3269,
        'ExtSamplesOriginal'    :   3289,
        'ExtEnsembleFold'       :   3293,
        'Unassigned1'           :   3301,
        'SEGYRevision'          :   3501,
        'SEGYRevisionMinor'     :   3502,
        'TraceFlag'             :   3503,
        'ExtendedHeaders'       :   3505,
        'Unassigned2'           :   3507,
    }
|
segyio/create.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
1
|
+
import datetime
|
|
2
|
+
import numpy
|
|
3
|
+
import segyio
|
|
4
|
+
|
|
5
|
+
from . import TraceSortingFormat
|
|
6
|
+
|
|
7
|
+
def default_text_header(iline, xline, offset):
    """Build the default textual (EBCDIC) header for a freshly created file.

    Records the creation date and the byte positions used for the inline,
    crossline and offset trace-header words, so a reader can recover the
    geometry configuration.

    Parameters
    ----------
    iline : int
        Trace-header byte position of the inline number
    xline : int
        Trace-header byte position of the crossline number
    offset : int
        Trace-header byte position of the offset word

    Returns
    -------
    header : bytes
    """
    off = int(offset)
    lines = {
        1:  "DATE %s" % datetime.date.today().isoformat(),
        2:  "AN INCREASE IN AMPLITUDE EQUALS AN INCREASE IN ACOUSTIC IMPEDANCE",
        3:  "Written by libsegyio (python)",
        11: "TRACE HEADER POSITION:",
        12: " INLINE BYTES %03d-%03d | OFFSET BYTES %03d-%03d" % (iline, iline + 4, off, off + 4),
        13: " CROSSLINE BYTES %03d-%03d |" % (xline, xline + 4),
        15: "END EBCDIC HEADER",
    }
    # render into a mutable buffer so the final byte can be patched
    header = bytearray(segyio.create_text_header(lines), 'ascii')
    header[-1] = 128  # \x80 terminator -- unsure if this is really required
    # freeze back into an immutable bytes object, compatible with strings
    return bytes(header)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def structured(spec):
    """Return True if `spec` describes a structured (geometry-aware) file.

    A spec is structured when it provides ilines, xlines and offsets, all
    non-None and non-empty; anything else describes an unstructured file.

    Parameters
    ----------
    spec : segyio.spec or any object with the expected attributes

    Returns
    -------
    structured : bool
    """
    values = []
    # gather all three geometry attributes first; a missing or None
    # attribute immediately disqualifies the spec
    for attr in ('ilines', 'xlines', 'offsets'):
        value = getattr(spec, attr, None)
        if value is None:
            return False
        values.append(value)

    # every axis must also be non-empty
    return all(list(value) for value in values)
|
|
37
|
+
|
|
38
|
+
def create(filename, spec):
    """Create a new segy file.

    Create a new segy file with the geometry and properties given by `spec`.
    This enables creating SEGY files from your data. The created file supports
    all segyio modes, but has an emphasis on writing. The spec must be
    complete, otherwise an exception will be raised. A default, empty spec can
    be created with ``segyio.spec()``.

    Very little data is written to the file, so just calling create is not
    sufficient to re-read the file with segyio. Rather, every trace header and
    trace must be written to the file to be considered complete.

    Create should be used together with python's ``with`` statement. This
    ensures the data is written. Please refer to the examples.

    The ``segyio.spec()`` function will default sorting, offsets and everything
    in the mandatory group, except format and samples, and requires the caller
    to fill in *all* the fields in either of the exclusive groups.

    If any field is missing from the first exclusive group, and the tracecount
    is set, the resulting file will be considered unstructured. If the
    tracecount is set, and all fields of the first exclusive group are
    specified, the file is considered structured and the tracecount is inferred
    from the xlines/ilines/offsets. The offsets are defaulted to ``[1]`` by
    ``segyio.spec()``.

    Parameters
    ----------
    filename : str
        Path to file to create
    spec : segyio.spec
        Structure of the segy file

    Returns
    -------
    file : segyio.SegyFile
        An open segyio file handle, similar to that returned by `segyio.open`

    See also
    --------
    segyio.spec : template for the `spec` argument


    Notes
    -----

    .. versionadded:: 1.1

    .. versionchanged:: 1.4
        Support for creating unstructured files

    .. versionchanged:: 1.8
        Support for creating lsb files

    The ``spec`` is any object that has the following attributes

    Mandatory::

        iline   : int or segyio.BinField
        xline   : int or segyio.BinField
        samples : array of int
        format  : { 1, 5 }
            1 = IBM float, 5 = IEEE float

    Exclusive::

        ilines  : array_like of int
        xlines  : array_like of int
        offsets : array_like of int
        sorting : int or segyio.TraceSortingFormat

        OR

        tracecount : int

    Optional::

        ext_headers : int
        endian : str { 'big', 'msb', 'little', 'lsb' }
            defaults to 'big'


    Examples
    --------

    Create a file:

    >>> spec = segyio.spec()
    >>> spec.ilines  = [1, 2, 3, 4]
    >>> spec.xlines  = [11, 12, 13]
    >>> spec.samples = list(range(50))
    >>> spec.sorting = 2
    >>> spec.format  = 1
    >>> with segyio.create(path, spec) as f:
    ...     ## fill the file with data
    ...     pass
    ...

    Copy a file, but shorten all traces by 50 samples:

    >>> with segyio.open(srcpath) as src:
    ...     spec = segyio.spec()
    ...     spec.sorting = src.sorting
    ...     spec.format = src.format
    ...     spec.samples = src.samples[:len(src.samples) - 50]
    ...     spec.ilines = src.ilines
    ...     spec.xlines = src.xlines
    ...     with segyio.create(dstpath, spec) as dst:
    ...         dst.text[0] = src.text[0]
    ...         dst.bin = src.bin
    ...         # this is writing a sparse file, which might be slow on some
    ...         # systems
    ...         dst.header = src.header
    ...         dst.trace = src.trace

    Copy a file, but shift samples time by 50:

    >>> with segyio.open(srcpath) as src:
    ...     delrt = 50
    ...     spec = segyio.spec()
    ...     spec.samples = src.samples + delrt
    ...     spec.ilines = src.ilines
    ...     spec.xlines = src.xlines
    ...     with segyio.create(dstpath, spec) as dst:
    ...         dst.text[0] = src.text[0]
    ...         dst.bin = src.bin
    ...         dst.header = src.header
    ...         dst.header = { TraceField.DelayRecordingTime: delrt }
    ...         dst.trace = src.trace

    Copy a file, but shorten all traces by 50 samples (since v1.4):

    >>> with segyio.open(srcpath) as src:
    ...     spec = segyio.tools.metadata(src)
    ...     spec.samples = spec.samples[:len(spec.samples) - 50]
    ...     with segyio.create(dstpath, spec) as dst:
    ...         dst.text[0] = src.text[0]
    ...         dst.bin = src.bin
    ...         dst.header = src.header
    ...         dst.trace = src.trace
    """

    # deferred import: _segyio is the compiled extension backing file I/O
    from . import _segyio

    # structured files derive their tracecount from the geometry; otherwise
    # the caller must supply it explicitly
    if not structured(spec):
        tracecount = spec.tracecount
    else:
        tracecount = len(spec.ilines) * len(spec.xlines) * len(spec.offsets)

    ext_headers = spec.ext_headers if hasattr(spec, 'ext_headers') else 0
    samples = numpy.asarray(spec.samples)

    # the lsb flag is passed to the extension as bit 8 of the mode word
    endians = {
        'lsb': 256, # (1 << 8)
        'little': 256,
        'msb': 0,
        'big': 0,
    }
    endian = spec.endian if hasattr(spec, 'endian') else 'big'
    if endian is None:
        endian = 'big'

    if endian not in endians:
        problem = 'unknown endianness {}, expected one of: '
        opts = ' '.join(endians.keys())
        raise ValueError(problem.format(endian) + opts)

    # open the low-level descriptor and size the file on disk before
    # wrapping it in a SegyFile
    fd = _segyio.segyiofd(str(filename), 'w+', endians[endian])
    fd.segymake(
        samples = len(samples),
        tracecount = tracecount,
        format = int(spec.format),
        ext_headers = int(ext_headers),
    )

    f = segyio.SegyFile(fd,
            filename = str(filename),
            mode = 'w+',
            iline = int(spec.iline),
            xline = int(spec.xline),
            endian = endian,
            )

    f._samples = samples

    if structured(spec):
        # default to inline sorting when the spec doesn't say otherwise
        sorting = spec.sorting if hasattr(spec, 'sorting') else None
        if sorting is None:
            sorting = TraceSortingFormat.INLINE_SORTING
        f.interpret(spec.ilines, spec.xlines, spec.offsets, sorting)

    f.text[0] = default_text_header(f._il, f._xl, segyio.TraceField.offset)

    # sample interval for the binary header; samples are assumed to be in
    # ms and the header field in microseconds, hence * 1000 -- with a single
    # sample there is no delta, so the sample value itself is used
    if len(samples) == 1:
        interval = int(samples[0] * 1000)
    else:
        interval = int((samples[1] - samples[0]) * 1000)

    f.bin.update(
        ntrpr = tracecount,
        nart = tracecount,
        hdt = interval,
        dto = interval,
        hns = len(samples),
        nso = len(samples),
        format = int(spec.format),
        exth = ext_headers,
    )

    # the 16-bit hns/nso fields can't represent more than 65535 samples
    if len(samples) > 2**16 - 1:
        # when using the ext-samples field, also set rev2, even though it's a
        # soft lie and files aren't really compliant
        f.bin.update(
            exthns = len(samples),
            extnso = len(samples),
            rev = 2
        )

    return f
|
segyio/depth.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
1
|
+
try:
|
|
2
|
+
from collections.abc import Sequence # noqa
|
|
3
|
+
except ImportError:
|
|
4
|
+
from collections import Sequence # noqa
|
|
5
|
+
|
|
6
|
+
import numpy as np
|
|
7
|
+
try: from future_builtins import zip
|
|
8
|
+
except ImportError: pass
|
|
9
|
+
from .trace import Sequence
|
|
10
|
+
from .utils import castarray
|
|
11
|
+
|
|
12
|
+
class Depth(Sequence):
    """
    The Depth implements the array interface, where every array element, the
    depth, is a numpy.ndarray of a horizontal cut of the volume. As all arrays
    it can be random accessed, iterated over, and read strided. Please note
    that SEG-Y data is laid out trace-by-trace on disk, so accessing horizontal
    cuts (fixed z-coordinates in a cartesian grid) is *very* inefficient.

    This mode works even on unstructured files, because it is not reliant on
    in/crosslines to be sensible. Please note that in the case of unstructured
    depth slicing, the array shape == tracecount.

    Notes
    -----
    .. versionadded:: 1.1

    .. versionchanged:: 1.6
        common list operations (Sequence)

    .. versionchanged:: 1.7.1
        enabled for unstructured files

    .. warning::
        Accessing the file by depth (fixed z-coordinate) is inefficient because
        of poor locality and many reads. If you read more than a handful
        depths, consider using a faster mode.
    """

    def __init__(self, fd):
        # one depth per sample; the Sequence base provides len/iteration
        super(Depth, self).__init__(len(fd.samples))
        self.filehandle = fd.xfd
        self.dtype = fd.dtype

        if fd.unstructured:
            # no geometry: a depth is simply one sample from every trace
            self.shape = fd.tracecount
            self.offsets = 1
        else:
            # fast-by-slow, e.g. (ilines, xlines) for inline-sorted files
            self.shape = (len(fd.fast), len(fd.slow))
            self.offsets = len(fd.offsets)

    def __getitem__(self, i):
        """depth[i]

        ith depth, a horizontal cross-section of the file, starting at 0.
        depth[i] returns a numpy.ndarray, and changes to this array will *not*
        be reflected on disk.

        When i is a slice, a generator of numpy.ndarray is returned.

        The depth slices are returned as a fast-by-slow shaped array, i.e. an
        inline sorted file with 10 inlines and 5 crosslines has the shape
        (10,5). If the file is unsorted, the array shape == tracecount.

        Be aware that this interface uses zero-based indices (like traces) and
        *not keys* (like ilines), so you can *not* use the values file.samples
        as indices.

        Parameters
        ----------
        i : int or slice

        Returns
        -------
        depth : numpy.ndarray of dtype or generator of numpy.ndarray of dtype

        Notes
        -----
        .. versionadded:: 1.1

        .. warning::
            The segyio 1.5 and 1.6 series, and 1.7.0, would return the depth_slice in the
            wrong shape for most files. Since segyio 1.7.1, the arrays have the
            correct shape, i.e. fast-by-slow. The underlying data was always
            fast-by-slow, so a numpy array reshape can fix programs using the
            1.5 and 1.6 series.

        Behaves like [] for lists.

        Examples
        --------
        Read a single cut (one sample per trace):

        >>> x = f.depth_slice[199]

        Copy every depth slice into a list:

        >>> l = [numpy.copy(x) for x in depth[:]]

        Every third depth:

        >>> for d in depth[::3]:
        ...     (d * 6).mean()

        Read up to 250:

        >>> for d in depth[:250]:
        ...     d.mean()

        >>> len(ilines), len(xlines)
        (1, 6)
        >>> f.depth_slice[0]
        array([[0.  , 0.01, 0.02, 0.03, 0.04, 0.05]], dtype=float32)
        """

        try:
            # single-index path; wrapindex raises TypeError for non-ints,
            # which routes slices to the generator path below
            i = self.wrapindex(i)
            buf = np.empty(self.shape, dtype=self.dtype)
            return self.filehandle.getdepth(i, buf.size, self.offsets, buf)

        except TypeError:
            try:
                indices = i.indices(len(self))
            except AttributeError:
                # neither int-like nor a slice
                msg = 'depth indices must be integers or slices, not {}'
                raise TypeError(msg.format(type(i).__name__))

            def gen():
                # two buffers are ping-ponged so the array yielded in one
                # iteration is not immediately overwritten by the next read;
                # consumers must still copy() to keep data across iterations
                x = np.empty(self.shape, dtype=self.dtype)
                y = np.copy(x)

                for j in range(*indices):
                    self.filehandle.getdepth(j, x.size, self.offsets, x)
                    x, y = y, x
                    yield y

            return gen()

    def __setitem__(self, depth, val):
        """depth[i] = val

        Write the ith depth, a horizontal cross-section, of the file, starting
        at 0. It accepts any array_like, but `val` must be at least as big as
        the underlying data slice.

        If `val` is longer than the underlying trace, `val` is essentially truncated.

        Parameters
        ----------
        i : int or slice
        val : array_like

        Notes
        -----
        .. versionadded:: 1.1

        Behaves like [] for lists.

        Examples
        --------
        Copy a depth:

        >>> depth[4] = other[19]

        Copy consecutive depths, and assign to a sub volume (inject a sub cube
        into the larger volume):

        >>> depth[10:50] = other[:]

        Copy into every other depth from an iterable:

        >>> depth[::2] = other
        """
        if isinstance(depth, slice):
            # delegate each index of the slice to the scalar path; zip stops
            # at the shorter of the index range and val
            for i, x in zip(range(*depth.indices(len(self))), val):
                self[i] = x
            return

        # coerce to a contiguous array of the file's dtype before writing
        val = castarray(val, dtype = self.dtype)
        self.filehandle.putdepth(depth, val.size, self.offsets, val)
|