segyio 1.9.13__cp312-cp312-macosx_10_13_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.

segyio/trace.py ADDED
@@ -0,0 +1,967 @@
1
+ try:
2
+ from collections.abc import Sequence # noqa
3
+ except ImportError:
4
+ from collections import Sequence # noqa
5
+
6
+ import contextlib
7
+ import itertools
8
+ import warnings
9
+ import sys
10
+ try: from future_builtins import zip
11
+ except ImportError: pass
12
+
13
+ import numpy as np
14
+
15
+ from .line import HeaderLine
16
+ from .field import Field
17
+ from .utils import castarray
18
+
19
+ class Sequence(Sequence):
20
+
21
+ # unify the common optimisations and boilerplate of Trace, RawTrace, and
22
+ # Header, which all obey the same index-oriented interface, and all share
23
+ # length and wrap-around properties.
24
+ #
25
+ # It provides a useful negative-wrap index method which deals
26
+ # appropriately with IndexError and python2-3 differences.
27
+
28
+ def __init__(self, length):
29
+ self.length = length
30
+
31
+ def __len__(self):
32
+ """x.__len__() <==> len(x)"""
33
+ return self.length
34
+
35
+ def __iter__(self):
36
+ """x.__iter__() <==> iter(x)"""
37
+ # __iter__ has a reasonable default implementation from Sequence. It's
38
+ # essentially this loop:
39
+ # for i in range(len(self)): yield self[i]
40
+ # However, in segyio that means the double-buffering, buffer reuse does
41
+ not happen, which is *much* slower (the allocation of otherwise
42
+ # reused numpy objects takes about half the execution time), so
43
+ # explicitly implement it as [:]
44
+ return self[:]
45
+
46
+ def wrapindex(self, i):
47
+ if i < 0:
48
+ i += len(self)
49
+
50
+ if not 0 <= i < len(self):
51
+ # in python2, int-slice comparison does not raise a type error,
52
+ # (but returns False), so force a type-error if this still isn't an
53
+ # int-like.
54
+ _ = i + 0
55
+ raise IndexError('trace index out of range')
56
+
57
+ return i
58
+
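
The classes below all inherit this wrap-around behaviour, so they support Python-style negative indexing. A minimal sketch of what that enables (the filename 'data.sgy' is a hypothetical example, not part of this package):

import segyio

# 'data.sgy' is a hypothetical, existing SEG-Y file
with segyio.open('data.sgy') as f:
    first = f.trace[0]
    last = f.trace[-1]            # wrapindex maps -1 to len(f.trace) - 1
    assert len(first) == len(last) == len(f.samples)
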
59
+ class Trace(Sequence):
60
+ """
61
+ The Trace implements the array interface, where every array element, the
62
+ data trace, is a numpy.ndarray. As all arrays, it can be randomly accessed,
63
+ iterated over, and read strided. Data is read lazily from disk, so
64
+ iteration does not consume much memory. If you want eager reading, use
65
+ Trace.raw.
66
+
67
+ This mode gives access to reading and writing functionality for traces.
68
+ The primary data type is ``numpy.ndarray``. Traces can be accessed
69
+ individually or with python slices, and writing is done via assignment.
70
+
71
+ Notes
72
+ -----
73
+ .. versionadded:: 1.1
74
+
75
+ .. versionchanged:: 1.6
76
+ common list operations (Sequence)
77
+
78
+ Examples
79
+ --------
80
+ Read all traces in file f and store in a list:
81
+
82
+ >>> l = [numpy.copy(tr) for tr in trace[:]]
83
+
84
+ Do numpy operations on a trace:
85
+
86
+ >>> tr = trace[10]
87
+ >>> tr = tr * 2
88
+ >>> tr = tr - 100
89
+ >>> avg = numpy.average(tr)
90
+
91
+ Perform some seismic processing on a trace. E.g. resample from 2ms spacing
92
+ to 4ms spacing (note there is no anti-alias filtering in this example):
93
+
94
+ >>> tr = scipy.signal.resample(tr, len(tr)//2)
95
+
96
+ Double every trace value and write to disk. Since accessing a trace
97
+ gives a numpy value, to write to the respective trace we need its index:
98
+
99
+ >>> for i, tr in enumerate(trace):
100
+ ... tr = tr * 2
101
+ ... trace[i] = tr
102
+
103
+ """
104
+
105
+ def __init__(self, filehandle, dtype, tracecount, samples, readonly):
106
+ super(Trace, self).__init__(tracecount)
107
+ self.filehandle = filehandle
108
+ self.dtype = dtype
109
+ self.shape = samples
110
+ self.readonly = readonly
111
+
112
+ def __getitem__(self, i):
113
+ """trace[i] or trace[i, j]
114
+
115
+ ith trace of the file, starting at 0. trace[i] returns a numpy array,
116
+ and changes to this array will *not* be reflected on disk.
117
+
118
+ When i is a tuple, the second index j (int or slice) is the depth index
119
+ or interval, respectively. j starts at 0.
120
+
121
+ When i is a slice, a generator of numpy arrays is returned.
122
+
123
+ Parameters
124
+ ----------
125
+ i : int or slice
126
+ j : int or slice
127
+
128
+ Returns
129
+ -------
130
+ trace : numpy.ndarray of dtype or generator of numpy.ndarray of dtype
131
+
132
+ Notes
133
+ -----
134
+ .. versionadded:: 1.1
135
+
136
+ Behaves like [] for lists.
137
+
138
+ .. note::
139
+
140
+ This operator reads lazily from the file, meaning the file is read
141
+ on ``next()``, and only one trace is fixed in memory. This means
142
+ segyio can run through arbitrarily large files without consuming
143
+ much memory, but it is potentially slow if the goal is to read the
144
+ entire file into memory. If that is the case, consider using
145
+ `trace.raw`, which reads eagerly.
146
+
147
+ Examples
148
+ --------
149
+ Read every other trace:
150
+
151
+ >>> for tr in trace[::2]:
152
+ ... print(tr)
153
+
154
+ Read all traces, last-to-first:
155
+
156
+ >>> for tr in trace[::-1]:
157
+ ... tr.mean()
158
+
159
+ Read a single value. The second [] is regular numpy array indexing, and
160
+ supports all numpy operations, including negative indexing and slicing:
161
+
162
+ >>> trace[0][0]
163
+ 1490.2
164
+ >>> trace[0][1]
165
+ 1490.8
166
+ >>> trace[0][-1]
167
+ 1871.3
168
+ >>> trace[-1][100]
169
+ 1562.0
170
+
171
+ Read only an interval in a trace:
172
+ >>> trace[0, 5:10]
173
+ """
174
+
175
+ try:
176
+ # optimize for the default case when i is a single trace index
177
+ i = self.wrapindex(i)
178
+ buf = np.zeros(self.shape, dtype = self.dtype)
179
+ return self.filehandle.gettr(buf, i, 1, 1, 0, self.shape, 1, self.shape)
180
+ except TypeError:
181
+ pass
182
+
183
+ try:
184
+ i, j = i
185
+ except TypeError:
186
+ # index is not a tuple. Set j to be a slice that causes the entire
187
+ # trace to be loaded.
188
+ j = slice(0, self.shape, 1)
189
+
190
+ single = False
191
+ try:
192
+ start, stop, step = j.indices(self.shape)
193
+ except AttributeError:
194
+ # j is not a slice, set start stop and step so that a single sample
195
+ # at position j is loaded.
196
+ start = int(j) % self.shape
197
+ stop = start + 1
198
+ step = 1
199
+ single = True
200
+
201
+ n_elements = len(range(start, stop, step))
202
+
203
+ try:
204
+ i = self.wrapindex(i)
205
+ buf = np.zeros(n_elements, dtype = self.dtype)
206
+ tr = self.filehandle.gettr(buf, i, 1, 1, start, stop, step, n_elements)
207
+ return tr[0] if single else tr
208
+ except TypeError:
209
+ pass
210
+
211
+ try:
212
+ indices = i.indices(len(self))
213
+ def gen():
214
+ # double-buffer the trace. when iterating over a range, we want
215
+ # to make sure the visible change happens as late as possible,
216
+ # and that in the case of exception the last valid trace was
217
+ # untouched. this allows for some fancy control flow, and more
218
+ # importantly helps debugging because you can fully inspect and
219
+ # interact with the last good value.
220
+ x = np.zeros(n_elements, dtype=self.dtype)
221
+ y = np.zeros(n_elements, dtype=self.dtype)
222
+
223
+ for k in range(*indices):
224
+ self.filehandle.gettr(x, k, 1, 1, start, stop, step, n_elements)
225
+ x, y = y, x
226
+ yield y
227
+
228
+ return gen()
229
+ except AttributeError:
230
+ # At this point we have tried to unpack index as a single int, a
231
+ # slice and a pair with either element being an int or slice.
232
+ msg = 'trace indices must be integers or slices, not {}'
233
+ raise TypeError(msg.format(type(i).__name__))
234
+
235
+
236
+ def __setitem__(self, i, val):
237
+ """trace[i] = val
238
+
239
+ Write the ith trace of the file, starting at 0. It accepts any
240
+ array_like, but val must be at least as big as the underlying data
241
+ trace.
242
+
243
+ If val is longer than the underlying trace, it is essentially
244
+ truncated.
245
+
246
+ For the best performance, val should be a numpy.ndarray of sufficient
247
+ size and same dtype as the file. segyio will warn on mismatched types,
248
+ and attempt a conversion for you.
249
+
250
+ Data is written immediately to disk. If writing multiple traces at
251
+ once, and a write fails partway through, the resulting file is left in
252
+ an unspecified state.
253
+
254
+ Parameters
255
+ ----------
256
+ i : int or slice
257
+ val : array_like
258
+
259
+ Notes
260
+ -----
261
+ .. versionadded:: 1.1
262
+
263
+ Behaves like [] for lists.
264
+
265
+ Examples
266
+ --------
267
+ Write a single trace:
268
+
269
+ >>> trace[10] = list(range(1000))
270
+
271
+ Write multiple traces:
272
+
273
+ >>> trace[10:15] = np.array([cube[i] for i in range(5)])
274
+
275
+ Write multiple traces with stride:
276
+
277
+ >>> trace[10:20:2] = np.array([cube[i] for i in range(5)])
278
+
279
+ """
280
+ if isinstance(i, slice):
281
+ for j, x in zip(range(*i.indices(len(self))), val):
282
+ self[j] = x
283
+
284
+ return
285
+
286
+ xs = castarray(val, self.dtype)
287
+
288
+ # TODO: check if len(xs) > shape, and optionally warn on truncating
289
+ # writes
290
+ self.filehandle.puttr(self.wrapindex(i), xs)
291
+
292
+ def __repr__(self):
293
+ return "Trace(traces = {}, samples = {})".format(len(self), self.shape)
294
+
295
+ @property
296
+ def raw(self):
297
+ """
298
+ An eager version of Trace
299
+
300
+ Returns
301
+ -------
302
+ raw : RawTrace
303
+ """
304
+ return RawTrace(self.filehandle,
305
+ self.dtype,
306
+ len(self),
307
+ self.shape,
308
+ self.readonly,
309
+ )
310
+
311
+ @property
312
+ @contextlib.contextmanager
313
+ def ref(self):
314
+ """
315
+ A write-back version of Trace
316
+
317
+ Returns
318
+ -------
319
+ ref : RefTrace
320
+ `ref` is returned in a context manager, and must be in a ``with``
321
+ statement
322
+
323
+ Notes
324
+ -----
325
+ .. versionadded:: 1.6
326
+
327
+ Examples
328
+ --------
329
+ >>> with trace.ref as ref:
330
+ ... ref[10] += 1.617
331
+ """
332
+
333
+ x = RefTrace(self.filehandle,
334
+ self.dtype,
335
+ len(self),
336
+ self.shape,
337
+ self.readonly,
338
+ )
339
+ yield x
340
+ x.flush()
341
+
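
To illustrate the lazy reads and index-based writes documented above, a minimal sketch (the filename and 'r+' mode are assumptions):

import segyio

# 'data.sgy' is a hypothetical, existing SEG-Y file opened writable
with segyio.open('data.sgy', 'r+') as f:
    # lazy iteration: only one trace is held in memory at a time
    means = [tr.mean() for tr in f.trace]

    # writing goes through the trace index, as in the docstring above
    for i in range(f.tracecount):
        f.trace[i] = f.trace[i] * 2
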
342
+ class RawTrace(Trace):
343
+ """
344
+ Behaves exactly like trace, except reads are done eagerly and returned as
345
+ numpy.ndarray, instead of generators of numpy.ndarray.
346
+ """
347
+ def __init__(self, *args):
348
+ super(RawTrace, self).__init__(*args)
349
+
350
+ def __getitem__(self, i):
351
+ """trace[i]
352
+
353
+ Eagerly read the ith trace of the file, starting at 0. trace[i] returns
354
+ a numpy array, and changes to this array will *not* be reflected on
355
+ disk.
356
+
357
+ When i is a slice, this returns a 2-dimensional numpy.ndarray.
358
+
359
+ Parameters
360
+ ----------
361
+ i : int or slice
362
+
363
+ Returns
364
+ -------
365
+ trace : numpy.ndarray of dtype
366
+
367
+ Notes
368
+ -----
369
+ .. versionadded:: 1.1
370
+
371
+ Behaves like [] for lists.
372
+
373
+ .. note::
374
+
375
+ Reading this way is more efficient if you know you can afford the
376
+ extra memory usage. It reads the requested traces immediately to
377
+ memory.
378
+
379
+ """
380
+ try:
381
+ i = self.wrapindex(i)
382
+ buf = np.zeros(self.shape, dtype = self.dtype)
383
+ return self.filehandle.gettr(buf, i, 1, 1, 0, self.shape, 1, self.shape)
384
+ except TypeError:
385
+ try:
386
+ indices = i.indices(len(self))
387
+ except AttributeError:
388
+ msg = 'trace indices must be integers or slices, not {}'
389
+ raise TypeError(msg.format(type(i).__name__))
390
+ start, _, step = indices
391
+ length = len(range(*indices))
392
+ buf = np.empty((length, self.shape), dtype = self.dtype)
393
+ return self.filehandle.gettr(buf, start, step, length, 0, self.shape, 1, self.shape)
394
+
395
+
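
A sketch of the eager counterpart described above; trace.raw materialises the requested traces as one 2-dimensional numpy array (filename assumed):

import segyio

# 'data.sgy' is a hypothetical, existing SEG-Y file
with segyio.open('data.sgy') as f:
    cube = f.trace.raw[:]           # shape (tracecount, samples), read eagerly
    every_other = f.trace.raw[::2]  # also a 2D ndarray, not a generator
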
396
+ def fingerprint(x):
397
+ return hash(bytes(x.data))
398
+
399
+ class RefTrace(Trace):
400
+ """
401
+ Behaves like trace, except changes to the returned numpy arrays *are*
402
+ reflected on disk. Operations have to be in-place on the numpy array, so
403
+ assignment on a trace will not work.
404
+
405
+ This feature exists to support code like::
406
+
407
+ >>> with ref as r:
408
+ ... for x, y in zip(r, src):
409
+ ... numpy.copyto(x, y + 10)
410
+
411
+ This class is not meant to be instantiated directly, but returned by
412
+ :attr:`Trace.ref`. This feature requires a context manager, to guarantee
413
+ modifications are written back to disk.
414
+ """
415
+ def __init__(self, *args):
416
+ super(RefTrace, self).__init__(*args)
417
+ self.refs = {}
418
+
419
+ def flush(self):
420
+ """
421
+ Commit cached writes to the file handle. Does not flush libc buffers or
422
+ notify the kernel, so these changes may not immediately be visible to
423
+ other processes.
424
+
425
+ Updates the fingerprints when writes happen, so successive ``flush()``
426
+ invocations are no-ops.
427
+
428
+ It is not necessary to call this method in user code.
429
+
430
+ Notes
431
+ -----
432
+ .. versionadded:: 1.6
433
+
434
+ This method is not intended as user-oriented functionality, but might
435
+ be useful in certain contexts to provide stronger guarantees.
436
+ """
437
+ garbage = []
438
+ for i, (x, signature) in self.refs.items():
439
+ if sys.getrefcount(x) == 3:
440
+ garbage.append(i)
441
+
442
+ if fingerprint(x) == signature: continue
443
+
444
+ self.filehandle.puttr(i, x)
445
+ signature = fingerprint(x)
446
+
447
+
448
+ # to avoid too many resource leaks, when this dict is the only one
449
+ # holding references to already-produced traces, clear them
450
+ for i in garbage:
451
+ del self.refs[i]
452
+
453
+ def fetch(self, i, buf = None):
454
+ if buf is None:
455
+ buf = np.zeros(self.shape, dtype = self.dtype)
456
+
457
+ try:
458
+ self.filehandle.gettr(buf, i, 1, 1, 0, self.shape, 1, self.shape)
459
+ except IOError:
460
+ if not self.readonly:
461
+ # if the file is opened read-only and this happens, there's no
462
+ # way to actually write and the error is an actual error
463
+ buf.fill(0)
464
+ else: raise
465
+
466
+ return buf
467
+
468
+ def __getitem__(self, i):
469
+ """trace[i]
470
+
471
+ Read the ith trace of the file, starting at 0. trace[i] returns a numpy
472
+ array, but unlike Trace, changes to this array *will* be reflected on
473
+ disk. The modifications must happen to the actual array (views are ok),
474
+ so in-place operations work, but assignments will not::
475
+
476
+ >>> with ref as ref:
477
+ ... x = ref[10]
478
+ ... x += 1.617 # in-place, works
479
+ ... numpy.copyto(x, x + 10) # works
480
+ ... x = x + 10 # re-assignment, won't change the original x
481
+
482
+ Works on newly created files that have yet to have any traces written,
483
+ which opens up a natural way of filling newly created files with data.
484
+ When getting unwritten traces, a trace filled with zeros is returned.
485
+
486
+ Parameters
487
+ ----------
488
+ i : int or slice
489
+
490
+ Returns
491
+ -------
492
+ trace : numpy.ndarray of dtype
493
+
494
+ Notes
495
+ -----
496
+ .. versionadded:: 1.6
497
+
498
+ Behaves like [] for lists.
499
+
500
+ Examples
501
+ --------
502
+ Merge two files with a binary operation. Relies on python3 iterator
503
+ zip:
504
+
505
+ >>> with ref as ref:
506
+ ... for x, lhs, rhs in zip(ref, L, R):
507
+ ... numpy.copyto(x, lhs + rhs)
508
+
509
+ Create a file and fill with data (the repeated trace index):
510
+
511
+ >>> f = create()
512
+ >>> with f.trace.ref as ref:
513
+ ... for i, x in enumerate(ref):
514
+ ... x.fill(i)
515
+ """
516
+ try:
517
+ i = self.wrapindex(i)
518
+
519
+ # we know this class is only used in context managers, so we know
520
+ # refs don't escape (with expectation of being written), so
521
+ # preserve all refs yielded with getitem(int)
522
+ #
523
+ # using ref[int] is problematic and pointless, we need to handle
524
+ # this scenario gracefully:
525
+ # with f.trace.ref as ref:
526
+ # x = ref[10]
527
+ # x[5] = 0
528
+ # # invalidate other refs
529
+ # y = ref[11]
530
+ # y[6] = 1.6721
531
+ #
532
+ # # if we don't preserve returned individual getitems, this
533
+ # # write is lost
534
+ # x[5] = 52
535
+ #
536
+ # for slices, we know that references terminate with every
537
+ # iteration anyway, multiple live references cannot happen
538
+
539
+ if i in self.refs:
540
+ return self.refs[i][0]
541
+
542
+ x = self.fetch(i)
543
+ self.refs[i] = (x, fingerprint(x))
544
+ return x
545
+
546
+ except TypeError:
547
+ try:
548
+ indices = i.indices(len(self))
549
+ except AttributeError:
550
+ msg = 'trace indices must be integers or slices, not {}'
551
+ raise TypeError(msg.format(type(i).__name__))
552
+
553
+ def gen():
554
+ x = np.zeros(self.shape, dtype = self.dtype)
555
+ try:
556
+ for j in range(*indices):
557
+ x = self.fetch(j, x)
558
+ y = fingerprint(x)
559
+
560
+ yield x
561
+
562
+ if not fingerprint(x) == y:
563
+ self.filehandle.puttr(j, x)
564
+
565
+ finally:
566
+ # the last yielded item is available after the loop, so
567
+ # preserve it and check if it's been updated on exit
568
+ self.refs[j] = (x, y)
569
+
570
+ return gen()
571
+
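
The write-back mode above is reached through Trace.ref; a minimal sketch of in-place editing (filename assumed, file opened writable):

import numpy as np
import segyio

# 'data.sgy' is a hypothetical, existing SEG-Y file opened writable
with segyio.open('data.sgy', 'r+') as f:
    with f.trace.ref as ref:
        for x in ref:
            np.copyto(x, x + 10)   # in-place; flushed back to disk on exit
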
572
+ class Header(Sequence):
573
+ """Interact with segy in header mode
574
+
575
+ This mode gives access to reading and writing functionality of headers,
576
+ both in individual (trace) mode and line mode. The returned header
577
+ implements a dict_like object with a fixed set of keys, given by the SEG-Y
578
+ standard.
579
+
580
+ The Header implements the array interface, where every array element, the
581
+ trace header, is a dict_like Field. As all arrays, it can be randomly accessed,
582
+ iterated over, and read strided. Data is read lazily from disk, so
583
+ iteration does not consume much memory.
584
+
585
+ Notes
586
+ -----
587
+ .. versionadded:: 1.1
588
+
589
+ .. versionchanged:: 1.6
590
+ common list operations (Sequence)
591
+
592
+ """
593
+ def __init__(self, segy):
594
+ self.segy = segy
595
+ super(Header, self).__init__(segy.tracecount)
596
+
597
+ def __getitem__(self, i):
598
+ """header[i]
599
+
600
+ ith header of the file, starting at 0.
601
+
602
+ Parameters
603
+ ----------
604
+ i : int or slice
605
+
606
+ Returns
607
+ -------
608
+ field : Field
609
+ dict_like header
610
+
611
+ Notes
612
+ -----
613
+ .. versionadded:: 1.1
614
+
615
+ Behaves like [] for lists.
616
+
617
+ Examples
618
+ --------
619
+ Reading a header:
620
+
621
+ >>> header[10]
622
+
623
+ Read a field in the first 5 headers:
624
+
625
+ >>> [x[25] for x in header[:5]]
626
+ [1, 2, 3, 4]
627
+
628
+ Read a field in every other header:
629
+
630
+ >>> [x[37] for x in header[::2]]
631
+ [1, 3, 1, 3, 1, 3]
632
+ """
633
+ try:
634
+ i = self.wrapindex(i)
635
+ return Field.trace(traceno = i, segy = self.segy)
636
+
637
+ except TypeError:
638
+ try:
639
+ indices = i.indices(len(self))
640
+ except AttributeError:
641
+ msg = 'trace indices must be integers or slices, not {}'
642
+ raise TypeError(msg.format(type(i).__name__))
643
+
644
+ def gen():
645
+ # double-buffer the header. when iterating over a range, we
646
+ # want to make sure the visible change happens as late as
647
+ # possible, and that in the case of exception the last valid
648
+ # header was untouched. this allows for some fancy control
649
+ # flow, and more importantly helps debugging because you can
650
+ # fully inspect and interact with the last good value.
651
+ x = Field.trace(None, self.segy)
652
+ buf = bytearray(x.buf)
653
+ for j in range(*indices):
654
+ # skip re-invoking __getitem__, just update the buffer
655
+ # directly with fetch, and save some initialisation work
656
+ buf = x.fetch(buf, j)
657
+ x.buf[:] = buf
658
+ x.traceno = j
659
+ yield x
660
+
661
+ return gen()
662
+
663
+ def __setitem__(self, i, val):
664
+ """header[i] = val
665
+
666
+ Write the ith header of the file, starting at 0. Unlike data traces
667
+ (which return numpy.ndarrays), changes to the headers returned while
668
+ iterating *will* be reflected on disk.
669
+
670
+ Parameters
671
+ ----------
672
+ i : int or slice
673
+ val : Field or array_like of dict_like
674
+
675
+ Notes
676
+ -----
677
+ .. versionadded:: 1.1
678
+
679
+ Behaves like [] for lists
680
+
681
+ Examples
682
+ --------
683
+ Copy a header to a different trace:
684
+
685
+ >>> header[28] = header[29]
686
+
687
+ Write multiple fields in a trace:
688
+
689
+ >>> header[10] = { 37: 5, TraceField.INLINE_3D: 2484 }
690
+
691
+ Set a fixed set of values in all headers:
692
+
693
+ >>> for x in header[:]:
694
+ ... x[37] = 1
695
+ ... x.update({ TraceField.offset: 1, 2484: 10 })
696
+
697
+ Write a field in multiple headers
698
+
699
+ >>> for x in header[:10]:
700
+ ... x.update({ TraceField.offset : 2 })
701
+
702
+ Write a field in every other header:
703
+
704
+ >>> for x in header[::2]:
705
+ ... x.update({ TraceField.offset : 2 })
706
+ """
707
+
708
+ x = self[i]
709
+
710
+ try:
711
+ x.update(val)
712
+ except AttributeError:
713
+ if isinstance(val, Field) or isinstance(val, dict):
714
+ val = itertools.repeat(val)
715
+
716
+ for h, v in zip(x, val):
717
+ h.update(v)
718
+
719
+ @property
720
+ def iline(self):
721
+ """
722
+ Headers, accessed by inline
723
+
724
+ Returns
725
+ -------
726
+ line : HeaderLine
727
+ """
728
+ return HeaderLine(self, self.segy.iline, 'inline')
729
+
730
+ @iline.setter
731
+ def iline(self, value):
732
+ """Write iterables to lines
733
+
734
+ Examples:
735
+ Supports writing to *all* inlines via assignment, regardless of
736
+ data source and format. Will respect the sample size and structure
737
+ of the file being assigned to, so if the argument traces are longer
738
+ than that of the file being written to the surplus data will be
739
+ ignored. Uses same rules for writing as `f.iline[i] = x`.
740
+ """
741
+ for i, src in zip(self.segy.ilines, value):
742
+ self.iline[i] = src
743
+
744
+ @property
745
+ def xline(self):
746
+ """
747
+ Headers, accessed by crossline
748
+
749
+ Returns
750
+ -------
751
+ line : HeaderLine
752
+ """
753
+ return HeaderLine(self, self.segy.xline, 'crossline')
754
+
755
+ @xline.setter
756
+ def xline(self, value):
757
+ """Write iterables to lines
758
+
759
+ Examples:
760
+ Supports writing to *all* crosslines via assignment, regardless of
761
+ data source and format. Will respect the sample size and structure
762
+ of the file being assigned to, so if the argument traces are longer
763
+ than that of the file being written to the surplus data will be
764
+ ignored. Uses same rules for writing as `f.xline[i] = x`.
765
+ """
766
+
767
+ for i, src in zip(self.segy.xlines, value):
768
+ self.xline[i] = src
769
+
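
A short sketch of the header mode documented above, reading and updating trace header fields (filename assumed):

import segyio

# 'data.sgy' is a hypothetical, existing SEG-Y file opened writable
with segyio.open('data.sgy', 'r+') as f:
    il = f.header[0][segyio.TraceField.INLINE_3D]   # read one field
    for h in f.header[::2]:                         # update every other header
        h.update({ segyio.TraceField.offset: 1 })
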
770
+ class Attributes(Sequence):
771
+ """File-wide attribute (header word) reading
772
+
773
+ Lazily read a single header word for every trace in the file. The
774
+ Attributes implement the array interface, and will behave as expected when
775
+ indexed and sliced.
776
+
777
+ Notes
778
+ -----
779
+ .. versionadded:: 1.1
780
+ """
781
+
782
+ def __init__(self, field, filehandle, tracecount):
783
+ super(Attributes, self).__init__(tracecount)
784
+ self.field = field
785
+ self.filehandle = filehandle
786
+ self.tracecount = tracecount
787
+ self.dtype = np.intc
788
+
789
+ def __iter__(self):
790
+ # attributes requires a custom iter, because self[:] returns a numpy
791
+ # array, which in itself is iterable, but not an iterator
792
+ return iter(self[:])
793
+
794
+ def __getitem__(self, i):
795
+ """attributes[:]
796
+
797
+ Parameters
798
+ ----------
799
+ i : int or slice or array_like
800
+
801
+ Returns
802
+ -------
803
+ attributes : array_like of dtype
804
+
805
+ Examples
806
+ --------
807
+ Read all unique sweep frequency end:
808
+
809
+ >>> end = segyio.TraceField.SweepFrequencyEnd
810
+ >>> sfe = np.unique(f.attributes( end )[:])
811
+
812
+ Discover the first traces of each unique sweep frequency end:
813
+
814
+ >>> end = segyio.TraceField.SweepFrequencyEnd
815
+ >>> attrs = f.attributes(end)
816
+ >>> sfe, tracenos = np.unique(attrs[:], return_index = True)
817
+
818
+ Scatter plot group x/y-coordinates with SFEs (using matplotlib):
819
+
820
+ >>> end = segyio.TraceField.SweepFrequencyEnd
821
+ >>> attrs = f.attributes(end)
822
+ >>> _, tracenos = np.unique(attrs[:], return_index = True)
823
+ >>> gx = f.attributes(segyio.TraceField.GroupX)[tracenos]
824
+ >>> gy = f.attributes(segyio.TraceField.GroupY)[tracenos]
825
+ >>> scatter(gx, gy)
826
+ """
827
+ try:
828
+ xs = np.asarray(i, dtype = self.dtype)
829
+ xs = xs.astype(dtype = self.dtype, order = 'C', copy = False)
830
+ attrs = np.empty(len(xs), dtype = self.dtype)
831
+ return self.filehandle.field_foreach(attrs, xs, self.field)
832
+
833
+ except TypeError:
834
+ try:
835
+ i = slice(i, i + 1, 1)
836
+ except TypeError:
837
+ pass
838
+
839
+ traces = self.tracecount
840
+ filehandle = self.filehandle
841
+ field = self.field
842
+
843
+ start, stop, step = i.indices(traces)
844
+ indices = range(start, stop, step)
845
+ attrs = np.empty(len(indices), dtype = self.dtype)
846
+ return filehandle.field_forall(attrs, start, stop, step, field)
847
+
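
Attributes are usually reached through f.attributes(field); a minimal sketch mirroring the examples above (filename assumed):

import numpy as np
import segyio

# 'data.sgy' is a hypothetical, existing SEG-Y file
with segyio.open('data.sgy') as f:
    end = segyio.TraceField.SweepFrequencyEnd
    sfe = np.unique(f.attributes(end)[:])   # one header word per trace
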
848
+ class Text(Sequence):
849
+ """Interact with segy in text mode
850
+
851
+ This mode gives access to reading and writing functionality for textual
852
+ headers.
853
+
854
+ The primary data type is the python string. Reading textual headers is done
855
+ with [], and writing is done via assignment. No additional structure is
856
+ built around the textual header, so everything is treated as one long
857
+ string without line breaks.
858
+
859
+ Notes
860
+ -----
861
+ .. versionchanged:: 1.7
862
+ common list operations (Sequence)
863
+
864
+ """
865
+
866
+ def __init__(self, filehandle, textcount):
867
+ super(Text, self).__init__(textcount)
868
+ self.filehandle = filehandle
869
+
870
+ def __getitem__(self, i):
871
+ """text[i]
872
+
873
+ Read the text header at i. 0 is the mandatory, main textual header.
874
+
875
+ Examples
876
+ --------
877
+ Print the textual header:
878
+
879
+ >>> print(f.text[0])
880
+
881
+ Print the first extended textual header:
882
+
883
+ >>> print(f.text[1])
884
+
885
+ Print a textual header line-by-line:
886
+
887
+ >>> # using zip, from the zip documentation
888
+ >>> text = str(f.text[0])
889
+ >>> lines = map(''.join, zip( *[iter(text)] * 80))
890
+ >>> for line in lines:
891
+ ... print(line)
892
+ ...
893
+ """
894
+ try:
895
+ i = self.wrapindex(i)
896
+ return self.filehandle.gettext(i)
897
+
898
+ except TypeError:
899
+ try:
900
+ indices = i.indices(len(self))
901
+ except AttributeError:
902
+ msg = 'trace indices must be integers or slices, not {}'
903
+ raise TypeError(msg.format(type(i).__name__))
904
+
905
+ def gen():
906
+ for j in range(*indices):
907
+ yield self.filehandle.gettext(j)
908
+ return gen()
909
+
910
+ def __setitem__(self, i, val):
911
+ """text[i] = val
912
+
913
+ Write the ith text header of the file, starting at 0.
914
+ If val is an instance of Text, or an iterable of Text, the
915
+ value written is the first element of each Text.
916
+
917
+ Parameters
918
+ ----------
919
+ i : int or slice
920
+ val : str, Text or iterable if i is slice
921
+
922
+ Examples
923
+ --------
924
+ Write a new textual header:
925
+
926
+ >>> f.text[0] = make_new_header()
927
+ >>> f.text[1:3] = ["new_header1", "new_header_2"]
928
+
929
+ Copy a textual header:
930
+
931
+ >>> f.text[1] = g.text[0]
932
+
933
+ Write a textual header based on Text:
934
+
935
+ >>> f.text[1] = g.text
936
+ >>> assert f.text[1] == g.text[0]
937
+
938
+ >>> f.text[1:3] = [g1.text, g2.text]
939
+ >>> assert f.text[1] == g1.text[0]
940
+ >>> assert f.text[2] == g2.text[0]
941
+
942
+ """
943
+ if isinstance(val, Text):
944
+ self[i] = val[0]
945
+ return
946
+
947
+ try:
948
+ i = self.wrapindex(i)
949
+ self.filehandle.puttext(i, val)
950
+
951
+ except TypeError:
952
+ try:
953
+ indices = i.indices(len(self))
954
+ except AttributeError:
955
+ msg = 'trace indices must be integers or slices, not {}'
956
+ raise TypeError(msg.format(type(i).__name__))
957
+
958
+ for i, text in zip(range(*indices), val):
959
+ if isinstance(text, Text):
960
+ text = text[0]
961
+ self.filehandle.puttext(i, text)
962
+
963
+
964
+ def __str__(self):
965
+ msg = 'str(text) is deprecated, use explicit format instead'
966
+ warnings.warn(msg, DeprecationWarning)
967
+ return '\n'.join(map(''.join, zip(*[iter(str(self[0]))] * 80)))
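
Finally, a sketch of the text mode above, reading the mandatory textual header and reflowing it into 80-character lines as in the docstring example (filename assumed):

import segyio

# 'data.sgy' is a hypothetical, existing SEG-Y file
with segyio.open('data.sgy') as f:
    text = str(f.text[0])
    for line in map(''.join, zip(*[iter(text)] * 80)):
        print(line)
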