segyio 2.0.0a1__cp312-cp312-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
segyio/segy.py ADDED
@@ -0,0 +1,1182 @@
1
+ import warnings
2
+ try:
3
+ from future_builtins import zip
4
+ range = xrange
5
+ except (NameError, ImportError): pass
6
+
7
+ import numpy as np
8
+
9
+ from .gather import Gather, Groups
10
+ from .line import Line
11
+ from .trace import Trace, Header, Attributes, Text, Stanza
12
+ from .trace import RowLayoutEntries, FileFieldAccessor
13
+ from .field import Field
14
+
15
+ from .tracesortingformat import TraceSortingFormat
16
+
17
+
18
+
19
+ class SegyFile(object):
20
+ """
21
+ This class is not meant to be instantiated directly, but rather obtained
22
+ from ``segyio.open`` or ``segyio.create``.
23
+ """
24
+
25
+ _unstructured_errmsg = "File opened in unstructured mode."
26
+
27
+ def __init__(
28
+ self,
29
+ fd,
30
+ datasource_descriptor,
31
+ endian='big'
32
+ ):
33
+
34
+ self._datasource_descriptor = datasource_descriptor
35
+
36
+ self._traceheader_layouts = fd.traceheader_layouts()
37
+ standard_header_layout = self._traceheader_layouts["SEG00000"]
38
+ self._il = standard_header_layout.entry_by_name("iline").byte
39
+ self._xl = standard_header_layout.entry_by_name("xline").byte
40
+
41
+ self._traceheader_entries = []
42
+ for traceheader_layout in self._traceheader_layouts.values():
43
+ self._traceheader_entries.append(traceheader_layout)
44
+
45
+ self._traceheader_names = []
46
+ for traceheader_name in self._traceheader_layouts.keys():
47
+ self._traceheader_names.append(traceheader_name)
48
+
49
+ # property value holders
50
+ self._ilines = None
51
+ self._xlines = None
52
+ self._offsets = None
53
+ self._samples = None
54
+ self._sorting = None
55
+
56
+ # private values
57
+ self._iline_length = None
58
+ self._iline_stride = None
59
+ self._xline_length = None
60
+ self._xline_stride = None
61
+
62
+ self.segyfd = fd
63
+ metrics = self.segyfd.metrics()
64
+ self._fmt = metrics['format']
65
+ self._tracecount = metrics['tracecount']
66
+ self._ext_textheaders_count = metrics['ext_headers']
67
+ self._encoding = metrics['encoding']
68
+ self._traceheader_count = metrics['traceheader_count']
69
+
70
+ try:
71
+ self._dtype = np.dtype({
72
+ -1: np.float32,
73
+ 1: np.float32,
74
+ 2: np.int32,
75
+ 3: np.int16,
76
+ 5: np.float32,
77
+ 6: np.float64,
78
+ 8: np.int8,
79
+ 9: np.int64,
80
+ 10: np.uint32,
81
+ 11: np.uint16,
82
+ 12: np.uint64,
83
+ 16: np.uint8,
84
+ }[self._fmt])
85
+ except KeyError:
86
+ problem = 'Unknown trace value format {}'.format(self._fmt)
87
+ raise RuntimeError(problem)
88
+
89
+ self._trace = Trace(self.segyfd,
90
+ self.dtype,
91
+ self.tracecount,
92
+ metrics['samplecount'],
93
+ self.readonly,
94
+ )
95
+ self._header = Header(self)
96
+ self._traceheader = FileFieldAccessor(self)
97
+ self._iline = None
98
+ self._xline = None
99
+ self._gather = None
100
+ self.depth = None
101
+ self.endian = endian
102
+
103
+ super(SegyFile, self).__init__()
104
+
105
+ def __str__(self):
106
+ f = "SegyFile {}:".format(str(self._datasource_descriptor))
107
+
108
+ if self.unstructured:
109
+ il = " inlines: None"
110
+ xl = " crosslines: None"
111
+ of = " offsets: None"
112
+ else:
113
+ il = " inlines: {} [{}, {}]".format(len(self.ilines), self.ilines[0], self.ilines[-1])
114
+ xl = " crosslines: {} [{}, {}]".format(len(self.xlines), self.xlines[0], self.xlines[-1])
115
+ of = " offsets: {} [{}, {}]".format(len(self.offsets), self.offsets[0], self.offsets[-1])
116
+
117
+ tr = " traces: {}".format(self.tracecount)
118
+ sm = " samples: {}".format(self.samples)
119
+ fmt = " float representation: {}".format(self.format)
120
+
121
+ props = [f, il, xl, tr, sm]
122
+
123
+ if self.offsets is not None and len(self.offsets) > 1:
124
+ props.append(of)
125
+
126
+ props.append(fmt)
127
+ return '\n'.join(props)
128
+
129
+
130
+ def __repr__(self):
131
+ return "SegyFile('{}', iline = {}, xline = {})".format(
132
+ self._datasource_descriptor.__repr__(), self._il, self._xl)
133
+
134
+ def __enter__(self):
135
+ return self
136
+
137
+ def __exit__(self, type, value, traceback):
138
+ self.close()
139
+
140
+ def flush(self):
141
+ """Flush the file
142
+
143
+ Write the library buffers to disk, like C's ``fflush``. This method is
144
+ mostly useful for testing.
145
+
146
+ It is not necessary to call this method unless you want to observe your
147
+ changes on-disk while the file is still open. The file will
148
+ automatically be flushed for you if you use the `with` statement when
149
+ your routine is completed.
150
+
151
+ Notes
152
+ -----
153
+
154
+ .. versionadded:: 1.1
155
+
156
+ .. warning::
157
+ This is not guaranteed to actually write changes to disk, it only
158
+ flushes the library buffers. Your kernel is free to defer writing
159
+ changes to disk until a later time.
160
+
161
+ Examples
162
+ --------
163
+
164
+ Flush:
165
+
166
+ >>> with segyio.open(path) as f:
167
+ ... # write something to f
168
+ ... f.flush()
169
+
170
+ """
171
+ self.segyfd.flush()
172
+
173
+ def close(self):
174
+ """Close the file
175
+
176
+ This method is mostly useful for testing.
177
+
178
+ It is not necessary to call this method if you're using the `with`
179
+ statement, which will close the file for you. Calling methods on a
180
+ previously-closed file will raise `IOError`.
181
+
182
+ Notes
183
+ -----
184
+
185
+ .. versionadded:: 1.1
186
+
187
+
188
+ """
189
+ self.segyfd.close()
190
+
191
+ def mmap(self):
192
+ """Memory map the file
193
+
194
+ Memory map the file. This is an advanced feature for speed and
195
+ optimization; however, it is no silver bullet. If your file is smaller
196
+ than the memory available on your system this will likely result in
197
+ faster reads and writes, especially for line modes. However, if the
198
+ file is very large, or memory is very pressured, this optimization
199
+ might cause overall system slowdowns. However, if you're opening the
200
+ same file from many different instances of segyio then memory mapping
201
+ may significantly reduce the memory pressure.
202
+
203
+ If this call returns true, the file is memory mapped. If memory mapping
204
+ was build-time disabled or is not available for your platform this call
205
+ always return false. If the memory mapping is unsuccessful you can keep
206
+ using segyio - reading and writing falls back on non-memory mapped
207
+ features.
208
+
209
+ Returns
210
+ -------
211
+
212
+ success : bool
213
+ Returns True if the file was successfully memory mapped, False if
214
+ not
215
+
216
+ Notes
217
+ -----
218
+
219
+ .. versionadded:: 1.1
220
+
221
+
222
+ Examples
223
+ --------
224
+
225
+ Memory map:
226
+
227
+ >>> mapped = f.mmap()
228
+ >>> if mapped: print( "File is memory mapped!" )
229
+ File is memory mapped!
230
+ >>> pass # keep using segyio as per usual
231
+ >>> print( f.trace[10][7] )
232
+ 1.02548
233
+
234
+ """
235
+ return self.segyfd.mmap()
236
+
237
+ @property
238
+ def dtype(self):
239
+ """
240
+
241
+ The data type object of the traces. This is the format most accurate
242
+ and efficient to exchange with the underlying file, and the data type
243
+ you will find the data traces.
244
+
245
+ Returns
246
+ -------
247
+
248
+ dtype : numpy.dtype
249
+
250
+ Notes
251
+ -----
252
+
253
+ .. versionadded:: 1.6
254
+
255
+ """
256
+ return self._dtype
257
+
258
+ @property
259
+ def sorting(self):
260
+ """
261
+
262
+ Inline or crossline sorting, or Falsey (None or 0) if unstructured.
263
+ Returns
264
+ -------
265
+
266
+ sorting : int
267
+
268
+ """
269
+ return self._sorting
270
+
271
+ @property
272
+ def tracecount(self):
273
+ """Number of traces in this file
274
+
275
+ Equivalent to ``len(f.trace)``
276
+
277
+ Returns
278
+ -------
279
+
280
+ count : int
281
+ Number of traces in this file
282
+
283
+ """
284
+ return self._tracecount
285
+
286
+ @property
287
+ def samples(self):
288
+ """
289
+ Return the array of samples with appropriate intervals.
290
+
291
+ Returns
292
+ -------
293
+
294
+ samples : numpy.ndarray of int
295
+
296
+ Notes
297
+ -----
298
+
299
+ It holds that ``len(f.samples) == len(f.trace[0])``
300
+
301
+ """
302
+
303
+ return self._samples
304
+
305
+ @property
306
+ def offsets(self):
307
+ """
308
+
309
+ Return the array of offset names. For post-stack data, this array has a
310
+ length of 1
311
+
312
+ Returns
313
+ -------
314
+
315
+ offsets : numpy.ndarray of int
316
+
317
+ """
318
+ return self._offsets
319
+
320
+ @property
321
+ def ext_headers(self):
322
+ """
323
+ DEPRECATED to avoid confusion with trace header extensions. Use
324
+ ext_text_headers_count instead.
325
+
326
+ The number of extra text headers, given by the ``ExtendedHeaders``
327
+ field in the binary header.
328
+
329
+ Returns
330
+ -------
331
+ headers : int
332
+ Number of extra text headers
333
+
334
+ .. deprecated:: 2.0
335
+ Use :attr:`ext_text_headers_count` instead.
336
+ """
337
+ warnings.warn(
338
+ "The 'ext_headers' property is deprecated and will be removed in a future version. "
339
+ "Use 'ext_text_headers_count' instead.",
340
+ DeprecationWarning
341
+ )
342
+ return self._ext_textheaders_count
343
+
344
+ @property
345
+ def ext_text_headers_count(self):
346
+ """Number of extended textual file header records.
347
+
348
+ The number of 3200-byte, extended textual file header records following
349
+ the binary header.
350
+
351
+ Returns
352
+ -------
353
+ headers : int
354
+ Number of extra text headers
355
+ """
356
+ return self._ext_textheaders_count
357
+
358
+ @property
359
+ def traceheader_count(self):
360
+ """Total number of trace headers.
361
+
362
+ segyio assumes that every single trace has exactly this number of trace
363
+ headers.
364
+
365
+ Returns
366
+ -------
367
+ traceheader_count : int
368
+ Number of trace headers for each trace
369
+ """
370
+ return self._traceheader_count
371
+
372
+ @property
373
+ def unstructured(self):
374
+ """
375
+ If the file is unstructured, sophisticated addressing modes that
376
+ require the file to represent a proper cube won't work, and only raw
377
+ data reading and writing is supported.
378
+
379
+ Returns
380
+ -------
381
+
382
+ unstructured : bool
383
+ ``True`` if this file is unstructured, ``False`` if not
384
+
385
+ """
386
+ return self.ilines is None
387
+
388
+ @property
389
+ def tracefield(self):
390
+ """
391
+ Access the trace header layouts for this SEG-Y file.
392
+
393
+ Returns
394
+ -------
395
+ tracefield : RowLayoutEntries
396
+
397
+ Examples
398
+ --------
399
+ List all trace header layout names:
400
+
401
+ >>> f.tracefield.names()
402
+ ['SEG00000', 'SEG00001']
403
+
404
+ Access a specific layout by name:
405
+
406
+ >>> layout = f.tracefield.SEG00000
407
+
408
+ List all field names in a specific layout:
409
+
410
+ >>> f.tracefield.SEG00000.names()
411
+
412
+ Find offset of a specific field:
413
+
414
+ >>> f.tracefield.SEG00000.iline.offset()
415
+ >>> f.tracefield[0].xline.offset()
416
+
417
+ Get values of field for many traces:
418
+
419
+ >>> f.tracefield.SEG00001.dt[0, 10, 20]
420
+
421
+ Note:
422
+
423
+ - this works similar to :meth:`.attributes`
424
+ - use :meth:`.traceheader` if you need to get many fields from a single
425
+ trace.
426
+
427
+ Notes
428
+ -----
429
+ .. versionadded:: 2.0
430
+ """
431
+ return RowLayoutEntries(self)
432
+
433
+ @property
434
+ def header(self):
435
+ """
436
+ Interact with segy in standard header mode
437
+
438
+ Returns
439
+ -------
440
+ header : Header
441
+
442
+ Notes
443
+ -----
444
+ .. versionadded:: 1.1
445
+ """
446
+ return self._header
447
+
448
+ @header.setter
449
+ def header(self, val):
450
+ """standard headers macro assignment
451
+
452
+ A convenient way for operating on all standard headers of a file is to
453
+ use the default full-file range. It will write headers of trace 0, 1,
454
+ ..., n, but uses the iteration specified by the right-hand side (i.e.
455
+ can skip headers etc).
456
+
457
+ If the right-hand-side headers are exhausted before all the destination
458
+ file headers the writing will stop, i.e. not all all headers in the
459
+ destination file will be written to.
460
+
461
+ Examples
462
+ --------
463
+ Copy headers from file g to file f:
464
+
465
+ >>> f.header = g.header
466
+
467
+ Set offset field:
468
+
469
+ >>> f.header = { TraceField.offset: 5 }
470
+
471
+ Copy every 12th header from the file g to f's 0, 1, 2...:
472
+
473
+ >>> f.header = g.header[::12]
474
+ >>> f.header[0] == g.header[0]
475
+ True
476
+ >>> f.header[1] == g.header[12]
477
+ True
478
+ >>> f.header[2] == g.header[2]
479
+ False
480
+ >>> f.header[2] == g.header[24]
481
+ True
482
+ """
483
+ self.header[:] = val
484
+
485
+ @property
486
+ def traceheader(self):
487
+ """
488
+ Interact with segy in traceheader mode.
489
+
490
+ Works similar to :meth:`.header` but is applicable for all trace
491
+ headers, not just standard ones.
492
+
493
+ Examples
494
+ --------
495
+ Read field values from trace header extension 1 at trace 5:
496
+
497
+ >>> traceheader = f.traceheader[5][1]
498
+ >>> traceheader.rec_x
499
+ ... 100.5
500
+ >>> traceheader.rec_y
501
+ ... 150.75
502
+
503
+ Notes
504
+ -----
505
+ .. versionadded:: 2.0
506
+ """
507
+ return self._traceheader
508
+
509
+ @traceheader.setter
510
+ def traceheader(self, val):
511
+ """headers macro assignment
512
+
513
+ Operating on all headers of a file.
514
+
515
+ If the right-hand-side headers are exhausted before all the destination
516
+ file headers the behavior is undefined and may change in the future.
517
+
518
+ Examples
519
+ --------
520
+ Copy all headers from file g to file f:
521
+
522
+ >>> f.traceheader = g.traceheader
523
+
524
+ Copy all headers from trace 3 to trace 5:
525
+
526
+ >>> f.traceheader[5] = f.traceheader[3]
527
+
528
+ Copy standard header from trace 2 to trace 4:
529
+
530
+ >>> f.traceheader[4][0] = f.traceheader[2][0]
531
+ """
532
+ self.traceheader[:] = val
533
+
534
+ def attributes(self, field):
535
+ """File-wide attribute (standard header word) reading
536
+
537
+ Lazily gather a single standard header word for every trace in the file.
538
+ The array can be sliced, supports index lookup, and numpy-style
539
+ list-of-indices.
540
+
541
+ Parameters
542
+ ----------
543
+
544
+ field : int or segyio.TraceField
545
+ field
546
+
547
+ Returns
548
+ -------
549
+
550
+ attrs : Attributes
551
+ A sliceable array_like of header words
552
+
553
+ Notes
554
+ -----
555
+
556
+ .. versionadded:: 1.1
557
+
558
+ """
559
+ return Attributes(self, field, 0)
560
+
561
+ @property
562
+ def trace(self):
563
+ """
564
+ Interact with segy in trace mode.
565
+
566
+ Returns
567
+ -------
568
+ trace : Trace
569
+
570
+ Notes
571
+ -----
572
+ .. versionadded:: 1.1
573
+
574
+ """
575
+
576
+ return self._trace
577
+
578
+ @trace.setter
579
+ def trace(self, val):
580
+ """traces macro assignment
581
+
582
+ Convenient way for setting traces from 0, 1, ... n, based on the
583
+ iterable set of traces on the right-hand-side.
584
+
585
+ If the right-hand-side traces are exhausted before all the destination
586
+ file traces the writing will stop, i.e. not all all traces in the
587
+ destination file will be written.
588
+
589
+ Notes
590
+ -----
591
+ .. versionadded:: 1.1
592
+
593
+ Examples
594
+ --------
595
+ Copy traces from file f to file g:
596
+
597
+ >>> f.trace = g.trace
598
+
599
+ Copy first half of the traces from g to f:
600
+
601
+ >>> f.trace = g.trace[:len(g.trace)/2]
602
+
603
+ Fill the file with one trace (filled with zeros):
604
+
605
+ >>> tr = np.zeros(f.samples)
606
+ >>> f.trace = itertools.repeat(tr)
607
+
608
+ For advanced users: sometimes you want to load the entire segy file
609
+ to memory and apply your own structural manipulations or operations
610
+ on it. Some segy files are very large and may not fit, in which
611
+ case this feature will break down. This is an optimisation feature;
612
+ using it should generally be driven by measurements.
613
+
614
+ Read the first 10 traces:
615
+
616
+ >>> f.trace.raw[0:10]
617
+
618
+ Read *all* traces to memory:
619
+
620
+ >>> f.trace.raw[:]
621
+
622
+ Read every other trace to memory:
623
+
624
+ >>> f.trace.raw[::2]
625
+ """
626
+ self.trace[:] = val
627
+
628
+ @property
629
+ def ilines(self):
630
+ """Inline labels
631
+
632
+ The inline labels in this file, if structured, else None
633
+
634
+ Returns
635
+ -------
636
+
637
+ inlines : array_like of int or None
638
+
639
+ """
640
+ return self._ilines
641
+
642
+ @property
643
+ def iline(self):
644
+ """
645
+ Interact with segy in inline mode
646
+
647
+ Returns
648
+ -------
649
+ iline : Line or None
650
+
651
+ Raises
652
+ ------
653
+ ValueError
654
+ If the file is unstructured
655
+
656
+ Notes
657
+ -----
658
+ .. versionadded:: 1.1
659
+ """
660
+
661
+ if self.unstructured:
662
+ raise ValueError(self._unstructured_errmsg)
663
+
664
+ if self._iline is not None:
665
+ return self._iline
666
+
667
+ self._iline = Line(self,
668
+ self.ilines,
669
+ self._iline_length,
670
+ self._iline_stride,
671
+ self.offsets,
672
+ 'inline',
673
+ )
674
+ return self._iline
675
+
676
+ @iline.setter
677
+ def iline(self, value):
678
+ """inlines macro assignment
679
+
680
+ Convenient way for setting inlines, from left-to-right as the inline
681
+ numbers are specified in the file.ilines property, from an iterable
682
+ set on the right-hand-side.
683
+
684
+ If the right-hand-side inlines are exhausted before all the destination
685
+ file inlines the writing will stop, i.e. not all all inlines in the
686
+ destination file will be written.
687
+
688
+ Notes
689
+ -----
690
+ .. versionadded:: 1.1
691
+
692
+ Examples
693
+ --------
694
+ Copy inlines from file f to file g:
695
+
696
+ >>> f.iline = g.iline
697
+ """
698
+ self.iline[:] = value
699
+
700
+ @property
701
+ def xlines(self):
702
+ """Crossline labels
703
+
704
+ The crosslane labels in this file, if structured, else None
705
+
706
+ Returns
707
+ -------
708
+
709
+ crosslines : array_like of int or None
710
+
711
+ """
712
+ return self._xlines
713
+
714
+ @property
715
+ def xline(self):
716
+ """
717
+ Interact with segy in crossline mode
718
+
719
+ Returns
720
+ -------
721
+ xline : Line or None
722
+
723
+ Raises
724
+ ------
725
+ ValueError
726
+ If the file is unstructured
727
+
728
+ Notes
729
+ -----
730
+ .. versionadded:: 1.1
731
+ """
732
+ if self.unstructured:
733
+ raise ValueError(self._unstructured_errmsg)
734
+
735
+ if self._xline is not None:
736
+ return self._xline
737
+
738
+ self._xline = Line(self,
739
+ self.xlines,
740
+ self._xline_length,
741
+ self._xline_stride,
742
+ self.offsets,
743
+ 'crossline',
744
+ )
745
+ return self._xline
746
+
747
+ @xline.setter
748
+ def xline(self, value):
749
+ """crosslines macro assignment
750
+
751
+ Convenient way for setting crosslines, from left-to-right as the inline
752
+ numbers are specified in the file.ilines property, from an iterable set
753
+ on the right-hand-side.
754
+
755
+ If the right-hand-side inlines are exhausted before all the destination
756
+ file inlines the writing will stop, i.e. not all all inlines in the
757
+ destination file will be written.
758
+
759
+ Notes
760
+ -----
761
+ .. versionadded:: 1.1
762
+
763
+ Examples
764
+ --------
765
+ Copy crosslines from file f to file g:
766
+
767
+ >>> f.xline = g.xline
768
+ """
769
+ self.xline[:] = value
770
+
771
+ @property
772
+ def fast(self):
773
+ """Access the 'fast' dimension
774
+
775
+ This mode yields iline or xline mode, depending on which one is laid
776
+ out `faster`, i.e. the line with linear disk layout. Use this mode if
777
+ the inline/crossline distinction isn't as interesting as traversing in
778
+ a fast manner (typically when you want to apply a function to the whole
779
+ file, line-by-line).
780
+
781
+ Returns
782
+ -------
783
+ fast : Line
784
+ line addressing mode
785
+
786
+ Notes
787
+ -----
788
+ .. versionadded:: 1.1
789
+ """
790
+ if self.sorting == TraceSortingFormat.INLINE_SORTING:
791
+ return self.iline
792
+ elif self.sorting == TraceSortingFormat.CROSSLINE_SORTING:
793
+ return self.xline
794
+ else:
795
+ raise RuntimeError("Unknown sorting.")
796
+
797
+ @property
798
+ def slow(self):
799
+ """Access the 'slow' dimension
800
+
801
+ This mode yields iline or xline mode, depending on which one is laid
802
+ out `slower`, i.e. the line with strided disk layout. Use this mode if
803
+ the inline/crossline distinction isn't as interesting as traversing in
804
+ the slower direction.
805
+
806
+ Returns
807
+ -------
808
+ slow : Line
809
+ line addressing mode
810
+
811
+ Notes
812
+ -----
813
+ .. versionadded:: 1.1
814
+ """
815
+ if self.sorting == TraceSortingFormat.INLINE_SORTING:
816
+ return self.xline
817
+ elif self.sorting == TraceSortingFormat.CROSSLINE_SORTING:
818
+ return self.iline
819
+ else:
820
+ raise RuntimeError("Unknown sorting.")
821
+
822
+ @property
823
+ def depth_slice(self):
824
+ """
825
+ Interact with segy in depth slice mode (fixed z-coordinate)
826
+
827
+ Returns
828
+ -------
829
+ depth : Depth
830
+
831
+ Notes
832
+ -----
833
+ .. versionadded:: 1.1
834
+
835
+ .. versionchanged:: 1.7.1
836
+ enabled for unstructured files
837
+ """
838
+
839
+ if self.depth is not None:
840
+ return self.depth
841
+
842
+ from .depth import Depth
843
+ self.depth = Depth(self)
844
+ return self.depth
845
+
846
+ @depth_slice.setter
847
+ def depth_slice(self, value):
848
+ """depth macro assignment
849
+
850
+ Convenient way for setting depth slices, from left-to-right as the depth slices
851
+ numbers are specified in the file.depth_slice property, from an iterable
852
+ set on the right-hand-side.
853
+
854
+ If the right-hand-side depth slices are exhausted before all the destination
855
+ file depth slices the writing will stop, i.e. not all all depth slices in the
856
+ destination file will be written.
857
+
858
+ Examples
859
+ --------
860
+ Copy depth slices from file f to file g:
861
+
862
+ >>> f.depth_slice = g.depth_slice
863
+
864
+ Copy first half of the depth slices from g to f:
865
+
866
+ >>> f.depth_slice = g.depth_slice[:g.samples/2]]
867
+ """
868
+ self.depth_slice[:] = value
869
+
870
+ @property
871
+ def gather(self):
872
+ """
873
+ Interact with segy in gather mode
874
+
875
+ Returns
876
+ -------
877
+ gather : Gather
878
+ """
879
+ if self.unstructured:
880
+ raise ValueError(self._unstructured_errmsg)
881
+
882
+ if self._gather is not None:
883
+ return self._gather
884
+
885
+ self._gather = Gather(self.trace, self.iline, self.xline, self.offsets)
886
+ return self._gather
887
+
888
+ @property
889
+ def text(self):
890
+ """Interact with segy in text mode
891
+
892
+ This mode gives access to reading and writing functionality for textual
893
+ headers.
894
+
895
+ The primary data type is the python string. Reading textual headers is
896
+ done with ``[]``, and writing is done via assignment. No additional
897
+ structure is built around the textual header, so everything is treated
898
+ as one long string without line breaks.
899
+
900
+ Returns
901
+ -------
902
+ text : Text
903
+
904
+ See also
905
+ --------
906
+ segyio.tools.wrap : line-wrap a text header
907
+
908
+ Notes
909
+ -----
910
+ .. versionadded:: 1.1
911
+ """
912
+ return Text(self.segyfd, self._ext_textheaders_count + 1)
913
+
914
+ @property
915
+ def stanza(self):
916
+ """Interact with segy in stanza mode
917
+
918
+ Allows reading stanzas present in the file.
919
+
920
+ Returns
921
+ -------
922
+ stanza : Stanza
923
+
924
+ Notes
925
+ -----
926
+ .. versionadded:: 2.0
927
+ """
928
+ return Stanza(self.segyfd)
929
+
930
+ @property
931
+ def bin(self):
932
+ """
933
+ Interact with segy in binary mode
934
+
935
+ This mode gives access to reading and writing functionality for the
936
+ binary header. Please note that using numeric binary offsets uses the
937
+ offset numbers from the specification, i.e. the first field of the
938
+ binary header starts at 3201, not 1. If you're using the enumerations
939
+ this is handled for you.
940
+
941
+ Returns
942
+ -------
943
+ binary : Field
944
+
945
+ Notes
946
+ -----
947
+ .. versionadded:: 1.1
948
+ """
949
+
950
+ return Field.binary(self)
951
+
952
+ @bin.setter
953
+ def bin(self, value):
954
+ """Update binary header
955
+
956
+ Update a value or replace the binary header
957
+
958
+ Parameters
959
+ ----------
960
+
961
+ value : dict_like
962
+ dict_like, keys of int or segyio.BinField or segyio.su
963
+
964
+ """
965
+ self.bin.update(value)
966
+
967
+ @property
968
+ def format(self):
969
+ d = {
970
+ -2: "4-byte native big-endian float",
971
+ -1: "4-byte native little-endian float",
972
+ 1: "4-byte IBM float",
973
+ 2: "4-byte signed integer",
974
+ 3: "2-byte signed integer",
975
+ 4: "4-byte fixed point with gain",
976
+ 5: "4-byte IEEE float",
977
+ 6: "8-byte IEEE float",
978
+ 7: "3-byte signed integer",
979
+ 8: "1-byte signed char",
980
+ 9: "8-byte signed integer",
981
+ 10: "4-byte unsigned integer",
982
+ 11: "2-byte unsigned integer",
983
+ 12: "8-byte unsigned integer",
984
+ 15: "3-byte unsigned integer",
985
+ 16: "1-byte unsigned char"
986
+ }
987
+
988
+ class fmt:
989
+ def __int__(inner):
990
+ return self._fmt
991
+
992
+ def __str__(inner):
993
+ if not self._fmt in d:
994
+ return "Unknown format"
995
+
996
+ return d[self._fmt]
997
+
998
+ return fmt()
999
+
1000
+ @property
1001
+ def encoding(self):
1002
+ d = {
1003
+ 0: "ebcdic",
1004
+ 1: "ascii",
1005
+ }
1006
+
1007
+ if not self._encoding in d:
1008
+ return "Unknown encoding"
1009
+
1010
+ return d[self._encoding]
1011
+
1012
+ @property
1013
+ def readonly(self):
1014
+ """File is read-only
1015
+
1016
+ Returns
1017
+ -------
1018
+ readonly : bool
1019
+ True if this file is read-only
1020
+
1021
+ Notes
1022
+ -----
1023
+ .. versionadded:: 1.6
1024
+ """
1025
+
1026
+ return self._datasource_descriptor.readonly()
1027
+
1028
+
1029
+ def interpret(self, ilines, xlines, offsets=None, sorting=TraceSortingFormat.INLINE_SORTING):
1030
+
1031
+ """ (Re-)interpret structure on top of a file
1032
+
1033
+ (Re-)interpret the structure of the file given the new sorting, ilines,
1034
+ xlines and offset indices. Note that file itself is not changed in any
1035
+ way, it is only segyio's interpretation of the file that changes. It's
1036
+ a way of telling segyio that a file is laid out in a particular way,
1037
+ even though the header fields say otherwise.
1038
+
1039
+ `interpret` expect that the ilines-, xlines- and offsets-indices are
1040
+ unique. It also expect the dimensions of ilines, xlines and offset to
1041
+ match the tracecount.
1042
+
1043
+ Parameters
1044
+ ----------
1045
+ f : SegyFile
1046
+ ilines : array_like
1047
+ ilines indices in new structure
1048
+ xlines : array_like
1049
+ xlines indices in new structure
1050
+ offsets : array_like
1051
+ offset indices in new structure
1052
+ sorting : int, string or TraceSortingFormat
1053
+ Sorting in new structure
1054
+
1055
+ Notes
1056
+ -----
1057
+
1058
+ .. versionadded:: 1.8
1059
+
1060
+ Examples
1061
+ --------
1062
+ (Re)interpret the structure of the file:
1063
+
1064
+ >>> ilines = [10, 11, 12, 13]
1065
+ >>> xlines = [20, 21, 22, 23, 24]
1066
+ >>> with segyio.open(file, ignore_geometry=True) as f:
1067
+ ... f.interpret(ilines, xlines)
1068
+ """
1069
+
1070
+ valid_sortings = {
1071
+ 1 : TraceSortingFormat.CROSSLINE_SORTING,
1072
+ 2 : TraceSortingFormat.INLINE_SORTING,
1073
+ 'iline' : TraceSortingFormat.INLINE_SORTING,
1074
+ 'inline' : TraceSortingFormat.INLINE_SORTING,
1075
+ 'xl' : TraceSortingFormat.CROSSLINE_SORTING,
1076
+ 'crossline' : TraceSortingFormat.CROSSLINE_SORTING,
1077
+ TraceSortingFormat.INLINE_SORTING : TraceSortingFormat.INLINE_SORTING,
1078
+ TraceSortingFormat.CROSSLINE_SORTING : TraceSortingFormat.CROSSLINE_SORTING,
1079
+ }
1080
+
1081
+ if sorting not in valid_sortings:
1082
+ error = "Invalid sorting"
1083
+ solution = "valid sorting options are: {}".format(valid_sortings.keys())
1084
+ raise ValueError('{}, {}'.format(error, solution))
1085
+
1086
+ if offsets is None:
1087
+ offsets = np.arange(1)
1088
+
1089
+ ilines = np.copy(np.asarray(ilines, dtype=np.intc))
1090
+ xlines = np.copy(np.asarray(xlines, dtype=np.intc))
1091
+ offsets = np.copy(np.asarray(offsets, dtype=np.intc))
1092
+
1093
+ if np.unique(ilines).size != ilines.size:
1094
+ error = "Inlines inconsistent"
1095
+ solution = "expect all inlines to be unique"
1096
+ raise ValueError("{}, {}".format(error, solution))
1097
+
1098
+ if np.unique(xlines).size != xlines.size:
1099
+ error = "Crosslines inconsistent"
1100
+ solution = "expect all crosslines to be unique"
1101
+ raise ValueError("{}, {}".format(error, solution))
1102
+
1103
+ if np.unique(offsets).size != offsets.size:
1104
+ error = "Offsets inconsistent"
1105
+ solution = "expect all offsets to be unique"
1106
+ raise ValueError("{}, {}".format(error, solution))
1107
+
1108
+ if ilines.size * xlines.size * offsets.size != self.tracecount:
1109
+ error = ("Invalid dimensions, ilines ({}) * xlines ({}) * offsets "
1110
+ "({}) should match the number of traces ({})").format(ilines.size,
1111
+ xlines.size,
1112
+ offsets.size,
1113
+ self.tracecount)
1114
+ raise ValueError(error)
1115
+
1116
+ from . import _segyio
1117
+
1118
+ line_metrics = _segyio.line_metrics(sorting,
1119
+ self.tracecount,
1120
+ ilines.size,
1121
+ xlines.size,
1122
+ offsets.size)
1123
+
1124
+ self._iline_length = line_metrics['iline_length']
1125
+ self._iline_stride = line_metrics['iline_stride']
1126
+
1127
+ self._xline_length = line_metrics['xline_length']
1128
+ self._xline_stride = line_metrics['xline_stride']
1129
+
1130
+ self._sorting = sorting
1131
+ self._offsets = offsets
1132
+ self._ilines = ilines
1133
+ self._xlines = xlines
1134
+
1135
+ return self
1136
+
1137
+ def group(self, word):
1138
+ """Get groups of traces matching a key
1139
+
1140
+ This feature is **experimental**, and there are no guarantees code
1141
+ using this will work in the future.
1142
+
1143
+ Walks the headers and groups traces into buckets, where all traces in a
1144
+ bucket have the same value for the given set of words. It is
1145
+ particularly useful for pre-stack files, gathering traces belonging to
1146
+ the same gather or shot.
1147
+
1148
+ Parameters
1149
+ ----------
1150
+ word : segyio.TraceField or iterable of segyio.TraceField
1151
+ The set of words belonging to the same bucket
1152
+
1153
+ Returns
1154
+ -------
1155
+ groups : segyio.gather.Groups
1156
+
1157
+ Notes
1158
+ -----
1159
+ This feature is **experimental**, but you are encouraged to try it out.
1160
+ Bug reports and suggestions for improvement are welcomed, but no
1161
+ guarantees are made that the interface will remain as it is in the
1162
+ future.
1163
+ """
1164
+ msg = 'group is experimental and may change in the future'
1165
+ warnings.warn(msg, FutureWarning)
1166
+ return Groups(self.trace, self.header, word)
1167
+
1168
+
1169
class spec(object):
    """Mutable description of a SEG-Y file's structure.

    Carries the parameters (line header words, labels, sample count,
    format, sorting, ...) used when building a new file.
    """

    def __init__(self):
        defaults = dict(
            iline=189,
            ilines=None,
            xline=193,
            xlines=None,
            offsets=[1],
            samples=None,
            ext_headers=0,
            format=None,
            sorting=None,
            endian='big',
            encoding='ebcdic',
            traceheader_count=1,
        )
        # every default becomes a plain instance attribute
        vars(self).update(defaults)