xtgeo 4.8.0__cp313-cp313-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of xtgeo might be problematic. Click here for more details.

Files changed (117) hide show
  1. cxtgeo.py +582 -0
  2. cxtgeoPYTHON_wrap.c +20938 -0
  3. xtgeo/__init__.py +246 -0
  4. xtgeo/_cxtgeo.cp313-win_amd64.pyd +0 -0
  5. xtgeo/_internal.cp313-win_amd64.pyd +0 -0
  6. xtgeo/common/__init__.py +19 -0
  7. xtgeo/common/_angles.py +29 -0
  8. xtgeo/common/_xyz_enum.py +50 -0
  9. xtgeo/common/calc.py +396 -0
  10. xtgeo/common/constants.py +30 -0
  11. xtgeo/common/exceptions.py +42 -0
  12. xtgeo/common/log.py +93 -0
  13. xtgeo/common/sys.py +166 -0
  14. xtgeo/common/types.py +18 -0
  15. xtgeo/common/version.py +21 -0
  16. xtgeo/common/xtgeo_dialog.py +604 -0
  17. xtgeo/cube/__init__.py +9 -0
  18. xtgeo/cube/_cube_export.py +214 -0
  19. xtgeo/cube/_cube_import.py +532 -0
  20. xtgeo/cube/_cube_roxapi.py +180 -0
  21. xtgeo/cube/_cube_utils.py +287 -0
  22. xtgeo/cube/_cube_window_attributes.py +340 -0
  23. xtgeo/cube/cube1.py +1023 -0
  24. xtgeo/grid3d/__init__.py +15 -0
  25. xtgeo/grid3d/_ecl_grid.py +774 -0
  26. xtgeo/grid3d/_ecl_inte_head.py +148 -0
  27. xtgeo/grid3d/_ecl_logi_head.py +71 -0
  28. xtgeo/grid3d/_ecl_output_file.py +81 -0
  29. xtgeo/grid3d/_egrid.py +1004 -0
  30. xtgeo/grid3d/_find_gridprop_in_eclrun.py +625 -0
  31. xtgeo/grid3d/_grdecl_format.py +266 -0
  32. xtgeo/grid3d/_grdecl_grid.py +388 -0
  33. xtgeo/grid3d/_grid3d.py +29 -0
  34. xtgeo/grid3d/_grid3d_fence.py +181 -0
  35. xtgeo/grid3d/_grid3d_utils.py +228 -0
  36. xtgeo/grid3d/_grid_boundary.py +76 -0
  37. xtgeo/grid3d/_grid_etc1.py +1566 -0
  38. xtgeo/grid3d/_grid_export.py +221 -0
  39. xtgeo/grid3d/_grid_hybrid.py +66 -0
  40. xtgeo/grid3d/_grid_import.py +79 -0
  41. xtgeo/grid3d/_grid_import_ecl.py +101 -0
  42. xtgeo/grid3d/_grid_import_roff.py +135 -0
  43. xtgeo/grid3d/_grid_import_xtgcpgeom.py +375 -0
  44. xtgeo/grid3d/_grid_refine.py +125 -0
  45. xtgeo/grid3d/_grid_roxapi.py +292 -0
  46. xtgeo/grid3d/_grid_wellzone.py +165 -0
  47. xtgeo/grid3d/_gridprop_export.py +178 -0
  48. xtgeo/grid3d/_gridprop_import_eclrun.py +164 -0
  49. xtgeo/grid3d/_gridprop_import_grdecl.py +130 -0
  50. xtgeo/grid3d/_gridprop_import_roff.py +52 -0
  51. xtgeo/grid3d/_gridprop_import_xtgcpprop.py +168 -0
  52. xtgeo/grid3d/_gridprop_lowlevel.py +171 -0
  53. xtgeo/grid3d/_gridprop_op1.py +174 -0
  54. xtgeo/grid3d/_gridprop_roxapi.py +239 -0
  55. xtgeo/grid3d/_gridprop_value_init.py +140 -0
  56. xtgeo/grid3d/_gridprops_import_eclrun.py +344 -0
  57. xtgeo/grid3d/_gridprops_import_roff.py +83 -0
  58. xtgeo/grid3d/_roff_grid.py +469 -0
  59. xtgeo/grid3d/_roff_parameter.py +303 -0
  60. xtgeo/grid3d/grid.py +2537 -0
  61. xtgeo/grid3d/grid_properties.py +699 -0
  62. xtgeo/grid3d/grid_property.py +1341 -0
  63. xtgeo/grid3d/types.py +15 -0
  64. xtgeo/io/__init__.py +1 -0
  65. xtgeo/io/_file.py +592 -0
  66. xtgeo/metadata/__init__.py +17 -0
  67. xtgeo/metadata/metadata.py +431 -0
  68. xtgeo/roxutils/__init__.py +7 -0
  69. xtgeo/roxutils/_roxar_loader.py +54 -0
  70. xtgeo/roxutils/_roxutils_etc.py +122 -0
  71. xtgeo/roxutils/roxutils.py +207 -0
  72. xtgeo/surface/__init__.py +18 -0
  73. xtgeo/surface/_regsurf_boundary.py +26 -0
  74. xtgeo/surface/_regsurf_cube.py +210 -0
  75. xtgeo/surface/_regsurf_cube_window.py +391 -0
  76. xtgeo/surface/_regsurf_cube_window_v2.py +297 -0
  77. xtgeo/surface/_regsurf_cube_window_v3.py +360 -0
  78. xtgeo/surface/_regsurf_export.py +388 -0
  79. xtgeo/surface/_regsurf_grid3d.py +271 -0
  80. xtgeo/surface/_regsurf_gridding.py +347 -0
  81. xtgeo/surface/_regsurf_ijxyz_parser.py +278 -0
  82. xtgeo/surface/_regsurf_import.py +347 -0
  83. xtgeo/surface/_regsurf_lowlevel.py +122 -0
  84. xtgeo/surface/_regsurf_oper.py +631 -0
  85. xtgeo/surface/_regsurf_roxapi.py +241 -0
  86. xtgeo/surface/_regsurf_utils.py +81 -0
  87. xtgeo/surface/_surfs_import.py +43 -0
  88. xtgeo/surface/_zmap_parser.py +138 -0
  89. xtgeo/surface/regular_surface.py +2967 -0
  90. xtgeo/surface/surfaces.py +276 -0
  91. xtgeo/well/__init__.py +24 -0
  92. xtgeo/well/_blockedwell_roxapi.py +221 -0
  93. xtgeo/well/_blockedwells_roxapi.py +68 -0
  94. xtgeo/well/_well_aux.py +30 -0
  95. xtgeo/well/_well_io.py +327 -0
  96. xtgeo/well/_well_oper.py +574 -0
  97. xtgeo/well/_well_roxapi.py +304 -0
  98. xtgeo/well/_wellmarkers.py +486 -0
  99. xtgeo/well/_wells_utils.py +158 -0
  100. xtgeo/well/blocked_well.py +216 -0
  101. xtgeo/well/blocked_wells.py +122 -0
  102. xtgeo/well/well1.py +1514 -0
  103. xtgeo/well/wells.py +211 -0
  104. xtgeo/xyz/__init__.py +6 -0
  105. xtgeo/xyz/_polygons_oper.py +272 -0
  106. xtgeo/xyz/_xyz.py +741 -0
  107. xtgeo/xyz/_xyz_data.py +646 -0
  108. xtgeo/xyz/_xyz_io.py +490 -0
  109. xtgeo/xyz/_xyz_lowlevel.py +42 -0
  110. xtgeo/xyz/_xyz_oper.py +613 -0
  111. xtgeo/xyz/_xyz_roxapi.py +766 -0
  112. xtgeo/xyz/points.py +681 -0
  113. xtgeo/xyz/polygons.py +811 -0
  114. xtgeo-4.8.0.dist-info/METADATA +145 -0
  115. xtgeo-4.8.0.dist-info/RECORD +117 -0
  116. xtgeo-4.8.0.dist-info/WHEEL +5 -0
  117. xtgeo-4.8.0.dist-info/licenses/LICENSE.md +165 -0
xtgeo/xyz/_xyz_io.py ADDED
@@ -0,0 +1,490 @@
1
+ """Private import and export routines for XYZ stuff."""
2
+
3
+ import contextlib
4
+
5
+ import numpy as np
6
+ import pandas as pd
7
+
8
+ from xtgeo.common._xyz_enum import _AttrName, _XYZType
9
+ from xtgeo.common.constants import UNDEF, UNDEF_INT
10
+ from xtgeo.common.exceptions import InvalidFileFormatError
11
+ from xtgeo.common.log import null_logger
12
+ from xtgeo.io._file import FileFormat, FileWrapper
13
+
14
+ logger = null_logger(__name__)
15
+
16
+
17
def import_xyz(pfile, zname=_AttrName.ZNAME.value):
    """Import a simple whitespace-separated X Y Z file.

    All points are read into a pandas dataframe, with 999.00 treated as the
    undefined value.

    Returns:
        dict: Keyword arguments with xname/yname/zname and the dataframe
        under 'values'.
    """
    columns = [_AttrName.XNAME.value, _AttrName.YNAME.value, zname]

    dataframe = pd.read_csv(
        pfile.file,
        sep=r"\s+",
        skiprows=0,
        header=None,
        names=columns,
        dtype=np.float64,
        na_values=999.00,
    )

    return {
        "zname": zname,
        "xname": columns[0],
        "yname": columns[1],
        "values": dataframe,
    }
33
+
34
+
35
def import_zmap(pfile, zname=_AttrName.ZNAME.value):
    """Import the zmap ascii polygon format; not sure about all details.

    The 16 header lines are skipped as-is, and the remaining free columns
    are read as X, Y, Z and a polygon ID. A typical file looks like::

        !
        ! File exported from RMS.
        !
        ! Project:
        ! Date: 2017-11-07T17:22:30
        !
        ! Polygons/points Z-MAP file generated for ''.
        ! Coordinate system is ''.
        !
        !------------------------------------------------------------------
        @FREE POINT        , DATA, 80, 1
        X (EASTING)        , 1, 1,  1, 1, 20,,    1.0E+30,,,   4, 0
        Y (NORTHING)       , 2, 2,  1, 21, 40,,   1.0E+30,,,   4, 0
        Z VALUE            , 3, 35, 1, 41, 60,,   1.0E+30,,,   4, 0
        SEG I.D.           , 4, 35, 1, 61, 70,,   1.0E+30,,,   0, 0
        @
           457357.781250   6782685.500000      1744.463379         0
           457359.343750   6782676.000000      1744.482056         0

    Returns:
        dict: Keyword arguments with xname/yname/zname and the dataframe
        under 'values'.
    """
    xname, yname, pname = "X_UTME", "Y_UTMN", "POLY_ID"

    # 1.0E+30 is the zmap undefined marker, cf. the header lines above
    dataframe = pd.read_csv(
        pfile.file,
        sep=r"\s+",
        skiprows=16,
        header=None,
        names=[xname, yname, zname, pname],
        dtype={
            xname: np.float64,
            yname: np.float64,
            zname: np.float64,
            pname: np.int32,
        },
        na_values=1.0e30,
    )

    return {"xname": xname, "yname": yname, "zname": zname, "values": dataframe}
85
+
86
+
87
def import_rms_attr(pfile, zname="Z_TVDSS"):
    """Import the RMS ascii Points format with attributes.

    It appears that the RMS attributes format is supported for Points only,
    hence Polygons is not admitted.

    Example::

        Discrete  FaultBlock
        String  FaultTag
        Float  VerticalSep
        519427.941  6733887.914  1968.988  6  UNDEF  UNDEF
        519446.363  6732037.910  1806.782  19  UNDEF  UNDEF
        519446.379  6732137.910  1795.707  19  UNDEF  UNDEF

    Returns a kwargs dict with the following items:
        xname
        yname
        zname
        values as a valid dataframe
        attributes

    Important notes from RMS manual and reverse engineering:

    * For discrete numbers use 'Discrete' or 'Integer', not 'Int'
    * For Discrete/Integer/Float both UNDEF and -999 will mark as undefined
    * For Discrete/Integer, numbers less than -999 seems to accepted by RMS
    * For String, use UNDEF only as undefined
    """
    xname, yname = "X_UTME", "Y_UTMN"
    kwargs = {"xname": xname, "yname": yname, "zname": zname}

    dtypes = {xname: np.float64, yname: np.float64, zname: np.float64}
    names = list(dtypes)
    _attrs = {}

    # RMS header keyword -> internal dtype tag; anything unknown becomes 'str'
    # (note that pandas treats dtype str as object,
    # cf. https://stackoverflow.com/questions/34881079)
    typemap = {"Discrete": "int", "String": "str", "Float": "float", "Int": "int"}

    # parse the header: one "<Type> <Name>" pair per line, until the first
    # line that does not have exactly two fields (i.e. the first data row)
    skiprows = 0
    with open(pfile.file, "r") as rmsfile:
        for iline in range(20):
            fields = rmsfile.readline().split()
            if len(fields) != 2:
                skiprows = iline
                break

            dty, cname = fields
            names.append(cname)
            _attrs[cname] = typemap.get(dty, "str")

    dfr = pd.read_csv(
        pfile.file,
        sep=r"\s+",
        skiprows=skiprows,
        header=None,
        names=names,
        dtype=dtypes,
    )

    for col in dfr.columns[3:]:
        if col in _attrs:
            # pandas gives a FutureWarning here due to casting what was
            # previously a string to a float/int.
            if _attrs[col] == "float":
                dfr[col] = dfr[col].replace("UNDEF", UNDEF).astype(float)
            elif _attrs[col] == "int":
                dfr[col] = dfr[col].replace("UNDEF", UNDEF_INT).astype(int)

            # cast to numerical if possible
            with contextlib.suppress(ValueError, TypeError):
                dfr[col] = pd.to_numeric(dfr[col])

    kwargs["values"] = dfr
    kwargs["attributes"] = _attrs

    return kwargs
179
+
180
+
181
def to_file(
    xyz,
    pfile,
    fformat="xyz",
    attributes=False,
    pfilter=None,
    wcolumn=None,
    hcolumn=None,
    mdcolumn="M_MDEPTH",
    ispolygons=False,
    **kwargs,
):
    """Export XYZ (Points/Polygons) to file.

    Args:
        pfile (str): Name of file
        fformat (str): File format xyz/poi/pol / rms_attr /rms_wellpicks
        attributes (bool or list): List of extra columns to export (some formats)
            or True for all attributes present
        pfilter (dict): Filter on e.g. top name(s) with keys TopName
            or ZoneName as {'TopName': ['Top1', 'Top2']}
        wcolumn (str): Name of well column (rms_wellpicks format only)
        hcolumn (str): Name of horizons column (rms_wellpicks format only)
        mdcolumn (str): Name of MD column (rms_wellpicks format only)

    Returns:
        Number of points exported

    Note that the rms_wellpicks will try to output to:

    * HorizonName, WellName, MD if a MD (mdcolumn) is present,
    * HorizonName, WellName, X, Y, Z otherwise

    Raises:
        KeyError if pfilter is set and key(s) are invalid

    """
    # honour the deprecated 'filter' keyword unless pfilter is explicitly given
    legacy_filter = kwargs.get("filter")
    if pfilter is None and legacy_filter is not None:
        pfilter = legacy_filter

    outfile = FileWrapper(pfile)
    outfile.check_folder(raiseerror=OSError)

    if xyz.get_dataframe(copy=False) is None:
        logger.warning("Nothing to export!")
        return 0

    if fformat is None or fformat in FileFormat.XYZ.value:
        # NB! reuse export_rms_attr function, but no attributes are possible
        nexported = export_rms_attr(
            xyz, outfile.name, attributes=False, pfilter=pfilter, ispolygons=ispolygons
        )
    elif fformat in FileFormat.RMS_ATTR.value:
        nexported = export_rms_attr(
            xyz,
            outfile.name,
            attributes=attributes,
            pfilter=pfilter,
            ispolygons=ispolygons,
        )
    elif fformat == "rms_wellpicks":
        nexported = export_rms_wpicks(
            xyz, outfile.name, hcolumn, wcolumn, mdcolumn=mdcolumn
        )
    else:
        extensions = FileFormat.extensions_string([FileFormat.XYZ, FileFormat.RMS_ATTR])
        raise InvalidFileFormatError(
            f"File format {fformat} is invalid for type Points or Polygons. "
            f"Supported formats are {extensions}, 'rms_wellpicks'."
        )

    # exporters may return None; normalize to an integer count
    nexported = nexported or 0
    if nexported == 0:
        logger.warning("Nothing to export!")

    return nexported
261
+
262
+
263
def export_rms_attr(self, pfile, attributes=True, pfilter=None, ispolygons=False):
    """Export to RMS attribute format, also called RMS extended set.

    If attributes is False/None, the output is a simple XYZ file.

    Attributes can be a bool or a list. If True, then use all attributes
    (relevant for Points; a polygons object with an ID column and no
    attributes is converted to the 999-flagged layout instead).

    Filter is on the form {TopName: ['Name1', 'Name2']}

    Args:
        pfile (str): File to export to.
        attributes (bool or list): Extra columns to export, or True for all.
        pfilter (dict): Column/values filter applied before export.
        ispolygons (bool): True when the object is a Polygons instance.

    Returns:
        The number of values exported. If value is 0; then no file
        is made.

    Raises:
        KeyError: If a pfilter key is not a column in the dataframe.
    """
    df = self.get_dataframe()

    # BUGFIX: 'not df.index.any()' wrongly reported a single-row frame
    # (default index [0]) as empty; df.empty is the correct test
    if df.empty:
        logger.warning("Nothing to export")
        return 0

    columns = [self._xname, self._yname, self.zname]
    df.fillna(value=999.0, inplace=True)

    mode = "w"

    # internal dtype tag -> RMS attribute header keyword
    transl = {"int": "Discrete", "float": "Float", "str": "String"}

    logger.info("Attributes is %s", attributes)

    # apply pfilter if any
    if pfilter:
        for key, val in pfilter.items():
            if key not in df.columns:
                raise KeyError(
                    f"The requested pfilter key {key} was not found in dataframe. "
                    f"Valid keys are {df.columns}"
                )
            df = df.loc[df[key].isin(val)]

        # per the documented contract, zero exported values means no file made;
        # previously an empty (header-only) file was still written here
        if df.empty:
            logger.warning("Nothing to export")
            return 0

    if ispolygons:
        if not attributes and self._pname in df.columns:
            # need to convert the dataframe to the 999-flagged layout
            df = _convert_idbased_xyz(self, df)
    elif attributes is True:
        attributes = list(self._attrs.keys())
        logger.info("Use all attributes: %s", attributes)

        # the coordinate columns themselves must not be listed as attributes
        for column in (self._xname, self._yname, self._zname):
            try:
                attributes.remove(column)
            except ValueError:
                continue

    if isinstance(attributes, list):
        mode = "a"
        with open(pfile, "w") as fout:
            for col in attributes:
                if col not in df.columns:
                    # BUGFIX: previously a missing attribute column was kept in
                    # 'columns' and crashed df.to_csv with a KeyError; skip it
                    logger.warning("Attribute column %s not in dataframe", col)
                    continue
                columns.append(col)
                fout.write(transl[self._attrs[col]] + " " + col + "\n")
                # write the undefined marker the way RMS expects it
                if self._attrs[col] == "int":
                    df[col] = df[col].replace(UNDEF_INT, "UNDEF")
                elif self._attrs[col] == "float":
                    df[col] = df[col].replace(UNDEF, "UNDEF")

    with open(pfile, mode) as fc:
        df.to_csv(fc, sep=" ", header=None, columns=columns, index=False)

    return len(df.index)
332
+
333
+
334
+ def _convert_idbased_xyz(self, df):
335
+ """Conversion of format from ID column to 999 flag."""
336
+
337
+ # If polygons, there is a 4th column with POLY_ID. This needs
338
+ # to replaced by adding 999 line instead (for polygons)
339
+ # prior to XYZ export or when interactions in CXTGEO
340
+
341
+ idgroups = df.groupby(self._pname)
342
+
343
+ newdf = pd.DataFrame(
344
+ columns=[self._xname, self._yname, self._zname], dtype="float64"
345
+ )
346
+ udef = pd.DataFrame(
347
+ [[999.0, 999.0, 999.0]], columns=[self._xname, self._yname, self._zname]
348
+ )
349
+
350
+ for _id, gr in idgroups:
351
+ dfx = gr.drop(self._pname, axis=1)
352
+ newdf = pd.concat([newdf, dfx, udef], ignore_index=True)
353
+
354
+ return newdf
355
+
356
+
357
def export_rms_wpicks(self, pfile, hcolumn, wcolumn, mdcolumn="M_MDEPTH"):
    """Export to RMS wellpicks format.

    If a MD column (mdcolumn) exists, it will use the MD, i.e. the output
    columns are HorizonName, WellName, MD; otherwise they are HorizonName,
    WellName, X, Y, Z.

    Args:
        pfile (str): File to export to
        hcolumn (str): Name of horizon/zone column in the point set
        wcolumn (str): Name of well column in the point set
        mdcolumn (str): Name of measured depth column (if any)

    Returns:
        The number of values exported. If value is 0; then no file
        is made.

    Raises:
        ValueError: If hcolumn or wcolumn is not present in the dataframe.
    """
    df = self.get_dataframe()

    # BUGFIX: 'not df.index.any()' wrongly reported a single-row frame
    # (default index [0]) as empty; df.empty is the correct test. The
    # original also repeated this check a second time just before writing;
    # that duplicate was dead code and is removed.
    if df.empty:
        logger.warning("Nothing to export")
        return 0

    columns = []

    if hcolumn in df.columns:
        columns.append(hcolumn)
    else:
        raise ValueError(f"Column for horizons/zones <{hcolumn}> not present")

    if wcolumn in df.columns:
        columns.append(wcolumn)
    else:
        raise ValueError(f"Column for wells <{wcolumn}> not present")

    if mdcolumn in df.columns:
        columns.append(mdcolumn)
    else:
        # no MD available; fall back to the coordinate columns
        columns += [self._xname, self._yname, self._zname]

    with open(pfile, "w") as fc:
        df.to_csv(fc, sep=" ", header=None, columns=columns, index=False)

    return len(df.index)
404
+
405
+
406
def _from_list_like(plist, zname, attrs, xyztype) -> pd.DataFrame:
    """Import Points or Polygons from a list-like input.

    The following 'list-like' inputs are possible:

    * List of tuples [(x1, y1, z1, <id1>), (x2, y2, z2, <id2>), ...].
    * List of lists [[x1, y1, z1, <id1>], [x2, y2, z2, <id2>], ...].
    * List of numpy arrays [nparr1, nparr2, ...] where nparr1 is first row.
    * A numpy array with shape [nrow, ncol], where ncol >= 3

    (A pandas dataframe is NOT handled here and raises TypeError; it must be
    handled by the caller.)

    Points scenarios:
    * 3 columns, X Y Z
    * 4 or more columns: rest columns are attributes

    Polygons scenarios:
    * 3 columns, X Y Z. Here P column is assigned 0 afterwards
    * 4 or more columns:
      - if totnum = lenattrs + 3 then POLY_ID is missing and will be made
      - if totnum = lenattrs + 4 then assume that 4'th column is POLY_ID

    There is currently not much error checking that lists/tuples are
    consistent, e.g. if there always is either 3 or 4 elements per tuple, or
    that the 4th number is an integer.

    Args:
        plist: List of tuples/lists/arrays, or a 2D numpy array.
        zname (str): Name of third column
        attrs (dict): Attributes, for Points (may be None)
        xyztype (str): POINTS/POLYGONS/...

    Returns:
        A valid dataframe

    Raises:
        ValueError: If something is wrong with input
        TypeError: If the input cannot be converted at all

    .. versionadded:: 2.16
    """
    pname = _AttrName.PNAME.value  # used consistently instead of a literal

    if isinstance(plist, list):
        plist = np.array(plist)

    if not isinstance(plist, np.ndarray):
        raise TypeError("Not possible to make XYZ from given input")

    logger.info("Process numpy to points")
    if len(plist) == 0:
        # empty input gives an empty, but correctly labelled, dataframe
        return pd.DataFrame([], columns=["X_UTME", "Y_UTMN", zname])

    if plist.ndim != 2:
        # BUGFIX: message previously read "must two-dimensional"
        raise ValueError("Input numpy array must be two-dimensional")

    totnum = plist.shape[1]
    lenattrs = len(attrs) if attrs is not None else 0
    attr_first_col = 3

    if totnum == 3 + lenattrs:
        dfr = pd.DataFrame(plist[:, :3], columns=["X_UTME", "Y_UTMN", zname])
        dfr = dfr.astype(float)
        if xyztype == _XYZType.POLYGONS.value:
            # pname column is missing but assign 0 as ID
            dfr[pname] = 0
    elif totnum == 4 + lenattrs and xyztype == _XYZType.POLYGONS.value:
        dfr = pd.DataFrame(
            plist[:, :4],
            columns=["X_UTME", "Y_UTMN", zname, pname],
        )
        attr_first_col = 4
    else:
        raise ValueError(
            f"Wrong length detected of row: {totnum}. Are attributes set correct?"
        )

    # NOTE: a previous bare 'dfr.dropna()' here was a no-op (its result was
    # discarded). NaN rows are deliberately kept: dropping them would break
    # the row alignment with the attribute columns assigned below.
    dfr = dfr.astype(np.float64)
    if xyztype == _XYZType.POLYGONS.value:
        dfr[pname] = dfr[pname].astype(np.int32)

    if lenattrs > 0:
        # attribute columns follow the coordinate (and possibly ID) columns
        for enum, (key, dtype) in enumerate(attrs.items()):
            dfr[key] = plist[:, attr_first_col + enum]
            dfr = dfr.astype({key: dtype})

    return dfr
@@ -0,0 +1,42 @@
1
+ """Private low level routines (SWIG vs C)"""
2
+
3
+ import numpy as np
4
+
5
+ from xtgeo import _cxtgeo
6
+ from xtgeo.common.log import null_logger
7
+
8
+ logger = null_logger(__name__)
9
+
10
+
11
def convert_np_carr_int(xyz, np_array):  # pragma: no cover
    """Convert a 1D numpy array to a C (SWIG) array, assuming int type.

    The numpy data is always float64 on the Python side, so it is cast to
    int32 before being copied into a C array sized by xyz.nrow.
    """
    c_array = _cxtgeo.new_intarray(xyz.nrow)
    _cxtgeo.swig_numpy_to_carr_i1d(np_array.astype(np.int32), c_array)
    return c_array
24
+
25
+
26
def convert_np_carr_double(xyz, np_array):  # pragma: no cover
    """Convert a 1D numpy array to a C (SWIG) array, assuming double type.

    A C double array sized by xyz.nrow is allocated and filled from the
    numpy input.
    """
    c_array = _cxtgeo.new_doublearray(xyz.nrow)
    _cxtgeo.swig_numpy_to_carr_1d(np_array, c_array)
    return c_array
34
+
35
+
36
def convert_carr_double_np(xyz, carray, nlen=None):  # pragma: no cover
    """Convert a C (SWIG) array to numpy, assuming double type.

    When nlen is not given, the length is taken from the xyz object's
    internal dataframe.
    """
    length = len(xyz._df.index) if nlen is None else nlen
    return _cxtgeo.swig_carr_to_numpy_1d(length, carray)