lasmnemonicsid 0.0.3rc0__tar.gz → 0.0.5__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lasmnemonicsid-0.0.5/PKG-INFO +276 -0
- lasmnemonicsid-0.0.5/README.md +238 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/pyproject.toml +2 -2
- lasmnemonicsid-0.0.5/src/LASMnemonicsID/ASCII/ASCII.py +134 -0
- lasmnemonicsid-0.0.5/src/LASMnemonicsID/ASCII/__init__.py +2 -0
- lasmnemonicsid-0.0.5/src/LASMnemonicsID/DLIS/DLIS.py +150 -0
- lasmnemonicsid-0.0.5/src/LASMnemonicsID/DLIS/__init__.py +2 -0
- lasmnemonicsid-0.0.5/src/LASMnemonicsID/__init__.py +15 -0
- lasmnemonicsid-0.0.5/src/lasmnemonicsid.egg-info/PKG-INFO +276 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/src/lasmnemonicsid.egg-info/SOURCES.txt +4 -0
- lasmnemonicsid-0.0.5/tests/test_ascii.py +221 -0
- lasmnemonicsid-0.0.5/tests/test_dlis.py +161 -0
- lasmnemonicsid-0.0.3rc0/PKG-INFO +0 -110
- lasmnemonicsid-0.0.3rc0/README.md +0 -72
- lasmnemonicsid-0.0.3rc0/src/LASMnemonicsID/DLIS/__init__.py +0 -0
- lasmnemonicsid-0.0.3rc0/src/LASMnemonicsID/__init__.py +0 -16
- lasmnemonicsid-0.0.3rc0/src/lasmnemonicsid.egg-info/PKG-INFO +0 -110
- lasmnemonicsid-0.0.3rc0/tests/test_dlis.py +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/LICENSE +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/setup.cfg +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/src/LASMnemonicsID/LAS/LAS.py +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/src/LASMnemonicsID/LAS/__init__.py +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/src/LASMnemonicsID/utils/__init__.py +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/src/LASMnemonicsID/utils/mnemonics.py +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/src/lasmnemonicsid.egg-info/dependency_links.txt +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/src/lasmnemonicsid.egg-info/requires.txt +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/src/lasmnemonicsid.egg-info/top_level.txt +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/tests/test_las.py +0 -0
- {lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/tests/test_utils.py +0 -0
lasmnemonicsid-0.0.5/PKG-INFO
@@ -0,0 +1,276 @@
+Metadata-Version: 2.4
+Name: lasmnemonicsid
+Version: 0.0.5
+Summary: Well log mnemonic identification using lasio and dlisio to load LAS/DLIS/ASCII files into DataFrames
+Author-email: Nobleza Energy <info@nobleza-energy.com>
+License: MIT
+Project-URL: Homepage, https://nobleza-energy.github.io/LASMnemonicsID
+Project-URL: Repository, https://github.com/Nobleza-Energy/LASMnemonicsID
+Project-URL: Documentation, https://nobleza-energy.github.io/LASMnemonicsID/
+Project-URL: Bug Tracker, https://github.com/Nobleza-Energy/LASMnemonicsID/issues
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Scientific/Engineering
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: numpy>=1.21.0
+Requires-Dist: pandas>=2.0.1
+Requires-Dist: lasio>=0.30
+Requires-Dist: dlisio>=1.0.0
+Provides-Extra: docs
+Requires-Dist: mkdocs>=1.5.0; extra == "docs"
+Requires-Dist: mkdocs-material>=9.0.0; extra == "docs"
+Requires-Dist: mkdocstrings[python]>=0.24.0; extra == "docs"
+Provides-Extra: dev
+Requires-Dist: pytest>=7.0.0; extra == "dev"
+Requires-Dist: black>=23.0.0; extra == "dev"
+Requires-Dist: isort>=5.12.0; extra == "dev"
+Requires-Dist: flake8>=6.0.0; extra == "dev"
+Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
+Dynamic: license-file
+
+# LASMnemonicsID
+
+<p align="center">
+<img src="https://github.com/Nobleza-Energy/LASMnemonicsID/blob/e44bfb606fef5cfc9c3df6e41c3d1bd0d7bb08ae/logo.png?raw=true" alt="LASMnemonicsID Logo" width="200"/>
+</p>
+
+<p align="center">
+<b>Well log mnemonic identification and standardization for LAS, DLIS, and ASCII formats</b>
+</p>
+
+<p align="center">
+<a href="https://pypi.org/project/lasmnemonicsid/"><img src="https://img.shields.io/pypi/v/lasmnemonicsid.svg" alt="PyPI"></a>
+<a href="https://pypi.org/project/lasmnemonicsid/"><img src="https://img.shields.io/pypi/pyversions/lasmnemonicsid.svg" alt="Python Versions"></a>
+<a href="https://github.com/Nobleza-Energy/LASMnemonicsID/blob/main/LICENSE"><img src="https://img.shields.io/github/license/Nobleza-Energy/LASMnemonicsID.svg" alt="License"></a>
+</p>
+
+---
+
+## Features
+
+- **Multi-format support**: LAS, DLIS, ASCII/CSV/TXT/DAT
+- **Automatic mnemonic standardization**: GR, RHOB, NPHI, DT, SP, CALI, RT, etc.
+- **Batch processing**: Parse entire directories recursively
+- **Customizable naming**: Override default standard names
+- **Case-insensitive extensions**: Works with .las/.LAS, .dlis/.DLIS, .csv/.CSV, etc.
+- **Pandas integration**: Returns clean DataFrames ready for analysis
+
+---
+
+## Installation
+
+```bash
+pip install lasmnemonicsid
+```
+
+This installs support for **all formats** (LAS, DLIS, ASCII/CSV/TXT).
+
+---
+
+## Quick Start
+
+### LAS Files
+
+```python
+from LASMnemonicsID import parseLAS
+
+# Parse single LAS file
+df = parseLAS("well.las")
+print(df.head())
+
+# Parse directory
+data = parseLAS("/path/to/las/files/")
+for filename, df in data.items():
+    print(f"{filename}: {df.shape}")
+```
+
+### DLIS Files
+
+```python
+from LASMnemonicsID import parseDLIS
+
+# Parse single DLIS file
+df = parseDLIS("well.dlis")
+print(df.columns)
+
+# Parse directory
+data = parseDLIS("/path/to/dlis/files/")
+```
+
+### ASCII/CSV/TXT Files
+
+```python
+from LASMnemonicsID import parseASCII
+
+# Parse CSV
+df = parseASCII("well_log.csv", depth_col="DEPTH")
+
+# Parse tab-separated TXT
+df = parseASCII("well_log.txt", delimiter="\t")
+
+# Parse directory
+data = parseASCII("/path/to/csv/files/")
+```
+
+---
+
+## Advanced Usage
+
+### Custom Preferred Names
+
+```python
+preferred = {
+    "deepres": "RT",
+    "deepres_preferred_original": "AT90",
+    "gamma": "GR"
+}
+
+df = parseLAS("well.las", preferred_names=preferred)
+```
+
+### Batch Processing
+
+```python
+from pathlib import Path
+
+dir_path = Path("/data/wells/")
+data = parseLAS(dir_path, verbose=True, preferred_names=preferred)
+
+for fname, df in data.items():
+    print(f"{fname}: {df.shape}")
+    print(df.head(3))
+```
+
+### Mixed Format Directories
+
+```python
+las_data = parseLAS("/data/wells/")
+dlis_data = parseDLIS("/data/wells/")
+ascii_data = parseASCII("/data/wells/")
+
+all_data = {**las_data, **dlis_data, **ascii_data}
+```
+
+---
+
+## Supported Mnemonics
+
+The package automatically standardizes these curve types:
+
+| Curve Type | Standard Name | Example Aliases |
+|------------|---------------|-------------|
+| Gamma Ray | `GR` | gr, cggr, cgr, gam, gamma, gammaray, grc, grd, hgr, sgr, lgr, pgr |
+| Spontaneous Potential | `SP` | sp, idsp, spr, spl, spdl, spdhp, spc, sp0, sp1, cgsp, dlsp |
+| Caliper | `CALI` | caliper, calip, cal, dcal, acal, cala, cald, cale, calh, hcal, xcal, ycal |
+| Deep Resistivity | `RT` | rt, rtao, rt90, ild, idph, rild, rd, ae90, at90, atrt, lld, lldc, res, resd |
+| Shallow Resistivity | `RXO` | rxo, rxoz, msfl, mcfl, sflcc, mgl, m1rx, r40o, aht10 |
+| Density | `RHOB` | rhob, rhoz, den, denb, denc, hrho, hrhob, zden, hden, denf, denn |
+| Density Correction | `DRHO` | dcor, dcorr, dc, decr, drh, zcor, zcorr, hhdr, denscorr |
+| Neutron Porosity | `NPHI` | cn, phin, cnc, cns, hnphi, nphi, npor, cncc, nprl, neut, neutpor |
+| Sonic (Compressional) | `DT` | dt, dtc, dtco, dtcomp, deltat, slow, slowness, tt, ac, acco, delt, dtcomp |
+| Sonic (Shear) | `DTS` | dts, dtsh, dtsm, dtsc, dtsd, dtsqi, dtshear, deltas, tts, stt, dtshear |
+| Photoelectric Factor | `PEF` | pe, pef, pefz, pdpe, pedf, pedn, hpedn, pe2, pef8, lpe |
+
+
+---
+
+## Testing
+
+```bash
+pytest tests/ -v
+pytest tests/test_las.py -v
+pytest tests/test_dlis.py -v
+pytest tests/test_ascii.py -v
+```
+
+---
+
+## API Reference
+
+### parseLAS(input_path, verbose=True, preferred_names=None)
+
+Parse LAS file(s) and standardize mnemonics.
+
+**Parameters:**
+- input_path (str/Path): LAS file or directory
+- verbose (bool): Print parsing info
+- preferred_names (dict): Custom name mappings
+
+**Returns:** DataFrame (single file) or dict (multiple files)
+
+### parseDLIS(input_path, verbose=True, preferred_names=None)
+
+Parse DLIS file(s) and standardize mnemonics.
+
+**Parameters:**
+- input_path (str/Path): DLIS file or directory
+- verbose (bool): Print parsing info
+- preferred_names (dict): Custom name mappings
+
+**Returns:** DataFrame (single file) or dict (multiple files)
+
+### parseASCII(input_path, verbose=True, preferred_names=None, depth_col="DEPTH", delimiter=",")
+
+Parse ASCII/CSV/TXT file(s) and standardize mnemonics.
+
+**Parameters:**
+- input_path (str/Path): ASCII file or directory
+- verbose (bool): Print parsing info
+- preferred_names (dict): Custom name mappings
+- depth_col (str): Name of depth column
+- delimiter (str): Field separator
+
+**Returns:** DataFrame (single file) or dict (multiple files)
+
+---
+
+## How to Cite
+
+**APA**
+
+> Nobleza Energy. (2026). LASMnemonicsID: Well log mnemonic identification for LAS, DLIS, and ASCII formats [Software]. GitHub. https://github.com/Nobleza-Energy/LASMnemonicsID
+
+**BibTeX**
+
+```bibtex
+@software{LASMnemonicsID,
+  author = {Nobleza Energy},
+  title = {LASMnemonicsID: Well log mnemonic identification for LAS, DLIS, and ASCII formats},
+  year = {2026},
+  publisher = {GitHub},
+  url = {https://github.com/Nobleza-Energy/LASMnemonicsID}
+}
+```
+
+---
+
+## License
+
+MIT License - see [LICENSE](LICENSE) file.
+
+---
+
+## Contributing
+
+Contributions welcome! Submit a Pull Request.
+
+---
+
+## Support
+
+- **Issues:** [GitHub Issues](https://github.com/Nobleza-Energy/LASMnemonicsID/issues)
+- **Discussions:** [GitHub Discussions](https://github.com/Nobleza-Energy/LASMnemonicsID/discussions)
+
+---
+
+<p align="center">
+Made with ❤️ by <a href="https://nobleza-energy.com">Nobleza Energy</a>
+</p>
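The metadata and API reference above note that each parse function returns either a single DataFrame (one file) or a dict keyed by filename (a directory). A small, hedged sketch of handling both return shapes uniformly; the directory path and the `"single_file"` key are illustrative, not part of the package:

```python
import pandas as pd

from LASMnemonicsID import parseLAS

# parseLAS returns a DataFrame for one file, or {filename: DataFrame} for a directory.
# Normalising to a dict gives downstream code a single shape to handle.
result = parseLAS("/data/wells/")  # illustrative path
wells = {"single_file": result} if isinstance(result, pd.DataFrame) else result

for name, df in wells.items():
    print(f"{name}: {df.shape}")
```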
lasmnemonicsid-0.0.5/README.md
@@ -0,0 +1,238 @@
+# LASMnemonicsID
+
+<p align="center">
+<img src="https://github.com/Nobleza-Energy/LASMnemonicsID/blob/e44bfb606fef5cfc9c3df6e41c3d1bd0d7bb08ae/logo.png?raw=true" alt="LASMnemonicsID Logo" width="200"/>
+</p>
+
+<p align="center">
+<b>Well log mnemonic identification and standardization for LAS, DLIS, and ASCII formats</b>
+</p>
+
+<p align="center">
+<a href="https://pypi.org/project/lasmnemonicsid/"><img src="https://img.shields.io/pypi/v/lasmnemonicsid.svg" alt="PyPI"></a>
+<a href="https://pypi.org/project/lasmnemonicsid/"><img src="https://img.shields.io/pypi/pyversions/lasmnemonicsid.svg" alt="Python Versions"></a>
+<a href="https://github.com/Nobleza-Energy/LASMnemonicsID/blob/main/LICENSE"><img src="https://img.shields.io/github/license/Nobleza-Energy/LASMnemonicsID.svg" alt="License"></a>
+</p>
+
+---
+
+## Features
+
+- **Multi-format support**: LAS, DLIS, ASCII/CSV/TXT/DAT
+- **Automatic mnemonic standardization**: GR, RHOB, NPHI, DT, SP, CALI, RT, etc.
+- **Batch processing**: Parse entire directories recursively
+- **Customizable naming**: Override default standard names
+- **Case-insensitive extensions**: Works with .las/.LAS, .dlis/.DLIS, .csv/.CSV, etc.
+- **Pandas integration**: Returns clean DataFrames ready for analysis
+
+---
+
+## Installation
+
+```bash
+pip install lasmnemonicsid
+```
+
+This installs support for **all formats** (LAS, DLIS, ASCII/CSV/TXT).
+
+---
+
+## Quick Start
+
+### LAS Files
+
+```python
+from LASMnemonicsID import parseLAS
+
+# Parse single LAS file
+df = parseLAS("well.las")
+print(df.head())
+
+# Parse directory
+data = parseLAS("/path/to/las/files/")
+for filename, df in data.items():
+    print(f"{filename}: {df.shape}")
+```
+
+### DLIS Files
+
+```python
+from LASMnemonicsID import parseDLIS
+
+# Parse single DLIS file
+df = parseDLIS("well.dlis")
+print(df.columns)
+
+# Parse directory
+data = parseDLIS("/path/to/dlis/files/")
+```
+
+### ASCII/CSV/TXT Files
+
+```python
+from LASMnemonicsID import parseASCII
+
+# Parse CSV
+df = parseASCII("well_log.csv", depth_col="DEPTH")
+
+# Parse tab-separated TXT
+df = parseASCII("well_log.txt", delimiter="\t")
+
+# Parse directory
+data = parseASCII("/path/to/csv/files/")
+```
+
+---
+
+## Advanced Usage
+
+### Custom Preferred Names
+
+```python
+preferred = {
+    "deepres": "RT",
+    "deepres_preferred_original": "AT90",
+    "gamma": "GR"
+}
+
+df = parseLAS("well.las", preferred_names=preferred)
+```
+
+### Batch Processing
+
+```python
+from pathlib import Path
+
+dir_path = Path("/data/wells/")
+data = parseLAS(dir_path, verbose=True, preferred_names=preferred)
+
+for fname, df in data.items():
+    print(f"{fname}: {df.shape}")
+    print(df.head(3))
+```
+
+### Mixed Format Directories
+
+```python
+las_data = parseLAS("/data/wells/")
+dlis_data = parseDLIS("/data/wells/")
+ascii_data = parseASCII("/data/wells/")
+
+all_data = {**las_data, **dlis_data, **ascii_data}
+```
+
+---
+
+## Supported Mnemonics
+
+The package automatically standardizes these curve types:
+
+| Curve Type | Standard Name | Example Aliases |
+|------------|---------------|-------------|
+| Gamma Ray | `GR` | gr, cggr, cgr, gam, gamma, gammaray, grc, grd, hgr, sgr, lgr, pgr |
+| Spontaneous Potential | `SP` | sp, idsp, spr, spl, spdl, spdhp, spc, sp0, sp1, cgsp, dlsp |
+| Caliper | `CALI` | caliper, calip, cal, dcal, acal, cala, cald, cale, calh, hcal, xcal, ycal |
+| Deep Resistivity | `RT` | rt, rtao, rt90, ild, idph, rild, rd, ae90, at90, atrt, lld, lldc, res, resd |
+| Shallow Resistivity | `RXO` | rxo, rxoz, msfl, mcfl, sflcc, mgl, m1rx, r40o, aht10 |
+| Density | `RHOB` | rhob, rhoz, den, denb, denc, hrho, hrhob, zden, hden, denf, denn |
+| Density Correction | `DRHO` | dcor, dcorr, dc, decr, drh, zcor, zcorr, hhdr, denscorr |
+| Neutron Porosity | `NPHI` | cn, phin, cnc, cns, hnphi, nphi, npor, cncc, nprl, neut, neutpor |
+| Sonic (Compressional) | `DT` | dt, dtc, dtco, dtcomp, deltat, slow, slowness, tt, ac, acco, delt, dtcomp |
+| Sonic (Shear) | `DTS` | dts, dtsh, dtsm, dtsc, dtsd, dtsqi, dtshear, deltas, tts, stt, dtshear |
+| Photoelectric Factor | `PEF` | pe, pef, pefz, pdpe, pedf, pedn, hpedn, pe2, pef8, lpe |
+
+
+---
+
+## Testing
+
+```bash
+pytest tests/ -v
+pytest tests/test_las.py -v
+pytest tests/test_dlis.py -v
+pytest tests/test_ascii.py -v
+```
+
+---
+
+## API Reference
+
+### parseLAS(input_path, verbose=True, preferred_names=None)
+
+Parse LAS file(s) and standardize mnemonics.
+
+**Parameters:**
+- input_path (str/Path): LAS file or directory
+- verbose (bool): Print parsing info
+- preferred_names (dict): Custom name mappings
+
+**Returns:** DataFrame (single file) or dict (multiple files)
+
+### parseDLIS(input_path, verbose=True, preferred_names=None)
+
+Parse DLIS file(s) and standardize mnemonics.
+
+**Parameters:**
+- input_path (str/Path): DLIS file or directory
+- verbose (bool): Print parsing info
+- preferred_names (dict): Custom name mappings
+
+**Returns:** DataFrame (single file) or dict (multiple files)
+
+### parseASCII(input_path, verbose=True, preferred_names=None, depth_col="DEPTH", delimiter=",")
+
+Parse ASCII/CSV/TXT file(s) and standardize mnemonics.
+
+**Parameters:**
+- input_path (str/Path): ASCII file or directory
+- verbose (bool): Print parsing info
+- preferred_names (dict): Custom name mappings
+- depth_col (str): Name of depth column
+- delimiter (str): Field separator
+
+**Returns:** DataFrame (single file) or dict (multiple files)
+
+---
+
+## How to Cite
+
+**APA**
+
+> Nobleza Energy. (2026). LASMnemonicsID: Well log mnemonic identification for LAS, DLIS, and ASCII formats [Software]. GitHub. https://github.com/Nobleza-Energy/LASMnemonicsID
+
+**BibTeX**
+
+```bibtex
+@software{LASMnemonicsID,
+  author = {Nobleza Energy},
+  title = {LASMnemonicsID: Well log mnemonic identification for LAS, DLIS, and ASCII formats},
+  year = {2026},
+  publisher = {GitHub},
+  url = {https://github.com/Nobleza-Energy/LASMnemonicsID}
+}
+```
+
+---
+
+## License
+
+MIT License - see [LICENSE](LICENSE) file.
+
+---
+
+## Contributing
+
+Contributions welcome! Submit a Pull Request.
+
+---
+
+## Support
+
+- **Issues:** [GitHub Issues](https://github.com/Nobleza-Energy/LASMnemonicsID/issues)
+- **Discussions:** [GitHub Discussions](https://github.com/Nobleza-Energy/LASMnemonicsID/discussions)
+
+---
+
+<p align="center">
+Made with ❤️ by <a href="https://nobleza-energy.com">Nobleza Energy</a>
+</p>
{lasmnemonicsid-0.0.3rc0 → lasmnemonicsid-0.0.5}/pyproject.toml
@@ -4,8 +4,8 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "lasmnemonicsid"
-version = "0.0.3rc0"
-description = "Well log mnemonic identification using lasio and dlisio to load LAS/DLIS files into DataFrames"
+version = "0.0.5"
+description = "Well log mnemonic identification using lasio and dlisio to load LAS/DLIS/ASCII files into DataFrames"
 readme = "README.md"
 authors = [
     {name = "Nobleza Energy", email = "info@nobleza-energy.com"}
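The only pyproject.toml changes are the version bump to 0.0.5 and the description broadened to mention ASCII support. After upgrading, a quick way to confirm which release is actually installed, using only the standard-library importlib.metadata module (nothing package-specific is assumed):

```python
from importlib.metadata import version

# Report the installed distribution version.
print(version("lasmnemonicsid"))  # expect "0.0.5" once the new release is installed
```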
lasmnemonicsid-0.0.5/src/LASMnemonicsID/ASCII/ASCII.py
@@ -0,0 +1,134 @@
+
+import LASMnemonicsID.utils.mnemonics as mnm
+from LASMnemonicsID.utils.mnemonics import (
+    gamma_names,
+    sp_names,
+    caliper_names,
+    deepres_names,
+    rxo_names,
+    density_names,
+    density_correction_names,
+    neutron_names,
+    dtc_names,
+    dts_names,
+    pe_names,
+)
+import os
+import pandas as pd
+from pathlib import Path
+
+# Import helper functions from LAS module
+from ..LAS.LAS import create_mnemonic_dict, _standardize_all_curves
+
+
+def parseASCII(input_path, verbose=True, preferred_names=None, depth_col="DEPTH", delimiter=","):
+    """
+    Parse ASCII/CSV/TXT well log file or all in directory → DataFrame or {filename: df}.
+
+    Args:
+        input_path (str/Path): ASCII/CSV/TXT file or directory
+        verbose (bool): Print info
+        preferred_names (dict, optional): Mapping of curve types to preferred column names.
+            Example: {"deepres": "RT", "gamma": "GR"}
+            If not provided, defaults to standard petrophysical names.
+        depth_col (str): Name of depth column (default: "DEPTH")
+        delimiter (str): CSV delimiter (default: ",")
+
+    Returns:
+        DataFrame (single) or dict {filename: df} (multiple/dir)
+    """
+    input_path = Path(input_path)
+
+    # Define default standard names
+    std_names = {
+        "gamma": "GR",
+        "sp": "SP",
+        "caliper": "CALI",
+        "deepres": "RT",
+        "rxo": "RXO",
+        "density": "RHOB",
+        "density_correction": "DRHO",
+        "neutron": "NPHI",
+        "dtc": "DT",
+        "dts": "DTS",
+        "pe": "PEF"
+    }
+
+    # Update with user preferences if provided
+    if preferred_names:
+        std_names.update(preferred_names)
+
+    # All supported ASCII extensions (case-insensitive)
+    ascii_extensions = ['.csv', '.txt', '.asc', '.dat', '.ascii']
+
+    # Case 1: Single File
+    if input_path.is_file() and input_path.suffix.lower() in ascii_extensions:
+        df = _read_single_ascii(input_path, verbose, std_names, depth_col, delimiter)
+        return df if df is not None else None
+
+    # Case 2: Directory (Recursive) - CASE-INSENSITIVE
+    ascii_files = [f for f in input_path.rglob("*") if f.suffix.lower() in ascii_extensions]
+    if not ascii_files:
+        if verbose:
+            print(f"No ASCII/CSV files found in {input_path}")
+        return {}
+
+    ascii_dict = {}
+    for ascii_file in ascii_files:
+        df = _read_single_ascii(ascii_file, verbose, std_names, depth_col, delimiter)
+        if df is not None:
+            filename = ascii_file.name
+            ascii_dict[filename] = df
+
+    # Return single DF if only 1 file found, else dict
+    if len(ascii_dict) == 1:
+        return next(iter(ascii_dict.values()))
+
+    return ascii_dict
+
+
+def _read_single_ascii(ascii_file_path, verbose, std_names, depth_col, delimiter):
+    """Read single ASCII/CSV file to DataFrame and standardize ALL curves."""
+    try:
+        # Try reading the file
+        df = pd.read_csv(ascii_file_path, delimiter=delimiter)
+
+        if df.empty:
+            if verbose:
+                print(f"✗ Empty DataFrame: {ascii_file_path.name}")
+            return None
+
+        # Handle depth column (case-insensitive)
+        depth_cols = [col for col in df.columns if col.upper() == depth_col.upper()]
+        if depth_cols:
+            df.set_index(depth_cols[0], inplace=True)
+        else:
+            # Use first column as depth
+            df.set_index(df.columns[0], inplace=True)
+
+        # Ensure index is float
+        df.index = df.index.astype(float)
+        df.index.name = "DEPTH"
+
+        # Create fake las_data object for standardization
+        class FakeLASData:
+            pass
+
+        fake_las = FakeLASData()
+
+        # Standardize ALL curves (GR, RHOB, NPHI, etc.)
+        _standardize_all_curves(fake_las, df, std_names)
+
+        if verbose:
+            print(f"✓ {ascii_file_path.name}")
+        return df
+
+    except Exception as e:
+        if verbose:
+            print(f"✗ Error in {ascii_file_path.name}: {type(e).__name__}: {e}")
+        return None
+
+
+def _get_well_name(ascii_file_path):
+    """Extract well name from ASCII file (use filename)"""
+    return ascii_file_path.stem
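The new ASCII module above is what backs the parseASCII examples in the README: it reads the file with pandas, promotes the depth column to a float index named DEPTH, and then standardizes curve names. A minimal end-to-end sketch, assuming lasmnemonicsid 0.0.5 is installed and that parseASCII is re-exported from the top-level LASMnemonicsID package as the README shows; the CSV name, values, and alias choices are illustrative only:

```python
import pandas as pd

from LASMnemonicsID import parseASCII

# Build a tiny CSV with a depth column and two curve aliases (made-up values).
pd.DataFrame({
    "DEPTH": [1000.0, 1000.5, 1001.0],
    "gam":   [45.2, 50.1, 48.7],   # gamma-ray alias, listed under GR in the alias table
    "denb":  [2.45, 2.47, 2.50],   # bulk-density alias, listed under RHOB
}).to_csv("demo_log.csv", index=False)

df = parseASCII("demo_log.csv", depth_col="DEPTH", delimiter=",")
print(df.index.name)      # the depth column becomes the index, named "DEPTH"
print(list(df.columns))   # remaining curves, standardized per the Supported Mnemonics table
```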