lasmnemonicsid 0.0.2__py3-none-any.whl → 0.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,134 @@
1
+
2
+ import LASMnemonicsID.utils.mnemonics as mnm
3
+ from LASMnemonicsID.utils.mnemonics import (
4
+ gamma_names,
5
+ sp_names,
6
+ caliper_names,
7
+ deepres_names,
8
+ rxo_names,
9
+ density_names,
10
+ density_correction_names,
11
+ neutron_names,
12
+ dtc_names,
13
+ dts_names,
14
+ pe_names,
15
+ )
16
+ import os
17
+ import pandas as pd
18
+ from pathlib import Path
19
+
20
+ # Import helper functions from LAS module
21
+ from ..LAS.LAS import create_mnemonic_dict, _standardize_all_curves
22
+
23
+
24
def parseASCII(input_path, verbose=True, preferred_names=None, depth_col="DEPTH", delimiter=","):
    """
    Parse ASCII/CSV/TXT well log file or all in directory → DataFrame or {filename: df}.

    Args:
        input_path (str/Path): ASCII/CSV/TXT file or directory
        verbose (bool): Print info
        preferred_names (dict, optional): Mapping of curve types to preferred column names.
            Example: {"deepres": "RT", "gamma": "GR"}
            If not provided, defaults to standard petrophysical names.
        depth_col (str): Name of depth column (default: "DEPTH")
        delimiter (str): CSV delimiter (default: ",")

    Returns:
        DataFrame (single) or dict {filename: df} (multiple/dir).
        Returns None for a single unreadable file, {} for a directory
        with no supported files.
    """
    input_path = Path(input_path)

    # Default standard output names per curve type; user preferences override.
    std_names = {
        "gamma": "GR",
        "sp": "SP",
        "caliper": "CALI",
        "deepres": "RT",
        "rxo": "RXO",
        "density": "RHOB",
        "density_correction": "DRHO",
        "neutron": "NPHI",
        "dtc": "DT",
        "dts": "DTS",
        "pe": "PEF",
    }
    if preferred_names:
        std_names.update(preferred_names)

    # All supported ASCII extensions (matched case-insensitively);
    # a set gives O(1) membership tests.
    ascii_extensions = {'.csv', '.txt', '.asc', '.dat', '.ascii'}

    # Case 1: single file
    if input_path.is_file() and input_path.suffix.lower() in ascii_extensions:
        # _read_single_ascii already returns None on failure
        return _read_single_ascii(input_path, verbose, std_names, depth_col, delimiter)

    # Case 2: directory (recursive, case-insensitive extensions).
    # Sort for a deterministic processing/return order; Path.rglob order
    # is filesystem-dependent.
    ascii_files = sorted(f for f in input_path.rglob("*") if f.suffix.lower() in ascii_extensions)
    if not ascii_files:
        if verbose:
            print(f"No ASCII/CSV files found in {input_path}")
        return {}

    ascii_dict = {}
    for ascii_file in ascii_files:
        df = _read_single_ascii(ascii_file, verbose, std_names, depth_col, delimiter)
        if df is not None:
            ascii_dict[ascii_file.name] = df

    # Return single DF if only 1 file parsed successfully, else dict
    if len(ascii_dict) == 1:
        return next(iter(ascii_dict.values()))

    return ascii_dict
88
+
89
+
90
+ def _read_single_ascii(ascii_file_path, verbose, std_names, depth_col, delimiter):
91
+ """Read single ASCII/CSV file to DataFrame and standardize ALL curves."""
92
+ try:
93
+ # Try reading the file
94
+ df = pd.read_csv(ascii_file_path, delimiter=delimiter)
95
+
96
+ if df.empty:
97
+ if verbose:
98
+ print(f"✗ Empty DataFrame: {ascii_file_path.name}")
99
+ return None
100
+
101
+ # Handle depth column (case-insensitive)
102
+ depth_cols = [col for col in df.columns if col.upper() == depth_col.upper()]
103
+ if depth_cols:
104
+ df.set_index(depth_cols[0], inplace=True)
105
+ else:
106
+ # Use first column as depth
107
+ df.set_index(df.columns[0], inplace=True)
108
+
109
+ # Ensure index is float
110
+ df.index = df.index.astype(float)
111
+ df.index.name = "DEPTH"
112
+
113
+ # Create fake las_data object for standardization
114
+ class FakeLASData:
115
+ pass
116
+
117
+ fake_las = FakeLASData()
118
+
119
+ # Standardize ALL curves (GR, RHOB, NPHI, etc.)
120
+ _standardize_all_curves(fake_las, df, std_names)
121
+
122
+ if verbose:
123
+ print(f"✓ {ascii_file_path.name}")
124
+ return df
125
+
126
+ except Exception as e:
127
+ if verbose:
128
+ print(f"✗ Error in {ascii_file_path.name}: {type(e).__name__}: {e}")
129
+ return None
130
+
131
+
132
+ def _get_well_name(ascii_file_path):
133
+ """Extract well name from ASCII file (use filename)"""
134
+ return ascii_file_path.stem
@@ -0,0 +1,2 @@
1
+
2
+ from .ASCII import *
@@ -0,0 +1,150 @@
1
+
2
+ import LASMnemonicsID.utils.mnemonics as mnm
3
+ from LASMnemonicsID.utils.mnemonics import (
4
+ gamma_names,
5
+ sp_names,
6
+ caliper_names,
7
+ deepres_names,
8
+ rxo_names,
9
+ density_names,
10
+ density_correction_names,
11
+ neutron_names,
12
+ dtc_names,
13
+ dts_names,
14
+ pe_names,
15
+ )
16
+ import os
17
+ import pandas as pd
18
+ import dlisio
19
+ from pathlib import Path
20
+
21
+ # Import helper functions from LAS module
22
+ from ..LAS.LAS import create_mnemonic_dict, _standardize_all_curves
23
+
24
+
25
def parseDLIS(input_path, verbose=True, preferred_names=None):
    """
    Parse DLIS file or all in directory → DataFrame or {filename: df}.

    Args:
        input_path (str/Path): DLIS file or directory
        verbose (bool): Print info
        preferred_names (dict, optional): Mapping of curve types to preferred column names.
            Example: {"deepres": "RT", "gamma": "GR"}
            If not provided, defaults to standard petrophysical names.

    Returns:
        DataFrame (single) or dict {filename: df} (multiple/dir).
        Returns None for a single unreadable file, {} for a directory
        with no .dlis files.
    """
    input_path = Path(input_path)

    # Default standard output names per curve type; user preferences override.
    std_names = {
        "gamma": "GR",
        "sp": "SP",
        "caliper": "CALI",
        "deepres": "RT",
        "rxo": "RXO",
        "density": "RHOB",
        "density_correction": "DRHO",
        "neutron": "NPHI",
        "dtc": "DT",
        "dts": "DTS",
        "pe": "PEF",
    }
    if preferred_names:
        std_names.update(preferred_names)

    # Case 1: single file (case-insensitive extension)
    if input_path.is_file() and input_path.suffix.lower() == '.dlis':
        # _read_single_dlis already returns None on failure
        return _read_single_dlis(input_path, verbose, std_names)

    # Case 2: directory (recursive, case-insensitive extension).
    # Sort for a deterministic processing/return order; Path.rglob order
    # is filesystem-dependent.
    dlis_files = sorted(f for f in input_path.rglob("*") if f.suffix.lower() == '.dlis')
    if not dlis_files:
        if verbose:
            print(f"No DLIS files found in {input_path}")
        return {}

    dlis_dict = {}
    for dlis_file in dlis_files:
        df = _read_single_dlis(dlis_file, verbose, std_names)
        if df is not None:
            dlis_dict[dlis_file.name] = df

    # Return single DF if only 1 file parsed successfully, else dict
    if len(dlis_dict) == 1:
        return next(iter(dlis_dict.values()))

    return dlis_dict
84
+
85
+
86
def _read_single_dlis(dlis_file_path, verbose, std_names):
    """Read single DLIS file to DataFrame and standardize ALL curves.

    Args:
        dlis_file_path (Path): path to a .dlis file.
        verbose (bool): print per-file success/failure messages.
        std_names (dict): curve type -> standard column name mapping
            forwarded to _standardize_all_curves.

    Returns:
        DataFrame indexed by float depth, or None on any failure
        (unreadable file, no frames, empty data).

    Only the first logical file and its first frame are read; any further
    logical files/frames in the DLIS are ignored.
    """
    try:
        # dlisio unpacks to (first logical file, *remaining logical files)
        with dlisio.dlis.load(str(dlis_file_path)) as (f, *rest):
            if not f.frames:
                if verbose:
                    print(f"✗ No frames: {dlis_file_path.name}")
                return None

            # Use first frame (typically contains main log data)
            frame = f.frames[0]
            curves_data = frame.curves()

            # Get channel names (one column per channel)
            channels = [ch.name for ch in frame.channels]

            # Create DataFrame from the frame's curve data
            df = pd.DataFrame(curves_data, columns=channels)

            if df.empty:
                if verbose:
                    print(f"✗ Empty DataFrame: {dlis_file_path.name}")
                return None

            # Set depth index (typically first column or frame.index)
            if frame.index:
                index_name = frame.index
                if index_name in df.columns:
                    df.set_index(index_name, inplace=True)
                # NOTE(review): if frame.index names a channel absent from
                # df.columns, no index is assigned here and the positional
                # RangeIndex is treated as depth below — confirm intended.
            else:
                # Use first column as depth
                df.set_index(df.columns[0], inplace=True)

            # Ensure index is float depth
            df.index = df.index.astype(float)
            df.index.name = "DEPTH"

            # Create fake las_data object for standardization
            # (the las_data argument is not read by _standardize_all_curves)
            class FakeLASData:
                pass

            fake_las = FakeLASData()

            # Standardize ALL curves (GR, RHOB, NPHI, etc.)
            _standardize_all_curves(fake_las, df, std_names)

            if verbose:
                print(f"✓ {dlis_file_path.name}")
            return df

    except Exception as e:
        if verbose:
            print(f"✗ Error in {dlis_file_path.name}: {type(e).__name__}: {e}")
        return None
140
+
141
+
142
+ def _get_well_name(dlis_file_path):
143
+ """Extract well name from DLIS file"""
144
+ try:
145
+ with dlisio.dlis.load(str(dlis_file_path)) as (f, *rest):
146
+ if f.origins:
147
+ return str(f.origins[0].well_name).strip()
148
+ except:
149
+ pass
150
+ return dlis_file_path.stem
@@ -0,0 +1,2 @@
1
+
2
+ from .DLIS import parseDLIS
LASMnemonicsID/LAS/LAS.py CHANGED
@@ -20,11 +20,7 @@ from os.path import join
20
20
  from sys import stdout
21
21
  from pathlib import Path
22
22
 
23
-
24
-
25
-
26
-
27
- # Function that create the mnemonic dictionary
23
+ # Function that creates the mnemonic dictionary
28
24
  def create_mnemonic_dict(
29
25
  gamma_names,
30
26
  sp_names,
@@ -39,9 +35,8 @@ def create_mnemonic_dict(
39
35
  pe_names,
40
36
  ):
41
37
  """
42
- Function that create the mnemonic dictionary with the mnemonics per log type in the utils module
38
+ Function that creates the mnemonic dictionary with the mnemonics per log type.
43
39
  """
44
-
45
40
  mnemonic_dict = {
46
41
  "gamma": gamma_names,
47
42
  "sp": sp_names,
@@ -57,63 +52,82 @@ def create_mnemonic_dict(
57
52
  }
58
53
  return mnemonic_dict
59
54
 
60
-
61
-
62
- def parseLAS(directory_path, verbose=True):
55
def parseLAS(input_path, verbose=True, preferred_names=None):
    """
    Parse LAS file or all in directory → DataFrame or {filename: df}.

    Args:
        input_path (str/Path): LAS file or directory
        verbose (bool): Print info
        preferred_names (dict, optional): Mapping of curve types to preferred column names and preferred original columns.
            Example: {"deepres": "RT", "deepres_preferred_original": "AT90", "gamma": "GR"}
            If not provided, defaults to standard petrophysical names.

    Returns:
        DataFrame (single) or dict {filename: df} (multiple/dir).
        Returns None for a single unreadable file, {} for a directory
        with no .las files.
    """
    input_path = Path(input_path)

    # Default standard output names per curve type; user preferences override.
    std_names = {
        "gamma": "GR",
        "sp": "SP",
        "caliper": "CALI",
        "deepres": "RT",
        "rxo": "RXO",
        "density": "RHOB",
        "density_correction": "DRHO",
        "neutron": "NPHI",
        "dtc": "DT",
        "dts": "DTS",
        "pe": "PEF",
    }
    if preferred_names:
        std_names.update(preferred_names)

    # Case 1: single file (case-insensitive extension)
    if input_path.is_file() and input_path.suffix.lower() == '.las':
        # _read_single_las already returns None on failure
        return _read_single_las(input_path, verbose, std_names)

    # Case 2: directory (recursive). Match the extension case-insensitively
    # (.las/.LAS) — rglob("*.las") misses upper-case extensions, contradicting
    # the documented behavior and the DLIS/ASCII parsers. Sort for a
    # deterministic processing/return order.
    las_files = sorted(f for f in input_path.rglob("*") if f.suffix.lower() == '.las')
    if not las_files:
        if verbose:
            print(f"No LAS files found in {input_path}")
        return {}

    las_dict = {}
    for las_file in las_files:
        df = _read_single_las(las_file, verbose, std_names)
        if df is not None:
            las_dict[las_file.name] = df

    # Return single DF if only 1 file parsed successfully, else dict
    if len(las_dict) == 1:
        return next(iter(las_dict.values()))

    return las_dict
101
114
 
102
- def _read_single_las(las_file_path, verbose):
103
- """Read single LAS file to DataFrame"""
115
+ def _read_single_las(las_file_path, verbose, std_names):
116
+ """Read single LAS file to DataFrame and standardize ALL curves."""
104
117
  try:
105
118
  las_data = lasio.read(las_file_path)
106
119
  df = las_data.df()
120
+
107
121
  if df is None or df.empty:
108
122
  if verbose:
109
123
  print(f"✗ Empty DataFrame: {las_file_path.name}")
110
124
  return None
111
125
 
126
+ # Ensure index is depth (float)
112
127
  df.index = df.index.astype(float)
113
- # df.dropna(inplace=True)
114
128
 
115
- # Standardize GR curve
116
- _standardize_gr_curve(las_data, df)
129
+ # Standardize ALL curves (GR, RHOB, NPHI, etc.)
130
+ _standardize_all_curves(las_data, df, std_names)
117
131
 
118
132
  if verbose:
119
133
  print(f"✓ {las_file_path.name}")
@@ -127,7 +141,6 @@ def _read_single_las(las_file_path, verbose):
127
141
  print(f"✗ Error in {las_file_path.name}: {type(e).__name__}: {e}")
128
142
  return None
129
143
 
130
-
131
144
  def _get_well_name(las_file_path):
132
145
  """Extract well name from LAS file"""
133
146
  try:
@@ -136,13 +149,46 @@ def _get_well_name(las_file_path):
136
149
  except:
137
150
  return las_file_path.stem
138
151
 
152
def _standardize_all_curves(las_data, df, std_names):
    """
    Rename ALL curves in the DataFrame to standard abbreviations
    based on the mnemonic dictionary.

    Args:
        las_data: source LAS object (unused; kept for interface compatibility
            with callers that pass lasio data or a placeholder).
        df (DataFrame): modified in place — one column per curve type is
            renamed to its standard name, remaining aliases are dropped.
        std_names (dict): curve type -> target column name, plus optional
            "<type>_preferred_original" entries selecting which alias wins
            when several are present.
    """
    # 1. Dictionary of known aliases per curve type
    mnem_dict = create_mnemonic_dict(
        gamma_names, sp_names, caliper_names, deepres_names, rxo_names,
        density_names, density_correction_names, neutron_names,
        dtc_names, dts_names, pe_names
    )

    # 2. For each curve type, find all aliases present in the file
    for curve_type, aliases in mnem_dict.items():
        lowered_aliases = {a.lower() for a in aliases}
        matching = [col for col in df.columns if col.lower() in lowered_aliases]
        if not matching:
            continue

        # Standard name if provided, otherwise the curve type upper-cased
        target_name = std_names.get(curve_type, curve_type.upper())

        # Pick the column to keep:
        #   1) the user's preferred original column, matched case-insensitively
        #      (a case-sensitive check would silently ignore e.g. "at90" vs "AT90");
        #   2) a column already carrying the target name;
        #   3) otherwise the first matching alias.
        preferred_original = std_names.get(f"{curve_type}_preferred_original")
        chosen = None
        if preferred_original:
            pref_lower = preferred_original.lower()
            chosen = next((col for col in matching if col.lower() == pref_lower), None)
        if chosen is None and target_name in matching:
            chosen = target_name
        if chosen is None:
            chosen = matching[0]

        # Rename the chosen alias unless it already carries the target name.
        # Drop any pre-existing target column first — renaming onto an existing
        # label would silently create duplicate columns in pandas.
        if chosen != target_name:
            if target_name in df.columns:
                df.drop(columns=[target_name], inplace=True)
            df.rename(columns={chosen: target_name}, inplace=True)

        # Remove all other aliases of this curve type
        for col in matching:
            if col not in (chosen, target_name) and col in df.columns:
                df.drop(columns=[col], inplace=True)
194
 
@@ -1,12 +1,13 @@
1
1
  from .LAS import (
2
2
  parseLAS,
3
3
  create_mnemonic_dict,
4
- _read_single_las,
5
4
  _get_well_name,
6
- _standardize_gr_curve
5
+ _read_single_las # Keep helpers if needed
7
6
  )
8
7
 
9
8
  __all__ = [
10
- 'parseLAS',
11
- 'create_mnemonic_dict'
9
+ "parseLAS",
10
+ "create_mnemonic_dict",
11
+ "_get_well_name",
12
+ "_read_single_las"
12
13
  ]
@@ -1,16 +1,15 @@
1
-
2
1
# src/LASMnemonicsID/__init__.py

"""LASMnemonicsID package for well log analysis."""

from . import LAS
from . import DLIS
from . import ASCII
from . import utils

from .LAS.LAS import *
from .DLIS.DLIS import *
from .ASCII.ASCII import *
from .utils.mnemonics import *

# Keep in sync with the distribution version in pyproject/package metadata
# (the published wheel is 0.0.5; this constant previously lagged at 0.0.1).
__version__ = "0.0.5"
1
+ Metadata-Version: 2.4
2
+ Name: lasmnemonicsid
3
+ Version: 0.0.5
4
+ Summary: Well log mnemonic identification using lasio and dlisio to load LAS/DLIS/ASCII files into DataFrames
5
+ Author-email: Nobleza Energy <info@nobleza-energy.com>
6
+ License: MIT
7
+ Project-URL: Homepage, https://nobleza-energy.github.io/LASMnemonicsID
8
+ Project-URL: Repository, https://github.com/Nobleza-Energy/LASMnemonicsID
9
+ Project-URL: Documentation, https://nobleza-energy.github.io/LASMnemonicsID/
10
+ Project-URL: Bug Tracker, https://github.com/Nobleza-Energy/LASMnemonicsID/issues
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: Intended Audience :: Science/Research
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Operating System :: OS Independent
15
+ Classifier: Programming Language :: Python :: 3
16
+ Classifier: Programming Language :: Python :: 3.10
17
+ Classifier: Programming Language :: Python :: 3.11
18
+ Classifier: Programming Language :: Python :: 3.12
19
+ Classifier: Topic :: Scientific/Engineering
20
+ Requires-Python: >=3.10
21
+ Description-Content-Type: text/markdown
22
+ License-File: LICENSE
23
+ Requires-Dist: numpy>=1.21.0
24
+ Requires-Dist: pandas>=2.0.1
25
+ Requires-Dist: lasio>=0.30
26
+ Requires-Dist: dlisio>=1.0.0
27
+ Provides-Extra: docs
28
+ Requires-Dist: mkdocs>=1.5.0; extra == "docs"
29
+ Requires-Dist: mkdocs-material>=9.0.0; extra == "docs"
30
+ Requires-Dist: mkdocstrings[python]>=0.24.0; extra == "docs"
31
+ Provides-Extra: dev
32
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
33
+ Requires-Dist: black>=23.0.0; extra == "dev"
34
+ Requires-Dist: isort>=5.12.0; extra == "dev"
35
+ Requires-Dist: flake8>=6.0.0; extra == "dev"
36
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
37
+ Dynamic: license-file
38
+
39
+ # LASMnemonicsID
40
+
41
+ <p align="center">
42
+ <img src="https://github.com/Nobleza-Energy/LASMnemonicsID/blob/e44bfb606fef5cfc9c3df6e41c3d1bd0d7bb08ae/logo.png?raw=true" alt="LASMnemonicsID Logo" width="200"/>
43
+ </p>
44
+
45
+ <p align="center">
46
+ <b>Well log mnemonic identification and standardization for LAS, DLIS, and ASCII formats</b>
47
+ </p>
48
+
49
+ <p align="center">
50
+ <a href="https://pypi.org/project/lasmnemonicsid/"><img src="https://img.shields.io/pypi/v/lasmnemonicsid.svg" alt="PyPI"></a>
51
+ <a href="https://pypi.org/project/lasmnemonicsid/"><img src="https://img.shields.io/pypi/pyversions/lasmnemonicsid.svg" alt="Python Versions"></a>
52
+ <a href="https://github.com/Nobleza-Energy/LASMnemonicsID/blob/main/LICENSE"><img src="https://img.shields.io/github/license/Nobleza-Energy/LASMnemonicsID.svg" alt="License"></a>
53
+ </p>
54
+
55
+ ---
56
+
57
+ ## Features
58
+
59
+ - **Multi-format support**: LAS, DLIS, ASCII/CSV/TXT/DAT
60
+ - **Automatic mnemonic standardization**: GR, RHOB, NPHI, DT, SP, CALI, RT, etc.
61
+ - **Batch processing**: Parse entire directories recursively
62
+ - **Customizable naming**: Override default standard names
63
+ - **Case-insensitive extensions**: Works with .las/.LAS, .dlis/.DLIS, .csv/.CSV, etc.
64
+ - **Pandas integration**: Returns clean DataFrames ready for analysis
65
+
66
+ ---
67
+
68
+ ## Installation
69
+
70
+ ```bash
71
+ pip install lasmnemonicsid
72
+ ```
73
+
74
+ This installs support for **all formats** (LAS, DLIS, ASCII/CSV/TXT).
75
+
76
+ ---
77
+
78
+ ## Quick Start
79
+
80
+ ### LAS Files
81
+
82
+ ```python
83
+ from LASMnemonicsID import parseLAS
84
+
85
+ # Parse single LAS file
86
+ df = parseLAS("well.las")
87
+ print(df.head())
88
+
89
+ # Parse directory
90
+ data = parseLAS("/path/to/las/files/")
91
+ for filename, df in data.items():
92
+ print(f"{filename}: {df.shape}")
93
+ ```
94
+
95
+ ### DLIS Files
96
+
97
+ ```python
98
+ from LASMnemonicsID import parseDLIS
99
+
100
+ # Parse single DLIS file
101
+ df = parseDLIS("well.dlis")
102
+ print(df.columns)
103
+
104
+ # Parse directory
105
+ data = parseDLIS("/path/to/dlis/files/")
106
+ ```
107
+
108
+ ### ASCII/CSV/TXT Files
109
+
110
+ ```python
111
+ from LASMnemonicsID import parseASCII
112
+
113
+ # Parse CSV
114
+ df = parseASCII("well_log.csv", depth_col="DEPTH")
115
+
116
+ # Parse tab-separated TXT
117
+ df = parseASCII("well_log.txt", delimiter="\t")
118
+
119
+ # Parse directory
120
+ data = parseASCII("/path/to/csv/files/")
121
+ ```
122
+
123
+ ---
124
+
125
+ ## Advanced Usage
126
+
127
+ ### Custom Preferred Names
128
+
129
+ ```python
130
+ preferred = {
131
+ "deepres": "RT",
132
+ "deepres_preferred_original": "AT90",
133
+ "gamma": "GR"
134
+ }
135
+
136
+ df = parseLAS("well.las", preferred_names=preferred)
137
+ ```
138
+
139
+ ### Batch Processing
140
+
141
+ ```python
142
+ from pathlib import Path
143
+
144
+ dir_path = Path("/data/wells/")
145
+ data = parseLAS(dir_path, verbose=True, preferred_names=preferred)
146
+
147
+ for fname, df in data.items():
148
+ print(f"{fname}: {df.shape}")
149
+ print(df.head(3))
150
+ ```
151
+
152
+ ### Mixed Format Directories
153
+
154
+ ```python
155
+ las_data = parseLAS("/data/wells/")
156
+ dlis_data = parseDLIS("/data/wells/")
157
+ ascii_data = parseASCII("/data/wells/")
158
+
159
+ all_data = {**las_data, **dlis_data, **ascii_data}
160
+ ```
161
+
162
+ ---
163
+
164
+ ## Supported Mnemonics
165
+
166
+ The package automatically standardizes these curve types:
167
+
168
+ | Curve Type | Standard Name | Example Aliases |
169
+ |------------|---------------|-------------|
170
+ | Gamma Ray | `GR` | gr, cggr, cgr, gam, gamma, gammaray, grc, grd, hgr, sgr, lgr, pgr |
171
+ | Spontaneous Potential | `SP` | sp, idsp, spr, spl, spdl, spdhp, spc, sp0, sp1, cgsp, dlsp |
172
+ | Caliper | `CALI` | caliper, calip, cal, dcal, acal, cala, cald, cale, calh, hcal, xcal, ycal |
173
+ | Deep Resistivity | `RT` | rt, rtao, rt90, ild, idph, rild, rd, ae90, at90, atrt, lld, lldc, res, resd |
174
+ | Shallow Resistivity | `RXO` | rxo, rxoz, msfl, mcfl, sflcc, mgl, m1rx, r40o, aht10 |
175
+ | Density | `RHOB` | rhob, rhoz, den, denb, denc, hrho, hrhob, zden, hden, denf, denn |
176
+ | Density Correction | `DRHO` | dcor, dcorr, dc, decr, drh, zcor, zcorr, hhdr, denscorr |
177
+ | Neutron Porosity | `NPHI` | cn, phin, cnc, cns, hnphi, nphi, npor, cncc, nprl, neut, neutpor |
178
+ | Sonic (Compressional) | `DT` | dt, dtc, dtco, dtcomp, deltat, slow, slowness, tt, ac, acco, delt |
179
+ | Sonic (Shear) | `DTS` | dts, dtsh, dtsm, dtsc, dtsd, dtsqi, dtshear, deltas, tts, stt |
180
+ | Photoelectric Factor | `PEF` | pe, pef, pefz, pdpe, pedf, pedn, hpedn, pe2, pef8, lpe |
181
+
182
+
183
+ ---
184
+
185
+ ## Testing
186
+
187
+ ```bash
188
+ pytest tests/ -v
189
+ pytest tests/test_las.py -v
190
+ pytest tests/test_dlis.py -v
191
+ pytest tests/test_ascii.py -v
192
+ ```
193
+
194
+ ---
195
+
196
+ ## API Reference
197
+
198
+ ### parseLAS(input_path, verbose=True, preferred_names=None)
199
+
200
+ Parse LAS file(s) and standardize mnemonics.
201
+
202
+ **Parameters:**
203
+ - input_path (str/Path): LAS file or directory
204
+ - verbose (bool): Print parsing info
205
+ - preferred_names (dict): Custom name mappings
206
+
207
+ **Returns:** DataFrame (single file) or dict (multiple files)
208
+
209
+ ### parseDLIS(input_path, verbose=True, preferred_names=None)
210
+
211
+ Parse DLIS file(s) and standardize mnemonics.
212
+
213
+ **Parameters:**
214
+ - input_path (str/Path): DLIS file or directory
215
+ - verbose (bool): Print parsing info
216
+ - preferred_names (dict): Custom name mappings
217
+
218
+ **Returns:** DataFrame (single file) or dict (multiple files)
219
+
220
+ ### parseASCII(input_path, verbose=True, preferred_names=None, depth_col="DEPTH", delimiter=",")
221
+
222
+ Parse ASCII/CSV/TXT file(s) and standardize mnemonics.
223
+
224
+ **Parameters:**
225
+ - input_path (str/Path): ASCII file or directory
226
+ - verbose (bool): Print parsing info
227
+ - preferred_names (dict): Custom name mappings
228
+ - depth_col (str): Name of depth column
229
+ - delimiter (str): Field separator
230
+
231
+ **Returns:** DataFrame (single file) or dict (multiple files)
232
+
233
+ ---
234
+
235
+ ## How to Cite
236
+
237
+ **APA**
238
+
239
+ > Nobleza Energy. (2026). LASMnemonicsID: Well log mnemonic identification for LAS, DLIS, and ASCII formats [Software]. GitHub. https://github.com/Nobleza-Energy/LASMnemonicsID
240
+
241
+ **BibTeX**
242
+
243
+ ```bibtex
244
+ @software{LASMnemonicsID,
245
+ author = {Nobleza Energy},
246
+ title = {LASMnemonicsID: Well log mnemonic identification for LAS, DLIS, and ASCII formats},
247
+ year = {2026},
248
+ publisher = {GitHub},
249
+ url = {https://github.com/Nobleza-Energy/LASMnemonicsID}
250
+ }
251
+ ```
252
+
253
+ ---
254
+
255
+ ## License
256
+
257
+ MIT License - see [LICENSE](LICENSE) file.
258
+
259
+ ---
260
+
261
+ ## Contributing
262
+
263
+ Contributions welcome! Submit a Pull Request.
264
+
265
+ ---
266
+
267
+ ## Support
268
+
269
+ - **Issues:** [GitHub Issues](https://github.com/Nobleza-Energy/LASMnemonicsID/issues)
270
+ - **Discussions:** [GitHub Discussions](https://github.com/Nobleza-Energy/LASMnemonicsID/discussions)
271
+
272
+ ---
273
+
274
+ <p align="center">
275
+ Made with ❤️ by <a href="https://nobleza-energy.com">Nobleza Energy</a>
276
+ </p>
@@ -0,0 +1,14 @@
1
+ LASMnemonicsID/__init__.py,sha256=gsIy4CT1aZrfdm7yngD5CyGx0c0VrUS-2kL9aPMWqNU,294
2
+ LASMnemonicsID/ASCII/ASCII.py,sha256=55_KyVai0W6WABAav-tqx5XvZOOrnxYspjoAbcbF0ws,4260
3
+ LASMnemonicsID/ASCII/__init__.py,sha256=hvl0pDTh7kEEGMi2D36hw17ftnWY6SOM3IkxBNcaaLQ,22
4
+ LASMnemonicsID/DLIS/DLIS.py,sha256=byYJoKAiahlKUORt5c_iZZ4aQmjdiDIXLp2Qr7Hbiyk,4663
5
+ LASMnemonicsID/DLIS/__init__.py,sha256=UVM8tn6cpFT8H-wl7mqMDiE8GObkIWbYR-FGq4Oqoj8,29
6
+ LASMnemonicsID/LAS/LAS.py,sha256=gxeLlARZJV3ECxIQaoqO8YeOUfnlMUBKXqRFY-JivCs,6048
7
+ LASMnemonicsID/LAS/__init__.py,sha256=dTM87nn0zNUaKp29HocOODJT_-VM1CZED9Ar_FSOr-4,232
8
+ LASMnemonicsID/utils/__init__.py,sha256=ree81DUTsdjXfO3h-q7YyNrV6mTIKSGxgWPWGGTSVU0,1388
9
+ LASMnemonicsID/utils/mnemonics.py,sha256=VU25CXmQvUo0sS3Y6kG_G7KwRE2CiuoJeC7LT6FmNzg,7283
10
+ lasmnemonicsid-0.0.5.dist-info/licenses/LICENSE,sha256=6r9JOUiNw1exfcc0jlOi50fDStidfqyQ2PAYQh4lzEQ,1071
11
+ lasmnemonicsid-0.0.5.dist-info/METADATA,sha256=F9TC9atDWz404-_5_4V6L73WTkYJrv1iIbhr6osc2xA,7852
12
+ lasmnemonicsid-0.0.5.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
13
+ lasmnemonicsid-0.0.5.dist-info/top_level.txt,sha256=bdt6EHMrwbzFA9jA_xbTqRrOV6T4zDs3QojjEz8HSBk,15
14
+ lasmnemonicsid-0.0.5.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: setuptools (80.10.2)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5
 
@@ -1,83 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: lasmnemonicsid
3
- Version: 0.0.2
4
- Summary: Well log mnemonic identification using lasio and dlisio to load LAS/DLIS files into DataFrames
5
- Author-email: Nobleza Energy <info@nobleza-energy.com>
6
- License: MIT
7
- Project-URL: Homepage, https://nobleza-energy.github.io/LASMnemonicsID
8
- Project-URL: Repository, https://github.com/Nobleza-Energy/LASMnemonicsID
9
- Project-URL: Documentation, https://nobleza-energy.github.io/LASMnemonicsID/
10
- Project-URL: Bug Tracker, https://github.com/Nobleza-Energy/LASMnemonicsID/issues
11
- Classifier: Development Status :: 3 - Alpha
12
- Classifier: Intended Audience :: Science/Research
13
- Classifier: License :: OSI Approved :: MIT License
14
- Classifier: Operating System :: OS Independent
15
- Classifier: Programming Language :: Python :: 3
16
- Classifier: Programming Language :: Python :: 3.10
17
- Classifier: Programming Language :: Python :: 3.11
18
- Classifier: Programming Language :: Python :: 3.12
19
- Classifier: Topic :: Scientific/Engineering
20
- Requires-Python: >=3.10
21
- Description-Content-Type: text/markdown
22
- License-File: LICENSE
23
- Requires-Dist: numpy>=1.21.0
24
- Requires-Dist: pandas>=2.0.1
25
- Requires-Dist: lasio>=0.30
26
- Requires-Dist: dlisio>=1.0.0
27
- Provides-Extra: docs
28
- Requires-Dist: mkdocs>=1.5.0; extra == "docs"
29
- Requires-Dist: mkdocs-material>=9.0.0; extra == "docs"
30
- Requires-Dist: mkdocstrings[python]>=0.24.0; extra == "docs"
31
- Provides-Extra: dev
32
- Requires-Dist: pytest>=7.0.0; extra == "dev"
33
- Requires-Dist: black>=23.0.0; extra == "dev"
34
- Requires-Dist: isort>=5.12.0; extra == "dev"
35
- Requires-Dist: flake8>=6.0.0; extra == "dev"
36
- Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
37
- Dynamic: license-file
38
-
39
-
40
- <p align="center">
41
- <img src="https://raw.githubusercontent.com/Nobleza-Energy/LASMnemonicsID/main/logo.png" alt="LASMnemonicsID Logo" width="200"/>
42
- </p>
43
-
44
- <h1 align="center">LASMnemonicsID</h1>
45
-
46
- <p align="center">
47
- <b>Well log mnemonic identification using lasio and dlisio</b>
48
- </p>
49
-
50
- <p align="center">
51
- <a href="https://pypi.org/project/lasmnemonicsid/"><img src="https://img.shields.io/pypi/v/lasmnemonicsid.svg" alt="PyPI"></a>
52
- <a href="https://pypi.org/project/lasmnemonicsid/"><img src="https://img.shields.io/pypi/pyversions/lasmnemonicsid.svg" alt="Python Versions"></a>
53
- <a href="https://github.com/Nobleza-Energy/LASMnemonicsID/blob/main/LICENSE"><img src="https://img.shields.io/github/license/Nobleza-Energy/LASMnemonicsID.svg" alt="License"></a>
54
- </p>
55
-
56
- ---
57
-
58
- ## 📦 Installation
59
-
60
- ```bash
61
- pip install lasmnemonicsid
62
- ```
63
-
64
-
65
-
66
- ## 🚀 QuickStart
67
- ```
68
- from LASMnemonicsID.LAS import parseLAS
69
-
70
- # Load LAS file
71
- df = parseLAS("your_well.las")
72
- print(df.head())
73
- ```
74
-
75
- ## 🧪 Test with your Data
76
- ```
77
- from LASMnemonicsID.LAS import parseLAS
78
-
79
- # Load and inspect
80
- df = parseLAS("path/to/well.las")
81
- print(f"✅ {len(df)} rows, {len(df.columns)} curves")
82
- print(df.columns.tolist())
83
- ```
@@ -1,11 +0,0 @@
1
- LASMnemonicsID/__init__.py,sha256=IjJHoiHWr1CfP3K01xW61UhnJYP_9LOOaCqJnhLFlPc,309
2
- LASMnemonicsID/DLIS/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
- LASMnemonicsID/LAS/LAS.py,sha256=7WRetjZG9-M39KNxpUisAkbh8mv60BWW-uTEUkwvkF8,3849
4
- LASMnemonicsID/LAS/__init__.py,sha256=4cNesZ581Cl_PN_c4DHTGfkc5HiKtnbhP81vJ6-3nPM,187
5
- LASMnemonicsID/utils/__init__.py,sha256=ree81DUTsdjXfO3h-q7YyNrV6mTIKSGxgWPWGGTSVU0,1388
6
- LASMnemonicsID/utils/mnemonics.py,sha256=VU25CXmQvUo0sS3Y6kG_G7KwRE2CiuoJeC7LT6FmNzg,7283
7
- lasmnemonicsid-0.0.2.dist-info/licenses/LICENSE,sha256=6r9JOUiNw1exfcc0jlOi50fDStidfqyQ2PAYQh4lzEQ,1071
8
- lasmnemonicsid-0.0.2.dist-info/METADATA,sha256=9XdXauxiGe80GgPo9GWFrbAjSGxoyOibBPmGPmJ-WLk,2811
9
- lasmnemonicsid-0.0.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
10
- lasmnemonicsid-0.0.2.dist-info/top_level.txt,sha256=bdt6EHMrwbzFA9jA_xbTqRrOV6T4zDs3QojjEz8HSBk,15
11
- lasmnemonicsid-0.0.2.dist-info/RECORD,,