cdiscbuilder 0.2.9__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cdiscbuilder-0.2.9/LICENSE +21 -0
- cdiscbuilder-0.2.9/MANIFEST.in +2 -0
- cdiscbuilder-0.2.9/PKG-INFO +10 -0
- cdiscbuilder-0.2.9/README.md +132 -0
- cdiscbuilder-0.2.9/pyproject.toml +27 -0
- cdiscbuilder-0.2.9/setup.cfg +4 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/__init__.py +1 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/cli.py +42 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/engine/__init__.py +0 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/engine/classes/__init__.py +0 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/engine/classes/finding.py +113 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/engine/classes/general.py +240 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/engine/config.py +57 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/engine/functions.py +49 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/engine/processor.py +43 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/engine/validate.py +99 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/odm.py +103 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/sdtm.py +30 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/AE.yaml +25 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/CM.yaml +26 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/DM.yaml +51 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/DS.yaml +54 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/FA.yaml +32 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/IE.yaml +35 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/PE.yaml +25 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/SV.yaml +23 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/VS.yaml +38 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/defaults.yaml +1 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder/specs/schema.yaml +63 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder.egg-info/PKG-INFO +10 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder.egg-info/SOURCES.txt +33 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder.egg-info/dependency_links.txt +1 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder.egg-info/entry_points.txt +2 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder.egg-info/requires.txt +2 -0
- cdiscbuilder-0.2.9/src/cdiscbuilder.egg-info/top_level.txt +1 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Ming-Chun Chen
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: cdiscbuilder
|
|
3
|
+
Version: 0.2.9
|
|
4
|
+
Summary: A package to convert ODM XML to SDTM/ADaM Datasets
|
|
5
|
+
Author-email: Ming-Chun Chen <hellomingchun@gmail.com>
|
|
6
|
+
Requires-Python: >=3.8
|
|
7
|
+
License-File: LICENSE
|
|
8
|
+
Requires-Dist: pandas
|
|
9
|
+
Requires-Dist: pyyaml
|
|
10
|
+
Dynamic: license-file
|
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
# CDISC Builder
|
|
2
|
+
|
|
3
|
+
**`cdiscbuilder`** is a Python package designed to simplify the transformation of clinical trial data from **ODM (Operational Data Model)** XML format into **CDISC SDTM (Study Data Tabulation Model)** and **ADaM (Analysis Data Model)** datasets.
|
|
4
|
+
|
|
5
|
+
It provides a flexible, configuration-driven approach to data mapping, allowing users to define rules using simple YAML files or Python dictionaries without hardcoding complex logic.
|
|
6
|
+
|
|
7
|
+
## Key Features
|
|
8
|
+
|
|
9
|
+
- **ODM XML Parsing**: Efficiently parses CDISC ODM strings and files into workable dataframes.
|
|
10
|
+
- **Configurable Mappings**: Define your mapping rules (source columns, hardcoded values, custom logic) in YAML.
|
|
11
|
+
- **Schema Validation**: Ensures your configuration files adhere to strict standards before processing.
|
|
12
|
+
- **Metadata-Driven Findings**: Powerful processor for Findings domains (VS, LB, FA, etc.) using granular metadata.
|
|
13
|
+
- **Excel/Parquet Output**: Generates regulatory-compliant datasets in modern formats.
|
|
14
|
+
|
|
15
|
+
## Installation
|
|
16
|
+
|
|
17
|
+
```bash
|
|
18
|
+
pip install cdiscbuilder
|
|
19
|
+
```
|
|
20
|
+
|
|
21
|
+
## Quick Start
|
|
22
|
+
|
|
23
|
+
### 1. Command Line Interface
|
|
24
|
+
|
|
25
|
+
You can generate datasets directly from your terminal:
|
|
26
|
+
|
|
27
|
+
```bash
|
|
28
|
+
# Generate SDTM datasets from an ODM XML file
|
|
29
|
+
cdisc-sdtm --xml study_data.xml --output ./sdtm_data
|
|
30
|
+
```
|
|
31
|
+
|
|
32
|
+
### 2. Python API
|
|
33
|
+
|
|
34
|
+
```python
|
|
35
|
+
from cdiscbuilder.sdtm import create_sdtm_datasets
|
|
36
|
+
|
|
37
|
+
# Define paths
|
|
38
|
+
xml_file = "study_data.xml"
|
|
39
|
+
config_dir = "path/to/my/specs"
|
|
40
|
+
output_dir = "./sdtm_outputs"
|
|
41
|
+
|
|
42
|
+
# Generate Datasets
|
|
43
|
+
create_sdtm_datasets(config_dir, xml_file, output_dir)
|
|
44
|
+
```
|
|
45
|
+
|
|
46
|
+
## Configuration
|
|
47
|
+
|
|
48
|
+
The package comes with standard configurations for common domains (`DM`, `AE`, `VS`, etc.) in `src/cdiscbuilder/specs`. You can override these or add new ones by creating your own configuration directory.
|
|
49
|
+
|
|
50
|
+
### Example YAML (`DM.yaml`)
|
|
51
|
+
|
|
52
|
+
```yaml
|
|
53
|
+
DM:
|
|
54
|
+
- formoid: "FORM.DEMOG"
|
|
55
|
+
keys: ["StudyOID", "StudySubjectID"]
|
|
56
|
+
columns:
|
|
57
|
+
STUDYID:
|
|
58
|
+
source: StudyOID
|
|
59
|
+
type: str
|
|
60
|
+
USUBJID:
|
|
61
|
+
source: StudySubjectID
|
|
62
|
+
prefix: "PPT-"
|
|
63
|
+
type: str
|
|
64
|
+
AGE:
|
|
65
|
+
source: IT.AGE
|
|
66
|
+
type: int
|
|
68
|
+
SEX:
|
|
69
|
+
source: I_DEMOG_SEX
|
|
70
|
+
type: str
|
|
71
|
+
value_mapping:
|
|
72
|
+
"M": "Male"
|
|
73
|
+
"F": "Female"
|
|
74
|
+
|
|
75
|
+
```
|
|
76
|
+
### Finding Domains (Dynamic Mapping)
|
|
77
|
+
|
|
78
|
+
For domains like `IE`, `LB`, `FA` where many input items map to a single `Test Code` / `Test Name` pair, use `type: finding`.
|
|
79
|
+
|
|
80
|
+
```yaml
|
|
81
|
+
IE:
|
|
82
|
+
- type: finding
|
|
83
|
+
formoid: "F_ELIGIBILITY"
|
|
84
|
+
# Filter rows using Regex
|
|
85
|
+
item_group_regex: "IG_ELIGI_.*"
|
|
86
|
+
item_oid_regex: "I_ELIGI_.*"
|
|
87
|
+
|
|
88
|
+
columns:
|
|
89
|
+
# Extract part of the OID for the Short Code
|
|
90
|
+
IETESTCD:
|
|
91
|
+
source: ItemOID
|
|
92
|
+
regex_extract: "I_ELIGI_(.*)"
|
|
93
|
+
|
|
94
|
+
# Use Metadata from parsed XML for the Description
|
|
95
|
+
IETEST:
|
|
96
|
+
source: Metadata.Question
|
|
97
|
+
|
|
98
|
+
IEORRES:
|
|
99
|
+
source: Value
|
|
100
|
+
```
|
|
101
|
+
|
|
102
|
+
### Advanced Mapping Features
|
|
103
|
+
|
|
104
|
+
**Prefixing**:
|
|
105
|
+
```yaml
|
|
106
|
+
USUBJID:
|
|
107
|
+
source: StudySubjectID
|
|
108
|
+
prefix: "PPT-"
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
**Substring Extraction** (extracts chars 3-5 before mapping):
|
|
112
|
+
```yaml
|
|
113
|
+
SITEID:
|
|
114
|
+
source: FULL_ID
|
|
115
|
+
substring_start: 3
|
|
116
|
+
substring_length: 3
|
|
117
|
+
```
|
|
118
|
+
|
|
119
|
+
**Default Values**:
|
|
120
|
+
```yaml
|
|
121
|
+
CUSTOM_COL:
|
|
122
|
+
source: ORG_COL
|
|
123
|
+
value_mapping:
|
|
124
|
+
"A": "Alpha"
|
|
125
|
+
mapping_default: "Other" # used if not A
|
|
126
|
+
# mapping_default_source: "AnotherCol" # Fallback to column value
|
|
127
|
+
```
|
|
129
|
+
|
|
130
|
+
## License
|
|
131
|
+
|
|
132
|
+
[MIT License](LICENSE)
|
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
[build-system]
|
|
2
|
+
requires = ["setuptools>=61.0"]
|
|
3
|
+
build-backend = "setuptools.build_meta"
|
|
4
|
+
|
|
5
|
+
[project]
|
|
6
|
+
name = "cdiscbuilder"
|
|
7
|
+
version = "0.2.9"
|
|
8
|
+
description = "A package to convert ODM XML to SDTM/ADaM Datasets"
|
|
9
|
+
authors = [{name = "Ming-Chun Chen", email = "hellomingchun@gmail.com"}]
|
|
10
|
+
dependencies = [
|
|
11
|
+
"pandas",
|
|
12
|
+
"pyyaml"
|
|
13
|
+
]
|
|
14
|
+
requires-python = ">=3.8"
|
|
15
|
+
|
|
16
|
+
[project.scripts]
|
|
17
|
+
cdisc-sdtm = "cdiscbuilder.cli:main"
|
|
18
|
+
|
|
19
|
+
[tool.setuptools.package-data]
|
|
20
|
+
cdiscbuilder = ["specs/*.yaml", "engine/classes/*.py"]
|
|
21
|
+
|
|
22
|
+
[tool.setuptools.packages.find]
|
|
23
|
+
where = ["src"]
|
|
24
|
+
|
|
25
|
+
[tool.mypy]
|
|
26
|
+
ignore_missing_imports = true
|
|
27
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
__version__ = "0.2.2"
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import argparse
|
|
3
|
+
from .odm import parse_odm_to_long_df
|
|
4
|
+
from .sdtm import create_sdtm_datasets
|
|
5
|
+
|
|
6
|
+
def main():
    """Command-line entry point: convert an ODM XML export to SDTM datasets.

    Pipeline:
      1. Parse the ODM XML into a long-format DataFrame and persist it as CSV.
      2. Feed that CSV plus a directory of YAML domain configs to
         ``create_sdtm_datasets`` which writes the SDTM outputs.

    Exits with status 1 (via SystemExit) when either step fails, so shell
    scripts and CI can detect the failure; previously errors were printed
    but the process still exited 0.
    """
    parser = argparse.ArgumentParser(description="Convert ODM XML to SDTM Datasets")

    # Default spec directory ships inside the installed package.
    current_dir = os.path.dirname(__file__)
    default_config_path = os.path.join(current_dir, "specs")

    parser.add_argument("--xml", required=True, help="Path to input ODM XML file")
    parser.add_argument("--csv", default="odm_long.csv", help="Path to intermediate long CSV file")
    parser.add_argument("--configs", default=default_config_path, help="Path to SDTM configuration directory")
    parser.add_argument("--output", default="sdtm_output", help="Path to output SDTM directory")

    args = parser.parse_args()

    # Step 1: ODM XML -> Long CSV
    print(f"--- Step 1: Parsing ODM XML from {args.xml} ---")
    try:
        df = parse_odm_to_long_df(args.xml)
        print(f"Parsed {len(df)} rows.")
        df.to_csv(args.csv, index=False)
        print(f"Saved intermediate data to {args.csv}")
    except Exception as e:
        print(f"Error parsing XML: {e}")
        # Signal failure to the shell instead of silently exiting 0.
        raise SystemExit(1)

    # Step 2: Long CSV -> SDTM Datasets
    print(f"\n--- Step 2: Generating SDTM Datasets using configs from {args.configs} ---")
    # exist_ok avoids a TOCTOU race between an exists() check and makedirs().
    os.makedirs(args.output, exist_ok=True)

    try:
        create_sdtm_datasets(args.configs, args.csv, args.output)
        print(f"\nSuccess! SDTM datasets created in {args.output}")
    except Exception as e:
        print(f"Error creating SDTM datasets: {e}")
        raise SystemExit(1)


if __name__ == "__main__":
    main()
|
|
File without changes
|
|
File without changes
|
|
@@ -0,0 +1,113 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class FindingProcessor:
    """Processor for CDISC "Findings"-style domains (e.g. VS, LB, FA, IE).

    Unlike the pivot-based general processor, every long-format row is
    treated as one potential finding record: rows are filtered (by FormOID,
    ItemGroupOID regex, ItemOID regex) and then mapped column-by-column
    without pivoting.
    """

    def __init__(self):
        # Stateless processor; kept for interface symmetry with other processors.
        pass

    def process(self, domain_name, sources, df_long, default_keys):
        """Build one DataFrame per source block in *sources*.

        Parameters
        ----------
        domain_name : str
            SDTM domain name, used in diagnostics.
        sources : list[dict]
            Config blocks with optional 'formoid', 'item_group_regex',
            'item_oid_regex', 'keys' and a 'columns' mapping of
            target column -> source config (str or dict).
        df_long : pandas.DataFrame
            Long-format ODM data with at least FormOID, ItemGroupOID,
            ItemOID and Value columns (Question is optional metadata).
        default_keys : list[str]
            Key columns used when a block does not define its own 'keys'.

        Returns
        -------
        list[pandas.DataFrame]
            One frame per non-empty source block, restricted to the
            configured target columns.
        """
        domain_dfs = []

        for settings in sources:
            # 0. Filter by FormOID (optional but recommended).
            form_oid = settings.get('formoid')
            source_df = df_long.copy()
            if form_oid:
                if isinstance(form_oid, list):
                    source_df = source_df[source_df['FormOID'].isin(form_oid)]
                else:
                    source_df = source_df[source_df['FormOID'] == form_oid]

            # 1. Filter by ItemGroupOID regex (anchored match, NaN treated as no-match).
            item_group_match = settings.get('item_group_regex')
            if item_group_match:
                source_df = source_df[source_df['ItemGroupOID'].str.match(item_group_match, na=False)]

            # 2. Filter by ItemOID regex — crucial for finding domains, where many
            #    items funnel into a single test-code/test-name pair.
            item_oid_match = settings.get('item_oid_regex')
            if item_oid_match:
                source_df = source_df[source_df['ItemOID'].str.match(item_oid_match, na=False)]

            if source_df.empty:
                continue

            # 3. Base frame (no pivot): keys + ItemOID + Value, plus Question
            #    metadata when the parser provided it.
            keys = settings.get('keys', default_keys)
            base_cols = keys + ['ItemOID', 'Value']
            if 'Question' in source_df.columns:
                base_cols.append('Question')

            final_df = source_df[base_cols].copy()

            # 4. Map target columns from the config.
            mappings = settings.get('columns', {})

            for target_col, col_config in mappings.items():
                series = None
                source_expr = None
                literal_expr = None
                target_type = None
                regex_extract = None
                prefix = None

                # Config may be a simple string (source column name) or a dict.
                if isinstance(col_config, dict):
                    source_expr = col_config.get('source')
                    literal_expr = col_config.get('literal')
                    target_type = col_config.get('type')
                    regex_extract = col_config.get('regex_extract')  # e.g. "I_ELIGI_(.*)"
                    prefix = col_config.get('prefix')
                else:
                    source_expr = col_config

                if literal_expr is not None:
                    # Constant column, replicated for every row.
                    series = pd.Series([literal_expr] * len(final_df), index=final_df.index)
                elif source_expr:
                    # ItemOID/Value are already in final_df (base_cols), so one
                    # generic lookup covers them; fall back to source_df for
                    # columns not carried into the base frame.
                    if source_expr in final_df.columns:
                        series = final_df[source_expr]
                    elif source_expr in source_df.columns:
                        series = source_df[source_expr]
                    else:
                        # Consistent with GeneralProcessor: warn and leave NaN.
                        print(f"Warning: Source column '{source_expr}' not found for '{domain_name}.{target_col}'. Filling with NaN.")

                # Regex extraction (capture group 1) from the source values.
                if regex_extract and series is not None:
                    series = series.astype(str).str.extract(regex_extract)[0]

                # Apply prefix after extraction so the prefix is not consumed by the regex.
                if prefix and series is not None:
                    series = prefix + series.astype(str)

                if series is not None:
                    # Optional type conversion; coercion failures become NaN/no-op.
                    if target_type:
                        try:
                            if target_type == 'int':
                                series = pd.to_numeric(series, errors='coerce').astype('Int64')
                            elif target_type == 'str':
                                series = series.astype(str)
                        except Exception:
                            pass

                    final_df[target_col] = series

            # BUG FIX: the original `final_df[cols_to_keep]` raised KeyError when
            # any mapping failed to resolve (target column never assigned).
            # reindex keeps the configured column order and fills unresolved
            # targets with NaN instead of crashing.
            final_df = final_df.reindex(columns=list(mappings.keys()))

            domain_dfs.append(final_df)

        return domain_dfs
|
|
@@ -0,0 +1,240 @@
|
|
|
1
|
+
import pandas as pd
|
|
2
|
+
|
|
3
|
+
class GeneralProcessor:
    """Pivot-based processor for general SDTM domains (e.g. DM, AE, CM).

    Each source block filters the long-format data by FormOID, pivots it
    wide (one column per ItemOID), then maps pivoted columns onto SDTM
    target columns with optional literals, dependencies, substring slicing,
    value mapping, prefixes, type conversion and missingness validation.
    """

    def _expand_settings(self, settings):
        """
        Expands a settings dict with list-based sources/literals into multiple settings dicts.

        If any column config carries a list under 'source' or 'literal', the
        block is duplicated once per list element, with the i-th element
        substituted in the i-th copy. All lists in the same block must share
        one length; a mismatch raises ValueError. Blocks with no lists are
        returned unchanged as a single-element list.
        """
        # Find all columns that have a list for source or literal.
        # Maps column name -> which parameter ('source' or 'literal') is a list.
        list_cols = {}
        list_len = 0

        columns = settings.get('columns', {})
        for col_name, col_cfg in columns.items():
            if isinstance(col_cfg, dict):
                src = col_cfg.get('source')
                lit = col_cfg.get('literal')

                # Check source
                if isinstance(src, list):
                    # First list seen fixes list_len; later lists must agree.
                    if list_len > 0 and len(src) != list_len:
                        raise ValueError(f"Column '{col_name}' source list length {len(src)} mismatch with others {list_len}")
                    list_len = len(src)
                    list_cols[col_name] = 'source'

                # Check literal
                if isinstance(lit, list):
                    if list_len > 0 and len(lit) != list_len:
                        raise ValueError(f"Column '{col_name}' literal list length {len(lit)} mismatch with others {list_len}")
                    list_len = len(lit)
                    list_cols[col_name] = 'literal'

        if list_len == 0:
            # No list-valued configs: nothing to expand.
            return [settings]

        # Expand: one settings copy per list position.
        expanded_list = []
        for i in range(list_len):
            # Shallow copy is safe because 'columns' is replaced wholesale below.
            new_settings = settings.copy()
            new_cols = {}
            for col_name, col_cfg in columns.items():
                if isinstance(col_cfg, dict):
                    new_cfg = col_cfg.copy()
                    if col_name in list_cols:
                        param = list_cols[col_name]  # 'source' or 'literal'
                        # Extract the i-th element
                        val_list = col_cfg.get(param)
                        new_cfg[param] = val_list[i]
                    new_cols[col_name] = new_cfg
                else:
                    # Plain string configs are shared unchanged across copies.
                    new_cols[col_name] = col_cfg

            new_settings['columns'] = new_cols
            expanded_list.append(new_settings)

        return expanded_list

    def process(self, domain_name, sources, df_long, default_keys):
        """Build one DataFrame per (expanded) source block.

        Parameters
        ----------
        domain_name : str
            SDTM domain name, used in diagnostics only.
        sources : list[dict]
            Config blocks; each needs a 'formoid' plus a 'columns' mapping.
        df_long : pandas.DataFrame
            Long-format ODM data with FormOID, ItemOID and Value columns.
        default_keys : list[str]
            Pivot index columns used when a block has no 'keys' entry.

        Returns
        -------
        list[pandas.DataFrame]
            One frame per successfully processed block; blocks that fail
            filtering/pivoting are skipped with a printed warning.
        """
        domain_dfs = []

        # Pre-expand sources if they contain lists
        expanded_sources = []
        for s in sources:
            try:
                expanded_sources.extend(self._expand_settings(s))
            except Exception as e:
                print(f"Error expanding settings for {domain_name}: {e}")
                continue  # Skip invalid blocks

        for settings in expanded_sources:
            # 1. Filter by FormOID — mandatory for this processor; blocks
            #    without one are skipped with a warning.
            form_oid = settings.get('formoid')
            if form_oid:
                try:
                    # Filter for specific FormOID(s)
                    if isinstance(form_oid, list):
                        source_df = df_long[df_long['FormOID'].isin(form_oid)].copy()
                    else:
                        source_df = df_long[df_long['FormOID'] == form_oid].copy()
                except Exception as e:
                    print(f"Error filtering for {domain_name} (FormOID={form_oid}): {e}")
                    continue
            else:
                print(f"Warning: No formoid specified for a block in {domain_name}")
                continue

            if source_df.empty:
                continue

            # 2. Key columns for pivoting (use block keys or defaults)
            keys = settings.get('keys', default_keys)

            # 3. Pivot long -> wide: one column per ItemOID, first value wins
            #    when duplicates share the same key tuple.
            try:
                pivoted = source_df.pivot_table(
                    index=keys,
                    columns='ItemOID',
                    values='Value',
                    aggfunc='first'
                ).reset_index()
            except Exception as e:
                print(f"Error pivoting for {domain_name}: {e}")
                continue

            # 4. Map columns. Columns already written to final_df can be
            #    referenced by later mappings (order of 'columns' matters).
            final_df = pd.DataFrame()
            mappings = settings.get('columns', {})

            for target_col, col_config in mappings.items():
                source_expr = None
                literal_expr = None
                target_type = None
                value_map = None

                # Check if simple string or object config
                if isinstance(col_config, dict):
                    source_expr = col_config.get('source')
                    literal_expr = col_config.get('literal')
                    target_type = col_config.get('type')
                    value_map = col_config.get('value_mapping')
                else:
                    source_expr = col_config
                    literal_expr = None

                # Extract Data — pivoted columns take priority over
                # previously mapped target columns.
                series = None
                if literal_expr is not None:
                    # Explicit literal value
                    series = pd.Series([literal_expr] * len(pivoted))
                elif source_expr:
                    if source_expr in pivoted.columns:
                        series = pivoted[source_expr].copy()
                    elif source_expr in final_df.columns:
                        series = final_df[source_expr].copy()
                    else:
                        # Source defined but not found.
                        print(f"Warning: Source column '{source_expr}' not found for '{domain_name}.{target_col}'. Filling with NaN.")
                        series = pd.Series([None] * len(pivoted))
                else:
                    print(f"Warning: No source or literal defined for '{domain_name}.{target_col}'. Filling with NaN.")
                    series = pd.Series([None] * len(pivoted))

                # Apply Dependency Logic (Assign only if dependency column is not null)
                dependency = col_config.get('dependency') if isinstance(col_config, dict) else None
                if dependency:
                    dep_series = None
                    if dependency in pivoted.columns:
                        dep_series = pivoted[dependency]
                    elif dependency in final_df.columns:
                        dep_series = final_df[dependency]

                    if dep_series is not None:
                        # Mask: Keep values where dependency is NOT null, else fill with False Value (default None)
                        false_val = col_config.get('dependency_false_value')
                        # Make sure false_val is treated as literal of correct type? pandas usually handles mixed.

                        series = series.where(dep_series.notna(), false_val)
                    else:
                        # Missing dependency column behaves as if the dependency
                        # were null everywhere: the whole column gets false_val.
                        print(f"Warning: Dependency column '{dependency}' not found for '{domain_name}.{target_col}'. Treating as all-null dependency.")
                        false_val = col_config.get('dependency_false_value')
                        series = pd.Series([false_val] * len(pivoted))

                # Apply Substring Extraction (Before Value Mapping)
                if isinstance(col_config, dict):
                    sub_start = col_config.get('substring_start')
                    sub_len = col_config.get('substring_length')
                    if sub_start is not None and sub_len is not None:
                        # Ensure series is string
                        series = series.astype(str)
                        # substring_start is a 0-based index; the slice keeps
                        # substring_length characters, e.g. start=3, length=3
                        # on '1110023565' yields '002'.
                        series = series.str[sub_start : sub_start + sub_len]

                # Apply Value Mapping — three modes:
                #   mapping_default set        -> strict map, literal fallback
                #   mapping_default_source set -> strict map, column fallback
                #   neither                    -> partial replace (unmapped
                #                                 values pass through unchanged)
                mapping_default = col_config.get('mapping_default') if isinstance(col_config, dict) else None
                mapping_default_source = col_config.get('mapping_default_source') if isinstance(col_config, dict) else None

                if value_map:
                    # Perform mapping (non-matches become NaN)
                    mapped_series = series.map(value_map)

                    if mapping_default is not None:
                        # Strict mapping with default literal
                        series = mapped_series.fillna(mapping_default)
                    elif mapping_default_source is not None:
                        # Strict mapping with default from another column.
                        # NOTE: lookup order here is final_df first, then
                        # pivoted — the reverse of the source lookup above.
                        fallback = None
                        if mapping_default_source in final_df.columns:
                            fallback = final_df[mapping_default_source]
                        elif mapping_default_source in pivoted.columns:
                            fallback = pivoted[mapping_default_source]

                        if fallback is not None:
                            series = mapped_series.fillna(fallback)
                        else:
                            print(f"Warning: Default source '{mapping_default_source}' not found for '{domain_name}.{target_col}'")
                            series = mapped_series  # Leave as NaN or original? mapped_series has NaNs.
                    else:
                        # Partial replacement (legacy: keep original values if not in map)
                        series = series.replace(value_map)

                # Apply Prefix
                prefix = col_config.get('prefix') if isinstance(col_config, dict) else None
                if prefix:
                    series = prefix + series.astype(str)

                # Apply Type Conversion; failures are reported but the
                # unconverted series is still assigned.
                if target_type:
                    try:
                        if target_type == 'int':
                            # Nullable Int64 so coerced NaNs survive.
                            series = pd.to_numeric(series, errors='coerce').astype('Int64')
                        elif target_type == 'float':
                            series = pd.to_numeric(series, errors='coerce')
                        elif target_type == 'str':
                            series = series.astype(str)
                        elif target_type == 'bool':
                            series = series.astype(bool)
                    except Exception as e:
                        print(f"Error converting {target_col} to {target_type}: {e}")

                final_df[target_col] = series

                # Validation: max_missing_pct — warn when the share of missing
                # values exceeds the configured percentage threshold.
                if isinstance(col_config, dict):
                    max_missing = col_config.get('max_missing_pct')
                    if max_missing is not None:
                        missing_count = series.isna().sum()
                        if target_type == 'str':
                            # astype(str) turns NaN/None into the literal
                            # strings 'nan'/'None'; count those as missing too.
                            missing_count += (series.isin(['nan', 'None'])).sum()

                        total = len(series)
                        if total > 0:
                            pct = (missing_count / total) * 100
                            if pct > max_missing:
                                print(f"WARNING: [Validation] {domain_name}.{target_col} missing {pct:.2f}% (Limit: {max_missing:})")

            domain_dfs.append(final_df)

        return domain_dfs
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
import yaml
|
|
2
|
+
import os
|
|
3
|
+
|
|
4
|
+
from .validate import load_schema, validate_domain_config
|
|
5
|
+
|
|
6
|
+
def load_config(config_dir):
    """
    Loads all YAML configuration files from the specified directory.

    Each .yaml/.yml file (except schema.yaml) contributes domain configs;
    defaults.yaml — or a top-level 'defaults' key in any file — populates
    the shared defaults. Domain configs are validated against schema.yaml
    (validation failures only warn, they do not abort).

    Parameters
    ----------
    config_dir : str
        Directory containing the YAML spec files.

    Returns
    -------
    dict
        {'domains': {name: config, ...}, 'defaults': {...}}; both empty
        when the directory does not exist.
    """
    config = {
        'domains': {},
        'defaults': {}
    }

    if not os.path.exists(config_dir):
        # Nothing to load; return the empty skeleton rather than raising.
        # (Also avoids loading the schema for a no-op call.)
        return config

    schema = load_schema()

    for filename in os.listdir(config_dir):
        if filename.endswith(".yaml") or filename.endswith(".yml"):
            file_path = os.path.join(config_dir, filename)

            # Skip schema itself if present in same dir
            if filename == "schema.yaml":
                continue

            with open(file_path, "r") as f:
                try:
                    data = yaml.safe_load(f)

                    # BUG FIX: safe_load returns None for empty files, which
                    # previously crashed on .items()/.update(). Skip them.
                    if not data:
                        continue

                    if filename == 'defaults.yaml':
                        # Defaults file - likely flat dict
                        config['defaults'].update(data)
                        continue

                    # Merge data
                    for key, value in data.items():
                        if key == 'defaults':
                            # Fallback if someone put defaults: inside another file
                            config['defaults'].update(value)
                        else:
                            # It's a domain - validate before accepting.
                            if schema:
                                if not validate_domain_config(key, value, schema):
                                    # BUG FIX: message used to print the literal
                                    # placeholder "(unknown)" instead of naming
                                    # the offending domain/file.
                                    print(f"Warning: {key} in {filename} failed schema validation. Proceeding with caution.")

                            config['domains'][key] = value

                except yaml.YAMLError as exc:
                    # BUG FIX: identify which file failed to parse.
                    print(f"Error parsing YAML file {file_path}: {exc}")

    return config
|