cdiscbuilder-0.2.8-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cdiscbuilder/__init__.py ADDED
@@ -0,0 +1 @@
+ __version__ = "0.2.2"
cdiscbuilder/cli.py ADDED
@@ -0,0 +1,42 @@
+ import os
+ import argparse
+ from .odm import parse_odm_to_long_df
+ from .sdtm import create_sdtm_datasets
+
+ def main():
+     parser = argparse.ArgumentParser(description="Convert ODM XML to SDTM Datasets")
+     # Determine default config path inside package
+     current_dir = os.path.dirname(__file__)
+     default_config_path = os.path.join(current_dir, "specs")
+
+     parser.add_argument("--xml", required=True, help="Path to input ODM XML file")
+     parser.add_argument("--csv", default="odm_long.csv", help="Path to intermediate long CSV file")
+     parser.add_argument("--configs", default=default_config_path, help="Path to SDTM configuration directory")
+     parser.add_argument("--output", default="sdtm_output", help="Path to output SDTM directory")
+
+     args = parser.parse_args()
+
+     # Step 1: ODM XML -> Long CSV
+     print(f"--- Step 1: Parsing ODM XML from {args.xml} ---")
+     try:
+         df = parse_odm_to_long_df(args.xml)
+         print(f"Parsed {len(df)} rows.")
+         df.to_csv(args.csv, index=False)
+         print(f"Saved intermediate data to {args.csv}")
+     except Exception as e:
+         print(f"Error parsing XML: {e}")
+         return
+
+     # Step 2: Long CSV -> SDTM Datasets
+     print(f"\n--- Step 2: Generating SDTM Datasets using configs from {args.configs} ---")
+     if not os.path.exists(args.output):
+         os.makedirs(args.output)
+
+     try:
+         create_sdtm_datasets(args.configs, args.csv, args.output)
+         print(f"\nSuccess! SDTM datasets created in {args.output}")
+     except Exception as e:
+         print(f"Error creating SDTM datasets: {e}")
+
+ if __name__ == "__main__":
+     main()
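
Note: entry_points.txt (below) installs this entry point as the `cdisc-sdtm` console script. The same two-step pipeline can also be driven programmatically; a minimal sketch, where "study.xml" is a hypothetical input file:

    import os
    import cdiscbuilder
    from cdiscbuilder.odm import parse_odm_to_long_df
    from cdiscbuilder.sdtm import create_sdtm_datasets

    # Step 1: flatten the ODM XML into the long intermediate format
    df = parse_odm_to_long_df("study.xml")
    df.to_csv("odm_long.csv", index=False)

    # Step 2: build SDTM datasets, resolving the bundled specs dir the same way cli.py does
    specs_dir = os.path.join(os.path.dirname(cdiscbuilder.__file__), "specs")
    create_sdtm_datasets(specs_dir, "odm_long.csv", "sdtm_output")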
cdiscbuilder/engine/classes/finding.py ADDED
@@ -0,0 +1,113 @@
+ import pandas as pd
+
+
+ class FindingProcessor:
+     def __init__(self):
+         pass
+
+     def process(self, domain_name, sources, df_long, default_keys):
+         domain_dfs = []
+
+         for settings in sources:
+             # 0. Filter by FormOID (optional but recommended)
+             form_oid = settings.get('formoid')
+             source_df = df_long.copy()
+             if form_oid:
+                 if isinstance(form_oid, list):
+                     source_df = source_df[source_df['FormOID'].isin(form_oid)]
+                 else:
+                     source_df = source_df[source_df['FormOID'] == form_oid]
+
+             # 1. Filter by ItemGroupOID (regex)
+             item_group_match = settings.get('item_group_regex')
+             if item_group_match:
+                 source_df = source_df[source_df['ItemGroupOID'].str.match(item_group_match, na=False)]
+
+             # 2. Filter by ItemOID (regex)
+             # This is crucial for "finding" domains - we want rows where ItemOID matches a pattern
+             item_oid_match = settings.get('item_oid_regex')
+             if item_oid_match:
+                 source_df = source_df[source_df['ItemOID'].str.match(item_oid_match, na=False)]
+
+             if source_df.empty:
+                 continue
+
+             # 3. Create base DataFrame (no pivot!)
+             # We treat every row as a potential finding.
+             # Base columns: keys + ItemOID + Value
+             keys = settings.get('keys', default_keys)
+
+             # Check if Question exists in source_df (it should, based on odm.py)
+             base_cols = keys + ['ItemOID', 'Value']
+             if 'Question' in source_df.columns:
+                 base_cols.append('Question')
+
+             final_df = source_df[base_cols].copy()
+
+             # 4. Map columns
+             mappings = settings.get('columns', {})
+
+             for target_col, col_config in mappings.items():
+                 series = None
+
+                 # Config can be a simple string (source column) or a dict
+                 source_expr = None
+                 literal_expr = None
+                 target_type = None
+                 regex_extract = None
+
+                 if isinstance(col_config, dict):
+                     source_expr = col_config.get('source')
+                     literal_expr = col_config.get('literal')
+                     target_type = col_config.get('type')
+                     regex_extract = col_config.get('regex_extract')  # e.g. "I_ELIGI_(.*)"
+                 else:
+                     source_expr = col_config  # simple string form
+
+                 if literal_expr is not None:
+                     series = pd.Series([literal_expr] * len(final_df), index=final_df.index)
+
+                 elif source_expr:
+                     if source_expr == "ItemOID":
+                         series = final_df['ItemOID']
+                     elif source_expr == "Value":
+                         series = final_df['Value']
+                     elif source_expr in final_df.columns:
+                         series = final_df[source_expr]
+                     elif source_expr in source_df.columns:
+                         series = source_df[source_expr]
+
+                 # Regex extraction from source
+                 if regex_extract and series is not None:
+                     # Extract group 1
+                     series = series.astype(str).str.extract(regex_extract)[0]
+
+                 # Apply prefix
+                 prefix = None
+                 if isinstance(col_config, dict):
+                     prefix = col_config.get('prefix')
+
+                 if prefix and series is not None:
+                     series = prefix + series.astype(str)
+
+                 if series is not None:
+                     # Type conversion
+                     if target_type:
+                         try:
+                             if target_type == 'int':
+                                 series = pd.to_numeric(series, errors='coerce').astype('Int64')
+                             elif target_type == 'str':
+                                 series = series.astype(str)
+                         except Exception:
+                             pass
+
+                     final_df[target_col] = series
+
+             # Keep only the target columns
+             cols_to_keep = list(mappings.keys())
+             final_df = final_df[cols_to_keep]
+
+             domain_dfs.append(final_df)
+
+         return domain_dfs
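
Note: for orientation, a runnable sketch of the kind of block FindingProcessor consumes — unlike GeneralProcessor it never pivots, it keeps each matching row as an individual finding. All OIDs here are hypothetical:

    import pandas as pd

    df_long = pd.DataFrame({
        "StudyOID": ["S1", "S1"],
        "SubjectKey": ["001", "001"],
        "FormOID": ["F_ELIG_1", "F_ELIG_1"],
        "ItemGroupOID": ["IG_ELIG", "IG_ELIG"],
        "ItemOID": ["I_ELIGI_INCL01", "I_ELIGI_EXCL01"],
        "Value": ["Y", "N"],
    })

    block = {
        "type": "finding",
        "formoid": "F_ELIG_1",
        "item_oid_regex": r"I_ELIGI_.*",
        "keys": ["StudyOID", "SubjectKey"],
        "columns": {
            # ItemOID carries the test code; regex_extract pulls out group 1
            "IETESTCD": {"source": "ItemOID", "regex_extract": r"I_ELIGI_(.*)"},
            "IEORRES": {"source": "Value"},
        },
    }

    dfs = FindingProcessor().process("IE", [block], df_long, default_keys=["StudyOID", "SubjectKey"])
    # dfs[0] has two rows: IETESTCD in {"INCL01", "EXCL01"}, IEORRES in {"Y", "N"}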
cdiscbuilder/engine/classes/general.py ADDED
@@ -0,0 +1,240 @@
+ import pandas as pd
+
+ class GeneralProcessor:
+     def _expand_settings(self, settings):
+         """
+         Expands a settings dict with list-based sources/literals into multiple settings dicts.
+         """
+         # Find all columns that have a list for source or literal
+         list_cols = {}
+         list_len = 0
+
+         columns = settings.get('columns', {})
+         for col_name, col_cfg in columns.items():
+             if isinstance(col_cfg, dict):
+                 src = col_cfg.get('source')
+                 lit = col_cfg.get('literal')
+
+                 # Check source
+                 if isinstance(src, list):
+                     if list_len > 0 and len(src) != list_len:
+                         raise ValueError(f"Column '{col_name}' source list length {len(src)} mismatch with others {list_len}")
+                     list_len = len(src)
+                     list_cols[col_name] = 'source'
+
+                 # Check literal
+                 if isinstance(lit, list):
+                     if list_len > 0 and len(lit) != list_len:
+                         raise ValueError(f"Column '{col_name}' literal list length {len(lit)} mismatch with others {list_len}")
+                     list_len = len(lit)
+                     list_cols[col_name] = 'literal'
+
+         if list_len == 0:
+             return [settings]
+
+         # Expand
+         expanded_list = []
+         for i in range(list_len):
+             new_settings = settings.copy()
+             new_cols = {}
+             for col_name, col_cfg in columns.items():
+                 if isinstance(col_cfg, dict):
+                     new_cfg = col_cfg.copy()
+                     if col_name in list_cols:
+                         param = list_cols[col_name]  # 'source' or 'literal'
+                         # Extract the i-th element
+                         val_list = col_cfg.get(param)
+                         new_cfg[param] = val_list[i]
+                     new_cols[col_name] = new_cfg
+                 else:
+                     new_cols[col_name] = col_cfg
+
+             new_settings['columns'] = new_cols
+             expanded_list.append(new_settings)
+
+         return expanded_list
+
+     def process(self, domain_name, sources, df_long, default_keys):
+         domain_dfs = []
+
+         # Pre-expand sources if they contain lists
+         expanded_sources = []
+         for s in sources:
+             try:
+                 expanded_sources.extend(self._expand_settings(s))
+             except Exception as e:
+                 print(f"Error expanding settings for {domain_name}: {e}")
+                 continue  # Skip invalid blocks
+
+         for settings in expanded_sources:
+             # 1. Filter by FormOID
+             form_oid = settings.get('formoid')
+             if form_oid:
+                 try:
+                     # Filter for specific FormOID(s)
+                     if isinstance(form_oid, list):
+                         source_df = df_long[df_long['FormOID'].isin(form_oid)].copy()
+                     else:
+                         source_df = df_long[df_long['FormOID'] == form_oid].copy()
+                 except Exception as e:
+                     print(f"Error filtering for {domain_name} (FormOID={form_oid}): {e}")
+                     continue
+             else:
+                 print(f"Warning: No formoid specified for a block in {domain_name}")
+                 continue
+
+             if source_df.empty:
+                 continue
+
+             # 2. Key columns for pivoting (use block keys or defaults)
+             keys = settings.get('keys', default_keys)
+
+             # 3. Pivot
+             try:
+                 pivoted = source_df.pivot_table(
+                     index=keys,
+                     columns='ItemOID',
+                     values='Value',
+                     aggfunc='first'
+                 ).reset_index()
+             except Exception as e:
+                 print(f"Error pivoting for {domain_name}: {e}")
+                 continue
+
+             # 4. Map columns
+             final_df = pd.DataFrame()
+             mappings = settings.get('columns', {})
+
+             for target_col, col_config in mappings.items():
+                 source_expr = None
+                 literal_expr = None
+                 target_type = None
+                 value_map = None
+
+                 # Config can be a simple string or an object config
+                 if isinstance(col_config, dict):
+                     source_expr = col_config.get('source')
+                     literal_expr = col_config.get('literal')
+                     target_type = col_config.get('type')
+                     value_map = col_config.get('value_mapping')
+                 else:
+                     source_expr = col_config
+                     literal_expr = None
+
+                 # Extract data
+                 series = None
+                 if literal_expr is not None:
+                     # Explicit literal value
+                     series = pd.Series([literal_expr] * len(pivoted))
+                 elif source_expr:
+                     if source_expr in pivoted.columns:
+                         series = pivoted[source_expr].copy()
+                     elif source_expr in final_df.columns:
+                         series = final_df[source_expr].copy()
+                     else:
+                         # Source defined but not found
+                         print(f"Warning: Source column '{source_expr}' not found for '{domain_name}.{target_col}'. Filling with NaN.")
+                         series = pd.Series([None] * len(pivoted))
+                 else:
+                     print(f"Warning: No source or literal defined for '{domain_name}.{target_col}'. Filling with NaN.")
+                     series = pd.Series([None] * len(pivoted))
+
+                 # Apply dependency logic (assign only if the dependency column is not null)
+                 dependency = col_config.get('dependency') if isinstance(col_config, dict) else None
+                 if dependency:
+                     dep_series = None
+                     if dependency in pivoted.columns:
+                         dep_series = pivoted[dependency]
+                     elif dependency in final_df.columns:
+                         dep_series = final_df[dependency]
+
+                     if dep_series is not None:
+                         # Mask: keep values where the dependency is NOT null, else fill
+                         # with the false value (default None).
+                         # pandas handles mixed-type fills, so false_val can be any literal.
+                         false_val = col_config.get('dependency_false_value')
+                         series = series.where(dep_series.notna(), false_val)
+                     else:
+                         print(f"Warning: Dependency column '{dependency}' not found for '{domain_name}.{target_col}'. Treating as all-null dependency.")
+                         false_val = col_config.get('dependency_false_value')
+                         series = pd.Series([false_val] * len(pivoted))
+
+                 # Apply substring extraction (before value mapping)
+                 if isinstance(col_config, dict):
+                     sub_start = col_config.get('substring_start')
+                     sub_len = col_config.get('substring_length')
+                     if sub_start is not None and sub_len is not None:
+                         # Ensure series is string
+                         series = series.astype(str)
+                         # Slicing is 0-indexed: for '1110023565', "position 3-5" means
+                         # indices 3, 4, 5, i.e. slice [3:6]. The config supplies a
+                         # 0-based start index and a length.
+                         series = series.str[sub_start : sub_start + sub_len]
+
+                 # Apply value mapping
+                 mapping_default = col_config.get('mapping_default') if isinstance(col_config, dict) else None
+                 mapping_default_source = col_config.get('mapping_default_source') if isinstance(col_config, dict) else None
+
+                 if value_map:
+                     # Perform mapping (non-matches become NaN)
+                     mapped_series = series.map(value_map)
+
+                     if mapping_default is not None:
+                         # Strict mapping with default literal
+                         series = mapped_series.fillna(mapping_default)
+                     elif mapping_default_source is not None:
+                         # Strict mapping with default from another column
+                         fallback = None
+                         if mapping_default_source in final_df.columns:
+                             fallback = final_df[mapping_default_source]
+                         elif mapping_default_source in pivoted.columns:
+                             fallback = pivoted[mapping_default_source]
+
+                         if fallback is not None:
+                             series = mapped_series.fillna(fallback)
+                         else:
+                             print(f"Warning: Default source '{mapping_default_source}' not found for '{domain_name}.{target_col}'")
+                             series = mapped_series  # Unmapped values are left as NaN
+                     else:
+                         # Partial replacement (legacy: keep original values if not in map)
+                         series = series.replace(value_map)
+
+                 # Apply prefix
+                 prefix = col_config.get('prefix') if isinstance(col_config, dict) else None
+                 if prefix:
+                     series = prefix + series.astype(str)
+
+                 # Apply type conversion
+                 if target_type:
+                     try:
+                         if target_type == 'int':
+                             series = pd.to_numeric(series, errors='coerce').astype('Int64')
+                         elif target_type == 'float':
+                             series = pd.to_numeric(series, errors='coerce')
+                         elif target_type == 'str':
+                             series = series.astype(str)
+                         elif target_type == 'bool':
+                             series = series.astype(bool)
+                     except Exception as e:
+                         print(f"Error converting {target_col} to {target_type}: {e}")
+
+                 final_df[target_col] = series
+
+                 # Validation: max_missing_pct
+                 if isinstance(col_config, dict):
+                     max_missing = col_config.get('max_missing_pct')
+                     if max_missing is not None:
+                         missing_count = series.isna().sum()
+                         if target_type == 'str':
+                             missing_count += (series.isin(['nan', 'None'])).sum()
+
+                         total = len(series)
+                         if total > 0:
+                             pct = (missing_count / total) * 100
+                             if pct > max_missing:
+                                 print(f"WARNING: [Validation] {domain_name}.{target_col} missing {pct:.2f}% (Limit: {max_missing})")
+
+             domain_dfs.append(final_df)
+
+         return domain_dfs
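
Note: the compact-list expansion above is what lets specs like FA.yaml and VS.yaml define several tests in a single block. A small sketch of `_expand_settings` on its own (values trimmed from VS.yaml):

    proc = GeneralProcessor()
    block = {
        "formoid": "F_VS_1",
        "columns": {
            "VSTESTCD": {"literal": ["HEIGHT", "WEIGHT"], "type": "str"},
            "VSORRES": {"source": ["I_VS_HEIGHT", "I_VS_WEIGHT"], "type": "str"},
        },
    }
    blocks = proc._expand_settings(block)
    # One block per list position; scalar configs are carried through unchanged
    assert len(blocks) == 2
    assert blocks[0]["columns"]["VSTESTCD"]["literal"] == "HEIGHT"
    assert blocks[1]["columns"]["VSORRES"]["source"] == "I_VS_WEIGHT"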
cdiscbuilder/engine/config.py ADDED
@@ -0,0 +1,57 @@
+ import yaml
+ import os
+
+ from .validate import load_schema, validate_domain_config
+
+ def load_config(config_dir):
+     """
+     Loads all YAML configuration files from the specified directory.
+     Validates them against schema.yaml.
+     """
+     config = {
+         'domains': {},
+         'defaults': {}
+     }
+
+     schema = load_schema()
+
+     if not os.path.exists(config_dir):
+         # If the directory doesn't exist, return an empty config rather than failing
+         return config
+
+     for filename in os.listdir(config_dir):
+         if filename.endswith(".yaml") or filename.endswith(".yml"):
+             file_path = os.path.join(config_dir, filename)
+
+             # Skip the schema itself if present in the same dir
+             if filename == "schema.yaml":
+                 continue
+
+             with open(file_path, "r") as f:
+                 try:
+                     data = yaml.safe_load(f)
+
+                     if filename == 'defaults.yaml':
+                         # Defaults file - a flat dict of global settings
+                         config['defaults'].update(data)
+                         continue
+
+                     # Merge data
+                     for key, value in data.items():
+                         if key == 'defaults':
+                             # Fallback if someone put defaults: inside another file
+                             config['defaults'].update(value)
+                         else:
+                             # It's a domain - validate it
+                             if schema:
+                                 if not validate_domain_config(key, value, schema):
+                                     print(f"Warning: {filename} failed schema validation. Proceeding with caution.")
+
+                             config['domains'][key] = value
+
+                 except yaml.YAMLError as exc:
+                     print(f"Error parsing YAML file {filename}: {exc}")
+
+     return config
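
Note: a sketch of the structure load_config returns, assuming it is pointed at the bundled specs directory:

    import os
    import cdiscbuilder
    from cdiscbuilder.engine.config import load_config

    specs_dir = os.path.join(os.path.dirname(cdiscbuilder.__file__), "specs")
    config = load_config(specs_dir)
    print(sorted(config["domains"]))   # ['AE', 'CM', 'DM', 'DS', 'FA', 'IE', 'PE', 'SV', 'VS']
    print(config["defaults"]["keys"])  # global pivot keys from defaults.yaml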
cdiscbuilder/engine/functions.py ADDED
@@ -0,0 +1,49 @@
+ import pandas as pd
+
+ def extract_value(df_long, form_oids, item_oids, return_col='Value', keys=None):
+     """
+     Generic extraction function for Findings.
+     Args:
+         df_long: The source long format dataframe.
+         form_oids: List of FormOIDs to filter.
+         item_oids: List (or single string) of ItemOIDs to filter.
+         return_col: 'Value' (default) or 'ItemOID'. What to return as the column data.
+         keys: List of key columns to include/index by.
+     Returns:
+         DataFrame containing the key columns and the requested data column.
+     """
+     # 1. Normalize inputs
+     if not isinstance(form_oids, list):
+         form_oids = [form_oids] if form_oids else []
+     if isinstance(item_oids, str):
+         item_oids = [item_oids]
+
+     # 2. Filter forms
+     subset = df_long[df_long['FormOID'].isin(form_oids)].copy()
+
+     if subset.empty:
+         return pd.DataFrame()
+
+     # 3. Filter items (if item_oids is empty, all items from the matched forms are kept)
+     if item_oids:
+         subset = subset[subset['ItemOID'].isin(item_oids)]
+
+     if subset.empty:
+         return pd.DataFrame()  # empty but valid DataFrame
+
+     # 4. Select columns: we always need the keys plus the return column
+     cols_to_keep = keys + [return_col] if keys else [return_col]
+
+     # If keys are missing (logic error), handle gracefully
+     available_cols = [c for c in cols_to_keep if c in subset.columns]
+     result = subset[available_cols].copy()
+
+     # 5. The caller renames the return column to its target (e.g. FAORRES);
+     # 'Value' or 'ItemOID' is kept as-is here.
+     return result
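
Note: extract_value does not appear to be wired into the processors in this release; it reads as a standalone helper. A sketch of a call, with hypothetical OIDs:

    import pandas as pd

    df_long = pd.DataFrame({
        "StudyOID": ["S1"],
        "SubjectKey": ["001"],
        "FormOID": ["F_VS_1"],
        "ItemOID": ["I_VS_WEIGHT"],
        "Value": ["72"],
    })

    rows = extract_value(
        df_long,
        form_oids="F_VS_1",      # a single value is normalized to a list
        item_oids="I_VS_WEIGHT",
        return_col="Value",
        keys=["StudyOID", "SubjectKey"],
    )
    # rows has columns StudyOID, SubjectKey, Value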
cdiscbuilder/engine/processor.py ADDED
@@ -0,0 +1,43 @@
+ import pandas as pd
+ import os
+ from .classes.general import GeneralProcessor
+ from .classes.finding import FindingProcessor
+
+
+ def process_domain(domain_name, sources, df_long, default_keys, output_dir):
+     # 'sources' is the settings entry for this domain; the type of its first
+     # block selects the processor (all blocks in a domain are assumed to share a type).
+
+     # Normalize to list
+     if isinstance(sources, dict):
+         sources = [sources]
+
+     if not sources:
+         print(f"Warning: No configuration found for {domain_name}")
+         return
+
+     # Check the type of the first source to decide the processor
+     p_type = sources[0].get('type', 'general')
+
+     if p_type == 'finding':
+         processor = FindingProcessor()
+     else:
+         processor = GeneralProcessor()
+
+     domain_dfs = processor.process(domain_name, sources, df_long, default_keys)
+
+     if not domain_dfs:
+         print(f"Warning: No data found for domain {domain_name}")
+         return
+
+     # Concatenate all sources for this domain
+     combined_df = pd.concat(domain_dfs, ignore_index=True)
+
+     # Save to Parquet
+     if not os.path.exists(output_dir):
+         os.makedirs(output_dir)
+
+     output_path = os.path.join(output_dir, f"{domain_name}.parquet")
+     combined_df.to_parquet(output_path, index=False)
+     print(f"Saved {domain_name} to {output_path} (Shape: {combined_df.shape})")
cdiscbuilder/engine/validate.py ADDED
@@ -0,0 +1,99 @@
+ import yaml
+ import os
+
+ def load_schema():
+     # The specs directory sits parallel to engine/ inside the package
+     pkg_root = os.path.dirname(os.path.dirname(__file__))  # src/cdiscbuilder
+     schema_path = os.path.join(pkg_root, "specs", "schema.yaml")
+
+     if not os.path.exists(schema_path):
+         print(f"Warning: Schema file not found at {schema_path}")
+         return None
+
+     with open(schema_path, 'r') as f:
+         return yaml.safe_load(f)
+
+ def validate_domain_config(domain_name, config, schema):
+     """
+     Validates a single domain configuration against the schema.
+     """
+     if not schema:
+         return True
+
+     # General domains are lists; Findings domains are dicts with type: FINDINGS
+     is_general = isinstance(config, list)
+     is_findings = isinstance(config, dict) and config.get('type') == 'FINDINGS'
+
+     if is_general:
+         return _validate_general(domain_name, config, schema['schemas']['general_domain'], schema['definitions'])
+     elif is_findings:
+         return _validate_findings(domain_name, config, schema['schemas']['findings_domain'], schema['definitions'])
+     else:
+         # A dict without type: FINDINGS may be a single general block, but the
+         # loader normally normalizes those to lists before validation.
+         print(f"Validation Warning: {domain_name} structure unrecognized (Not List or Findings Dict).")
+         return False
+
+ def _validate_general(domain, config, schema, definitions):
+     if not isinstance(config, list):
+         print(f"Schema Error: {domain} must be a list (General Domain). Got {type(config)}")
+         return False
+
+     for idx, block in enumerate(config):
+         if not isinstance(block, dict):
+             print(f"Schema Error: {domain}[{idx}] must be a dict. Got {type(block)}")
+             return False
+
+         # Validate required keys
+         item_schema = schema.get('item_schema', {})
+         for req in item_schema.get('required', []):
+             if req not in block:
+                 print(f"Schema Error: {domain}[{idx}] missing required key '{req}'")
+                 return False
+
+         # Validate columns
+         columns = block.get('columns', {})
+         if not isinstance(columns, dict):
+             print(f"Schema Error: {domain}[{idx}].columns must be a dict.")
+             return False
+
+         # Column configs are expected to be dicts; legacy simple-string mappings
+         # are tolerated, and detailed per-property checks are not implemented yet.
+
+     return True
+
+ def _validate_findings(domain, config, schema, definitions):
+     # Validate required keys
+     for req in schema.get('required', []):
+         if req not in config:
+             print(f"Schema Error: {domain} missing required key '{req}'")
+             return False
+
+     # Validate the columns list
+     cols = config.get('columns', [])
+     if not isinstance(cols, list):
+         print(f"Schema Error: {domain}.columns must be a list.")
+         return False
+
+     col_schema = definitions['column_findings']
+     for idx, col in enumerate(cols):
+         if not isinstance(col, dict):
+             print(f"Schema Error: {domain}.columns[{idx}] must be a dict.")
+             return False
+
+         # Check required keys
+         for req in col_schema.get('required', []):
+             if req not in col:
+                 print(f"Schema Error: {domain}.columns[{idx}] missing '{req}'")
+                 return False
+
+     return True
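
Note: a sketch of validating a block by hand; load_schema resolves specs/schema.yaml relative to the installed package, so this assumes the wheel is installed:

    from cdiscbuilder.engine.validate import load_schema, validate_domain_config

    schema = load_schema()
    block = [{"formoid": "F_AE_1", "columns": {"STUDYID": {"source": "StudyOID"}}}]
    print(validate_domain_config("AE", block, schema))  # True: a list validates as a general domain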
cdiscbuilder/odm.py ADDED
@@ -0,0 +1,103 @@
+ import xml.etree.ElementTree as ET
+ import pandas as pd
+
+ def parse_odm_to_long_df(xml_file):
+     try:
+         tree = ET.parse(xml_file)
+         root = tree.getroot()
+     except Exception as e:
+         print(f"Error parsing XML file {xml_file}: {e}")
+         return pd.DataFrame()
+
+     def parse_metadata(root):
+         item_question_map = {}
+         ns = {'odm': 'http://www.cdisc.org/ns/odm/v1.3'}
+         # Find MetaDataVersion - simplified lookup
+         for study in root.findall('odm:Study', ns):
+             for mdv in study.findall('odm:MetaDataVersion', ns):
+                 for item_def in mdv.findall('odm:ItemDef', ns):
+                     oid = item_def.get('OID')
+
+                     # Try to find Question/TranslatedText
+                     question = ""
+                     q_elem = item_def.find('odm:Question', ns)
+                     if q_elem is not None:
+                         tt_elem = q_elem.find('odm:TranslatedText', ns)
+                         if tt_elem is not None:
+                             question = tt_elem.text.strip() if tt_elem.text else ""
+
+                     item_question_map[oid] = question
+         return item_question_map
+
+     item_question_map = parse_metadata(root)
+
+     data_rows = []
+
+     def get_local_name(tag):
+         if '}' in tag:
+             return tag.split('}', 1)[1]
+         return tag
+
+     for cd in root:
+         if get_local_name(cd.tag) == 'ClinicalData':
+             study_oid = cd.get('StudyOID')
+             for sd in cd:
+                 if get_local_name(sd.tag) == 'SubjectData':
+                     subject_key = sd.get('SubjectKey')
+
+                     # Helper for (possibly namespaced) attributes
+                     def get_attrib(elem, partial_name):
+                         if partial_name in elem.attrib:
+                             return elem.attrib[partial_name]
+                         for k, v in elem.attrib.items():
+                             if k.endswith("}" + partial_name):
+                                 return v
+                         return None
+
+                     study_subject_id = get_attrib(sd, 'StudySubjectID') or get_attrib(sd, 'studysubjectid')
+                     if not subject_key:
+                         subject_key = study_subject_id
+
+                     for child in sd:
+                         tag = get_local_name(child.tag)
+                         if tag == 'StudyEventData':
+                             study_event_oid = child.get('StudyEventOID')
+                             study_event_repeat_key = child.get('StudyEventRepeatKey')
+
+                             # Extract namespaced StartDate
+                             start_date = get_attrib(child, 'StartDate')
+
+                             for form in child:
+                                 f_tag = get_local_name(form.tag)
+                                 if f_tag == 'FormData':
+                                     form_oid = form.get('FormOID')
+
+                                     for ig in form:
+                                         ig_tag = get_local_name(ig.tag)
+                                         if ig_tag == 'ItemGroupData':
+                                             item_group_oid = ig.get('ItemGroupOID')
+                                             item_group_repeat_key = ig.get('ItemGroupRepeatKey')
+
+                                             for item in ig:
+                                                 i_tag = get_local_name(item.tag)
+                                                 if i_tag == 'ItemData':
+                                                     item_oid = item.get('ItemOID')
+                                                     value = item.get('Value')
+
+                                                     data_rows.append({
+                                                         'StudyOID': study_oid,
+                                                         'SubjectKey': subject_key,
+                                                         'StudySubjectID': study_subject_id,
+                                                         'StudyEventOID': study_event_oid,
+                                                         'StudyEventRepeatKey': study_event_repeat_key,
+                                                         'StudyEventStartDate': start_date,
+                                                         'FormOID': form_oid,
+                                                         'ItemGroupOID': item_group_oid,
+                                                         'ItemGroupRepeatKey': item_group_repeat_key,
+                                                         'ItemOID': item_oid,
+                                                         'Value': value,
+                                                         'Question': item_question_map.get(item_oid, "")
+                                                     })
+
+     df = pd.DataFrame(data_rows)
+     return df
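
Note: a self-contained sketch of the long format the parser emits, using a minimal inline ODM document (OIDs hypothetical):

    import tempfile

    xml = """<ODM xmlns="http://www.cdisc.org/ns/odm/v1.3">
      <ClinicalData StudyOID="S1">
        <SubjectData SubjectKey="001">
          <StudyEventData StudyEventOID="SE_V1" StudyEventRepeatKey="1">
            <FormData FormOID="F_VS_1">
              <ItemGroupData ItemGroupOID="IG_VS" ItemGroupRepeatKey="1">
                <ItemData ItemOID="I_VS_WEIGHT" Value="72"/>
              </ItemGroupData>
            </FormData>
          </StudyEventData>
        </SubjectData>
      </ClinicalData>
    </ODM>"""

    with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as f:
        f.write(xml)

    df = parse_odm_to_long_df(f.name)
    # One row: StudyOID="S1", SubjectKey="001", FormOID="F_VS_1",
    # ItemOID="I_VS_WEIGHT", Value="72", Question="" (no ItemDef metadata present)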
cdiscbuilder/sdtm.py ADDED
@@ -0,0 +1,30 @@
+ import pandas as pd
+ from .engine.config import load_config
+ from .engine.processor import process_domain
+
+ def create_sdtm_datasets(config_input, input_csv, output_dir):
+     if isinstance(config_input, dict):
+         # Assume the dict is already structured correctly or validated
+         config = config_input
+     else:
+         config = load_config(config_input)
+
+     # Get global defaults
+     default_keys = config.get('defaults', {}).get('keys', ["StudyOID", "SubjectKey", "ItemGroupRepeatKey", "StudyEventOID"])
+
+     print(f"Loading data from {input_csv}...")
+     df_long = pd.read_csv(input_csv)
+
+     for domain, settings_entry in config['domains'].items():
+
+         print(f"Processing domain: {domain}")
+
+         # Normalize to list
+         if isinstance(settings_entry, list):
+             sources = settings_entry
+         else:
+             sources = [settings_entry]
+
+         process_domain(domain, sources, df_long, default_keys, output_dir)
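
Note: besides a directory path, create_sdtm_datasets accepts an already-built config dict (the isinstance branch above). A sketch with one hand-rolled domain; odm_long.csv is assumed to come from the parsing step:

    config = {
        "defaults": {"keys": ["StudyOID", "SubjectKey", "ItemGroupRepeatKey", "StudyEventOID"]},
        "domains": {
            "AE": [{
                "formoid": "F_AE_1",
                "columns": {
                    "STUDYID": {"source": "StudyOID", "type": "str"},
                    "AETERM": {"source": "I_AE_TERM", "type": "str"},
                },
            }],
        },
    }
    create_sdtm_datasets(config, "odm_long.csv", "sdtm_output")
    # writes sdtm_output/AE.parquet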
cdiscbuilder/specs/AE.yaml ADDED
@@ -0,0 +1,25 @@
+ AE:
+   - formoid: "F_AE_1"
+     columns:
+       STUDYID:
+         source: "StudyOID"
+         type: "str"
+       DOMAIN:
+         literal: "AE"
+         type: "str"
+       USUBJID:
+         source: "StudySubjectID"
+         prefix: "PPT-"
+         type: "str"
+       AESEQ:
+         source: "ItemGroupRepeatKey"
+         type: "int"
+       AETERM:
+         source: "I_AE_TERM"
+         type: "str"
+       AESTDTC:
+         source: "I_AE_STARTDATE"
+         type: "str"
+       AESEV:
+         source: "I_AE_SEVERITY"
+         type: "str"
cdiscbuilder/specs/CM.yaml ADDED
@@ -0,0 +1,26 @@
+ CM:
+   - formoid: "F_CM_1"
+     keys: ["StudyOID", "StudySubjectID", "StudyEventOID", "StudyEventRepeatKey", "ItemGroupRepeatKey"]
+     columns:
+       STUDYID:
+         source: "StudyOID"
+         type: "str"
+       DOMAIN:
+         literal: "CM"
+         type: "str"
+       USUBJID:
+         source: "StudySubjectID"
+         prefix: "PPT-"
+         type: "str"
+       CMSEQ:
+         source: "ItemGroupRepeatKey"
+         type: "int"
+       CMTRT:
+         source: "I_CM_TERM"
+         type: "str"
+       CMDOSE:
+         source: "I_CM_DOSE"
+         type: "float"
+       CMDOSEU:
+         source: "I_CM_UNIT"
+         type: "str"
cdiscbuilder/specs/DM.yaml ADDED
@@ -0,0 +1,51 @@
+ DM:
+   - formoid: ["F_DEMOG_1", "F_DEMOG_2"]  # Example of merging data from multiple forms
+     keys: ["StudyOID", "StudySubjectID"]
+     columns:
+       STUDYID:
+         source: "StudyOID"
+         type: "str"
+       DOMAIN:
+         literal: "DM"
+         type: "str"
+       USUBJID:
+         source: "StudySubjectID"
+         prefix: "PPT-"
+         type: "str"
+         max_missing_pct: 0
+       SUBJID:
+         source: "StudySubjectID"
+         type: "str"
+       SEX:
+         source: "I_DEMOG_SEX"
+         type: "str"
+         value_mapping:
+           "F": "Female"
+           "M": "Male"
+       EDCSUBID:
+         source: "StudySubjectID"
+         type: "str"
+       SITEID:
+         source: "EDCSUBID"
+         type: "str"
+         substring_start: 3
+         substring_length: 3
+         value_mapping:
+           "002": "LAX"
+       CUSTOMSITEID:
+         source: "SITEID"
+         type: "str"
+         value_mapping:
+           "LAX": "LAX"
+         mapping_default: "SFO"
+         # mapping_default_source: "SEX"
+       RACE:
+         source: "I_DEMOG_RACE"
+         type: "str"
+       COUNTRY:
+         literal: "USA"
+         type: "str"
+       AGE:
+         source: "I_DEMOG_AGE"
+         type: "int"
+         max_missing_pct: 5
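
Note: the SITEID/CUSTOMSITEID chain above relies on GeneralProcessor's per-column order — dependency, then substring, then value mapping (with mapping_default), then prefix, then type — and on derived columns (EDCSUBID, SITEID) being visible to later mappings. A pandas sketch of the SITEID derivation for a hypothetical subject id:

    import pandas as pd

    s = pd.Series(["1110023565"])     # hypothetical EDCSUBID
    s = s.astype(str).str[3:3 + 3]    # substring_start: 3, substring_length: 3 -> "002"
    s = s.map({"002": "LAX"})         # value_mapping -> "LAX"
    # CUSTOMSITEID then maps "LAX" -> "LAX", with mapping_default "SFO" for any other site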
cdiscbuilder/specs/DS.yaml ADDED
@@ -0,0 +1,54 @@
+ DS:
+   # Block 1: Informed Consent
+   - formoid: "F_IC_1"
+     keys: ["StudyOID", "SubjectKey", "StudyEventOID", "StudyEventRepeatKey", "ItemGroupRepeatKey"]
+     columns:
+       STUDYID:
+         source: "StudyOID"
+         type: "str"
+       DOMAIN:
+         literal: "DS"
+         type: "str"
+       USUBJID:
+         source: "SubjectKey"
+         type: "str"
+       DSSEQ:
+         source: "ItemGroupRepeatKey"  # Will likely be 1; duplicates allowed across events in raw output
+         type: "int"
+       DSTERM:
+         literal: "INFORMED CONSENT"  # Enrolled/Screened etc
+         type: "str"
+       DSDECOD:
+         literal: "INFORMED CONSENT OBTAINED"
+         dependency: "I_IC_DATE"
+         dependency_false_value: "INFORMED CONSENT NOT OBTAINED"
+         type: "str"
+       DSSTDTC:
+         source: "I_IC_DATE"
+         type: "str"
+
+   # Block 2: Disposition (Termination)
+   - formoid: "F_DISP_1"
+     keys: ["StudyOID", "SubjectKey", "StudyEventOID", "StudyEventRepeatKey", "ItemGroupRepeatKey"]
+     columns:
+       STUDYID:
+         source: "StudyOID"
+         type: "str"
+       DOMAIN:
+         literal: "DS"
+         type: "str"
+       USUBJID:
+         source: "SubjectKey"
+         type: "str"
+       DSSEQ:
+         source: "ItemGroupRepeatKey"
+         type: "int"
+       DSTERM:
+         source: "I_DISP_TERM"
+         type: "str"
+       DSDECOD:
+         source: "I_DISP_DECOD"
+         type: "str"
+       DSSTDTC:
+         source: "I_DISP_DTC"
+         type: "str"
cdiscbuilder/specs/FA.yaml ADDED
@@ -0,0 +1,32 @@
+ FA:
+   # Findings About Adverse Events (compact list)
+   - formoid: "F_AE_1"
+     keys: ["StudyOID", "SubjectKey", "StudyEventOID", "StudyEventRepeatKey", "ItemGroupRepeatKey"]
+     columns:
+       STUDYID: {source: "StudyOID", type: "str"}
+       DOMAIN: {literal: "FA", type: "str"}
+       USUBJID: {source: "SubjectKey", type: "str"}
+       FASEQ: {source: "ItemGroupRepeatKey", type: "int"}
+
+       FAOBJ:
+         source: "I_AE_TERM"
+         type: "str"
+
+       # Compact list expansion
+       FATESTCD:
+         literal: ["SEV", "LOC"]
+         type: "str"
+
+       FATEST:
+         literal: ["Severity", "Location"]
+         type: "str"
+
+       FAORRES:
+         source: ["I_AE_SEVERITY", "I_AE_LOC_MOCK"]  # LOC_MOCK will come through as NaN
+         type: "str"
+
+       FASTRESC:
+         source: ["I_AE_SEVERITY", "I_AE_LOC_MOCK"]
+         type: "str"
+
+       FADTC: {source: "I_AE_STARTDATE", type: "str"}
cdiscbuilder/specs/IE.yaml ADDED
@@ -0,0 +1,35 @@
+ IE:
+   - formoid: "F_IE_1"
+     keys: ["StudyOID", "StudySubjectID", "StudyEventOID", "StudyEventRepeatKey", "ItemGroupRepeatKey"]
+     columns:
+       STUDYID:
+         source: "StudyOID"
+         type: "str"
+       DOMAIN:
+         literal: "IE"
+         type: "str"
+       USUBJID:
+         source: "StudySubjectID"
+         prefix: "PPT-"
+         type: "str"
+       IESEQ:
+         source: "ItemGroupRepeatKey"
+         type: "int"
+       IETESTCD:
+         literal: ["INCL01", "EXCL01"]
+         type: "str"
+       IETEST:
+         literal: ["Age >= 18", "Pregnant"]
+         type: "str"
+       IECAT:
+         literal: ["INCLUSION", "EXCLUSION"]
+         type: "str"
+       IEORRES:
+         source: ["I_IE_INCL_01", "I_IE_EXCL_01"]
+         type: "str"
+       IESTRESC:
+         source: ["I_IE_INCL_01", "I_IE_EXCL_01"]
+         type: "str"
+       IEDTC:
+         source: "I_IE_DTC"
+         type: "str"
cdiscbuilder/specs/PE.yaml ADDED
@@ -0,0 +1,25 @@
+ PE:
+   - formoid: "FORM.VITPHYEX"
+     keys: ["StudyOID", "SubjectKey", "StudyEventOID", "StudyEventRepeatKey", "ItemGroupRepeatKey"]
+     columns:
+       STUDYID:
+         source: "StudyOID"
+         type: "str"
+       DOMAIN:
+         literal: "PE"
+         type: "str"
+       USUBJID:
+         source: "SubjectKey"
+         type: "str"
+       PESEQ:
+         source: "ItemGroupRepeatKey"
+         type: "int"
+       PETESTCD:
+         source: "IT.BODY_SYS"
+         type: "str"
+       PEORRES:
+         source: "IT.ABNORM"
+         type: "str"
+       PECOMM:
+         source: "IT.COMMT1"
+         type: "str"
cdiscbuilder/specs/SV.yaml ADDED
@@ -0,0 +1,23 @@
+ SV:
+   # Compact SV: trigger on any of these forms, effectively finding all visits
+   - formoid: ["F_DEMOG_1", "F_VS_1", "F_DISP_1", "F_LAB_1"]
+
+     # Group by event (visit), ignoring which specific form triggered it
+     keys: ["StudyOID", "SubjectKey", "StudyEventOID", "StudyEventRepeatKey"]
+
+     columns:
+       STUDYID:
+         source: "StudyOID"
+         type: "str"
+       DOMAIN:
+         literal: "SV"
+         type: "str"
+       USUBJID:
+         source: "SubjectKey"
+         type: "str"
+       VISIT:
+         source: "StudyEventOID"
+         type: "str"
+       SVSTDTC:
+         source: "StudyEventStartDate"
+         type: "str"
cdiscbuilder/specs/VS.yaml ADDED
@@ -0,0 +1,38 @@
+ VS:
+   - formoid: "F_VS_1"
+     keys: ["StudyOID", "SubjectKey", "StudyEventOID", "StudyEventRepeatKey", "ItemGroupRepeatKey"]
+     columns:
+       STUDYID: {source: "StudyOID", type: "str"}
+       DOMAIN: {literal: "VS", type: "str"}
+       USUBJID: {source: "SubjectKey", type: "str"}
+       VSSEQ: {source: "ItemGroupRepeatKey", type: "int"}
+
+       VSTESTCD:
+         literal: ["HEIGHT", "WEIGHT", "SYSBP", "DIABP"]
+         type: "str"
+
+       VSTEST:
+         literal: ["Height", "Weight", "Systolic Blood Pressure", "Diastolic Blood Pressure"]
+         type: "str"
+
+       VSORRES:
+         source: ["I_VS_HEIGHT", "I_VS_WEIGHT", "I_VS_SYSBP", "I_VS_DIABP"]
+         type: "str"
+
+       VSORRESU:
+         literal: ["cm", "kg", "mmHg", "mmHg"]
+         type: "str"
+
+       VSSTRESC:
+         source: ["I_VS_HEIGHT", "I_VS_WEIGHT", "I_VS_SYSBP", "I_VS_DIABP"]
+         type: "str"
+
+       VSSTRESN:
+         source: ["I_VS_HEIGHT", "I_VS_WEIGHT", "I_VS_SYSBP", "I_VS_DIABP"]
+         type: "float"
+
+       VSSTRESU:
+         literal: ["cm", "kg", "mmHg", "mmHg"]
+         type: "str"
+
+       VSDTC: {literal: "", type: "str"}
cdiscbuilder/specs/defaults.yaml ADDED
@@ -0,0 +1 @@
+ keys: ["StudyOID", "StudySubjectID", "StudyEventOID", "StudyEventRepeatKey", "ItemGroupRepeatKey"]
cdiscbuilder/specs/schema.yaml ADDED
@@ -0,0 +1,63 @@
+ version: "1.0"
+ definitions:
+   column_general:
+     type: dict
+     properties:
+       source: {type: str}
+       literal: {type: [str, int, float, bool]}
+       type: {type: str, enum: [str, int, float, bool]}
+       max_missing_pct: {type: [int, float]}
+       value_mapping: {type: dict}
+       prefix: {type: str}
+       substring_start: {type: int}
+       substring_length: {type: int}
+       mapping_default: {type: [str, int, float, bool]}
+       mapping_default_source: {type: str}
+       dependency: {type: str}
+       dependency_false_value: {type: [str, int, float, bool]}
+       regex_extract: {type: str}
+
+   column_findings:
+     type: dict
+     required: [name]
+     properties:
+       name: {type: str}
+       label: {type: str}
+       type: {type: str}
+       literal: {type: [str, int, float, bool]}
+       source: {type: str}
+       function: {type: str}
+       formoid: {type: [str, list]}
+       itemoid: {type: [str, list]}
+       value_mapping: {type: dict}
+       prefix: {type: str}
+       substring_start: {type: int}
+       substring_length: {type: int}
+       mapping_default: {type: [str, int, float, bool]}
+       mapping_default_source: {type: str}
+
+ schemas:
+   general_domain:
+     type: list
+     item_schema:
+       type: dict
+       required: [formoid, columns]
+       properties:
+         type: {type: str, enum: ["general", "finding"]}
+         item_group_regex: {type: str}
+         item_oid_regex: {type: str}
+         formoid: {type: [str, list]}
+         keys: {type: list}
+         columns:
+           type: dict
+           values_schema: {ref: column_general}
+
+   findings_domain:
+     type: dict
+     required: [type, columns]
+     properties:
+       type: {const: FINDINGS}
+       definitions: {type: dict}  # YAML anchors container
+       columns:
+         type: list
+         item_schema: {ref: column_findings}
cdiscbuilder-0.2.8.dist-info/METADATA ADDED
@@ -0,0 +1,10 @@
+ Metadata-Version: 2.4
+ Name: cdiscbuilder
+ Version: 0.2.8
+ Summary: A package to convert ODM XML to SDTM/ADaM Datasets
+ Author-email: Ming-Chun Chen <hellomingchun@gmail.com>
+ Requires-Python: >=3.8
+ License-File: LICENSE
+ Requires-Dist: pandas
+ Requires-Dist: pyyaml
+ Dynamic: license-file
cdiscbuilder-0.2.8.dist-info/RECORD ADDED
@@ -0,0 +1,29 @@
+ cdiscbuilder/__init__.py,sha256=anBcPGggCP0f-o9w0IsZYV49rl6HvJbHcuDN39x3mxw,23
+ cdiscbuilder/cli.py,sha256=RfSjnAgc7_BsNHDun2MKsHRvpl2aUnFHKok0gnK1HEw,1677
+ cdiscbuilder/odm.py,sha256=lSEvceOMr0D0oPNrr1P8MMoaT7j4XqhCEW53TN2nI1U,5229
+ cdiscbuilder/sdtm.py,sha256=yK4DIrAkCVNzOXae9oE-7RfqGiWkXPOrfxY6lNYCrPc,1000
+ cdiscbuilder/engine/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cdiscbuilder/engine/config.py,sha256=WEcQ6ui8Ozb08GNrzcpVlK0etoVPI_5-Yb_VoDOXAhg,2093
+ cdiscbuilder/engine/functions.py,sha256=SdyhFftFoxIzL-pzIcjVsf9CbZmxKKo7J1J0H7t28KU,1876
+ cdiscbuilder/engine/processor.py,sha256=R5I6ZS2Yj49m_uBudJhvoNqmWn7A7-30xaQe59H5nnI,1457
+ cdiscbuilder/engine/validate.py,sha256=vGUirL3yPk6a9D6MlRpmHf54tB-PMNH7FMKCxsXROgQ,3799
+ cdiscbuilder/engine/classes/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ cdiscbuilder/engine/classes/finding.py,sha256=J3y-VTMyX0WGqLxTJ1fC-OqfcEm7wK2987A-EuLyOgo,4845
+ cdiscbuilder/engine/classes/general.py,sha256=gqndzK2A2Eqwe6_5qxLdYHq-bwjT9hnYn1HafdTgDyw,11755
+ cdiscbuilder/specs/AE.yaml,sha256=xeTuR0uNOzHXgTJerIZl8K5ewP2hL3lbBSu175WHjeg,545
+ cdiscbuilder/specs/CM.yaml,sha256=s-tV27tdw9c_EcnVukaZS3iONi188SVBzJ7mMqNROm8,635
+ cdiscbuilder/specs/DM.yaml,sha256=3N7y_F2eMP2P7SWun8oxaHfxvn80PYdgVQ_eyE3pPGg,1258
+ cdiscbuilder/specs/DS.yaml,sha256=YeJmkt5xjyHhYl6H7rroRzcyEvVtRxloDTpZgcRuruc,1498
+ cdiscbuilder/specs/FA.yaml,sha256=oe9Hkoxfw0Zx9rPNiPxatgMEJTeZUBVZU7A0u-Hw9qA,960
+ cdiscbuilder/specs/IE.yaml,sha256=E1U_ftpjm_76mBS10XFVg1h8Ez8TaXIPeXccFjvFUiY,913
+ cdiscbuilder/specs/PE.yaml,sha256=-_L8dxYROif35DRtpmL03C2te7-qRNhTrL6npMLGm8k,613
+ cdiscbuilder/specs/SV.yaml,sha256=M-Bo7d_gmhLRpqaKarQQpadmW9uc4Qolg1mRI36Mt4U,656
+ cdiscbuilder/specs/VS.yaml,sha256=Xm0IVMzImAVmHzSEVbaHe372PqDYPZ-Vq1lED7T1SMk,1168
+ cdiscbuilder/specs/defaults.yaml,sha256=5Hq2Yj00cC_watAGSXd6bKSqjb2fyp_AtU4nQgj5Ans,100
+ cdiscbuilder/specs/schema.yaml,sha256=7iBdin4xvBAk3e5oAp6lC2uzbroGyZL_tJNrDxG2DSI,1864
+ cdiscbuilder-0.2.8.dist-info/licenses/LICENSE,sha256=Da4Pm8tEdB1ycKkAkF2EwfHd-8l0wrJ_uwejzfBRHFE,1092
+ cdiscbuilder-0.2.8.dist-info/METADATA,sha256=WQgqxUnUyFXpQ8WyLHwHM8ovG5Vnc-BgmTr3vLgXjTA,292
+ cdiscbuilder-0.2.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ cdiscbuilder-0.2.8.dist-info/entry_points.txt,sha256=KsuiJD4hsfjRqDkdvWgLFPeQCz7QBl73RA665BmF0Pw,53
+ cdiscbuilder-0.2.8.dist-info/top_level.txt,sha256=3x5ENgnXBENgXKp5Zsht_4_OmA5wLVvDuk0UEkYOGNw,13
+ cdiscbuilder-0.2.8.dist-info/RECORD,,
cdiscbuilder-0.2.8.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
cdiscbuilder-0.2.8.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ cdisc-sdtm = cdiscbuilder.cli:main
cdiscbuilder-0.2.8.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2026 Ming-Chun Chen
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
cdiscbuilder-0.2.8.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ cdiscbuilder