metameq 2026.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- metameq/__init__.py +42 -0
- metameq/_version.py +21 -0
- metameq/config/__init__.py +0 -0
- metameq/config/config.yml +3 -0
- metameq/config/standards.yml +1648 -0
- metameq/src/__init__.py +0 -0
- metameq/src/__main__.py +34 -0
- metameq/src/metadata_configurator.py +512 -0
- metameq/src/metadata_extender.py +1168 -0
- metameq/src/metadata_merger.py +362 -0
- metameq/src/metadata_transformers.py +335 -0
- metameq/src/metadata_validator.py +387 -0
- metameq/src/util.py +299 -0
- metameq/tests/__init__.py +0 -0
- metameq/tests/data/invalid.yml +1 -0
- metameq/tests/data/test_config.yml +9 -0
- metameq/tests/test_metadata_configurator.py +2334 -0
- metameq/tests/test_metadata_extender.py +2610 -0
- metameq/tests/test_metadata_merger.py +657 -0
- metameq/tests/test_metadata_transformers.py +277 -0
- metameq/tests/test_metadata_validator.py +1191 -0
- metameq/tests/test_util.py +436 -0
- metameq-2026.1.1.dist-info/METADATA +21 -0
- metameq-2026.1.1.dist-info/RECORD +27 -0
- metameq-2026.1.1.dist-info/WHEEL +5 -0
- metameq-2026.1.1.dist-info/entry_points.txt +2 -0
- metameq-2026.1.1.dist-info/top_level.txt +1 -0
metameq/src/metadata_merger.py
@@ -0,0 +1,362 @@
+import pandas
+from typing import List, Optional, Literal
+from metameq.src.util import validate_required_columns_exist
+
+
+def merge_sample_and_subject_metadata(
+        sample_metadata_df: pandas.DataFrame,
+        subject_metadata_df: pandas.DataFrame,
+        merge_col_sample: str, merge_col_subject: Optional[str] = None,
+        join_type: Literal["left", "right", "inner", "outer"] = "left") -> \
+        pandas.DataFrame:
+    """Merge sample metadata with subject metadata using a many-to-one relationship.
+
+    This is a convenience wrapper around merge_many_to_one_metadata that uses
+    standard naming conventions for sample and subject metadata.
+
+    Parameters
+    ----------
+    sample_metadata_df : pandas.DataFrame
+        DataFrame containing sample metadata (the "many" side of the relationship).
+    subject_metadata_df : pandas.DataFrame
+        DataFrame containing subject metadata (the "one" side of the relationship).
+    merge_col_sample : str
+        Column name in sample_metadata_df to merge on.
+    merge_col_subject : str, optional
+        Column name in subject_metadata_df to merge on. If None, uses merge_col_sample.
+        Defaults to None.
+    join_type : {"left", "right", "inner", "outer"}, optional
+        Type of join to perform. Defaults to "left".
+
+    Returns
+    -------
+    pandas.DataFrame
+        Merged DataFrame containing combined sample and subject metadata.
+
+    Raises
+    ------
+    ValueError
+        If merge columns are missing or contain invalid values.
+        If there are duplicate values in the subject merge column.
+        If there are non-merge columns with the same name in both DataFrames.
+    """
+    result = merge_many_to_one_metadata(
+        sample_metadata_df, subject_metadata_df,
+        merge_col_sample, merge_col_subject,
+        "sample", "subject", join_type=join_type)
+
+    return result
+
+
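As a quick orientation, here is a minimal usage sketch of the wrapper above; the DataFrames, column names, and values are hypothetical, not taken from the package:

    import pandas
    from metameq.src.metadata_merger import merge_sample_and_subject_metadata

    # three samples drawn from two subjects (many-to-one)
    samples = pandas.DataFrame({"sample_id": ["s1", "s2", "s3"],
                                "subject_id": ["p1", "p1", "p2"]})
    subjects = pandas.DataFrame({"subject_id": ["p1", "p2"],
                                 "sex": ["female", "male"]})

    # each subject's metadata is joined onto every matching sample row
    merged = merge_sample_and_subject_metadata(samples, subjects, "subject_id")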
+def merge_many_to_one_metadata(
+        many_metadata_df: pandas.DataFrame, one_metadata_df: pandas.DataFrame,
+        merge_col_many: str, merge_col_one: Optional[str] = None,
+        set_name_many: str = "many-set", set_name_one: str = "one-set",
+        join_type: Literal["left", "right", "inner", "outer"] = "left") -> \
+        pandas.DataFrame:
+    """Merge two metadata DataFrames with a many-to-one relationship.
+
+    This function merges a DataFrame that may have multiple records per merge key
+    (many_metadata_df) with a DataFrame that must have unique merge keys
+    (one_metadata_df).
+
+    Parameters
+    ----------
+    many_metadata_df : pandas.DataFrame
+        DataFrame that may have multiple records per merge key.
+    one_metadata_df : pandas.DataFrame
+        DataFrame that must have unique merge keys.
+    merge_col_many : str
+        Column name in many_metadata_df to merge on.
+    merge_col_one : str, optional
+        Column name in one_metadata_df to merge on. If None, uses merge_col_many.
+        Defaults to None.
+    set_name_many : str, optional
+        Name of the many_metadata_df set, used in error messages.
+        Defaults to "many-set".
+    set_name_one : str, optional
+        Name of the one_metadata_df set, used in error messages.
+        Defaults to "one-set".
+    join_type : {"left", "right", "inner", "outer"}, optional
+        Type of join to perform. Defaults to "left".
+
+    Returns
+    -------
+    pandas.DataFrame
+        Merged DataFrame containing combined metadata.
+
+    Raises
+    ------
+    ValueError
+        If merge columns are missing or contain invalid values.
+        If there are duplicate values in the one_metadata_df merge column.
+        If there are non-merge columns with the same name in both DataFrames.
+    """
+    merge_col_one = merge_col_many if merge_col_one is None else merge_col_one
+
+    # Note: duplicates in the many-set merge column are expected, as there may
+    # be multiple records for the same one-set record
+    _validate_merge(many_metadata_df, one_metadata_df, merge_col_many,
+                    merge_col_one, set_name_many, set_name_one,
+                    check_left_for_dups=False)
+
+    # merge the many and one dfs on the selected columns
+    merge_df = pandas.merge(many_metadata_df, one_metadata_df,
+                            how=join_type, validate="many_to_one",
+                            left_on=merge_col_many,
+                            right_on=merge_col_one)
+
+    return merge_df
+
+
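Called directly, merge_many_to_one_metadata also allows differing merge column names and custom set names for error messages; a brief sketch with made-up data:

    import pandas
    from metameq.src.metadata_merger import merge_many_to_one_metadata

    readings = pandas.DataFrame({"reading_id": [1, 2, 3],
                                 "site": ["a", "a", "b"]})
    sites = pandas.DataFrame({"site_code": ["a", "b"],
                              "elevation_m": [120, 455]})

    # duplicates are allowed in readings["site"] but not in sites["site_code"]
    merged = merge_many_to_one_metadata(
        readings, sites, "site", "site_code",
        set_name_many="readings", set_name_one="sites")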
+def merge_one_to_one_metadata(
+        left_metadata_df: pandas.DataFrame,
+        right_metadata_df: pandas.DataFrame,
+        merge_col_left: str, merge_col_right: Optional[str] = None,
+        set_name_left: str = "left", set_name_right: str = "right",
+        join_type: Literal["left", "right", "inner", "outer"] = "left") -> \
+        pandas.DataFrame:
+    """Merge two metadata DataFrames with a one-to-one relationship.
+
+    This function merges two DataFrames where each DataFrame's merge key must
+    be unique in that DataFrame.
+
+    Parameters
+    ----------
+    left_metadata_df : pandas.DataFrame
+        Left DataFrame to merge.
+    right_metadata_df : pandas.DataFrame
+        Right DataFrame to merge.
+    merge_col_left : str
+        Column name in left_metadata_df to merge on.
+    merge_col_right : str, optional
+        Column name in right_metadata_df to merge on. If None, uses merge_col_left.
+        Defaults to None.
+    set_name_left : str, optional
+        Name of the left_metadata_df set, used in error messages.
+        Defaults to "left".
+    set_name_right : str, optional
+        Name of the right_metadata_df set, used in error messages.
+        Defaults to "right".
+    join_type : {"left", "right", "inner", "outer"}, optional
+        Type of join to perform. Defaults to "left".
+
+    Returns
+    -------
+    pandas.DataFrame
+        Merged DataFrame containing combined metadata.
+
+    Raises
+    ------
+    ValueError
+        If merge columns are missing or contain invalid values.
+        If there are duplicate values in either merge column.
+        If there are non-merge columns with the same name in both DataFrames.
+    """
+    merge_col_right = \
+        merge_col_left if merge_col_right is None else merge_col_right
+
+    _validate_merge(left_metadata_df, right_metadata_df, merge_col_left,
+                    merge_col_right, set_name_left, set_name_right)
+
+    # merge the left and right dfs on the selected columns
+    merge_df = pandas.merge(left_metadata_df, right_metadata_df,
+                            how=join_type, validate="one_to_one",
+                            left_on=merge_col_left,
+                            right_on=merge_col_right)
+
+    return merge_df
+
+
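A corresponding sketch for the one-to-one case, again with hypothetical data; duplicate keys on either side would raise ValueError:

    import pandas
    from metameq.src.metadata_merger import merge_one_to_one_metadata

    clinical = pandas.DataFrame({"subject_id": ["p1", "p2"],
                                 "bmi": [22.1, 27.4]})
    genetic = pandas.DataFrame({"participant": ["p1", "p2"],
                                "genotype": ["AA", "AG"]})

    merged = merge_one_to_one_metadata(
        clinical, genetic, "subject_id", "participant",
        set_name_left="clinical", set_name_right="genetic")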
+def find_common_df_cols(left_df: pandas.DataFrame,
+                        right_df: pandas.DataFrame) -> List[str]:
+    """Find column names that exist in both DataFrames.
+
+    Parameters
+    ----------
+    left_df : pandas.DataFrame
+        First DataFrame to compare.
+    right_df : pandas.DataFrame
+        Second DataFrame to compare.
+
+    Returns
+    -------
+    List[str]
+        List of column names that exist in both DataFrames, sorted alphabetically.
+    """
+    left_non_merge_cols = set(left_df.columns)
+    right_non_merge_cols = set(right_df.columns)
+    common_cols = left_non_merge_cols.intersection(right_non_merge_cols)
+    return sorted(list(common_cols))
+
+
+def find_common_col_names(left_cols: List[str], right_cols: List[str],
+                          left_exclude_list: Optional[List[str]] = None,
+                          right_exclude_list: Optional[List[str]] = None
+                          ) -> List[str]:
+    """Find column names that exist in both lists, excluding specified columns.
+
+    Parameters
+    ----------
+    left_cols : List[str]
+        First list of column names to compare.
+    right_cols : List[str]
+        Second list of column names to compare.
+    left_exclude_list : List[str], optional
+        List of column names to exclude from left_cols.
+        Defaults to None.
+    right_exclude_list : List[str], optional
+        List of column names to exclude from right_cols.
+        Defaults to None.
+
+    Returns
+    -------
+    List[str]
+        List of column names that exist in both lists (after exclusions),
+        sorted alphabetically.
+    """
+    if left_exclude_list is None:
+        left_exclude_list = []
+    if right_exclude_list is None:
+        right_exclude_list = []
+
+    left_non_merge_cols = set(left_cols) - set(left_exclude_list)
+    right_non_merge_cols = set(right_cols) - set(right_exclude_list)
+    common_cols = left_non_merge_cols.intersection(right_non_merge_cols)
+    return sorted(list(common_cols))
+
+
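The two lookup helpers differ only in whether they take whole DataFrames or raw column collections; a small illustration with invented column names:

    import pandas
    from metameq.src.metadata_merger import (find_common_col_names,
                                             find_common_df_cols)

    a = pandas.DataFrame(columns=["sample_id", "age", "notes"])
    b = pandas.DataFrame(columns=["sample_id", "sex", "notes"])

    find_common_df_cols(a, b)  # ['notes', 'sample_id']
    find_common_col_names(a.columns, b.columns,
                          left_exclude_list=["sample_id"],
                          right_exclude_list=["sample_id"])  # ['notes']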
+def _validate_merge(
+        left_df: pandas.DataFrame, right_df: pandas.DataFrame,
+        left_on: str, right_on: str, set_name_left: str = "left",
+        set_name_right: str = "right",
+        check_left_for_dups: bool = True, check_right_for_dups: bool = True) \
+        -> None:
+    """Validate that two DataFrames can be merged.
+
+    Checks that:
+    1. Required merge columns exist
+    2. No NaN values are in merge columns
+    3. No duplicate values are in merge columns (if specified)
+    4. No common non-merge column names exist in both DataFrames
+
+    Parameters
+    ----------
+    left_df : pandas.DataFrame
+        Left DataFrame to validate.
+    right_df : pandas.DataFrame
+        Right DataFrame to validate.
+    left_on : str
+        Column name in left_df to merge on.
+    right_on : str
+        Column name in right_df to merge on.
+    set_name_left : str, optional
+        Name of the left_df set, used in error messages.
+        Defaults to "left".
+    set_name_right : str, optional
+        Name of the right_df set, used in error messages.
+        Defaults to "right".
+    check_left_for_dups : bool, optional
+        Whether to check for duplicates in left_df merge column.
+        Defaults to True.
+    check_right_for_dups : bool, optional
+        Whether to check for duplicates in right_df merge column.
+        Defaults to True.
+
+    Raises
+    ------
+    ValueError
+        If any validation checks fail.
+    """
+    validate_required_columns_exist(
+        left_df, [left_on],
+        f"{set_name_left} metadata missing merge column")
+    validate_required_columns_exist(
+        right_df, [right_on],
+        f"{set_name_right} metadata missing merge column")
+
+    error_msgs = []
+    # check for nans in the merge columns
+    error_msgs.extend(_check_for_nans(
+        left_df, set_name_left, left_on))
+    error_msgs.extend(_check_for_nans(
+        right_df, set_name_right, right_on))
+
+    # check for duplicates
+    if check_left_for_dups:
+        error_msgs.extend(_check_for_duplicate_field_vals(
+            left_df, set_name_left, left_on))
+    if check_right_for_dups:
+        error_msgs.extend(_check_for_duplicate_field_vals(
+            right_df, set_name_right, right_on))
+
+    # check for non-merge columns with the same name in both dataframes
+    common_cols = find_common_col_names(
+        left_df.columns, right_df.columns, [left_on], [right_on])
+    if common_cols:
+        error_msgs.append(
+            f"Both {set_name_left} and {set_name_right} metadata have "
+            f"non-merge columns with the following names: {common_cols}")
+
+    if error_msgs:
+        joined_msgs = "\n".join(error_msgs)
+        raise ValueError(f"Errors in metadata to merge:\n{joined_msgs}")
+
+
+def _check_for_duplicate_field_vals(
+        metadata_df: pandas.DataFrame, df_name: str,
+        col_name: str) -> List[str]:
+    """Check for duplicate values in a DataFrame column.
+
+    Parameters
+    ----------
+    metadata_df : pandas.DataFrame
+        DataFrame to check for duplicates.
+    df_name : str
+        Name of the DataFrame, used in error messages.
+    col_name : str
+        Name of the column to check for duplicates.
+
+    Returns
+    -------
+    List[str]
+        List of error messages for any duplicates found.
+        Empty list if no duplicates found.
+    """
+    error_msgs = []
+    duplicates_mask = metadata_df.duplicated(subset=col_name)
+    if duplicates_mask.any():
+        duplicates = metadata_df.loc[duplicates_mask, col_name].unique()
+        duplicates.sort()
+
+        # generate an error message including the duplicate values
+        error_msgs.append(
+            f"'{df_name}' metadata has duplicates of the following values "
+            f"in column '{col_name}': {duplicates}")
+    return error_msgs
+
+
+def _check_for_nans(metadata_df: pandas.DataFrame,
+                    df_name: str, col_name: str) -> List[str]:
+    """Check for NaN values in a DataFrame column.
+
+    Parameters
+    ----------
+    metadata_df : pandas.DataFrame
+        DataFrame to check for NaNs.
+    df_name : str
+        Name of the DataFrame, used in error messages.
+    col_name : str
+        Name of the column to check for NaNs.
+
+    Returns
+    -------
+    List[str]
+        List of error messages for any NaNs found.
+        Empty list if no NaNs found.
+    """
+    error_msgs = []
+    nans_mask = metadata_df[col_name].isna()
+    if nans_mask.any():
+        error_msgs.append(
+            f"'{df_name}' metadata has NaNs in column '{col_name}'")
+    return error_msgs

metameq/src/metadata_transformers.py
@@ -0,0 +1,335 @@
+import pandas
+from dateutil import parser
+from typing import Any, Dict, List, Union
+from datetime import datetime
+
+
+# individual transformer functions
+def pass_through(row: pandas.Series, source_fields: List[str]) -> Any:
+    """Pass through a value from a source field without transformation.
+
+    Parameters
+    ----------
+    row : pandas.Series
+        Row of data containing the source field.
+    source_fields : List[str]
+        List containing exactly one source field name.
+
+    Returns
+    -------
+    Any
+        The value from the source field.
+
+    Raises
+    ------
+    ValueError
+        If source_fields does not contain exactly one field name.
+    """
+    return _get_one_source_field(row, source_fields, "pass_through")
+
+
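Every transformer in this module shares the same calling convention: it receives a whole row plus a list that must name exactly one source field. A minimal sketch with a made-up row:

    import pandas
    from metameq.src.metadata_transformers import pass_through

    row = pandas.Series({"host_age": 34, "host_sex": "F"})
    pass_through(row, ["host_age"])  # returns 34
    # passing zero or multiple source fields raises ValueError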
+def transform_input_sex_to_std_sex(row: pandas.Series, source_fields: List[str]) -> str:
+    """Transform input sex value to standardized sex value.
+
+    Parameters
+    ----------
+    row : pandas.Series
+        Row of data containing the source field.
+    source_fields : List[str]
+        List containing exactly one source field name.
+
+    Returns
+    -------
+    str
+        Standardized sex value: 'female', 'male', 'intersex', or 'not provided'.
+
+    Raises
+    ------
+    ValueError
+        If source_fields does not contain exactly one field name.
+        If the input sex value is not recognized.
+    """
+    x = _get_one_source_field(
+        row, source_fields, "standardize_input_sex")
+
+    return standardize_input_sex(x)
+
+
+def transform_age_to_life_stage(row: pandas.Series, source_fields: List[str]) -> str:
+    """Transform age in years to life stage category.
+
+    Note: Input age is assumed to be in years. Because of this, this function
+    does NOT attempt to identify neonates--children aged 0-6 *weeks*. All
+    ages under 17 are considered "child".
+
+    Parameters
+    ----------
+    row : pandas.Series
+        Row of data containing the source field.
+    source_fields : List[str]
+        List containing exactly one source field name.
+
+    Returns
+    -------
+    str
+        Life stage category: 'child' for ages < 17, 'adult' for ages >= 17.
+
+    Raises
+    ------
+    ValueError
+        If source_fields does not contain exactly one field name.
+        If the age value is not convertible to an integer.
+    """
+    x = _get_one_source_field(
+        row, source_fields, "transform_age_to_life_stage")
+    return set_life_stage_from_age_yrs(x, source_fields[0])
+
+
+def transform_date_to_formatted_date(row: pandas.Series, source_fields: List[str]) -> str:
+    """Transform date to standardized format (YYYY-MM-DD HH:MM).
+
+    Parameters
+    ----------
+    row : pandas.Series
+        Row of data containing the source field.
+    source_fields : List[str]
+        List containing exactly one source field name.
+
+    Returns
+    -------
+    str
+        Formatted date string in YYYY-MM-DD HH:MM format.
+
+    Raises
+    ------
+    ValueError
+        If source_fields does not contain exactly one field name.
+        If the source field cannot be parsed as a date.
+    """
+    x = _get_one_source_field(
+        row, source_fields, "transform_date_to_formatted_date")
+    return format_a_datetime(x, source_fields[0])
+
+
+def help_transform_mapping(
+        row: pandas.Series,
+        source_fields: List[str],
+        mapping: Dict[str, Any],
+        field_name: str = "help_transform_mapping") -> Any:
+    """Transform a value using a provided mapping dictionary.
+
+    Parameters
+    ----------
+    row : pandas.Series
+        Row of data containing the source field.
+    source_fields : List[str]
+        List containing exactly one source field name.
+    mapping : Dict[str, Any]
+        Dictionary mapping input values to output values.
+    field_name : str, optional
+        Name of the field being transformed, used in error messages.
+        Defaults to "help_transform_mapping".
+
+    Returns
+    -------
+    Any
+        The mapped value from the mapping dictionary.
+
+    Raises
+    ------
+    ValueError
+        If source_fields does not contain exactly one field name.
+        If the input value is not found in the mapping dictionary.
+    """
+    x = _get_one_source_field(
+        row, source_fields, field_name)
+
+    return _help_transform_mapping(x, mapping, field_name)
+
+
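A short sketch of the mapping-based transformer with an invented mapping; unmapped values raise ValueError:

    import pandas
    from metameq.src.metadata_transformers import help_transform_mapping

    row = pandas.Series({"smoker": "y"})
    help_transform_mapping(row, ["smoker"], {"y": True, "n": False},
                           field_name="smoker")  # returns True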
+# helper functions
+def standardize_input_sex(input_val: str) -> str:
+    """Standardize sex input to Qiita standard values.
+
+    Parameters
+    ----------
+    input_val : str
+        Input sex value to standardize.
+
+    Returns
+    -------
+    str
+        Standardized sex value: 'female', 'male', 'intersex', or 'not provided'.
+
+    Raises
+    ------
+    ValueError
+        If the input sex value is not recognized.
+    """
+    qiita_standard_female = "female"
+    qiita_standard_male = "male"
+    qiita_standard_intersex = "intersex"
+
+    sex_mapping = {
+        "female": qiita_standard_female,
+        "f": qiita_standard_female,
+        "male": qiita_standard_male,
+        "m": qiita_standard_male,
+        "intersex": qiita_standard_intersex,
+        "prefernottoanswer": "not provided"
+    }
+
+    standardized_sex = _help_transform_mapping(
+        input_val, sex_mapping, "sex", make_lower=True)
+    return standardized_sex
+
+
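Because the lookup is lowercased first, the helper accepts mixed-case input; for example:

    from metameq.src.metadata_transformers import standardize_input_sex

    standardize_input_sex("F")                  # 'female'
    standardize_input_sex("Male")               # 'male'
    standardize_input_sex("PreferNotToAnswer")  # 'not provided'
    # any other value raises ValueError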
+def set_life_stage_from_age_yrs(age_in_yrs: Union[float, int], source_name: str = "input") -> str:
+    """Convert age in years to life stage category.
+
+    Note: Input age is assumed to be in years. Because of this, this function
+    does NOT attempt to identify neonates--children aged 0-6 *weeks*. All
+    ages under 17 are considered "child".
+
+    Parameters
+    ----------
+    age_in_yrs : Union[float, int]
+        Age in years.
+    source_name : str, optional
+        Name of the source field, used in error messages.
+        Defaults to "input".
+
+    Returns
+    -------
+    str
+        Life stage category: 'child' for ages < 17, 'adult' for ages >= 17.
+        If age_in_yrs is null, it is returned unchanged.
+
+    Raises
+    ------
+    ValueError
+        If age_in_yrs is non-null and not convertible to an integer.
+    """
+    if pandas.isnull(age_in_yrs):
+        return age_in_yrs
+
+    try:
+        x = int(age_in_yrs)
+    except ValueError:
+        raise ValueError(f"{source_name} must be an integer")
+
+    if x < 17:
+        return "child"
+    return "adult"
+
+
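The age cutoff behaves as follows; note that int() truncates, so 16.9 still falls in "child":

    from metameq.src.metadata_transformers import set_life_stage_from_age_yrs

    set_life_stage_from_age_yrs(6)     # 'child'
    set_life_stage_from_age_yrs(17)    # 'adult'
    set_life_stage_from_age_yrs(16.9)  # 'child'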
+def format_a_datetime(x: Union[str, datetime, None], source_name: str = "input") -> str:
+    """Format a datetime value to YYYY-MM-DD HH:MM string format.
+
+    Parameters
+    ----------
+    x : Union[str, datetime, None]
+        Input datetime value to format.
+    source_name : str, optional
+        Name of the source field, used in error messages.
+        Defaults to "input".
+
+    Returns
+    -------
+    str
+        Formatted datetime string in YYYY-MM-DD HH:MM format.
+        If x is null, it is returned unchanged.
+
+    Raises
+    ------
+    ValueError
+        If the input cannot be parsed as a datetime.
+    """
+    if pandas.isnull(x):
+        return x
+    if hasattr(x, "strftime"):
+        strftimeable_x = x
+    else:
+        try:
+            strftimeable_x = parser.parse(x)
+        except:  # noqa: E722
+            raise ValueError(f"{source_name} cannot be parsed to a date")
+
+    formatted_x = strftimeable_x.strftime('%Y-%m-%d %H:%M')
+    return formatted_x
+
+
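Both datetime objects and free-form date strings (anything dateutil can parse) are accepted; for example:

    from datetime import datetime
    from metameq.src.metadata_transformers import format_a_datetime

    format_a_datetime(datetime(2024, 6, 3, 14, 15))  # '2024-06-03 14:15'
    format_a_datetime("3 June 2024 2:15 PM")         # '2024-06-03 14:15'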
+def _get_one_source_field(row: pandas.Series, source_fields: List[str], func_name: str) -> Any:
+    """Get a single source field value from a row of data.
+
+    Parameters
+    ----------
+    row : pandas.Series
+        Row of data containing the source field.
+    source_fields : List[str]
+        List of source field names.
+    func_name : str
+        Name of the calling function, used in error messages.
+
+    Returns
+    -------
+    Any
+        The value from the source field.
+
+    Raises
+    ------
+    ValueError
+        If source_fields does not contain exactly one field name.
+    """
+    if len(source_fields) != 1:
+        raise ValueError(f"{func_name} requires exactly one source field")
+    return row[source_fields[0]]
+
+
+def _help_transform_mapping(
+        input_val: Any,
+        mapping: Dict[str, Any],
+        field_name: str = "value",
+        make_lower: bool = False) -> Any:
+    """Transform a value using a mapping dictionary.
+
+    Parameters
+    ----------
+    input_val : Any
+        Input value to transform.
+    mapping : Dict[str, Any]
+        Dictionary mapping input values to output values.
+    field_name : str, optional
+        Name of the field being transformed, used in error messages.
+        Defaults to "value".
+    make_lower : bool, optional
+        Whether to convert input to lowercase before mapping.
+        Defaults to False.
+
+    Returns
+    -------
+    Any
+        The mapped value from the mapping dictionary.
+        If input_val is null, it is returned unchanged.
+
+    Raises
+    ------
+    ValueError
+        If the input value is not found in the mapping dictionary.
+    """
+    if pandas.isnull(input_val):
+        return input_val
+
+    if make_lower:
+        input_val = input_val.lower()
+
+    if input_val in mapping:
+        return mapping[input_val]
+    raise ValueError(f"Unrecognized {field_name}: {input_val}")
+
+
+# def _format_field_val(row, source_fields, field_type, format_string):
+#     x = _get_one_source_field(row, source_fields, "format_field_val")
+#     result = x
+#     # format string should be something like '{0:g}' or '{0:.2f}'
+#     # field type should be something like float or int
+#     if isinstance(x, field_type) and not pandas.isnull(x):
+#         result = format_string.format(x)
+#     return result