napistu 0.3.7__py3-none-any.whl → 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- napistu/__main__.py +8 -4
- napistu/constants.py +30 -35
- napistu/gcs/constants.py +11 -11
- napistu/ingestion/napistu_edgelist.py +4 -4
- napistu/matching/interactions.py +41 -39
- napistu/modify/gaps.py +2 -1
- napistu/network/constants.py +61 -45
- napistu/network/data_handling.py +1 -1
- napistu/network/neighborhoods.py +3 -3
- napistu/network/net_create.py +440 -616
- napistu/network/net_create_utils.py +734 -0
- napistu/network/net_propagation.py +1 -1
- napistu/network/{napistu_graph_core.py → ng_core.py} +57 -15
- napistu/network/ng_utils.py +28 -21
- napistu/network/paths.py +4 -4
- napistu/network/precompute.py +35 -74
- napistu/ontologies/id_tables.py +282 -0
- napistu/sbml_dfs_core.py +53 -63
- napistu/sbml_dfs_utils.py +126 -16
- napistu/utils.py +80 -5
- {napistu-0.3.7.dist-info → napistu-0.4.1.dist-info}/METADATA +7 -2
- {napistu-0.3.7.dist-info → napistu-0.4.1.dist-info}/RECORD +39 -34
- tests/conftest.py +102 -1
- tests/test_network_data_handling.py +5 -2
- tests/test_network_net_create.py +92 -201
- tests/test_network_net_create_utils.py +538 -0
- tests/test_network_ng_core.py +19 -0
- tests/test_network_ng_utils.py +1 -1
- tests/test_network_precompute.py +4 -3
- tests/test_ontologies_id_tables.py +198 -0
- tests/test_rpy2_callr.py +0 -1
- tests/test_rpy2_init.py +0 -1
- tests/test_sbml_dfs_core.py +30 -19
- tests/test_sbml_dfs_utils.py +115 -0
- tests/test_utils.py +26 -2
- {napistu-0.3.7.dist-info → napistu-0.4.1.dist-info}/WHEEL +0 -0
- {napistu-0.3.7.dist-info → napistu-0.4.1.dist-info}/entry_points.txt +0 -0
- {napistu-0.3.7.dist-info → napistu-0.4.1.dist-info}/licenses/LICENSE +0 -0
- {napistu-0.3.7.dist-info → napistu-0.4.1.dist-info}/top_level.txt +0 -0
napistu/ontologies/id_tables.py
ADDED
@@ -0,0 +1,282 @@
+import logging
+from typing import Optional, Union, Set
+
+import pandas as pd
+
+from napistu import sbml_dfs_utils
+from napistu.constants import (
+    BQB,
+    BQB_DEFINING_ATTRS_LOOSE,
+    IDENTIFIERS,
+    SBML_DFS_SCHEMA,
+    SCHEMA_DEFS,
+    VALID_BQB_TERMS,
+)
+from napistu import utils
+
+logger = logging.getLogger(__name__)
+
+
+def filter_id_table(
+    id_table: pd.DataFrame,
+    identifiers: Optional[Union[str, list, set]] = None,
+    ontologies: Optional[Union[str, list, set]] = None,
+    bqbs: Optional[Union[str, list, set]] = BQB_DEFINING_ATTRS_LOOSE + [BQB.HAS_PART],
+) -> pd.DataFrame:
+    """
+    Filter an identifier table by identifiers, ontologies, and BQB terms for a given entity type.
+
+    Parameters
+    ----------
+    id_table : pd.DataFrame
+        DataFrame containing identifier mappings to be filtered.
+    identifiers : str, list, set, or None, optional
+        Identifiers to filter by. If None, no filtering is applied on identifiers.
+    ontologies : str, list, set, or None, optional
+        Ontologies to filter by. If None, no filtering is applied on ontologies.
+    bqbs : str, list, set, or None, optional
+        BQB terms to filter by. If None, no filtering is applied on BQB terms. Default is BQB_DEFINING_ATTRS_LOOSE + [BQB.HAS_PART].
+
+    Returns
+    -------
+    pd.DataFrame
+        Filtered DataFrame containing only rows matching the specified criteria.
+
+    Raises
+    ------
+    ValueError
+        If the id_table or filter values are invalid, or required columns are missing.
+    """
+
+    entity_type = sbml_dfs_utils.infer_entity_type(id_table)
+    _validate_id_table(id_table, entity_type)
+
+    # bqbs
+    if bqbs is not None:
+        bqbs = _sanitize_id_table_bqbs(bqbs, id_table)
+        id_table = id_table.query("bqb in @bqbs")
+
+    # ontologies
+    if ontologies is not None:
+        ontologies = _sanitize_id_table_ontologies(ontologies, id_table)
+        id_table = id_table.query("ontology in @ontologies")
+
+    # identifiers
+    if identifiers is not None:
+        identifiers = _sanitize_id_table_identifiers(identifiers, id_table)
+        id_table = id_table.query("identifier in @identifiers")
+
+    # return the filtered id_table
+    return id_table
+
+
+def _validate_id_table(id_table: pd.DataFrame, entity_type: str) -> None:
+    """
+    Validate that the id_table contains the required columns and matches the schema for the given entity_type.
+
+    Parameters
+    ----------
+    id_table : pd.DataFrame
+        DataFrame containing identifier mappings for a given entity type.
+    entity_type : str
+        The type of entity (e.g., 'species', 'reactions') to validate against the schema.
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    ValueError
+        If entity_type is not present in the schema, or if required columns are missing in id_table.
+    """
+
+    schema = SBML_DFS_SCHEMA.SCHEMA
+
+    if entity_type not in schema.keys():
+        raise ValueError(
+            f"{entity_type} does not match a table in the SBML_dfs object. The tables "
+            f"which are present are {', '.join(schema.keys())}"
+        )
+
+    entity_table_attrs = schema[entity_type]
+
+    if SCHEMA_DEFS.ID not in entity_table_attrs.keys():
+        raise ValueError(f"{entity_type} does not have an 'id' attribute")
+
+    entity_pk = entity_table_attrs[SCHEMA_DEFS.PK]
+
+    utils.match_pd_vars(
+        id_table,
+        req_vars={
+            entity_pk,
+            IDENTIFIERS.ONTOLOGY,
+            IDENTIFIERS.IDENTIFIER,
+            IDENTIFIERS.URL,
+            IDENTIFIERS.BQB,
+        },
+        allow_series=False,
+    ).assert_present()
+
+    return None
+
+
+def _sanitize_id_table_values(
+    values: Union[str, list, set],
+    id_table: pd.DataFrame,
+    column_name: str,
+    valid_values: Optional[Set[str]] = None,
+    value_type_name: Optional[str] = None,
+) -> set:
+    """
+    Generic function to sanitize and validate values against an id_table column.
+
+    Parameters
+    ----------
+    values : str, list, or set
+        Values to sanitize and validate. Can be a single string, list of strings,
+        or set of strings.
+    id_table : pd.DataFrame
+        DataFrame containing the reference data to validate against.
+    column_name : str
+        Name of the column in id_table to check values against.
+    valid_values : set of str, optional
+        Optional set of globally valid values for additional validation
+        (e.g., VALID_BQB_TERMS). If provided, values must be a subset of this set.
+    value_type_name : str, optional
+        Human-readable name for the value type used in error messages.
+        If None, defaults to column_name.
+
+    Returns
+    -------
+    set
+        Set of sanitized and validated values.
+
+    Raises
+    ------
+    ValueError
+        If values is not a string, list, or set.
+        If any values are not in valid_values (when provided).
+        If none of the requested values are present in the id_table.
+
+    Warnings
+    --------
+    Logs a warning if some (but not all) requested values are missing from id_table.
+    """
+    if value_type_name is None:
+        value_type_name = column_name
+
+    # Convert to set
+    if isinstance(values, str):
+        values = {values}
+    elif isinstance(values, list):
+        values = set(values)
+    elif isinstance(values, set):
+        pass
+    else:
+        raise ValueError(
+            f"{value_type_name} must be a string, set, or list, got {type(values).__name__}"
+        )
+
+    # Check against global valid values if provided
+    if valid_values is not None:
+        invalid_values = values.difference(valid_values)
+        if len(invalid_values) > 0:
+            raise ValueError(
+                f"The following {value_type_name} are not valid: {', '.join(invalid_values)}.\n"
+                f"Valid {value_type_name} are {', '.join(valid_values)}"
+            )
+
+    # Check against values present in the id_table
+    available_values = set(id_table[column_name].unique())
+    missing_values = values.difference(available_values)
+
+    if len(missing_values) == len(values):
+        raise ValueError(
+            f"None of the requested {value_type_name} are present in the id_table: {', '.join(missing_values)}.\n"
+            f"The included {value_type_name} are {', '.join(available_values)}"
+        )
+    elif len(missing_values) > 0:
+        logger.warning(
+            f"The following {value_type_name} are not present in the id_table: {', '.join(missing_values)}.\n"
+            f"The included {value_type_name} are {', '.join(available_values)}"
+        )
+
+    return values
+
+
+def _sanitize_id_table_ontologies(
+    ontologies: Union[str, list, set], id_table: pd.DataFrame
+) -> set:
+    """
+    Sanitize and validate ontologies against the id_table.
+
+    Parameters
+    ----------
+    ontologies : str, list, or set
+        Ontology names to validate.
+    id_table : pd.DataFrame
+        DataFrame containing ontology reference data.
+
+    Returns
+    -------
+    set
+        Set of validated ontology names.
+    """
+    return _sanitize_id_table_values(
+        values=ontologies,
+        id_table=id_table,
+        column_name=IDENTIFIERS.ONTOLOGY,
+        value_type_name="ontologies",
+    )
+
+
+def _sanitize_id_table_bqbs(bqbs: Union[str, list, set], id_table: pd.DataFrame) -> set:
+    """
+    Sanitize and validate BQBs against the id_table.
+
+    Parameters
+    ----------
+    bqbs : str, list, or set
+        BQB terms to validate.
+    id_table : pd.DataFrame
+        DataFrame containing BQB reference data.
+
+    Returns
+    -------
+    set
+        Set of validated BQB terms.
+    """
+    return _sanitize_id_table_values(
+        values=bqbs,
+        id_table=id_table,
+        column_name=IDENTIFIERS.BQB,
+        valid_values=VALID_BQB_TERMS,
+        value_type_name="bqbs",
+    )
+
+
+def _sanitize_id_table_identifiers(
+    identifiers: Union[str, list, set], id_table: pd.DataFrame
+) -> set:
+    """
+    Sanitize and validate identifiers against the id_table.
+
+    Parameters
+    ----------
+    identifiers : str, list, or set
+        Identifier values to validate.
+    id_table : pd.DataFrame
+        DataFrame containing identifier reference data.
+
+    Returns
+    -------
+    set
+        Set of validated identifiers.
+    """
+    return _sanitize_id_table_values(
+        values=identifiers,
+        id_table=id_table,
+        column_name=IDENTIFIERS.IDENTIFIER,
+        value_type_name="identifiers",
+    )
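
The module reads top to bottom: `filter_id_table` infers the entity type from the table's keys, validates the required columns, then applies the `bqbs`, `ontologies`, and `identifiers` filters in sequence, so any subset of the three narrows the table incrementally. A minimal usage sketch follows; it assumes `sbml_dfs` is an existing SBML_dfs object, and the ontology and identifier values are purely illustrative (only `get_identifiers` and `BQB.IS` are named elsewhere in this release):

from napistu.constants import BQB
from napistu.ontologies import id_tables

# hypothetical id_table: one row per (entity key, ontology, identifier, url, bqb)
species_id_table = sbml_dfs.get_identifiers("species")

# keep only UniProt annotations attached with BQB.IS; a bare string is
# expanded to a one-element set by _sanitize_id_table_values
uniprot_rows = id_tables.filter_id_table(
    id_table=species_id_table,
    ontologies="uniprot",  # illustrative ontology name
    bqbs=BQB.IS,
)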
napistu/sbml_dfs_core.py
CHANGED
@@ -19,17 +19,23 @@ from napistu import sbml_dfs_utils
 from napistu import source
 from napistu import utils
 from napistu.ingestion import sbml
-from napistu.
-from napistu.constants import
-
-
-
-
-
-
-
-
-
+from napistu.ontologies import id_tables
+from napistu.constants import (
+    BQB,
+    BQB_DEFINING_ATTRS_LOOSE,
+    BQB_PRIORITIES,
+    ENTITIES_W_DATA,
+    ENTITIES_TO_ENTITY_DATA,
+    IDENTIFIERS,
+    MINI_SBO_FROM_NAME,
+    MINI_SBO_TO_NAME,
+    NAPISTU_STANDARD_OUTPUTS,
+    ONTOLOGY_PRIORITIES,
+    SBML_DFS,
+    SBML_DFS_SCHEMA,
+    SBOTERM_NAMES,
+    SCHEMA_DEFS,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -101,7 +107,7 @@ class SBML_dfs:
         Remove a reactions data table by label.
     remove_species_data(label)
         Remove a species data table by label.
-    search_by_ids(
+    search_by_ids(id_table, identifiers=None, ontologies=None, bqbs=None)
         Find entities and identifiers matching a set of query IDs.
     search_by_name(name, entity_type, partial_match=True)
         Find entities by exact or partial name match.
@@ -455,12 +461,12 @@ class SBML_dfs:
         ValueError
             If id_type is invalid or identifiers are malformed
         """
-        selected_table = self.get_table(id_type, {
+        selected_table = self.get_table(id_type, {SCHEMA_DEFS.ID})
         schema = SBML_DFS_SCHEMA.SCHEMA
 
         identifiers_dict = dict()
         for sysid in selected_table.index:
-            id_entry = selected_table[schema[id_type][
+            id_entry = selected_table[schema[id_type][SCHEMA_DEFS.ID]][sysid]
 
             if isinstance(id_entry, identifiers.Identifiers):
                 identifiers_dict[sysid] = pd.DataFrame(id_entry.ids)
@@ -473,16 +479,16 @@ class SBML_dfs:
         )
         if not identifiers_dict:
             # Return empty DataFrame with expected columns if nothing found
-            return pd.DataFrame(columns=[schema[id_type][
+            return pd.DataFrame(columns=[schema[id_type][SCHEMA_DEFS.PK], "entry"])
 
         identifiers_tbl = pd.concat(identifiers_dict)
 
-        identifiers_tbl.index.names = [schema[id_type][
+        identifiers_tbl.index.names = [schema[id_type][SCHEMA_DEFS.PK], "entry"]
         identifiers_tbl = identifiers_tbl.reset_index()
 
         named_identifiers = identifiers_tbl.merge(
-            selected_table.drop(schema[id_type][
-            left_on=schema[id_type][
+            selected_table.drop(schema[id_type][SCHEMA_DEFS.ID], axis=1),
+            left_on=schema[id_type][SCHEMA_DEFS.PK],
             right_index=True,
         )
@@ -1163,24 +1169,25 @@ class SBML_dfs:
 
     def search_by_ids(
         self,
-
-
-
-
+        id_table: pd.DataFrame,
+        identifiers: Optional[Union[str, list, set]] = None,
+        ontologies: Optional[Union[str, list, set]] = None,
+        bqbs: Optional[Union[str, list, set]] = BQB_DEFINING_ATTRS_LOOSE
+        + [BQB.HAS_PART],
     ) -> tuple[pd.DataFrame, pd.DataFrame]:
         """
         Find entities and identifiers matching a set of query IDs.
 
         Parameters
         ----------
-
-            List of identifiers to search for
-        entity_type : str
-            Type of entity to search (e.g., 'species', 'reactions')
-        identifiers_df : pd.DataFrame
+        id_table : pd.DataFrame
             DataFrame containing identifier mappings
-
-
+        identifiers : Optional[Union[str, list, set]], optional
+            Identifiers to filter by, by default None
+        ontologies : Optional[Union[str, list, set]], optional
+            Ontologies to filter by, by default None
+        bqbs : Optional[Union[str, list, set]], optional
+            BQB terms to filter by, by default BQB_DEFINING_ATTRS_LOOSE + [BQB.HAS_PART]
 
         Returns
         -------
@@ -1196,42 +1203,25 @@ class SBML_dfs:
             If ontologies is not a set
         """
         # validate inputs
-        entity_table = self.get_table(entity_type, required_attributes={"id"})
-        entity_pk = self.schema[entity_type]["pk"]
-
-        utils.match_pd_vars(
-            identifiers_df,
-            req_vars={
-                entity_pk,
-                IDENTIFIERS.ONTOLOGY,
-                IDENTIFIERS.IDENTIFIER,
-                IDENTIFIERS.URL,
-                IDENTIFIERS.BQB,
-            },
-            allow_series=False,
-        ).assert_present()
-
-        if ontologies is not None:
-            if not isinstance(ontologies, set):
-                # for clarity this should not be reachable based on type hints
-                raise TypeError(
-                    f"ontologies must be a set, but got {type(ontologies).__name__}"
-                )
-            ALL_VALID_ONTOLOGIES = identifiers_df["ontology"].unique()
-            invalid_ontologies = ontologies.difference(ALL_VALID_ONTOLOGIES)
-            if len(invalid_ontologies) > 0:
-                raise ValueError(
-                    f"The following ontologies are not valid: {', '.join(invalid_ontologies)}.\n"
-                    f"Valid ontologies are {', '.join(ALL_VALID_ONTOLOGIES)}"
-                )
 
-
-
+        entity_type = sbml_dfs_utils.infer_entity_type(id_table)
+        entity_table = self.get_table(entity_type, required_attributes={SCHEMA_DEFS.ID})
+        entity_pk = self.schema[entity_type][SCHEMA_DEFS.PK]
 
-        matching_identifiers =
-
-
-
+        matching_identifiers = id_tables.filter_id_table(
+            id_table=id_table, identifiers=identifiers, ontologies=ontologies, bqbs=bqbs
+        )
+
+        matching_keys = matching_identifiers[entity_pk].tolist()
+        entity_subset = entity_table.loc[matching_keys]
+
+        if matching_identifiers.shape[0] != entity_subset.shape[0]:
+            raise ValueError(
+                f"Some identifiers did not match to an entity for {entity_type}. "
+                "This suggests that the identifiers and sbml_dfs are not in sync. "
+                "Please create new identifiers with sbml_dfs.get_characteristic_species_ids() "
+                "or sbml_dfs.get_identifiers()."
+            )
 
         return entity_subset, matching_identifiers
 
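
Net effect on the public API: `search_by_ids` no longer takes an `entity_type` plus a pre-validated `identifiers_df`; the entity type is inferred from the id_table itself, and the ontology/identifier/BQB checks are delegated to `id_tables.filter_id_table`. A hedged sketch of the new call pattern (the accessions and ontology name are illustrative; `get_characteristic_species_ids` is the helper named in the new error message above):

# hypothetical query: which species carry either of two UniProt accessions?
id_table = sbml_dfs.get_characteristic_species_ids()
entity_subset, matching_identifiers = sbml_dfs.search_by_ids(
    id_table=id_table,
    identifiers={"P04637", "P38398"},  # illustrative UniProt accessions
    ontologies="uniprot",              # illustrative ontology name
)
# entity_subset: matching rows of the species table;
# matching_identifiers: the filtered id_table rows that selected them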
napistu/sbml_dfs_utils.py
CHANGED
@@ -14,22 +14,29 @@ from napistu import utils
 from napistu import identifiers
 from napistu import indices
 
-from napistu.constants import
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+from napistu.constants import (
+    BQB,
+    BQB_DEFINING_ATTRS,
+    BQB_DEFINING_ATTRS_LOOSE,
+    SBML_DFS,
+    SBML_DFS_SCHEMA,
+    SCHEMA_DEFS,
+    IDENTIFIERS,
+    INTERACTION_EDGELIST_EXPECTED_VARS,
+    ONTOLOGIES,
+    MINI_SBO_FROM_NAME,
+    MINI_SBO_TO_NAME,
+    REQUIRED_REACTION_FROMEDGELIST_COLUMNS,
+    SBO_ROLES_DEFS,
+    SBO_NAME_TO_ROLE,
+    VALID_SBO_TERM_NAMES,
+    VALID_SBO_TERMS,
+)
+from napistu.ingestion.constants import (
+    COMPARTMENTS_GO_TERMS,
+    GENERIC_COMPARTMENT,
+    VALID_COMPARTMENTS,
+)
 
 logger = logging.getLogger(__name__)
 
@@ -416,6 +423,65 @@ def id_formatter_inv(ids: list[str]) -> list[int]:
     return id_val
 
 
+def infer_entity_type(df: pd.DataFrame) -> str:
+    """
+    Infer the entity type of a DataFrame based on its structure and schema.
+
+    Parameters
+    ----------
+    df : pd.DataFrame
+        The DataFrame to analyze
+
+    Returns
+    -------
+    str
+        The inferred entity type name
+
+    Raises
+    ------
+    ValueError
+        If no entity type can be determined
+    """
+    schema = SBML_DFS_SCHEMA.SCHEMA
+
+    # Get all primary keys
+    primary_keys = [
+        entity_schema.get(SCHEMA_DEFS.PK) for entity_schema in schema.values()
+    ]
+    primary_keys = [pk for pk in primary_keys if pk is not None]
+
+    # Check if index matches a primary key
+    if df.index.name in primary_keys:
+        for entity_type, entity_schema in schema.items():
+            if entity_schema.get(SCHEMA_DEFS.PK) == df.index.name:
+                return entity_type
+
+    # Get DataFrame columns that are also primary keys
+    df_columns = set(df.columns).intersection(primary_keys)
+
+    # Check for exact match with primary key + foreign keys
+    for entity_type, entity_schema in schema.items():
+        expected_keys = set()
+
+        # Add primary key
+        pk = entity_schema.get(SCHEMA_DEFS.PK)
+        if pk:
+            expected_keys.add(pk)
+
+        # Add foreign keys
+        fks = entity_schema.get(SCHEMA_DEFS.FK, [])
+        expected_keys.update(fks)
+
+        # Check for exact match
+        if df_columns == expected_keys:
+            return entity_type
+
+    # No match found
+    raise ValueError(
+        f"No entity type matches DataFrame with columns: {sorted(df_columns)}"
+    )
+
+
 def match_entitydata_index_to_entity(
     entity_data_dict: dict,
     an_entity_data_type: str,
@@ -1281,3 +1347,47 @@ def _validate_matching_data(data_table: pd.DataFrame, ref_table: pd.DataFrame):
             f"The data table was type {type(data_table).__name__}"
             " but must be a pd.DataFrame"
         )
+
+
+def _validate_sbo_values(sbo_series: pd.Series, validate: str = "names") -> None:
+    """
+    Validate SBO terms or names.
+
+    Parameters
+    ----------
+    sbo_series : pd.Series
+        The SBO terms or names to validate.
+    validate : str, optional
+        Whether the values are SBO terms ("terms") or names ("names", default).
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    ValueError
+        If the validation type is invalid.
+    TypeError
+        If invalid_counts is not a pandas DataFrame.
+    ValueError
+        If some reaction species have unusable SBO terms.
+    """
+
+    if validate == "terms":
+        valid_values = VALID_SBO_TERMS
+    elif validate == "names":
+        valid_values = VALID_SBO_TERM_NAMES
+    else:
+        raise ValueError(f"Invalid validation type: {validate}")
+
+    invalid_sbo_terms = sbo_series[~sbo_series.isin(valid_values)]
+
+    if invalid_sbo_terms.shape[0] != 0:
+        invalid_counts = invalid_sbo_terms.value_counts(sbo_series.name).to_frame("N")
+        if not isinstance(invalid_counts, pd.DataFrame):
+            raise TypeError("invalid_counts must be a pandas DataFrame")
+        print(invalid_counts)
+        raise ValueError("Some reaction species have unusable SBO terms")
+
+    return None
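
`infer_entity_type` resolves a table in two passes: first the index name is checked against the schema's primary keys, then the table's key columns are matched exactly against one entity's primary-plus-foreign keys (non-key columns drop out via the `intersection` with `primary_keys`). A sketch of both paths, assuming the standard napistu key names (`s_id` for species; `rsc_id`, `r_id`, `sc_id` for reaction_species); the frames are toy examples, not real model data. `_validate_sbo_values`, the other addition, is a plain membership check against VALID_SBO_TERMS or VALID_SBO_TERM_NAMES and needs no illustration.

import pandas as pd

from napistu import sbml_dfs_utils

# Path 1: the index name matches a schema primary key
species_df = pd.DataFrame(
    {"s_name": ["ATP"]},
    index=pd.Index(["S00000001"], name="s_id"),
)
print(sbml_dfs_utils.infer_entity_type(species_df))  # "species"

# Path 2: the key columns exactly match one entity's pk + fks; the extra
# "stoichiometry" column is ignored by the intersection with primary keys
rsc_df = pd.DataFrame(
    {
        "rsc_id": ["RSC00000001"],
        "r_id": ["R00000001"],
        "sc_id": ["SC00000001"],
        "stoichiometry": [-1.0],
    }
)
print(sbml_dfs_utils.infer_entity_type(rsc_df))  # "reaction_species"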