napistu 0.3.5__py3-none-any.whl → 0.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- napistu/__main__.py +38 -27
- napistu/consensus.py +22 -27
- napistu/constants.py +91 -65
- napistu/context/filtering.py +2 -1
- napistu/identifiers.py +3 -6
- napistu/indices.py +3 -1
- napistu/ingestion/bigg.py +6 -6
- napistu/ingestion/sbml.py +298 -295
- napistu/ingestion/string.py +16 -19
- napistu/ingestion/trrust.py +22 -27
- napistu/ingestion/yeast.py +2 -1
- napistu/matching/interactions.py +4 -4
- napistu/matching/species.py +1 -1
- napistu/modify/uncompartmentalize.py +1 -1
- napistu/network/net_create.py +1 -1
- napistu/network/paths.py +1 -1
- napistu/ontologies/dogma.py +2 -1
- napistu/ontologies/genodexito.py +5 -1
- napistu/ontologies/renaming.py +4 -0
- napistu/sbml_dfs_core.py +1343 -2167
- napistu/sbml_dfs_utils.py +1086 -143
- napistu/utils.py +52 -41
- {napistu-0.3.5.dist-info → napistu-0.3.7.dist-info}/METADATA +2 -2
- {napistu-0.3.5.dist-info → napistu-0.3.7.dist-info}/RECORD +40 -40
- tests/conftest.py +113 -13
- tests/test_consensus.py +161 -4
- tests/test_context_filtering.py +2 -2
- tests/test_gaps.py +26 -15
- tests/test_network_net_create.py +1 -1
- tests/test_network_precompute.py +1 -1
- tests/test_ontologies_genodexito.py +3 -0
- tests/test_ontologies_mygene.py +3 -0
- tests/test_ontologies_renaming.py +28 -24
- tests/test_sbml_dfs_core.py +260 -211
- tests/test_sbml_dfs_utils.py +194 -36
- tests/test_utils.py +19 -0
- {napistu-0.3.5.dist-info → napistu-0.3.7.dist-info}/WHEEL +0 -0
- {napistu-0.3.5.dist-info → napistu-0.3.7.dist-info}/entry_points.txt +0 -0
- {napistu-0.3.5.dist-info → napistu-0.3.7.dist-info}/licenses/LICENSE +0 -0
- {napistu-0.3.5.dist-info → napistu-0.3.7.dist-info}/top_level.txt +0 -0
napistu/utils.py
CHANGED
@@ -810,50 +810,15 @@ def drop_extra_cols(
     return df_out.loc[:, ordered_cols]
 
 
-def _merge_and_log_overwrites(
-    left_df: pd.DataFrame, right_df: pd.DataFrame, merge_context: str, **merge_kwargs
-) -> pd.DataFrame:
+def update_pathological_names(names: pd.Series, prefix: str) -> pd.Series:
     """
-    Merge two DataFrames and log any column overwrites.
-
-    Parameters
-    ----------
-    left_df : pd.DataFrame
-        Left DataFrame for merge
-    right_df : pd.DataFrame
-        Right DataFrame for merge
-    merge_context : str
-        Description of the merge operation for logging
-    **merge_kwargs : dict
-        Additional keyword arguments passed to pd.merge
+    Update pathological names in a pandas Series.
 
-    Returns
-    -------
-    pd.DataFrame
-        Merged DataFrame with overwritten columns removed
+    Add a prefix to the names if they are all numeric.
     """
-    # Track original columns
-    original_cols = left_df.columns.tolist()
-
-    # Ensure we're using the correct suffixes
-    merge_kwargs["suffixes"] = ("_old", "")
-
-    # Perform merge
-    merged_df = pd.merge(left_df, right_df, **merge_kwargs)
-
-    # Check for and log any overwritten columns
-    new_cols = merged_df.columns.tolist()
-    overwritten_cols = [col for col in original_cols if col + "_old" in new_cols]
-    if overwritten_cols:
-        logger.warning(
-            f"The following columns were overwritten during {merge_context} merge and their original values "
-            f"have been suffixed with '_old': {', '.join(overwritten_cols)}"
-        )
-        # Drop the old columns
-        cols_to_drop = [col + "_old" for col in overwritten_cols]
-        merged_df = merged_df.drop(columns=cols_to_drop)
-
-    return merged_df
+    if names.apply(lambda x: x.isdigit()).all():
+        names = names.apply(lambda x: f"{prefix}{x}")
+    return names
 
 
 def format_identifiers_as_edgelist(
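For orientation, here is a minimal sketch of how the newly added helper behaves. The series values and prefix are invented for illustration; only the function itself comes from the diff above.

    import pandas as pd
    from napistu.utils import update_pathological_names

    # When every name is purely numeric, the prefix is prepended to each one.
    update_pathological_names(pd.Series(["1", "2", "3"]), "compartment: ")
    # -> "compartment: 1", "compartment: 2", "compartment: 3"

    # If any name is non-numeric, the series is returned unchanged.
    update_pathological_names(pd.Series(["cytosol", "2"]), "compartment: ")
    # -> "cytosol", "2"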
@@ -1108,3 +1073,49 @@ def _add_nameness_score(df, name_var):
 
     df.loc[:, "nameness_score"] = df[name_var].apply(score_nameness)
     return df
+
+
+def _merge_and_log_overwrites(
+    left_df: pd.DataFrame, right_df: pd.DataFrame, merge_context: str, **merge_kwargs
+) -> pd.DataFrame:
+    """
+    Merge two DataFrames and log any column overwrites.
+
+    Parameters
+    ----------
+    left_df : pd.DataFrame
+        Left DataFrame for merge
+    right_df : pd.DataFrame
+        Right DataFrame for merge
+    merge_context : str
+        Description of the merge operation for logging
+    **merge_kwargs : dict
+        Additional keyword arguments passed to pd.merge
+
+    Returns
+    -------
+    pd.DataFrame
+        Merged DataFrame with overwritten columns removed
+    """
+    # Track original columns
+    original_cols = left_df.columns.tolist()
+
+    # Ensure we're using the correct suffixes
+    merge_kwargs["suffixes"] = ("_old", "")
+
+    # Perform merge
+    merged_df = pd.merge(left_df, right_df, **merge_kwargs)
+
+    # Check for and log any overwritten columns
+    new_cols = merged_df.columns.tolist()
+    overwritten_cols = [col for col in original_cols if col + "_old" in new_cols]
+    if overwritten_cols:
+        logger.warning(
+            f"The following columns were overwritten during {merge_context} merge and their original values "
+            f"have been suffixed with '_old': {', '.join(overwritten_cols)}"
+        )
+        # Drop the old columns
+        cols_to_drop = [col + "_old" for col in overwritten_cols]
+        merged_df = merged_df.drop(columns=cols_to_drop)
+
+    return merged_df
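A hedged usage sketch of the relocated _merge_and_log_overwrites helper; the frames, column names, and merge key below are made up, and the warning path assumes the module-level logger that the function body above references.

    import pandas as pd
    from napistu.utils import _merge_and_log_overwrites

    left = pd.DataFrame({"s_id": ["S1"], "label": ["old"]})
    right = pd.DataFrame({"s_id": ["S1"], "label": ["new"]})

    # "label" exists in both frames, so the right-hand value wins; the helper
    # warns that the original column was overwritten and drops "label_old".
    merged = _merge_and_log_overwrites(left, right, "example", on="s_id")
    # merged.columns -> ["s_id", "label"]; merged["label"] -> ["new"]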
{napistu-0.3.5.dist-info → napistu-0.3.7.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: napistu
-Version: 0.3.5
+Version: 0.3.7
 Summary: Connecting high-dimensional data to curated pathways
 Home-page: https://github.com/napistu/napistu-py
 Author: Sean Hackett
@@ -43,7 +43,7 @@ Requires-Dist: pytest-cov; extra == "dev"
 Requires-Dist: ruff; extra == "dev"
 Requires-Dist: testcontainers; extra == "dev"
 Provides-Extra: mcp
-Requires-Dist: fastmcp<
+Requires-Dist: fastmcp<2.9.0,>=2.0.0; extra == "mcp"
 Requires-Dist: mcp<2.0.0,>=1.0.0; extra == "mcp"
 Requires-Dist: httpx>=0.24.0; extra == "mcp"
 Requires-Dist: beautifulsoup4<5.0.0,>=4.11.0; extra == "mcp"
{napistu-0.3.5.dist-info → napistu-0.3.7.dist-info}/RECORD
CHANGED
@@ -1,22 +1,22 @@
 napistu/__init__.py,sha256=dFXAhIqlTLJMwowS4BUDT08-Vy3Q0u1L0CMCErSZT1Y,239
-napistu/__main__.py,sha256=
-napistu/consensus.py,sha256=
-napistu/constants.py,sha256=
-napistu/identifiers.py,sha256=
-napistu/indices.py,sha256=
-napistu/sbml_dfs_core.py,sha256=
-napistu/sbml_dfs_utils.py,sha256=
+napistu/__main__.py,sha256=cml91Be1r_eyWeel_KNSWC-42AbMXWsrKZGf4PVgkaE,28997
+napistu/consensus.py,sha256=xWXiqIM6ot-SSPJZXTrVpohbINSCkZXBtRi-5REfk_g,69897
+napistu/constants.py,sha256=dbofeN0HKdmjYluyKuj6nfiL88j69otPhuw2krEhHz8,13240
+napistu/identifiers.py,sha256=e2-nTVzr5AINa0y1ER9218bKXyF2kAeJ9At22S4Z00o,33914
+napistu/indices.py,sha256=Zjg3gE0JQ3T879lCPazYg-WXVE6hvcAr713ZKpJ32rk,9830
+napistu/sbml_dfs_core.py,sha256=3Z2Kg-aVnZMGK9iK-_vztY2ORgNpta8BUMuWEZg80iE,73125
+napistu/sbml_dfs_utils.py,sha256=M0qNbxWkVnPCeUYTBZAD5i4PgV66qu6JE36Eb1dKaMw,43617
 napistu/source.py,sha256=UGpN70bqbC9gnKmM0ivSdQYim9hfzgABeXoQKzRr9oU,13646
-napistu/utils.py,sha256=
+napistu/utils.py,sha256=ckYaIYjUOy22A3ojS7wSUabq_A1rJMMOk6QLrcbr3sU,33560
 napistu/context/__init__.py,sha256=LQBEqipcHKK0E5UlDEg1ct-ymCs93IlUrUaH8BCevf0,242
 napistu/context/discretize.py,sha256=Qq7zg46F_I-PvQIT2_pEDQV7YEtUQCxKoRvT5Gu9QsE,15052
-napistu/context/filtering.py,sha256=
+napistu/context/filtering.py,sha256=l1oq-43ysSGqU9VmhTOO_pYT4DSMf20yxvktPC1MI0I,13696
 napistu/gcs/__init__.py,sha256=dFXAhIqlTLJMwowS4BUDT08-Vy3Q0u1L0CMCErSZT1Y,239
 napistu/gcs/constants.py,sha256=g6PaU99GY5XvaRHx4BGmWHUpcJ36-Zh_GzeNVOeHviM,2856
 napistu/gcs/downloads.py,sha256=SvGv9WYr_Vt3guzyz1QiAuBndeKPTBtWSFLj1-QbLf4,6348
 napistu/gcs/utils.py,sha256=eLSsvewWJdCguyj2k0ozUGP5BTemaE1PZg41Z3aY5kM,571
 napistu/ingestion/__init__.py,sha256=dFXAhIqlTLJMwowS4BUDT08-Vy3Q0u1L0CMCErSZT1Y,239
-napistu/ingestion/bigg.py,sha256=
+napistu/ingestion/bigg.py,sha256=f65--8ARe248eYCUJpFMF284Wz53sLyFyBuwelxHmJA,4340
 napistu/ingestion/constants.py,sha256=9UP47VImZ11q0kz17N3EJg2155USqLewwNWyKpA-cbA,8089
 napistu/ingestion/gtex.py,sha256=X0hSC1yrpf4xSJWFhpeNcnHwJzKDII2MvjfUqYA0JN8,3720
 napistu/ingestion/hpa.py,sha256=R27ExrryKQ4Crxv9ATXmBJCa-yd01TMOrDjkeBhIQac,5054
@@ -25,15 +25,15 @@ napistu/ingestion/napistu_edgelist.py,sha256=eVT9M7gmdBuGHcAYlvkD_zzvTtyzXufKWjw
 napistu/ingestion/obo.py,sha256=AQkIPWbjA464Lma0tx91JucWkIwLjC7Jgv5VHGRTDkE,9601
 napistu/ingestion/psi_mi.py,sha256=5eJjm7XWogL9oTyGqR52kntHClLwLsTePKqCvUGyi-w,10111
 napistu/ingestion/reactome.py,sha256=Hn9X-vDp4o_HK-OtaQvel3vJeZ8_TC1-4N2rruK9Oks,7099
-napistu/ingestion/sbml.py,sha256=
-napistu/ingestion/string.py,sha256=
-napistu/ingestion/trrust.py,sha256=
-napistu/ingestion/yeast.py,sha256=
+napistu/ingestion/sbml.py,sha256=l8Z98yWuOIRGns8G4UNnoQz7v_xmukZb_IZ_5ye34Ko,25296
+napistu/ingestion/string.py,sha256=go1WGTkoLJejX7GQWf9bFeInFGAw4jNSpS2B_Zr5f_s,11364
+napistu/ingestion/trrust.py,sha256=_6hIS48O3tRpMxX-FdIC57ekhCcV7J4owUzoaYnRqZo,9204
+napistu/ingestion/yeast.py,sha256=7XwdkmgOnG1MYauKSk9nSK6fHemDrtXEPcS4ebs1_so,5268
 napistu/matching/__init__.py,sha256=dFXAhIqlTLJMwowS4BUDT08-Vy3Q0u1L0CMCErSZT1Y,239
 napistu/matching/constants.py,sha256=j4XSOE9Bpma9F6apVJ1LijKOUPgRk8Geo_u_rvNtpSU,610
-napistu/matching/interactions.py,sha256=
+napistu/matching/interactions.py,sha256=XrzZvH1zgeaZLq3qhpsV0tx4BFgvNvkjM47l8ZUEOH4,18798
 napistu/matching/mount.py,sha256=8JEtiDIy7qdjWyDAs0vuVwEQkpwRf5ah4xMLZ4jKHag,19428
-napistu/matching/species.py,sha256=
+napistu/matching/species.py,sha256=U8OfzmDN9dMwemdnzQdV19bWfjY8MLJ9-wf83siK1bM,18888
 napistu/mcp/__init__.py,sha256=EmtcdtYyfhXdxxPB5cY_pshXnFv6XZ5CtRU0JMHn3aQ,2074
 napistu/mcp/__main__.py,sha256=o9C4J3YHkfh_pQi-K9OC5bwFmsNWHuK2l8GL7M90uFc,8057
 napistu/mcp/client.py,sha256=Zyy7unFFq9XPBDrr2Hl7dMVDTGoZbtj_YFuUH18QScs,6350
@@ -56,24 +56,24 @@ napistu/modify/constants.py,sha256=H6K6twzPlxt0yp6QLAxIx0Tp8YzYhtKKXPdmXi5V_QQ,3
 napistu/modify/curation.py,sha256=sQeSO53ZLdn14ww2GSKkoP0vJnDpAoSWb-YDjUf5hDQ,21743
 napistu/modify/gaps.py,sha256=qprylC2BbSk_vPWayYPVT8lwURXDMOlW5zNLV_wMFZ4,26755
 napistu/modify/pathwayannot.py,sha256=xuBSMDFWbg_d6-Gzv0Td3Q5nnFTa-Qzic48g1b1AZtQ,48081
-napistu/modify/uncompartmentalize.py,sha256=
+napistu/modify/uncompartmentalize.py,sha256=y5LkXn5x6u80dB_McfAIh88BxZGIAVFLujkP7sPNRh0,9690
 napistu/network/__init__.py,sha256=dFXAhIqlTLJMwowS4BUDT08-Vy3Q0u1L0CMCErSZT1Y,239
 napistu/network/constants.py,sha256=fC1njZDu6in1JiaZ1-T1_fhmmkcc2HKSUUomDVyQ7Dw,5789
 napistu/network/data_handling.py,sha256=mxplWwyXNrjZRN-jjWWUI9IZOqX69k8qSMDIrL9h0Og,14736
 napistu/network/ig_utils.py,sha256=JSlf_sZtw3DiiSIiYJ2YqJFEP4hVJMwNRox2qYTA4zY,11470
 napistu/network/napistu_graph_core.py,sha256=2NbjiLcDcFWFyX1MuN17pobPDgoQFtcYWOwuPSFTT4g,10429
 napistu/network/neighborhoods.py,sha256=Q9HWUvf_J4a_4RQDKd7ywEy4cp3Wq2OPOfVsotDbEe0,56098
-napistu/network/net_create.py,sha256=
+napistu/network/net_create.py,sha256=aAw6kfHREpkMEcwQFgwU5CHg--b8YLO559surQLRXZI,69408
 napistu/network/net_propagation.py,sha256=89ZR4p2mGpkCCIemofZ53XbUjQsuNABxIc6UmF8A5n8,4935
 napistu/network/ng_utils.py,sha256=ijWDa5MTuULJpdV6dcVFGmLmtB_xy87jaUG7F5nvC_k,15240
-napistu/network/paths.py,sha256
+napistu/network/paths.py,sha256=-dxRtaBRDYwuMw9DByGSn5OXFC3umDeO2zvVvD0TdWE,17452
 napistu/network/precompute.py,sha256=pIXCCE6Mf6HY8o-fiwUaOxvQ_9_mevK0vaC8fND4RZk,9141
 napistu/ontologies/__init__.py,sha256=dFXAhIqlTLJMwowS4BUDT08-Vy3Q0u1L0CMCErSZT1Y,239
 napistu/ontologies/constants.py,sha256=GyOFvezSxDK1VigATcruTKtNhjcYaid1ggulEf_HEtQ,4345
-napistu/ontologies/dogma.py,sha256=
-napistu/ontologies/genodexito.py,sha256=
+napistu/ontologies/dogma.py,sha256=VVj6NKBgNym4SdOSu8g22OohALj7cbObhIJmdY2Sfy0,8860
+napistu/ontologies/genodexito.py,sha256=ZZmb7V38BmFjy9VOGdxbD3-BD5tKGl5izr0nwO_eEdA,24967
 napistu/ontologies/mygene.py,sha256=RMFQTWsLkeYxmsOPxxmeIya2phdcUMcF5V2abaS8MVg,11109
-napistu/ontologies/renaming.py,sha256=
+napistu/ontologies/renaming.py,sha256=aZR5oxjeZhse026fuvFyQiKM8PVzbBT915J8AfXGv1M,7006
 napistu/rpy2/__init__.py,sha256=8WzSK_tmdcbyMUtb17OmqdQqbisqIBl8OQrDsaFDeX4,8356
 napistu/rpy2/callr.py,sha256=yFCGobZTVgH1vJc5h0njAflvFr6zmCWIP-uuLWnZL-g,3701
 napistu/rpy2/constants.py,sha256=3thZa4UUfpDpXaUzGcqfDEyEIQaf4a_C01-O2pciZ7c,2760
@@ -81,15 +81,15 @@ napistu/rpy2/rids.py,sha256=AfXLTfTdonfspgAHYO0Ph7jSUWv8YuyT8x3fyLfAqc8,3413
 napistu/scverse/__init__.py,sha256=Lgxr3iMQAkTzXE9BNz93CndNP5djzerLvmHM-D0PU3I,357
 napistu/scverse/constants.py,sha256=0iAkhyJUIeFGHdLLU3fCaEU1O3Oix4qAsxr3CxGTjVs,653
 napistu/scverse/loading.py,sha256=jqiE71XB-wdV50GyZrauFNY0Lai4bX9Fm2Gv80VR8t8,27016
-napistu-0.3.
+napistu-0.3.7.dist-info/licenses/LICENSE,sha256=kW8wVT__JWoHjl2BbbJDAZInWa9AxzJeR_uv6-i5x1g,1063
 tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-tests/conftest.py,sha256=
-tests/test_consensus.py,sha256=
+tests/conftest.py,sha256=Tdw9-uYpnV1ZvdO9k9oto-JDEqMOTF05fsVps-EpmCE,6240
+tests/test_consensus.py,sha256=Hzfrgp4SpkRDnEMVMD3f0UInSycndB8kKzC4wDDvRas,15076
 tests/test_constants.py,sha256=gJLDv7QMeeBiiupyMazj6mumk20KWvGMgm2myHMKKfc,531
 tests/test_context_discretize.py,sha256=5Mr9WqwHGYMO37M1TnMmSfC64UZ73mnoCiEM2IQHVDY,1667
-tests/test_context_filtering.py,sha256=
+tests/test_context_filtering.py,sha256=5dwC2d-99CpLdnzzTf2NvEzsRuwiIj-YU8NFqwtWp0g,9485
 tests/test_curation.py,sha256=-Q2J0D7qs9PGjHZX-rM4NxRLLdwxoapytSo_98q9ItY,3864
-tests/test_gaps.py,sha256=
+tests/test_gaps.py,sha256=GBRb0E0RUwhXz2PsnfTsGszKDsHHvH-E3x9hUZG1DhM,4550
 tests/test_gcs.py,sha256=p_uQWuY2TcGj3zV3qFC-GXBqj4St8YENR_XRpQ6UH5g,570
 tests/test_identifiers.py,sha256=9QlGCG27zposzEvazEuZqoMausNMLLuUwfA3FuYU1mc,8345
 tests/test_indices.py,sha256=e_8GSNzIT4JHGmelX9PseJzFXTxo-TQAus3r21AUt7I,4547
@@ -104,30 +104,30 @@ tests/test_mcp_server.py,sha256=bP3PWVQsEfX6-lAgXKP32njdg__o65n2WuLvkxTTHkQ,1121
 tests/test_network_data_handling.py,sha256=oBSZuB3IRG9bwmD6n8FY-UZLe2UqGzXpNSxVtkHRSvE,12605
 tests/test_network_ig_utils.py,sha256=Buoh570mNm5pcac3Hf6f3pevCjWfBwPfKuD8IkDLg58,2120
 tests/test_network_neighborhoods.py,sha256=8BV17m5X1OUd5FwasTTYUOkNYUHDPUkxOKH_VZCsyBE,631
-tests/test_network_net_create.py,sha256=
+tests/test_network_net_create.py,sha256=LDjkA9boX8kH4wCLOpa0ENwN6JZU2c29w3qpYlhQ6Rs,16456
 tests/test_network_net_propagation.py,sha256=9pKkUdduWejH4iKNCJXKFzAkdNpCfrMbiUWySgI_LH4,3244
 tests/test_network_ng_utils.py,sha256=CwDw4MKTPhVZXz2HA2XU2QjjBv8CXc1_yQ0drvkBkFw,724
 tests/test_network_paths.py,sha256=TWZnxY5bF3m6gahcxcYJGrBIawh2-_vUcec1LyPmXV8,1686
-tests/test_network_precompute.py,sha256=
-tests/test_ontologies_genodexito.py,sha256=
-tests/test_ontologies_mygene.py,sha256=
-tests/test_ontologies_renaming.py,sha256=
+tests/test_network_precompute.py,sha256=8HqTSXdxdXuQqNewP3xxsps9UEtw6OVgPN_lUywXiNg,9012
+tests/test_ontologies_genodexito.py,sha256=6fINyUiubHZqu7qxye09DQfJXw28ZMAJc3clPb-cCoY,2298
+tests/test_ontologies_mygene.py,sha256=VkdRcKIWmcG6V-2dpfvsBiOJN5dO-j0RqZNxtJRcyBU,1583
+tests/test_ontologies_renaming.py,sha256=pawp3pV1hxW8nskWc4f2YHwMUqTilEEBD2BtpcSay5Q,3839
 tests/test_pathwayannot.py,sha256=bceosccNy9tgxQei_7j7ATBSSvBSxOngJvK-mAzR_K0,3312
 tests/test_rpy2_callr.py,sha256=UVzXMvYN3wcc-ikDIjH2sA4BqkbwiNbMm561BcbnbD4,2936
 tests/test_rpy2_init.py,sha256=APrNt9GEQV9va3vU5k250TxFplAoWFc-FJRFhM2GcDk,5927
 tests/test_sbml.py,sha256=f25zj1NogYrmLluvBDboLameTuCiQ309433Qn3iPvhg,1483
-tests/test_sbml_dfs_core.py,sha256=
-tests/test_sbml_dfs_utils.py,sha256
+tests/test_sbml_dfs_core.py,sha256=CH5OXNSAozWTl6qBvbHfgTG0NcgdlKJ_WcG0lTYBm3k,26217
+tests/test_sbml_dfs_utils.py,sha256=-EAW6N_elEOSQdsdRhnEdDhnZQH_weCGpnVOd2Xaepc,6963
 tests/test_sbo.py,sha256=x_PENFaXYsrZIzOZu9cj_Wrej7i7SNGxgBYYvcigLs0,308
 tests/test_scverse_loading.py,sha256=bnU1lQSYYWhOAs0IIBoi4ZohqPokDQJ0n_rtkAfEyMU,29948
 tests/test_set_coverage.py,sha256=J-6m6LuOjcQa9pxRuWglSfJk4Ltm7kt_eOrn_Q-7P6Q,1604
 tests/test_source.py,sha256=hT0IlpexR5zP0OhWl5BBaho9d1aCYQlFZLwRIRRnw_Y,1969
 tests/test_uncompartmentalize.py,sha256=nAk5kfAVLU9a2VWe2x2HYVcKqj-EnwmwddERIPRax8c,1289
-tests/test_utils.py,sha256=
+tests/test_utils.py,sha256=ls0zETdSeupuWCsnycWbgBVwLs2aXLgrAO1jyEBrfao,23190
 tests/utils.py,sha256=SoWQ_5roJteFGcMaOeEiQ5ucwq3Z2Fa3AAs9iXHTsJY,749
 tests/test_data/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-napistu-0.3.
-napistu-0.3.
-napistu-0.3.
-napistu-0.3.
-napistu-0.3.
+napistu-0.3.7.dist-info/METADATA,sha256=59xinCTQICD-lrDm7UR5wdnScq6LHsXy553S6vCRk4Q,3414
+napistu-0.3.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+napistu-0.3.7.dist-info/entry_points.txt,sha256=_QnaPOvJNA3IltxmZgWIiBoen-L1bPYX18YQfC7oJgQ,41
+napistu-0.3.7.dist-info/top_level.txt,sha256=Gpvk0a_PjrtqhYcQ9IDr3zR5LqpZ-uIHidQMIpjlvhY,14
+napistu-0.3.7.dist-info/RECORD,,
tests/conftest.py
CHANGED
@@ -1,16 +1,23 @@
 from __future__ import annotations
 
+import functools
 import os
 import sys
+import threading
 
+import pandas as pd
+import pytest
+from pytest import fixture
+from pytest import skip
 
 from napistu import consensus
 from napistu import indices
-from napistu import
-from napistu.
-from napistu.
-from
-from
+from napistu.identifiers import Identifiers
+from napistu.sbml_dfs_core import SBML_dfs
+from napistu.source import Source
+from napistu.ingestion.sbml import SBML
+from napistu.network.net_create import process_napistu_graph
+from napistu.constants import SBML_DFS
 
 
 @fixture
@@ -25,13 +32,13 @@ def sbml_path():
 
 @fixture
 def sbml_model(sbml_path):
-    sbml_model =
+    sbml_model = SBML(sbml_path)
     return sbml_model
 
 
 @fixture
 def sbml_dfs(sbml_model):
-    sbml_dfs =
+    sbml_dfs = SBML_dfs(sbml_model)
     return sbml_dfs
 
 
@@ -53,20 +60,73 @@ def sbml_dfs_glucose_metabolism():
     test_data = os.path.join(test_path, "test_data")
     sbml_path = os.path.join(test_data, "reactome_glucose_metabolism.sbml")
 
-    sbml_model =
-    sbml_dfs =
+    sbml_model = SBML(sbml_path)
+    sbml_dfs = SBML_dfs(sbml_model)
 
     return sbml_dfs
 
 
+@pytest.fixture
+def minimal_valid_sbml_dfs():
+    """Create a minimal valid SBML_dfs object for testing."""
+    blank_id = Identifiers([])
+    source = Source(init=True)
+
+    sbml_dict = {
+        SBML_DFS.COMPARTMENTS: pd.DataFrame(
+            {
+                SBML_DFS.C_NAME: ["cytosol"],
+                SBML_DFS.C_IDENTIFIERS: [blank_id],
+                SBML_DFS.C_SOURCE: [source],
+            },
+            index=pd.Index(["C00001"], name=SBML_DFS.C_ID),
+        ),
+        SBML_DFS.SPECIES: pd.DataFrame(
+            {
+                SBML_DFS.S_NAME: ["ATP"],
+                SBML_DFS.S_IDENTIFIERS: [blank_id],
+                SBML_DFS.S_SOURCE: [source],
+            },
+            index=pd.Index(["S00001"], name=SBML_DFS.S_ID),
+        ),
+        SBML_DFS.COMPARTMENTALIZED_SPECIES: pd.DataFrame(
+            {
+                SBML_DFS.SC_NAME: ["ATP [cytosol]"],
+                SBML_DFS.S_ID: ["S00001"],
+                SBML_DFS.C_ID: ["C00001"],
+                SBML_DFS.SC_SOURCE: [source],
+            },
+            index=pd.Index(["SC00001"], name=SBML_DFS.SC_ID),
+        ),
+        SBML_DFS.REACTIONS: pd.DataFrame(
+            {
+                SBML_DFS.R_NAME: ["test_reaction"],
+                SBML_DFS.R_IDENTIFIERS: [blank_id],
+                SBML_DFS.R_SOURCE: [source],
+                SBML_DFS.R_ISREVERSIBLE: [False],
+            },
+            index=pd.Index(["R00001"], name=SBML_DFS.R_ID),
+        ),
+        SBML_DFS.REACTION_SPECIES: pd.DataFrame(
+            {
+                SBML_DFS.R_ID: ["R00001"],
+                SBML_DFS.SC_ID: ["SC00001"],
+                SBML_DFS.STOICHIOMETRY: [1.0],
+                SBML_DFS.SBO_TERM: ["SBO:0000011"],
+            },
+            index=pd.Index(["RSC00001"], name=SBML_DFS.RSC_ID),
+        ),
+    }
+
+    return SBML_dfs(sbml_dict)
+
+
 @fixture
 def napistu_graph(sbml_dfs):
     """
     Pytest fixture to create a NapistuGraph from sbml_dfs with directed=True and topology weighting.
     """
-    return
-        sbml_dfs, directed=True, weighting_strategy="topology"
-    )
+    return process_napistu_graph(sbml_dfs, directed=True, weighting_strategy="topology")
 
 
 @fixture
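As a rough illustration of how a test might consume the new minimal_valid_sbml_dfs fixture: the test below is hypothetical and assumes SBML_dfs exposes its tables as attributes (reactions and reaction_species are accessed that way elsewhere in this diff).

    def test_minimal_model_has_one_of_each(minimal_valid_sbml_dfs):
        # One species, one reaction, and one reaction-species edge per the fixture above.
        assert len(minimal_valid_sbml_dfs.species) == 1
        assert len(minimal_valid_sbml_dfs.reactions) == 1
        assert len(minimal_valid_sbml_dfs.reaction_species) == 1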
@@ -74,7 +134,7 @@ def napistu_graph_undirected(sbml_dfs):
     """
     Pytest fixture to create a NapistuGraph from sbml_dfs with directed=False and topology weighting.
     """
-    return
+    return process_napistu_graph(
         sbml_dfs, directed=False, weighting_strategy="topology"
     )
 
@@ -109,3 +169,43 @@ def pytest_runtest_setup(item):
     # Skip tests that should run only on Unix
     if not is_unix and any(mark.name == "unix_only" for mark in item.iter_markers()):
         skip("Test runs only on Unix systems")
+
+
+def skip_on_timeout(timeout_seconds):
+    """Cross-platform decorator that skips a test if it takes longer than timeout_seconds"""
+
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            result = [None]
+            exception = [None]
+            finished = [False]
+
+            def target():
+                try:
+                    result[0] = func(*args, **kwargs)
+                    finished[0] = True
+                except Exception as e:
+                    exception[0] = e
+                    finished[0] = True
+
+            thread = threading.Thread(target=target)
+            thread.daemon = True
+            thread.start()
+            thread.join(timeout_seconds)
+
+            if not finished[0]:
+                # Thread is still running, timeout occurred
+                pytest.skip(f"Test skipped due to timeout ({timeout_seconds}s)")
+
+            if exception[0]:
+                raise exception[0]
+
+            return result[0]
+
+        return wrapper
+
+    return decorator
+
+
+pytest.skip_on_timeout = skip_on_timeout
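A quick, hedged sketch of how the new timeout decorator is intended to be used from a test module; the test body is invented, and it relies on the pytest.skip_on_timeout attribute that conftest.py attaches above.

    import time
    import pytest

    @pytest.skip_on_timeout(5)  # skip, rather than fail, if the call runs past 5 seconds
    def test_possibly_slow_call():
        time.sleep(1)
        assert True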
tests/test_consensus.py
CHANGED
@@ -5,10 +5,13 @@ import os
 import pandas as pd
 import pytest
 from napistu import consensus
+from napistu import identifiers
 from napistu import indices
 from napistu import source
+from napistu import sbml_dfs_core
 from napistu.ingestion import sbml
 from napistu.modify import pathwayannot
+from napistu.constants import SBML_DFS, SBML_DFS_SCHEMA, SCHEMA_DEFS, IDENTIFIERS, BQB
 
 test_path = os.path.abspath(os.path.join(__file__, os.pardir))
 test_data = os.path.join(test_path, "test_data")
@@ -19,11 +22,16 @@ def test_reduce_to_consensus_ids():
 
     # test aggregating by IDs, by moving from compartmentalized_species -> species
 
-    sbml_model = sbml.SBML(sbml_path)
-    comp_species_df =
-    comp_species_df.index.names = [
+    sbml_model = sbml.SBML(sbml_path)
+    comp_species_df = sbml_model._define_cspecies()
+    comp_species_df.index.names = [SBML_DFS.S_ID]
     consensus_species, species_lookup = consensus.reduce_to_consensus_ids(
-        comp_species_df,
+        comp_species_df,
+        {
+            SCHEMA_DEFS.PK: SBML_DFS.S_ID,
+            SCHEMA_DEFS.ID: SBML_DFS.S_IDENTIFIERS,
+            SCHEMA_DEFS.TABLE: SBML_DFS.SPECIES,
+        },
     )
 
     assert isinstance(consensus_species, pd.DataFrame)
@@ -246,6 +254,153 @@ def test_consensus_ontology_check():
     assert post_shared_onto_sp_set == {"chebi", "reactome", "uniprot"}
 
 
+def test_report_consensus_merges_reactions(tmp_path):
+    # Create two minimal SBML_dfs objects with a single reaction each, same r_id
+    r_id = "R00000001"
+    reactions = pd.DataFrame(
+        {
+            SBML_DFS.R_NAME: ["rxn1"],
+            SBML_DFS.R_IDENTIFIERS: [None],
+            SBML_DFS.R_SOURCE: [None],
+            SBML_DFS.R_ISREVERSIBLE: [False],
+        },
+        index=[r_id],
+    )
+    reactions.index.name = SBML_DFS.R_ID
+    reaction_species = pd.DataFrame(
+        {
+            SBML_DFS.R_ID: [r_id],
+            SBML_DFS.SC_ID: ["SC0001"],
+            SBML_DFS.STOICHIOMETRY: [1],
+            SBML_DFS.SBO_TERM: ["SBO:0000459"],
+        },
+        index=["RSC0001"],
+    )
+    reaction_species.index.name = SBML_DFS.RSC_ID
+    compartmentalized_species = pd.DataFrame(
+        {
+            SBML_DFS.SC_NAME: ["A [cytosol]"],
+            SBML_DFS.S_ID: ["S0001"],
+            SBML_DFS.C_ID: ["C0001"],
+            SBML_DFS.SC_SOURCE: [None],
+        },
+        index=["SC0001"],
+    )
+    compartmentalized_species.index.name = SBML_DFS.SC_ID
+    species = pd.DataFrame(
+        {
+            SBML_DFS.S_NAME: ["A"],
+            SBML_DFS.S_IDENTIFIERS: [None],
+            SBML_DFS.S_SOURCE: [None],
+        },
+        index=["S0001"],
+    )
+    species.index.name = SBML_DFS.S_ID
+    compartments = pd.DataFrame(
+        {
+            SBML_DFS.C_NAME: ["cytosol"],
+            SBML_DFS.C_IDENTIFIERS: [None],
+            SBML_DFS.C_SOURCE: [None],
+        },
+        index=["C0001"],
+    )
+    compartments.index.name = SBML_DFS.C_ID
+    sbml_dict = {
+        SBML_DFS.COMPARTMENTS: compartments,
+        SBML_DFS.SPECIES: species,
+        SBML_DFS.COMPARTMENTALIZED_SPECIES: compartmentalized_species,
+        SBML_DFS.REACTIONS: reactions,
+        SBML_DFS.REACTION_SPECIES: reaction_species,
+    }
+    sbml1 = sbml_dfs_core.SBML_dfs(sbml_dict, validate=False, resolve=False)
+    sbml2 = sbml_dfs_core.SBML_dfs(sbml_dict, validate=False, resolve=False)
+    sbml_dfs_dict = {"mod1": sbml1, "mod2": sbml2}
+
+    # Create a lookup_table that merges both reactions into a new_id
+    lookup_table = pd.DataFrame(
+        {
+            "model": ["mod1", "mod2"],
+            "r_id": [r_id, r_id],
+            "new_id": ["merged_rid", "merged_rid"],
+        }
+    )
+    # Use the reactions schema
+    table_schema = SBML_DFS_SCHEMA.SCHEMA[SBML_DFS.REACTIONS]
+
+    # Call the function and check that it runs and the merge_labels are as expected
+    consensus.report_consensus_merges(
+        lookup_table.set_index(["model", "r_id"])[
+            "new_id"
+        ],  # this is a Series with name 'new_id'
+        table_schema,
+        sbml_dfs_dict=sbml_dfs_dict,
+        n_example_merges=1,
+    )
+    # No assertion: this is a smoke test to ensure the Series output is handled without error
+
+
+def test_build_consensus_identifiers_handles_merges_and_missing_ids():
+
+    # Three entities:
+    # - 'A' with identifier X
+    # - 'B' with no identifiers
+    # - 'C' with identifier X (should merge with 'A')
+    df = pd.DataFrame(
+        {
+            "s_id": ["A", "B", "C"],
+            "s_Identifiers": [
+                identifiers.Identifiers(
+                    [
+                        {
+                            IDENTIFIERS.ONTOLOGY: "test",
+                            IDENTIFIERS.IDENTIFIER: "X",
+                            IDENTIFIERS.BQB: BQB.IS,
+                        }
+                    ]
+                ),
+                identifiers.Identifiers([]),
+                identifiers.Identifiers(
+                    [
+                        {
+                            IDENTIFIERS.ONTOLOGY: "test",
+                            IDENTIFIERS.IDENTIFIER: "X",
+                            IDENTIFIERS.BQB: BQB.IS,
+                        }
+                    ]
+                ),
+            ],
+        }
+    ).set_index("s_id")
+
+    schema = SBML_DFS_SCHEMA.SCHEMA[SBML_DFS.SPECIES]
+
+    indexed_cluster, cluster_consensus_identifiers = (
+        consensus.build_consensus_identifiers(df, schema)
+    )
+
+    # All entities should be assigned to a cluster
+    assert set(indexed_cluster.index) == set(df.index)
+    assert not indexed_cluster.isnull().any()
+    # There should be a consensus identifier for each cluster
+    assert set(cluster_consensus_identifiers.index) == set(indexed_cluster.values)
+
+    # Entities 'A' and 'C' should be merged (same cluster)
+    assert indexed_cluster.loc["A"] == indexed_cluster.loc["C"]
+    # Entity 'B' should be in a different cluster
+    assert indexed_cluster.loc["B"] != indexed_cluster.loc["A"]
+
+    # The consensus identifier for the merged cluster should include identifier X
+    merged_cluster_id = indexed_cluster.loc["A"]
+    ids_obj = cluster_consensus_identifiers.loc[merged_cluster_id, schema["id"]]
+    assert any(i["identifier"] == "X" for i in getattr(ids_obj, "ids", []))
+
+    # The consensus identifier for the entity with no identifiers should be empty
+    noid_cluster_id = indexed_cluster.loc["B"]
+    ids_obj_noid = cluster_consensus_identifiers.loc[noid_cluster_id, schema["id"]]
+    assert hasattr(ids_obj_noid, "ids")
+    assert len(getattr(ids_obj_noid, "ids", [])) == 0
+
+
 ################################################
 # __main__
 ################################################
@@ -256,3 +411,5 @@ if __name__ == "__main__":
     test_source_tracking()
     test_passing_entity_data()
     test_consensus_ontology_check()
+    test_report_consensus_merges_reactions()
+    test_build_consensus_identifiers_handles_merges_and_missing_ids()
tests/test_context_filtering.py
CHANGED
@@ -3,7 +3,7 @@ from __future__ import annotations
 import copy
 import pytest
 import pandas as pd
-from napistu import
+from napistu import sbml_dfs_utils
 from napistu.constants import SBML_DFS
 from napistu.context.filtering import (
     filter_species_by_attribute,
@@ -208,7 +208,7 @@ def test_filter_reactions_with_disconnected_cspecies(sbml_dfs):
     first_reactions = list(sbml_dfs.reactions.index[:5])
 
     # 2. Find defining species in these reactions
-    reaction_species =
+    reaction_species = sbml_dfs_utils.add_sbo_role(sbml_dfs.reaction_species)
     defining_species = (
         reaction_species[reaction_species[SBML_DFS.R_ID].isin(first_reactions)]
         .query("sbo_role == 'DEFINING'")
|