rasar 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
rasar-0.1.0/LICENSE ADDED
@@ -0,0 +1,177 @@
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship made available under
36
+ the License, as indicated by a copyright notice that is included in
37
+ or attached to the work (an example is provided in the Appendix below).
38
+
39
+ "Derivative Works" shall mean any work, whether in Source or Object
40
+ form, that is based on (or derived from) the Work and for which the
41
+ editorial revisions, annotations, elaborations, or other modifications
42
+ represent, as a whole, an original work of authorship. For the purposes
43
+ of this License, Derivative Works shall not include works that remain
44
+ separable from, or merely link (or bind by name) to the interfaces of,
45
+ the Work and Derivative Works thereof.
46
+
47
+ "Contribution" shall mean, as submitted to the Licensor for inclusion
48
+ in the Work by the copyright owner or by an individual or Legal Entity
49
+ authorized to submit on behalf of the copyright owner. For the purposes
50
+ of this definition, "submitted" means any form of electronic, verbal,
51
+ or written communication sent to the Licensor or its representatives,
52
+ including but not limited to communication on electronic mailing lists,
53
+ source code control systems, and issue tracking systems that are managed
54
+ by, or on behalf of, the Licensor for the purpose of discussing and
55
+ improving the Work, but excluding communication that is conspicuously
56
+ marked or designated in writing by the copyright owner as
57
+ "Not a Contribution."
58
+
59
+ "Contributor" shall mean Licensor and any Legal Entity on behalf of
60
+ whom a Contribution has been received by the Licensor and incorporated
61
+ within the Work.
62
+
63
+ 2. Grant of Copyright License. Subject to the terms and conditions of
64
+ this License, each Contributor hereby grants to You a perpetual,
65
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
66
+ copyright license to reproduce, prepare Derivative Works of,
67
+ publicly display, publicly perform, sublicense, and distribute the
68
+ Work and such Derivative Works in Source or Object form.
69
+
70
+ 3. Grant of Patent License. Subject to the terms and conditions of
71
+ this License, each Contributor hereby grants to You a perpetual,
72
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
73
+ (except as stated in this section) patent license to make, have made,
74
+ use, offer to sell, sell, import, and otherwise transfer the Work,
75
+ where such license applies only to those patent claims licensable
76
+ by such Contributor that are necessarily infringed by their
77
+ Contribution(s) alone or by combination of their Contribution(s)
78
+ with the Work to which such Contribution(s) was submitted. If You
79
+ institute patent litigation against any entity (including a cross-claim
80
+ or counterclaim in a lawsuit) alleging that the Work or any Contribution
81
+ constitutes direct or contributory patent infringement, then any patent
82
+ licenses granted to You under this License for that Work shall terminate
83
+ as of the date such litigation is filed.
84
+
85
+ 4. Redistribution. You may reproduce and distribute copies of the
86
+ Work or Derivative Works thereof in any medium, with or without
87
+ modifications, and in Source or Object form, provided that You
88
+ meet the following conditions:
89
+
90
+ (a) You must give any other recipients of the Work or Derivative
91
+ Works a copy of this License; and
92
+
93
+ (b) You must cause any modified files to carry prominent notices
94
+ stating that You changed the files; and
95
+
96
+ (c) You must retain, in the Source form of any Derivative Works
97
+ that You distribute, all copyright, patent, trademark, and
98
+ attribution notices from the Source form of the Work,
99
+ excluding those notices that do not pertain to any part of
100
+ the Derivative Works; and
101
+
102
+ (d) If the Work includes a "NOTICE" text file as part of its
103
+ distribution, You must include a readable copy of the
104
+ attribution notices contained within such NOTICE file, in
105
+ at least one of the following places: within a NOTICE text
106
+ file distributed as part of the Derivative Works; within
107
+ the Source form or documentation, if provided along with the
108
+ Derivative Works; or, within a display generated by the
109
+ Derivative Works, if and wherever such third-party notices
110
+ normally appear. The contents of the NOTICE file are for
111
+ informational purposes only and do not modify the License.
112
+ You may add Your own attribution notices within Derivative
113
+ Works that You distribute, alongside or as an addendum to
114
+ the NOTICE text from the Work, provided that such additional
115
+ attribution notices cannot be construed as modifying the License.
116
+
117
+ You may add Your own license statement for Your modifications and
118
+ may provide additional grant of rights to use, copy, modify, and
119
+ distribute Your modifications as part of a Derivative Work.
120
+
121
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
122
+ any Contribution intentionally submitted for inclusion in the Work
123
+ by You to the Licensor shall be under the terms and conditions of
124
+ this License, without any additional terms or conditions.
125
+ Notwithstanding the above, nothing herein shall supersede or modify
126
+ the terms of any separate license agreement you may have executed
127
+ with Licensor regarding such Contributions.
128
+
129
+ 6. Trademarks. This License does not grant permission to use the trade
130
+ names, trademarks, service marks, or product names of the Licensor,
131
+ except as required for reasonable and customary use in describing the
132
+ origin of the Work and reproducing the content of the NOTICE file.
133
+
134
+ 7. Disclaimer of Warranty. Unless required by applicable law or
135
+ agreed to in writing, Licensor provides the Work (and each
136
+ Contributor provides its Contributions) on an "AS IS" BASIS,
137
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
138
+ implied, including, without limitation, any warranties or conditions
139
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
140
+ PARTICULAR PURPOSE. You are solely responsible for determining the
141
+ appropriateness of using or redistributing the Work and assume any
142
+ risks associated with Your exercise of permissions under this License.
143
+
144
+ 8. Limitation of Liability. In no event and under no legal theory,
145
+ whether in tort (including negligence), contract, or otherwise,
146
+ unless required by applicable law (such as deliberate and grossly
147
+ negligent acts) or agreed to in writing, shall any Contributor be
148
+ liable to You for damages, including any direct, indirect, special,
149
+ incidental, or exemplary damages of any character arising as a
150
+ result of this License or out of the use or inability to use the
151
+ Work (including but not limited to damages for loss of goodwill,
152
+ work stoppage, computer failure or malfunction, or all other
153
+ commercial damages or losses), even if such Contributor has been
154
+ advised of the possibility of such damages.
155
+
156
+ 9. Accepting Warranty or Additional Liability. While redistributing
157
+ the Work or Derivative Works thereof, You may choose to offer,
158
+ and charge a fee for, acceptance of support, warranty, indemnity,
159
+ or other liability obligations and/or rights consistent with this
160
+ License. However, in accepting such obligations, You may offer only
161
+ conditions consistent with this License.
162
+
163
+ END OF TERMS AND CONDITIONS
164
+
165
+ Copyright 2026 rasar
166
+
167
+ Licensed under the Apache License, Version 2.0 (the "License");
168
+ you may not use this file except in compliance with the License.
169
+ You may obtain a copy of the License at
170
+
171
+ http://www.apache.org/licenses/LICENSE-2.0
172
+
173
+ Unless required by applicable law or agreed to in writing, software
174
+ distributed under the License is distributed on an "AS IS" BASIS,
175
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
176
+ See the License for the specific language governing permissions and
177
+ limitations under the License.
rasar-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,56 @@
1
+ Metadata-Version: 2.4
2
+ Name: rasar
3
+ Version: 0.1.0
4
+ Summary: The components of this module can be used for read-across related calculations.
5
+ License: Apache License 2.0
6
+ Requires-Python: >=3.10.4
7
+ Description-Content-Type: text/markdown
8
+ License-File: LICENSE
9
+ Requires-Dist: pandas==2.3.3
10
+ Requires-Dist: numpy==2.2.6
11
+ Requires-Dist: matplotlib==3.10.7
12
+ Requires-Dist: scikit-learn==1.7.2
13
+ Dynamic: license-file
14
+
15
+ # rasar
16
+ The components of this module can be used for read-across related calculations. It is a crucial module for cheminformatics applications.
17
+
18
+
19
+ ## Installation
20
+ pip install rasar
21
+
22
+
23
+ ## Usage
24
+ This module supports five different read-across tasks, including pairwise similarity calculation, read-across prediction, read-across optimization, read-across feature importance calculation, and rasar descriptor calculation.
25
+
26
+
27
+ ## Getting started
28
+ import pandas as pd
29
+ from rasar import ra_similarity, ra_pred, ra_optimization, ra_importance, calculate_descriptor
30
+ tr = pd.read_excel('train.xlsx', index_col=0)
31
+ te = pd.read_excel('test.xlsx', index_col=0)
32
+ xtr = tr.iloc[:,:-1]
33
+ ytr = tr.iloc[:,-1]
34
+ xte = te.iloc[:,:-1]
35
+ yte = te.iloc[:,-1]
36
+ sim = ra_similarity(des_tr=xtr, des_te=xte)
37
+ sim1 = sim.similarity_calculation(method='Euclidean Distance')
38
+ pred = ra_pred(df1=tr, df2=te).weighted_prediction(method='Laplacian Kernel', ctc=6, gamma=0.5)
39
+ opt = ra_optimization(method='Laplacian Kernel', data=tr, parameters={'CTC': [1, 3, 6, 10],
40
+ 'Gamma': [0.1, 0.5, 1],
41
+ 'Threshold': [0.0]},
42
+ objective_function="MAE",
43
+ cv_fold=5)
44
+ imp = ra_importance(df1=tr).imp_calculation(method='Laplacian Kernel', ctc=6, gamma=0.5, ths=2)
45
+ ra_importance(df1=tr).plot_importance(imp_df=imp, plot_type='coefficient', color="winter_r", index=1)
46
+ des_tr, des_te = calculate_descriptor(df1=tr, df2=te, method='Laplacian Kernel', ctc=6, gamma=0.5, merge = True)
47
+
48
+ ## Cite
49
+ To use this module, users need to cite the following paper:
50
+
51
+ Pore, S. and Roy, K., 2025. “intelligent Read Across (iRA)”-A tool for read-across-based toxicity prediction of nanoparticles. Computational and Structural Biotechnology Journal. https://doi.org/10.1016/j.csbj.2025.07.032
52
+
53
+
54
+ ## LICENSE
55
+ Apache License 2.0
56
+
rasar-0.1.0/README.md ADDED
@@ -0,0 +1,42 @@
1
+ # rasar
2
+ The components of this module can be used for read-across related calculations. It is a crucial module for cheminformatics applications.
3
+
4
+
5
+ ## Installation
6
+ pip install rasar
7
+
8
+
9
+ ## Usage
10
+ This module supports five different read-across tasks, including pairwise similarity calculation, read-across prediction, read-across optimization, read-across feature importance calculation, and rasar descriptor calculation.
11
+
12
+
13
+ ## Getting started
14
+ import pandas as pd
15
+ from rasar import ra_similarity, ra_pred, ra_optimization, ra_importance, calculate_descriptor
16
+ tr = pd.read_excel('train.xlsx', index_col=0)
17
+ te = pd.read_excel('test.xlsx', index_col=0)
18
+ xtr = tr.iloc[:,:-1]
19
+ ytr = tr.iloc[:,-1]
20
+ xte = te.iloc[:,:-1]
21
+ yte = te.iloc[:,-1]
22
+ sim = ra_similarity(des_tr=xtr, des_te=xte)
23
+ sim1 = sim.similarity_calculation(method='Euclidean Distance')
24
+ pred = ra_pred(df1=tr, df2=te).weighted_prediction(method='Laplacian Kernel', ctc=6, gamma=0.5)
25
+ opt = ra_optimization(method='Laplacian Kernel', data=tr, parameters={'CTC': [1, 3, 6, 10],
26
+ 'Gamma': [0.1, 0.5, 1],
27
+ 'Threshold': [0.0]},
28
+ objective_function="MAE",
29
+ cv_fold=5)
30
+ imp = ra_importance(df1=tr).imp_calculation(method='Laplacian Kernel', ctc=6, gamma=0.5, ths=2)
31
+ ra_importance(df1=tr).plot_importance(imp_df=imp, plot_type='coefficient', color="winter_r", index=1)
32
+ des_tr, des_te = calculate_descriptor(df1=tr, df2=te, method='Laplacian Kernel', ctc=6, gamma=0.5, merge = True)
33
+
34
+ ## Cite
35
+ To use this module, users need to cite the following paper:
36
+
37
+ Pore, S. and Roy, K., 2025. “intelligent Read Across (iRA)”-A tool for read-across-based toxicity prediction of nanoparticles. Computational and Structural Biotechnology Journal. https://doi.org/10.1016/j.csbj.2025.07.032
38
+
39
+
40
+ ## LICENSE
41
+ Apache License 2.0
42
+
@@ -0,0 +1,18 @@
1
+ [build-system]
2
+ requires = ["setuptools>=61.0"]
3
+ build-backend = "setuptools.build_meta"
4
+
5
+ [project]
6
+ name = "rasar"
7
+ version = "0.1.0"
8
+ description = "The components of this module can be used for read-across related calculations."
9
+ readme = "README.md"
10
+ requires-python = ">=3.10.4"
11
+ license = { text = "Apache License 2.0" }
12
+
13
+ dependencies = [
14
+ "pandas==2.3.3",
15
+ "numpy==2.2.6",
16
+ "matplotlib==3.10.7",
17
+ "scikit-learn==1.7.2",
18
+ ]
@@ -0,0 +1,5 @@
1
+ from .similarity import ra_similarity
2
+ from .prediction import ra_pred
3
+ from .optimization import ra_optimization
4
+ from .feature_importance import ra_importance
5
+ from .rasar_descriptors import calculate_descriptor
@@ -0,0 +1,140 @@
1
+ import pandas as pd
2
+ from rasar.prediction import ra_pred
3
+ from itertools import chain, combinations
4
+ import matplotlib.pyplot as plt
5
+ import numpy as np
6
+
7
+
8
+ #factorial calculator
9
+ def factorial(n):
10
+ result = 1
11
+ for i in range(1, n + 1):
12
+ result *= i
13
+ return result
14
+
15
+ #combination generator
16
+ def generate_combinations(lst, lnt=8):
17
+ min_length = len(lst)-lnt
18
+ all_combinations = chain.from_iterable(combinations(lst, r) for r in range(len(lst), min_length-1,-1))
19
+ return [list(combo) for combo in all_combinations]
20
+
21
+ def summary_plot_cal(df1:pd.DataFrame, df2:pd.DataFrame):
22
+ """
23
+ df1: Importance file \n
24
+ df2: Non-standardized input file (xtr) \n
25
+ both files should not contain any response column"""
26
+ std_df2 = (df2-df2.min())/(df2.max()-df2.min())
27
+ abs_df1 = df1.abs()
28
+ abs_df1_mean = abs_df1.mean()
29
+ imp_fea = abs_df1_mean.sort_values(kind="mergesort")
30
+ imp_fea1 = imp_fea.index.values.tolist()
31
+ ndf1 = df1[imp_fea1].copy()
32
+ ndf2 = std_df2[imp_fea1].copy()
33
+ return ndf1, ndf2, imp_fea1
34
+
35
+
36
+
37
+ class ra_importance:
38
+
39
+ def __init__(self, df1: pd.DataFrame): #df1 is the training set
40
+ self.df1 = df1
41
+ self.x_tr = df1.iloc[:,:-1]
42
+ self.y_tr = df1.iloc[:,-1]
43
+ self.ndes = len(self.x_tr.columns)
44
+ self.ncomp = len(self.x_tr)
45
+
46
+
47
+ def imp_calculation(self, method:str, ctc, ths, sigma=1.0, gamma=1.0, similarity_cutoff=0.0, p_mk=3, degree=3.0, coef0=1):
48
+ if ths == None:
49
+ ths = int(len(self.x_tr.columns.tolist())/4)
50
+ else:
51
+ ths = ths
52
+ fea_list = self.x_tr.columns.tolist()
53
+ n_fact = factorial(n=len(fea_list))
54
+ fea_comb = generate_combinations(fea_list, lnt=ths)
55
+ print(fea_comb)
56
+ importance = []
57
+ for i in fea_list:
58
+ comb1 = [list(co) for co in fea_comb if str(i) in co]
59
+ comb1_comp = [[item for item in sublist if item != str(i)] for sublist in comb1]
60
+ cal_list=[]
61
+ print(comb1)
62
+ print(comb1_comp)
63
+ for j,k in zip(comb1, comb1_comp):
64
+ nxtr1 = self.x_tr[j].copy()
65
+ nxtr1_comp = self.x_tr[k].copy()
66
+ ndf1 = pd.concat([nxtr1, self.y_tr], axis=1)
67
+ ndf1_comp = pd.concat([nxtr1_comp, self.y_tr], axis=1)
68
+ pred1 = ra_pred(df1=ndf1, df2=ndf1).weighted_prediction(method=method, ctc=ctc, sigma=sigma,
69
+ gamma=gamma, similarity_cutoff=similarity_cutoff,
70
+ p_mk=p_mk, degree=degree, coef0=coef0)
71
+ pred2 = ra_pred(df1=ndf1_comp, df2=ndf1_comp).weighted_prediction(method=method, ctc=ctc, sigma=sigma,
72
+ gamma=gamma, similarity_cutoff=similarity_cutoff,
73
+ p_mk=p_mk, degree=degree, coef0=coef0)
74
+ pred1.fillna(0, inplace=True)
75
+ pred2.fillna(0, inplace=True)
76
+ delta_pred = ((factorial(n=len(k))*(factorial(n=len(fea_list)-len(k)-1)))/n_fact)*(pred1-pred2)
77
+ cal_list.append(delta_pred)
78
+ cal_df = pd.DataFrame(cal_list)
79
+ marginal_contribution = (cal_df.T).sum(axis=1)
80
+ importance.append(marginal_contribution)
81
+ imp_df = pd.DataFrame(importance).T
82
+ imp_df.columns=self.x_tr.columns.to_list()
83
+ imp_df.index=self.x_tr.index.values
84
+ return imp_df
85
+
86
+ def plot_importance(self, imp_df, plot_type:str, color:str, index=0):
87
+ if plot_type == "bar":
88
+ plt.rcParams['font.family'] = 'Times New Roman'
89
+ plt.rcParams['font.size'] = 17
90
+ plt.figure(figsize=(12,10))
91
+ abs_df1 = imp_df.abs()
92
+ abs_df1_mean = abs_df1.mean()
93
+ imp_fea = abs_df1_mean.sort_values(ascending=False, kind="mergesort")
94
+ plt.bar(imp_fea.index.values.tolist(), imp_fea.values.tolist(), color= "#4169E1", edgecolor="black")
95
+ plt.title("Read Across Feature Importance", fontsize=23, fontweight='bold')
96
+ plt.xlabel("Features", fontsize=17, fontweight='bold')
97
+ plt.ylabel("Feature Importance", fontsize=17, fontweight='bold')
98
+ plt.show()
99
+
100
+ if plot_type == "summary":
101
+ plt.rcParams['font.family'] = 'Times New Roman'
102
+ plt.rcParams['font.size'] = 17
103
+ plt.figure(figsize=(12,10))
104
+ ndf1, ndf2, imp_fea1 = summary_plot_cal(df1=imp_df, df2=self.x_tr)
105
+ num_points = len(ndf1)
106
+ jitter_base = np.linspace(-0.2, 0.2, num_points)
107
+ for i, feature_idx in enumerate(imp_fea1):
108
+ print(feature_idx)
109
+ y_values = i + np.sort(jitter_base)
110
+ plt.scatter(
111
+ ndf1[[feature_idx]],
112
+ y_values,
113
+ c=ndf2[[feature_idx]].values,
114
+ cmap=color,
115
+ edgecolors="k",
116
+ linewidth=0.3,
117
+ s=60
118
+ )
119
+ plt.title("Read Across Feature Importance", fontsize=23, fontweight='bold')
120
+ plt.yticks(range(len(imp_fea1)), imp_fea1)
121
+ plt.axvline(x=0, color="black", linestyle="-", linewidth=1)
122
+ plt.xlabel("Feature Importance", fontsize=17, fontweight='bold')
123
+ plt.ylabel("Features", fontsize=17, fontweight='bold')
124
+ plt.show()
125
+ if plot_type == "coefficient":
126
+ plt.rcParams['font.family'] = 'Times New Roman'
127
+ plt.rcParams['font.size'] = 17
128
+ plt.figure(figsize=(12,10))
129
+ coefficients = imp_df.iloc[index]
130
+ coefficient = coefficients.tolist()
131
+ coef_colors = ["blue" if coef > 0 else "red" for coef in coefficients]
132
+ plt.barh(coefficients.index.tolist(), coefficient, color=coef_colors, edgecolor="black", alpha=0.7)
133
+ plt.axvline(x=0, color="black", linestyle="-", linewidth=1)
134
+ plt.title(f"Read Across Feature Importance for Observation {index}", fontsize=23, fontweight='bold')
135
+ plt.xlabel("Feature Contribution", fontsize=17, fontweight='bold')
136
+ plt.ylabel("Features", fontsize=17, fontweight='bold')
137
+ plt.show()
138
+
139
+
140
+
@@ -0,0 +1,160 @@
1
+ import pandas as pd
2
+ import numpy as np
3
+ from rasar.prediction import ra_pred
4
+ from sklearn.model_selection import KFold
5
+ from itertools import product
6
+ from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef, cohen_kappa_score
7
+ from sklearn.metrics import mean_absolute_error, mean_squared_error
8
+
9
+
10
+
11
+ def ra_optimization(method: str, data: pd.DataFrame, parameters: dict, objective_function: str, cv_fold: int):
12
+ """
13
+ data: should be arranged as ID-Descriptors-Response
14
+ parameters: Dictionary (having different descriptor combination)
15
+ """
16
+ folds = KFold(n_splits=cv_fold)
17
+
18
+ # Generate all combinations
19
+ keys = parameters.keys()
20
+ values = parameters.values()
21
+ combinations = [dict(zip(keys, v)) for v in product(*values)]
22
+
23
+ results = [] # To store results
24
+
25
+ for comb in combinations:
26
+ split_results = [] # To store values for each fold
27
+
28
+ for i, (train_index, test_index) in enumerate(folds.split(data), start=1):
29
+ train_data = data.iloc[train_index]
30
+ test_data = data.iloc[test_index]
31
+ yte = test_data.iloc[:, -1]
32
+
33
+ if method == "Gaussian Kernel":
34
+ sigma_value = comb["Sigma"]
35
+ ctc_value = comb["CTC"]
36
+ threshold_value = comb["Threshold"]
37
+ if ctc_value == "auto":
38
+ ctc_f = "auto"
39
+ else:
40
+ ctc_f = int(ctc_value)
41
+ ytep = ra_pred(df1=train_data, df2=test_data).weighted_prediction(method=method,
42
+ ctc=ctc_f,
43
+ similarity_cutoff=float(threshold_value),
44
+ sigma=float(sigma_value))
45
+
46
+
47
+ elif method == "Laplacian Kernel" or method == "Chi2_Kernel":
48
+ gamma_value = comb["Gamma"]
49
+ ctc_value = comb["CTC"]
50
+ threshold_value = comb["Threshold"]
51
+ if ctc_value == "auto":
52
+ ctc_f = "auto"
53
+ else:
54
+ ctc_f = int(ctc_value)
55
+ ytep = ra_pred(df1=train_data, df2=test_data).weighted_prediction(method=method,
56
+ ctc=ctc_f,
57
+ similarity_cutoff=float(threshold_value),
58
+ gamma=float(gamma_value))
59
+
60
+ elif method == "Minkowski Distance":
61
+ p_value = comb["P_Value"]
62
+ ctc_value=comb["CTC"]
63
+ threshold_value = comb["Threshold"]
64
+ if ctc_value == "auto":
65
+ ctc_f = "auto"
66
+ else:
67
+ ctc_f = int(ctc_value)
68
+ ytep = ra_pred(df1=train_data, df2=test_data).weighted_prediction(method=method,
69
+ ctc=ctc_f,
70
+ similarity_cutoff=float(threshold_value),
71
+ p_mk=int(p_value))
72
+
73
+ elif method == "Polynomial_Kernel":
74
+ degree_value = comb["Degree"]
75
+ gamma_value = comb["Gamma"]
76
+ coef0_value = comb["Coef0"]
77
+ ctc_value = comb["CTC"]
78
+ threshold_value = comb["Threshold"]
79
+ if ctc_value == "auto":
80
+ ctc_f = "auto"
81
+ else:
82
+ ctc_f = int(ctc_value)
83
+ ytep = ra_pred(df1=train_data, df2=test_data).weighted_prediction(method=method,
84
+ ctc=ctc_f,
85
+ similarity_cutoff=float(threshold_value),
86
+ degree=float(degree_value),
87
+ gamma=float(gamma_value),
88
+ coef0=float(coef0_value))
89
+
90
+ elif method == "Sigmoid_Kernel":
91
+ gamma_value = comb["Gamma"]
92
+ coef0_value = comb["Coef0"]
93
+ ctc_value = comb["CTC"]
94
+ threshold_value = comb["Threshold"]
95
+ if ctc_value == "auto":
96
+ ctc_f = "auto"
97
+ else:
98
+ ctc_f = int(ctc_value)
99
+ ytep = ra_pred(df1=train_data, df2=test_data).weighted_prediction(method=method,
100
+ ctc=ctc_f,
101
+ similarity_cutoff=float(threshold_value),
102
+ gamma=float(gamma_value),
103
+ coef0=float(coef0_value))
104
+
105
+ else:
106
+ ctc_value = comb["CTC"]
107
+ threshold_value = comb["Threshold"]
108
+ if ctc_value == "auto":
109
+ ctc_f = "auto"
110
+ else:
111
+ ctc_f = int(ctc_value)
112
+ ytep = ra_pred(df1=train_data, df2=test_data).weighted_prediction(method=method,
113
+ similarity_cutoff=float(threshold_value),
114
+ ctc=ctc_f)
115
+
116
+
117
+
118
+
119
+ # Compute the objective function
120
+ if ytep.isna().any().any():
121
+ cal_val = np.nan
122
+ else:
123
+ if objective_function == "MAE":
124
+ cal_val = mean_absolute_error(yte, ytep)
125
+ elif objective_function == "MSE":
126
+ cal_val = mean_squared_error(yte, ytep)
127
+ elif objective_function == "RMSE":
128
+ cal_val = np.sqrt(mean_squared_error(yte, ytep))
129
+ elif objective_function == "accuracy":
130
+ ytep1 = (ytep >= 0.5).astype(int)
131
+ cal_val = accuracy_score(yte, ytep1)
132
+ elif objective_function == "f1-score":
133
+ ytep1 = (ytep >= 0.5).astype(int)
134
+ cal_val = f1_score(yte, ytep1)
135
+ elif objective_function == "MCC":
136
+ ytep1 = (ytep >= 0.5).astype(int)
137
+ cal_val = matthews_corrcoef(yte, ytep1)
138
+ elif objective_function == "Cohen's Kappa":
139
+ ytep1 = (ytep >= 0.5).astype(int)
140
+ cal_val = cohen_kappa_score(yte, ytep1)
141
+ else:
142
+ raise ValueError("Invalid objective function")
143
+
144
+ split_results.append(cal_val)
145
+
146
+ comb_result = comb.copy()
147
+ for j, val in enumerate(split_results, start=1):
148
+ comb_result[f"split_{j}"] = val
149
+ comb_result["Mean_Result"] = np.nanmean(split_results)
150
+ print(f"Completed combination: {comb_result}")
151
+
152
+ results.append(comb_result)
153
+
154
+
155
+ return pd.DataFrame(results)
156
+
157
+
158
+
159
+
160
+
@@ -0,0 +1,303 @@
1
+ import numpy as np
2
+ import pandas as pd
3
+ from rasar.similarity import ra_similarity
4
+
5
+
6
+ #==================================Helper Functions==================================
7
+
8
+ def standerdization(df1, df2):
9
+ mea=df1.mean()
10
+ stdv=df1.std()
11
+ ndf1=(df1-mea)/stdv
12
+ ndf2=(df2-mea)/stdv
13
+ return ndf1, ndf2
14
+
15
+ def data_sort(frame, id):#id is the index of the new data frame
16
+ df_sorted = pd.DataFrame(frame.apply(lambda row: sorted(zip(frame.columns, row),
17
+ key=lambda x: x[1], reverse=True), axis=1).tolist(), index=id)
18
+ df_val = pd.DataFrame(frame.apply(lambda row: [x[1] for x in sorted(zip(frame.columns, row),
19
+ key=lambda x: x[1], reverse=True)],
20
+ axis=1).tolist(), index=id)
21
+ df_sorted_columns = pd.DataFrame(frame.apply(lambda row: [x[0] for x in sorted(zip(frame.columns, row),
22
+ key=lambda x: x[1], reverse=True)],
23
+ axis=1).tolist(), index=id)
24
+
25
+ return df_sorted, df_val, df_sorted_columns
26
+
27
+ mylist = ["Euclidean Distance",
28
+ "Gaussian Kernel",
29
+ "Laplacian Kernel",
30
+ "Linear Kernel",
31
+ "Polynomial_Kernel",
32
+ "Sigmoid_Kernel",
33
+ "Cosine Similarity",
34
+ "Manhattan Distance",
35
+ "Chebyshev Distance",
36
+ "Minkowski Distance",
37
+ "SEuclidean Distance",
38
+ "Mahalanobis Distance",
39
+ "Canberra Distance",
40
+ "BrayCurtis Distance"]
41
+
42
+ def mse(df1:pd.DataFrame, df2:pd.DataFrame, max_ctc=10, obj="MAE"):
43
+ df1 = df1.iloc[:,0:max_ctc]
44
+ df2 = df2.iloc[:,0:max_ctc]
45
+ result = pd.DataFrame(index=df1.index)
46
+
47
+ for i in range(len(df2.columns)):
48
+ column_sum = pd.Series(0, index=df1.index)
49
+
50
+ for j in range(len(df1.columns)):
51
+ if j <= i:
52
+ if obj =="MSE":
53
+ diff_squared = ((df2.iloc[:, i] - df1.iloc[:, j]) ** 2)
54
+ elif obj =="MAE":
55
+ diff_squared = abs(df2.iloc[:, i] - df1.iloc[:, j])
56
+ column_sum += diff_squared
57
+ column_sum /= (i + 1)
58
+ result[i] = column_sum
59
+ #result/=df2
60
+ return result
61
+
62
+ def min_loc(df1:pd.DataFrame, df2:pd.DataFrame): # df1 is the MSE matrix and df2 is the WAP matrix
63
+ min_indices = []
64
+ col2 = []
65
+ corresponding_values = []
66
+
67
+ for index, row in df1.iloc[:, 1:].iterrows():
68
+ if row.isna().all(): # Check if all values in the row are NaN
69
+ min_indices.append(None)
70
+ corresponding_values.append(np.nan) # Assign NaN instead of 0
71
+ col2.append(np.nan)
72
+ else:
73
+ col_name = row.idxmin()
74
+ corresponding_value = df2.loc[index, col_name]
75
+ min_indices.append(col_name)
76
+ corresponding_values.append(corresponding_value)
77
+ col2.append(col_name + 1)
78
+
79
+ pd_fr = pd.Series(corresponding_values, index=df1.index)
80
+ ctc = pd.DataFrame(col2, index=df1.index, columns=["CTC"])
81
+
82
+ return pd_fr, ctc
83
+
84
+ def auto_na(df1:pd.DataFrame, df2:pd.DataFrame, df3:pd.DataFrame, ctc_val:pd.DataFrame, ctc_max=10):
85
+ """
86
+ This function generates files for RA metrics calculation in the case of auto CTC \n
87
+ df1: whole similarity matrix \n
88
+ df2: whole weightage matrix \n
89
+ df3: whole response matrix \n
90
+ all three matrices have the sorted values along the rows \n
91
+ ctc_val: compound specific CTC for the prediction \n
92
+ ctc_max: upper bound
93
+ """
94
+ ndf1 = df1.iloc[:,0:ctc_max]
95
+ ndf2 = df2.iloc[:,0:ctc_max]
96
+ ndf3 = df3.iloc[:,0:ctc_max]
97
+ for idx, col_idx in enumerate(ctc_val['CTC']):
98
+ ndf1.iloc[idx, col_idx:] = np.nan
99
+ ndf2 = ndf2.where(~ndf1.isna(), np.nan)
100
+ ndf3 = ndf3.where(~ndf1.isna(), np.nan)
101
+ return ndf1, ndf2, ndf3
102
+
103
+ def ra_met(wap:pd.DataFrame, sim_df:pd.DataFrame, weight:pd.DataFrame, res_df:pd.DataFrame, tr_mean:float):
104
+
105
+ """
106
+ This function returns the calculated metrics of the read-across function \n
107
+ wap: weighted average predicted values (have only one column) \n
108
+ sim_df: similarity matrix used in the generation of prediction \n
109
+ weight: weights used in the generation of prediction (Individual Similarity/Total Similarity) \n
110
+ res_df: response matrix of close source compounds\n
111
+ tr_mean: training set response mean (Experimental Value) \n
112
+
113
+ Return -> dictionary of the calculated RA metrics \n
114
+
115
+ """
116
+
117
+
118
+ #n_effective
119
+ weitage2 = weight**2
120
+ n_eff = ((weight.sum(axis=1))**2)/(weitage2.sum(axis=1))
121
+
122
+ #sd_activity
123
+ tem_nu = weight*((res_df.sub(wap.iloc[:,0], axis=0))**2)
124
+ tem_nu_sum = tem_nu.sum(axis=1)
125
+ sda_part1 = (tem_nu_sum)/(weight.sum(axis=1))
126
+ sda_part2 = n_eff/(n_eff-1)
127
+ sd_activity = pd.Series(np.sqrt(sda_part1*sda_part2))
128
+
129
+ #cv_activity
130
+ cv_activity = sd_activity.div(wap.iloc[:,0])
131
+
132
+ #avg_similarity
133
+ avg_similarity = sim_df.mean(axis=1)
134
+
135
+ #sd_similarity
136
+ sd_similarity = sim_df.std(axis=1)
137
+
138
+ #cv_similarity
139
+ cv_similarity = sd_similarity/avg_similarity
140
+
141
+ #standard_error
142
+ stand_error = sd_activity.div(np.sqrt(n_eff))
143
+
144
+ #max_pos & maxpos_avgsim
145
+ mp_res = res_df.copy()
146
+ mp_res[mp_res<tr_mean] = np.nan
147
+ mp_sim = sim_df.where(~mp_res.isna(), np.nan)
148
+ max_pos = mp_sim.max(axis=1)
149
+ max_pos.fillna(0, inplace=True)
150
+ maxpos_avgsim = mp_sim.mean(axis=1)
151
+ maxpos_avgsim.fillna(0, inplace=True)
152
+
153
+ #max_neg & maxneg_avgsim
154
+ ne_res = res_df.copy()
155
+ ne_res[ne_res>=tr_mean] = np.nan
156
+ ne_sim = sim_df.where(~ne_res.isna(), np.nan)
157
+ max_neg = ne_sim.max(axis=1)
158
+ max_neg.fillna(0, inplace=True)
159
+ maxneg_avgsim = ne_sim.mean(axis=1)
160
+ maxneg_avgsim.fillna(0, inplace=True)
161
+
162
+ #abs_diff
163
+ abs_diff = np.abs(max_pos-max_neg)
164
+
165
+ #g and gm
166
+ pos_frac = mp_sim.count(axis=1)/res_df.count(axis=1)
167
+ pos_frac_diff = 2*np.abs(pos_frac-0.5)
168
+ g_val = 1-pos_frac_diff
169
+ gm_val = np.where(max_pos >= max_neg, pos_frac_diff, -pos_frac_diff)
170
+
171
+ #derived metrics
172
+ gm_avgsim = gm_val*avg_similarity
173
+ gm_sdsim = gm_val*sd_similarity
174
+
175
+ #sm1 and sm2
176
+ sm1 = (max_pos-max_neg)/np.maximum(max_pos, max_neg)
177
+ sm2 = (maxpos_avgsim -maxneg_avgsim)/avg_similarity
178
+
179
+
180
+ #next gen error met
181
+ #nem1
182
+ diff = (res_df.sub(wap.iloc[:,0], axis=0)).abs()
183
+ nem1 = diff.mean(axis=1)
184
+
185
+ #nem2
186
+ sq_diff = (res_df.sub(wap.iloc[:,0], axis=0))**2
187
+ nem2 = sq_diff.mean(axis=1)
188
+
189
+
190
+ met_dict = {
191
+ "n_effective": n_eff,
192
+ "SD_Activity": sd_activity,
193
+ "CV_Activity": cv_activity,
194
+ "Avg_similarity": avg_similarity,
195
+ "SD_similarity": sd_similarity,
196
+ "CV_similarity": cv_similarity,
197
+ "Standard_Error (SE)": stand_error,
198
+ "MaxPos": max_pos,
199
+ "PosAvgSim": maxpos_avgsim,
200
+ "MaxNeg": max_neg,
201
+ "NegAvgSim": maxneg_avgsim,
202
+ "AbsDiff": abs_diff,
203
+ "g": g_val,
204
+ "gm": gm_val,
205
+ "gm*AvgSim": gm_avgsim,
206
+ "gm*SD_Similarity": gm_sdsim,
207
+ "sm1": sm1,
208
+ "sm2": sm2,
209
+ "NEM1": nem1,
210
+ "NEM2": nem2
211
+ }
212
+ return met_dict
213
+
214
+
215
+
216
+ #================================RA Prediction Class==================================
217
+
218
class ra_pred:
    """Weighted read-across (RA) prediction.

    Predicts the response of test compounds as a similarity-weighted average
    of the responses of their closest training compounds (close source
    compounds, CTC).
    """

    def __init__(self, df1:pd.DataFrame, df2:pd.DataFrame):
        '''
        df1: training data frame with response variable in the last column \n
        df2: test data frame with or without response variable (if response variable is present, it should be in the last column)
        '''
        self.df1 = df1
        self.df2 = df2
        self.x_tr = df1.iloc[:,:-1]   # training descriptors (all but last column)
        self.y_tr = df1.iloc[:,-1]    # training response (last column)
        self.xtr_len = len(self.x_tr)
        # If df2 has the same number of columns as df1, it is assumed to carry
        # a response column too; otherwise df2 is taken as descriptors only.
        if len(df1.columns)==len(df2.columns):
            self.x_te = df2.iloc[:,:-1]
            self.y_te = df2.iloc[:,-1]
        else:
            self.x_te = df2
            # NOTE(review): self.y_te is left unset in this branch; code that
            # reads y_te must only run when df2 contained a response column.

    def weighted_prediction(self, method:str, ctc, sigma=1.0, gamma=1.0,
                            p_mk = 3, degree=3.0, coef0 = 1, similarity_cutoff=0.0,
                            max_ctc=10, log_outp=False):
        '''
        method: similarity/distance method to be used for calculation\n
        ctc: CTC value to be used for prediction (can be "auto" or integer value)\n
        sigma: sigma value for Gaussian Kernel\n
        gamma: gamma value for Laplacian Kernel, Chi2_Kernel, Polynomial Kernel, Sigmoid Kernel\n
        p_mk: p value for Minkowski Distance\n
        degree: degree value for Polynomial Kernel\n
        coef0: coef0 value for Polynomial Kernel, Sigmoid Kernel\n
        similarity_cutoff: similarity cutoff value for the selection of close source compounds (applicable only when ctc is an integer)\n
        max_ctc: upper bound for CTC value (applicable only when ctc is "auto")\n
        log_outp: whether to return the detailed output for RA metrics calculation (True/False)\n

        Returns the predicted responses; when log_outp is True, returns
        (predictions, RA-metrics DataFrame, sorted neighbour responses,
        sorted neighbour ids) instead.
        '''
        # Standardize descriptors only for methods that need it; `mylist` and
        # `standerdization` are defined elsewhere in this module.
        if method in mylist:
            des_tr, des_te = standerdization(df1=self.x_tr, df2=self.x_te)
        else:
            des_tr, des_te = self.x_tr, self.x_te
        sim = ra_similarity(des_tr=des_tr, des_te=des_te)
        sim1=sim.similarity_calculation(method=method, sigma=sigma, gamma=gamma,
                                        p_mk=p_mk, degree=degree, coef0=coef0)
        # Training-on-training case (identical index/columns): zero the
        # diagonal so each compound cannot be its own nearest neighbour
        # (leave-one-out behaviour).
        if np.array_equal(sim1.index.values, sim1.columns.values):
            np.fill_diagonal(sim1.values, 0)
        else:
            pass
        # sim2 relabels columns with the training responses so that sorting by
        # similarity carries the corresponding response values along.
        sim2=sim1.copy()
        sim2.columns=self.y_tr.values.tolist()
        # `data_sort` (defined elsewhere) sorts each row by descending
        # similarity; *_val are the sorted similarities, *_col the labels.
        df_sort, sort_val, sort_col = data_sort(sim1, id=self.df2.index)
        df_sort_res, sort_val_res, sort_col_res = data_sort(sim2, id=self.df2.index)
        similarity_sum = sort_val_res.sum(axis=1)
        if ctc == "auto":
            # Normalize similarities to row-wise weights, then build the
            # cumulative weighted-average prediction for every CTC value at
            # once (column k of `wap` = prediction using the k+1 closest
            # compounds).
            wei = (sort_val_res.T/similarity_sum).T
            wei_res = wei*sort_col_res
            wei_res_cum = wei_res.cumsum(axis=1)
            wei_cum = wei.cumsum(axis=1)
            wap = wei_res_cum/wei_cum
            # `mse`/`min_loc` (defined elsewhere) pick, per row, the CTC in
            # [1, max_ctc] that minimizes the error.
            mse_cal = mse(sort_col_res, wap, max_ctc=max_ctc)
            yte_pred, ctc_cal = min_loc(mse_cal, wap.iloc[:,0:max_ctc])
            if log_outp == True:
                # `auto_na` masks columns beyond each row's chosen CTC so the
                # RA metrics only see the compounds actually used.
                ramet_similarity, ramet_weitage, ramet_response = auto_na(df1=sort_val_res, df2=wei,
                                                                          df3=sort_col_res, ctc_val=ctc_cal,
                                                                          ctc_max=max_ctc)
            else:
                pass
        else:
            # Fixed integer CTC: keep the ctc closest compounds, drop those
            # below the similarity cutoff, and drop rows where only a single
            # neighbour survives (a single-neighbour average is not robust).
            sort_val_res1=sort_val_res.iloc[:,0:ctc]
            sort_col_res1=sort_col_res.iloc[:,0:ctc]
            sort_val_res1 = sort_val_res1.mask(sort_val_res1 < similarity_cutoff, np.nan)
            single_value_rows = sort_val_res1.notna().sum(axis=1) == 1
            sort_val_res1 = sort_val_res1.mask(single_value_rows, np.nan)
            sort_col_res1 = sort_col_res1.where(~sort_val_res1.isna(), np.nan)
            # Weights are normalized by the all-neighbour similarity sum; the
            # common factor cancels in the ratio below, so wap is the weighted
            # mean over the retained neighbours only.
            wei1 = (sort_val_res1.T/similarity_sum).T
            wei_res1 = wei1*sort_col_res1
            wap=wei_res1.sum(axis=1)/wei1.sum(axis=1)
            yte_pred=wap
            ramet_similarity, ramet_weitage, ramet_response = sort_val_res1, wei1, sort_col_res1
        if log_outp == False:
            return yte_pred
        if log_outp == True:
            # `ra_met` (defined elsewhere) computes the reliability metrics of
            # the prediction from the weights/responses actually used.
            ra_metrics = ra_met(wap=pd.DataFrame(yte_pred), sim_df=ramet_similarity,
                                weight=ramet_weitage, res_df=ramet_response,
                                tr_mean=float(self.y_tr.mean()))
            ra_metrics_df = pd.DataFrame(ra_metrics, index=self.x_te.index.values)
            return yte_pred, ra_metrics_df, sort_col_res, sort_col
303
+
@@ -0,0 +1,23 @@
1
+ import pandas as pd
2
+ from rasar.prediction import ra_pred
3
+ import numpy as np
4
+
5
def calculate_descriptor(df1:pd.DataFrame, df2:pd.DataFrame,
                         method:str, ctc, sigma=1.0,
                         gamma=1.0, similarity_cutoff=0.0,
                         p_mk=3, degree=3.0, coef0=1, log = True, data_fusion = True):
    """Build RASAR descriptor tables for the training and test sets.

    Each output table contains the weighted read-across prediction
    ("RA_function") plus the RA reliability metrics, optionally fused with the
    original descriptors.

    df1: training data frame with response variable in the last column
    df2: test data frame (same column layout as df1)
    method/ctc/sigma/gamma/similarity_cutoff/p_mk/degree/coef0: forwarded to
        ra_pred.weighted_prediction (see its docstring)
    log: kept for backward compatibility; the detailed output is always
        requested internally because the metric table is required here
        (previously log=False crashed on unpacking a single return value)
    data_fusion: if True, append the original descriptors (all but the last
        column) to each metric table

    Returns (train_table, test_table).
    """
    def _ra_block(test_df: pd.DataFrame) -> pd.DataFrame:
        # One-sided RASAR table: prediction column + RA metrics, indexed like
        # the evaluated frame. log_outp=True is mandatory: the metrics
        # DataFrame is part of the output.
        pred, metrics, _, _ = ra_pred(df1=df1, df2=test_df).weighted_prediction(
            method=method, ctc=ctc, sigma=sigma, gamma=gamma,
            similarity_cutoff=similarity_cutoff, p_mk=p_mk,
            degree=degree, coef0=coef0, log_outp=True)
        pred_df = pd.DataFrame(pred, columns=["RA_function"], index=test_df.index.values)
        return pd.concat([pred_df, metrics], axis=1)

    tr_met = _ra_block(df1)   # training set evaluated against itself (LOO inside ra_pred)
    te_met = _ra_block(df2)
    if data_fusion:
        tr_met = pd.concat([tr_met, df1.iloc[:,:-1]], axis=1)
        te_met = pd.concat([te_met, df2.iloc[:,:-1]], axis=1)
    return tr_met, te_met
@@ -0,0 +1,180 @@
1
+ import pandas as pd
2
+ import numpy as np
3
+ from sklearn.metrics import DistanceMetric
4
+ from sklearn.metrics.pairwise import additive_chi2_kernel, chi2_kernel
5
+ from sklearn.metrics.pairwise import linear_kernel, polynomial_kernel, sigmoid_kernel
6
+ from sklearn.metrics.pairwise import cosine_similarity, rbf_kernel, laplacian_kernel
7
+
8
+
9
def fragment_calculator(row1, row2):
    """Return the bit counts used by fingerprint similarity coefficients.

    row1, row2: binary fingerprint vectors (0/1 integer arrays).
    Returns a 3-tuple: (bits set in row2, bits set in row1,
    bits set in both rows).
    """
    bits_in_second = np.sum(row2)
    bits_in_first = np.sum(row1)
    bits_in_common = np.sum(row1 & row2)
    return bits_in_second, bits_in_first, bits_in_common
14
+
15
+
16
class ra_similarity:
    """Pairwise similarity calculation between test and training compounds."""

    def __init__(self, des_tr: pd.DataFrame, des_te: pd.DataFrame):
        '''
        Pairwise Similarity Calculation:\n
        This class is used to calculate the pairwise similarity between\n training and test set compounds using various methods.\n
        des_tr: training descriptors \n
        des_te: testing descriptors
        '''
        self.des_tr = des_tr
        self.des_te = des_te
        self.tr_comp = len(des_tr)#m  number of training compounds
        self.te_comp = len(des_te)   # number of test compounds
        self.n_fea = len(des_tr.columns)  # number of descriptor features


    def similarity_calculation(self, method:str, sigma=1.0, gamma=1.0, p_mk = 3, degree=3.0, coef0 = 1):
        '''
        method: similarity/distance method to be used for calculation\n
        sigma: sigma value for Gaussian Kernel\n
        gamma: gamma value for Laplacian Kernel, Chi2_Kernel, Polynomial Kernel, Sigmoid Kernel\n
        p_mk: p value for Minkowski Distance\n
        degree: degree value for Polynomial Kernel\n
        coef0: coef0 value for Polynomial Kernel, Sigmoid Kernel\n
        Supported Methods:\n
        1. Minkowski Distance\n
        2. Gaussian Kernel\n
        3. Laplacian Kernel\n
        4. Linear Kernel\n
        5. Polynomial Kernel\n
        6. Sigmoid Kernel\n
        7. SEuclidean Distance\n
        8. Euclidean Distance\n
        9. Mahalanobis Distance\n
        10. Hamming Distance\n
        11. Canberra Distance\n
        12. BrayCurtis Distance\n
        13. Matching Distance\n
        14. Kulsinski Distance\n
        15. RogersTanimoto Distance\n
        16. SokalMichener Distance\n
        17. SokalSneath Distance\n
        18. Cosine Similarity\n
        19. Tanimoto Coefficient\n
        20. Dice Coefficient\n
        21. Cosine Similarity (FP)\n
        22. Russell-rao Coefficient\n
        23. Forbes Coefficient\n
        24. Euclidean Distance (FP)\n
        25. Manhattan Distance (FP)\n
        26. Manhattan Distance\n
        27. Chebyshev Distance\n
        28. Chi2_Kernel\n
        29. Additive_Chi2\n

        Returns a DataFrame of shape (n_test, n_train): rows indexed by test
        compounds, columns by training compounds.
        '''
        similarity = []
        # Continuous-distance metrics: convert distance d to similarity via
        # 1/(1+d), which maps d in [0, inf) to (0, 1].
        if method == "Minkowski Distance":
            dist = DistanceMetric.get_metric("minkowski", p=p_mk)
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1/(1+sim)
        elif method == "SEuclidean Distance":
            # Standardized Euclidean: scaled by per-feature training variance.
            variance = self.des_tr.var()
            dist = DistanceMetric.get_metric("seuclidean", V=variance)
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1/(1+sim)
        elif method == "Euclidean Distance":
            dist = DistanceMetric.get_metric("euclidean")
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1/(1+sim)
        elif method == "Mahalanobis Distance":
            # Covariance estimated from the training descriptors only.
            V1 = np.cov(self.des_tr, rowvar=False)
            dist = DistanceMetric.get_metric("mahalanobis", V=V1)
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1/(1+sim)
        elif method == "Hamming Distance":
            dist = DistanceMetric.get_metric("hamming")
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1/(1+sim)
        elif method == "Canberra Distance":
            dist = DistanceMetric.get_metric("canberra")
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1/(1+sim)
        elif method == "BrayCurtis Distance":
            dist = DistanceMetric.get_metric("braycurtis")
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1/(1+sim)

        # Boolean-dissimilarity metrics: distances already lie in [0, 1], so
        # similarity = 1 - d.
        elif method == "Matching Distance":
            dist = DistanceMetric.get_metric("matching")
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1-sim

        elif method == "Kulsinski Distance":
            # NOTE(review): "kulsinski" has been deprecated/removed in recent
            # SciPy/scikit-learn releases — confirm it is available under the
            # pinned scikit-learn version.
            dist = DistanceMetric.get_metric("kulsinski")
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1-sim

        elif method == "RogersTanimoto Distance":
            dist = DistanceMetric.get_metric("rogerstanimoto")
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1-sim

        elif method == "SokalMichener Distance":
            dist = DistanceMetric.get_metric("sokalmichener")
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1-sim

        elif method == "SokalSneath Distance":
            dist = DistanceMetric.get_metric("sokalsneath")
            sim = dist.pairwise(self.des_te, self.des_tr)
            similarity = 1-sim
        # Kernel methods: sklearn pairwise kernels are used directly as
        # similarities (no distance-to-similarity conversion).
        elif method == "Cosine Similarity":
            similarity = cosine_similarity(self.des_te, self.des_tr)
        elif method == "Linear Kernel":
            similarity = linear_kernel(self.des_te, self.des_tr)
        elif method == "Additive_Chi2":
            # NOTE(review): additive_chi2_kernel returns non-positive values
            # (negative chi-squared distance); downstream weighting presumably
            # expects this — confirm.
            similarity = additive_chi2_kernel(self.des_te, self.des_tr)
        elif method == "Chi2_Kernel":
            similarity = chi2_kernel(self.des_te, self.des_tr, gamma=gamma)
        elif method == "Polynomial_Kernel":
            similarity = polynomial_kernel(self.des_te, self.des_tr, gamma=gamma, degree=degree, coef0=coef0)
        elif method == "Sigmoid_Kernel":
            similarity = sigmoid_kernel(self.des_te, self.des_tr, gamma=gamma, coef0=coef0)
        elif method == "Gaussian Kernel":
            # RBF kernel with gamma expressed through sigma: exp(-||x-y||^2 / (2*sigma^2)).
            similarity = rbf_kernel(self.des_te, self.des_tr, gamma=1/(2*(sigma**2)))
        elif method == "Laplacian Kernel":
            similarity = laplacian_kernel(self.des_te, self.des_tr, gamma=gamma)
        else:
            # Fingerprint (FP) coefficients computed pairwise from fragment
            # bit counts: a = on-bits of training row, b = on-bits of test
            # row, c = on-bits common to both.
            # NOTE(review): an unrecognized method string reaches this loop
            # with `sim` never assigned, raising NameError at temp.append —
            # consider validating `method` up front.
            for i in range(len(self.des_te)):
                temp = []
                for j in range(len(self.des_tr)):
                    if method == "Tanimoto Coefficient" or method == "Soergel Distance":
                        a,b,c = fragment_calculator(row1=self.des_te.values[i], row2 = self.des_tr.values[j])
                        sim = c/(a+b-c)
                    if method =="Dice Coefficient":
                        a,b,c = fragment_calculator(row1=self.des_te.values[i], row2 = self.des_tr.values[j])
                        sim = (2*c)/(a+b)
                    if method == "Cosine Similarity (FP)":
                        a,b,c = fragment_calculator(row1=self.des_te.values[i], row2 = self.des_tr.values[j])
                        sim = c/((a*b)**0.5)
                    if method == "Russell-rao Coefficient":
                        # NOTE(review): divides by the number of training
                        # compounds (self.tr_comp); Russell-Rao is usually
                        # normalized by the fingerprint length — confirm.
                        a,b,c = fragment_calculator(row1=self.des_te.values[i], row2 = self.des_tr.values[j])
                        sim = c/self.tr_comp
                    if method == "Forbes Coefficient":
                        a,b,c = fragment_calculator(row1=self.des_te.values[i], row2 = self.des_tr.values[j])
                        sim = (c*self.tr_comp)/(a*b)
                    if method == "Euclidean Distance (FP)":
                        a,b,c = fragment_calculator(row1=self.des_te.values[i], row2 = self.des_tr.values[j])
                        sim = (a+b-(2*c))**0.5
                        sim = 1/(1+sim)
                    if method == "Manhattan Distance (FP)":
                        a,b,c = fragment_calculator(row1=self.des_te.values[i], row2 = self.des_tr.values[j])
                        sim = (a+b-(2*c))
                        sim = 1/(1+sim)
                    if method == "Manhattan Distance":
                        sim = sum(np.abs(self.des_te.values[i] - self.des_tr.values[j]))
                        sim = 1/(1+sim)
                    if method == "Chebyshev Distance":
                        sim = max(np.abs(self.des_te.values[i] - self.des_tr.values[j]))
                        sim = 1/(1+sim)
                    temp.append(sim)
                similarity.append(temp)

        sim_df = pd.DataFrame(similarity, index=self.des_te.index.values, columns=self.des_tr.index.values.tolist())
        return sim_df
@@ -0,0 +1,56 @@
1
+ Metadata-Version: 2.4
2
+ Name: rasar
3
+ Version: 0.1.0
4
+ Summary: The components of this module can be used for read-across related calculations.
5
+ License: Apache License 2.0
6
+ Requires-Python: >=3.10.4
7
+ Description-Content-Type: text/markdown
8
+ License-File: LICENSE
9
+ Requires-Dist: pandas==2.3.3
10
+ Requires-Dist: numpy==2.2.6
11
+ Requires-Dist: matplotlib==3.10.7
12
+ Requires-Dist: scikit-learn==1.7.2
13
+ Dynamic: license-file
14
+
15
+ # rasar
16
+ The components of this module can be used for read-across related calculations. It is a crucial module for cheminformatics applications.
17
+
18
+
19
+ ## Installation
20
+ pip install rasar
21
+
22
+
23
+ ## Usage
24
+ This module supports five different read-across tasks, including pairwise similarity calculation, read-across prediction, read-across optimization, read-across feature importance calculation, and rasar descriptor calculation.
25
+
26
+
27
+ ## Getting started
28
+ import pandas as pd
29
+ from rasar import ra_similarity, ra_pred, ra_optimization, ra_importance, calculate_descriptor
30
+ tr = pd.read_excel('train.xlsx', index_col=0)
31
+ te = pd.read_excel('test.xlsx', index_col=0)
32
+ xtr = tr.iloc[:,:-1]
33
+ ytr = tr.iloc[:,-1]
34
+ xte = te.iloc[:,:-1]
35
+ yte = te.iloc[:,-1]
36
+ sim = ra_similarity(des_tr=xtr, des_te=xte)
37
+ sim1 = sim.similarity_calculation(method='Euclidean Distance')
38
+ pred = ra_pred(df1=tr, df2=te).weighted_prediction(method='Laplacian Kernel', ctc=6, gamma=0.5)
39
+ opt = ra_optimization(method='Laplacian Kernel', data=tr, parameters={'CTC': [1, 3, 6, 10],
40
+ 'Gamma': [0.1, 0.5, 1],
41
+ 'Threshold': [0.0]},
42
+ objective_function="MAE",
43
+ cv_fold=5)
44
+ imp = ra_importance(df1=tr).imp_calculation(method='Laplacian Kernel', ctc=6, gamma=0.5, ths=2)
45
+ ra_importance(df1=tr).plot_importance(imp_df=imp, plot_type='coefficient', color="winter_r", index=1)
46
+ des_tr, des_te = calculate_descriptor(df1=tr, df2=te, method='Laplacian Kernel', ctc=6, gamma=0.5, data_fusion = True)
47
+
48
+ ## Cite
49
+ To use this module, users need to cite the following paper:
50
+
51
+ Pore, S. and Roy, K., 2025. “intelligent Read Across (iRA)”-A tool for read-across-based toxicity prediction of nanoparticles. Computational and Structural Biotechnology Journal. https://doi.org/10.1016/j.csbj.2025.07.032
52
+
53
+
54
+ ## LICENSE
55
+ Apache License 2.0
56
+
@@ -0,0 +1,14 @@
1
+ LICENSE
2
+ README.md
3
+ pyproject.toml
4
+ rasar/__init__.py
5
+ rasar/feature_importance.py
6
+ rasar/optimization.py
7
+ rasar/prediction.py
8
+ rasar/rasar_descriptors.py
9
+ rasar/similarity.py
10
+ rasar.egg-info/PKG-INFO
11
+ rasar.egg-info/SOURCES.txt
12
+ rasar.egg-info/dependency_links.txt
13
+ rasar.egg-info/requires.txt
14
+ rasar.egg-info/top_level.txt
@@ -0,0 +1,4 @@
1
+ pandas==2.3.3
2
+ numpy==2.2.6
3
+ matplotlib==3.10.7
4
+ scikit-learn==1.7.2
@@ -0,0 +1 @@
1
+ rasar
rasar-0.1.0/setup.cfg ADDED
@@ -0,0 +1,4 @@
1
+ [egg_info]
2
+ tag_build =
3
+ tag_date = 0
4
+