ddi-fw 0.0.238__py3-none-any.whl → 0.0.239__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {ddi_fw-0.0.238.dist-info → ddi_fw-0.0.239.dist-info}/METADATA +1 -1
- {ddi_fw-0.0.238.dist-info → ddi_fw-0.0.239.dist-info}/RECORD +4 -10
- ddi_fw/drugbank/__init__.py +0 -2
- ddi_fw/drugbank/drugbank.xsd +0 -904
- ddi_fw/drugbank/drugbank_parser.py +0 -157
- ddi_fw/drugbank/drugbank_processor.py +0 -355
- ddi_fw/drugbank/drugbank_processor_org.py +0 -272
- ddi_fw/drugbank/event_extractor.py +0 -127
- {ddi_fw-0.0.238.dist-info → ddi_fw-0.0.239.dist-info}/WHEEL +0 -0
- {ddi_fw-0.0.238.dist-info → ddi_fw-0.0.239.dist-info}/top_level.txt +0 -0
ddi_fw/drugbank/drugbank_processor_org.py (removed, 272 lines)
@@ -1,272 +0,0 @@
-import pandas as pd
-import os
-import json
-import glob
-from tqdm import tqdm
-
-import csv
-
-from rdkit import Chem
-from rdkit.Chem import AllChem
-import numpy as np
-from ddi_fw.drugbank.event_extractor import EventExtractor
-
-from zip_helper import ZipHelper
-# from event_extractor import EventExtractor
-
-
-def multiline_to_singleline(multiline):
-    if multiline is None:
-        return ""
-    return " ".join(line.strip() for line in multiline.splitlines())
-
-# targets -> target -> polypeptide
-# enzymes -> enzyme -> polypeptide
-# pathways from KEGG, KEGG ID is obtained from ddi_fw.drugbank
-# https://www.genome.jp/dbget-bin/www_bget?drug:D03136
-# https://www.kegg.jp/entry/D03136
-
-
-class DrugBankProcessor():
-
-    def mask_interaction(self, drug_1, drug_2, interaction):
-        return interaction.replace(
-            drug_1, "DRUG").replace(drug_2, "DRUG")
-
-    def extract_zip_files(self, input_path='zips', output_path='drugs', override=False):
-        if override:
-            zip_helper = ZipHelper()
-            zip_helper.extract(input_path=input_path, output_path=output_path)
-
-    def process(self, input_path='drugs', output_path='output', zip_outputs=True):
-        if not os.path.exists(output_path):
-            os.makedirs(output_path)
-
-        drugs_pickle_path = output_path+'/drugs.pkl'
-        drugs_csv_path = output_path+'/drugs.gzip'
-        ddi_pickle_path = output_path + '/ddi.pkl'
-        ddi_csv_path = output_path + '/ddi.gzip'
-
-        if not os.path.exists(drugs_pickle_path) or not os.path.exists(ddi_pickle_path):
-            drug_rows = []
-            all_ddis = []
-            all_json_files = input_path+'/*.json*'
-
-            for filepath in tqdm(glob.glob(all_json_files)):
-                with open(filepath, 'r', encoding="utf8") as f:
-
-                    data = json.load(f)
-
-                    # if data['drug-interactions'] is None:
-                    if False:
-                        continue
-                    else:
-                        drug_1 = data['name']
-                        drug_1_id = [d['value']
-                                     for d in data['drugbank-id'] if d['primary'] == True][0]
-                        description = multiline_to_singleline(
-                            data['description'])
-                        if data['drug-interactions'] is not None:
-                            drug_interactions = [
-                                interaction for interaction in data['drug-interactions']['drug-interaction']]
-                            ddis = [(drug_1, interaction['name'], interaction['description'])
-                                    for interaction in data['drug-interactions']['drug-interaction']]
-
-                            ddi_dict = [{
-                                'drug_1_id': drug_1_id,
-                                'drug_1': drug_1,
-                                'drug_2_id': interaction['drugbank-id']['value'],
-                                'drug_2': interaction['name'],
-                                'interaction': interaction['description'],
-                                'masked_interaction': self.mask_interaction(drug_1, interaction['name'], interaction['description'])}
-                                for interaction in data['drug-interactions']['drug-interaction']]
-                            all_ddis.extend(ddi_dict)
-
-                        synthesis_reference = data['synthesis-reference']
-                        indication = multiline_to_singleline(
-                            data['indication'])
-                        pharmacodynamics = multiline_to_singleline(
-                            data['pharmacodynamics'])
-                        mechanism_of_action = multiline_to_singleline(
-                            data['mechanism-of-action'])
-                        toxicity = multiline_to_singleline(data['toxicity'])
-                        metabolism = multiline_to_singleline(
-                            data['metabolism'])
-                        absorption = multiline_to_singleline(
-                            data['absorption'])
-                        half_life = multiline_to_singleline(data['half-life'])
-                        protein_binding = multiline_to_singleline(
-                            data['protein-binding'])
-                        route_of_elimination = multiline_to_singleline(
-                            data['route-of-elimination'])
-                        volume_of_distribution = multiline_to_singleline(
-                            data['volume-of-distribution'])
-                        clearance = multiline_to_singleline(data['clearance'])
-
-                        food_interactions = data['food-interactions']
-                        sequences = data['sequences'] if "sequences" in data else None
-
-                        external_identifiers = data['external-identifiers'] if "external-identifiers" in data else None
-                        experimental_properties = data['experimental-properties'] if "experimental-properties" in data else None
-                        calculated_properties = data['calculated-properties'] if "calculated-properties" in data else None
-
-                        enzymes_polypeptides = None
-                        targets_polypeptides = None
-
-                        # targets = data['targets'] if "targets" in data else None
-                        if data['targets'] is not None:
-                            # targets_polypeptides = [p['id'] for d in data['targets']['target'] for p in d['polypeptide'] if 'polypeptide' in d ]
-                            targets_polypeptides = [
-                                p['id'] for d in data['targets']['target'] if 'polypeptide' in d for p in d['polypeptide']]
-
-                        if data['enzymes'] is not None:
-                            # enzymes_polypeptides = [p['id'] for d in data['enzymes']['enzyme'] for p in d['polypeptide'] if 'polypeptide' in d]
-                            enzymes_polypeptides = [
-                                p['id'] for d in data['enzymes']['enzyme'] if 'polypeptide' in d for p in d['polypeptide']]
-
-                        if external_identifiers is not None:
-                            external_identifiers_dict = dict(
-                                [(p['resource'], p['identifier']) for p in external_identifiers['external-identifier']])
-
-                        # add note column
-                        smiles = None
-                        morgan_hashed = None
-                        if calculated_properties is not None:
-                            calculated_properties_dict = dict(
-                                [(p['kind'], p['value']) for p in calculated_properties['property']])
-                            smiles = calculated_properties_dict['SMILES'] if 'SMILES' in calculated_properties_dict else None
-                            if smiles is not None:
-                                try:
-                                    mol = Chem.MolFromSmiles(smiles)
-                                    morgan_hashed = AllChem.GetMorganFingerprintAsBitVect(
-                                        mol, 2, nBits=881).ToList()
-                                except:
-                                    print("An exception occurred")
-                        if morgan_hashed is None:
-                            morgan_hashed = np.zeros(881)
-
-                        # k = [p[k] for p in calculated_properties['property'] for k in p.keys() if k =='SMILES']
-                        # external_identifiers['external-identifier']
-                        # experimental_properties['property']
-
-                        row = {'drugbank_id': drug_1_id,
-                               'name': drug_1,
-                               'description': description,
-                               'synthesis_reference': synthesis_reference,
-                               'indication': indication,
-                               'pharmacodynamics': pharmacodynamics,
-                               'mechanism_of_action': mechanism_of_action,
-                               'toxicity': toxicity,
-                               'metabolism': metabolism,
-                               'absorption': absorption,
-                               'half_life': half_life,
-                               'protein_binding': protein_binding,
-                               'route_of_elimination': route_of_elimination,
-                               'volume_of_distribution': volume_of_distribution,
-                               'clearance': clearance,
-                               'smiles': smiles,
-                               'smiles_morgan_fingerprint': morgan_hashed,
-                               'enzymes_polypeptides': enzymes_polypeptides,
-                               'targets_polypeptides': targets_polypeptides,
-                               'external_identifiers': external_identifiers_dict
-                               }
-                        drug_rows.append(row)
-
-                        # if len(drug_rows) == 10:
-                        #     break
-            # print(smiles_count)
-            print(f"Size of drugs {len(drug_rows)}")
-            print(f"Size of DDIs {len(all_ddis)}")
-            np.set_printoptions(threshold=np.inf)
-
-            # drug_names = [row['name'] for row in drug_rows]
-            drug_names = ['DRUG']
-            event_extractor = EventExtractor(drug_names)
-
-            replace_dict = {'MYO-029': 'Stamulumab'}
-            for ddi in tqdm(all_ddis):
-                for key, value in replace_dict.items():
-                    ddi['masked_interaction'] = ddi['masked_interaction'].replace(
-                        key, value)
-                # interaction = ddi['interaction']
-                # mechanism, action, drugA, drugB = event_extractor.extract(interaction)
-                # ddi['mechanism'] = mechanism
-                # ddi['action'] = action
-
-            self.drugs_df = pd.DataFrame(drug_rows)
-            self.drugs_df.to_pickle(drugs_pickle_path)
-            self.drugs_df.to_csv(
-                drugs_csv_path, index=False, compression='gzip')
-
-            # print('mechanism_action calculation')
-            self.ddis_df = pd.DataFrame(all_ddis)
-
-            count = [0]
-
-            def fnc2(interaction, count):
-                count[0] = count[0] + 1
-                if count[0] % 1000 == 0:
-                    print(f'{count[0]}/{len(all_ddis)}')
-                mechanism, action, drugA, drugB = event_extractor.extract(
-                    interaction)
-                return mechanism+'__' + action
-
-            # self.ddis_df['mechanism_action'] = self.ddis_df['interaction'].apply(lambda x: fnc2(x))
-            # tqdm.pandas()
-            self.ddis_df['mechanism_action'] = self.ddis_df['masked_interaction'].apply(
-                fnc2, args=(count,))
-
-            self.ddis_df.to_csv(ddi_csv_path, index=False, compression='gzip')
-            self.ddis_df.to_pickle(ddi_pickle_path)
-
-            if zip_outputs:
-                zip_helper = ZipHelper()
-                zip_helper.zip_single_file(
-                    file_path=drugs_pickle_path, output_path=output_path+'/zips', name='drugs-pickle')
-                zip_helper.zip_single_file(
-                    file_path=ddi_pickle_path, output_path=output_path+'/zips', name='ddi-pickle')
-
-        else:
-            print('Output path has processed data, load function is called')
-            self.load(output_path)
-
-    def load(self, path):
-        drugs_pickle_path = path+'/drugs.pkl'
-        ddi_pickle_path = path+'/ddi.pkl'
-        if os.path.exists(drugs_pickle_path) and os.path.exists(ddi_pickle_path):
-            self.drugs_df = pd.read_pickle(drugs_pickle_path)
-            self.ddis_df = pd.read_pickle(ddi_pickle_path)
-        else:
-            print('One of given paths could not found')
-
-    def load_from_csv(self, path):
-        drugs_csv_path = path+'/drugs.gzip'
-        ddi_csv_path = path+'/ddi.gzip'
-        if os.path.exists(drugs_csv_path) and os.path.exists(ddi_csv_path):
-            self.drugs_df = pd.read_csv(drugs_csv_path, compression='gzip')
-            self.ddis_df = pd.read_csv(ddi_csv_path, compression='gzip')
-        else:
-            print('One of given paths could not found')
-
-    def load2(self, path):
-        drugs_pickle_path = path+'/drugs.pkl'
-        ddi_csv_path = path+'/ddi.gzip'
-        if os.path.exists(drugs_pickle_path) and os.path.exists(ddi_csv_path):
-            self.drugs_df = pd.read_pickle(drugs_pickle_path)
-            self.ddis_df = pd.read_csv(ddi_csv_path, compression='gzip')
-        else:
-            print('One of given paths could not found')
-
-    def drugs_as_dataframe(self):
-        return self.drugs_df
-
-    def filtered_drugs_as_dataframe(self, drug_ids):
-        return self.drugs_df[self.drugs_df['drugbank_id'].isin(drug_ids)]
-
-    def ddis_as_dataframe(self):
-        return self.ddis_df
-
-    def filtered_ddis(self, drugs):
-        ddis_df = self.ddis_df.copy()
-        return ddis_df[(ddis_df['drug_1'] in drugs) & (
-            ddis_df['drug_2'] in drugs)]
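For orientation, the sketch below shows roughly how the removed DrugBankProcessor was driven, based only on the code above. It is not taken from the package: the module path, the availability of the separate zip_helper dependency, and the presence of per-drug JSON files under the default folders are assumptions, and the module no longer exists in 0.0.239, so this applies to 0.0.238 and earlier.

# Hypothetical usage of the removed class (ddi-fw 0.0.238 and earlier only).
from ddi_fw.drugbank.drugbank_processor_org import DrugBankProcessor  # assumed module path

processor = DrugBankProcessor()
# Unpack the per-drug JSON archives, then build the drug and DDI tables.
processor.extract_zip_files(input_path='zips', output_path='drugs', override=True)
processor.process(input_path='drugs', output_path='output', zip_outputs=False)

drugs_df = processor.drugs_as_dataframe()  # one row per drug, incl. SMILES and Morgan fingerprint
ddis_df = processor.ddis_as_dataframe()    # one row per drug-drug interaction, incl. mechanism_action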
ddi_fw/drugbank/event_extractor.py (removed, 127 lines)
@@ -1,127 +0,0 @@
-'''
-copied from https://github.com/YifanDengWHU/DDIMDL/blob/master/NLPProcess.py and reorganized
-'''
-
-# import stanfordnlp
-# stanfordnlp.download("en")
-import pandas as pd
-import stanza
-# stanza.download("en")
-
-import numpy as np
-
-
-class EventExtractor:
-    def __init__(self, druglist, use_cache=True):
-        self.druglist = druglist
-        self.druglist2 = ['_'.join(d.replace('.', ' ').replace(
-            ',', ' ').replace('-', ' ').split(' ')) for d in druglist]
-        # self.events = events
-        self.pipeline = stanza.Pipeline(use_gpu=True)
-        self.cache = dict()
-
-    def prepare_event_text(self, event):
-        for ex, new in zip(self.druglist, self.druglist2):
-            event = event.replace(ex, new)
-        return event
-
-    def extract_all(self, events):
-        mechanisms = []
-        actions = []
-        drugA_list = []
-        drugB_list = []
-        for i in range(len(events)):
-            mechanism, action, drugA, drugB = self.extract(events[i])
-            mechanisms.append(mechanism)
-            actions.append(action)
-            drugA_list.append(drugA)
-            drugB_list.append(drugB)
-        return mechanisms, actions, drugA_list, drugB_list
-
-    def extract(self, event):
-        if event in self.cache:
-            return self.cache[event]
-        event = self.prepare_event_text(event)
-        drugA = None
-        drugB = None
-
-        def addMechanism(node):
-            if int(sonsNum[int(node-1)]) == 0:
-                return
-            else:
-                for k in sons[node-1]:
-                    if int(k) == 0:
-                        break
-                    if dependency[int(k - 1)].text == drugA or dependency[int(k - 1)].text == drugB:
-                        continue
-                    quene.append(int(k))
-                    addMechanism(int(k))
-                return quene
-
-        doc = self.pipeline(event)
-        dependency = []
-        for j in range(len(doc.sentences[0].words)):
-            dependency.append(doc.sentences[0].words[j])
-        sons = np.zeros((len(dependency), len(dependency)))
-        sonsNum = np.zeros(len(dependency))
-        flag = False
-        count = 0
-        for j in dependency:
-            # if j.dependency_relation=='root':
-            if j.deprel == 'root':
-                # root=int(j.index)
-                root = int(j.id)
-                action = j.lemma
-            if j.text in self.druglist2:
-                if count < 2:
-                    if flag == True:
-                        drugB = j.text
-                        count += 1
-                    else:
-                        drugA = j.text
-                        flag = True
-                        count += 1
-            sonsNum[j.head-1] += 1
-            sons[j.head-1, int(sonsNum[j.head-1]-1)] = int(j.id)
-        quene = []
-        for j in range(int(sonsNum[root-1])):
-            if dependency[int(sons[root-1, j]-1)].deprel == 'obj' or dependency[int(sons[root-1, j]-1)].deprel == 'nsubj:pass':
-                quene.append(int(sons[root-1, j]))
-                break
-        quene = addMechanism(quene[0])
-        quene.sort()
-
-        mechanism = " ".join(dependency[j-1].text for j in quene)
-        if mechanism == "the fluid retaining activities":
-            mechanism = "the fluid"
-        if mechanism == "atrioventricular blocking ( AV block )":
-            mechanism = 'the atrioventricular blocking ( AV block ) activities increase'
-
-        self.cache[event] = (mechanism, action,
-                             drugA.replace('_', ' ') if drugA != None else '',
-                             drugB.replace('_', ' ') if drugB != None else '')
-
-
-        if drugA == '' or drugB == '':
-            print(event)
-
-        return mechanism, action, drugA.replace('_', ' ') if drugA != None else '', drugB.replace('_', ' ') if drugB != None else ''
-
-
-# drugs_pickle_path = 'drugbank/output/drugs.pkl'
-# drugs_df = pd.read_pickle(drugs_pickle_path)
-
-# drug_names = drugs_df['name'].to_list()
-
-
-# drug_names = ['Lepirudin','Ursodeoxycholic acid']
-# event_extractor = EventExtractor(
-#     drug_names)
-
-# mechanisms, actions, drugA_list, drugB_list = event_extractor.extract_all(
-#     ['The risk or severity of bleeding and bruising can be increased when Lepirudin is combined with Ursodeoxycholic acid'])
-# # mechanism, action, drugA, drugB = event_extractor.extract(
-# #     'Bivalirudin may increase the anticoagulant activities of Bromfenac')
-
-
-# print(mechanisms)
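The commented-out block at the end of the removed file already hints at the intended call pattern; below is a runnable version of that same example, offered as a sketch. It assumes the stanza English models have been downloaded (as the file's own comments suggest) and that the module is still importable, i.e. ddi-fw 0.0.238 or earlier.

# Sketch based on the commented-out example above (ddi-fw 0.0.238 and earlier only).
import stanza
stanza.download('en')  # one-time model download, per the comment in the file

from ddi_fw.drugbank.event_extractor import EventExtractor  # import path used by drugbank_processor_org.py above

drug_names = ['Lepirudin', 'Ursodeoxycholic acid']
event_extractor = EventExtractor(drug_names)
mechanisms, actions, drugA_list, drugB_list = event_extractor.extract_all(
    ['The risk or severity of bleeding and bruising can be increased when Lepirudin is combined with Ursodeoxycholic acid'])
print(mechanisms)  # extracted mechanism phrase(s)
print(actions)     # root verb lemma of each sentence, e.g. 'increase'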
{ddi_fw-0.0.238.dist-info → ddi_fw-0.0.239.dist-info}/WHEEL: File without changes
{ddi_fw-0.0.238.dist-info → ddi_fw-0.0.239.dist-info}/top_level.txt: File without changes