ion-CSP 2.0.2 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ion_CSP/gen_opt.py ADDED
@@ -0,0 +1,378 @@
+ import os
+ import csv
+ import time
+ import shutil
+ import logging
+ import subprocess
+ from ase.io import read
+ from pyxtal import pyxtal
+ from pyxtal.msg import Comp_CompatibilityError, Symm_CompatibilityError
+ from dpdispatcher import Machine
+ from typing import List
+ from ion_CSP.log_and_time import redirect_dpdisp_logging
+
+
+ class CrystalGenerator:
+     def __init__(self, work_dir: str, ion_numbers: List[int], species: List[str]):
+         """
+         Initialize the class from the given ionic crystal component structure files and the corresponding component counts.
+         """
+         redirect_dpdisp_logging(os.path.join(work_dir, "dpdispatcher.log"))
+         self.script_dir = os.path.dirname(__file__)
+         self.mlp_opt_file = os.path.join(self.script_dir, "mlp_opt.py")
+         self.model_file = os.path.join(self.script_dir, "../../model/model.pt")
+         # Use the provided directory, which holds the component structure files, as the working directory to avoid possible path errors
+         self.base_dir = work_dir
+         os.chdir(self.base_dir)
+         self.ion_numbers = ion_numbers
+         self.species = species
+         self.species_paths = []
+         ion_atomss, species_atomss = [], []
+         # Read the atom count of each component and record it in the log file
+         for ion, number in zip(self.species, self.ion_numbers):
+             species_path = os.path.join(self.base_dir, ion)
+             self.species_paths.append(species_path)
+             species_atom = len(read(species_path))
+             species_atomss.append(species_atom)
+             species_atoms = species_atom * number
+             ion_atomss.append(species_atoms)
+         self.cell_atoms = sum(ion_atomss)
+         logging.info(
+             f"The components of ions {self.species} in the ionic crystal are {self.ion_numbers}"
+         )
+         logging.info(
+             f"The number of atoms for each ion is {species_atomss}, and the total number of atoms is {self.cell_atoms}"
+         )
+         self.generation_dir = os.path.join(self.base_dir, "1_generated")
+         os.makedirs(self.generation_dir, exist_ok=True)
+         self.POSCAR_dir = os.path.join(self.base_dir, "1_generated", "POSCAR_Files")
+         self.primitive_cell_dir = os.path.join(self.base_dir, "1_generated", "primitive_cell")
+
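For orientation, a minimal instantiation sketch; the working directory and component file names below are hypothetical, and the component files must be readable by ase.io.read:

    # Hypothetical setup: a 1:1 salt whose two component structures sit in work_dir.
    generator = CrystalGenerator(
        work_dir="/path/to/workdir",          # hypothetical directory
        ion_numbers=[1, 1],                   # one cation and one anion per cell
        species=["cation.xyz", "anion.xyz"],  # hypothetical ASE-readable files
    )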
+     def _sequentially_read_files(self, directory: str, prefix_name: str):
+         """
+         Private method:
+         Extract the numbers from the file names, convert them to integers, sort them in ascending order, and return a list of (index, filename) pairs.
+         """
+         # Collect all files in the directory that start with prefix_name, in this case 'POSCAR_'
+         files = [f for f in os.listdir(directory) if f.startswith(prefix_name)]
+         file_index_pairs = []
+         for filename in files:
+             index_part = filename[len(prefix_name):]  # Keep the numeric part after the 'POSCAR_' prefix
+             if index_part.isdigit():  # Make sure the remainder is all digits
+                 index = int(index_part)
+                 file_index_pairs.append((index, filename))
+         file_index_pairs.sort(key=lambda pair: pair[0])
+         return file_index_pairs
+
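The sorting contract in a self-contained sketch: numeric suffixes are compared as integers, so POSCAR_10 sorts after POSCAR_2 (the file names here are made up):

    import os
    import tempfile

    # Create a throwaway directory with out-of-order numeric suffixes.
    with tempfile.TemporaryDirectory() as d:
        for n in (10, 0, 2):
            open(os.path.join(d, f"POSCAR_{n}"), "w").close()
        files = [f for f in os.listdir(d) if f.startswith("POSCAR_")]
        pairs = sorted(
            (int(f[len("POSCAR_"):]), f)
            for f in files
            if f[len("POSCAR_"):].isdigit()
        )
        print(pairs)  # [(0, 'POSCAR_0'), (2, 'POSCAR_2'), (10, 'POSCAR_10')]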
+     def generate_structures(
+         self, num_per_group: int = 100, space_groups_limit: int = 230
+     ):
+         """
+         Based on the provided ion species and their counts, use pyxtal to randomly generate ionic crystal structures across the crystallographic space groups.
+         """
+         # Create the POSCAR_Files folder if it does not exist yet
+         os.makedirs(self.POSCAR_dir, exist_ok=True)
+         total_count = 0  # Counter for the generated POSCAR files
+         if space_groups_limit:
+             # Restrict the space-group search range, e.g. to save testing time
+             assert 1 <= space_groups_limit <= 230, "Space group number out of range!"
+             space_groups = space_groups_limit
+         else:
+             # Otherwise search all 230 space groups
+             space_groups = 230
+         group_counts, group_exceptions = [], []
+         for space_group in range(1, space_groups + 1):
+             logging.info(f"Space group: {space_group}")
+             group_count, exception_message = 0, "None"
+             # num_per_group sets how many POSCAR structure files are generated per space group
+             for i in range(num_per_group):
+                 try:
+                     # Instantiate the pyxtal class
+                     pyxtal_structure = pyxtal(molecular=True)
+                     # Randomly generate an ionic crystal from the anion/cation structure files, their ratio and the space group; num_per_group should be 100 or more in production
+                     pyxtal_structure.from_random(
+                         dim=3,
+                         group=space_group,
+                         species=self.species_paths,
+                         numIons=self.ion_numbers,
+                         conventional=False,
+                     )
+                     # Write the POSCAR_n file
+                     POSCAR_path = os.path.join(
+                         self.POSCAR_dir, f"POSCAR_{total_count}"
+                     )
+                     pyxtal_structure.to_file(POSCAR_path, fmt="poscar")
+                     total_count += 1
+                     group_count += 1
+                 except (RuntimeError, Comp_CompatibilityError, Symm_CompatibilityError) as e:
+                     # Catch long-running generation, composition-compatibility and symmetry-compatibility errors for a space group, so that generation can run to completion without interruption
+                     logging.error(f"Generating structure error: {e}")
+                     # Record the exception type and leave the generation loop for this space group
+                     exception_message = type(e).__name__
+                     break
+             group_counts.append(group_count)
+             group_exceptions.append(exception_message)
+             logging.info(f" {group_count} POSCAR generated.")
+         generation_csv_file = os.path.join(self.generation_dir, 'generation.csv')
+         # Write the per-space-group statistics to a .csv file
+         with open(generation_csv_file, "w", newline="", encoding="utf-8") as csv_file:
+             writer = csv.writer(csv_file)
+             # Build the header
+             header = ["Space_group", "POSCAR_num", "Bad_num", "Exception"]
+             writer.writerow(header)
+             # Write one row per space group
+             for space_group, group_count, group_exception in zip(
+                 range(1, space_groups + 1), group_counts, group_exceptions
+             ):
+                 writer.writerow([space_group, group_count, 0, group_exception])
+         # Keep group_counts for later use
+         self.group_counts = group_counts
+         logging.info(
+             f"Using pyxtal.from_random, {total_count} ion crystal structures were randomly generated based on crystal space groups."
+         )
+
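A possible driver call, continuing the hypothetical setup above; after it returns, 1_generated/generation.csv holds one row per space group with the columns Space_group, POSCAR_num, Bad_num and Exception:

    # Generate up to 10 candidates for each of the first 20 space groups
    # (production runs would use num_per_group=100 and all 230 groups).
    generator.generate_structures(num_per_group=10, space_groups_limit=20)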
+     def _single_phonopy_processing(self, filename):
+         # Process the POSCAR files in order; first copy each one to a plain POSCAR file without the numeric suffix
+         shutil.copy(f"{self.POSCAR_dir}/{filename}", f"{self.POSCAR_dir}/POSCAR")
+         try:
+             subprocess.run(["nohup", "phonopy", "--symmetry", "POSCAR"], check=True)
+         except subprocess.CalledProcessError as e:
+             # Catch phonopy execution errors
+             logging.error(f"Phonopy execution failed for {filename}: {str(e)}")
+             raise
+
+         # Move the PPOSCAR (symmetrized primitive cell) written by phonopy into the primitive_cell folder and rename it back to POSCAR_index (the BPOSCAR conventional cell is not used)
+         shutil.move(
+             f"{self.POSCAR_dir}/PPOSCAR", f"{self.primitive_cell_dir}/{filename}"
+         )
+         cell_atoms = len(read(f"{self.primitive_cell_dir}/{filename}"))
+
+         # Check the atom count of the generated cell; on a mismatch, delete that POSCAR and record it in the log
+         if cell_atoms != self.cell_atoms:
+             error_message = f"Atom number mismatch ({cell_atoms} vs {self.cell_atoms})"
+             logging.error(f"{filename} - {error_message}")
+
+             # Trace the structure back to its space group
+             poscar_index = int(filename.split('_')[1])  # Extract the POSCAR index
+             space_group = self._find_space_group(poscar_index)
+
+             # Update the CSV file
+             csv_path = os.path.join(self.generation_dir, 'generation.csv')
+             with open(csv_path, 'r') as f:
+                 reader = csv.reader(f)
+                 rows = list(reader)
+
+             # Update Bad_num and Exception for the corresponding space group
+             for row in rows[1:]:  # Skip the header
+                 if int(row[0]) == space_group:
+                     row[2] = str(int(row[2]) + 1)
+                     row[3] = "AtomNumberError"
+                     break
+             # Write the updated rows back to the .csv file
+             with open(csv_path, 'w', newline='') as f:
+                 writer = csv.writer(f)
+                 writer.writerows(rows)
+             # Delete the POSCAR whose atom count does not match
+             os.remove(f"{self.primitive_cell_dir}/{filename}")
+
+     def _find_space_group(self, poscar_index: int) -> int:
+         """Find the space group that a given POSCAR index belongs to."""
+         cumulative = 0
+         for idx, count in enumerate(self.group_counts, start=1):
+             if cumulative <= poscar_index < cumulative + count:
+                 return idx
+             cumulative += count
+         raise ValueError(f"POSCAR {poscar_index} not found in any space group")
+
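The cumulative lookup can be checked in isolation; with the made-up counts below, POSCAR_0 and POSCAR_1 came from space group 1 and POSCAR_2 through POSCAR_4 from space group 3:

    # Standalone re-implementation of the lookup logic with hypothetical counts.
    group_counts = [2, 0, 3]  # structures generated for space groups 1, 2, 3

    def find_space_group(poscar_index: int) -> int:
        cumulative = 0
        for idx, count in enumerate(group_counts, start=1):
            if cumulative <= poscar_index < cumulative + count:
                return idx
            cumulative += count
        raise ValueError(f"POSCAR {poscar_index} not found in any space group")

    print([find_space_group(i) for i in range(5)])  # [1, 1, 3, 3, 3]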
+     def phonopy_processing(self):
+         """
+         Use phonopy to check the generated structures and produce symmetrized primitive cells. This reduces the cost of the subsequent optimization step and guards against pyxtal.from_random generating supercells with doubled stoichiometry.
+         """
+         os.makedirs(self.primitive_cell_dir, exist_ok=True)
+         logging.info("The necessary files are fully prepared.")
+         POSCAR_file_index_pairs = self._sequentially_read_files(
+             self.POSCAR_dir, prefix_name="POSCAR_"
+         )
+         # Change the working directory to POSCAR_Files so that the phonopy symmetry check and the primitive/conventional cell generation can be run as shell commands
+         os.chdir(self.POSCAR_dir)
+         try:
+             logging.info("Start running phonopy processing ...")
+             for _, filename in POSCAR_file_index_pairs:
+                 self._single_phonopy_processing(filename=filename)
+             # Prepare the files required by dpdispatcher and copy them into the primitive_cell folder
+             self.required_files = [self.mlp_opt_file, self.model_file]
+             for file in self.required_files:
+                 shutil.copy(file, self.primitive_cell_dir)
+             logging.info(
+                 "The phonopy processing has been completed!!\nThe symmetrized primitive cells have been saved in POSCAR format to the primitive_cell folder."
+             )
+             # After phonopy has symmetrized all structures, delete the 1_generated/POSCAR_Files folder to save space
+             shutil.rmtree(self.POSCAR_dir)
+         except FileNotFoundError:
+             logging.error(
+                 "There are no POSCAR structure files after generation.\nPlease check the errors reported during generation."
+             )
+             raise FileNotFoundError(
+                 "There are no POSCAR structure files after generation.\nPlease check the errors reported during generation."
+             )
+
+     def dpdisp_mlp_tasks(self, machine: str, resources: str, nodes: int = 1):
+         """
+         Based on the dpdispatcher module, prepare and submit optimization jobs to a remote server or to the local machine.
+         """
+         # Change into the primitive_cell directory to reduce the chance of path errors
+         os.chdir(self.primitive_cell_dir)
+         # Load the machine and resources parameters
+         if machine.endswith(".json"):
+             machine = Machine.load_from_json(machine)
+         elif machine.endswith(".yaml"):
+             machine = Machine.load_from_yaml(machine)
+         else:
+             raise KeyError("Not supported machine file type")
+         # dpdispatcher uses different default locations for forward_common_files on remote servers and on the local machine, so the context type is checked in advance to keep the optimization script unchanged
+         machine_inform = machine.serialize()
+         if machine_inform["context_type"] == "SSHContext":
+             # When submitting to a remote server, create a second-level directory
+             parent = "data/"
+         elif machine_inform["context_type"] == "LocalContext":
+             # When running locally, only a first-level directory is created later
+             parent = ""
+             # For local runs, wait for a GPU whose memory usage is below the threshold
+             selected_gpu = wait_for_gpu(memory_percent_threshold=30, wait_time=300)
+             os.environ["CUDA_VISIBLE_DEVICES"] = str(selected_gpu)
+
+         from dpdispatcher import Resources, Task, Submission
+
+         if resources.endswith(".json"):
+             resources = Resources.load_from_json(resources)
+         elif resources.endswith(".yaml"):
+             resources = Resources.load_from_yaml(resources)
+         else:
+             raise KeyError("Not supported resources file type")
+         # Read all POSCAR files in the primitive_cell folder together with their indices, in order
+         primitive_cell_file_index_pairs = self._sequentially_read_files(
+             self.primitive_cell_dir, prefix_name="POSCAR_"
+         )
+         total_files = len(primitive_cell_file_index_pairs)
+         logging.info(f"The total number of POSCAR files to be optimized: {total_files}")
+         # Build a nested list that stores the tasks of each GPU and distribute the files evenly across the GPUs in turn
+         # For example, 10 structure files distributed over 4 GPUs yields the assignments [0, 4, 8], [1, 5, 9], [2, 6], [3, 7], which makes it easy to tell which job went to which GPU
+         node_jobs = [[] for _ in range(nodes)]
+         for index, _ in primitive_cell_file_index_pairs:
+             node_index = index % nodes
+             node_jobs[node_index].append(index)
+         task_list = []
+         for pop in range(nodes):
+             remote_task_dir = f"{parent}pop{pop}"
+             command = "python mlp_opt.py"
+             forward_files = ["mlp_opt.py", "model.pt"]
+             backward_files = ["log", "err"]
+             # Copy mlp_opt.py and model.pt into task_dir
+             task_dir = os.path.join(self.primitive_cell_dir, f"{parent}pop{pop}")
+             os.makedirs(task_dir, exist_ok=True)
+             for file in forward_files:
+                 shutil.copyfile(
+                     f"{self.primitive_cell_dir}/{file}", f"{task_dir}/{file}"
+                 )
+             for job_i in node_jobs[pop]:
+                 # Add the assigned POSCAR files to the upload list of this task
+                 forward_files.append(f"POSCAR_{job_i}")
+                 # After optimization, retrieve the CONTCAR and OUTCAR output files for every POSCAR
+                 backward_files.append(f"CONTCAR_{job_i}")
+                 backward_files.append(f"OUTCAR_{job_i}")
+                 shutil.copyfile(
+                     f"{self.primitive_cell_dir}/POSCAR_{job_i}",
+                     f"{task_dir}/POSCAR_{job_i}",
+                 )
+                 shutil.copyfile(
+                     f"{self.primitive_cell_dir}/POSCAR_{job_i}",
+                     f"{task_dir}/ori_POSCAR_{job_i}",
+                 )
+
+             task = Task(
+                 command=command,
+                 task_work_path=remote_task_dir,
+                 forward_files=forward_files,
+                 backward_files=backward_files,
+             )
+             task_list.append(task)
+
+         submission = Submission(
+             work_base=self.primitive_cell_dir,
+             machine=machine,
+             resources=resources,
+             task_list=task_list,
+         )
+         submission.run_submission()
+
+         # Create the mlp_optimized directory for the optimized files
+         optimized_dir = os.path.join(self.base_dir, "2_mlp_optimized")
+         os.makedirs(optimized_dir, exist_ok=True)
+         for pop in range(nodes):
+             # Collect the result files from the pop folders under primitive_cell into the mlp_optimized directory
+             task_dir = os.path.join(self.primitive_cell_dir, f"{parent}pop{pop}")
+             # Read the CONTCAR and OUTCAR files in the order given by the POSCAR structure files and copy them over
+             task_file_index_pairs = self._sequentially_read_files(
+                 task_dir, prefix_name="POSCAR_"
+             )
+             for index, _ in task_file_index_pairs:
+                 shutil.copyfile(
+                     f"{task_dir}/CONTCAR_{index}", f"{optimized_dir}/CONTCAR_{index}"
+                 )
+                 shutil.copyfile(
+                     f"{task_dir}/OUTCAR_{index}", f"{optimized_dir}/OUTCAR_{index}"
+                 )
+             # After the machine learning potential optimization has finished, delete the 1_generated/primitive_cell/{parent}/pop{n} folders to save space
+             shutil.rmtree(task_dir)
+         if machine_inform["context_type"] == "SSHContext":
+             # When a remote server was used, also delete the data directory level
+             shutil.rmtree(os.path.join(self.primitive_cell_dir, parent))
+         # Finally remove the run files that are no longer needed to save space, and log the completion of the optimization
+         for file in ["mlp_opt.py", "model.pt"]:
+             os.remove(f"{self.primitive_cell_dir}/{file}")
+         logging.info("Batch optimization completed!!!")
+
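The round-robin split described in the comments of dpdisp_mlp_tasks can be reproduced on its own; 10 files over 4 nodes gives exactly the assignment quoted there:

    # Distribute indices 0..9 over 4 nodes by index modulo node count.
    nodes = 4
    node_jobs = [[] for _ in range(nodes)]
    for index in range(10):
        node_jobs[index % nodes].append(index)
    print(node_jobs)  # [[0, 4, 8], [1, 5, 9], [2, 6], [3, 7]]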
+
+ def get_available_gpus(memory_percent_threshold=40):
+     """Return the GPUs whose memory load is below the given threshold, used as a proxy for having no other users' jobs running."""
+     try:
+         # Query nvidia-smi for the GPU memory statistics
+         output = subprocess.check_output(
+             [
+                 "nvidia-smi",
+                 "--query-gpu=index,memory.used,memory.total",
+                 "--format=csv,noheader,nounits",
+             ],
+             encoding="utf-8",
+         )
+         available_gpus = []
+         for line in output.strip().split("\n"):
+             index, memory_used, memory_total = map(int, line.split(","))
+             memory_used_percent = memory_used / memory_total * 100
+             # Check whether the memory load is below the threshold
+             if memory_used_percent < memory_percent_threshold:
+                 available_gpus.append((index, memory_used_percent))
+         # Sort by memory load so the least-loaded GPU comes first
+         available_gpus.sort(key=lambda x: x[1])
+         # Return only the GPU indices
+         return [gpu[0] for gpu in available_gpus]
+     except subprocess.CalledProcessError as e:
+         logging.error(f"Error while getting GPU info: {e}")
+         return []
+     except Exception as e:
+         logging.error(f"Unexpected error: {e}")
+         return []
+
+
+ def wait_for_gpu(memory_percent_threshold=40, wait_time=300):
+     """Block until an available GPU is found and return its index."""
+     while True:
+         available_gpus = get_available_gpus(memory_percent_threshold)
+         logging.info(f"Available GPU: {available_gpus}")
+         if available_gpus:
+             selected_gpu = available_gpus[0]
+             logging.info(f"Using GPU: {selected_gpu}")
+             return selected_gpu
+         else:
+             logging.info(f"No available GPUs found. Waiting for {wait_time} seconds ...")
+             time.sleep(wait_time)  # Wait before polling again (default 300 s = 5 minutes)
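Taken together, the two helpers back the local-run branch of dpdisp_mlp_tasks; a minimal sketch of that usage (the thresholds are illustrative):

    import os

    # Block until some GPU is below 30% memory use, then pin this process to it.
    selected_gpu = wait_for_gpu(memory_percent_threshold=30, wait_time=300)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(selected_gpu)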
@@ -0,0 +1,88 @@
+ import os
+ import logging
+ from typing import Tuple, List, Dict
+ from collections import defaultdict, Counter
+ from ase.io import read
+ from ase.neighborlist import NeighborList, natural_cutoffs
+
+
+ def identify_molecules(atoms) -> Tuple[Dict[frozenset, int], bool, List[Dict[str, int]]]:
+     visited = set()  # Indices of the atoms that have already been visited
+     identified_molecules = []  # The independent molecules identified so far
+     # Build a radial cutoff for every atom based on its covalent radius
+     cutoffs = natural_cutoffs(atoms, mult=0.7)
+     # Determine the bonded atoms, taking periodic boundary conditions into account
+     nl = NeighborList(cutoffs=cutoffs, bothways=True, self_interaction=False)
+     nl.update(atoms)  # Update the neighbor list
+     # Iterate over all atoms
+     for i in range(len(atoms)):
+         # Only start a new search from atoms that have not been visited yet
+         if i not in visited:
+             current_molecule = defaultdict(int)  # Element counts of the current molecule
+             stack = [i]  # Depth-first search stack, initialized with the current atom index
+             # Depth-first search
+             while stack:
+                 atom_index = stack.pop()  # Take an atom index from the stack
+                 if atom_index not in visited:
+                     visited.add(atom_index)  # Mark it as visited
+                     atom_symbol = atoms[atom_index].symbol  # Element symbol of the atom
+                     current_molecule[atom_symbol] += 1  # Count this element
+                     # Indices of the atoms bonded to the current atom
+                     bonded_indices, _ = nl.get_neighbors(atom_index)
+                     # Push the unvisited bonded atoms onto the stack
+                     stack.extend(idx for idx in bonded_indices if idx not in visited)
+             # If the current molecule contains any elements, add it to the molecule list
+             if current_molecule:
+                 identified_molecules.append(current_molecule)
+     # Merge identical molecules and count them
+     merged_molecules = defaultdict(int)
+     # Convert the identified molecules to a set so they can be compared with the initial molecules
+     identified_set = set()
+     for molecule in identified_molecules:
+         # Convert the molecule to a hashable tuple form so identical molecules can be merged
+         molecule_tuple = frozenset(molecule.items())
+         merged_molecules[molecule_tuple] += 1  # Count identical molecules
+         identified_set.add(frozenset(molecule.items()))
+     # Collect all .gjf files in the current directory
+     initial_gjf_files = [f for f in os.listdir('./') if f.endswith('.gjf')]
+     initial_counts = defaultdict(int)
+     for gjf in initial_gjf_files:
+         # Extract the elements and atom counts from the .gjf file
+         gjf_atoms = read(gjf)
+         elements = gjf_atoms.get_chemical_symbols()
+         counts = Counter(elements)
+         # Convert the element counts to a frozenset for easy comparison
+         initial_counts[frozenset(counts.items())] += 1
+     # Convert the initial molecules to a set so they can be compared with the identified molecules
+     initial_set = set(initial_counts.keys())
+     molecules_flag = (initial_set == identified_set)
+     initial_information = [{element: count for element, count in mol} for mol in initial_set]
+     # Return the merged molecules with their counts; molecules_flag records whether the identified ions match the initial ones
+     return merged_molecules, molecules_flag, initial_information
+
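The merging step at the end can be illustrated standalone: composition dictionaries are keyed by a frozenset of their items, so fragments with the same formula collapse into one count (the fragment data below is made up):

    from collections import defaultdict

    # Two water-like fragments and one hydroxide-like fragment.
    fragments = [{"H": 2, "O": 1}, {"O": 1, "H": 2}, {"O": 1, "H": 1}]
    merged = defaultdict(int)
    for mol in fragments:
        merged[frozenset(mol.items())] += 1
    for formula, count in merged.items():
        print(dict(formula), count)  # {'H': 2, 'O': 1} 2, then {'O': 1, 'H': 1} 1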
+ def molecules_information(molecules: Dict[frozenset, int], molecules_flag: bool, initial_information: List[Dict[str, int]]):
+     """
+     Format the molecule output: report simplified element information in the fixed order C, N, O, H, followed by any other elements present.
+     """
+     # Elements reported in a fixed order
+     fixed_order = ['C', 'N', 'O', 'H']
+     logging.info(f"Initial molecules: {initial_information}")
+     logging.info('Identified independent molecules:')
+     for idx, (molecule, count) in enumerate(molecules.items()):
+         molecule = dict(molecule)
+         total_atoms = sum(molecule.values())  # Total number of atoms in the current molecule
+         # Build the output string
+         output = []
+         for element in fixed_order:
+             if element in molecule:
+                 output.append(f"{element} {molecule[element]}")
+         # Append any elements outside the fixed order
+         for element in molecule:
+             if element not in fixed_order:
+                 output.append(f"{element} {molecule[element]}")
+         formatted_output = ' '.join(output)
+         logging.info(f'  Molecule {idx + 1} (Total Atoms: {total_atoms}, Count: {count}): {formatted_output}')
+     if molecules_flag:
+         logging.info('Molecular Comparison Successful\n')
+     else:
+         logging.warning('Molecular Comparison Failed\n')
+ logging.warning('Molecular Comparison Failed\n')