pwact 0.1.7__py3-none-any.whl → 0.1.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,16 @@
 import subprocess
+import pkg_resources
 def check_envs():
     # for pwmat
-    subprocess.call("module load pwmat", shell=True)
+    pass
+    # check pwdata
+    # try:
+    #     package_version = pkg_resources.get_distribution('pwdata').version
+    #     if pkg_resources.parse_version(min_version) <= pkg_resources.parse_version(package_version) <= pkg_resources.parse_version(max_version):
+    #         print(f"{package_name} version {package_version} is within the required range [{min_version}, {max_version}].")
+    #         return True
+    #     else:
+    #         print(f"{package_name} version {package_version} is NOT within the required range [{min_version}, {max_version}].")
+    #         return False
+
+    # check PWMLFF???
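
Note: the commented-out check above uses `min_version`, `max_version`, and `package_name` without defining them. A minimal self-contained sketch of the same idea (the version bounds below are assumptions, not values taken from pwact):

```python
import pkg_resources

def check_pwdata_version(package_name: str = "pwdata",
                         min_version: str = "0.1.0",    # assumed lower bound
                         max_version: str = "0.3.0") -> bool:  # assumed upper bound
    """Return True if the installed package version falls inside [min_version, max_version]."""
    try:
        package_version = pkg_resources.get_distribution(package_name).version
    except pkg_resources.DistributionNotFound:
        print(f"{package_name} is not installed.")
        return False
    low = pkg_resources.parse_version(min_version)
    high = pkg_resources.parse_version(max_version)
    current = pkg_resources.parse_version(package_version)
    if low <= current <= high:
        print(f"{package_name} version {package_version} is within [{min_version}, {max_version}].")
        return True
    print(f"{package_name} version {package_version} is NOT within [{min_version}, {max_version}].")
    return False
```
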
@@ -21,7 +21,7 @@ from pwact.active_learning.explore.select_image import select_image
 from pwact.active_learning.user_input.resource import Resource
 from pwact.active_learning.user_input.iter_input import InputParam, MdDetail
 from pwact.utils.constant import AL_STRUCTURE, TEMP_STRUCTURE, EXPLORE_FILE_STRUCTURE, TRAIN_FILE_STRUCTUR, \
-    FORCEFILED, ENSEMBLE, LAMMPS, LAMMPS_CMD, UNCERTAINTY, DFT_STYLE, SLURM_OUT, SLURM_JOB_TYPE, PWDATA
+    FORCEFILED, ENSEMBLE, LAMMPS, LAMMPS_CMD, UNCERTAINTY, DFT_STYLE, SLURM_OUT, SLURM_JOB_TYPE, PWDATA, MODEL_TYPE

 from pwact.utils.format_input_output import get_iter_from_iter_name, get_sub_md_sys_template_name,\
     make_md_sys_name, get_md_sys_template_name, make_temp_press_name, make_temp_name, make_train_name
@@ -116,8 +116,6 @@ class Explore(object):
         jobname = "md{}".format(g_index)
         tag_name = "{}-{}".format(g_index, EXPLORE_FILE_STRUCTURE.md_tag)
         tag = os.path.join(self.md_dir, tag_name)
-        gpu_per_node = None
-        cpu_per_node = 1

         # if self.resource.explore_resource.gpu_per_node > 0:
         #     if self.input_param.strategy.uncertainty.upper() == UNCERTAINTY.committee.upper():
@@ -135,9 +133,9 @@ class Explore(object):
         run_cmd = self.resource.explore_resource.command

         group_slurm_script = set_slurm_script_content(
-            gpu_per_node=gpu_per_node,
+            gpu_per_node=self.resource.explore_resource.gpu_per_node,
             number_node = self.resource.explore_resource.number_node, #1
-            cpu_per_node = cpu_per_node,
+            cpu_per_node = self.resource.explore_resource.cpu_per_node,
             queue_name = self.resource.explore_resource.queue_name,
             custom_flags = self.resource.explore_resource.custom_flags,
             env_script = self.resource.explore_resource.env_script,
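
Note: the hard-coded `gpu_per_node`/`cpu_per_node` locals are dropped in favor of the values from the explore resource config. For context, a Slurm script builder such as `set_slurm_script_content` typically turns these keyword arguments into `#SBATCH` header lines. The helper below is a hypothetical, simplified stand-in; its name, parameters, and defaults are assumptions, not pwact's actual implementation:

```python
def build_slurm_header(jobname, number_node=1, cpu_per_node=1,
                       gpu_per_node=None, queue_name=None, custom_flags=None):
    # Assemble a minimal #SBATCH header; gpu_per_node=None means a CPU-only job.
    lines = [
        "#!/bin/sh",
        "#SBATCH --job-name={}".format(jobname),
        "#SBATCH --nodes={}".format(number_node),
        "#SBATCH --ntasks-per-node={}".format(cpu_per_node),
    ]
    if gpu_per_node:
        lines.append("#SBATCH --gres=gpu:{}".format(gpu_per_node))
    if queue_name:
        lines.append("#SBATCH --partition={}".format(queue_name))
    for flag in custom_flags or []:
        lines.append(flag)
    return "\n".join(lines) + "\n"

print(build_slurm_header("md0", number_node=1, cpu_per_node=4, gpu_per_node=1, queue_name="gpu"))
```
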
@@ -192,11 +190,14 @@ class Explore(object):
                 input_format=md_detail.config_file_format[sys_index],
                 wrap = False,
                 direct = True,
-                sort = True,
+                sort = False,
                 save_format=PWDATA.lammps_lmp,
                 save_path=md_dir,
                 save_name=LAMMPS.lammps_sys_config)
-
+            # import dpdata
+            # _config = dpdata.System(md_detail.config_file_list[sys_index], fmt=md_detail.config_file_format[sys_index])
+            # target_config = os.path.join(md_dir, LAMMPS.lammps_sys_config)
+            # _config.to("lammps/lmp", target_config, frame_idx=0)
             #2. set forcefiled file
             md_model_paths = self.set_forcefiled_file(md_dir)

@@ -246,13 +247,16 @@ class Explore(object):
     def set_forcefiled_file(self, md_dir:str):
         model_name = ""
         md_model_paths = []
-        if self.input_param.strategy.md_type == FORCEFILED.libtorch_lmps:
-            model_name += TRAIN_FILE_STRUCTUR.script_dp_name
-        elif self.input_param.strategy.md_type == FORCEFILED.fortran_lmps:
-            if self.input_param.strategy.compress:
-                raise Exception("ERROR! The compress model does not support fortran lammps md! Please change the 'md_type' to 2!")
-            else:
-                model_name += "{}/{}".format(TRAIN_FILE_STRUCTUR.fortran_dp, TRAIN_FILE_STRUCTUR.fortran_dp_name)
+        if self.input_param.train.model_type == MODEL_TYPE.nep:
+            model_name += "{}/{}".format(TRAIN_FILE_STRUCTUR.model_record, TRAIN_FILE_STRUCTUR.nep_model_lmps)
+        elif self.input_param.train.model_type == MODEL_TYPE.dp:
+            if self.input_param.strategy.md_type == FORCEFILED.libtorch_lmps:
+                model_name += "{}/{}".format(TRAIN_FILE_STRUCTUR.model_record, TRAIN_FILE_STRUCTUR.script_dp_name)
+            elif self.input_param.strategy.md_type == FORCEFILED.fortran_lmps:
+                if self.input_param.strategy.compress:
+                    raise Exception("ERROR! The compress model does not support fortran lammps md! Please change the 'md_type' to 2!")
+                else:
+                    model_name += "{}/{}".format(TRAIN_FILE_STRUCTUR.fortran_dp, TRAIN_FILE_STRUCTUR.fortran_dp_name)

         for model_index in range(self.input_param.strategy.model_num):
             model_name_i = "{}/{}".format(make_train_name(model_index), model_name)
@@ -154,31 +154,33 @@ class SlurmJob(object):
     def check_lammps_out_file(self):
         # read last line of md.log file
         md_dirs = self.get_slurm_works_dir()
-        for md_dir in md_dirs:
-            tag_md_file = os.path.join(md_dir, "tag.md.success")
-            md_log = os.path.join(md_dir, "md.log")
-            if os.path.exists(tag_md_file):
-                continue
-            if not os.path.exists(md_log):
-                return False
-
-            with open(md_log, "rb") as file:
-                file.seek(-2, 2) # seek to two bytes before the end of the file
-                while file.read(1) != b'\n': # scan backwards byte by byte for a newline
-                    file.seek(-2, 1) # step back two bytes
-                last_line = file.readline().decode().strip() # read the last line and strip newline and whitespace
-                if "ERROR: there are two atoms" in last_line:
-                    with open(tag_md_file, 'w') as wf:
-                        wf.writelines("ERROR: there are two atoms too close")
-                    return True
-                elif "Total wall time" in last_line:
-                    with open(tag_md_file, 'w') as wf:
-                        wf.writelines("Job Done!")
-                    return True
-                else:
-                    return False
+        try:
+            for md_dir in md_dirs:
+                tag_md_file = os.path.join(md_dir, "tag.md.success")
+                md_log = os.path.join(md_dir, "md.log")
+                if os.path.exists(tag_md_file):
+                    continue
+                if not os.path.exists(md_log):
+                    return False

-        return True
+                with open(md_log, "rb") as file:
+                    file.seek(-2, 2) # seek to two bytes before the end of the file
+                    while file.read(1) != b'\n': # scan backwards byte by byte for a newline
+                        file.seek(-2, 1) # step back two bytes
+                    last_line = file.readline().decode().strip() # read the last line and strip newline and whitespace
+                    if "ERROR: there are two atoms" in last_line:
+                        with open(tag_md_file, 'w') as wf:
+                            wf.writelines("ERROR: there are two atoms too close")
+                        return True
+                    elif "Total wall time" in last_line:
+                        with open(tag_md_file, 'w') as wf:
+                            wf.writelines("Job Done!")
+                        return True
+                    else:
+                        return False
+            return True
+        except Exception as e:
+            return False


 class Mission(object):
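
Note: the backwards-seek idiom above is a standard way to read only the last line of a large log without loading the whole file, and it fails (raises `OSError`) on files shorter than two bytes or with no newline before the last line, which is why the new `try/except` wrapper is a reasonable safeguard. A standalone sketch of the idiom (file name and usage are illustrative, not part of pwact):

```python
import os

def read_last_line(path: str) -> str:
    """Return the last line of a text file, or "" if it cannot be read this way."""
    try:
        with open(path, "rb") as fh:
            fh.seek(-2, os.SEEK_END)          # start two bytes before EOF
            while fh.read(1) != b"\n":        # walk backwards until the previous newline
                fh.seek(-2, os.SEEK_CUR)
            return fh.readline().decode().strip()
    except OSError:                           # e.g. file shorter than two bytes, or single-line file
        return ""

# Illustrative usage against a hypothetical LAMMPS log:
# if "Total wall time" in read_last_line("md.log"):
#     print("run finished")
```
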
@@ -302,7 +304,7 @@ class Mission(object):
         for job in self.job_list:
             if job.status == JobStatus.terminated:
                 if job.submit_num <= JobStatus.submit_limit.value:
-                    print("resubmit job: {}, the time is {}\n".format(job.submit_cmd, job.submit_num))
+                    print("resubmit job {}: {}, the time is {}\n".format(job.jobid, job.submit_cmd, job.submit_num))
                     job.submit()
                 else:
                     job.status = JobStatus.resubmit_failed
@@ -36,7 +36,33 @@ def _reciprocal_box(box):
     rbox = np.linalg.inv(box)
     rbox = rbox.T
     return rbox
+
+def get_energy_dftb_vasp():
+    aimd_dir = "/data/home/wuxingxing/datas/al_dir/HfO2/dftb/init_bulk_hfo2/temp_init_bulk_work/aimd"#/init_config_0/init/0_aimd
+    scf_dir = "/data/home/wuxingxing/datas/al_dir/HfO2/dftb/init_bulk_hfo2/temp_init_bulk_work/scf"#init_config_0/init/0_aimd/0-scf
+    save_file = "/data/home/wuxingxing/datas/al_dir/HfO2/dftb/init_bulk_hfo2/energy_count_xtb.txt"
+    aimd_dir = glob.glob(os.path.join(aimd_dir, "init_config_*"))
+    aimd_dir = sorted(aimd_dir, key=lambda x: int(os.path.basename(x).split('_')[-1]))
+
+    save_text = []
+    for aimd in aimd_dir:
+        mvm_config = Config(format="pwmat/movement", data_path=os.path.join(aimd, "init/0_aimd/MOVEMENT"))
+        scf_list = glob.glob(os.path.join(scf_dir, os.path.basename(aimd), "init/0_aimd/*-scf"))
+        scf_list = sorted(scf_list, key=lambda x: int(os.path.basename(x).split('-')[0]))
+        for scf in scf_list:
+            index = int(os.path.basename(scf).split('-')[0])
+            scf_config = Config(format="vasp/outcar", data_path=os.path.join(scf, "OUTCAR"))
+            if index == 0:
+                base = scf_config.images[0].Ep - mvm_config.images[index].Ep
+            save_text.append("aimd {} index {} dftb_energy {} vasp_energy {} vasp_just {}"\
+                .format(os.path.basename(aimd), index, \
+                mvm_config.images[index].Ep, scf_config.images[0].Ep,\
+                scf_config.images[0].Ep - base))

+    with open(save_file, 'w') as wf:
+        for line in save_text:
+            wf.write(line)
+            wf.write("\n")
 if __name__=="__main__":
     # parser = argparse.ArgumentParser()
     # parser.add_argument('-i', '--config', help="specify config file path of config", type=str, default='atom.config')
@@ -44,33 +70,6 @@ if __name__=="__main__":
     # parser.add_argument('-k', '--kspacing', help="specify the kspacing, the default 0.5", type=float, default=0.5)
     # args = parser.parse_args()
     # make_kspacing_kpoints(config=args.config, format=args.format, kspacing=args.kspacing)
-    make_kspacing_kpoints(config="/data/home/wuxingxing/datas/al_dir/HfO2/dftb/init_bulk_hfo2/temp_init_bulk_work/scf/init_config_0/init/0_aimd/0-scf/POSCAR",
-        format="vasp/poscar", kspacing=0.5)
-
-    # work_dir = "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory"
-    # data_list = [
-    #     "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory/mvms/sys_000_2650",
-    #     "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory/mvms/sys_001_2650",
-    #     "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory/mvms/sys_002_2650",
-    #     "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory/mvms/sys_003_2650",
-    #     "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory/mvms/sys_004_3858",
-    #     "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory/mvms/sys_005_3860",
-    #     "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory/mvms/sys_006_3860",
-    #     "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory/mvms/sys_007_3859",
-    #     "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory/mvms/sys_008_700",
-    #     "/data/home/wuxingxing/datas/PWMLFF_library_data/HfO2/hfo2_dpgen/HfO2_liutheory/mvms/sys_009_700"]
-    # work_dir = "/data/home/wuxingxing/datas/al_dir/HfO2/baseline_model"
-
-    # train_job = os.path.join(work_dir, "init_model/train.job")
-    # train_json = os.path.join(work_dir, "init_model/train.json")
-
-    # json_dict = json.load(open(train_json))
-    # for data in data_list:
-    #     data_dir = os.path.join(work_dir, os.path.basename(data))
-    #     if os.path.exists(data_dir):
-    #         shutil.rmtree(data_dir)
-    #     os.makedirs(data_dir)
-    #     shutil.copyfile(train_job, os.path.join(data_dir, "train.job"))
-    #     json_dict["datasets_path"].append(data)
-    #     json.dump(json_dict, open(os.path.join(data_dir, "train.json"), "w"), indent=4)
-
+    # make_kspacing_kpoints(config="/data/home/wuxingxing/datas/al_dir/HfO2/dftb/init_bulk_hfo2/temp_init_bulk_work/scf/init_config_0/init/0_aimd/0-scf/POSCAR",
+    #     format="vasp/poscar", kspacing=0.5)
+    get_energy_dftb_vasp()
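
Note: `make_kspacing_kpoints` and `_reciprocal_box` in this file follow the usual recipe for deriving a Monkhorst-Pack mesh from a `kspacing` value: build the reciprocal lattice and divide its vector lengths by the spacing. The snippet below is a hedged sketch of that recipe, not pwact's exact implementation; the 2*pi convention and rounding rule may differ:

```python
import numpy as np

def kpoints_from_kspacing(lattice, kspacing=0.5):
    """Estimate a k-point mesh (N1, N2, N3) from a 3x3 lattice matrix (rows = cell vectors, in Angstrom).

    Assumes the common convention N_i = max(1, ceil(2*pi*|b_i| / kspacing)),
    where b_i are reciprocal lattice vectors; PWmat/pwact may round differently.
    """
    lattice = np.asarray(lattice, dtype=float)
    rbox = np.linalg.inv(lattice).T                      # reciprocal vectors without the 2*pi factor
    lengths = 2.0 * np.pi * np.linalg.norm(rbox, axis=1)
    return tuple(max(1, int(np.ceil(l / kspacing))) for l in lengths)

# Example: a 5 Angstrom cubic cell with kspacing=0.5 gives roughly a 3x3x3 mesh.
print(kpoints_from_kspacing(np.eye(3) * 5.0, kspacing=0.5))
```
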
@@ -9,7 +9,7 @@ from pwact.active_learning.user_input.iter_input import InputParam

 from pwact.utils.format_input_output import make_train_name, get_seed_by_time, get_iter_from_iter_name, make_iter_name
 from pwact.utils.constant import AL_STRUCTURE, UNCERTAINTY, TEMP_STRUCTURE, MODEL_CMD, \
-    TRAIN_INPUT_PARAM, TRAIN_FILE_STRUCTUR, FORCEFILED, LABEL_FILE_STRUCTURE, SLURM_OUT
+    TRAIN_INPUT_PARAM, TRAIN_FILE_STRUCTUR, FORCEFILED, LABEL_FILE_STRUCTURE, SLURM_OUT, MODEL_TYPE

 from pwact.utils.file_operation import save_json_file, write_to_file, del_dir, search_files, add_postfix_dir, mv_file, copy_dir, del_file_list
 '''
@@ -106,18 +106,20 @@ class ModelTrian(object):
         script = ""
         pwmlff = self.resource.train_resource.command
         script += "{} {} {} >> {}\n\n".format(pwmlff, MODEL_CMD.train, TRAIN_FILE_STRUCTUR.train_json, SLURM_OUT.train_out)
-        if self.input_param.strategy.compress:
-            script += " {} {} {} -d {} -o {} -s {} >> {}\n\n".format(pwmlff, MODEL_CMD.compress, model_path, \
-                self.input_param.strategy.compress_dx, self.input_param.strategy.compress_order, TRAIN_FILE_STRUCTUR.compree_dp_name, SLURM_OUT.train_out)
-            cmp_model_path = "{}".format(TRAIN_FILE_STRUCTUR.compree_dp_name)
+
+        # do nothing for nep model
+        if self.input_param.train.model_type == MODEL_TYPE.dp:
+            if self.input_param.strategy.compress:
+                script += " {} {} {} -d {} -o {} -s {}/{} >> {}\n\n".format(pwmlff, MODEL_CMD.compress, model_path, \
+                    self.input_param.strategy.compress_dx, self.input_param.strategy.compress_order, TRAIN_FILE_STRUCTUR.model_record, TRAIN_FILE_STRUCTUR.compree_dp_name, SLURM_OUT.train_out)
+                cmp_model_path = "{}/{}".format(TRAIN_FILE_STRUCTUR.model_record, TRAIN_FILE_STRUCTUR.compree_dp_name)

-        if self.input_param.strategy.md_type == FORCEFILED.libtorch_lmps:
-            if cmp_model_path is None:
-                # script model_record/dp_model.ckpt the torch_script_module.pt will in model_record dir
-                script += " {} {} {} >> {}\n".format(pwmlff, MODEL_CMD.script, model_path, SLURM_OUT.train_out)
-                script += " mv ./{}/{} .\n\n".format(TRAIN_FILE_STRUCTUR.model_record, TRAIN_FILE_STRUCTUR.script_dp_name)
-            else:
-                script += " {} {} {} >> {}\n\n".format(pwmlff, MODEL_CMD.script, cmp_model_path, SLURM_OUT.train_out)
+            if self.input_param.strategy.md_type == FORCEFILED.libtorch_lmps:
+                if cmp_model_path is None:
+                    # script model_record/dp_model.ckpt the torch_script_module.pt will in model_record dir
+                    script += " {} {} {} {}/{} >> {}\n".format(pwmlff, MODEL_CMD.script, model_path, TRAIN_FILE_STRUCTUR.model_record, TRAIN_FILE_STRUCTUR.script_dp_name, SLURM_OUT.train_out)
+                else:
+                    script += " {} {} {} {}/{} >> {}\n\n".format(pwmlff, MODEL_CMD.script, cmp_model_path, TRAIN_FILE_STRUCTUR.model_record, TRAIN_FILE_STRUCTUR.script_dp_name, SLURM_OUT.train_out)
         return script

     '''
@@ -3,7 +3,7 @@ import glob

 from pwact.utils.json_operation import get_parameter, get_required_parameter
 from pwact.utils.constant import MODEL_CMD, FORCEFILED, UNCERTAINTY, PWDATA
-from pwact.active_learning.user_input.train_param.train_param import TrainParam
+from pwact.active_learning.user_input.train_param.train_param import InputParam as TrainParam
 from pwact.active_learning.user_input.scf_param import SCFParam
 class InputParam(object):
     # _instance = None
@@ -12,7 +12,8 @@ class InputParam(object):
         if not os.path.isabs(self.root_dir):
             self.root_dir = os.path.realpath(self.root_dir)
         self.record_file = get_parameter("record_file", json_dict, "al.record")
-        print("Warning! record_file not provided, automatically set to {}! ".format(self.record_file))
+        if "record_file" not in json_dict.keys():
+            print("Warning! record_file not provided, automatically set to {}! ".format(self.record_file))

         self.reserve_work = get_parameter("reserve_work", json_dict, False)
         # self.reserve_feature = get_parameter("reserve_feature", json_dict, False)
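
Note: the change above emits the warning only when `record_file` is genuinely missing from the user input, since `get_parameter` also returns the default in that case. A hypothetical stand-in for `get_parameter` (just the behaviour the diff relies on, not pwact's actual code) makes the distinction clear:

```python
def get_parameter(key, json_dict, default):
    """Return json_dict[key] if present, otherwise the default (sketch of the assumed helper)."""
    return json_dict[key] if key in json_dict else default

json_dict = {}  # user input without "record_file"
record_file = get_parameter("record_file", json_dict, "al.record")
if "record_file" not in json_dict:
    # Only warn when the user really omitted the key, not every time the constructor runs.
    print("Warning! record_file not provided, automatically set to {}!".format(record_file))
```
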
@@ -69,7 +69,10 @@ class Resource(object):
         env_script = ""
         if len(source_list) > 0:
             for source in source_list:
-                if "source" != source.split()[0].lower():
+                if "source" != source.split()[0].lower() and \
+                    "export" != source.split()[0].lower() and \
+                    "module" != source.split()[0].lower() and \
+                    "conda" != source.split()[0].lower():
                     tmp_source = "source {}\n".format(source)
                 else:
                     tmp_source = "{}\n".format(source)
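
Note: entries in `source_list` that already start with `source`, `export`, `module`, or `conda` are now emitted verbatim, while bare paths are still wrapped in `source ...`. A small self-contained sketch of that rule (the list values are illustrative):

```python
def build_env_script(source_list):
    """Wrap bare paths in 'source ...'; pass through lines that are already shell commands."""
    passthrough = ("source", "export", "module", "conda")
    env_script = ""
    for source in source_list:
        if source.split()[0].lower() in passthrough:
            env_script += "{}\n".format(source)
        else:
            env_script += "source {}\n".format(source)
    return env_script

print(build_env_script([
    "/opt/intel/oneapi/setvars.sh",   # becomes: source /opt/intel/oneapi/setvars.sh
    "module load cuda/11.8",          # kept as-is
    "export OMP_NUM_THREADS=4",       # kept as-is
]))
```
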
@@ -104,4 +107,16 @@ class ResourceDetail(object):
         self.env_script = env_script

         if self.gpu_per_node is None and self.cpu_per_node is None:
-            raise Exception("ERROR! Both CPU and GPU resources are not specified!")
+            raise Exception("ERROR! Both CPU and GPU resources are not specified!")
+        # check param
+        if "$SLURM_NTASKS".lower() in command.lower():
+            pass
+        else:
+            if "mpirun -np" in command:
+                np_num = command.split()[2]
+                try:
+                    np_num = int(np_num)
+                    if np_num > cpu_per_node:
+                        raise Exception("the 'command' in resource.json {} set error! The nums of np can not be bigger than 'cpu_per_node'!".format(command))
+                except Exception:
+                    raise Exception("the 'command' in resource.json {} set error! The nums of np can not be parsed!".format(command))
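
Note: in the added block the over-subscription error is raised inside the `try` and is therefore caught by the broad `except Exception`, which re-raises it with the "can not be parsed" message. A hedged sketch of the same validation that keeps the two failure modes separate (function name and messages are illustrative, not pwact's API):

```python
def check_mpirun_np(command: str, cpu_per_node: int) -> None:
    """Validate an 'mpirun -np N ...' resource command against cpu_per_node."""
    if "$slurm_ntasks" in command.lower():
        return  # Slurm substitutes the task count itself, nothing to check
    if "mpirun -np" not in command:
        return
    try:
        np_num = int(command.split()[2])  # assumes the command starts with 'mpirun -np N'
    except (IndexError, ValueError):
        raise Exception("the 'command' {} set error! The nums of np can not be parsed!".format(command))
    if np_num > cpu_per_node:
        raise Exception("the 'command' {} set error! The nums of np can not be bigger than 'cpu_per_node'!".format(command))

check_mpirun_np("mpirun -np 4 lmp_mpi -in in.lammps", cpu_per_node=8)    # passes
# check_mpirun_np("mpirun -np 64 lmp_mpi -in in.lammps", cpu_per_node=8) # would raise
```
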
@@ -165,7 +165,7 @@ class DFTInput(object):

         if "MP_N123" in key_values and self.kspacing is not None:
             error_info = "ERROR! The 'kspacing' in DFT/input/{} dict and 'MP_N123' in {} file cannot coexist.\n".format(os.path.basename(self.input_file), os.path.basename(self.input_file))
-            error_info += "If 'MP_N123' is not indicated in DFT/input/{}, we will use 'kspacing' param to generate the 'MP_N123' parameter\n".format(os.path.basename(self.input_file))
+            error_info += "If 'MP_N123' is not indicated in DFT/input/{}, the 'kspacing' param will be used to generate the 'MP_N123' parameter\n".format(os.path.basename(self.input_file))
             raise Exception(error_info)
         elif "MP_N123" not in key_values and self.kspacing is None:
             self.kspacing = 0.5
@@ -31,7 +31,8 @@ class NetParam(object):
         if "type_" in self.net_type:
             dicts["physical_property"] = self.physical_property
         #dicts["bias"] = self.bias,
-        #dicts["resnet_dt"] = self. resnet_dt,
+        # if self.resnet_dt is False:
+        #     dicts["resnet_dt"] = self.resnet_dt
         #dicts["activation"] = self.activation
         return dicts

@@ -40,6 +41,7 @@ class ModelParam(object):
         self.type_embedding_net = None
         self.embedding_net = None
         self.fitting_net = None
+        self.nep_param:NetParam = None

     '''
     description:
@@ -75,7 +77,9 @@ class ModelParam(object):
     def set_nn_fitting_net(self, fitting_net_dict:dict):
         # fitting_net_dict = get_parameter("fitting_net",json_input, {})
         network_size = get_parameter("network_size", fitting_net_dict,[15,15,1])
-        if network_size[-1] != 1:
+        if not isinstance(network_size, list):
+            network_size = [network_size]
+        if len(network_size) > 1 and network_size[-1] != 1:
             raise Exception("Error: The last layer of the fitting network should have a size of 1 for etot energy, but the input size is {}!".format(network_size[-1]))
         bias = True # get_parameter("bias", fitting_net_dict, True)
         resnet_dt = False # get_parameter("resnet_dt", fitting_net_dict, False)
@@ -91,4 +95,3 @@ class ModelParam(object):
     # # dicts[self.fitting_net.net_type] = self.fitting_net.to_dict()
     # return self.fitting_net.to_dict_std()

-