openmc-data 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
openmc_data/__init__.py CHANGED
@@ -13,7 +13,7 @@ except PackageNotFoundError:
 
 __all__ = ["__version__"]
 
-from .utils import download, extract, process_neutron, process_thermal, state_download_size
+from .utils import download, extract, process_neutron, process_thermal, calculate_download_size, get_file_types, state_download_size
 from .urls import all_release_details
 from .urls_h5 import all_h5_release_details
 from .urls_xml import all_chain_release_details
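The two new exports, `get_file_types` and `calculate_download_size`, are the thread running through every script change below. A minimal sketch of how the new call sites use them, inferred from this diff (the return shape of `get_file_types` and the trailing unit argument of `calculate_download_size` are assumptions based on the call sites, not documented API):

```python
# Sketch only: shapes inferred from the call sites in this diff.
from openmc_data import calculate_download_size, get_file_types

particles = ["neutron", "photon"]

# Assumed to map each particle to the file type its data ships as,
# e.g. {"neutron": "ace", "photon": "endf"} (inferred from the
# `if file_types[particle] == "ace"` branches in the FENDL script).
file_types = get_file_types(particles)

# Replaces the manual compressed/uncompressed size bookkeeping that
# previously fed state_download_size(); the trailing unit argument
# ('GB' here) appears optional, since the NNDC call site omits it.
calculate_download_size("fendl", "3.2b", particles, file_types, "GB")
```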
openmc_data/_version.py CHANGED
@@ -12,5 +12,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.2.5'
-__version_tuple__ = version_tuple = (0, 2, 5)
+__version__ = version = '0.2.7'
+__version_tuple__ = version_tuple = (0, 2, 7)
@@ -11,7 +11,7 @@ import sys
 from pathlib import Path
 
 import openmc.data
-from openmc_data import download, extract, state_download_size, all_release_details
+from openmc_data import download, extract, all_release_details, get_file_types, calculate_download_size
 
 # Make sure Python version is sufficient
 assert sys.version_info >= (3, 6), "Python 3.6+ is required"
@@ -94,48 +94,33 @@ def main():
     # can be exstened to accommodated new releases
     release_details = all_release_details[library_name][release]
 
-    compressed_file_size, uncompressed_file_size = 0, 0
-    for p in ("neutron", "photon"):
-        compressed_file_size += release_details[p]["compressed_file_size"]
-        uncompressed_file_size += release_details[p]["uncompressed_file_size"]
+    file_types = get_file_types(args.particles)
 
     # ==============================================================================
     # DOWNLOAD FILES FROM NNDC SITE
 
     if args.download:
-        state_download_size(
-            compressed_file_size,
-            uncompressed_file_size,
-            'MB'
-        )
+        calculate_download_size(library_name, release, args.particles, file_types)
         for particle in args.particles:
-            particle_download_path = download_path / particle
             for f, checksum in zip(
-                release_details[particle]["compressed_files"],
-                release_details[particle]["checksums"],
+                release_details[particle][file_types[particle]]["compressed_files"],
+                release_details[particle][file_types[particle]]["checksums"],
             ):
                 # Establish connection to URL
-                url = release_details[particle]["base_url"] + f
-                downloaded_file = download(
-                    url, output_path=particle_download_path, checksum=checksum
-                )
+                url = release_details[particle][file_types[particle]]["base_url"] + f
+                download(url, output_path=download_path / particle, checksum=checksum)
 
     # ==============================================================================
     # EXTRACT FILES FROM TGZ
 
     if args.extract:
         for particle in args.particles:
-            if release_details[particle]["file_type"] == "ace":
-                extraction_dir = ace_files_dir
-            elif release_details[particle]["file_type"] == "endf":
-                extraction_dir = endf_files_dir
-
             extract(
                 compressed_files=[
                     download_path / particle / f
-                    for f in release_details[particle]["compressed_files"]
+                    for f in release_details[particle][file_types[particle]]["compressed_files"]
                 ],
-                extraction_dir=extraction_dir,
+                extraction_dir=Path("-".join([library_name, release, file_types[particle]])),
                 del_compressed_file=args.cleanup,
             )
 
@@ -164,7 +149,7 @@ def main():
     library = openmc.data.DataLibrary()
 
     for particle in args.particles:
-        details = release_details[particle]
+        details = release_details[particle][file_types[particle]]
         if particle == "neutron":
             for cls, files in [
                 (openmc.data.IncidentNeutron, ace_files_dir.rglob(details["ace_files"])),
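The recurring change in this script is one extra level of nesting in the release dictionaries: everything formerly under `release_details[particle]` now lives under `release_details[particle][file_types[particle]]`, and the old `"file_type"` field becomes the key itself. A sketch of the implied before/after shape (keys taken from the lookups above; the values are placeholders):

```python
# Old shape (0.2.5): one flat entry per particle, with the format
# recorded in a "file_type" field.
release_details_old = {
    "neutron": {
        "base_url": "https://example.invalid/data/",  # placeholder URL
        "compressed_files": ["file1.tar.gz"],
        "checksums": ["abc123"],
        "file_type": "ace",
    },
}

# New shape (0.2.7): keyed by file type, so a particle can carry both
# ACE and ENDF variants; extraction directories are now derived from
# the same key, e.g. Path("-".join([library_name, release, "ace"])).
release_details_new = {
    "neutron": {
        "ace": {
            "base_url": "https://example.invalid/data/",
            "compressed_files": ["file1.tar.gz"],
            "checksums": ["abc123"],
        },
    },
}
```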
@@ -1,8 +1,7 @@
 #!/usr/bin/env python3
 
 """
-Download FENDL-3.2 FENDL-3.1d, FENDL-3.1a, FENDL-3.0 or FENDL-2.1 ACE
-data from the IAEA and convert it to a HDF5 library for use with OpenMC..
+Download FENDL ACE files from the IAEA and convert it to a HDF5 library for use with OpenMC..
 """
 
 import argparse
@@ -15,7 +14,7 @@ from textwrap import dedent
 from urllib.parse import urljoin
 
 import openmc.data
-from openmc_data import download, state_download_size, all_release_details
+from openmc_data import download, all_release_details, calculate_download_size, get_file_types
 
 
 class CustomFormatter(
@@ -59,11 +58,10 @@ parser.add_argument(
 parser.add_argument(
     "-r",
     "--release",
-    choices=["3.2", "3.1d", "3.1a", "3.0", "2.1"],
-    default="3.2",
-    help="The nuclear data library "
-    "release version. The currently supported options are "
-    "3.2, 3.1d, 3.1a, 3.0 and 2.1",
+    choices=["3.2b", "3.2a", "3.2", "3.1d", "3.1a", "3.1", "3.0", "2.1"],
+    default="3.2b",
+    help="The nuclear data library release version. The currently supported "
+    "options are 3.2b, 3.2a, 3.2, 3.1d, 3.1a, 3.1, 3.0, and 2.1",
 )
 parser.add_argument(
     "-p",
@@ -76,13 +74,13 @@ parser.add_argument(
 parser.add_argument(
     "--cleanup",
     action="store_true",
-    help="Remove download directories when data has " "been processed",
+    help="Remove download directories when data has been processed",
 )
 parser.add_argument(
     "--no-cleanup",
     dest="cleanup",
     action="store_false",
-    help="Do not remove download directories when data has " "been processed",
+    help="Do not remove download directories when data has been processed",
 )
 parser.set_defaults(download=True, extract=True, cleanup=False)
 args = parser.parse_args()
@@ -127,6 +125,7 @@ def check_special_case(particle_details, script_step):
 def main():
 
     library_name = "fendl"
+    file_types = get_file_types(args.particles)
     cwd = Path.cwd()
 
     ace_files_dir = cwd.joinpath("-".join([library_name, args.release, "ace"]))
@@ -144,20 +143,10 @@ def main():
 
     # todo refactor this into the release dictionary
     if args.release == "3.0":
-        release_details[args.release]["neutron"]["special_cases"] = {
+        release_details[args.release]["neutron"]['ace']["special_cases"] = {
             "process": {"19K_039.ace": fendl30_k39}
         }
 
-    compressed_file_size = uncompressed_file_size = 0
-    for p in ("neutron", "photon"):
-        if p in args.particles:
-            compressed_file_size += release_details[args.release][p][
-                "compressed_file_size"
-            ]
-            uncompressed_file_size += release_details[args.release][p][
-                "uncompressed_file_size"
-            ]
-
     # Warnings to be printed at the end of the script.
     output_warnings = []
@@ -165,19 +154,16 @@ def main():
     # DOWNLOAD FILES FROM IAEA SITE
 
     if args.download:
-        state_download_size(compressed_file_size, uncompressed_file_size, 'MB')
+        calculate_download_size(library_name, args.release, args.particles, file_types, 'GB')
 
         for particle in args.particles:
-            # Create a directory to hold the downloads
-            particle_download_path = download_path / particle
-
-            particle_details = release_details[args.release][particle]
+            particle_details = release_details[args.release][particle][file_types[particle]]
             for f in particle_details["compressed_files"]:
                 download(
                     urljoin(particle_details["base_url"], f),
                     as_browser=True,
                     context=ssl._create_unverified_context(),
-                    output_path=particle_download_path,
+                    output_path=download_path / particle,
                 )
 
     # ==============================================================================
@@ -185,13 +171,13 @@ def main():
     if args.extract:
         for particle in args.particles:
 
-            particle_details = release_details[args.release][particle]
+            particle_details = release_details[args.release][particle][file_types[particle]]
 
             special_cases = check_special_case(particle_details, "extract")
 
-            if particle_details["file_type"] == "ace":
+            if file_types[particle] == "ace":
                 extraction_dir = ace_files_dir
-            elif particle_details["file_type"] == "endf":
+            elif file_types[particle] == "endf":
                 extraction_dir = endf_files_dir
 
             for f in particle_details["compressed_files"]:
@@ -223,7 +209,7 @@ def main():
             particle_destination = args.destination / particle
             particle_destination.mkdir(parents=True, exist_ok=True)
 
-            particle_details = release_details[args.release][particle]
+            particle_details = release_details[args.release][particle][file_types[particle]]
 
             # Get dictionary of special cases for particle
             special_cases = check_special_case(particle_details, "process")
@@ -231,7 +217,7 @@ def main():
         if particle == "neutron":
             # Get a list of all ACE files
             neutron_files = ace_files_dir.glob(
-                release_details[args.release]["neutron"]["ace_files"]
+                release_details[args.release]["neutron"][file_types[particle]]["ace_files"]
             )
 
             # excluding files ending with _ that are
@@ -269,7 +255,7 @@ def main():
         elif particle == "photon":
 
             photon_files = endf_files_dir.glob(
-                release_details[args.release]["photon"]["endf_files"]
+                release_details[args.release]["photon"][file_types[particle]]["endf_files"]
             )
 
             for photo_path in sorted(photon_files):
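In the FENDL script the extraction directory is still chosen by file type, but the lookup moves from `particle_details["file_type"]` to the `get_file_types` mapping. A self-contained sketch of that dispatch, assuming the `{particle: file_type}` return shape inferred above (directory names illustrative):

```python
from pathlib import Path

# Assumed return shape of get_file_types (inferred from this diff).
file_types = {"neutron": "ace", "photon": "endf"}

ace_files_dir = Path("fendl-3.2b-ace")
endf_files_dir = Path("fendl-3.2b-endf")

for particle in ("neutron", "photon"):
    # Mirrors the new extract step: the destination is chosen from the
    # mapping rather than from release_details[...]["file_type"].
    extraction_dir = ace_files_dir if file_types[particle] == "ace" else endf_files_dir
    print(particle, "->", extraction_dir)
```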
@@ -14,7 +14,7 @@ from string import digits
 from urllib.parse import urljoin
 
 import openmc.data
-from openmc_data import download, state_download_size, all_release_details
+from openmc_data import download, calculate_download_size, all_release_details, get_file_types
 
 
 class CustomFormatter(
@@ -113,7 +113,7 @@ args = parser.parse_args()
 def main():
 
     library_name = "jeff"
-
+    file_types = get_file_types(['neutron'])
     cwd = Path.cwd()
 
     ace_files_dir = cwd.joinpath("-".join([library_name, args.release, "ace"]))
@@ -123,13 +123,13 @@ def main():
     args.destination = Path("-".join([library_name, args.release, "hdf5"]))
 
     # This dictionary contains all the unique information about each release. This can be exstened to accommodated new releases
-    details = all_release_details[library_name][args.release]['neutron']
+    details = all_release_details[library_name][args.release]['neutron'][file_types['neutron']]
 
     # ==============================================================================
     # DOWNLOAD FILES FROM OECD SITE
 
     if args.download:
-        state_download_size(details["compressed_file_size"], details["uncompressed_file_size"], 'GB')
+        calculate_download_size(library_name, args.release, ['neutron'], file_types, 'GB')
         for f, t in zip(details["compressed_files"], details["temperatures"]):
             if t in args.temperatures or t is None:
                 download(
@@ -13,7 +13,7 @@ from urllib.parse import urljoin
 
 import openmc.data
 
-from openmc_data import download, extract, state_download_size, all_release_details
+from openmc_data import download, extract, calculate_download_size, all_release_details, get_file_types
 
 
 # Make sure Python version is sufficient
@@ -112,7 +112,7 @@ def key(p):
 def main():
 
     library_name = "jeff"
-
+    file_types = get_file_types(['neutron'])
     cwd = Path.cwd()
 
     ace_files_dir = cwd.joinpath("-".join([library_name, args.release, "ace"]))
@@ -120,13 +120,13 @@ def main():
 
     # This dictionary contains all the unique information about each release.
     # This can be extended to accommodate new releases
-    details = all_release_details[library_name][args.release]['neutron']
+    details = all_release_details[library_name][args.release]['neutron'][file_types['neutron']]
 
     # ==============================================================================
     # DOWNLOAD FILES FROM WEBSITE
 
     if args.download:
-        state_download_size(details["compressed_file_size"], details["uncompressed_file_size"], 'GB')
+        calculate_download_size(library_name, args.release, ['neutron'], file_types, 'GB')
         for f, t in zip(details["compressed_files"], details["temperatures"]):
             if t in args.temperatures or t is None:
                 download(urljoin(details["base_url"], f), output_path=download_path)
@@ -11,7 +11,7 @@ from pathlib import Path
 from urllib.parse import urljoin
 
 import openmc.data
-from openmc_data import download, extract, state_download_size, all_release_details
+from openmc_data import download, extract, calculate_download_size, all_release_details, get_file_types
 
 
 # Make sure Python version is sufficient
@@ -81,7 +81,7 @@ args = parser.parse_args()
 def main():
 
     library_name = "tendl"
-
+    file_types = get_file_types(["neutron"])
     cwd = Path.cwd()
 
     ace_files_dir = cwd.joinpath("-".join([library_name, args.release, "ace"]))
@@ -93,17 +93,13 @@ def main():
 
     # This dictionary contains all the unique information about each release.
     # This can be extended to accommodated new releases
-    release_details = all_release_details[library_name][args.release]["neutron"]
+    release_details = all_release_details[library_name][args.release]["neutron"][file_types['neutron']]
 
     # ==============================================================================
     # DOWNLOAD FILES FROM WEBSITE
 
     if args.download:
-        state_download_size(
-            release_details['compressed_file_size'],
-            release_details['uncompressed_file_size'],
-            'GB'
-        )
+        calculate_download_size(library_name, args.release, ['neutron'], file_types, 'GB')
         for f in release_details["compressed_files"]:
             # Establish connection to URL
             download(urljoin(release_details["base_url"], f), output_path=download_path)
@@ -125,7 +121,7 @@ def main():
 
     metastables = ace_files_dir.glob(release_details["metastables"])
     for path in metastables:
-        print(" Fixing {} (ensure metastable)...".format(path))
+        print(f" Fixing {path} (ensure metastable)...")
         text = open(path, "r").read()
         mass_first_digit = int(text[3])
         if mass_first_digit <= 2:
@@ -159,7 +155,7 @@ def main():
 
         # Export HDF5 file
         h5_file = args.destination / f"{data.name}.h5"
-        print("Writing {}...".format(h5_file))
+        print(f"Writing {h5_file}...")
         data.export_to_hdf5(h5_file, "w", libver=args.libver)
 
         # Register with library
@@ -1,12 +1,13 @@
 import argparse
 import json
-import typing
 from pathlib import Path
 
 import openmc.deplete
 
 
-parser = argparse.ArgumentParser()
+parser = argparse.ArgumentParser(prog="add_branching_ratio",
+    description="Adds branching ratios to n,gamma reactions in OpenMC chain files",
+)
 parser.add_argument(
     "-i", "--chain_in", type=Path, required=True, help="Path of the input chain file"
 )
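The same `argparse` change repeats in several scripts below: the bare `ArgumentParser()` gains a `prog` and `description`. A self-contained example of what that buys in the generated help text:

```python
import argparse

parser = argparse.ArgumentParser(
    prog="add_branching_ratio",
    description="Adds branching ratios to n,gamma reactions in OpenMC chain files",
)

# --help now opens with "usage: add_branching_ratio ..." followed by the
# description, instead of the invoking filename (sys.argv[0]) and no summary.
print(parser.format_help())
```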
@@ -2,7 +2,7 @@
 
 import glob
 import os
-from argparse import ArgumentParser
+import argparse
 from pathlib import Path
 from zipfile import ZipFile
 from collections import defaultdict
@@ -30,7 +30,9 @@ URLS = [
 ]
 
 # Parse command line arguments
-parser = ArgumentParser()
+parser = argparse.ArgumentParser(prog="generate_endf71_chain_casl",
+    description="Generates a CASL OpenMC chain file",
+)
 parser.add_argument(
     "-d",
     "--destination",
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-from argparse import ArgumentParser
+import argparse
 from pathlib import Path
 from urllib.parse import urljoin
 
@@ -11,7 +11,9 @@ from openmc_data import all_decay_release_details
 
 
 # Parse command line arguments
-parser = ArgumentParser()
+parser = argparse.ArgumentParser(prog="generate_endf_chain",
+    description="Generates a OpenMC chain file from ENDF nuclear data files",
+)
 parser.add_argument('-r', '--release', choices=['b7.1', 'b8.0'],
                     default='b8.0', help="The nuclear data library release "
                     "version. The currently supported options are b7.1, b8.0")
@@ -1,6 +1,6 @@
 #!/usr/bin/env python3
 
-from argparse import ArgumentParser
+import argparse
 from pathlib import Path
 from urllib.parse import urljoin
 
@@ -11,7 +11,9 @@ from openmc_data import all_decay_release_details
 
 
 # Parse command line arguments
-parser = ArgumentParser()
+parser = argparse.ArgumentParser(prog="generate_jeff_chain",
+    description="Generates a OpenMC chain file from JEFF nuclear data files",
+)
 parser.add_argument('-r', '--release', choices=['3.3'],
                     default='3.3', help="The nuclear data library release "
                     "version. The currently supported options are 3.3")
@@ -4,14 +4,16 @@
 Determine Q values equivalent to the defaults in Serpent
 """
 
-from argparse import ArgumentParser
+import argparse
 from pathlib import Path
 import json
 
 import openmc.data
 
 # Get command line argument
-parser = ArgumentParser()
+parser = argparse.ArgumentParser(prog="generate_serpent_fissq",
+    description="Generates Q values for fission reactions",
+)
 parser.add_argument('dir', type=Path, help='Directory containing ENDF neutron sublibrary files')
 parser.add_argument(
     "-d",
@@ -7,7 +7,7 @@ be borrowed from another library. The --lib flag for this script indicates what
 library should be used for decay and FPY evaluations and defaults to JEFF 3.3.
 """
 
-from argparse import ArgumentParser
+import argparse
 from pathlib import Path
 from urllib.parse import urljoin
 
@@ -18,7 +18,9 @@ from openmc_data.utils import download, extract
 from openmc_data import all_decay_release_details
 
 # Parse command line arguments
-parser = ArgumentParser()
+parser = argparse.ArgumentParser(prog="generate_tendl_chain",
+    description="Generates a OpenMC chain file from TENDL nuclear data files",
+)
 parser.add_argument(
     "--lib",
     choices=("jeff33", "endf80"),
@@ -3,20 +3,53 @@ from pathlib import Path
 import openmc.deplete
 
 
-parser = argparse.ArgumentParser()
-parser.add_argument('chain_in', type=Path)
-parser.add_argument('chain_out', type=Path)
-args = parser.parse_args()
+parser = argparse.ArgumentParser(
+    prog="reduce_chain",
+    description="Removes nuclides with short half lives from OpenMC chain files",
+)
+parser.add_argument(
+    "-i", "--chain_in", type=Path, required=True, help="Path of the input chain file"
+)
+parser.add_argument(
+    "-o",
+    "--chain_out",
+    type=Path,
+    required=True,
+    help="Path of the produced chain file",
+)
 
+parser.add_argument(
+    "-hl",
+    "--half_life",
+    type=float,
+    required=True,
+    default=1e15,
+    help=(
+        "Value of half life in seconds to use when filtering out nuclides, "
+        "half lives below the specified half life nuclides will be excluded "
+        "from the output chain file"
+    )
+
+)
 
-def main():
+args = parser.parse_args()
 
-    chain_full = openmc.deplete.Chain.from_xml(args.chain_in)
+
+def remove_long_half_life_nuclides(chain_in: Path, chain_out: Path, half_life: float):
+    chain_full = openmc.deplete.Chain.from_xml(chain_in)
     stable = [
         nuc.name
         for nuc in chain_full.nuclides
-        if nuc.half_life is None or nuc.half_life > 1e15
+        if nuc.half_life is None or nuc.half_life > half_life
     ]
 
     chain_reduced = chain_full.reduce(stable)
-    chain_reduced.export_to_xml(args.chain_out)
+    chain_reduced.export_to_xml(chain_out)
+
+
+def main():
+    remove_long_half_life_nuclides(args.chain_in, args.chain_out, args.half_life)
+
+
+if __name__ == "__main__":
+    main()
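Since the filtering logic is now a plain function, it can also be driven without the CLI. A usage sketch built only from calls that appear in the diff above (paths are placeholders; note that `--half_life` is declared `required=True`, so its `default=1e15` is never used by argparse):

```python
from pathlib import Path

import openmc.deplete

# Placeholder paths: any OpenMC depletion chain XML will do.
chain_in = Path("chain_endfb71.xml")
chain_out = Path("chain_endfb71_reduced.xml")

# Same steps as remove_long_half_life_nuclides(): keep nuclides that are
# stable (half_life is None) or longer-lived than the cutoff, then reduce
# the chain to that set and write it out.
chain_full = openmc.deplete.Chain.from_xml(chain_in)
keep = [
    nuc.name
    for nuc in chain_full.nuclides
    if nuc.half_life is None or nuc.half_life > 1e15
]
chain_full.reduce(keep).export_to_xml(chain_out)
```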
@@ -230,13 +230,9 @@ def main():
             url = details['base_url'] + f
             if 'checksums' in details.keys():
                 checksum = details['checksums'][i]
-                downloaded_file = download(url,
-                                           output_path=download_path / particle,
-                                           checksum=checksum)
+                download(url, output_path=download_path / particle, checksum=checksum)
             else:
-                downloaded_file = download(url,
-                                           output_path=download_path / particle,
-                                           )
+                download(url, output_path=download_path / particle)
 
     # ==============================================================================
     # EXTRACT FILES FROM TGZ