openmc-data 0.2.5__tar.gz → 0.2.7__tar.gz
This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- {openmc_data-0.2.5 → openmc_data-0.2.7}/.github/workflows/test_processing.yml +8 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/PKG-INFO +1 -1
- {openmc_data-0.2.5 → openmc_data-0.2.7}/pyproject.toml +5 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/__init__.py +1 -1
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/_version.py +2 -2
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_endf.py +10 -25
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_fendl.py +19 -33
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_jeff32.py +4 -4
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_jeff33.py +4 -4
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_tendl.py +6 -10
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/add_branching_ratios.py +3 -2
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/generate_endf71_chain_casl.py +4 -2
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/generate_endf_chain.py +4 -2
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/generate_jeff_chain.py +4 -2
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/generate_serpent_fissq.py +4 -2
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/generate_tendl_chain.py +4 -2
- openmc_data-0.2.7/src/openmc_data/depletion/reduce_chain.py +55 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/generate/generate_endf.py +2 -6
- openmc_data-0.2.7/src/openmc_data/generate/generate_fendl.py +124 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/generate/generate_jendl.py +1 -1
- openmc_data-0.2.7/src/openmc_data/urls.py +471 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/utils.py +26 -5
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data.egg-info/PKG-INFO +1 -1
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data.egg-info/SOURCES.txt +1 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data.egg-info/entry_points.txt +1 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/tests/test_urls.py +19 -18
- openmc_data-0.2.5/src/openmc_data/depletion/reduce_chain.py +0 -22
- openmc_data-0.2.5/src/openmc_data/urls.py +0 -333
- {openmc_data-0.2.5 → openmc_data-0.2.7}/.github/workflows/python-publish.yml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/.github/workflows/test_package.yml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/.github/workflows/test_urls.yml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/.gitignore +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/LICENSE +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/README.md +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/setup.cfg +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/__init__.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_lib80x.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_mcnp70.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_mcnp71.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/__init__.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/branching_ratios_pwr.json +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/branching_ratios_sfr.json +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/casl_chain.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain-nndc-b7.1.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain_casl_pwr.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain_casl_sfr.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain_endf_b7.1.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain_endf_b7.1_pwr.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain_endf_b7.1_sfr.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain_endf_b8.0.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain_endf_b8.0_pwr.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain_endf_b8.0_sfr.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain_endfb71_pwr.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/chain_endfb71_sfr.xml +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/serpent_fissq.json +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/tendl2019_nuclides.json +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/tendl2021_nuclides.json +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/download/__init__.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/download/download_endf.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/download/download_endf_chain.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/download/download_tendl.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/generate/__init__.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/generate/generate_cendl.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/generate/generate_jeff33.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/other/__init__.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/other/combine_libraries.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/other/convert_tendl_rand.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/other/make_compton.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/other/make_stopping_powers.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/other/sample_sandy.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/urls_chain.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/urls_h5.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/urls_xml.py +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data.egg-info/dependency_links.txt +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data.egg-info/requires.txt +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data.egg-info/top_level.txt +0 -0
- {openmc_data-0.2.5 → openmc_data-0.2.7}/tests/test_version.py +0 -0

{openmc_data-0.2.5 → openmc_data-0.2.7}/.github/workflows/test_processing.yml
RENAMED

@@ -26,6 +26,7 @@ jobs:
             src/openmc_data/convert/convert_*.py
             src/openmc_data/download/download_*.py
             src/openmc_data/depletion/add_branching_ratios.py
+            src/openmc_data/depletion/reduce_chain.py

     - name: Set up Python
       if: steps.changed-files-specific.outputs.any_changed == 'true'
@@ -123,3 +124,10 @@ jobs:
           download_endf_chain -r b8.0
           add_branching_ratios -i chain-endf-b8.0.xml -o chain_endf_b8.0_sfr.xml -b sfr
           add_branching_ratios -i chain-endf-b8.0.xml -o chain_endf_b8.0_pwr.xml -b pwr
+
+    - name: test default reduce_chain
+      if: contains(steps.changed-files-specific.outputs.modified_files, 'src/openmc_data/depletion/reduce_chain.py')
+      run: |
+        echo "reduce_chain.py file has been modified."
+        download_endf_chain -r b8.0
+        reduce_chain -i chain-endf-b8.0.xml -o chain_endf_b8.0_sfr.xml -hl 1e15
{openmc_data-0.2.5 → openmc_data-0.2.7}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: openmc_data
-Version: 0.2.5
+Version: 0.2.7
 Summary: A Python package containing a collection of scripts for producing and downloading data for OpenMC
 Author-email: Jonathan Shimwell <mail@jshimwell.com>
 License: Copyright (c) 2019-2022 UChicago Argonne LLC and contributors
{openmc_data-0.2.5 → openmc_data-0.2.7}/pyproject.toml
RENAMED

@@ -53,15 +53,19 @@ convert_mcnp70 = "openmc_data.convert.convert_mcnp70:main"
 convert_mcnp71 = "openmc_data.convert.convert_mcnp71:main"
 convert_endf = "openmc_data.convert.convert_endf:main"
 convert_tendl = "openmc_data.convert.convert_tendl:main"
+
 generate_cendl = "openmc_data.generate.generate_cendl:main"
 generate_endf = "openmc_data.generate.generate_endf:main"
 generate_jeff33 = "openmc_data.generate.generate_jeff33:main"
 generate_jendl = "openmc_data.generate.generate_jendl:main"
+generate_fendl = "openmc_data.generate.generate_fendl:main"
+
 generate_endf71_chain_casl = "openmc_data.depletion.generate_endf71_chain_casl:main"
 generate_endf_chain = "openmc_data.depletion.generate_endf_chain:main"
 generate_jeff_chain = "openmc_data.depletion.generate_jeff_chain:main"
 generate_serpent_fissq = "openmc_data.depletion.generate_serpent_fissq:main"
 generate_tendl_chain = "openmc_data.depletion.generate_tendl_chain:main"
+
 convert_tendl_rand = "openmc_data.other.convert_tendl_rand:main"
 make_compton = "openmc_data.other.make_compton:main"
 make_stopping_powers = "openmc_data.other.make_stopping_powers:main"

@@ -69,6 +73,7 @@ add_branching_ratios = "openmc_data.depletion.add_branching_ratios:main"
 combine_libraries = "openmc_data.other.combine_libraries:main"
 sample_sandy = "openmc_data.other.sample_sandy:main"
 reduce_chain = "openmc_data.depletion.reduce_chain:main"
+
 download_endf = "openmc_data.download.download_endf:main"
 download_tendl = "openmc_data.download.download_tendl:main"
 download_endf_chain = "openmc_data.download.download_endf_chain:main"
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/__init__.py
RENAMED

@@ -13,7 +13,7 @@ except PackageNotFoundError:

 __all__ = ["__version__"]

-from .utils import download, extract, process_neutron, process_thermal, state_download_size
+from .utils import download, extract, process_neutron, process_thermal, calculate_download_size, get_file_types, state_download_size
 from .urls import all_release_details
 from .urls_h5 import all_h5_release_details
 from .urls_xml import all_chain_release_details
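The helpers added to the public API above, calculate_download_size and get_file_types, are what the converter scripts below switch to. A minimal usage sketch, with the call pattern and the nested release-details layout inferred from the diffs in this release (the FENDL library and release values are only illustrative):

    from openmc_data import all_release_details, calculate_download_size, get_file_types

    particles = ["neutron", "photon"]

    # get_file_types appears to map each particle to the file format ("ace" or
    # "endf") distributed for it in a given release.
    file_types = get_file_types(particles)

    # calculate_download_size replaces the per-script size bookkeeping; the final
    # argument selects the unit used when reporting the size (convert_endf omits it,
    # the other converters pass 'GB').
    calculate_download_size("fendl", "3.2b", particles, file_types, "GB")

    # Release details are now keyed by file type as well as by particle.
    details = all_release_details["fendl"]["3.2b"]["neutron"][file_types["neutron"]]
    print(details["compressed_files"])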
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_endf.py
RENAMED

@@ -11,7 +11,7 @@ import sys
 from pathlib import Path

 import openmc.data
-from openmc_data import download, extract,
+from openmc_data import download, extract, all_release_details, get_file_types, calculate_download_size

 # Make sure Python version is sufficient
 assert sys.version_info >= (3, 6), "Python 3.6+ is required"

@@ -94,48 +94,33 @@ def main():
     # can be exstened to accommodated new releases
     release_details = all_release_details[library_name][release]

-
-    for p in ("neutron", "photon"):
-        compressed_file_size += release_details[p]["compressed_file_size"]
-        uncompressed_file_size += release_details[p]["uncompressed_file_size"]
+    file_types = get_file_types(args.particles)

     # ==============================================================================
     # DOWNLOAD FILES FROM NNDC SITE

     if args.download:
-
-            compressed_file_size,
-            uncompressed_file_size,
-            'MB'
-        )
+        calculate_download_size(library_name, release, args.particles, file_types)
         for particle in args.particles:
-            particle_download_path = download_path / particle
             for f, checksum in zip(
-                release_details[particle]["compressed_files"],
-                release_details[particle]["checksums"],
+                release_details[particle][file_types[particle]]["compressed_files"],
+                release_details[particle][file_types[particle]]["checksums"],
             ):
                 # Establish connection to URL
-                url = release_details[particle]["base_url"] + f
-
-                    url, output_path=particle_download_path, checksum=checksum
-                )
+                url = release_details[particle][file_types[particle]]["base_url"] + f
+                download(url, output_path=download_path / particle, checksum=checksum)

     # ==============================================================================
     # EXTRACT FILES FROM TGZ

     if args.extract:
         for particle in args.particles:
-            if release_details[particle]["file_type"] == "ace":
-                extraction_dir = ace_files_dir
-            elif release_details[particle]["file_type"] == "endf":
-                extraction_dir = endf_files_dir
-
             extract(
                 compressed_files=[
                     download_path / particle / f
-                    for f in release_details[particle]["compressed_files"]
+                    for f in release_details[particle][file_types[particle]]["compressed_files"]
                 ],
-                extraction_dir=
+                extraction_dir=Path("-".join([library_name, release, file_types[particle]])),
                 del_compressed_file=args.cleanup,
             )

@@ -164,7 +149,7 @@ def main():
     library = openmc.data.DataLibrary()

     for particle in args.particles:
-        details = release_details[particle]
+        details = release_details[particle][file_types[particle]]
         if particle == "neutron":
             for cls, files in [
                 (openmc.data.IncidentNeutron, ace_files_dir.rglob(details["ace_files"])),
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_fendl.py
RENAMED

@@ -1,8 +1,7 @@
 #!/usr/bin/env python3

 """
-Download FENDL
-data from the IAEA and convert it to a HDF5 library for use with OpenMC..
+Download FENDL ACE files from the IAEA and convert it to a HDF5 library for use with OpenMC..
 """

 import argparse

@@ -15,7 +14,7 @@ from textwrap import dedent
 from urllib.parse import urljoin

 import openmc.data
-from openmc_data import download,
+from openmc_data import download, all_release_details, calculate_download_size, get_file_types


 class CustomFormatter(

@@ -59,11 +58,10 @@ parser.add_argument(
 parser.add_argument(
     "-r",
     "--release",
-    choices=["3.2", "3.1d", "3.1a", "3.0", "2.1"],
-    default="3.
-    help="The nuclear data library "
-    "
-    "3.2, 3.1d, 3.1a, 3.0 and 2.1",
+    choices=["3.2b", "3.2a", "3.2", "3.1d", "3.1a", "3.1", "3.0", "2.1"],
+    default="3.2b",
+    help="The nuclear data library release version. The currently supported "
+    "options are 3.2b, 3.2a, 3.2, 3.1d, 3.1a, 3.1, 3.0, and 2.1",
 )
 parser.add_argument(
     "-p",

@@ -76,13 +74,13 @@ parser.add_argument(
 parser.add_argument(
     "--cleanup",
     action="store_true",
-    help="Remove download directories when data has
+    help="Remove download directories when data has been processed",
 )
 parser.add_argument(
     "--no-cleanup",
     dest="cleanup",
     action="store_false",
-    help="Do not remove download directories when data has
+    help="Do not remove download directories when data has been processed",
 )
 parser.set_defaults(download=True, extract=True, cleanup=False)
 args = parser.parse_args()

@@ -127,6 +125,7 @@ def check_special_case(particle_details, script_step):
 def main():

     library_name = "fendl"
+    file_types = get_file_types(args.particles)
     cwd = Path.cwd()

     ace_files_dir = cwd.joinpath("-".join([library_name, args.release, "ace"]))

@@ -144,20 +143,10 @@ def main():

     # todo refactor this into the release dictionary
     if args.release == "3.0":
-        release_details[args.release]["neutron"]["special_cases"] = {
+        release_details[args.release]["neutron"]['ace']["special_cases"] = {
             "process": {"19K_039.ace": fendl30_k39}
         }

-    compressed_file_size = uncompressed_file_size = 0
-    for p in ("neutron", "photon"):
-        if p in args.particles:
-            compressed_file_size += release_details[args.release][p][
-                "compressed_file_size"
-            ]
-            uncompressed_file_size += release_details[args.release][p][
-                "uncompressed_file_size"
-            ]
-
     # Warnings to be printed at the end of the script.
     output_warnings = []

@@ -165,19 +154,16 @@ def main():
     # DOWNLOAD FILES FROM IAEA SITE

     if args.download:
-
+        calculate_download_size(library_name, args.release, args.particles, file_types, 'GB')

         for particle in args.particles:
-
-            particle_download_path = download_path / particle
-
-            particle_details = release_details[args.release][particle]
+            particle_details = release_details[args.release][particle][file_types[particle]]
             for f in particle_details["compressed_files"]:
                 download(
                     urljoin(particle_details["base_url"], f),
                     as_browser=True,
                     context=ssl._create_unverified_context(),
-                    output_path=
+                    output_path=download_path / particle,
                 )

     # ==============================================================================

@@ -185,13 +171,13 @@ def main():
     if args.extract:
         for particle in args.particles:

-            particle_details = release_details[args.release][particle]
+            particle_details = release_details[args.release][particle][file_types[particle]]

             special_cases = check_special_case(particle_details, "extract")

-            if
+            if file_types[particle] == "ace":
                 extraction_dir = ace_files_dir
-            elif
+            elif file_types[particle] == "endf":
                 extraction_dir = endf_files_dir

             for f in particle_details["compressed_files"]:

@@ -223,7 +209,7 @@ def main():
             particle_destination = args.destination / particle
             particle_destination.mkdir(parents=True, exist_ok=True)

-            particle_details = release_details[args.release][particle]
+            particle_details = release_details[args.release][particle][file_types[particle]]

             # Get dictionary of special cases for particle
             special_cases = check_special_case(particle_details, "process")

@@ -231,7 +217,7 @@ def main():
             if particle == "neutron":
                 # Get a list of all ACE files
                 neutron_files = ace_files_dir.glob(
-                    release_details[args.release]["neutron"]["ace_files"]
+                    release_details[args.release]["neutron"][file_types[particle]]["ace_files"]
                 )

                 # excluding files ending with _ that are

@@ -269,7 +255,7 @@ def main():
             elif particle == "photon":

                 photon_files = endf_files_dir.glob(
-                    release_details[args.release]["photon"]["endf_files"]
+                    release_details[args.release]["photon"][file_types[particle]]["endf_files"]
                 )

             for photo_path in sorted(photon_files):
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_jeff32.py
RENAMED

@@ -14,7 +14,7 @@ from string import digits
 from urllib.parse import urljoin

 import openmc.data
-from openmc_data import download,
+from openmc_data import download, calculate_download_size, all_release_details, get_file_types


 class CustomFormatter(

@@ -113,7 +113,7 @@ args = parser.parse_args()
 def main():

     library_name = "jeff"
-
+    file_types = get_file_types(['neutron'])
     cwd = Path.cwd()

     ace_files_dir = cwd.joinpath("-".join([library_name, args.release, "ace"]))

@@ -123,13 +123,13 @@ def main():
     args.destination = Path("-".join([library_name, args.release, "hdf5"]))

     # This dictionary contains all the unique information about each release. This can be exstened to accommodated new releases
-    details = all_release_details[library_name][args.release]['neutron']
+    details = all_release_details[library_name][args.release]['neutron'][file_types['neutron']]

     # ==============================================================================
     # DOWNLOAD FILES FROM OECD SITE

     if args.download:
-
+        calculate_download_size(library_name, args.release, ['neutron'], file_types, 'GB')
         for f, t in zip(details["compressed_files"], details["temperatures"]):
             if t in args.temperatures or t is None:
                 download(
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_jeff33.py
RENAMED

@@ -13,7 +13,7 @@ from urllib.parse import urljoin

 import openmc.data

-from openmc_data import download, extract,
+from openmc_data import download, extract, calculate_download_size, all_release_details, get_file_types


 # Make sure Python version is sufficient

@@ -112,7 +112,7 @@ def key(p):
 def main():

     library_name = "jeff"
-
+    file_types = get_file_types(['neutron'])
     cwd = Path.cwd()

     ace_files_dir = cwd.joinpath("-".join([library_name, args.release, "ace"]))

@@ -120,13 +120,13 @@ def main():

     # This dictionary contains all the unique information about each release.
     # This can be extended to accommodate new releases
-    details = all_release_details[library_name][args.release]['neutron']
+    details = all_release_details[library_name][args.release]['neutron'][file_types['neutron']]

     # ==============================================================================
     # DOWNLOAD FILES FROM WEBSITE

     if args.download:
-
+        calculate_download_size(library_name, args.release, ['neutron'], file_types, 'GB')
         for f, t in zip(details["compressed_files"], details["temperatures"]):
             if t in args.temperatures or t is None:
                 download(urljoin(details["base_url"], f), output_path=download_path)
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/convert/convert_tendl.py
RENAMED

@@ -11,7 +11,7 @@ from pathlib import Path
 from urllib.parse import urljoin

 import openmc.data
-from openmc_data import download, extract,
+from openmc_data import download, extract, calculate_download_size, all_release_details, get_file_types


 # Make sure Python version is sufficient

@@ -81,7 +81,7 @@ args = parser.parse_args()
 def main():

     library_name = "tendl"
-
+    file_types = get_file_types(["neutron"])
     cwd = Path.cwd()

     ace_files_dir = cwd.joinpath("-".join([library_name, args.release, "ace"]))

@@ -93,17 +93,13 @@ def main():

     # This dictionary contains all the unique information about each release.
     # This can be extended to accommodated new releases
-    release_details = all_release_details[library_name][args.release]["neutron"]
+    release_details = all_release_details[library_name][args.release]["neutron"][file_types['neutron']]

     # ==============================================================================
     # DOWNLOAD FILES FROM WEBSITE

     if args.download:
-
-            release_details['compressed_file_size'],
-            release_details['uncompressed_file_size'],
-            'GB'
-        )
+        calculate_download_size(library_name, args.release, ['neutron'], file_types, 'GB')
         for f in release_details["compressed_files"]:
             # Establish connection to URL
             download(urljoin(release_details["base_url"], f), output_path=download_path)

@@ -125,7 +121,7 @@ def main():

     metastables = ace_files_dir.glob(release_details["metastables"])
     for path in metastables:
-        print(" Fixing {} (ensure metastable)..."
+        print(f" Fixing {path} (ensure metastable)...")
         text = open(path, "r").read()
         mass_first_digit = int(text[3])
         if mass_first_digit <= 2:

@@ -159,7 +155,7 @@ def main():

         # Export HDF5 file
         h5_file = args.destination / f"{data.name}.h5"
-        print("Writing {}..."
+        print(f"Writing {h5_file}...")
         data.export_to_hdf5(h5_file, "w", libver=args.libver)

         # Register with library
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/add_branching_ratios.py
RENAMED

@@ -1,12 +1,13 @@
 import argparse
 import json
-import typing
 from pathlib import Path

 import openmc.deplete


-parser = argparse.ArgumentParser(
+parser = argparse.ArgumentParser(prog="add_branching_ratio",
+    description="Adds branching ratios to n,gamma reactions in OpenMC chain files",
+)
 parser.add_argument(
     "-i", "--chain_in", type=Path, required=True, help="Path of the input chain file"
 )
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/generate_endf71_chain_casl.py
RENAMED

@@ -2,7 +2,7 @@

 import glob
 import os
-
+import argparse
 from pathlib import Path
 from zipfile import ZipFile
 from collections import defaultdict

@@ -30,7 +30,9 @@ URLS = [
 ]

 # Parse command line arguments
-parser = ArgumentParser(
+parser = argparse.ArgumentParser(prog="generate_endf71_chain_casl",
+    description="Generates a CASL OpenMC chain file",
+)
 parser.add_argument(
     "-d",
     "--destination",
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/generate_endf_chain.py
RENAMED

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3

-
+import argparse
 from pathlib import Path
 from urllib.parse import urljoin


@@ -11,7 +11,9 @@ from openmc_data import all_decay_release_details


 # Parse command line arguments
-parser = ArgumentParser(
+parser = argparse.ArgumentParser(prog="generate_endf_chain",
+    description="Generates a OpenMC chain file from ENDF nuclear data files",
+)
 parser.add_argument('-r', '--release', choices=['b7.1', 'b8.0'],
                     default='b8.0', help="The nuclear data library release "
                     "version. The currently supported options are b7.1, b8.0")
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/generate_jeff_chain.py
RENAMED

@@ -1,6 +1,6 @@
 #!/usr/bin/env python3

-
+import argparse
 from pathlib import Path
 from urllib.parse import urljoin


@@ -11,7 +11,9 @@ from openmc_data import all_decay_release_details


 # Parse command line arguments
-parser = ArgumentParser(
+parser = argparse.ArgumentParser(prog="generate_jeff_chain",
+    description="Generates a OpenMC chain file from JEFF nuclear data files",
+)
 parser.add_argument('-r', '--release', choices=['3.3'],
                     default='3.3', help="The nuclear data library release "
                     "version. The currently supported options are 3.3")
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/generate_serpent_fissq.py
RENAMED

@@ -4,14 +4,16 @@
 Determine Q values equivalent to the defaults in Serpent
 """

-
+import argparse
 from pathlib import Path
 import json

 import openmc.data

 # Get command line argument
-parser = ArgumentParser(
+parser = argparse.ArgumentParser(prog="generate_serpent_fissq",
+    description="Generates Q values for fission reactions",
+)
 parser.add_argument('dir', type=Path, help='Directory containing ENDF neutron sublibrary files')
 parser.add_argument(
     "-d",
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/depletion/generate_tendl_chain.py
RENAMED

@@ -7,7 +7,7 @@ be borrowed from another library. The --lib flag for this script indicates what
 library should be used for decay and FPY evaluations and defaults to JEFF 3.3.
 """

-
+import argparse
 from pathlib import Path
 from urllib.parse import urljoin


@@ -18,7 +18,9 @@ from openmc_data.utils import download, extract
 from openmc_data import all_decay_release_details

 # Parse command line arguments
-parser = ArgumentParser(
+parser = argparse.ArgumentParser(prog="generate_tendl_chain",
+    description="Generates a OpenMC chain file from TENDL nuclear data files",
+)
 parser.add_argument(
     "--lib",
     choices=("jeff33", "endf80"),
openmc_data-0.2.7/src/openmc_data/depletion/reduce_chain.py
ADDED

@@ -0,0 +1,55 @@
+import argparse
+from pathlib import Path
+import openmc.deplete
+
+
+parser = argparse.ArgumentParser(
+    prog="reduce_chain",
+    description="Removes nuclides with short half lives from OpenMC chain files",
+)
+parser.add_argument(
+    "-i", "--chain_in", type=Path, required=True, help="Path of the input chain file"
+)
+parser.add_argument(
+    "-o",
+    "--chain_out",
+    type=Path,
+    required=True,
+    help="Path of the produced chain file",
+)
+
+parser.add_argument(
+    "-hl",
+    "--half_life",
+    type=float,
+    required=True,
+    default=1e15,
+    help=(
+        "Value of half life in seconds to use when filtering out nuclides, "
+        "half lives below the specified half life nuclides will be excluded "
+        "from the output chain file"
+    )
+
+)
+
+args = parser.parse_args()
+
+
+def remove_long_half_life_nuclides(chain_in: Path, chain_out: Path, half_life: float):
+    chain_full = openmc.deplete.Chain.from_xml(chain_in)
+    stable = [
+        nuc.name
+        for nuc in chain_full.nuclides
+        if nuc.half_life is None or nuc.half_life > half_life
+    ]
+
+    chain_reduced = chain_full.reduce(stable)
+    chain_reduced.export_to_xml(chain_out)
+
+
+def main():
+    remove_long_half_life_nuclides(args.chain_in, args.chain_out, args.half_life)
+
+
+if __name__ == "__main__":
+    main()
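The reduce_chain console script registered in pyproject.toml wraps the function above; the workflow change earlier exercises it as `reduce_chain -i chain-endf-b8.0.xml -o chain_endf_b8.0_sfr.xml -hl 1e15`. A minimal sketch of doing the same thing programmatically, with placeholder chain file names:

    from pathlib import Path
    import openmc.deplete

    # Keep only nuclides that are stable or have a half life longer than 1e15
    # seconds, mirroring remove_long_half_life_nuclides() above.
    chain_full = openmc.deplete.Chain.from_xml(Path("chain-endf-b8.0.xml"))
    kept = [
        nuc.name
        for nuc in chain_full.nuclides
        if nuc.half_life is None or nuc.half_life > 1e15
    ]
    chain_full.reduce(kept).export_to_xml(Path("chain-endf-b8.0-reduced.xml"))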
{openmc_data-0.2.5 → openmc_data-0.2.7}/src/openmc_data/generate/generate_endf.py
RENAMED

@@ -230,13 +230,9 @@ def main():
                 url = details['base_url'] + f
                 if 'checksums' in details.keys():
                     checksum = details['checksums'][i]
-
-                        output_path=download_path / particle,
-                        checksum=checksum)
+                    download(url, output_path=download_path / particle, checksum=checksum)
                 else:
-
-                        output_path=download_path / particle,
-                    )
+                    download(url, output_path=download_path / particle)

     # ==============================================================================
     # EXTRACT FILES FROM TGZ