fargopy 0.4.0__py3-none-any.whl → 1.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fargopy/__init__.py +8 -346
- fargopy/base.py +377 -0
- fargopy/bin/ifargopy +91 -0
- fargopy/bin/vfargopy +2111 -0
- fargopy/data/fargopy_logo.png +0 -0
- fargopy/fields.py +1400 -415
- fargopy/flux.py +809 -723
- fargopy/plot.py +553 -8
- fargopy/simulation.py +1548 -577
- fargopy/sys.py +116 -65
- fargopy/tests/test_base.py +8 -0
- fargopy/tests/test_flux.py +76 -0
- fargopy/tests/test_interp.py +132 -0
- fargopy-1.0.1.data/scripts/ifargopy +91 -0
- fargopy-1.0.1.data/scripts/vfargopy +2111 -0
- fargopy-1.0.1.dist-info/METADATA +450 -0
- fargopy-1.0.1.dist-info/RECORD +21 -0
- {fargopy-0.4.0.dist-info → fargopy-1.0.1.dist-info}/WHEEL +1 -1
- fargopy-1.0.1.dist-info/licenses/LICENSE +661 -0
- fargopy/fsimulation.py +0 -429
- fargopy/tests/test___init__.py +0 -0
- fargopy/util.py +0 -21
- fargopy/version.py +0 -1
- fargopy-0.4.0.data/scripts/ifargopy +0 -15
- fargopy-0.4.0.dist-info/METADATA +0 -492
- fargopy-0.4.0.dist-info/RECORD +0 -17
- fargopy-0.4.0.dist-info/licenses/LICENSE +0 -21
- {fargopy-0.4.0.dist-info → fargopy-1.0.1.dist-info}/entry_points.txt +0 -0
- {fargopy-0.4.0.dist-info → fargopy-1.0.1.dist-info}/top_level.txt +0 -0
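For orientation, the headline change in 1.0.1 is the reworked `Simulation` workflow documented in the `fargopy/simulation.py` diff below. The following sketch simply strings together the usage examples quoted in the new class docstring; it assumes a local FARGO3D installation already configured for fargopy, and 'fargo' is one of the bundled setups:

    import fargopy as fp

    # Create a simulation object and attach it to a named setup
    sim = fp.Simulation()
    sim.set_setup('fargo')

    # Compile the FARGO3D binary, launch a clean run, then monitor and stop it
    sim.compile(parallel=0, gpu=0)
    sim.run(cleanrun=True)
    sim.status()
    sim.stop()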
fargopy/simulation.py
CHANGED
@@ -15,99 +15,169 @@ import time
 import gdown
 import os
 import signal
+import ipywidgets as widgets
+import matplotlib.pyplot as plt
+from IPython.display import display, clear_output
+from pathlib import Path

 ###############################################################
 # Constants
 ###############################################################
 KB = 1.380650424e-16 # Boltzmann constant: erg/K, erg = g cm^2 / s^2
-MP = 1.672623099e-24
-GCONST = 6.67259e-8
-RGAS = 8.314472e7
-MSUN = 1.9891e33
-AU = 1.49598e13
-YEAR = 31557600.0
-
-PRECOMPUTED_BASEURL =
+MP = 1.672623099e-24 # Mass of the proton, g
+GCONST = 6.67259e-8 # Gravitational constant, cm^3/g/s^2
+RGAS = 8.314472e7 # Gas constant, erg/K/mol
+MSUN = 1.9891e33 # g
+AU = 1.49598e13 # cm
+YEAR = 31557600.0 # s
+
+PRECOMPUTED_BASEURL = "https://docs.google.com/uc?export=download&id="
 PRECOMPUTED_SIMULATIONS = dict(
     # Download link: https://drive.google.com/file/d/1YXLKlf9fCGHgLej2fSOHgStD05uFB2C3/view?usp=drive_link
     fargo=dict(
-        id=
+        id="1YXLKlf9fCGHgLej2fSOHgStD05uFB2C3",
         description="""Protoplanetary disk with a Jovian planet [2D]""",
-        size=55
+        size=55,
     ),
     # Download link: https://drive.google.com/file/d/1KMp_82ylQn3ne_aNWEF1T9ElX2aWzYX6/view?usp=drive_link
     p3diso=dict(
-        id=
+        id="1KMp_82ylQn3ne_aNWEF1T9ElX2aWzYX6",
         description="""Protoplanetary disk with a Super earth planet [3D]""",
-        size=220
+        size=220,
    ),
     # Download link: https://drive.google.com/file/d/1Xzgk9qatZPNX8mLmB58R9NIi_YQUrHz9/view?usp=sharing
     p3disoj=dict(
-        id=
+        id="1Xzgk9qatZPNX8mLmB58R9NIi_YQUrHz9",
         description="""Protoplanetary disk with a Jovian planet [3D]""",
-        size=84
+        size=84,
     ),
     # Download link: https://drive.google.com/file/d/1KSQyxH_kbAqHQcsE30GQFRVgAPhMAcp7/view?usp=drive_link
     fargo_multifluid=dict(
-        id=
+        id="1KSQyxH_kbAqHQcsE30GQFRVgAPhMAcp7",
         description="""Protoplanetary disk with several fluids (dust) and a Jovian planet in 2D""",
-        size=100
+        size=100,
     ),
     # Download link: https://drive.google.com/file/d/12ZWoQS_9ISe6eDij5KWWbqR-bHyyVs2N/view?usp=drive_link
     binary=dict(
-        id=
+        id="12ZWoQS_9ISe6eDij5KWWbqR-bHyyVs2N",
         description="""Disk around a binary with the properties of Kepler-38 in 2D""",
-        size=140
+        size=140,
+    ),
+    # Download link: https://drive.google.com/file/d/1Xhunx2-eiW770p91OeTg1kvQdGwOOHhZ/view?usp=sharing
+    pds70iso=dict(
+        id="1Xhunx2-eiW770p91OeTg1kvQdGwOOHhZ",
+        description="""PDS70c - isothermal protoplanetary disk and circumplanetary disk in [3D]""",
+        size=140,
     ),
 )

 if not fargopy.IN_COLAB:
-
+    # SIGCHLD is only available on Unix-like systems (Linux, macOS)
+    if hasattr(signal, "SIGCHLD"):
+        signal.signal(signal.SIGCHLD, signal.SIG_IGN)

 ###############################################################
 # Classes
 ###############################################################
+
+
 class Simulation(fargopy.Fargobj):
+    """Manage a FARGO3D simulation and its filesystem, execution and I/O.

-
-
+    The ``Simulation`` object centralizes configuration, units, setup
+    discovery, compilation and runtime control for a FARGO3D case. It
+    stores derived paths (setup directory, output directory), unit
+    conversion factors and exposes convenience methods to compile,
+    launch and inspect simulation outputs.

-
-
-
+    Attributes
+    ----------
+    fargo3d_dir : str
+        Base directory of FARGO3D installation.
+    outputs_dir : str
+        Directory where outputs are stored.
+    setups_dir : str
+        Directory where setups are located.
+    setup : str
+        Active setup name.
+    fargo3d_binary : str
+        Path to the compiled binary.
+    units_system : str
+        Unit system ('CGS' or 'MKS').

-
-
+    Parameters
+    ----------
+    **kwargs : dict
+        Configuration keywords. Typical keys include ``setup``,
+        ``fargo3d_dir``, ``output_dir`` and ``load``. When ``load=True``
+        the object attempts to read serialized metadata from the
+        specified setup directory.

-
-
-
-    Connect to an existing simulation:
-    >>> sim = fargopy.Simulation(output_dir='/tmp/public/outputs/fargo')
+    Examples
+    --------
+    Create a simulation object:

-
-
-
+    >>> import fargopy as fp
+    >>> sim = fp.Simulation()
+
+    Select a setup:
+
+    >>> sim.set_setup('fargo')
+
+    Compile the simulation:
+
+    >>> sim.compile(parallel=0, gpu=0)
+
+    Run the simulation (clean run):
+
+    >>> sim.run(cleanrun=True)
+
+    Check status:
+
+    >>> sim.status()
+
+    Stop simulation:
+
+    >>> sim.stop()
+    """

+    def __init__(self, **kwargs):
+        """Create and configure the simulation object.
+
+        The constructor initializes unit systems, default paths and
+        optionally loads previously saved simulation metadata when the
+        ``load`` flag is provided in ``kwargs``.
+
+        Parameters
+        ----------
+        **kwargs : dict
+            Same as described in the class-level docstring. When
+            ``load=True`` the object expects a valid ``setup`` name and
+            a FARGO3D base directory to locate a
+            ``fargopy_simulation.json`` file to restore state.
+        """
         super().__init__(**kwargs)
-
+
+        # Default to CGS units
+        self.units_system = "CGS"
+        self._set_constants_cgs()
+
         # Load simulation configuration from a file
-        if (
-
-            if not 'setup' in kwargs.keys():
+        if ("load" in kwargs.keys()) and kwargs["load"]:
+            if not "setup" in kwargs.keys():
                 raise AssertionError(f"You must provide a setup name.")
             else:
-                setup = kwargs[
-            if
-                fargo3d_dir = kwargs[
+                setup = kwargs["setup"]
+            if "fargo3d_dir" in kwargs.keys():
+                fargo3d_dir = kwargs["fargo3d_dir"]
             else:
                 fargo3d_dir = fargopy.Conf.FP_FARGO3D_DIR
-
+
             # Load simulation
-            load_from =
+            load_from = os.path.join(fargo3d_dir, "setups", setup)
             if not os.path.isdir(load_from):
                 print(f"Directory for loading simulation '{load_from}' not found.")
-            json_file =
+            json_file = os.path.join(load_from, "fargopy_simulation.json")

             print(f"Loading simulation from '{json_file}'")
             if not os.path.isfile(json_file):
@@ -117,8 +187,8 @@ class Simulation(fargopy.Fargobj):
                 attributes = json.load(file_handler)

             # Check if there are not serializable items and set to None
-            for key,item in attributes.items():
-                if item ==
+            for key, item in attributes.items():
+                if item == "<not serializable>":
                     attributes[key] = None

             # Update the object
@@ -129,50 +199,52 @@ class Simulation(fargopy.Fargobj):
             self.set_setup(self.setup)
             self.set_output_dir(self.output_dir)
             return
-
+
         # Set units by default
-        self.set_units(UL=AU,UM=MSUN)
-
+        self.set_units(UL=AU, UM=MSUN)
+
         # Set properties
-        self.set_property(
-
-
-        self.set_property(
-
-
-
-
-                          self.set_output_dir)
-        self.set_property('fargo3d_compilation_options',
-                          dict(parallel=0,gpu=0,options=''))
+        self.set_property(
+            "fargo3d_dir", fargopy.Conf.FP_FARGO3D_DIR, self.set_fargo3d_dir
+        )
+        self.set_property("setup", None, self.set_setup)
+        self.set_property("output_dir", None, self.set_output_dir)
+        self.set_property(
+            "fargo3d_compilation_options", dict(parallel=0, gpu=0, options="")
+        )

         # Check if binary is already compiled
-        fargo3d_binary,compile_options = self._generate_binary_name(
-
-
+        fargo3d_binary, compile_options = self._generate_binary_name(
+            parallel=self.fargo3d_compilation_options["parallel"],
+            gpu=self.fargo3d_compilation_options["gpu"],
+            options=self.fargo3d_compilation_options["options"],
+        )
         if os.path.isfile(f"{self.fargo3d_dir}/{fargo3d_binary}"):
             print(f"FARGO3D binary '{fargo3d_binary}' found.")
             self.fargo3d_binary = fargo3d_binary
         else:
             self.fargo3d_binary = None

-        # Simulation process does not exist
-        self.set_property(
-
-
+        # Simulation process does not exist
+        self.set_property("fargo3d_process", None)
+
     # ##########################################################################
     # Set special properties
-    #
-    def set_fargo3d_dir(self,dir=None):
-        """Set
-
-
-
-
-
-
-
-
+    # #########################################a#################################
+    def set_fargo3d_dir(self, dir=None):
+        """Set the base FARGO3D installation directory.
+
+        Parameters
+        ----------
+        dir : str or None
+            Path to the FARGO3D installation. When ``None`` the method
+            is a no-op.
+
+        Returns
+        -------
+        None
+            The method does not return meaningful data; it updates
+            internal path attributes and prints diagnostics.
         """
         if dir is None:
             return
@@ -180,47 +252,53 @@ class Simulation(fargopy.Fargobj):
             print(f"FARGO3D directory '{dir}' does not exist.")
             return
         else:
-            fargo_header =
+            fargo_header = os.path.join(dir, fargopy.Conf.FP_FARGO3D_HEADER)
             if not os.path.isfile(fargo_header):
                 print(f"No header file for FARGO3D found in '{fargo_header}'")
             else:
                 print(f"Your simulation is now connected with '{dir}'")
-
+
         # Set derivative dirs
         self.fargo3d_dir = dir
-        self.outputs_dir = (self.fargo3d_dir
-        self.setups_dir = (self.fargo3d_dir
-
-    def set_setup(self,setup):
-        """
-
-
-
-
-
-
-
-
+        self.outputs_dir = os.path.join(self.fargo3d_dir, "outputs")
+        self.setups_dir = os.path.join(self.fargo3d_dir, "setups")
+
+    def set_setup(self, setup):
+        """Associate the simulation with a named setup directory.
+
+        Parameters
+        ----------
+        setup : str or None
+            Setup name present under the configured ``setups_dir``. If
+            ``None`` the setup association is cleared.
+
+        Returns
+        -------
+        str or None
+            The assigned setup name on success, otherwise ``None``.
         """
         if setup is None:
             self.setup_dir = None
             return None
-        setup_dir =
+        setup_dir = os.path.join(self.setups_dir, setup)
         if self.set_setup_dir(setup_dir):
             self.setup = setup
-            self.logfile =
+            self.logfile = os.path.join(self.setup_dir, f"{self.setup}.log")
             return setup
-
-    def set_setup_dir(self,dir):
-        """Set setup directory

-
-
-
+    def set_setup_dir(self, dir):
+        """Set the absolute path to a setup directory and validate it.
+
+        Parameters
+        ----------
+        dir : str
+            Filesystem path to the setup directory.

-        Returns
-
-
+        Returns
+        -------
+        bool
+            ``True`` when the directory exists and is accepted,
+            otherwise ``False``.
         """
         if dir is None:
             return False
@@ -233,53 +311,132 @@ class Simulation(fargopy.Fargobj):
             self.setup_dir = dir
             return True

-    def set_output_dir(self,dir):
-        """
+    def set_output_dir(self, dir):
+        """Set the output directory where FARGO3D stores run results.
+
+        Parameters
+        ----------
+        dir : str or None
+            Output directory path. When present the method attempts to
+            load simulation properties from a ``variables.par`` file.
         """
         if dir is None:
             return
         if not os.path.isdir(dir):
             print(f"Output directory '{dir}' does not exist.")
             return
-        else:
+        else:
             print(f"Now you are connected with output directory '{dir}'")
         self.output_dir = dir
-        # Check if there are results
-        par_file =
+        # Check if there are results
+        par_file = os.path.join(dir, "variables.par")
         if os.path.isfile(par_file):
             print(f"Found a variables.par file in '{dir}', loading properties")
             self.load_properties()
-
+
         return

-
-
+    ################################
+    ## UNITS
+    ################################
+
+    def units(self, system):
+        """Switch the simulation unit system between ``'CGS'`` and
+        ``'MKS'``.
+
+        Parameters
+        ----------
+        system : {'CGS','MKS'}
+            Unit system identifier.
+        """
+        if system.upper() == "CGS":
+            self._set_constants_cgs()
+            self.units_system = "CGS"
+            print("Units set to CGS.")
+        elif system.upper() == "MKS":
+            self._set_constants_mks()
+            self.units_system = "MKS"
+            print("Units set to MKS.")
+        else:
+            raise ValueError("Invalid units system. Use 'CGS' or 'MKS'.")
+
+    def _set_constants_cgs(self):
+        """Configure physical constants for the CGS unit system."""
+        self.KB = 1.380650424e-16 # Boltzmann constant: erg/K
+        self.MP = 1.672623099e-24 # Mass of the proton, g
+        self.GCONST = 6.67259e-8 # Gravitational constant, cm^3/g/s^2
+        self.RGAS = 8.314472e7 # Gas constant, erg/K/mol
+        self.MSUN = 1.9891e33 # Solar mass, g
+        self.AU = 1.49598e13 # Astronomical unit, cm
+        self.YEAR = 31557600.0 # Year, s
+
+    def _set_constants_mks(self):
+        """Configure physical constants for the MKS unit system."""
+        self.KB = 1.380649e-23 # Boltzmann constant: J/K
+        self.MP = 1.6726219e-27 # Mass of the proton, kg
+        self.GCONST = 6.67430e-11 # Gravitational constant, m^3/kg/s^2
+        self.RGAS = 8.314462618 # Gas constant, J/K/mol
+        self.MSUN = 1.9891e30 # Solar mass, kg
+        self.AU = 1.49598e11 # Astronomical unit, m
+        self.YEAR = 31557600.0 # Year, s
+
+    def set_units(self, UM=MSUN, UL=AU, G=1, mu=2.35):
+        """Define simulation base units and derived unit scales.
+
+        Parameters
+        ----------
+        UM : float
+            Mass unit (default: MSUN).
+        UL : float
+            Length unit (default: AU).
+        G : float
+            Dimensionless gravitational constant (default: 1).
+        mu : float
+            Mean molecular weight used to compute temperature units.
         """
         # Basic
         self.UM = UM
         self.UL = UL
         self.G = G
-        self.UT = (G*self.UL**3/(GCONST*self.UM))**0.5
+        self.UT = (G * self.UL**3 / (GCONST * self.UM)) ** 0.5 # In seconds

         # Thermodynamics
-        self.UTEMP = (GCONST*MP*mu/KB)*self.UM/self.UL
-
+        self.UTEMP = (GCONST * MP * mu / KB) * self.UM / self.UL # In K
+
         # Derivative
-        self.USIGMA = self.UM/self.UL**2
-        self.URHO = self.UM/self.UL**3
-        self.UEPS = self.UM/(self.UL*self.UT**2) # In J/m^3
-        self.UV = self.UL/self.UT
-
+        self.USIGMA = self.UM / self.UL**2 # In g/cm^2
+        self.URHO = self.UM / self.UL**3 # In kg/m^3
+        self.UEPS = self.UM / (self.UL * self.UT**2) # In J/m^3
+        self.UV = self.UL / self.UT
+
     # ##########################################################################
     # Control methods
-    # ##########################################################################
-    def compile(self,setup=None,parallel=0,gpu=0,options=
-        """Compile FARGO3D
+    # ##########################################################################
+    def compile(self, setup=None, parallel=0, gpu=0, options="", force=False):
+        """Compile the FARGO3D executable for the active setup.
+
+        Parameters
+        ----------
+        setup : str, optional
+            Setup name to compile. When provided the method will try to
+            set the setup before compiling.
+        parallel : int, optional
+            Parallel compilation flag (default: 0).
+        gpu : int, optional
+            GPU-enabled compilation flag (default: 0).
+        options : str, optional
+            Additional make options passed to the build system.
+        force : bool, optional
+            If ``True`` perform a clean before building.
+
+        Examples
+        --------
+        >>> sim.compile(parallel=1, gpu=0)
         """
         if setup is not None:
             if not self.set_setup(setup):
                 print("Failed")
-                return
+                return

         # Clean directrory
         if force:
@@ -287,87 +444,141 @@ class Simulation(fargopy.Fargobj):
             cmd = f"make -C {self.fargo3d_dir} clean mrproper"
             # Clean all binaries
             compl = f"rm -rf {self.fargo3d_dir}/fargo3d_*"
-            error,output_clean = fargopy.Sys.run(cmd +
-
+            error, output_clean = fargopy.Sys.run(cmd + "&&" + compl)
+
         # Prepare compilation
-        fargo3d_binary,compile_options = self._generate_binary_name(
-
-
+        fargo3d_binary, compile_options = self._generate_binary_name(
+            parallel=parallel, gpu=gpu, options=options
+        )
+        # compile_options = f"SETUP={self.setup} PARALLEL={parallel} GPU={gpu} "+options
+        # fargo3d_binary = f"fargo3d-{compile_options.replace(' ','-').replace('=','_').strip('-')}"

         # Compile binary
         print(f"Compiling {fargo3d_binary}...")
         cmd = f"cd {self.fargo3d_dir};make {compile_options} 2>&1 |tee {self.setup_dir}/compilation.log"
         compl = f"mv fargo3d {fargo3d_binary}"
         pipe = f""
-        error,output_compilation = fargopy.Sys.run(cmd+
+        error, output_compilation = fargopy.Sys.run(cmd + " && " + compl)

         # Check compilation result
         if os.path.isfile(f"{self.fargo3d_dir}/{fargo3d_binary}"):
             self.fargo3d_binary = fargo3d_binary
             print(f"Succesful compilation of FARGO3D binary {self.fargo3d_binary}")
-            self.fargo3d_compilation_options=dict(
-                parallel=parallel,
-                gpu=gpu,
-                options=options
+            self.fargo3d_compilation_options = dict(
+                parallel=parallel, gpu=gpu, options=options
             )
         else:
-            print(
-
-
-
+            print(
+                f"Something failed when compiling FARGO3D. For details check '{self.setup_dir}/compilation.log"
+            )
+
+    def _generate_binary_name(self, parallel=0, gpu=0, options=""):
+        """Produce the target binary filename and the make options.
+
+        Parameters
+        ----------
+        parallel : int
+            Parallel compilation flag.
+        gpu : int
+            GPU compilation flag.
+        options : str
+            Extra options appended to the make command.
+
+        Returns
+        -------
+        tuple
+            ``(binary_name, compile_options)`` where ``binary_name`` is
+            the filename to expect after a successful build and
+            ``compile_options`` is the command fragment passed to
+            ``make``.
         """
-        compile_options = f"SETUP={self.setup} PARALLEL={parallel} GPU={gpu} "+options
-        fargo3d_binary =
+        compile_options = f"SETUP={self.setup} PARALLEL={parallel} GPU={gpu} " + options
+        fargo3d_binary = (
+            f"fargo3d-{compile_options.replace(' ', '-').replace('=', '_').strip('-')}"
+        )
         return fargo3d_binary, compile_options

-    def run(
-
-
-
-
-
-
-
+    def run(
+        self,
+        mode="async",
+        options="-m",
+        mpioptions="-np 1",
+        resume=False,
+        cleanrun=False,
+        test=False,
+        unlock=True,
+    ):
+        """Run the FARGO3D simulation.
+
+        Parameters
+        ----------
+        mode : str, optional
+            'async' for asynchronous or 'sync' for synchronous execution.
+        options : str, optional
+            Command-line options for FARGO3D.
+        mpioptions : str, optional
+            MPI options for parallel runs.
+        resume : bool, optional
+            Resume from previous run.
+        cleanrun : bool, optional
+            Clean output directory before running.
+        test : bool, optional
+            If True, do not actually run the simulation.
+        unlock : bool, optional
+            If True, unlock the simulation if locked.
+
+        Examples
+        --------
+        Run asynchronously:
+
+        >>> sim.run(cleanrun=True)
+
+        Run synchronously:
+
+        >>> sim.run(mode='sync', cleanrun=True)
+        """

         if self.fargo3d_binary is None:
-            print(
+            print(
+                "You must first compile your simulation with: <simulation>.compile(<option>)."
+            )
             return

         if self._is_running():
             print(f"There is a running process. Please stop it before running/resuming")
             return

-        lock_info = fargopy.Sys.is_locked(self.setup_dir)
+        lock_info = fargopy.Sys.is_locked(self.setup_dir)
         if lock_info:
             print(f"The process is locked by PID {lock_info['pid']}")
             return

         # Mandatory options
         options = options + " -t"
-        if
+        if "fargo3d_run_options" not in self.__dict__.keys():
             self.fargo3d_run_options = options
-
+
         # Clean output if available
         if cleanrun:
             # Check if there is an output director
-            output_dir =
+            output_dir = os.path.join(self.outputs_dir, self.setup)
             if os.path.isdir(output_dir):
                 self.output_dir = output_dir
                 self.clean_output()
             else:
                 print(f"No output directory {output_dir} yet created.")
-
+
         # Select command to run
-        precmd=
-        if self.fargo3d_compilation_options[
+        precmd = ""
+        if self.fargo3d_compilation_options["parallel"]:
             precmd = f"mpirun {mpioptions} "

         # Preparing command
         run_cmd = f"{precmd} ./{self.fargo3d_binary} {options} setups/{self.setup}/{self.setup}.par"

-        self.json_file =
-        if mode ==
-            # Save object
+        self.json_file = os.path.join(self.setup_dir, "fargopy_simulation.json")
+        if mode == "sync":
+            # Save object
             self.save_object(self.json_file)

             # Run synchronously
@@ -376,57 +587,71 @@ class Simulation(fargopy.Fargobj):
             fargopy.Sys.simple(cmd)
             self.fargo3d_process = None

-        elif mode ==
+        elif mode == "async":
             # Run asynchronously
-
+
             # Select logfile mode accroding to if the process is resuming
-            logmode =
-            logfile_handler=open(self.logfile,logmode)
+            logmode = "a" if resume else "w"
+            logfile_handler = open(self.logfile, logmode)

             # Launch process
             print(f"Running asynchronously (test = {test}): {run_cmd}")
             if not test:
-
                 # Check it is not locked
                 lock_info = fargopy.Sys.is_locked(dir=self.setup_dir)
                 if lock_info:
                     if unlock:
                         self.unlock_simulation(lock_info)
                     else:
-                        print(
+                        print(
+                            f"Output directory {self.setup_dir} is locked by a running process"
+                        )
                         return

-                process = subprocess.Popen(
-
-
+                process = subprocess.Popen(
+                    run_cmd.split(),
+                    cwd=self.fargo3d_dir,
+                    stdout=logfile_handler,
+                    stderr=logfile_handler,
+                    close_fds=True,
+                )
+
                 # Introduce a short delay to verify if the process has failed
                 time.sleep(1.0)

                 if process.poll() is None:
                     # Check if program is effectively running
-                    self.fargo3d_process = process
-
+                    self.fargo3d_process = process
+
                     # Lock
                     fargopy.Sys.lock(
-                        dir=self.setup_dir,
-                        content=dict(pid=self.fargo3d_process.pid)
+                        dir=self.setup_dir, content=dict(pid=self.fargo3d_process.pid)
                     )

-                    # Setup output directory
-                    self.set_output_dir(
+                    # Setup output directory
+                    self.set_output_dir(os.path.join(self.outputs_dir, self.setup))

-                    # Save object
+                    # Save object
                     self.save_object(self.json_file)
                 else:
-                    print(
+                    print(
+                        f"Process running failed. Please check the logfile {self.logfile}"
+                    )
         else:
             print(f"Mode {mode} not recognized (valid are 'sync', 'async')")
             return
-
-    def stop(self):
+
+    def stop(self, verbose=False):
+        """Stop a running FARGO3D process and release the lock on the setup.
+
+        If a process is associated with the simulation the method attempts
+        to terminate it and then unlock the setup directory. If no
+        process handler is available the method simply tries to remove
+        any filesystem lock.
+        """
         # Check if the directory is locked
         lock_info = fargopy.Sys.is_locked(self.setup_dir)
-
+
         if lock_info:
             print(f"The process is locked by PID {lock_info['pid']}")

@@ -446,42 +671,52 @@ class Simulation(fargopy.Fargobj):
             self.unlock_simulation(lock_info)
             print(f"The process has finished. Check logfile {self.logfile}.")

-    def unlock_simulation(self,lock_info=None,force=True):
-        """
+    def unlock_simulation(self, lock_info=None, force=True, verbose=False):
+        """Remove a simulation lock and optionally kill the owning PID.
+
+        Parameters
+        ----------
+        lock_info : dict or None
+            Lock metadata as returned by ``fargopy.Sys.is_locked``. If
+            ``None`` the method will attempt to discover the lock for
+            the active setup directory.
+        force : bool, optional
+            When ``True`` attempt to forcibly terminate the process
+            owning the lock (``kill -9``) before releasing the lock.
         """
         if lock_info is None and force:
             lock_info = fargopy.Sys.is_locked(self.setup_dir)
             if lock_info:
                 print(f"Unlocking simulation (pid = {lock_info['pid']})")
-
+
         if lock_info:
-            pid = lock_info[
+            pid = lock_info["pid"]
             fargopy.Debug.trace(f"Unlocking simulation (pid = {pid})")
-            error,output = fargopy.Sys.run(f"kill -9 {pid}")
+            error, output = fargopy.Sys.run(f"kill -9 {pid}")
             fargopy.Sys.unlock(self.setup_dir)
-
-    def status(self,mode='isrunning',verbose=True,**kwargs):
-        """Check the status of the running process
-
-        Parameters:
-            mode: string, defaul='isrunning':
-                Available modes:
-                    'isrunning': Just show if the process is running.
-                    'logfile': Show the latest lines of the logfile
-                    'outputs': Show (and return) a list of outputs
-                    'snapshots': Show (and return) a list of snapshots
-                    'progress': Show progress in realtime
-                    'locking': Show if the directory is locked

+    def status(self, mode="isrunning", verbose=True, **kwargs):
+        """Report process, logfile and output status for the simulation.
+
+        Parameters
+        ----------
+        mode : {'isrunning','logfile','outputs','progress','summary','locking','all'}, optional
+            Which status block to display. ``'all'`` prints every
+            section.
+        verbose : bool, optional
+            When ``True`` print human-readable diagnostics to stdout.
+        **kwargs : dict
+            Backend-specific options used by certain modes (for
+            instance ``numstatus`` for the ``'progress'`` mode).
         """
-        # Bar separating output
-        bar = f"\n{''.join(['#']*80)}\n"
-
+        # Bar separating output
+        bar = f"\n{''.join(['#'] * 80)}\n"
+
        # vprint
-        vprint = print if verbose else lambda x:x
+        vprint = print if verbose else lambda x: x

-        if
-            vprint(bar+"Running status of the process:")
+        if "isrunning" in mode or mode == "all":
+            vprint(bar + "Running status of the process:")
            if self.fargo3d_process:
                poll = self.fargo3d_process.poll()
                if poll is None:
@@ -490,29 +725,29 @@ class Simulation(fargopy.Fargobj):
                 # Unlock any remaining process
                 lock_info = fargopy.Sys.is_locked(self.setup_dir)
                 self.unlock_simulation(lock_info)
-
+
                 vprint(f"\tThe process has ended with termination code {poll}.")
             else:
                 vprint(f"\tThe process is stopped.")

-        if
-            vprint(bar+"Logfile content:")
-            if
+        if "logfile" in mode or mode == "all":
+            vprint(bar + "Logfile content:")
+            if "logfile" in self.__dict__.keys() and os.path.isfile(self.logfile):
                 vprint("The latest 10 lines of the logfile:\n")
                 if verbose:
                     os.system(f"tail -n 10 {self.logfile}")
             else:
                 vprint("No log file created yet")

-        if
-            vprint(bar+"Output content:")
-            error,output = fargopy.Sys.run(f"ls {self.output_dir}/*.dat")
+        if "outputs" in mode or mode == "all":
+            vprint(bar + "Output content:")
+            error, output = fargopy.Sys.run(f"ls {self.output_dir}/*.dat")
             if not error:
-                files = [file.split(
+                files = [file.split("/")[-1] for file in output[:-1]]
                 file_list = ""
-                for i,file in enumerate(files):
+                for i, file in enumerate(files):
                     file_list += f"{file}, "
-                    if ((i+1)%10) == 0:
+                    if ((i + 1) % 10) == 0:
                         file_list += "\n"
                 file_list = file_list.strip("\n,")
                 vprint(f"\n{len(files)} available datafiles:\n")
@@ -521,107 +756,125 @@ class Simulation(fargopy.Fargobj):
             else:
                 vprint("No datafiles yet available")

-        if
-            vprint(bar+"Summary:")
+        if "summary" in mode or mode == "all":
+            vprint(bar + "Summary:")
             nsnaps = self._get_nsnaps()
-            print(
+            print(
+                f"The simulation has been ran for {nsnaps} time-steps (including the initial one)."
+            )

-        if
-            vprint(bar+"Locking status:")
+        if "locking" in mode or mode == "all":
+            vprint(bar + "Locking status:")
             lock_info = fargopy.Sys.is_locked(self.setup_dir)
             print(lock_info)

-        if
+        if "progress" in mode:
             vprint(bar)
             numstatus = 5
-            if
-                numstatus = int(kwargs[
+            if "numstatus" in kwargs.keys():
+                numstatus = int(kwargs["numstatus"])
             self._status_progress(numstatus=numstatus)

-        print(
+        print(
+            f"\nOther status modes: 'isrunning', 'logfile', 'outputs', 'progress', 'summary'"
+        )

-    def _status_progress(self,minfreq=0.1,numstatus=100):
-        """
+    def _status_progress(self, minfreq=0.1, numstatus=100):
+        """Display a live progress summary by tailing the simulation log.

-
-
-
+        The routine periodically reads the simulation logfile and prints
+        snapshot progress information until the process stops or the
+        requested number of updates is reached.

-
-
+        Parameters
+        ----------
+        minfreq : float, optional
+            Minimum polling interval in seconds.
+        numstatus : int, optional
+            Maximum number of status frames to emit.
         """
         # Prepare
-        if
+        if "status_frequency" not in self.__dict__.keys():
             frequency = minfreq
         else:
             frequency = self.status_frequency
-        previous_output =
+        previous_output = ""
         previous_resumable_snapshot = 1e100
         time_previous = time.time()

         # Infinite loop checking for output
         n = 0
-        print(f"Progress of the simulation (interrupt by pressing
-        while True and (n<numstatus):
-
+        print(f"Progress of the simulation (interrupt by pressing the stop button):")
+        while True and (n < numstatus):
             # Check if the process is running locally
             if not self._is_running():
                 print("The simulation is not running anymore")
                 return
-
-            error,output = fargopy.Sys.run(f"grep OUTPUT {self.logfile} |tail -n 1")
-
+
+            error, output = fargopy.Sys.run(f"grep OUTPUT {self.logfile} |tail -n 1")
+
             if not error:
                 # Get the latest output
                 latest_output = output[-2]
                 if latest_output != previous_output:
-                    print(
-
+                    print(
+                        f"{n + 1}:{latest_output} [output pace = {frequency:.1f} secs]"
+                    )
+                    n += 1
                     # Fun the number of the output
-                    find = re.findall(r
+                    find = re.findall(r"OUTPUTS\s+(\d+)", latest_output)
                     resumable_snapshot = int(find[0])
                     # Get the time elapsed since last status check
                     time_now = time.time()
-                    frequency = max(time_now - time_previous,minfreq)/2
-                    if (resumable_snapshot - previous_resumable_snapshot)>1:
+                    frequency = max(time_now - time_previous, minfreq) / 2
+                    if (resumable_snapshot - previous_resumable_snapshot) > 1:
                         # Reduce frequency if snapshots are accelerating
-                        frequency = frequency/2
+                        frequency = frequency / 2
                     self.status_frequency = frequency
                     previous_resumable_snapshot = resumable_snapshot
                     time_previous = time_now
                     previous_output = latest_output
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+            try:
+                time.sleep(frequency)
+            except KeyboardInterrupt:
+                print("Interrupted by user.")
+                return
+
+    def resume(self, snapshot=-1, mpioptions="-np 1"):
+        """Resume an interrupted simulation from the specified snapshot.
+
+        Parameters
+        ----------
+        snapshot : int, optional
+            Snapshot index to resume from. Use ``-1`` to resume from
+            the most recent resumable snapshot.
+        mpioptions : str, optional
+            MPI launch options for parallel executions.
+        """
         latest_snapshot_resumable = self._is_resumable()
-        if latest_snapshot_resumable<0:
+        if latest_snapshot_resumable < 0:
             return
         if self._is_running():
             print(f"There is a running process. Please stop it before resuming")
             return
-        if
+        if "fargo3d_run_options" not in self.__dict__.keys():
             print(f"The process has not been run before.")
             return
         # Resume
-        if snapshot<0:
+        if snapshot < 0:
             snapshot = latest_snapshot_resumable
         print(f"Resuming from snapshot {snapshot}...")
-        self.run(
-
+        self.run(
+            mode="async",
+            mpioptions=mpioptions,
+            resume=True,
+            options=self.fargo3d_run_options + f" -S {snapshot}",
+            test=False,
+        )

     def _has_finished(self):
+        """Return ``True`` if the associated FARGO3D process has exited."""
         if self.fargo3d_process:
             poll = self.fargo3d_process.poll()
             if poll is None:
@@ -631,13 +884,24 @@ class Simulation(fargopy.Fargobj):
                 return True

     def _is_resumable(self):
+        """Determine the latest snapshot index from which the simulation can resume.
+
+        Returns
+        -------
+        int
+            Index of the latest resumable snapshot, or ``-1`` when the
+            simulation is not resumable or no outputs are present.
+        """
         if self.logfile is None:
-            print(
+            print(
+                f"The simulation has not been ran yet. Run <simulation>.run() before resuming"
+            )
             return -1
         latest_snapshot_resumable = max(self._get_nsnaps() - 2, 0)
         return latest_snapshot_resumable
-
+
     def clean_output(self):
+        """Remove all files from the configured output directory."""
         if self.output_dir is None:
             print(f"Output directory has not been set.")
             return
@@ -645,20 +909,33 @@ class Simulation(fargopy.Fargobj):
         if self._is_running():
             print(f"There is a running process. Please stop it before cleaning")
             return
-
+
         print(f"Cleaning output directory {self.output_dir}")
         cmd = f"rm -rf {self.output_dir}/*"
-        error,output = fargopy.Sys.run(cmd)
+        error, output = fargopy.Sys.run(cmd)
+
+    def _is_running(self, verbose=False):
+        """Return ``True`` when a live FARGO3D process is detected.

-
+        Parameters
+        ----------
+        verbose : bool, optional
+            When ``True`` print diagnostic messages.
+
+        Returns
+        -------
+        bool
+            ``True`` if a running process is associated with the
+            simulation, otherwise ``False``.
+        """
         lock_info = fargopy.Sys.is_locked(self.setup_dir)
         if lock_info:
             # Check if process is up
-            error,output = fargopy.Sys.run(f"ps -p {lock_info['pid']}")
+            error, output = fargopy.Sys.run(f"ps -p {lock_info['pid']}")
             if error == 0:
                 return True

-        if not self.has(
+        if not self.has("fargo3d_process"):
             if verbose:
                 print("The simulation has not been run before.")
             return False
@@ -667,7 +944,9 @@ class Simulation(fargopy.Fargobj):
             poll = self.fargo3d_process.poll()
             if poll is None:
                 if verbose:
-                    print(
+                    print(
+                        f"The process is already running with pid '{self.fargo3d_process.pid}'"
+                    )
                 return True
             else:
                 return False
@@ -675,6 +954,7 @@ class Simulation(fargopy.Fargobj):
         return False

     def _check_process(self):
+        """Return ``True`` if a process handler is present for the run."""
         if self.fargo3d_process is None:
             print(f"There is no FARGO3D process handler available.")
             return False
@@ -682,53 +962,84 @@ class Simulation(fargopy.Fargobj):

     # ##########################################################################
     # Operations on the FARGO3D directories
-    # ##########################################################################
-    def list_fields(self,quiet=False):
+    # ##########################################################################
+    def list_fields(self, quiet=False):
+        """Return a list of data file names present in the output directory.
+
+        Parameters
+        ----------
+        quiet : bool, optional
+            When ``True`` avoid printing the detailed file list.
+
+        Returns
+        -------
+        list
+            Filenames found in the output directory.
+        """
         if self.output_dir is None:
-            print(
+            print(
+                f"You have to set forst the outputs directory with <sim>.set_outputs('<directory>')"
+            )
         else:
-            error,output = fargopy.Sys.run(f"ls {self.output_dir}")
+            error, output = fargopy.Sys.run(f"ls {self.output_dir}")
             if error == 0:
                 files = output[:-1]
                 print(f"{len(files)} files in output directory")
                 if not quiet:
                     file_list = ""
-                    for i,file in enumerate(files):
+                    for i, file in enumerate(files):
                         file_list += f"{file}, "
-                        if ((i+1)%10) == 0:
+                        if ((i + 1) % 10) == 0:
                             file_list += "\n"
                     print(file_list)
                 return files
-

-    def load_macros(self, summaryfile=
-        """
-
-
-
+    def load_macros(self, summaryfile="summary0.dat"):
+        """Parse the PREPROCESSOR MACROS SECTION from a summary file.
+
+        Parameters
+        ----------
+        summaryfile : str, optional
+            Summary filename relative to the output directory (default
+            ``'summary0.dat'``).
+
+        Returns
+        -------
+        dict
+            Mapping of macro names to parsed values. Returns an empty
+            dict when the file is missing or parsing fails.
         """
-        summary_path =
+        summary_path = os.path.join(self.output_dir, summaryfile)
         if not os.path.isfile(summary_path):
             print(f"No summary file '{summary_path}' found.")
             return {}

         macros = {}
         in_macros_section = False
-        with open(summary_path,
+        with open(summary_path, "r") as f:
             for line in f:
                 # Detect the start of the macros section
-                if
+                if "PREPROCESSOR MACROS SECTION" in line:
                     in_macros_section = True
                     continue
                 # Stop if another section starts
-                if
+                if (
+                    in_macros_section
+                    and "SECTION" in line
+                    and "PREPROCESSOR MACROS SECTION" not in line
+                ):
                     break
                 if in_macros_section:
                     # Skip empty lines and lines that don't contain '='
-                    if
+                    if (
+                        line.strip() == ""
+                        or "=" not in line
+                        or line.strip().startswith("=")
+                        or line.strip().startswith("#")
+                    ):
                         continue
                     # Extract parameter name and value after the last '='
-                    parts = line.split(
+                    parts = line.split("=")
                     if len(parts) >= 2:
                         key = parts[0].strip()
                         value_str = parts[-1].strip()
@@ -741,99 +1052,150 @@ class Simulation(fargopy.Fargobj):
         self.simulation_macros = macros
         return macros

-
-    def load_planet_summary(self, summaryfile='summary0.dat'):
+    def load_planets(self, snapshot=0, summaryfile=None):
         """
-
-
-
+        Load planet data from a summary file.
+
+        Parameters
+        ----------
+        snapshot : int, optional
+            Snapshot index to load.
+        summaryfile : str, optional
+            Name of the summary file.
+
+        Returns
+        -------
+        list of Planet
+            List of Planet objects for the given snapshot.
         """
-
-        if os.path.isfile(summary_path):
-            planets = []
-            with open(summary_path, 'r') as f:
-                lines = f.readlines()
-                for line in lines:
-                    # Skip comments, empty lines, and separators
-                    if line.strip().startswith('#') or not line.strip() or set(line.strip()) == set('-'):
-                        continue
-                    parts = line.split()
-                    if len(parts) >= 3:
-                        # Only accept if the first field is NOT a number
-                        try:
-                            float(parts[0])
-                            continue # If it's a number, skip the line
-                        except ValueError:
-                            pass # If not a number, it's a planet name
-                        try:
-                            planet = {
-                                'name': parts[0],
-                                'distance': float(parts[1]),
-                                'mass': float(parts[2])
-                            }
-                            planets.append(planet)
-                        except ValueError:
-                            continue
-            return planets
+        import os

-        #
-
-
-
-
-
-        planet_mass = None
-        orbital_radius = None
-        planet_config = None
-        with open(varfile, 'r') as f:
-            for line in f:
-                # Look for PLANETMASS, ORBITALRADIUS, PLANETCONFIG lines
-                if line.strip().startswith('PLANETMASS'):
-                    try:
-                        planet_mass = float(line.split()[1])
-                    except Exception:
-                        pass
-                elif line.strip().startswith('ORBITALRADIUS'):
-                    try:
-                        orbital_radius = float(line.split()[1])
-                    except Exception:
-                        pass
-                elif line.strip().startswith('PLANETCONFIG'):
-                    try:
-                        planet_config = line.split()[1]
-                    except Exception:
-                        pass
-
-        if planet_mass is not None and orbital_radius is not None:
-            # Use the config file name as planet name if available
-            planet_name = planet_config.split('/')[-1].split('.')[0] if planet_config else 'planet'
-            return [{
-                'name': planet_name,
-                'distance': orbital_radius,
-                'mass': planet_mass
-            }]
-        else:
-            print("No planet data found in summary.dat or variables.par.")
+        # Determine summary file
+        if summaryfile is None:
+            summaryfile = f"summary{snapshot}.dat"
+        summary_path = os.path.join(self.output_dir, summaryfile)
+        if not os.path.isfile(summary_path):
+            print(f"No summary file '{summary_path}' found.")
             return []

+        # Get stellar mass
+        if (
+            not hasattr(self, "simulation_macros")
+            or "MSTAR" not in self.simulation_macros
+        ):
+            self.load_macros()
+        mstar = self.simulation_macros["MSTAR"]
+
+        # Parse initial positions from the top of the file (if available)
+        initial_planets = []
+        with open(summary_path, "r") as f:
+            lines = f.readlines()
+        for line in lines:
+            if (
+                line.strip().startswith("#")
+                or not line.strip()
+                or set(line.strip()) == set("-")
+            ):
+                continue
+            parts = line.split()
+            if len(parts) >= 3:
+                try:
+                    float(parts[0])
+                    continue # skip if first field is a number
+                except ValueError:
+                    pass
+                try:
+                    name = parts[0]
+                    x0 = float(parts[1])
+                    y0 = float(parts[2]) if len(parts) > 3 else 0.0
+                    z0 = 0.0
+                    initial_planets.append({"name": name, "posi": (x0, y0, z0)})
+                except Exception:
+                    continue

-
-
-
-
-
-
+        # Find PLANETARY SYSTEM SECTION
+        planet_indices = []
+        in_section = False
+        for i, line in enumerate(lines):
+            if "PLANETARY SYSTEM SECTION" in line:
+                in_section = True
+                continue
+            if in_section and line.strip().startswith("#### Planet"):
+                planet_indices.append(i + 1) # next line has the data
+            if in_section and line.strip().startswith("***"):
+                break
+
+        planets = []
+        for idx, data_idx in enumerate(planet_indices):
+            data_line = lines[data_idx].strip()
+            parts = data_line.split()
+            if len(parts) < 7:
+                continue
+            x, y, z = map(float, parts[0:3])
+            vx, vy, vz = map(float, parts[3:6])
+            mass = float(parts[-1]) # Always take the last column as mass
+
+            # Get initial position if available
+            if idx < len(initial_planets):
+                name = initial_planets[idx]["name"]
+                posi = initial_planets[idx]["posi"]
+            else:
+                name = f"Planet {idx}"
+                posi = (x, y, z)
+            planet = Planet(
+                name=name,
+                pos=(x, y, z),
+                vel=(vx, vy, vz),
+                mass=mass,
+                posi=posi,
+                mstar=mstar,
+            )
+            planets.append(planet)
+        return planets
+
+    def load_properties(
+        self,
+        quiet=False,
+        varfile="variables.par",
+        domain_prefix="domain_",
+        dimsfile="dims.dat",
+        summaryfile="summary0.dat",
+    ):
+        """
+        Load and print simulation properties, including variables, domains, dimensions, and planets.
+
+        Parameters
+        ----------
+        quiet : bool, optional
+            If True, suppress output.
+        varfile : str, optional
+            Name of the variables file.
+        domain_prefix : str, optional
+            Prefix for domain files.
+        dimsfile : str, optional
+            Name of the dimensions file.
+        summaryfile : str, optional
+            Name of the summary file.
+
+        Returns
+        -------
+        str
+            Summary information string.
+        """
+        info = []
         if self.output_dir is None:
-
-
+            msg = f"You have to set first the outputs directory with <sim>.set_outputs('<directory>')"
+            print(msg)
+            return msg

         # Read variables
         vars = self._load_variables(varfile)
         if not vars:
-            return
+            return "No variables found."
+        info.append(f"Simulation in {vars.DIM} dimensions")
         print(f"Simulation in {vars.DIM} dimensions")
-
-        # Read domains
+
+        # Read domains
         domains = self._load_domains(vars, domain_prefix)

         # Store the variables in the object
@@ -843,60 +1205,97 @@ class Simulation(fargopy.Fargobj):
|
|
|
843
1205
|
# Optionally read dims
|
|
844
1206
|
dims = self._load_dims(dimsfile)
|
|
845
1207
|
if len(dims):
|
|
846
|
-
self.dims = dims
|
|
1208
|
+
self.dims = dims
|
|
847
1209
|
|
|
848
1210
|
# Read the summary files
|
|
849
1211
|
self.nsnaps = self._get_nsnaps()
|
|
1212
|
+
info.append(f"Number of snapshots in output directory: {self.nsnaps}")
|
|
850
1213
|
print(f"Number of snapshots in output directory: {self.nsnaps}")
|
|
851
1214
|
|
|
852
|
-
#
|
|
853
|
-
self.planets = self.
|
|
1215
|
+
# Read planets from summary.dat using load_planet_states
|
|
1216
|
+
self.planets = self.load_planets(snapshot=0, summaryfile=summaryfile)
|
|
854
1217
|
if self.planets:
|
|
1218
|
+
info.append("Planets found in summary.dat:")
|
|
855
1219
|
print("Planets found in summary.dat:")
|
|
856
1220
|
for planet in self.planets:
|
|
857
|
-
|
|
1221
|
+
planet_info = f" Name: {planet.name}, Initial pos: {planet.posi}, Mass: {planet.mass}"
|
|
1222
|
+
info.append(planet_info)
|
|
1223
|
+
print(planet_info)
|
|
858
1224
|
else:
|
|
1225
|
+
info.append("No planet data found in summary.dat.")
|
|
859
1226
|
print("No planet data found in summary.dat.")
|
|
860
1227
|
|
|
861
|
-
|
|
1228
|
+
# Devuelve el string para mostrar en el cuadro de diálogo
|
|
1229
|
+
return "\n".join(info)

     def _get_nsnaps(self):
-        """Get the number of snapshots in an output directory
         """
+        Get the number of snapshots in the output directory.
+
+        Returns
+        -------
+        int
+            Number of snapshot files.
+        """
+        try:
+            # List all files in the output directory
+            files = [
+                f
+                for f in os.listdir(self.output_dir)
+                if f.startswith("summary") and f.endswith(".dat")
+            ]
             nsnaps = len(files)
             return nsnaps
+        except FileNotFoundError:
             print(f"No summary file in {self.output_dir}")
             return 0

     def _load_dims(self, dimsfile):
-        """Parse the dim directory
         """
+        Parse the dimensions file.
+
+        Parameters
+        ----------
+        dimsfile : str
+            Name of the dimensions file.
+
+        Returns
+        -------
+        np.ndarray or list
+            Array of dimensions or empty list if not found.
+        """
+        dimsfile = os.path.join(self.output_dir, dimsfile)
         if not os.path.isfile(dimsfile):
-            #print(f"No file with dimensions '{dimsfile}' found.")
+            # print(f"No file with dimensions '{dimsfile}' found.")
             return []
         dims = np.loadtxt(dimsfile)
         return dims

-    def _load_variables(self,varfile):
-        """Parse the file with the variables
+    def _load_variables(self, varfile):
         """
+        Parse the file with the simulation variables.
+
+        Parameters
+        ----------
+        varfile : str
+            Name of the variables file.

+        Returns
+        -------
+        fargopy.Dictobj
+            Object containing simulation variables.
+        """
+        varfile = os.path.join(self.output_dir, varfile)
         if not os.path.isfile(varfile):
             print(f"No file with variables named '{varfile}' found.")
             return

         print(f"Loading variables")
         variables = np.genfromtxt(
-            varfile,
+            varfile,
+            dtype={"names": ("parameters", "values"), "formats": ("|S30", "|S300")},
         ).tolist()
+
         vars = dict()
         for posicion in variables:
             str_value = posicion[1].decode("utf-8")
@@ -908,33 +1307,51 @@ class Simulation(fargopy.Fargobj):
             except:
                 value = str_value
             vars[posicion[0].decode("utf-8")] = value
+
         vars = fargopy.Dictobj(dict=vars)
         print(f"{len(vars.__dict__.keys())} variables loaded")

         # Create additional variables
-        variables = [
-        if vars.COORDINATES ==
-            variables = [
-        elif vars.COORDINATES ==
-            variables = [
+        variables = ["x", "y", "z"]
+        if vars.COORDINATES == "cylindrical":
+            variables = ["phi", "r", "z"]
+        elif vars.COORDINATES == "spherical":
+            variables = ["phi", "r", "theta"]
         vars.VARIABLES = variables

-        vars.__dict__[f
-        vars.__dict__[f
-        vars.__dict__[f
+        vars.__dict__[f"N{variables[0].upper()}"] = vars.NX
+        vars.__dict__[f"N{variables[1].upper()}"] = vars.NY
+        vars.__dict__[f"N{variables[2].upper()}"] = vars.NZ

         # Dimension of the domain
         vars.DIM = 2 if vars.NZ == 1 else 3
-        return vars

-        borders=[[],[3,-3],[3,-3]],
-        middle=True):
+        return vars

+    def _load_domains(self, vars, domain_prefix, borders=None, middle=True):
+        """
+        Load domain coordinate arrays from files.
+
+        Parameters
+        ----------
+        vars : fargopy.Dictobj
+            Simulation variables.
+        domain_prefix : str
+            Prefix for domain files.
+        borders : list, optional
+            List of border slices for each variable.
+        middle : bool, optional
+            If True, average between cell coordinates.
+
+        Returns
+        -------
+        fargopy.Dictobj
+            Object containing domain arrays.
+        """
+        if borders is None:
+            borders = [[], [3, -3], [3, -3]]
         # Coordinates
-        variable_suffixes = [
+        variable_suffixes = ["x", "y", "z"]
         print(f"Loading domain in {vars.COORDINATES} coordinates:")

         # Correct dims in case of 2D
@@ -943,134 +1360,212 @@ class Simulation(fargopy.Fargobj):

         # Load domains
         domains = dict()
-        domains[
+        domains["extrema"] = dict()

-        for i,variable_suffix in enumerate(variable_suffixes):
-            domain_file =
+        for i, variable_suffix in enumerate(variable_suffixes):
+            domain_file = os.path.join(
+                self.output_dir, f"{domain_prefix}{variable_suffix}.dat"
+            )
             if os.path.isfile(domain_file):
                 # Load data from file
                 domains[vars.VARIABLES[i]] = np.genfromtxt(domain_file)

                 if len(borders[i]) > 0:
                     # Drop the border of the domain
-                    domains[vars.VARIABLES[i]] = domains[vars.VARIABLES[i]][
+                    domains[vars.VARIABLES[i]] = domains[vars.VARIABLES[i]][
+                        borders[i][0] : borders[i][1]
+                    ]

                 if middle:
                     # Average between domain cell coordinates
-                    domains[vars.VARIABLES[i]] = 0.5*(
+                    domains[vars.VARIABLES[i]] = 0.5 * (
+                        domains[vars.VARIABLES[i]][:-1] + domains[vars.VARIABLES[i]][1:]
+                    )
+
                 # Show indices and value map
-                domains[
+                domains["extrema"][vars.VARIABLES[i]] = [
+                    [0, domains[vars.VARIABLES[i]][0]],
+                    [-1, domains[vars.VARIABLES[i]][-1]],
+                ]
+
+                print(
+                    f"\tVariable {vars.VARIABLES[i]}: {len(domains[vars.VARIABLES[i]])} {domains['extrema'][vars.VARIABLES[i]]}"
+                )
             else:
                 print(f"\tDomain file {domain_file} not found.")
         domains = fargopy.Dictobj(dict=domains)

-        return domains
+        return domains
+
+    def load_field(
+        self,
+        fields,
+        slice=None,
+        snapshot=None,
+        type=None,
+        interpolate=None,
+        coords="cartesian",
+        cut=None,
+    ):
         """
-        Load a field from the simulation.
-        Parameters:
-            fields (str or list of str): Field name(s) to load.
-            slice (str, optional): Slice for 2D data (e.g., 'theta=1.5').
-            snapshot (int, optional): Snapshot index to load.
-            type (str, optional): Type of the field ('scalar' or 'vector'). Default is 'scalar'.
-            interpolate (bool, optional): Whether to interpolate the field. Default is False.
-        Returns:
-            fargopy.Field or fargopy.DataHandler: The loaded field(s) or DataHandler object(s).
+        Load a field or multiple fields from the simulation.
         """
+
+        # Ensure fields is a list (but keep single-field compatibility)
+        single_input = False
         if isinstance(fields, str):
+            single_input = True
             fields = [fields]

-        #
-        #
-        if field_type == 'scalar':
-            file_name = f"{field}{str(snapshot)}.dat"
-            file_field = f"{self.output_dir}/{file_name}".replace('//', '/')
-            field_data = self._load_field_scalar(file_field)
-        elif field_type == 'vector':
-            field_data = []
-            variables = ['x', 'y']
-            if self.vars.DIM == 3:
-                variables += ['z']
-            for variable in variables:
-                file_name = f"{field}{variable}{str(snapshot)}.dat"
-                file_field = f"{self.output_dir}/{file_name}".replace('//', '/')
-                field_data.append(self._load_field_scalar(file_field))
+        # Default behavior: when interpolate is None, treat it as True
+        # to preserve the FieldInterpolator-based API by default.
+        # Backward compatibility:
+        # - interpolate=True -> return FieldInterpolator (explicit)
+        # - interpolate=None -> treated as True -> return FieldInterpolator
+        # - interpolate=False -> return legacy Field or list of Fields
+
+        if interpolate is None:
+            interpolate = True
+
+        # --- INTERPOLATE == True : return FieldInterpolator ---
+        if interpolate is True:
+            handler = fargopy.FieldInterpolator(self)
+            handler.load_data(
+                fields=fields, slice=slice, snapshots=snapshot, cut=cut, coords=coords
+            )
+            return handler
+
+        # --- INTERPOLATE == False : legacy single-field behavior ---
+        if not self.has("vars"):
+            dims, vars, domains = self.load_properties()
+
+        snapshot = 0 if snapshot is None else snapshot
+        loaded_fields = []
+
+        for field in fields:
+            # Infer field type unless provided
+            field_type = type
+            if field_type is None:
+                if field in ["gasdens", "gasenergy"]:
+                    field_type = "scalar"
+                elif field == "gasv":
+                    field_type = "vector"
                 else:
-                    raise ValueError(f"
+                    raise ValueError(f"Field type for '{field}' could not be inferred.")
+
+            # Load scalar
+            if field_type == "scalar":
+                file_name = f"{field}{snapshot}.dat"
+                file_field = os.path.join(self.output_dir, file_name)
+                data = self._load_field_scalar(file_field)
+
+            # Load vector
+            elif field_type == "vector":
+                data = []
+                components = ["x", "y"] + (["z"] if self.vars.DIM == 3 else [])
+                for comp in components:
+                    file_name = f"{field}{comp}{snapshot}.dat"
+                    file_field = os.path.join(self.output_dir, file_name)
+                    data.append(self._load_field_scalar(file_field))
+                data = np.array(data)
+
+            # Create Field
+            loaded_field = fargopy.Field(
+                data=np.array(data),
+                coordinates=self.vars.COORDINATES,
+                domains=self.domains,
+                type=field_type,
+            )
+
+            # Apply slicing
+            if slice:
+                sliced_data, mesh = loaded_field.meshslice(slice=slice)
+                loaded_field = fargopy.Dictobj(dict=dict(data=sliced_data, mesh=mesh))

-            if slice:
-                sliced_data, mesh = loaded_field.meshslice(slice=slice)
-                # Pass a single dictionary to Dictobj
-                loaded_field = fargopy.Dictobj(dict=dict(data=sliced_data, mesh=mesh))
+            loaded_fields.append(loaded_field)

+        result = loaded_fields if len(loaded_fields) > 1 else loaded_fields[0]
+        return result
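
A sketch of the two return modes implemented above, assuming an already-configured `Simulation` object `sim` and FARGO3D's usual `gas*` field names: the default path hands back a `FieldInterpolator`, while `interpolate=False` keeps the legacy `Field` behavior.

```python
# Default (interpolate=None is treated as True): returns a fargopy.FieldInterpolator
handler = sim.load_field("gasdens", snapshot=0)

# Legacy behavior: returns a fargopy.Field (or a Dictobj when slice= is given)
legacy = sim.load_field("gasdens", snapshot=0, interpolate=False)

print(type(handler).__name__, type(legacy).__name__)
```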

+    def _load_field_scalar(self, file):
+        """
+        Load a scalar field from a file.

+        Parameters
+        ----------
+        file : str
+            Path to the field file.

+        Returns
+        -------
+        np.ndarray
+            Field data array.
         """
         if os.path.isfile(file):
-            field_data = np.fromfile(file).reshape(
+            field_data = np.fromfile(file).reshape(
+                int(self.vars.NZ), int(self.vars.NY), int(self.vars.NX)
+            )
             return field_data
         else:
             raise AssertionError(f"File with field '{file}' not found")
-    def
-        """
+
+    def _load_field_raw(self, field, snapshot=0, field_type=None):
+        """
+        Internal helper to load a single field as a `fargopy.Field` without going
+        through `load_field` dispatching. This prevents recursion when higher-level
+        helpers request raw data.
+        """
+        # Infer type if not provided
+        if field_type is None:
+            if field in ["gasdens", "gasenergy"]:
+                field_type = "scalar"
+            elif field == "gasv":
+                field_type = "vector"
+            else:
+                raise ValueError(f"Field type for '{field}' could not be inferred.")
+
+        # Load scalar
+        if field_type == "scalar":
+            file_name = f"{field}{snapshot}.dat"
+            file_field = os.path.join(self.output_dir, file_name)
+            data = self._load_field_scalar(file_field)
+
+        # Load vector
+        elif field_type == "vector":
+            data = []
+            components = ["x", "y"]
+            if self.vars.DIM == 3:
+                components += ["z"]
+            for comp in components:
+                file_name = f"{field}{comp}{snapshot}.dat"
+                file_field = os.path.join(self.output_dir, file_name)
+                data.append(self._load_field_scalar(file_field))
+            data = np.array(data)
+
+        return fargopy.Field(
+            data=np.array(data),
+            coordinates=self.vars.COORDINATES,
+            domains=self.domains,
+            type=field_type,
+        )
+
+    def load_allfields(self, fluid, snapshot=None, type="scalar"):
+        """
+        Load all fields in the output directory for a given fluid.
+
+        Parameters
+        ----------
+        fluid : str
+            Name of the fluid (e.g., 'gas').
+        snapshot : int, optional
+            Snapshot index to load. If None, loads all snapshots.
+        type : str, optional
+            Field type ('scalar' or 'vector').
+
+        Returns
+        -------
+        fargopy.Dictobj
+            Object containing all loaded fields.
         """
         qall = False
         if snapshot is None:
@@ -1078,14 +1573,16 @@ class Simulation(fargopy.Fargobj):
             fields = fargopy.Dictobj()
         else:
             fields = fargopy.Dictobj()
+
         # Search for field files
-        pattern =
+        pattern = os.path.join(self.output_dir, f"{fluid}*.dat")
+        import glob
+
+        files_found = sorted(glob.glob(pattern))

-        if
+        if files_found:
             size = 0
-            for file_field in
+            for file_field in files_found:
                 comps = Simulation._parse_file_field(file_field)
                 if comps:
                     if qall:
@@ -1093,16 +1590,16 @@ class Simulation(fargopy.Fargobj):
                         field_name = comps[0]
                         field_snap = int(comps[1])

-                        if type ==
+                        if type == "scalar":
                             field_data = self._load_field_scalar(file_field)
-                        elif type ==
+                        elif type == "vector":
                             field_data = []
-                            variables = [
+                            variables = ["x", "y"]
                             if self.vars.DIM == 3:
-                                variables += [
-                            for i,variable in enumerate(variables):
+                                variables += ["z"]
+                            for i, variable in enumerate(variables):
                                 file_name = f"{fluid}{variable}{str(field_snap)}.dat"
-                                file_field =
+                                file_field = os.path.join(self.output_dir, file_name)
                                 field_data += [self._load_field_scalar(file_field)]
                             field_data = np.array(field_data)
                             field_name = field_name[:-1]
@@ -1110,52 +1607,97 @@ class Simulation(fargopy.Fargobj):
                         if str(field_snap) not in fields.keys():
                             fields.__dict__[str(field_snap)] = fargopy.Dictobj()
                         size += field_data.nbytes
-                        (fields.__dict__[str(field_snap)]).__dict__[f"{field_name}"] =
+                        (fields.__dict__[str(field_snap)]).__dict__[f"{field_name}"] = (
+                            fargopy.Field(
+                                data=field_data,
+                                coordinates=self.vars.COORDINATES,
+                                domains=self.domains,
+                                type=type,
+                            )
+                        )

                     else:
                         # Store a specific snapshot
                         if int(comps[1]) == snapshot:
                             field_name = comps[0]

-                            if type ==
+                            if type == "scalar":
                                 field_data = self._load_field_scalar(file_field)
-                            elif type ==
+                            elif type == "vector":
                                 field_data = []
-                                variables = [
+                                variables = ["x", "y"]
                                 if self.vars.DIM == 3:
-                                    variables += [
-                                for i,variable in enumerate(variables):
-                                    file_name =
+                                    variables += ["z"]
+                                for i, variable in enumerate(variables):
+                                    file_name = (
+                                        f"{fluid}{variable}{str(field_snap)}.dat"
+                                    )
+                                    file_field = os.path.join(
+                                        self.output_dir, file_name
+                                    )
                                     field_data += [self._load_field_scalar(file_field)]
                                 field_data = np.array(field_data)
                                 field_name = field_name[:-1]

                                 size += field_data.nbytes
-                                fields.__dict__[f"{field_name}"] = fargopy.Field(
+                                fields.__dict__[f"{field_name}"] = fargopy.Field(
+                                    data=field_data,
+                                    coordinates=self.vars.COORDINATES,
+                                    domains=self.domains,
+                                    type=type,
+                                )

         else:
-            raise ValueError(
+            raise ValueError(
+                f"No field found with pattern '{pattern}'. Change the fluid"
+            )
+
         if qall:
-            fields.snapshots = sorted([int(s) for s in fields.keys() if s !=
-            fields.size = size/1024**2
+            fields.snapshots = sorted([int(s) for s in fields.keys() if s != "size"])
+            fields.size = size / 1024**2
         return fields
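
As a rough usage sketch (again assuming a configured `sim`), `load_allfields` collects every matching `gas*` file of a snapshot into a `Dictobj`; which keys actually appear depends on the files present in the output directory.

```python
fields = sim.load_allfields("gas", snapshot=0, type="scalar")
print(list(fields.keys()))  # e.g. field names such as 'gasdens', 'gasenergy', ...
```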

     @staticmethod
     def _parse_file_field(file_field):
+        """
+        Parse a field filename to extract the field name and snapshot number.
+
+        Parameters
+        ----------
+        file_field : str
+            Filename of the field.
+
+        Returns
+        -------
+        list or None
+            List with field name and snapshot number, or None if not matched.
+        """
         basename = os.path.basename(file_field)
         comps = None
-        match = re.match(
+        match = re.match("([a-zA-Z]+)(\d+).dat", basename)
         if match is not None:
-            comps = [match.group(i) for i in range(1,match.lastindex+1)]
+            comps = [match.group(i) for i in range(1, match.lastindex + 1)]
         return comps

     def __repr__(self):
+        """
+        String representation of the Simulation object.
+
+        Returns
+        -------
+        str
+        """
         repr = f"""FARGOPY simulation (fargo3d_dir = '{self.fargo3d_dir}', setup = '{self.setup}')"""
         return repr

     def __str__(self):
+        """
+        Detailed string with simulation information.
+
+        Returns
+        -------
+        str
+        """
         str = f"""Simulation information:
 FARGO3D directory: {self.fargo3d_dir}
 Outputs: {self.outputs_dir}
@@ -1178,74 +1720,503 @@ class Simulation(fargopy.Fargobj):
     # ##########################################################################
     @staticmethod
     def list_setups():
-        """List setups available in the FARGO3D directory
         """
+        Print all valid setup directories detected under ``FP_FARGO3D_DIR``.
+
+        Returns
+        -------
+        None
+            The setups are printed to stdout; nothing is returned.
+        """
+        import glob
+
+        pattern = os.path.join(fargopy.Conf.FP_FARGO3D_DIR, "setups", "*")
+        output = sorted(glob.glob(pattern))
+        list_str = ""
+        for setup_dir in output:
+            setup_dir = setup_dir.replace("//", "/")
+            setup_name = setup_dir.split("/")[-1]
+            setup_par = os.path.join(setup_dir, f"{setup_name}.par")
             if os.path.isfile(setup_par):
-                print(
+                list_str += f"Setup '{setup_name}' in '{setup_dir}'\n"
+        print(list_str)
+
     @staticmethod
     def list_precomputed():
-        """List the available precomputed simulations
         """
-            print(f"{key}:\n\tDescription: {item['description']}\n\tSize: {item['size']} MB")
-    @staticmethod
-    def download_precomputed(setup=None,download_dir='/tmp',quiet=False,clean=True):
-        """Download a precomputed output from Google Drive FARGOpy public repository.
-        Args:
-            setup: string, default = None:
-                Name of the setup. For a list see fargopu.PRECOMPUTED_SIMULATIONS dictionary.
+        Display the catalog of downloadable precomputed simulations with descriptions and sizes.

-        Return:
-            If successful returns the output directory.
+        Returns
+        -------
+        None
+            The listing is printed to stdout.
+        """
+        for key, item in PRECOMPUTED_SIMULATIONS.items():
+            print(
+                f"{key}:\n\tDescription: {item['description']}\n\tSize: {item['size']} MB"
+            )

+    @staticmethod
+    def download_precomputed(setup=None, download_dir=None, quiet=False, clean=True):
+        """
+        Download and extract a precomputed output archive from the public repository.
+
+        Parameters
+        ----------
+        setup : str, optional
+            Name of the entry in ``PRECOMPUTED_SIMULATIONS``.
+        download_dir : str, optional
+            Destination directory for the compressed file and extracted output.
+        quiet : bool, optional
+            When True, suppress download progress indicators.
+        clean : bool, optional
+            Remove the downloaded archive after extraction when set to True.
+
+        Returns
+        -------
+        str
+            Absolute path to the extracted output directory, or an empty string on failure.
         """
         if setup is None:
-            print(
+            print(
+                f"You must provide a setup name. Available setups: {list(PRECOMPUTED_SIMULATIONS.keys())}"
+            )
+            return ""
+
+        # Set default download directory based on OS
+        if download_dir is None:
+            if os.name == "nt":  # Windows
+                download_dir = os.path.join(
+                    os.environ.get("TEMP", "C:\\temp"), "fargopy_data"
+                )
+            else:  # Unix-like systems
+                download_dir = "/tmp"
+            # Create directory if it doesn't exist
+            os.makedirs(download_dir, exist_ok=True)
+
         if not os.path.isdir(download_dir):
             print(f"Download directory '{download_dir}' does not exist.")
-            return
+            return ""
         if setup not in PRECOMPUTED_SIMULATIONS.keys():
-            print(
+            print(
+                f"Precomputed setup '{setup}' is not among the available setups: {list(PRECOMPUTED_SIMULATIONS.keys())}"
+            )
+            return ""
+
+        output_dir = os.path.join(download_dir, setup)
         if os.path.isdir(output_dir):
             print(f"Precomputed output directory '{output_dir}' already exist")
             return output_dir
         else:
-            filename = setup +
-            fileloc = download_dir
+            filename = setup + ".tgz"
+            fileloc = os.path.join(download_dir, filename)
             if os.path.isfile(fileloc):
                 print(f"Precomputed file '{fileloc}' already downloaded")
             else:
                 # Download the setups
-                print(
+                print(
+                    f"Downloading {filename} from cloud (compressed size around {PRECOMPUTED_SIMULATIONS[setup]['size']} MB) into {download_dir}"
+                )
+                url = PRECOMPUTED_BASEURL + PRECOMPUTED_SIMULATIONS[setup]["id"]
+                gdown.download(url, fileloc, quiet=quiet)
+
+                # Uncompress the setups - Windows compatible
+                print(f"Uncompressing {filename} into {output_dir}")
+                try:
+                    import tarfile
+
+                    with tarfile.open(fileloc, "r:gz") as tar:
+                        tar.extractall(path=download_dir)
+                    print(f"Done.")
+
+                    # Clean up the tar file if requested
+                    if clean:
+                        os.remove(fileloc)
+
+                except Exception as e:
+                    print(f"Error uncompressing file: {e}")
+                    # Fallback to system command for Unix-like systems
+                    if os.name != "nt":
+                        fargopy.Sys.simple(f"cd {download_dir};tar zxf {filename}")
+                        if clean:
+                            fargopy.Sys.simple(f"cd {download_dir};rm -rf {filename}")
+                    else:
+                        print(
+                            "Failed to decompress on Windows. Please install tar or 7-zip."
+                        )
+                        return ""
+
+            return output_dir
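
A download sketch for the static helpers above; the setup name passed here is a hypothetical catalog key, so list the catalog first and substitute one of the printed entries.

```python
import fargopy

fargopy.Simulation.list_precomputed()            # prints the available catalog entries
output_dir = fargopy.Simulation.download_precomputed(setup="fargo")  # hypothetical key
if output_dir:
    sim = fargopy.Simulation()                   # assumed no-argument constructor
    sim.set_outputs(output_dir)
```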
+
+    def time_scale(self, scale="orbits"):
+        """
+        Calculates the time scale of the simulation in different units.
+
+        Parameters
+        ----------
+        scale : str, optional
+            'orbits' to calculate the number of orbits completed by the planet,
+            'duration' for the total simulation time in simulation units.
+
+        Returns
+        -------
+        float
+            Number of orbits or total simulation time, depending on scale.
+        """
+        import contextlib
+        import io
+        import numpy as np
+
+        with contextlib.redirect_stdout(io.StringIO()):
+            # Load necessary parameters
+            self.load_macros()
+            self.load_planet_summary()
+
+            # Extract parameters from macros and planet summary
+            G = self.G  # Gravitational constant in simulation units
+            M_star = self.simulation_macros["MSTAR"]  # Stellar mass in simulation units
+            planet = self.planets[0]  # Assume the first planet for calculations
+            a = planet["distance"]  # Orbital radius in simulation units
+
+            # Calculate orbital period (T) using Kepler's third law
+            T = 2 * np.pi * np.sqrt(a**3 / (G * M_star))
+
+            # Extract simulation parameters
+            NINTERM = self._load_variables("variables.par").NINTERM
+            DT = self._load_variables("variables.par").DT
+            NTOT = self._load_variables("variables.par").NTOT
+
+            # Calculate total simulation time
+            total_time = NTOT * DT  # Total time in simulation units
+
+            if scale == "orbits":
+                # Calculate the number of orbits completed by the planet
+                orbits_num = total_time / T
+                return orbits_num

+            elif scale == "duration":
+                # Return the total simulation time in simulation units
+                return total_time
+
+            else:
+                raise ValueError("Invalid scale. Choose either 'orbits' or 'duration'.")
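
The arithmetic behind `time_scale` is Kepler's third law, T = 2π√(a³/(G M_*)), followed by dividing the total integrated time NTOT·DT by T. A self-contained check with illustrative code-unit values (G = M_* = a = 1, so T = 2π):

```python
import numpy as np

G, M_star, a = 1.0, 1.0, 1.0
T = 2 * np.pi * np.sqrt(a**3 / (G * M_star))  # orbital period = 2*pi in code units
NTOT, DT = 2000, np.pi / 100.0                # hypothetical run parameters
orbits = (NTOT * DT) / T                      # 20*pi / (2*pi) = 10 orbits
print(T, orbits)
```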
+
+    def to_paraview(self, snapshot=0, dir=".", basename=None):
+        """
+        Export a snapshot to a VTU (UnstructuredGrid) file with physical XYZ coordinates
+        and point data arrays for density and Cartesian velocity.
+
+        Parameters
+        ----------
+        snapshot : int
+            Snapshot index to export (default: 0).
+        dir : str
+            Output directory where the .vtu file will be written (default: current directory).
+        basename : str or None
+            Base name for the output file (without extension). If None, a default name
+            using the simulation setup and snapshot is created.
+
+        Notes
+        -----
+        - Requires the 'vtk' Python package (and vtk.util.numpy_support).
+        - Expects self.load_field('gasdens') and self.load_field('gasv') to return
+          fargopy.Field-like objects where .data is a numpy array with shapes:
+              rho: (nt, nr, nphi)
+              vel: either (3, nt, nr, nphi) or (nt, nr, nphi, 3)
+        - Domains expected in self.domains as theta, r, phi arrays.
+        - Output is a single .vtu unstructured grid with VTK_VOXEL cells and point data arrays:
+            - "rho" (scalar)
+            - "vel_cart" (3-component vector)
+        """
+        import os
+        import numpy as np
+
+        try:
+            import vtk
+            from vtk.util import numpy_support as ns
+        except Exception as e:
+            raise RuntimeError(
+                "VTK is required for to_paraview. Install the 'vtk' package."
+            ) from e
+
+        # prepare output path
+        os.makedirs(dir, exist_ok=True)
+        if basename is None:
+            base = getattr(self, "setup", "simulation")
+            basename = f"{base}_snap{snapshot}"
+
+        filename = os.path.join(dir, basename)
+
+        # Load fields (tolerant to different return types)
+        gasdens = self.load_field(
+            fields="gasdens", snapshot=snapshot, interpolate=False
+        )
+        gasv = self.load_field(fields="gasv", snapshot=snapshot, interpolate=False)
+
+        rho = np.log10(
+            getattr(gasdens, "data", gasdens) * self.URHO
+        )  # convert to physical units and log10
+        vel = getattr(gasv, "data", gasv) * self.UL / self.UT * 1e-5
+
+        # Normalize shapes
+        if rho.ndim == 4 and rho.shape[0] == 1:
+            rho = rho[0]
+        if rho.ndim != 3:
+            raise ValueError(
+                f"Unexpected rho shape: {rho.shape}. Expected (nt,nr,nphi)."
+            )
+
+        # velocity can be (3,nt,nr,nphi) or (nt,nr,nphi,3)
+        if vel.ndim == 4 and vel.shape[0] == 3:
+            vel_components = vel
+        elif vel.ndim == 4 and vel.shape[-1] == 3:
+            vel_components = np.moveaxis(vel, -1, 0)
+        else:
+            raise ValueError(
+                f"Unexpected vel shape: {vel.shape}. Expected (3,nt,nr,nphi) or (nt,nr,nphi,3)."
+            )
+
+        # Get spherical coordinate grids from self.domains
+        try:
+            theta = self.domains.theta
+            r = self.domains.r * self.UL / self.AU
+            phi = self.domains.phi
+        except Exception:
+            # If domain attribute names differ, try common alternatives
+            try:
+                theta = self.domains.theta
+                r = self.domains.r * self.UL / self.AU
+                phi = self.domains.phi
+            except Exception as e:
+                raise RuntimeError(
+                    "Could not find theta, r, phi in self.domains"
+                ) from e
+
+        # Create meshgrid (vectorized)
+        TT, RR, PP = np.meshgrid(theta, r, phi, indexing="ij")  # shape (nt,nr,nphi)
+
+        # Coordinates in Cartesian
+        X = (RR * np.sin(TT) * np.cos(PP)).ravel(order="C").astype(np.float64)
+        Y = (RR * np.sin(TT) * np.sin(PP)).ravel(order="C").astype(np.float64)
+        Z = (RR * np.cos(TT)).ravel(order="C").astype(np.float64)
+        pts = np.column_stack([X, Y, Z])
+
+        # Cartesian velocity components (vectorized)
+        v_theta = vel_components[0]
+        v_r = vel_components[1]
+        v_phi = vel_components[2]
+
+        v_x = (
+            (
+                v_r * np.sin(TT) * np.cos(PP)
+                + v_theta * np.cos(TT) * np.cos(PP)
+                - v_phi * np.sin(PP)
+            )
+            .ravel(order="C")
+            .astype(np.float64)
+        )
+
+        v_y = (
+            (
+                v_r * np.sin(TT) * np.sin(PP)
+                + v_theta * np.cos(TT) * np.sin(PP)
+                + v_phi * np.cos(PP)
+            )
+            .ravel(order="C")
+            .astype(np.float64)
+        )
+
+        v_z = (
+            (v_r * np.cos(TT) - v_theta * np.sin(TT))
+            .ravel(order="C")
+            .astype(np.float64)
+        )
+
+        vel_cart = np.column_stack([v_x, v_y, v_z])
+
+        # Density flattened
+        rho_flat = rho.ravel(order="C").astype(np.float64)
+
+        # VTK points
+        vtk_points = vtk.vtkPoints()
+        vtk_points.SetData(ns.numpy_to_vtk(pts, deep=True))
+
+        # Build VOXEL connectivity (vectorized)
+        nt, nr, nphi = rho.shape
+        ntm = nt - 1
+        nrm = nr - 1
+        npm = nphi - 1
+        ncells = ntm * nrm * npm
+
+        it, ir_, ip = np.meshgrid(
+            np.arange(ntm), np.arange(nrm), np.arange(npm), indexing="ij"
+        )
+        it = it.ravel()
+        ir_ = ir_.ravel()
+        ip = ip.ravel()
+        base = it * nr * nphi + ir_ * nphi + ip
+
+        p000 = base
+        p001 = base + 1
+        p010 = base + nphi
+        p011 = base + nphi + 1
+        p100 = base + nr * nphi
+        p101 = base + nr * nphi + 1
+        p110 = base + nr * nphi + nphi
+        p111 = base + nr * nphi + nphi + 1
+
+        cells = np.column_stack(
+            [
+                np.full(ncells, 8, dtype=np.int64),
+                p000,
+                p001,
+                p010,
+                p011,
+                p100,
+                p101,
+                p110,
+                p111,
+            ]
+        ).ravel()
+
+        vtk_cells = vtk.vtkCellArray()
+        vtk_cells.SetCells(ncells, ns.numpy_to_vtkIdTypeArray(cells, deep=True))
+
+        # Unstructured grid
+        grid = vtk.vtkUnstructuredGrid()
+        grid.SetPoints(vtk_points)
+        grid.SetCells(vtk.VTK_VOXEL, vtk_cells)
+
+        # Point data arrays
+        vtk_rho = ns.numpy_to_vtk(rho_flat, deep=True)
+        vtk_rho.SetName("rho")
+        vtk_vel = ns.numpy_to_vtk(vel_cart, deep=True)
+        vtk_vel.SetNumberOfComponents(3)
+        vtk_vel.SetName("vel_cart")
+
+        grid.GetPointData().AddArray(vtk_rho)
+        grid.GetPointData().AddArray(vtk_vel)
+
+        # Write VTU
+        writer = vtk.vtkXMLUnstructuredGridWriter()
+        writer.SetFileName(filename + ".vtu")
+        writer.SetInputData(grid)
+        if writer.Write() == 0:
+            raise RuntimeError(f"Failed writing VTU file '{filename}.vtu'.")
+
+        return filename + ".vtu"
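
A usage sketch for the exporter above, assuming the optional `vtk` package is installed and `sim` is a 3D spherical-coordinate simulation with its properties loaded; the output directory is a placeholder.

```python
vtu_path = sim.to_paraview(snapshot=0, dir="./vtk_out")
print(f"Wrote {vtu_path}; open it in ParaView and color by 'rho' or 'vel_cart'.")
```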
+
+
+class Planet:
+    """
+    Represents a planet in the simulation, holding its physical state and properties.
+
+    Attributes
+    ----------
+    name : str
+        Name or label of the planet.
+    mass : float
+        Planet mass (in Msun or simulation units).
+    pos : Planet.Vector
+        Current cartesian position (x, y, z) in AU.
+    vel : Planet.Vector
+        Current cartesian velocity (vx, vy, vz) in AU/UT.
+    posi : Planet.Vector
+        Initial cartesian position (x, y, z) in AU.
+    mstar : float
+        Stellar mass (in Msun or simulation units).
+
+    Properties
+    ----------
+    hill_radius : float
+        The Hill radius of the planet (in AU), computed from its current position and mass.
+
+    Example
+    -------
+    >>> jupiter = planets[0]
+    >>> print(jupiter.pos.x, jupiter.vel.y, jupiter.hill_radius)
+    """
+
+    def __init__(self, name, pos, vel, mass, posi, mstar):
+        self.name = name
+        self.mass = mass
+        self.pos = Planet._Vector(*pos)
+        self.vel = Planet._Vector(*vel)
+        self.posi = Planet._Vector(*posi)
+        self.mstar = mstar
+
+    class _Vector:
+        """
+        Simple vector class for 3D coordinates, allowing attribute and index access.
+
+        Attributes
+        ----------
+        x : float
+            X coordinate.
+        y : float
+            Y coordinate.
+        z : float
+            Z coordinate.
+        """
+
+        def __init__(self, x, y, z):
+            self.x = x
+            self.y = y
+            self.z = z
+
+        def __getitem__(self, idx):
+            """
+            Return the coordinate at index ``idx`` (0→x, 1→y, 2→z).
+            """
+            return [self.x, self.y, self.z][idx]
+
+        def __array__(self):
+            """
+            Expose the vector as a NumPy array for downstream numerical routines.
+            """
+            import numpy as np
+
+            return np.array([self.x, self.y, self.z])
+
+        def __repr__(self):
+            """
+            Debug-friendly string showing the three Cartesian components.
+            """
+            return f"[{self.x}, {self.y}, {self.z}]"
+
+    @property
+    def hill_radius(self):
+        """
+        .. :no-index:
+
+        Returns the Hill radius in AU, using the current position and mass.
+
+        Returns
+        -------
+        float
+            Hill radius in AU.
+        """
+        AU_to_cm = 1.495978707e13
+        Mjup_to_g = 1.898e30
+        Msun_to_g = 1.989e33
+
+        # Distance to star (AU)
+        r_au = (self.pos.x**2 + self.pos.y**2 + self.pos.z**2) ** 0.5
+        m_jup = self.mass * 1e3  # Msun to Mjup
+        mstar_g = float(self.mstar) * Msun_to_g
+        r_cm = r_au * AU_to_cm
+        m_p = m_jup * Mjup_to_g
+
+        r_hill_cm = r_cm * (m_p / (3 * mstar_g)) ** (1 / 3)
+        r_hill_au = r_hill_cm / AU_to_cm
+        return r_hill_au
+
+    def __repr__(self):
+        """
+        String representation of the Planet object.
+
+        Returns
+        -------
+        str
+        """
+        return f"Planet(name={self.name}, mass={self.mass}, pos={self.pos}, vel={self.vel}, posi={self.posi})"
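
A sketch of using the `Planet` container directly with invented, roughly Jupiter-like numbers; `hill_radius` then evaluates r·(m_p/(3 M_*))^(1/3) using the class's approximate Msun-to-Mjup conversion.

```python
p = Planet(
    name="Jupiter",
    pos=(5.2, 0.0, 0.0),   # AU
    vel=(0.0, 2.75, 0.0),  # illustrative velocity components
    mass=0.0009543,        # Msun
    posi=(5.2, 0.0, 0.0),
    mstar=1.0,             # Msun
)
print(p.pos.x, p.vel[1], p.hill_radius)  # Hill radius comes out near 0.35 AU here
```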