llg3d 1.4.0__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
llg3d/llg3d.py DELETED
@@ -1,742 +0,0 @@
1
"""
Solver for the stochastic Landau-Lifshitz-Gilbert equation in 3D
"""

import argparse
import json
import sys
import time
from dataclasses import dataclass

import numpy as np
from mpi4py import MPI

# Module-level MPI context: the domain is block-decomposed along x,
# one slab per process (see Grid.dims and get_boundaries_x)
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
status = MPI.Status()

# Parameters: default value and description
# (parse_args builds the command-line options from this dict)
parameters = {
    "element": ("Cobalt", "Chemical element of the sample"),
    "N": (5000, "Number of time iterations"),  # default 5000
    "dt": (1.0e-14, "Time step"),  # default 1.e-14
    "Jx": (300, "Number of points in x"),
    "Jy": (21, "Number of points in y"),
    "Jz": (21, "Number of points in z"),
    "dx": (1.0e-9, "Step in x"),  # default 1.e-9
    "T": (0.0, "Temperature"),
    "H_ext": (
        0.0 / (4 * np.pi * 1.0e-7),
        "External field",
    ),  # must be constant, default 0.0
    "n_average": (4000, "Start index of time average"),  # default 4000
    "n_integral": (1, "Spatial average frequency (number of iterations)"),
    "n_profile": (0, "x-profile save frequency (number of iterations)"),
}

# Name of the JSON file summarizing a run (see write_json)
json_file = "run.json"
39
-
40
-
41
def progress_bar(rank: int, it, prefix="", size=60, out=sys.stdout):
    """
    Yields the items of ``it`` while displaying a progress bar.
    (Source: https://stackoverflow.com/a/34482761/16593179)

    Only rank 0 prints; other ranks iterate silently.

    Args:
        rank: MPI rank of the calling process
        it: a sized iterable (must support ``len``)
        prefix: text shown before the bar
        size: width of the bar in characters (intentionally shadows the
            module-level MPI ``size`` inside this function)
        out: stream the bar is written to

    Fix: the original crashed on an empty iterable — ``show(0)`` divided
    by ``count == 0`` and the final ``show(i + 1)`` hit an unbound ``i``.
    """

    count = len(it)

    def show(j):
        # Treat an empty iterable as fully complete to avoid dividing by zero
        x = int(size * j / count) if count else size
        if rank == 0:
            print(
                f"{prefix}[{u'█'*x}{('.'*(size-x))}] {j}/{count}",
                end="\r",
                file=out,
                flush=True,
            )

    show(0)
    i = -1  # keeps the final show(i + 1) valid when `it` is empty
    for i, item in enumerate(it):
        yield item
        # To avoid slowing down the computation, we do not display at every iteration
        if i % 5 == 0:
            show(i + 1)
    show(i + 1)
    if rank == 0:
        print("\n", flush=True, file=out)
68
-
69
-
70
@dataclass
class Grid:
    """Stores grid data"""

    # Parameter values correspond to the global grid
    Jx: int
    Jy: int
    Jz: int
    dx: float

    def __post_init__(self) -> None:
        """Compute grid characteristics derived from Jx, Jy, Jz and dx"""
        self.dy = self.dz = self.dx  # Setting dx = dy = dz
        # Physical lengths of the global domain
        self.Lx = (self.Jx - 1) * self.dx
        self.Ly = (self.Jy - 1) * self.dy
        self.Lz = (self.Jz - 1) * self.dz
        # shape of the local array to the process
        # (x is split evenly across the `size` MPI processes;
        # simulate() checks that Jx is divisible by size)
        self.dims = self.Jx // size, self.Jy, self.Jz
        # elemental volume of a cell
        self.dV = self.dx * self.dy * self.dz
        # total volume
        self.V = self.Lx * self.Ly * self.Lz
        # total number of points
        self.ntot = self.Jx * self.Jy * self.Jz
        # total number of cells (normalizes spatial averages, see integral)
        self.ncell = (self.Jx - 1) * (self.Jy - 1) * (self.Jz - 1)

    def __repr__(self):
        """Human-readable summary of the grid (printed by rank 0)"""
        s = "\t" + "\t\t".join(("x", "y", "z")) + "\n"
        s += f"J =\t{self.Jx}\t\t{self.Jy}\t\t{self.Jz}\n"
        s += f"L =\t{self.Lx}\t\t{self.Ly}\t\t{self.Lz}\n"
        s += f"d =\t{self.dx:.08e}\t{self.dy:.08e}\t{self.dz:.08e}\n\n"
        s += f"dV = {self.dV:.08e}\n"
        s += f"V = {self.V:.08e}\n"
        s += f"ntot = {self.ntot:d}\n"

        return s

    def get_filename(
        self, T: float, name: str = "m1_integral_space", extension="txt"
    ) -> str:
        """
        Returns the output file name for a given temperature

        The name encodes the temperature, the global grid size and the
        number of MPI processes.

        >>> g = Grid(Jx=300, Jy=21, Jz=21, dx=1.e-9)
        >>> g.get_filename(1100)
        'm1_integral_space_T1100_300x21x21_np<MPI_size>.txt'
        """
        suffix = f"T{int(T)}_{self.Jx}x{self.Jy}x{self.Jz}_np{size}"
        return f"{name}_{suffix}.{extension}"

    def get_mesh(self, loc: bool = True) -> list:
        """
        Returns a list of 3D arrays with the coordinates
        of the grid points

        Args:
            loc: if True, returns the local coordinates,
                otherwise the global coordinates
        """
        xglob = np.linspace(0, self.Lx, self.Jx)  # global coordinates
        if loc:
            # each process keeps only its own slab of x coordinates
            xloc = np.split(xglob, size)[rank]  # local coordinates
            x = xloc
        else:
            x = xglob
        return np.meshgrid(
            x,
            np.linspace(0, self.Ly, self.Jy),
            np.linspace(0, self.Lz, self.Jz),
            indexing="ij",
        )
141
-
142
-
143
class Element:
    """
    Abstract class for chemical elements

    Subclasses provide the material constants below; the constructor
    derives the coefficients of the LLG equation from them.
    """

    # Material constants, overridden by concrete subclasses
    A = 0.0  # exchange stiffness
    K = 0.0  # anisotropy constant (negative for Nickel)
    gamma = 0.0  # gyromagnetic ratio
    mu_0 = 0.0  # vacuum permeability
    k_B = 0.0  # Boltzmann constant
    lambda_G = 0.0  # Gilbert damping parameter
    M_s = 0.0  # saturation magnetization
    a_eff = 0.0  # effective lattice parameter

    def __init__(self, T: float, H_ext: float, g: Grid, dt: float) -> None:
        """
        Precompute the simulation coefficients.

        Args:
            T: temperature (only enters the stochastic field amplitude)
            H_ext: external field, assumed constant in time
            g: computation grid
            dt: time step
        """
        self.H_ext = H_ext
        self.g = g
        self.dt = dt
        self.gamma_0 = self.gamma * self.mu_0
        # NOTE(review): np.sqrt of a negative ratio yields NaN and Nickel
        # has K < 0 — confirm d0 is only meant for K > 0 materials.
        self.d0 = np.sqrt(self.A / self.K)  # only if K is positive

        # --- Characteristic Scales ---
        self.coeff_1 = self.gamma_0 * 2.0 * self.A / (self.mu_0 * self.M_s)  # exchange
        self.coeff_2 = self.gamma_0 * 2.0 * self.K / (self.mu_0 * self.M_s)  # anisotropy
        self.coeff_3 = self.gamma_0 * H_ext  # external field
        # corresponds to the temperature actually put into the random field
        T_simu = T * self.g.dx / self.a_eff
        # calculation of the random field related to temperature
        # (we only take the volume over one mesh)
        h_alea = np.sqrt(
            2
            * self.lambda_G
            * self.k_B
            / (self.gamma_0 * self.mu_0 * self.M_s * self.g.dV)
        )
        H_alea = h_alea * np.sqrt(T_simu) * np.sqrt(1.0 / self.dt)
        self.coeff_4 = H_alea * self.gamma_0  # stochastic field amplitude

    def get_CFL(self):
        """Returns the CFL-like stability number dt * coeff_1 / dx**2"""
        return self.dt * self.coeff_1 / self.g.dx**2
182
-
183
-
184
class Cobalt(Element):
    """Material constants for cobalt (uniaxial anisotropy)"""

    A = 30.0e-12
    K = 520.0e3
    gamma = 1.76e11
    mu_0 = 1.26e-6
    k_B = 1.38e-23
    lambda_G = 0.5  # 0.5 by default
    M_s = 1400.0e3
    a_eff = 0.25e-9
    anisotropy = "uniaxial"
194
-
195
-
196
class Iron(Element):
    """Material constants for iron (cubic anisotropy)"""

    A = 21.0e-12
    K = 48.0e3
    gamma = 1.76e11
    mu_0 = 1.26e-6
    gamma_0 = gamma * mu_0  # NOTE: also recomputed in Element.__init__
    k_B = 1.38e-23
    lambda_G = 0.5  # 0.5 by default
    M_s = 1700.0e3
    a_eff = 0.286e-9
    anisotropy = "cubic"
207
-
208
-
209
class Nickel(Element):
    """Material constants for nickel (cubic anisotropy, negative K)"""

    A = 9.0e-12
    K = -5.7e3  # negative: Element.d0 = sqrt(A/K) is NaN for nickel
    gamma = 1.76e11
    mu_0 = 1.26e-6
    gamma_0 = gamma * mu_0  # NOTE: also recomputed in Element.__init__
    k_B = 1.38e-23
    lambda_G = 0.5  # 0.5 by default
    M_s = 490.0e3
    a_eff = 0.345e-9
    anisotropy = "cubic"
220
-
221
-
222
def get_boundaries_x(
    g: Grid, m, blocking: bool = False
) -> tuple[np.ndarray, np.ndarray, MPI.Request, MPI.Request]:
    """
    Returns the boundaries asynchronously:
    allows overlapping communication time of boundaries
    with calculations

    Each process exchanges its first/last x-plane with its ring
    neighbours.

    Args:
        g: grid (provides the (1, Jy, Jz) plane shape)
        m: local field array of shape g.dims
        blocking: if True use Sendrecv and return (plane, plane, None, None);
            otherwise return pending Irecv requests so the caller can
            overlap communication with computation (see calculate_laplacian)

    Returns:
        (m_start_x, m_end_x, request_start, request_end); in the
        non-blocking case the plane buffers are only valid after the
        requests complete.
    """

    # Extract slices for Neumann boundary conditions

    # Receive buffers for the ghost planes coming from the neighbours
    m_start_x = np.empty((1, g.Jy, g.Jz))
    m_end_x = np.empty_like(m_start_x)

    # Prepare ring communication:
    # Even if procs 0 and size - 1 shouldn't receive anything from left
    # and right respectively, it's simpler to express it like this
    right = (rank + 1) % size
    left = (rank - 1 + size) % size

    if blocking:
        # Wait for boundaries to be available
        comm.Sendrecv(m[:1, :, :], dest=left, sendtag=0, recvbuf=m_end_x, source=right)
        comm.Sendrecv(
            m[-1:, :, :], dest=right, sendtag=1, recvbuf=m_start_x, source=left
        )
        return m_start_x, m_end_x, None, None
    else:
        # Post receives first, then the matching sends:
        # tag 201 carries planes moving right, tag 202 planes moving left
        request_start = comm.Irecv(m_start_x, source=left, tag=201)
        request_end = comm.Irecv(m_end_x, source=right, tag=202)
        comm.Isend(m[-1:, :, :], dest=right, tag=201)
        comm.Isend(m[:1, :, :], dest=left, tag=202)

        return m_start_x, m_end_x, request_start, request_end
256
-
257
-
258
def calculate_laplacian(
    e: Element,
    g: Grid,
    m: np.ndarray,
    m_start_x: np.ndarray,
    m_end_x: np.ndarray,
    request_end: MPI.Request,
    request_start: MPI.Request,
) -> np.ndarray:
    """
    Returns the Laplacian of m (* coeff_1) in 3D.
    We start by calculating contributions in y and z, to wait
    for the end of communications in x.

    The ghost planes m_start_x / m_end_x come from get_boundaries_x;
    the requests are None when blocking communications were used.
    """

    # Extract slices for Neumann boundary conditions
    # (the ghost plane mirrors the first interior plane)
    m_start_y = m[:, 1:2, :]
    m_end_y = m[:, -2:-1, :]

    m_start_z = m[:, :, 1:2]
    m_end_z = m[:, :, -2:-1]

    # Shifted copies implementing the j+1 / j-1 neighbours in y and z
    m_y_plus = np.concatenate((m[:, 1:, :], m_end_y), axis=1)
    m_y_minus = np.concatenate((m_start_y, m[:, :-1, :]), axis=1)
    m_z_plus = np.concatenate((m[:, :, 1:], m_end_z), axis=2)
    m_z_minus = np.concatenate((m_start_z, m[:, :, :-1]), axis=2)

    # y/z neighbour sums plus the full diagonal term of the 7-point
    # stencil; the x neighbours are added below once the communications
    # have completed
    laplacian = (
        (m_y_plus + m_y_minus) / g.dy**2
        + (m_z_plus + m_z_minus) / g.dz**2
        - 2 * (1 / g.dx**2 + 1 / g.dy**2 + 1 / g.dz**2) * m
    )

    # Wait for x-boundaries to be available (communications completed).
    # In blocking mode the requests are None: Wait raises AttributeError,
    # which is deliberately ignored.
    try:
        request_end.Wait(status)
        request_start.Wait(status)
    except AttributeError:
        pass

    # For extreme procs, apply Neumann boundary conditions in x
    if rank == size - 1:
        m_end_x = m[-2:-1, :, :]
    if rank == 0:
        m_start_x = m[1:2, :, :]

    m_x_plus = np.concatenate((m[1:, :, :], m_end_x), axis=0)
    m_x_minus = np.concatenate((m_start_x, m[:-1, :, :]), axis=0)

    laplacian += (m_x_plus + m_x_minus) / g.dx**2

    return e.coeff_1 * laplacian
310
-
311
-
312
def calculate_si(
    e: Element,
    g: Grid,
    m1: np.ndarray,
    m2: np.ndarray,
    m3: np.ndarray,
    R_alea: np.ndarray,
    boundaries,
) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
    """
    Returns the s_i = a_i + b_i

    Right-hand sides of the stochastic LLG equation for the three
    magnetization components.

    Args:
        e: element (provides lambda_G, anisotropy and the coeff_* scales)
        g: grid
        m1, m2, m3: magnetization components, shape g.dims
        R_alea: random field samples, shape (3, *g.dims)
        boundaries: one (m_start_x, m_end_x, request_start, request_end)
            tuple per component, as produced by get_boundaries_x
    """

    # Precalculate terms used multiple times

    l_G_m1m2 = e.lambda_G * m1 * m2
    l_G_m1m3 = e.lambda_G * m1 * m3
    l_G_m2m3 = e.lambda_G * m2 * m3

    m1m1 = m1 * m1
    m2m2 = m2 * m2
    m3m3 = m3 * m3

    laplacian_m1 = calculate_laplacian(e, g, m1, *boundaries[0])
    laplacian_m2 = calculate_laplacian(e, g, m2, *boundaries[1])
    laplacian_m3 = calculate_laplacian(e, g, m3, *boundaries[2])

    # Anisotropy contribution, depending on the crystal symmetry
    # NOTE(review): any other e.anisotropy value leaves aniso_* unbound
    # and raises NameError below — confirm only these two values occur.
    if e.anisotropy == "uniaxial":
        aniso_1 = m1
        aniso_2 = np.zeros(np.shape(m1))
        aniso_3 = np.zeros(np.shape(m1))

    if e.anisotropy == "cubic":
        aniso_1 = -(1 - m1m1 + m2m2 * m3m3) * m1
        aniso_2 = -(1 - m2m2 + m1m1 * m3m3) * m2
        aniso_3 = -(1 - m3m3 + m1m1 * m2m2) * m3

    # Effective-field terms: exchange (Laplacian) + anisotropy
    # (+ the constant external field coeff_3 on component 1 only)
    # + stochastic field
    R_1 = laplacian_m1 + e.coeff_2 * aniso_1 + e.coeff_3 + e.coeff_4 * R_alea[0]
    R_2 = laplacian_m2 + e.coeff_2 * aniso_2 + e.coeff_4 * R_alea[1]
    R_3 = laplacian_m3 + e.coeff_2 * aniso_3 + e.coeff_4 * R_alea[2]

    # Precession + damping combination for each component
    s1 = (
        (-m2 - l_G_m1m3) * R_3
        + (m3 - l_G_m1m2) * R_2
        + e.lambda_G * (m2m2 + m3m3) * R_1
    )

    s2 = (
        (-m3 - l_G_m1m2) * R_1
        + (m1 - l_G_m2m3) * R_3
        + e.lambda_G * (m1m1 + m3m3) * R_2
    )

    s3 = (
        (-m1 - l_G_m2m3) * R_2
        + (m2 - l_G_m1m3) * R_1
        + e.lambda_G * (m1m1 + m2m2) * R_3
    )

    return s1, s2, s3
370
-
371
-
372
def integral(g, m: np.ndarray) -> float:
    """
    Returns the spatial average of m of shape (g.dims)
    using the midpoint method on each process

    The reduction lands on rank 0: the returned value is only
    meaningful there (all other ranks return 0.0).
    """

    # Make a copy of m to avoid modifying its value
    mm = m.copy()

    # On y and z edges, divide the contribution by 2
    mm[:, 0, :] /= 2
    mm[:, -1, :] /= 2
    mm[:, :, 0] /= 2
    mm[:, :, -1] /= 2

    # On x edges (only on extreme procs), divide the contribution by 2
    if rank == 0:
        mm[0] /= 2
    if rank == size - 1:
        mm[-1] /= 2
    local_sum = mm.sum()

    # Sum across all processes gathered by process 0 (default op is SUM)
    global_sum = comm.reduce(local_sum)

    # Spatial average is the global sum divided by the number of cells
    return global_sum / g.ncell if rank == 0 else 0.0
399
-
400
-
401
def integral_yz(m: np.ndarray) -> np.ndarray:
    """
    Return the y-z spatial average of m (shape g.dims) for every x
    index, as an array of shape (m.shape[0],), using the midpoint rule.

    The input array is left untouched.
    """

    # Midpoint weighting: boundary planes in y and z contribute half
    weighted = np.copy(m)
    for edge in (0, -1):
        weighted[:, edge, :] *= 0.5
        weighted[:, :, edge] *= 0.5

    cells_per_slice = (m.shape[1] - 1) * (m.shape[2] - 1)
    return weighted.sum(axis=(1, 2)) / cells_per_slice
418
-
419
-
420
def profile(m: np.ndarray, m_xprof: np.ndarray):
    """
    Retrieves the x profile of the average of m in y and z

    Args:
        m: local field array of shape g.dims
        m_xprof: output buffer of global size g.Jx; Gather concatenates
            the per-process yz-averages in rank order, so it is only
            fully populated on rank 0
    """

    # Gather m in mglob
    m_mean_yz = integral_yz(m)
    comm.Gather(m_mean_yz, m_xprof)
428
-
429
-
430
def theta_init(t: float, g: "Grid") -> np.ndarray:
    """
    Initialization of theta (uniform zero field).

    The previous implementation built the full coordinate mesh with
    ``g.get_mesh()`` and then discarded it; that wasted work is removed.
    The mesh was only needed by the formerly used domain-wall profile:
        x, y, z = g.get_mesh()
        return 2.*np.arctan(np.exp(-(x-g.Lx/2+e.d0*e.coeff_3*e.lambda_G*t)/e.d0))

    Args:
        t: current time (unused for the uniform initial condition)
        g: grid; only ``g.dims`` (local array shape) is used

    Returns:
        Array of zeros with the local grid shape ``g.dims``.
    """
    return np.zeros(g.dims)
435
-
436
-
437
def phi_init(t: float, g: Grid, e: Element) -> np.ndarray:
    """
    Initialization of phi: a uniform phase that grows linearly in time
    as gamma_0 * H_ext * t.
    """
    # Same phase at every grid point of the local slab
    phase = e.gamma_0 * e.H_ext * t
    return np.full(g.dims, phase)
441
-
442
-
443
def simulate(
    N: int,
    Jx: int,
    Jy: int,
    Jz: int,
    dx: float,
    T: float,
    H_ext: float,
    dt: float,
    n_average: int,
    n_integral: int,
    n_profile: int,
    element: Element = Cobalt,
    blocking: bool = False,
):
    """
    Simulates the system for N iterations
    Returns the computation time, output filenames
    and the temporal average of m1

    Time stepping is a two-stage predictor/corrector scheme; the same
    random field sample R_random is reused in both stages of a step.

    Args:
        N: number of time iterations
        Jx, Jy, Jz: global number of grid points in each direction
        dx: grid step (dy = dz = dx, see Grid)
        T: temperature
        H_ext: external field (constant)
        dt: time step
        n_average: iteration index from which m1 is time-averaged
        n_integral: spatial-average output frequency (0 disables)
        n_profile: x-profile output frequency (0 disables)
        element: Element subclass holding the material constants
        blocking: use blocking MPI communications for the x boundaries
    """

    # x is block-decomposed: each process owns Jx // size planes
    if Jx % size != 0:
        if rank == 0:
            # NOTE(review): no space between "processes" and "(" in the
            # concatenated f-strings below — confirm intended output
            print(
                f"Error: Jx must be divisible by the number of processes"
                f"({Jx = }, np = {size})"
            )
        comm.barrier()
        MPI.Finalize()
        exit(2)

    # Initialize a sequence of random seeds
    # See: https://numpy.org/doc/stable/reference/random/parallel.html
    ss = np.random.SeedSequence(12345)

    # Deploy size x SeedSequence to pass to child processes
    # (one independent random stream per MPI rank)
    child_seeds = ss.spawn(size)
    streams = [np.random.default_rng(s) for s in child_seeds]
    rng = streams[rank]

    # Create the grid
    g = Grid(Jx=Jx, Jy=Jy, Jz=Jz, dx=dx)

    if rank == 0:
        print(g)

    e = element(T, H_ext, g, dt)
    if rank == 0:
        print(f"CFL = {e.get_CFL()}")

    # Two time levels per component: index 0 = current, 1 = next
    m1 = np.zeros((2,) + g.dims)
    m2 = np.zeros_like(m1)
    m3 = np.zeros_like(m1)

    # Spherical-angle initial condition converted to Cartesian components
    theta = theta_init(0, g)
    phi = phi_init(0, g, e)

    m1[0] = np.cos(theta)
    m2[0] = np.sin(theta) * np.cos(phi)
    m3[0] = np.sin(theta) * np.sin(phi)

    m_xprof = np.zeros(g.Jx)  # global coordinates

    # Output files: a text file for the spatial average of m1 and,
    # optionally, one .npy file per component for the x profiles
    output_filenames = []
    if n_integral != 0:
        output_filenames.append(g.get_filename(T, extension="txt"))
    if n_profile != 0:
        output_filenames.extend(
            [g.get_filename(T, name=f"m{i+1}", extension="npy") for i in range(3)]
        )
    if rank == 0:
        if n_integral != 0:
            f_integral = open(output_filenames[0], "w")  # integral of m1
        if n_profile != 0:
            f_profiles = [
                open(output_filename, "wb") for output_filename in output_filenames[1:]
            ]  # x profiles of m_i

    t = 0.0
    m1_average = 0.0

    start_time = time.perf_counter()

    for n in progress_bar(rank, range(1, N + 1), "Iteration: ", 40):
        t += dt

        # Start the (possibly non-blocking) ghost-plane exchange in x
        x_boundaries = [
            get_boundaries_x(g, m[0], blocking=blocking) for m in (m1, m2, m3)
        ]

        # adding randomness: effect of temperature
        R_random = rng.standard_normal((3, *g.dims))

        # prediction phase
        s1_pre, s2_pre, s3_pre = calculate_si(
            e, g, m1[0], m2[0], m3[0], R_random, x_boundaries
        )

        # update (explicit predictor step)
        m1[1] = m1[0] + dt * s1_pre
        m2[1] = m2[0] + dt * s2_pre
        m3[1] = m3[0] + dt * s3_pre

        # correction phase
        # NOTE(review): boundaries are re-exchanged from m[0] while the
        # corrector below evaluates the slopes at m[1] — confirm the
        # ghost planes are meant to lag the predictor here.
        x_boundaries = [
            get_boundaries_x(g, m[0], blocking=blocking) for m in (m1, m2, m3)
        ]

        s1_cor, s2_cor, s3_cor = calculate_si(
            e, g, m1[1], m2[1], m3[1], R_random, x_boundaries
        )

        # update (average of predictor and corrector slopes)
        m1[1] = m1[0] + dt * 0.5 * (s1_pre + s1_cor)
        m2[1] = m2[0] + dt * 0.5 * (s2_pre + s2_cor)
        m3[1] = m3[0] + dt * 0.5 * (s3_pre + s3_cor)

        # renormalize to verify the constraint of being on the sphere
        norm = np.sqrt(m1[1] ** 2 + m2[1] ** 2 + m3[1] ** 2)
        m1[1] /= norm
        m2[1] /= norm
        m3[1] /= norm

        # advance the time level
        m1[0] = m1[1]
        m2[0] = m2[1]
        m3[0] = m3[1]

        # Export the average of m1 to a file
        if n_integral != 0 and n % n_integral == 0:
            m1_integral_global = integral(g, m1[0])
            if rank == 0:
                if n >= n_average:
                    # weight by n_integral so the accumulated sum counts
                    # every iteration; normalized by N - n_average below
                    m1_average += m1_integral_global * n_integral
                f_integral.write(f"{t:10.8e} {m1_integral_global:10.8e}\n")
        # Export the x profiles of the averaged m_i in y and z
        if n_profile != 0 and n % n_profile == 0:
            for i, m in enumerate((m1[0], m2[0], m3[0])):
                profile(m, m_xprof)
                if rank == 0:
                    # add an x profile to the file
                    np.save(f_profiles[i], m_xprof)

    total_time = time.perf_counter() - start_time

    if rank == 0:
        if n_integral != 0:
            f_integral.close()
        if n_profile != 0:
            for i in range(3):
                f_profiles[i].close()

        # NOTE(review): `n` is the loop variable (== N after the loop);
        # this raises NameError when N == 0 — consider testing N instead.
        if n > n_average:
            m1_average /= N - n_average
            print(f"{m1_average = :e}")

    return total_time, output_filenames, m1_average
599
-
600
-
601
class ArgumentParser(argparse.ArgumentParser):
    """
    An argument parser compatible with MPI

    Messages (usage, help, errors) are printed by rank 0 only, and
    exiting finalizes MPI on every process.
    """

    def _print_message(self, message, file=None):
        # Restrict output to rank 0 so help/usage is printed once
        if rank == 0 and message:
            if file is None:
                file = sys.stderr
            file.write(message)

    def exit(self, status=0, message=None):
        if message:
            self._print_message(message, sys.stderr)
        # Synchronize all ranks before shutting MPI down
        comm.barrier()
        MPI.Finalize()
        exit(status)
616
-
617
-
618
def get_element_class(element_name: str):
    """
    Returns the chemical element class from its name.

    Looks through the direct subclasses of Element; yields None when no
    subclass matches.
    """
    candidates = (c for c in Element.__subclasses__() if c.__name__ == element_name)
    return next(candidates, None)
623
-
624
-
625
def simulate_temperature(params: dict) -> dict:
    """
    Runs a simulation for a given parameter set.
    Returns a dictionary of the run.

    ``params`` holds the keyword arguments of simulate(), with
    params["element"] given as a class-name string and params["T"]
    a scalar. The results section is only populated on rank 0.
    """
    # Backup copy as run['element'] will be modified
    run = {"params": params.copy(), "results": {}}
    run["params"]["np"] = size
    # Referencing the element class from the string
    # NOTE(review): globals() resolves any module-level name; the
    # get_element_class() helper would restrict the lookup to Element
    # subclasses — confirm which behavior is intended.
    params["element"] = globals()[run["params"]["element"]]

    # Run the simulation
    total_time, filenames, m1_mean = simulate(**params)

    if rank == 0:
        N = run["params"]["N"]
        # Results are keyed by temperature
        run["results"] = {
            params["T"]: {"total_time": total_time}
        }
        # Export the integral of m1
        if len(filenames) > 0:
            run["results"][params["T"]]["integral_file"] = filenames[0]
            print(f"Integral of m1 in {filenames[0]}")
        # Export the x-profiles of m1, m2 and m3
        if len(filenames) > 1:
            for i in range(1, 4):
                run["results"][params["T"]][f"xprofile_m{i}"] = filenames[i]
                print(f"x-profile of m{i} in {filenames[i]}")

        print(f"N iterations = {N}")
        print(f"total_time [s] = {total_time:.03f}")
        print(f"time/ite [s/iter] = {total_time / N:.03e}")
        # The time average is only meaningful if averaging ever started
        if N > run["params"]["n_average"]:
            print(f"m1_mean = {m1_mean:e}")
            run["results"][params["T"]]["m1_mean"] = m1_mean

    return run
662
-
663
-
664
def temperature_variation(params: dict):
    """
    Sweeps an array of temperatures: runs one simulation per value of
    params["T"] and merges the per-temperature results into one run
    dictionary.
    """

    # Initialize the run dictionary
    run = {"params": params.copy(), "results": {}}
    for temperature in params["T"]:
        if rank == 0:
            print("-------------")
            print(f"T[K] = {temperature}")
            print("-------------")
        # Replace the temperature list with the current scalar value
        sweep_params = dict(params)
        sweep_params["T"] = temperature
        single_run = simulate_temperature(sweep_params)
        # Update the results dictionary (only rank 0 holds results)
        if rank == 0:
            run["results"][temperature] = single_run["results"][temperature]
    return run
681
-
682
-
683
def parameter_list(d: dict) -> str:
    """
    Returns parameter values as a string, one aligned ``name = value``
    line per entry (``name : value`` for string-valued parameters).

    Args:
        d: mapping of parameter name to value

    Returns:
        The formatted listing; an empty string for an empty mapping
        (the original raised ValueError on ``max()`` of an empty
        sequence).
    """
    # default=0 makes the function total on empty input
    width = max((len(name) for name in d), default=0)
    lines = []
    for name, value in d.items():
        # ':' marks string-valued parameters, '=' everything else
        sep = ":" if isinstance(value, str) else "="
        lines.append(f"{name:<{width}} {sep} {value}\n")
    return "".join(lines)
691
-
692
-
693
def write_json(run: dict):
    """Writes the run dictionary to a JSON file (rank 0 only)"""
    if rank == 0:
        # json_file is the module-level output name ("run.json")
        with open(json_file, "w") as f:
            json.dump(run, f, indent=4)
        print(f"Summary in {json_file}")
699
-
700
-
701
def parse_args(args) -> argparse.Namespace:
    """
    Argument parser for llg3d

    Each entry of the module-level ``parameters`` dict becomes a
    single-dash option whose type is inferred from its default value;
    -T accepts several values to trigger a temperature sweep.

    Args:
        args: argument list to parse (None means sys.argv[1:])
    """
    parser = ArgumentParser(
        description=__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # Automatically add arguments from the parameter dictionary
    for name, data in parameters.items():
        value, description = data
        nargs = "*" if name == "T" else None  # T may be a list
        parser.add_argument(
            f"-{name}", type=type(value), nargs=nargs, default=value, help=description
        )
    # Add an argument to handle the type of communications
    parser.add_argument(
        "-b",
        "--blocking",
        action="store_true",
        help="Use blocking communications",
    )
    return parser.parse_args(args)
721
-
722
-
723
def main(args_main=None):
    """
    Evaluates the command line and runs the simulation: a single run
    for one temperature, or a sweep when -T received several values.
    """

    args = parse_args(args_main)
    if rank == 0:
        # Display parameters as a list
        print(parameter_list(vars(args)))

    params = vars(args)
    if isinstance(params["T"], float) or len(params["T"]) == 1:
        # Single temperature: unwrap a one-element list if needed
        if isinstance(params["T"], list):
            params["T"] = params["T"][0]
        run = simulate_temperature(params)
    else:
        run = temperature_variation(params)
    write_json(run)
739
-
740
-
741
# Script entry point
if __name__ == "__main__":
    main()