pyfemtet 0.4.21__py3-none-any.whl → 0.4.23__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of pyfemtet might be problematic.

Files changed (54)
  1. pyfemtet/__init__.py +1 -1
  2. pyfemtet/_test_util.py +0 -2
  3. pyfemtet/message/messages.py +15 -1
  4. pyfemtet/opt/_femopt.py +233 -199
  5. pyfemtet/opt/_femopt_core.py +116 -47
  6. pyfemtet/opt/femprj_sample/ParametricIF.py +0 -2
  7. pyfemtet/opt/femprj_sample/cad_ex01_NX.py +0 -8
  8. pyfemtet/opt/femprj_sample/cad_ex01_SW.py +0 -8
  9. pyfemtet/opt/femprj_sample/gal_ex58_parametric.py +0 -8
  10. pyfemtet/opt/femprj_sample/gau_ex08_parametric.py +0 -8
  11. pyfemtet/opt/femprj_sample/her_ex40_parametric.py +0 -8
  12. pyfemtet/opt/femprj_sample/paswat_ex1_parametric.py +0 -8
  13. pyfemtet/opt/femprj_sample/paswat_ex1_parametric_parallel.py +0 -8
  14. pyfemtet/opt/femprj_sample/wat_ex14_parametric.py +0 -8
  15. pyfemtet/opt/femprj_sample/wat_ex14_parametric_parallel.py +0 -8
  16. pyfemtet/opt/femprj_sample_jp/ParametricIF_jp.py +0 -2
  17. pyfemtet/opt/femprj_sample_jp/cad_ex01_NX_jp.py +0 -8
  18. pyfemtet/opt/femprj_sample_jp/cad_ex01_SW_jp.py +0 -8
  19. pyfemtet/opt/femprj_sample_jp/gal_ex58_parametric_jp.py +0 -8
  20. pyfemtet/opt/femprj_sample_jp/gau_ex08_parametric_jp.py +0 -8
  21. pyfemtet/opt/femprj_sample_jp/her_ex40_parametric_jp.py +0 -8
  22. pyfemtet/opt/femprj_sample_jp/paswat_ex1_parametric_jp.py +0 -8
  23. pyfemtet/opt/femprj_sample_jp/paswat_ex1_parametric_parallel_jp.py +0 -8
  24. pyfemtet/opt/femprj_sample_jp/wat_ex14_parametric_jp.py +0 -8
  25. pyfemtet/opt/femprj_sample_jp/wat_ex14_parametric_parallel_jp.py +0 -8
  26. pyfemtet/opt/opt/_base.py +4 -4
  27. pyfemtet/opt/opt/_optuna.py +33 -1
  28. pyfemtet/opt/opt/_optuna_botorch_helper.py +209 -0
  29. pyfemtet/opt/visualization/complex_components/main_graph.py +22 -5
  30. pyfemtet/opt/visualization/complex_components/pm_graph.py +77 -25
  31. pyfemtet/opt/visualization/complex_components/pm_graph_creator.py +7 -0
  32. pyfemtet/opt/visualization/process_monitor/application.py +10 -6
  33. pyfemtet/opt/visualization/process_monitor/pages.py +102 -0
  34. pyfemtet/opt/visualization/result_viewer/application.py +6 -0
  35. pyfemtet/opt/visualization/result_viewer/pages.py +1 -1
  36. {pyfemtet-0.4.21.dist-info → pyfemtet-0.4.23.dist-info}/METADATA +2 -4
  37. {pyfemtet-0.4.21.dist-info → pyfemtet-0.4.23.dist-info}/RECORD +40 -53
  38. pyfemtet/FemtetPJTSample/NX_ex01/NX_ex01.femprj +0 -0
  39. pyfemtet/FemtetPJTSample/NX_ex01/NX_ex01.prt +0 -0
  40. pyfemtet/FemtetPJTSample/NX_ex01/NX_ex01.py +0 -118
  41. pyfemtet/FemtetPJTSample/Sldworks_ex01/Sldworks_ex01.SLDPRT +0 -0
  42. pyfemtet/FemtetPJTSample/Sldworks_ex01/Sldworks_ex01.femprj +0 -0
  43. pyfemtet/FemtetPJTSample/Sldworks_ex01/Sldworks_ex01.py +0 -121
  44. pyfemtet/FemtetPJTSample/_her_ex40_parametric.py +0 -148
  45. pyfemtet/FemtetPJTSample/gau_ex08_parametric.femprj +0 -0
  46. pyfemtet/FemtetPJTSample/gau_ex08_parametric.py +0 -58
  47. pyfemtet/FemtetPJTSample/her_ex40_parametric.femprj +0 -0
  48. pyfemtet/FemtetPJTSample/her_ex40_parametric.py +0 -148
  49. pyfemtet/FemtetPJTSample/wat_ex14_parallel_parametric.py +0 -65
  50. pyfemtet/FemtetPJTSample/wat_ex14_parametric.femprj +0 -0
  51. pyfemtet/FemtetPJTSample/wat_ex14_parametric.py +0 -64
  52. {pyfemtet-0.4.21.dist-info → pyfemtet-0.4.23.dist-info}/LICENSE +0 -0
  53. {pyfemtet-0.4.21.dist-info → pyfemtet-0.4.23.dist-info}/WHEEL +0 -0
  54. {pyfemtet-0.4.21.dist-info → pyfemtet-0.4.23.dist-info}/entry_points.txt +0 -0
pyfemtet/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = "0.4.21"
+ __version__ = "0.4.23"
pyfemtet/_test_util.py CHANGED
@@ -103,8 +103,6 @@ def record_result(src: FEMOpt or str, py_path, suffix=''):
 
 
  def _get_obj_from_csv(csv_path, encoding='cp932'):
- print('=====')
- print(csv_path)
  df = pd.read_csv(csv_path, encoding=encoding, header=2)
  columns = df.columns
  with open(csv_path, mode='r', encoding=encoding, newline='\n') as f:
pyfemtet/message/messages.py CHANGED
@@ -50,6 +50,8 @@ class Message:
  'Please free this file before exiting the program, '
  'otherwise history data will be lost.')
  OPTIMIZATION_FINISHED = _('Optimization finished. Results were saved in following:')
+ ERR_NO_BOUNDS = _('No bounds specified.')
+ CONFIRM_BEFORE_EXIT = _('The optimization is now complete. You can view the results on the monitor until you press Enter to exit the program.')
 
  # ===== pyfemtet.opt.interface =====
  ERR_RUN_JOURNAL_NOT_FOUND = _(r'"%UGII_BASE_DIR%\NXBIN\run_journal.exe" is not found. Make sure you have NX installed and the environment variable UGII_BASE_DIR is set.')
@@ -87,6 +89,7 @@ class Message:
  INFO_INFEASIBLE = _('The constraints were not satisfied for the following sets of variables:')
  ERR_FEM_FAILED_AND_CANNOT_CONTINUE = _('Current parameter set cannot update FEM and this optimization method cannot skip current parameter set. The optimization process will be terminated.')
  WARN_INTERRUPTED_IN_SCIPY = _('Optimization has been interrupted. Note that you cannot acquire the OptimizationResult in case of `trust-constr`, `TNC`, `SLSQP` or `COBYLA`.')
+ ERR_PARAMETER_CONSTRAINT_ONLY_BOTORCH = _('You can use parameter constraint only with BoTorchSampler.')
 
  # ===== pyfemtet.opt.visualization =====
  # control_femtet.py
@@ -120,6 +123,7 @@ class Message:
  ERR_NO_FEM_RESULT = _('No FEM result (yet).')
  ERR_NO_PREDICTION_MODEL = _('Prediction model is not calculated yet.')
  ERR_CANNOT_SELECT_SAME_PARAMETER = _('Cannot select same parameter')
+ LABEL_SWITCH_PREDICTION_MODEL_3D = _('3D graph (two or more parameters required)')
  # pm_graph_creator
  GRAPH_TITLE_PREDICTION_MODEL = _('Prediction Model of Objective')
  LEGEND_LABEL_PREDICTION_MODEL = _('prediction model')
@@ -128,6 +132,7 @@ class Message:
  PAGE_TITLE_PROGRESS = _('Progress')
  PAGE_TITLE_PREDICTION_MODEL = _('Prediction')
  PAGE_TITLE_WORKERS = _('Workers')
+ PAGE_TITLE_OPTUNA_VISUALIZATION = _('Details')
  # process monitor pages
  DEFAULT_STATUS_ALERT = _('Optimization status will be shown here.')
  LABEL_AUTO_UPDATE = _('Auto-update graph')
@@ -170,5 +175,14 @@ class Message:
  ERR_SAMPLE_CSV_NOT_FOUND = _('Sample csv is not found. Please consider to re-install pyfemtet by `py -m pip install pyfemtet -U --force-reinstall`')
  ERR_SAMPLE_FEMPRJ_NOT_FOUND = _('Sample femprj file is not found. Please consider to re-install pyfemtet by `py -m pip install pyfemtet -U --force-reinstall`')
  ERR_FEMPRJ_RESULT_NOT_FOUND = _('Sample femprj result folder is not found. Please consider to re-install pyfemtet by `py -m pip install pyfemtet -U --force-reinstall`')
-
+ # DETAIL VISUALIZATION PAGES
+ DETAIL_PAGE_TEXT_BEFORE_LOADING = _('Loading data...')
+ DETAIL_PAGE_HISTORY_HEADER = _('Plots of objectives versus trials')
+ DETAIL_PAGE_HISTORY_DESCRIPTION = _('The vertical axis is the objective, and the horizontal axis is the number of trials.')
+ DETAIL_PAGE_PARALLEL_COOR_HEADER = _('Parallel coordinate plots')
+ DETAIL_PAGE_PARALLEL_COOR_DESCRIPTION = _('The vertical axis is an objective or parameters, and one polyline indicates one result.')
+ DETAIL_PAGE_CONTOUR_HEADER = _('The heatmap of objectives')
+ DETAIL_PAGE_CONTOUR_DESCRIPTION = _('The axes are parameters, and the color shows objective value.')
+ DETAIL_PAGE_SLICE_HEADER = _('The response of an objective versus one parameter')
+ DETAIL_PAGE_SLICE_DESCRIPTION = _('The vertical axis is objective, and the horizontal axis is parameter.')
 
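The new entries above are ordinary class attributes on Message wrapped in the module's `_()` translation helper, so they can be localized like the existing ones; downstream code reads them through the `Msg` alias, as the pyfemtet/opt/_femopt.py hunks below do. A minimal consumption sketch, assuming only the `from pyfemtet.message import Msg` import that appears in the next file:

    # Hedged sketch: read the new message constants through the Msg alias.
    # Only the import path comes from this diff; the banner printing simply
    # mirrors what the new code in FEMOpt.optimize() does with this string.
    from pyfemtet.message import Msg

    banner = Msg.CONFIRM_BEFORE_EXIT
    print('=' * len(banner))
    print(banner)
    print('=' * len(banner))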
pyfemtet/opt/_femopt.py CHANGED
@@ -1,10 +1,13 @@
  # built-in
- from typing import Optional, Any, Callable
+ import inspect
+ import warnings
+ from typing import Optional, Any, Callable, List
  import os
  import datetime
  from time import time, sleep
  from threading import Thread
  import json
+ from traceback import print_exception
 
  # 3rd-party
  import numpy as np
@@ -28,6 +31,29 @@ from pyfemtet.message import Msg, encoding
  from pyfemtet.opt.parameter import Parameter, Expression
 
 
+ def add_worker(client, worker_name):
+ import sys
+ from subprocess import Popen, DEVNULL
+
+ current_n_workers = len(client.nthreads().keys())
+
+ Popen(
+ f'{sys.executable} -m dask worker '
+ f'{client.scheduler.address} '
+ f'--nthreads 1 '
+ f'--nworkers 1 '
+ f'--name {worker_name} '
+ f'--no-nanny',
+ shell=True,
+ stderr=DEVNULL,
+ stdout=DEVNULL,
+ )
+
+ # wait until the new worker is added
+ client.wait_for_workers(n_workers=current_n_workers + 1)
+
+
+
  class FEMOpt:
  """Class to control FEM interface and optimizer.
 
@@ -84,7 +110,6 @@ class FEMOpt:
  self.monitor_process_future = None
  self.monitor_server_kwargs = dict()
  self.monitor_process_worker_name = None
- self._is_error_exit = False
 
  # remove references to objects that cannot be pickled when multiprocessing
  def __getstate__(self):
@@ -282,6 +307,47 @@ class FEMOpt:
 
  self.opt.constraints[name] = Constraint(fun, name, lower_bound, upper_bound, strict, args, kwargs)
 
+ def add_parameter_constraint(
+ self,
+ fun: Callable[[List[float], Any], float],
+ name: Optional[str] = None,
+ lower_bound: Optional[float] = None,
+ upper_bound: Optional[float] = None,
+ ):
+ """Add constraint in case of parameter-only.
+
+ Args:
+ fun (Callable[[List[float], Any], float]): Function to constraint. The name of arguments must be one of ones of parameter or variables.
+ name (Optional[str], optional): Name of constraint. Defaults to None and then name is set to 'cns_{i}'.
+ lower_bound (Optional[float], optional): Lower bound of return value. Defaults to None.
+ upper_bound (Optional[float], optional): Upper bound of return value. Defaults to None.
+ """
+
+ # candidate default name
+ if name is None:
+ prefix = Constraint.default_name
+ i = 0
+ while True:
+ candidate = f'{prefix}_{str(int(i))}'
+ is_existing = candidate in list(self.opt.constraints.keys())
+ if not is_existing:
+ break
+ else:
+ i += 1
+ name = candidate
+
+ # assert at least 1 bound exist
+ assert lower_bound is not None or upper_bound is not None, Msg.ERR_NO_BOUNDS
+
+ from pyfemtet.opt._femopt_core import ParameterConstraint
+ self.opt.constraints[name] = ParameterConstraint(fun, name, lower_bound, upper_bound, self.opt)
+ if hasattr(self.opt, 'add_parameter_constraints'):
+ prm_args = [p.name for p in inspect.signature(fun).parameters.values()]
+ if lower_bound is not None:
+ self.opt.add_parameter_constraints(lambda *args, **kwargs: fun(*args, **kwargs) - lower_bound, prm_args=prm_args)
+ if upper_bound is not None:
+ self.opt.add_parameter_constraints(lambda *args, **kwargs: upper_bound - fun(*args, **kwargs), prm_args=prm_args)
+
  def get_parameter(self, format='dict'):
  raise DeprecationWarning('FEMOpt.get_parameter() was deprecated. Use Femopt.opt.get_parameter() instead.')
 
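The new FEMOpt.add_parameter_constraint() registers a constraint that depends only on design parameters, so infeasible candidates can be rejected before any FEM evaluation; the argument names of the callable must match parameter names (they are resolved with inspect.signature), at least one bound is required, and, per the new ERR_PARAMETER_CONSTRAINT_ONLY_BOTORCH message, the feature targets Optuna's BoTorchSampler. A hedged usage sketch; the parameter names, bounds, and optimizer wiring are illustrative assumptions, and only the add_parameter_constraint() call itself comes from this diff:

    # Hypothetical usage of the new add_parameter_constraint() API.
    from pyfemtet.opt import FEMOpt

    femopt = FEMOpt()  # assumes the default interface/optimizer wiring

    # assumed parameters; the argument names below must match these names
    femopt.add_parameter('width', 10, lower_bound=2, upper_bound=20)
    femopt.add_parameter('gap', 5, lower_bound=1, upper_bound=10)

    def clearance(width, gap):
        # evaluated from parameters alone, before the FEM model is updated
        return gap - 0.5 * width

    # keep clearance(width, gap) >= 0 during sampling
    femopt.add_parameter_constraint(clearance, lower_bound=0)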
@@ -312,6 +378,7 @@ class FEMOpt:
  n_parallel=1,
  timeout=None,
  wait_setup=True,
+ confirm_before_exit=True,
  ):
  """Runs the main optimization process.
 
@@ -320,6 +387,7 @@ class FEMOpt:
  n_parallel (int, optional): The number of parallel processes. Defaults to 1.
  timeout (float or None, optional): The maximum amount of time in seconds that each trial can run. Defaults to None.
  wait_setup (bool, optional): Wait for all workers launching FEM system. Defaults to True.
+ confirm_before_exit (bool, optional): Insert stop before exit to continue to show process monitor.
 
  Tip:
  If set_monitor_host() is not executed, a local server for monitoring will be started at localhost:8080.
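With this release, optimize() owns the cleanup itself (client and cluster are wrapped in a with block in the @@ -425 hunk below), returns the history as a DataFrame via history.get_df(), and, when confirm_before_exit=True (the default), pauses at an input() prompt so the process monitor stays reachable until Enter is pressed; terminate_all() is reduced to a DeprecationWarning. A hedged before/after sketch; the parameter and objective definitions are illustrative assumptions, while the optimize() keywords, the return value, and the deprecation come from this diff:

    # Migration sketch for the new exit behavior.
    from pyfemtet.opt import FEMOpt

    def mass(Femtet):               # assumed objective; a real one would query the FEM result
        return 0.0                  # placeholder value

    femopt = FEMOpt()
    femopt.add_parameter('width', 10, lower_bound=2, upper_bound=20)  # assumed parameter API
    femopt.add_objective(mass, name='mass')                           # assumed objective API

    # 0.4.21 style:
    #   femopt.optimize(n_parallel=1)
    #   femopt.terminate_all()   # now deprecated: it only emits a DeprecationWarning
    #
    # 0.4.23 style: cleanup runs inside optimize(); pass confirm_before_exit=False
    # to skip the "press Enter to exit" pause (e.g. for batch or CI runs).
    df = femopt.optimize(n_parallel=1, confirm_before_exit=False)
    print(df.head())                # optimize() now returns the history DataFrame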
@@ -398,16 +466,7 @@ class FEMOpt:
  # set up the monitor worker
  logger.info('Launching monitor server. This may take a few seconds.')
  self.monitor_process_worker_name = datetime.datetime.now().strftime("Monitor%Y%m%d%H%M%S")
- current_n_workers = len(self.client.nthreads().keys())
- from subprocess import Popen
- import sys
- Popen(
- f'{sys.executable} -m dask worker {self.client.scheduler.address} --nthreads 1 --nworkers 1 --name {self.monitor_process_worker_name} --no-nanny',
- shell=True
- )
-
- # wait until the monitor worker is added
- self.client.wait_for_workers(n_workers=current_n_workers + 1)
+ add_worker(self.client, self.monitor_process_worker_name)
 
  else:
  # build a local cluster
@@ -425,200 +484,175 @@ class FEMOpt:
  self.monitor_process_worker_name = worker_addresses[0]
  worker_addresses[0] = 'Main'
 
- # Femtet-specific processing
- metadata = None
- if isinstance(self.fem, FemtetInterface):
- # information about the femprj recorded in the result csv
- metadata = json.dumps(
- dict(
- femprj_path=self.fem.original_femprj_path,
- model_name=self.fem.model_name
+ with self.client.cluster as _cluster, self.client as _client:
+
+ # Femtet-specific processing
+ metadata = None
+ if isinstance(self.fem, FemtetInterface):
+ # information about the femprj recorded in the result csv
+ metadata = json.dumps(
+ dict(
+ femprj_path=self.fem.original_femprj_path,
+ model_name=self.fem.model_name
+ )
  )
+ # whether to use Femtet's parametric settings as objective functions
+ if self.fem.parametric_output_indexes_use_as_objective is not None:
+ from pyfemtet.opt.interface._femtet_parametric import add_parametric_results_as_objectives
+ indexes = list(self.fem.parametric_output_indexes_use_as_objective.keys())
+ directions = list(self.fem.parametric_output_indexes_use_as_objective.values())
+ add_parametric_results_as_objectives(
+ self,
+ indexes,
+ directions,
+ )
+
+ # set up actors
+ self.status = OptimizationStatus(_client)
+ self.worker_status_list = [OptimizationStatus(_client, name) for name in worker_addresses] # consider tqdm
+ self.status.set(OptimizationStatus.SETTING_UP)
+ self.history = History(
+ self.history_path,
+ self.opt.variables.get_parameter_names(),
+ list(self.opt.objectives.keys()),
+ list(self.opt.constraints.keys()),
+ _client,
+ metadata,
  )
- # whether to use Femtet's parametric settings as objective functions
- if self.fem.parametric_output_indexes_use_as_objective is not None:
- from pyfemtet.opt.interface._femtet_parametric import add_parametric_results_as_objectives
- indexes = list(self.fem.parametric_output_indexes_use_as_objective.keys())
- directions = list(self.fem.parametric_output_indexes_use_as_objective.values())
- add_parametric_results_as_objectives(
- self,
- indexes,
- directions,
- )
-
- # set up actors
- self.status = OptimizationStatus(self.client)
- self.worker_status_list = [OptimizationStatus(self.client, name) for name in worker_addresses] # consider tqdm
- self.status.set(OptimizationStatus.SETTING_UP)
- self.history = History(
- self.history_path,
- self.opt.variables.get_parameter_names(),
- list(self.opt.objectives.keys()),
- list(self.opt.constraints.keys()),
- self.client,
- metadata,
- )
-
- # launch monitor
- self.monitor_process_future = self.client.submit(
- # func
- _start_monitor_server,
- # args
- self.history,
- self.status,
- worker_addresses,
- self.worker_status_list,
- # kwargs
- **self.monitor_server_kwargs,
- # kwargs of submit
- workers=self.monitor_process_worker_name,
- allow_other_workers=False
- )
-
- # fem
- self.fem._setup_before_parallel(self.client)
-
- # opt
- self.opt.fem_class = type(self.fem)
- self.opt.fem_kwargs = self.fem.kwargs
- self.opt.entire_status = self.status
- self.opt.history = self.history
- self.opt._setup_before_parallel()
-
- # start computation on the cluster
- self.status.set(OptimizationStatus.LAUNCHING_FEM)
- start = time()
- calc_futures = self.client.map(
- self.opt._run,
- subprocess_indices,
- [self.worker_status_list] * len(subprocess_indices),
- [wait_setup] * len(subprocess_indices),
- workers=worker_addresses,
- allow_other_workers=False,
- )
 
- t_main = None
- if not self.opt.is_cluster:
- # computation in the local process (equivalent to opt._main)
- subprocess_idx = 0
-
- # set_fem
- self.opt.fem = self.fem
- self.opt._reconstruct_fem(skip_reconstruct=True)
-
- t_main = Thread(
- target=self.opt._run,
- args=(
- subprocess_idx,
- self.worker_status_list,
- wait_setup,
- ),
- kwargs=dict(
- skip_set_fem=True,
- )
+ # launch monitor
+ self.monitor_process_future = _client.submit(
+ # func
+ _start_monitor_server,
+ # args
+ self.history,
+ self.status,
+ worker_addresses,
+ self.worker_status_list,
+ # kwargs
+ **self.monitor_server_kwargs,
+ # kwargs of submit
+ workers=self.monitor_process_worker_name,
+ allow_other_workers=False
  )
- t_main.start()
-
- # save history
- def save_history():
- while True:
- sleep(2)
- try:
- self.history.save()
- except PermissionError:
- logger.warning(Msg.WARN_HISTORY_CSV_NOT_ACCESSIBLE)
- if self.status.get() >= OptimizationStatus.TERMINATED:
- break
-
- t_save_history = Thread(target=save_history)
- t_save_history.start()
-
- # wait for completion
- local_opt_crashed = False
- opt_crashed_list = self.client.gather(calc_futures)
- if not self.opt.is_cluster: # if the existing fem is used, wait for it too
- if t_main is not None:
- t_main.join()
- local_opt_crashed = self.opt._is_error_exit
- opt_crashed_list.append(local_opt_crashed)
- self.status.set(OptimizationStatus.TERMINATED)
- end = time()
-
- # just in case
- t_save_history.join()
-
- # logger.info(f'Computation finished. Execution time was {int(end - start)} seconds. Close the window to exit.')
- # logger.info(f'See {self.history.path} for the results.')
- logger.info(Msg.OPTIMIZATION_FINISHED)
- logger.info(self.history.path)
 
- # set the flag if any process crashed
- if any(opt_crashed_list):
- self._is_error_exit = True
-
- return self.history.local_data
-
-
- def terminate_all(self):
- """Try to terminate all launched processes.
-
- If distributed computing, Scheduler and Workers will NOT be terminated.
-
- """
-
- # if the monitor is not updated at least once in the terminated state, it stays stuck as running
- sleep(1)
-
- # terminate monitor process
- self.status.set(OptimizationStatus.TERMINATE_ALL)
- logger.info(self.monitor_process_future.result())
- sleep(1)
-
- # terminate actors
- self.client.cancel(self.history._future, force=True)
- self.client.cancel(self.status._future, force=True)
- for worker_status in self.worker_status_list:
- self.client.cancel(worker_status._future, force=True)
- logger.info('Terminate actors.')
- sleep(1)
-
- # terminate monitor worker
- n_workers = len(self.client.nthreads())
-
- found_worker_dict = self.client.retire_workers(
- names=[self.monitor_process_worker_name], # name
- close_workers=True,
- remove=True,
- )
-
- if len(found_worker_dict) == 0:
- found_worker_dict = self.client.retire_workers(
- workers=[self.monitor_process_worker_name], # address
- close_workers=True,
- remove=True,
+ # fem
+ self.fem._setup_before_parallel(_client)
+
+ # opt
+ self.opt.fem_class = type(self.fem)
+ self.opt.fem_kwargs = self.fem.kwargs
+ self.opt.entire_status = self.status
+ self.opt.history = self.history
+ self.opt._setup_before_parallel()
+
+ # start computation on the cluster
+ self.status.set(OptimizationStatus.LAUNCHING_FEM)
+ start = time()
+ calc_futures = _client.map(
+ self.opt._run,
+ subprocess_indices,
+ [self.worker_status_list] * len(subprocess_indices),
+ [wait_setup] * len(subprocess_indices),
+ workers=worker_addresses,
+ allow_other_workers=False,
  )
 
- if len(found_worker_dict) > 0:
- while n_workers == len(self.client.nthreads()):
- sleep(1)
- logger.info('Terminate monitor processes worker.')
- sleep(1)
- else:
- logger.warn('Monitor process worker not found.')
-
- # close FEM (if specified to quit when deconstruct)
- del self.fem
- logger.info('Terminate FEM.')
- sleep(1)
-
- # close scheduler, other workers(, cluster)
- self.client.shutdown()
- logger.info('Terminate all relative processes.')
- sleep(3)
-
- # if optimization was crashed, raise Exception
- if self._is_error_exit:
- raise RuntimeError('At least 1 of optimization processes have been crashed. See console log.')
+ t_main = None
+ if not self.opt.is_cluster:
+ # computation in the local process (equivalent to opt._main)
+ subprocess_idx = 0
+
+ # set_fem
+ self.opt.fem = self.fem
+ self.opt._reconstruct_fem(skip_reconstruct=True)
+
+ t_main = Thread(
+ target=self.opt._run,
+ args=(
+ subprocess_idx,
+ self.worker_status_list,
+ wait_setup,
+ ),
+ kwargs=dict(
+ skip_set_fem=True,
+ )
+ )
+ t_main.start()
+
+ # save history
+ def save_history():
+ while True:
+ sleep(2)
+ try:
+ self.history.save()
+ except PermissionError:
+ logger.warning(Msg.WARN_HISTORY_CSV_NOT_ACCESSIBLE)
+ if self.status.get() >= OptimizationStatus.TERMINATED:
+ break
+
+ t_save_history = Thread(target=save_history)
+ t_save_history.start()
+
+ # ===== finish =====
+
+ # collect the list of unexpected exceptions from the cluster
+ opt_exceptions: list[Exception or None] = _client.gather(calc_futures) # gather() also waits for completion
+
+ # if a local opt process is also computing, collect its exception too
+ local_opt_exception: Exception or None = None
+ if not self.opt.is_cluster:
+ if t_main is not None:
+ t_main.join() # wait for completion
+ local_opt_exception = self.opt._exception # get the exception
+ opt_exceptions.append(local_opt_exception)
+
+ # finish
+ self.status.set(OptimizationStatus.TERMINATED)
+ end = time()
+
+ # just in case
+ t_save_history.join()
+
+ # report the results
+ logger.info(Msg.OPTIMIZATION_FINISHED)
+ logger.info(self.history.path)
+
+ # prepare to shut down the monitor worker
+ # (the actual shutdown happens when the monitor worker exits)
+ self.status.set(OptimizationStatus.TERMINATE_ALL)
+ logger.info(self.monitor_process_future.result())
+ sleep(1) # if the monitor is not updated at least once in the terminated state, it stays stuck as running
+
+ # re-display all exceptions
+ for i, opt_exception in enumerate(opt_exceptions):
+ if opt_exception is not None:
+ print()
+ print(f'===== unexpected exception raised on worker {i} =====')
+ print_exception(opt_exception)
+ print()
+
+ # keep the monitor worker alive so the user can review the results
+ if confirm_before_exit:
+ print()
+ print('='*len(Msg.CONFIRM_BEFORE_EXIT))
+ print(Msg.CONFIRM_BEFORE_EXIT)
+ print('='*len(Msg.CONFIRM_BEFORE_EXIT))
+ input()
+
+ return self.history.get_df() # actors disappear when the with block exits, but .copy() is not needed at this point
+
+ @staticmethod
+ def terminate_all():
+ warnings.warn(
+ "terminate_all() is deprecated and will be removed in a future version. "
+ "In current and later versions, the equivalent of terminate_all() will be executed when optimize() finishes. "
+ "Therefore, you can simply remove terminate_all() from your code. "
+ "If you want to stop program before terminating monitor process, "
+ "use ``confirm_before_exit`` argument like ``FEMOpt.optimize(confirm_before_exit=True)``",
+ DeprecationWarning,
+ stacklevel=2
+ )
 
 
  def _start_monitor_server(