pyfemtet 0.7.0__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of pyfemtet might be problematic.
- pyfemtet/__init__.py +1 -1
- pyfemtet/_message/locales/ja/LC_MESSAGES/messages.mo +0 -0
- pyfemtet/_message/locales/ja/LC_MESSAGES/messages.po +112 -90
- pyfemtet/_message/locales/messages.pot +105 -89
- pyfemtet/_message/messages.py +6 -2
- pyfemtet/_util/dask_util.py +10 -0
- pyfemtet/_util/excel_macro_util.py +16 -4
- pyfemtet/_util/excel_parse_util.py +138 -0
- pyfemtet/_util/sample.xlsx +0 -0
- pyfemtet/brep/__init__.py +0 -3
- pyfemtet/brep/_impl.py +7 -3
- pyfemtet/opt/_femopt.py +69 -31
- pyfemtet/opt/_femopt_core.py +100 -36
- pyfemtet/opt/advanced_samples/excel_ui/(ref) original_project.femprj +0 -0
- pyfemtet/opt/advanced_samples/excel_ui/femtet-macro.xlsm +0 -0
- pyfemtet/opt/advanced_samples/excel_ui/pyfemtet-core.py +291 -0
- pyfemtet/opt/advanced_samples/excel_ui/test-pyfemtet-core.cmd +22 -0
- pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_create_training_data.py +60 -0
- pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_create_training_data_jp.py +57 -0
- pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_optimize_with_surrogate.py +100 -0
- pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_optimize_with_surrogate_jp.py +90 -0
- pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_parametric.femprj +0 -0
- pyfemtet/opt/interface/__init__.py +2 -0
- pyfemtet/opt/interface/_base.py +3 -0
- pyfemtet/opt/interface/_excel_interface.py +565 -204
- pyfemtet/opt/interface/_femtet.py +26 -29
- pyfemtet/opt/interface/_surrogate/__init__.py +5 -0
- pyfemtet/opt/interface/_surrogate/_base.py +85 -0
- pyfemtet/opt/interface/_surrogate/_chaospy.py +71 -0
- pyfemtet/opt/interface/_surrogate/_singletaskgp.py +70 -0
- pyfemtet/opt/optimizer/_base.py +30 -19
- pyfemtet/opt/optimizer/_optuna/_optuna.py +20 -8
- pyfemtet/opt/optimizer/_optuna/_pof_botorch.py +60 -18
- pyfemtet/opt/prediction/_base.py +8 -0
- pyfemtet/opt/prediction/single_task_gp.py +85 -62
- pyfemtet/opt/visualization/_complex_components/main_figure_creator.py +5 -5
- pyfemtet/opt/visualization/_complex_components/main_graph.py +7 -1
- pyfemtet/opt/visualization/_complex_components/pm_graph.py +1 -1
- pyfemtet/opt/visualization/_process_monitor/application.py +2 -2
- pyfemtet/opt/visualization/_process_monitor/pages.py +1 -1
- pyfemtet/opt/visualization/result_viewer/pages.py +1 -1
- {pyfemtet-0.7.0.dist-info → pyfemtet-0.8.0.dist-info}/METADATA +3 -2
- {pyfemtet-0.7.0.dist-info → pyfemtet-0.8.0.dist-info}/RECORD +46 -29
- {pyfemtet-0.7.0.dist-info → pyfemtet-0.8.0.dist-info}/WHEEL +1 -1
- {pyfemtet-0.7.0.dist-info → pyfemtet-0.8.0.dist-info}/LICENSE +0 -0
- {pyfemtet-0.7.0.dist-info → pyfemtet-0.8.0.dist-info}/entry_points.txt +0 -0
pyfemtet/_util/excel_parse_util.py ADDED
@@ -0,0 +1,138 @@
+import os
+import numpy as np
+import pandas as pd
+
+
+__all__ = [
+    'ParseAsParameter',
+    'ParseAsConstraint',
+    'ParseAsObjective',
+    'search_index',
+    'search_r',
+    'search_c',
+]
+
+
+def parse_excel(book_path, sheet_name, keyword, required, optional) -> pd.DataFrame:
+    """Excel シートからパラメータを取得します。
+
+    シートのパースプロセスは以下の通りです。
+
+    1. シート全体 (A1 セルから、値が入力されている最終セルまで) をデータに取り込みます。
+    2. すべてのセルが空白である列をデータから除きます。
+    3. すべてのセルが空白である行をデータから除きます。
+    4. 最も左上(上が優先)にある keyword に一致するセルより上および左の行・列をデータから除きます。
+
+    Args:
+        book_path: Excel book のパス。
+        sheet_name: シート名。
+        keyword (str): 必ず含まれるべき、表データの最初の列名として使う文字列。
+        required (list[str]): 必ず含まれるべき、表データの列名として使う文字列のリスト。
+        optional (list[str]): 表データの列名として使ってよい文字列のリスト。
+
+    Returns:
+
+    """
+
+    # 存在チェック
+    if not os.path.exists(book_path):
+        raise FileNotFoundError(book_path)
+
+    # 読み込み
+    df = pd.read_excel(book_path, sheet_name, header=None)
+
+    # NaN のみからなる列を削除する
+    valid_columns = [col for col in df.columns if df[col].notna().sum()]
+    df = df[valid_columns]
+
+    # NaN のみからなる行を削除する
+    valid_rows = [row for row in df.index if df.loc[row].notna().sum()]
+    df = df.loc[valid_rows]
+
+    # 「変数名」を左上とする表にする
+    df: pd.DataFrame
+    idx = np.where(df.values == keyword)
+    r = idx[0][0]
+    c = idx[1][0]
+    df = pd.DataFrame(df.iloc[1+r:, c:].values, columns=df.iloc[r, c:].values)
+
+    # パースが成功しているかチェックする
+    lack = True
+    for col in df.columns:
+        lack *= col == keyword
+        lack *= col in required
+    if lack:
+        raise RuntimeError(f'Some required keywords are lacked. '
+                           f'Required keywords are {keyword} (required and must be first) and '
+                           f'{required} (required), '
+                           f'and {optional} is optional.')
+
+    return df
+
+
+class ParseBase:
+    KEYWORD = ''
+    REQUIRED_COLUMNS = []
+    OPTIONAL_COLUMNS = []
+
+    @classmethod
+    def parse(cls, book_path, sheet_name) -> pd.DataFrame:
+        return parse_excel(book_path, sheet_name, cls.KEYWORD, cls.REQUIRED_COLUMNS, cls.OPTIONAL_COLUMNS)
+
+
+class ParseAsParameter(ParseBase):
+    name = '変数名'
+    value = '値'
+    lb = '下限'
+    ub = '上限'
+    step = 'ステップ'
+    use = '使用'
+    KEYWORD = name
+    REQUIRED_COLUMNS = [value, lb, ub]
+    OPTIONAL_COLUMNS = [step, use]
+
+
+class ParseAsObjective(ParseBase):
+    name = '目的名'
+    value = '値'
+    direction = '目標'
+    use = '使用'
+    KEYWORD = name
+    REQUIRED_COLUMNS = [value, direction]
+    OPTIONAL_COLUMNS = [use]
+
+
+class ParseAsConstraint(ParseBase):
+    name = '拘束名'
+    value = '値'
+    lb = '下限'
+    ub = '上限'
+    strict = '厳守'
+    use = '使用'
+    calc_before_solve = 'ソルブ前に計算'
+    KEYWORD = name
+    REQUIRED_COLUMNS = [value]
+    OPTIONAL_COLUMNS = [lb, ub, strict, use, calc_before_solve]
+
+
+def search_index(book_path, sheet_name, value):
+    df = pd.read_excel(book_path, sheet_name, header=None)
+    idx = np.where(df.values == value)
+    r = idx[0][0]
+    c = idx[1][0]
+    return r, c
+
+
+def search_r(book_path, sheet_name, value):
+    return search_index(book_path, sheet_name, value)[0]
+
+
+def search_c(book_path, sheet_name, value):
+    return search_index(book_path, sheet_name, value)[1]
+
+
+if __name__ == '__main__':
+    os.chdir(os.path.dirname(__file__))
+    print(ParseAsParameter.parse('sample.xlsx', 'Sheet1'))
+    print(search_r('sample.xlsx', 'Sheet1', 'X2'))
+    print(search_c('sample.xlsx', 'Sheet1', '値'))

Binary file
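For orientation, here is a minimal usage sketch of the new parsing helpers, mirroring the __main__ block at the end of the file. The workbook name and sheet layout (a Sheet1 whose parameter table is headed 変数名 / 値 / 下限 / 上限) are hypothetical, not taken from the bundled sample.xlsx.

from pyfemtet._util.excel_parse_util import ParseAsParameter, search_r, search_c

# Read the parameter table: empty rows/columns are trimmed and the table is
# re-headered at the first cell that equals the keyword '変数名'.
params = ParseAsParameter.parse('my_book.xlsx', 'Sheet1')   # hypothetical workbook
print(params)

# Locate a cell by its value (0-based row / column indices of the raw sheet).
print(search_r('my_book.xlsx', 'Sheet1', 'X1'))
print(search_c('my_book.xlsx', 'Sheet1', '値'))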
pyfemtet/brep/__init__.py CHANGED

pyfemtet/brep/_impl.py CHANGED
@@ -1,7 +1,10 @@
+import warnings
+
+
 try:
     import brepmatching
+
 except ModuleNotFoundError as e:
-    import warnings
     warnings.warn(
         'There is no installation of `brepmatching`. '
         'Please confirm installation via '
@@ -10,5 +13,6 @@ except ModuleNotFoundError as e:
     )
     raise e
 
-
-
+except FileNotFoundError as e:
+    warnings.warn(str(e))
+    raise e
pyfemtet/opt/_femopt.py CHANGED
@@ -12,7 +12,7 @@ from traceback import print_exception
 # 3rd-party
 import numpy as np
 import pandas as pd
-from dask.distributed import LocalCluster, Client
+from dask.distributed import LocalCluster, Client, get_worker, Nanny
 
 # pyfemtet relative
 from pyfemtet.opt.interface import FEMInterface, FemtetInterface
@@ -136,6 +136,7 @@ class FEMOpt:
         self.monitor_server_kwargs = dict()
         self.monitor_process_worker_name = None
         self._hv_reference = None
+        self._extra_space_dir = None
 
     # multiprocess 時に pickle できないオブジェクト参照の削除
     def __getstate__(self):
@@ -164,6 +165,7 @@ class FEMOpt:
             step: float = None,
             properties: dict[str, str or float] = None,
             pass_to_fem: bool = True,
+            fix: bool = False,
     ):
         # noinspection PyUnresolvedReferences
         """Adds a parameter to the optimization problem.
@@ -176,6 +178,15 @@
             step (float, optional): The step of parameter. If specified, parameter is used as discrete. Defaults to None.
             properties (dict[str, str or float], optional): Additional information about the parameter. Defaults to None.
             pass_to_fem (bool, optional): If this variable is used directly in FEM model update or not. If False, this parameter can be just used as inpt of expressions. Defaults to True.
+            fix (bool, optiona):
+                パラメータを initial_value で固定します。
+                開発時にパラメータを振るか振らないかを
+                簡単に変更するための便利引数です。
+                True のとき、lower_bound, upper_bound, step, properties の
+                値は、有効かどうかのチェックには使われますが、最適化では
+                使われなくなります。
+                デフォルトは False です。
+
 
         Raises:
             ValueError: If initial_value is not specified and the value for the given name is also not specified in FEM.
@@ -208,16 +219,27 @@
         if initial_value is None:
             raise ValueError('initial_value を指定してください.')
 
-
-
-
-
-
-
-
-
-
-
+        if not fix:
+            prm = Parameter(
+                name=name,
+                value=float(initial_value),
+                lower_bound=float(lower_bound) if lower_bound is not None else None,
+                upper_bound=float(upper_bound) if upper_bound is not None else None,
+                step=float(step) if step is not None else None,
+                pass_to_fem=pass_to_fem,
+                properties=properties,
+            )
+            self.opt.variables.add_parameter(prm)
+
+        else:
+            warnings.filterwarnings('ignore', category=UserWarning, message="The function 'add_expression' is experimental")
+            self.add_expression(
+                name=name,
+                fun=lambda: initial_value,
+                pass_to_fem=pass_to_fem,
+                properties=properties,
+            )
+
 
     @experimental_feature
     def add_expression(
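As a reading aid for the new `fix` flag documented above, a minimal sketch of the intended call pattern follows. The bare FEMOpt() construction and the parameter names are hypothetical; in a real script an FEMInterface such as FemtetInterface would normally be involved.

from pyfemtet.opt import FEMOpt

femopt = FEMOpt()  # hypothetical, minimal setup

# Swept as usual between its bounds.
femopt.add_parameter(name='r', initial_value=1.0, lower_bound=0.5, upper_bound=2.0)

# fix=True keeps the parameter at initial_value during optimization; the bounds are
# still used for validity checks, and internally the value is registered as a
# constant expression instead of a Parameter.
femopt.add_parameter(name='t', initial_value=0.3, lower_bound=0.1, upper_bound=1.0, fix=True)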
@@ -298,7 +320,7 @@
 
     def add_objective(
             self,
-            fun,
+            fun: callable or None = None,
             name: str or None = None,
             direction: str or float = 'minimize',
             args: tuple or None = None,
@@ -308,7 +330,7 @@
         """Adds an objective to the optimization problem.
 
         Args:
-            fun (callable): The objective function.
+            fun (callable or None, optional): The objective function. This argument is optional but
             name (str or None, optional): The name of the objective. Defaults to None.
             direction (str or float, optional): The optimization direction. Varid values are 'maximize', 'minimize' or a float value. Defaults to 'minimize'.
             args (tuple or None, optional): Additional arguments for the objective function. Defaults to None.
@@ -343,6 +365,10 @@
         """
 
         # 引数の処理
+        if fun is None:
+            from pyfemtet.opt.interface._surrogate._base import SurrogateModelInterfaceBase
+            if not isinstance(self.fem, SurrogateModelInterfaceBase):
+                raise ValueError('`fun` argument is not specified.')
         if args is None:
             args = tuple()
         elif not isinstance(args, tuple):
@@ -402,7 +428,6 @@
             kwargs=kwargs,
         )
 
-
     def add_constraint(
             self,
             fun,
@@ -611,6 +636,7 @@
         if self.fem._load_problem_from_me:
             self.fem.load_parameter(self.opt)
             self.fem.load_objective(self.opt)
+            self.fem.load_constraint(self.opt)
 
         # resolve expression dependencies
         self.opt.variables.resolve()
@@ -675,10 +701,6 @@
             directions,
         )
 
-        # Femtet の confirm_before_exit のセット
-        self.fem.confirm_before_exit = confirm_before_exit
-        self.fem.kwargs['confirm_before_exit'] = confirm_before_exit
-
         logger.info('Femtet loaded successfully.')
 
         # クラスターの設定
@@ -718,30 +740,43 @@
             # これは CLI の --no-nanny オプションも同様らしい。
 
             # クラスターの構築
+            # noinspection PyTypeChecker
             cluster = LocalCluster(
                 processes=True,
                 n_workers=n_parallel,
                 threads_per_worker=1,
+                worker_class=Nanny,
            )
             logger.info('LocalCluster launched successfully.')
 
-            self.client = Client(
-
+            self.client = Client(
+                cluster,
+                direct_to_workers=False,
+            )
             logger.info('Client launched successfully.')
 
-
-            subprocess_indices = list(range(n_parallel))[1:]
-            worker_addresses = list(self.client.nthreads().keys())
+            self.scheduler_address = self.client.scheduler.address
 
-            #
-
-
+            # worker address を取得
+            nannies_dict: dict[Any, Nanny] = self.client.cluster.workers
+            nannies = tuple(nannies_dict.values())
+
+            # ひとつの Nanny を選んで monitor 用にしつつ
+            # その space は main process に使わせるために記憶する
+            self.monitor_process_worker_name = nannies[0].worker_address
+            self._extra_space_dir = nannies[0].worker_dir
+
+            # 名前と address がごちゃごちゃになっていて可読性が悪いが
+            # 選んだ以外の Nanny は計算を割り当てる用にする
+            worker_addresses = ['Main']
+            worker_addresses.extend([n.worker_address for n in nannies[1:]])
+            subprocess_indices = list(range(n_parallel))[1:]
 
         with self.client.cluster as _cluster, self.client as _client:
 
             # actor の設定
-            self.status = OptimizationStatus(_client)
-            self.worker_status_list = [OptimizationStatus(_client, name) for name in worker_addresses]  # tqdm 検討
+            self.status = OptimizationStatus(_client, worker_address=self.monitor_process_worker_name)
+            self.worker_status_list = [OptimizationStatus(_client, worker_address=self.monitor_process_worker_name, name=name) for name in worker_addresses]  # tqdm 検討
             self.status.set(OptimizationStatus.SETTING_UP)
             self.history = History(
                 self.history_path,
@@ -773,7 +808,6 @@
             logger.info('Process monitor initialized successfully.')
 
             # fem
-            # TODO: n_parallel=1 のときもアップロードしている。これを使うべきか、アップロードしないべき。
             self.fem._setup_before_parallel(_client)
 
             # opt
@@ -794,7 +828,7 @@
                 subprocess_indices,
                 [self.worker_status_list] * len(subprocess_indices),
                 [wait_setup] * len(subprocess_indices),
-                workers=worker_addresses,
+                workers=worker_addresses if self.opt.is_cluster else worker_addresses[1:],
                 allow_other_workers=False,
             )
 
@@ -818,6 +852,7 @@
                 ),
                 kwargs=dict(
                     skip_reconstruct=True,
+                    space_dir=self._extra_space_dir,
                )
            )
            t_main.start()
@@ -882,7 +917,10 @@
             print('='*len(Msg.CONFIRM_BEFORE_EXIT))
             input()
 
-
+        df = self.history.get_df()  # with 文を抜けると actor は消えるが .copy() はこの段階では不要
+
+        return df
+
 
     @staticmethod
     def terminate_all():
pyfemtet/opt/_femopt_core.py CHANGED
@@ -262,6 +262,8 @@ def is_feasible(value, lb, ub):
     Returns:
         bool: True if the value satisfies the bounds; False otherwise.
     """
+    if np.isnan(value):
+        return False
     if lb is None and ub is not None:
         return value <= ub
     elif lb is not None and ub is None:
@@ -290,10 +292,11 @@ class Function:
         # COM 定数を一度 _Scapegoat 型のオブジェクトにする
         # ParametricIF で使う dll 関数は _FuncPtr 型であって __globals__ を持たないが、
        # これは絶対に constants を持たないので単に無視すればよい。
-        if not isinstance(fun, ctypes._CFuncPtr):
-            for varname in fun.__globals__:
-                if isinstance(fun.__globals__[varname], Constants):
-                    fun.__globals__[varname] = _Scapegoat()
+        if fun is not None:
+            if not isinstance(fun, ctypes._CFuncPtr):
+                for varname in fun.__globals__:
+                    if isinstance(fun.__globals__[varname], Constants):
+                        fun.__globals__[varname] = _Scapegoat()
 
         self.fun = fun
         self.name = name
@@ -309,6 +312,9 @@
         Returns:
             float
         """
+        if self.fun is None:
+            RuntimeError(f'`fun` of {self.name} is not specified.')
+
         args = self.args
         # Femtet 特有の処理
         if isinstance(fem, FemtetInterface):
@@ -318,11 +324,12 @@
     def _restore_constants(self):
         """Helper function for parallelize Femtet."""
         fun = self.fun
-        if not isinstance(fun, ctypes._CFuncPtr):
-            for varname in fun.__globals__:
-                if isinstance(fun.__globals__[varname], _Scapegoat):
-                    if not fun.__globals__[varname]._ignore_when_restore_constants:
-                        fun.__globals__[varname] = constants
+        if fun is not None:
+            if not isinstance(fun, ctypes._CFuncPtr):
+                for varname in fun.__globals__:
+                    if isinstance(fun.__globals__[varname], _Scapegoat):
+                        if not fun.__globals__[varname]._ignore_when_restore_constants:
+                            fun.__globals__[varname] = constants
 
 
 class Objective(Function):
@@ -621,16 +628,30 @@ class History:
 
         self.set_df(df)
 
-    def get_df(self) -> pd.DataFrame:
+    def filter_valid(self, df_, keep_trial_num=False):
+        buff = df_[self.obj_names].notna()
+        idx = buff.prod(axis=1).astype(bool)
+        filtered_df = df_[idx]
+        if not keep_trial_num:
+            filtered_df.loc[:, 'trial'] = np.arange(len(filtered_df)) + 1
+        return filtered_df
+
+    def get_df(self, valid_only=False) -> pd.DataFrame:
         if self.__scheduler_address is None:
-            return self._df
+            if valid_only:
+                return self.filter_valid(self._df)
+            else:
+                return self._df
         else:
             # scheduler がまだ存命か確認する
             try:
                 with Lock('access-df'):
                     client_: 'Client' = get_client(self.__scheduler_address)
                     if 'df' in client_.list_datasets():
-                        return client_.get_dataset('df')
+                        if valid_only:
+                            return self.filter_valid(client_.get_dataset('df'))
+                        else:
+                            return client_.get_dataset('df')
                    else:
                        logger.debug('Access df of History before it is initialized.')
                        return pd.DataFrame()
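The new History.filter_valid drops rows whose objective columns contain NaN and renumbers the trial column. A standalone pandas sketch of the same filtering, with hypothetical column names and values:

import numpy as np
import pandas as pd

obj_names = ['obj1', 'obj2']
df = pd.DataFrame({
    'trial': [1, 2, 3],
    'obj1': [1.0, np.nan, 3.0],
    'obj2': [0.5, 2.0, np.nan],
})

idx = df[obj_names].notna().prod(axis=1).astype(bool)    # True where every objective is present
filtered = df[idx].copy()
filtered.loc[:, 'trial'] = np.arange(len(filtered)) + 1  # renumbered unless keep_trial_num=True
print(filtered)  # only the first row survives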
@@ -806,17 +827,19 @@ class History:
         df['non_domi'] = False
 
         # feasible のものに non_domi の評価結果を代入する
-        df.loc[idx, 'non_domi'] = non_domi
+        if len(non_domi) > 0:
+            df.loc[idx, 'non_domi'] = non_domi
 
     def _calc_hypervolume(self, objectives, df):
 
+        # 単目的最適化ならば 0 埋めして終了
         if len(objectives) < 2:
             df.loc[len(df) - 1, 'hypervolume'] = 0.
             return
 
         # 最小化問題に変換された objective values を取得
         raw_objective_values = df[self.obj_names].values
-        objective_values = np.
+        objective_values = np.full_like(raw_objective_values, np.nan)
         for n_trial in range(len(raw_objective_values)):
             for obj_idx, (_, objective) in enumerate(objectives.items()):
                 objective_values[n_trial, obj_idx] = objective.convert(raw_objective_values[n_trial, obj_idx])
@@ -830,13 +853,20 @@
             pareto_set_ = np.empty((0, len(self.obj_names)))
             for i in range(len(objective_values_)):
                 target = objective_values_[i]
-
-
-
-
-
-
-
+
+                if any(np.isnan(target)):
+                    # infeasible な場合 pareto_set の計算に含めない
+                    dominated = True
+
+                else:
+                    dominated = False
+                    # TODO: Array の計算に直して高速化する
+                    for j in range(len(objective_values_)):
+                        compare = objective_values_[j]
+                        if all(target > compare):
+                            dominated = True
+                            break
+
                 if not dominated:
                     pareto_set_ = np.concatenate([pareto_set_, [target]], axis=0)
 
@@ -848,34 +878,63 @@
         else:
             return pareto_set_
 
+        def get_valid_worst_converted_objective_values(objective_values_: np.ndarray) -> np.ndarray:
+            # objective_values.max(axis=0)
+            ret = []
+            for row in objective_values_:
+                if not any(np.isnan(row)):
+                    ret.append(row)
+            return np.array(ret).max(axis=0)
+
         if self._hv_reference == 'dynamic-pareto':
             pareto_set, pareto_set_list = get_pareto(objective_values, with_partial=True)
             for i, partial_pareto_set in enumerate(pareto_set_list):
-
-
-
+                # 並列計算時など Valid な解がまだ一つもない場合は pareto_set が長さ 0 になる
+                # その場合 max() を取るとエラーになる
+                if len(pareto_set) == 0:
+                    df.loc[i, 'hypervolume'] = 0
+                else:
+                    ref_point = pareto_set.max(axis=0) + 1e-8
+                    hv = compute_hypervolume(partial_pareto_set, ref_point)
+                    df.loc[i, 'hypervolume'] = hv
             return
 
         elif self._hv_reference == 'dynamic-nadir':
             _, pareto_set_list = get_pareto(objective_values, with_partial=True)
             for i, partial_pareto_set in enumerate(pareto_set_list):
-
-
-
+                # filter valid objective values only
+                values = get_valid_worst_converted_objective_values(objective_values)
+
+                # 並列計算時など Valid な解がまだ一つもない場合は長さ 0 になる
+                # その場合 max() を取るとエラーになる
+                if len(values) == 0:
+                    df.loc[i, 'hypervolume'] = 0
+
+                else:
+                    ref_point = values.max(axis=0) + 1e-8
+                    hv = compute_hypervolume(partial_pareto_set, ref_point)
+                    df.loc[i, 'hypervolume'] = hv
             return
 
         elif self._hv_reference == 'nadir':
             pareto_set = get_pareto(objective_values)
-
-
-
+            values = get_valid_worst_converted_objective_values(objective_values)
+            if len(values) == 0:
+                df.loc[len(df) - 1, 'hypervolume'] = 0
+            else:
+                ref_point = values.max(axis=0) + 1e-8
+                hv = compute_hypervolume(pareto_set, ref_point)
+                df.loc[len(df) - 1, 'hypervolume'] = hv
             return
 
         elif self._hv_reference == 'pareto':
             pareto_set = get_pareto(objective_values)
-
-
-
+            if len(pareto_set) == 0:
+                df.loc[len(df) - 1, 'hypervolume'] = 0
+            else:
+                ref_point = pareto_set.max(axis=0) + 1e-8
+                hv = compute_hypervolume(pareto_set, ref_point)
+                df.loc[len(df) - 1, 'hypervolume'] = hv
             return
 
         elif (
@@ -935,7 +994,7 @@ class History:
         study = optuna.create_study(**kwargs)
 
         # add trial to study
-        df: pd.DataFrame = self.get_df()
+        df: pd.DataFrame = self.get_df(valid_only=True)
         for i, row in df.iterrows():
             FD = optuna.distributions.FloatDistribution
             kwargs = dict(
@@ -980,8 +1039,13 @@ class OptimizationStatus:
     TERMINATE_ALL = 60
     CRASHED = 70
 
-    def __init__(self, client, name='entire'):
-        self._future = client.submit(
+    def __init__(self, client, worker_address, name='entire'):
+        self._future = client.submit(
+            _OptimizationStatusActor,
+            actor=True,
+            workers=[worker_address],
+            allow_other_workers=False,
+        )
         self._actor = self._future.result()
         self.name = name
         self.set(self.INITIALIZING)
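The hypervolume bookkeeping above now records 0 until at least one valid (NaN-free) trial exists, and otherwise uses the worst valid objective values plus 1e-8 as the reference point. pyfemtet's compute_hypervolume is internal; the 2D helper below only illustrates that convention for a minimization problem, with made-up objective values.

import numpy as np

def hypervolume_2d(pareto: np.ndarray, ref: np.ndarray) -> float:
    # Sum of rectangles between consecutive pareto points (sorted by the first
    # objective) and the reference point; assumes minimization and mutually
    # non-dominated points.
    pts = pareto[np.argsort(pareto[:, 0])]
    right_edges = np.append(pts[1:, 0], ref[0])
    return float(np.sum((right_edges - pts[:, 0]) * (ref[1] - pts[:, 1])))

objective_values = np.array([[1.0, 3.0], [2.0, 2.0], [np.nan, 1.0], [3.0, 1.0]])
valid = objective_values[~np.isnan(objective_values).any(axis=1)]

if len(valid) == 0:
    hv = 0.0  # no valid solution yet, e.g. early in a parallel run
else:
    ref_point = valid.max(axis=0) + 1e-8
    hv = hypervolume_2d(valid, ref_point)  # here all valid rows happen to be pareto-optimal
print(hv)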
Binary file

Binary file