pyfemtet 1.0.4__py3-none-any.whl → 1.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pyfemtet might be problematic. Click here for more details.

Files changed (25)
  1. pyfemtet/_i18n/locales/ja/LC_MESSAGES/messages.mo +0 -0
  2. pyfemtet/_i18n/locales/ja/LC_MESSAGES/messages.po +2 -2
  3. pyfemtet/opt/femopt.py +9 -0
  4. pyfemtet/opt/history/_history.py +108 -11
  5. pyfemtet/opt/optimizer/_base_optimizer.py +50 -8
  6. pyfemtet/opt/optimizer/optuna_optimizer/_pof_botorch/enable_nonlinear_constraint.py +4 -2
  7. pyfemtet/opt/optimizer/optuna_optimizer/_pof_botorch/pof_botorch_sampler.py +63 -11
  8. pyfemtet/opt/prediction/_botorch_utils.py +59 -2
  9. pyfemtet/opt/prediction/_model.py +2 -2
  10. pyfemtet/opt/problem/problem.py +9 -0
  11. pyfemtet/opt/problem/variable_manager/_variable_manager.py +1 -1
  12. pyfemtet/opt/visualization/history_viewer/_complex_components/detail_graphs.py +551 -0
  13. pyfemtet/opt/visualization/history_viewer/_detail_page.py +106 -0
  14. pyfemtet/opt/visualization/history_viewer/_process_monitor/_application.py +3 -2
  15. pyfemtet/opt/visualization/history_viewer/result_viewer/_application.py +3 -2
  16. pyfemtet/opt/visualization/history_viewer/result_viewer/_pages.py +1 -0
  17. pyfemtet/opt/visualization/plotter/contour_creator.py +105 -0
  18. pyfemtet/opt/visualization/plotter/parallel_plot_creator.py +33 -0
  19. pyfemtet/opt/visualization/plotter/pm_graph_creator.py +7 -7
  20. {pyfemtet-1.0.4.dist-info → pyfemtet-1.0.6.dist-info}/METADATA +1 -1
  21. {pyfemtet-1.0.4.dist-info → pyfemtet-1.0.6.dist-info}/RECORD +25 -21
  22. {pyfemtet-1.0.4.dist-info → pyfemtet-1.0.6.dist-info}/LICENSE +0 -0
  23. {pyfemtet-1.0.4.dist-info → pyfemtet-1.0.6.dist-info}/LICENSE_THIRD_PARTY.txt +0 -0
  24. {pyfemtet-1.0.4.dist-info → pyfemtet-1.0.6.dist-info}/WHEEL +0 -0
  25. {pyfemtet-1.0.4.dist-info → pyfemtet-1.0.6.dist-info}/entry_points.txt +0 -0
@@ -976,11 +976,11 @@ msgstr "解析モデル互換性チェックで不明なエラーが発生しま
976
976
 
977
977
  #: pyfemtet/opt/visualization/plotter/pm_graph_creator.py:146
978
978
  msgid "Upper of pred. std-dev"
979
- msgstr "予測標準偏差の上限"
979
+ msgstr "予測の標準偏差(+)"
980
980
 
981
981
  #: pyfemtet/opt/visualization/plotter/pm_graph_creator.py:150
982
982
  msgid "Lower of pred. std-dev"
983
- msgstr "予測標準偏差の小さい方"
983
+ msgstr "予測の標準偏差(-)"
984
984
 
985
985
  #: pyfemtet/opt/visualization/plotter/pm_graph_creator.py:183
986
986
  msgid "Std-dev of pred."
pyfemtet/opt/femopt.py CHANGED
@@ -157,6 +157,15 @@ class FEMOpt:
157
157
  ):
158
158
  self.opt.add_constraint(name, fun, lower_bound, upper_bound, args, kwargs, strict, using_fem)
159
159
 
160
+ def add_other_output(
161
+ self,
162
+ name: str,
163
+ fun: Callable[..., float],
164
+ args: tuple | None = None,
165
+ kwargs: dict | None = None,
166
+ ):
167
+ self.opt.add_other_output(name, fun, args, kwargs)
168
+
160
169
  def add_sub_fidelity_model(
161
170
  self,
162
171
  name: str,
@@ -261,6 +261,14 @@ class DataFrameWrapper:
261
261
  class CorrespondingColumnNameRuler:
262
262
  """:meta private:"""
263
263
 
264
+ @staticmethod
265
+ def cns_lower_bound_name(cns_name):
266
+ return cns_name + '_lower_bound'
267
+
268
+ @staticmethod
269
+ def cns_upper_bound_name(cns_name):
270
+ return cns_name + '_upper_bound'
271
+
264
272
  @staticmethod
265
273
  def direction_name(obj_name):
266
274
  return obj_name + '_direction'
@@ -318,6 +326,7 @@ class ColumnManager:
318
326
  parameters: TrialInput
319
327
  y_names: list[str]
320
328
  c_names: list[str]
329
+ other_output_names: list[str]
321
330
  column_dtypes: dict[str, type]
322
331
  meta_columns: list[str]
323
332
 
@@ -332,12 +341,14 @@ class ColumnManager:
332
341
  parameters: TrialInput,
333
342
  y_names,
334
343
  c_names,
344
+ other_output_names,
335
345
  additional_data: dict,
336
346
  column_order_mode: str = ColumnOrderMode.per_category,
337
347
  ):
338
348
  self.parameters = parameters
339
349
  self.y_names = y_names
340
350
  self.c_names = c_names
351
+ self.other_output_names=other_output_names
341
352
  self.set_full_sorted_column_information(
342
353
  additional_data=additional_data,
343
354
  column_order_mode=column_order_mode,
@@ -348,12 +359,14 @@ class ColumnManager:
348
359
  extra_parameters: TrialInput = None,
349
360
  extra_y_names: list[str] = None,
350
361
  extra_c_names: list[str] = None,
362
+ extra_other_output_names: list[str] = None,
351
363
  additional_data: dict = None,
352
364
  column_order_mode: str = ColumnOrderMode.per_category,
353
365
  ):
354
366
  extra_parameters = extra_parameters or TrialInput()
355
367
  extra_y_names = extra_y_names or []
356
368
  extra_c_names = extra_c_names or []
369
+ extra_other_output_names = extra_other_output_names or []
357
370
 
358
371
  # column name になるので重複は許されない
359
372
  column_dtypes: dict = NoDuplicateDict()
@@ -453,7 +466,7 @@ class ColumnManager:
453
466
 
454
467
  # later
455
468
  target_cds.update({f(name): object}) # str | float
456
- target_mcs.append(f('obj'))
469
+ target_mcs.append('obj.direction')
457
470
 
458
471
  for name in extra_y_names:
459
472
  # later
@@ -466,16 +479,46 @@ class ColumnManager:
466
479
 
467
480
  elif key == 'c':
468
481
 
482
+ f_lb = CorrespondingColumnNameRuler.cns_lower_bound_name
483
+ f_ub = CorrespondingColumnNameRuler.cns_upper_bound_name
484
+
469
485
  for name in self.c_names:
470
486
  # important
471
487
  column_dtypes.update({name: float})
472
488
  meta_columns.append('cns')
473
489
 
490
+ # later
491
+ target_cds.update({f_lb(name): float})
492
+ target_mcs.append('cns.lower_bound')
493
+
494
+ # later
495
+ target_cds.update({f_ub(name): float})
496
+ target_mcs.append('cns.upper_bound')
497
+
474
498
  for name in extra_c_names:
475
499
  # later
476
500
  target_cds.update({name: float})
477
501
  target_mcs.append('')
478
502
 
503
+ # later
504
+ target_cds.update({f_lb(name): float})
505
+ target_mcs.append('')
506
+
507
+ # later
508
+ target_cds.update({f_ub(name): float})
509
+ target_mcs.append('')
510
+
511
+ elif key == 'other_outputs':
512
+ for name in self.other_output_names:
513
+ # important
514
+ column_dtypes.update({name: float})
515
+ meta_columns.append('other_output.value')
516
+
517
+ for name in extra_other_output_names:
518
+ # later
519
+ target_cds.update({name: float})
520
+ target_mcs.append('')
521
+
479
522
  # additional_data を入れる
480
523
  elif key == self._get_additional_data_column():
481
524
  # important
@@ -551,6 +594,9 @@ class ColumnManager:
551
594
  def get_cns_names(self) -> list[str]:
552
595
  return self.filter_columns('cns')
553
596
 
597
+ def get_other_output_names(self) -> list[str]:
598
+ return self.filter_columns('other_output')
599
+
554
600
  @staticmethod
555
601
  def _is_numerical_parameter(prm_name, columns):
556
602
  prm_lb_name = CorrespondingColumnNameRuler.prm_lower_bound_name(prm_name)
@@ -645,6 +691,7 @@ class Record:
645
691
  x: TrialInput = dataclasses.field(default_factory=TrialInput)
646
692
  y: TrialOutput = dataclasses.field(default_factory=TrialOutput)
647
693
  c: TrialConstraintOutput = dataclasses.field(default_factory=TrialConstraintOutput)
694
+ other_outputs: TrialFunctionOutput = dataclasses.field(default_factory=TrialFunctionOutput)
648
695
  state: TrialState = TrialState.undefined
649
696
  datetime_start: datetime.datetime = dataclasses.field(default_factory=datetime.datetime.now)
650
697
  datetime_end: datetime.datetime = dataclasses.field(default_factory=datetime.datetime.now)
@@ -662,6 +709,7 @@ class Record:
662
709
  x: TrialInput = d.pop('x')
663
710
  y: TrialOutput = d.pop('y')
664
711
  c: TrialConstraintOutput = d.pop('c')
712
+ other_outputs: TrialFunctionOutput = d.pop('other_outputs')
665
713
 
666
714
  # prm
667
715
  for prm_name, param in x.items():
@@ -682,10 +730,22 @@ class Record:
682
730
  # messages to str
683
731
  messages_str = _RECORD_MESSAGE_DELIMITER.join(d['messages'])
684
732
  d.update({'messages': messages_str})
685
-
733
+
734
+ # obj
686
735
  d.update(**{k: v.value for k, v in y.items()})
687
- d.update(**{f'{k}_direction': v.direction for k, v in y.items()})
736
+ d.update(**{f'{CorrespondingColumnNameRuler.direction_name(k)}': v.direction for k, v in y.items()})
737
+
738
+ # cns
688
739
  d.update(**{k: v.value for k, v in c.items()})
740
+ f_lb = CorrespondingColumnNameRuler.cns_lower_bound_name
741
+ d.update(**{f'{f_lb(k)}': v.lower_bound
742
+ for k, v in c.items()})
743
+ f_ub = CorrespondingColumnNameRuler.cns_upper_bound_name
744
+ d.update(**{f'{f_ub(k)}': v.upper_bound
745
+ for k, v in c.items()})
746
+
747
+ # function
748
+ d.update(**{k: v.value for k, v in other_outputs.items()})
689
749
 
690
750
  df = pd.DataFrame(
691
751
  {k: [v] for k, v in d.items()},
@@ -881,11 +941,13 @@ class Records:
881
941
  loaded_prm_names = set(self.column_manager._filter_prm_names(loaded_columns, loaded_meta_columns))
882
942
  loaded_obj_names = set(self.column_manager._filter_columns('obj', loaded_columns, loaded_meta_columns))
883
943
  loaded_cns_names = set(self.column_manager._filter_columns('cns', loaded_columns, loaded_meta_columns))
944
+ loaded_other_output_names = set(self.column_manager._filter_columns('other_output.value', loaded_columns, loaded_meta_columns))
884
945
 
885
946
  # loaded df に存在するが Record に存在しないカラムを Record に追加
886
947
  extra_parameters = {}
887
948
  extra_y_names = []
888
949
  extra_c_names = []
950
+ extra_oo_names = []
889
951
  for l_col, l_meta in zip(loaded_columns, loaded_meta_columns):
890
952
 
891
953
  # 現在の Record に含まれないならば
@@ -922,6 +984,10 @@ class Records:
922
984
  elif l_col in loaded_cns_names:
923
985
  extra_c_names.append(l_col)
924
986
 
987
+ # other_output_name ならば
988
+ elif l_col in loaded_other_output_names:
989
+ extra_oo_names.append(l_col)
990
+
925
991
  # additional data を取得
926
992
  a_data = self.column_manager._get_additional_data(loaded_columns, loaded_meta_columns)
927
993
 
@@ -929,6 +995,7 @@ class Records:
929
995
  extra_parameters=extra_parameters,
930
996
  extra_y_names=extra_y_names,
931
997
  extra_c_names=extra_c_names,
998
+ extra_other_output_names=extra_oo_names,
932
999
  additional_data=a_data,
933
1000
  column_order_mode=column_order_mode,
934
1001
  )
@@ -1103,6 +1170,7 @@ class History:
1103
1170
  prm_names: list[str]
1104
1171
  obj_names: list[str]
1105
1172
  cns_names: list[str]
1173
+ other_output_names: list[str]
1106
1174
  sub_fidelity_names: list[str]
1107
1175
  is_restart: bool
1108
1176
  additional_data: dict
@@ -1115,6 +1183,10 @@ class History:
1115
1183
  when the optimization process starts.
1116
1184
  """
1117
1185
 
1186
+ @property
1187
+ def all_output_names(self) -> list[str]:
1188
+ return self.obj_names + self.cns_names + self.other_output_names
1189
+
1118
1190
  def __init__(self):
1119
1191
  self._records = Records()
1120
1192
  self.path: str | None = None
@@ -1153,6 +1225,7 @@ class History:
1153
1225
  self.prm_names = ColumnManager._filter_prm_names(df.columns, meta_columns)
1154
1226
  self.obj_names = ColumnManager._filter_columns('obj', df.columns, meta_columns)
1155
1227
  self.cns_names = ColumnManager._filter_columns('cns', df.columns, meta_columns)
1228
+ self.other_output_names = ColumnManager._filter_columns('other_output.value', df.columns, meta_columns)
1156
1229
  self.sub_fidelity_names = ColumnManager._get_sub_fidelity_names(df)
1157
1230
  self.additional_data = ColumnManager._get_additional_data(df.columns, meta_columns)
1158
1231
 
@@ -1165,6 +1238,7 @@ class History:
1165
1238
  parameters,
1166
1239
  self.obj_names,
1167
1240
  self.cns_names,
1241
+ self.other_output_names,
1168
1242
  self.sub_fidelity_names,
1169
1243
  self.additional_data,
1170
1244
  )
@@ -1174,6 +1248,7 @@ class History:
1174
1248
  parameters: TrialInput,
1175
1249
  obj_names,
1176
1250
  cns_names,
1251
+ other_output_names,
1177
1252
  sub_fidelity_names,
1178
1253
  additional_data,
1179
1254
  ):
@@ -1182,13 +1257,15 @@ class History:
1182
1257
  self.prm_names = list(parameters.keys())
1183
1258
  self.obj_names = list(obj_names)
1184
1259
  self.cns_names = list(cns_names)
1260
+ self.other_output_names = list(other_output_names)
1185
1261
  self.sub_fidelity_names = list(sub_fidelity_names)
1186
1262
  self.additional_data.update(additional_data)
1187
1263
 
1188
1264
  if not self._finalized:
1189
1265
  # ここで column_dtypes が決定する
1190
1266
  self._records.column_manager.initialize(
1191
- parameters, self.obj_names, self.cns_names, self.additional_data, self.column_order_mode
1267
+ parameters, self.obj_names, self.cns_names, self.other_output_names,
1268
+ self.additional_data, self.column_order_mode
1192
1269
  )
1193
1270
 
1194
1271
  # initialize
@@ -1320,7 +1397,7 @@ class History:
1320
1397
  if row is not None:
1321
1398
  self_.postprocess_after_recording(row)
1322
1399
 
1323
- # save history
1400
+ # save history if no FEMOpt
1324
1401
  client = get_client()
1325
1402
  if client is None:
1326
1403
  self.save()
@@ -1332,9 +1409,21 @@ class History:
1332
1409
 
1333
1410
  The destination path is :class:`History.path`.
1334
1411
  """
1412
+
1413
+ # flask server 情報のように、最適化の途中で
1414
+ # 書き換えられるケースがあるので
1415
+ # additional data を再度ここで meta_columns に反映する
1416
+ cm = self._records.column_manager
1417
+ for i, column in enumerate(cm.column_dtypes.keys()):
1418
+ # additional_data を入れる
1419
+ if column == cm._get_additional_data_column():
1420
+ cm.meta_columns[i] = json.dumps(self.additional_data or dict())
1421
+
1335
1422
  self._records.save(self.path)
1336
1423
 
1337
1424
  def _create_optuna_study_for_visualization(self):
1425
+ """出力は internal ではない値で、objective は出力という意味であり cns, other_output を含む。"""
1426
+
1338
1427
  import optuna
1339
1428
 
1340
1429
  # create study
@@ -1342,10 +1431,10 @@ class History:
1342
1431
  # storage='sqlite:///' + os.path.basename(self.path) + '_dummy.db',
1343
1432
  sampler=None, pruner=None, study_name='dummy',
1344
1433
  )
1345
- if len(self.obj_names) == 1:
1434
+ if len(self.all_output_names) == 1:
1346
1435
  kwargs.update(dict(direction='minimize'))
1347
1436
  else:
1348
- kwargs.update(dict(directions=['minimize']*len(self.obj_names)))
1437
+ kwargs.update(dict(directions=['minimize']*len(self.all_output_names)))
1349
1438
  study = optuna.create_study(**kwargs)
1350
1439
 
1351
1440
  # add trial to study
@@ -1395,11 +1484,19 @@ class History:
1395
1484
  )
1396
1485
  trial_kwargs.update(dict(distributions=distributions))
1397
1486
 
1398
- # objective
1399
- if len(self.obj_names) == 1:
1400
- trial_kwargs.update(dict(value=row[self.obj_names].values[0]))
1487
+ # objective (+ constraints + other_outputs as objective)
1488
+ if len(self.all_output_names) == 1:
1489
+ if len(self.obj_names) == 1:
1490
+ trial_kwargs.update(dict(value=row[self.obj_names].values[0]))
1491
+ elif len(self.cns_names) == 1:
1492
+ trial_kwargs.update(dict(value=row[self.cns_names].values[0]))
1493
+ elif len(self.other_output_names) == 1:
1494
+ trial_kwargs.update(dict(value=row[self.other_output_names].values[0]))
1495
+ else:
1496
+ assert False
1401
1497
  else:
1402
- trial_kwargs.update(dict(values=row[self.obj_names].values))
1498
+ values = row[self.all_output_names].values
1499
+ trial_kwargs.update(dict(values=values))
1403
1500
 
1404
1501
  # add to study
1405
1502
  trial = optuna.create_trial(**trial_kwargs)
@@ -99,6 +99,7 @@ class AbstractOptimizer:
99
99
  self.variable_manager = VariableManager()
100
100
  self.objectives = Objectives()
101
101
  self.constraints = Constraints()
102
+ self.other_outputs = Functions()
102
103
 
103
104
  # multi-fidelity
104
105
  self.fidelity = None
@@ -339,6 +340,21 @@ class AbstractOptimizer:
339
340
  _duplicated_name_check(name, self.constraints.keys())
340
341
  self.constraints.update({name: cns})
341
342
 
343
+ def add_other_output(
344
+ self,
345
+ name: str,
346
+ fun: Callable[..., float],
347
+ args: tuple | None = None,
348
+ kwargs: dict | None = None,
349
+ ):
350
+
351
+ other_func = Function()
352
+ other_func.fun = fun
353
+ other_func.args = args or ()
354
+ other_func.kwargs = kwargs or {}
355
+ _duplicated_name_check(name, self.other_outputs.keys())
356
+ self.other_outputs.update({name: other_func})
357
+
342
358
  def add_sub_fidelity_model(
343
359
  self,
344
360
  name: str,
@@ -352,12 +368,12 @@ class AbstractOptimizer:
352
368
  _duplicated_name_check(name, self.sub_fidelity_models.keys())
353
369
  self.sub_fidelity_models._update(name, sub_fidelity_model, fidelity)
354
370
 
355
- def get_variables(self, format='dict'):
371
+ def get_variables(self, format: Literal['dict', 'values', 'raw'] = 'dict'):
356
372
  return self.variable_manager.get_variables(
357
373
  format=format,
358
374
  )
359
375
 
360
- def get_parameter(self, format='dict'):
376
+ def get_parameter(self, format: Literal['dict', 'values', 'raw'] = 'dict'):
361
377
  return self.variable_manager.get_variables(
362
378
  format=format, filter='parameter'
363
379
  )
@@ -399,6 +415,12 @@ class AbstractOptimizer:
399
415
  out.update({name: cns_result})
400
416
  return out
401
417
 
418
+ def _other_outputs(self, out: TrialFunctionOutput) -> TrialFunctionOutput:
419
+ for name, other_func in self.other_outputs.items():
420
+ other_func_result = FunctionResult(other_func, self.fem)
421
+ out.update({name: other_func_result})
422
+ return out
423
+
402
424
  def _get_hard_constraint_violation_names(self, hard_c: TrialConstraintOutput) -> list[str]:
403
425
  violation_names = []
404
426
  for name, result in hard_c.items():
@@ -573,6 +595,7 @@ class AbstractOptimizer:
573
595
 
574
596
  try:
575
597
  y: TrialOutput = opt_._y()
598
+ record.y = y
576
599
  opt_._check_and_raise_interruption()
577
600
 
578
601
  # if intentional error (by user)
@@ -604,7 +627,6 @@ class AbstractOptimizer:
604
627
  _c.update(soft_c)
605
628
  _c.update(hard_c)
606
629
 
607
- record.y = y
608
630
  record.c = _c
609
631
  record.state = TrialState.get_corresponding_state_from_exception(e)
610
632
  record.messages.append(
@@ -619,12 +641,31 @@ class AbstractOptimizer:
619
641
  c.update(soft_c)
620
642
  c.update(hard_c)
621
643
 
644
+ # ===== evaluate other functions =====
645
+ logger.info(_('evaluating other functions...'))
646
+
647
+ other_outputs = TrialFunctionOutput()
648
+ try:
649
+ opt_._other_outputs(other_outputs)
650
+
651
+ # if intentional error (by user)
652
+ except _HiddenConstraintViolation as e:
653
+ _log_hidden_constraint(e)
654
+
655
+ record.other_outputs = other_outputs
656
+ record.state = TrialState.get_corresponding_state_from_exception(e)
657
+ record.messages.append(
658
+ _('Hidden constraint violation during '
659
+ 'another output function evaluation: ')
660
+ + create_err_msg_from_exception(e))
661
+
662
+ raise e
663
+
622
664
  # get values as minimize
623
665
  y_internal: dict = opt_._convert_y(y)
624
666
 
625
667
  logger.info(_('output:'))
626
668
  logger.info(y)
627
- record.y = y
628
669
  record.c = c
629
670
  record.state = TrialState.succeeded
630
671
 
@@ -858,10 +899,11 @@ class AbstractOptimizer:
858
899
  filter='parameter', format='raw'
859
900
  )
860
901
  self.history.finalize(
861
- parameters,
862
- list(self.objectives.keys()),
863
- list(self.constraints.keys()),
864
- [self.sub_fidelity_name] + list(self.sub_fidelity_models.keys()),
902
+ parameters=parameters,
903
+ obj_names=list(self.objectives.keys()),
904
+ cns_names=list(self.constraints.keys()),
905
+ other_output_names=list(self.other_outputs.keys()),
906
+ sub_fidelity_names=[self.sub_fidelity_name] + list(self.sub_fidelity_models.keys()),
865
907
  additional_data=self._collect_additional_data()
866
908
  )
867
909
 
@@ -113,7 +113,7 @@ class _ConvertedConstraint:
113
113
 
114
114
  """
115
115
 
116
- x = X.detach().numpy()
116
+ x = X.detach().cpu().numpy()
117
117
  cns_value = _evaluate_pyfemtet_cns(
118
118
  self.cns,
119
119
  self.opt,
@@ -124,6 +124,8 @@ class _ConvertedConstraint:
124
124
  return Tensor([cns_value - self.cns.lower_bound - self.ce])
125
125
  elif self.ub_or_lb == 'ub':
126
126
  return Tensor([self.cns.upper_bound - cns_value - self.ce])
127
+ else:
128
+ assert False
127
129
 
128
130
 
129
131
  # list[pyfemtet.opt.Constraint] について、正規化された入力に対し、 feasible or not を返す関数
@@ -184,7 +186,7 @@ class NonlinearInequalityConstraints:
184
186
  for each_num_restarts in ic_batch:
185
187
  feasible_q_list = []
186
188
  for each_q in each_num_restarts:
187
- x: np.ndarray = each_q.detach().numpy() # normalized parameters
189
+ x: np.ndarray = each_q.detach().cpu().numpy() # normalized parameters
188
190
  if _is_feasible(self.constraints, self.opt, self.trans, x, self.ce, self.cs):
189
191
  feasible_q_list.append(each_q) # Keep only feasible rows
190
192
 
@@ -841,6 +841,7 @@ class PoFConfig:
841
841
  )
842
842
  feasibility_cdf_threshold: float | str = 0.5 # or 'sample_mean'
843
843
  feasibility_noise: float | str | None = None # 'no' to fixed minimum noise
844
+ remove_hard_constraints_from_gp: bool = False # if consider_explicit_hard_constraint is False, no effect.
844
845
 
845
846
 
846
847
  # TODO:
@@ -1079,6 +1080,8 @@ class PoFBoTorchSampler(BoTorchSampler):
1079
1080
  if n_trials < self._n_startup_trials:
1080
1081
  return {}
1081
1082
 
1083
+ # ===== ここまで変更なし =====
1084
+
1082
1085
  trans = _SearchSpaceTransform(search_space)
1083
1086
  n_objectives = len(study.directions)
1084
1087
  values: numpy.ndarray | torch.Tensor = numpy.empty(
@@ -1099,23 +1102,48 @@ class PoFBoTorchSampler(BoTorchSampler):
1099
1102
  ): # BoTorch always assumes maximization.
1100
1103
  value *= -1
1101
1104
  values[trial_idx, obj_idx] = value
1105
+
1102
1106
  if self._constraints_func is not None:
1107
+
1108
+ # get constraints
1103
1109
  constraints = study._storage.get_trial_system_attrs(trial._trial_id).get(
1104
1110
  _CONSTRAINTS_KEY
1105
1111
  )
1112
+
1113
+ # Explicit hard constraints を optimize_acqf で扱うならば
1114
+ # GP にはそれを渡さない (option)
1115
+ if (
1116
+ constraints is not None
1117
+ and self.pof_config.consider_explicit_hard_constraint
1118
+ and self.pof_config.remove_hard_constraints_from_gp
1119
+ ):
1120
+ constraints = filter_soft_constraints_only(
1121
+ constraints, self.pyfemtet_optimizer
1122
+ )
1123
+
1106
1124
  if constraints is not None:
1125
+
1107
1126
  n_constraints = len(constraints)
1108
1127
 
1109
- if con is None:
1110
- con = numpy.full(
1111
- (n_completed_trials, n_constraints), numpy.nan, dtype=numpy.float64
1112
- )
1113
- elif n_constraints != con.shape[1]:
1114
- raise RuntimeError(
1115
- f"Expected {con.shape[1]} constraints "
1116
- f"but received {n_constraints}."
1117
- )
1118
- con[trial_idx] = constraints
1128
+ # remove_hard_constraints_from_gp
1129
+ # constraints がなくなった場合、
1130
+ # そもそも今後ここに来る必要がない
1131
+ if n_constraints == 0:
1132
+ self._constraints_func = None
1133
+
1134
+ else:
1135
+
1136
+ if con is None:
1137
+ con = numpy.full(
1138
+ (n_completed_trials, n_constraints), numpy.nan, dtype=numpy.float64
1139
+ )
1140
+ elif n_constraints != con.shape[1]:
1141
+ raise RuntimeError(
1142
+ f"Expected {con.shape[1]} constraints "
1143
+ f"but received {n_constraints}."
1144
+ )
1145
+ con[trial_idx] = constraints
1146
+
1119
1147
  elif trial.state == TrialState.RUNNING:
1120
1148
  if all(p in trial.params for p in search_space):
1121
1149
  params[trial_idx] = trans.transform(trial.params)
@@ -1162,7 +1190,6 @@ class PoFBoTorchSampler(BoTorchSampler):
1162
1190
  else:
1163
1191
  running_params = None
1164
1192
 
1165
- # ===== ここまで変更なし =====
1166
1193
 
1167
1194
  # TODO: ミーゼスなどの場合にこれらのシード固定法も試す
1168
1195
  # if self._seed is not None:
@@ -1247,3 +1274,28 @@ class PoFBoTorchSampler(BoTorchSampler):
1247
1274
  # ===== ここまで変更なし =====
1248
1275
 
1249
1276
  return trans.untransform(candidates.cpu().numpy())
1277
+
1278
+
1279
+ def filter_soft_constraints_only(
1280
+ constraints: tuple[float],
1281
+ opt: AbstractOptimizer,
1282
+ ) -> list[float]:
1283
+ # constraints を取得
1284
+ # lb, ub の存在に応じて hard, soft を展開
1285
+ is_soft_list = []
1286
+ for cns in opt.constraints.values():
1287
+ if cns.lower_bound is not None:
1288
+ is_soft_list.append(not cns.hard)
1289
+ if cns.upper_bound is not None:
1290
+ is_soft_list.append(not cns.hard)
1291
+
1292
+ # constraints と比べる
1293
+ assert len(constraints) == len(is_soft_list)
1294
+
1295
+ # soft に該当する要素のみで組立て返す
1296
+ ret = []
1297
+ for is_soft, value in zip(is_soft_list, constraints):
1298
+ if is_soft:
1299
+ ret.append(value)
1300
+
1301
+ return ret