pyfemtet 0.9.5__py3-none-any.whl → 1.0.0b0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of pyfemtet might be problematic. Click here for more details.

Files changed (272)
  1. pyfemtet/__init__.py +6 -1
  2. pyfemtet/_i18n/1. make_pot_and_update_po.bat +8 -0
  3. pyfemtet/_i18n/2. build_mo.bat +5 -0
  4. pyfemtet/_i18n/__init__.py +4 -0
  5. pyfemtet/_i18n/babel.cfg +2 -0
  6. pyfemtet/_i18n/i18n.py +37 -0
  7. pyfemtet/_i18n/locales/ja/LC_MESSAGES/messages.mo +0 -0
  8. pyfemtet/_i18n/locales/ja/LC_MESSAGES/messages.po +1020 -0
  9. pyfemtet/_i18n/locales/messages.pot +987 -0
  10. pyfemtet/{_message → _i18n}/messages.py +128 -41
  11. pyfemtet/_util/closing.py +19 -0
  12. pyfemtet/_util/dask_util.py +89 -7
  13. pyfemtet/_util/df_util.py +29 -0
  14. pyfemtet/_util/excel_macro_util.py +8 -3
  15. pyfemtet/_util/excel_parse_util.py +43 -23
  16. pyfemtet/_util/femtet_access_inspection.py +120 -0
  17. pyfemtet/{_femtet_config_util/autosave.py → _util/femtet_autosave.py} +7 -0
  18. pyfemtet/_util/femtet_exit.py +105 -0
  19. pyfemtet/_util/femtet_version.py +20 -0
  20. pyfemtet/_util/helper.py +94 -0
  21. pyfemtet/_util/process_util.py +107 -0
  22. pyfemtet/_util/str_enum.py +44 -0
  23. pyfemtet/core.py +15 -47
  24. pyfemtet/dispatch_extensions/__init__.py +8 -11
  25. pyfemtet/dispatch_extensions/_impl.py +42 -198
  26. pyfemtet/logger/__init__.py +8 -1
  27. pyfemtet/logger/_impl.py +5 -6
  28. pyfemtet/opt/__init__.py +3 -17
  29. pyfemtet/opt/exceptions.py +45 -0
  30. pyfemtet/opt/femopt.py +608 -0
  31. pyfemtet/opt/history/__init__.py +11 -0
  32. pyfemtet/opt/history/_history.py +1404 -0
  33. pyfemtet/opt/history/_hypervolume.py +169 -0
  34. pyfemtet/opt/history/_optimality.py +79 -0
  35. pyfemtet/opt/interface/__init__.py +17 -24
  36. pyfemtet/opt/interface/_base_interface.py +222 -0
  37. pyfemtet/opt/interface/_excel_interface/__init__.py +3 -0
  38. pyfemtet/opt/interface/_excel_interface/debug-excel-interface.xlsm +0 -0
  39. pyfemtet/opt/interface/_excel_interface/excel_interface.py +999 -0
  40. pyfemtet/opt/interface/_femtet_interface/__init__.py +3 -0
  41. pyfemtet/opt/interface/{_femtet_parametric.py → _femtet_interface/_femtet_parametric.py} +20 -12
  42. pyfemtet/opt/interface/{_femtet.py → _femtet_interface/femtet_interface.py} +505 -349
  43. pyfemtet/opt/interface/_femtet_with_nx_interface/__init__.py +5 -0
  44. pyfemtet/opt/interface/_femtet_with_nx_interface/femtet_with_nx_interface.py +230 -0
  45. pyfemtet/opt/interface/_femtet_with_nx_interface/model1.prt +0 -0
  46. pyfemtet/opt/interface/_femtet_with_nx_interface/model1.x_t +98 -0
  47. pyfemtet/opt/interface/{_femtet_with_nx → _femtet_with_nx_interface}/update_model.py +1 -3
  48. pyfemtet/opt/interface/_femtet_with_solidworks/__init__.py +5 -0
  49. pyfemtet/opt/interface/_femtet_with_solidworks/femtet_with_solidworks_interface.py +122 -0
  50. pyfemtet/opt/interface/_solidworks_interface/__init__.py +5 -0
  51. pyfemtet/opt/interface/_solidworks_interface/solidworks_interface.py +206 -0
  52. pyfemtet/opt/interface/_surrogate_model_interface/__init__.py +8 -0
  53. pyfemtet/opt/interface/_surrogate_model_interface/base_surrogate_interface.py +150 -0
  54. pyfemtet/opt/interface/_surrogate_model_interface/botorch_interface.py +298 -0
  55. pyfemtet/opt/interface/_surrogate_model_interface/debug-pof-botorch.reccsv +18 -0
  56. pyfemtet/opt/interface/_with_excel_settings/__init__.py +61 -0
  57. pyfemtet/opt/interface/_with_excel_settings/with_excel_settings.py +134 -0
  58. pyfemtet/opt/meta_script/YAML_Generator.xlsm +0 -0
  59. pyfemtet/opt/meta_script/__main__.py +58 -36
  60. pyfemtet/opt/optimizer/__init__.py +7 -9
  61. pyfemtet/opt/optimizer/_base_optimizer.py +885 -0
  62. pyfemtet/opt/optimizer/optuna_optimizer/__init__.py +9 -0
  63. pyfemtet/opt/optimizer/optuna_optimizer/_optuna_attribute.py +73 -0
  64. pyfemtet/opt/optimizer/optuna_optimizer/_optuna_optimizer.py +678 -0
  65. pyfemtet/opt/optimizer/optuna_optimizer/_pof_botorch/__init__.py +7 -0
  66. pyfemtet/opt/optimizer/optuna_optimizer/_pof_botorch/debug-pof-botorch.reccsv +18 -0
  67. pyfemtet/opt/optimizer/optuna_optimizer/_pof_botorch/enable_nonlinear_constraint.py +244 -0
  68. pyfemtet/opt/optimizer/optuna_optimizer/_pof_botorch/pof_botorch_sampler.py +1249 -0
  69. pyfemtet/opt/optimizer/optuna_optimizer/wat_ex14_parametric_jp.femprj +0 -0
  70. pyfemtet/opt/optimizer/scipy_optimizer/__init__.py +1 -0
  71. pyfemtet/opt/optimizer/scipy_optimizer/_scipy_optimizer.py +364 -0
  72. pyfemtet/opt/prediction/__init__.py +7 -0
  73. pyfemtet/opt/prediction/_botorch_utils.py +133 -0
  74. pyfemtet/opt/prediction/_gpytorch_modules_extension.py +142 -0
  75. pyfemtet/opt/prediction/_helper.py +155 -0
  76. pyfemtet/opt/prediction/_model.py +118 -0
  77. pyfemtet/opt/problem/problem.py +304 -0
  78. pyfemtet/opt/problem/variable_manager/__init__.py +20 -0
  79. pyfemtet/opt/problem/variable_manager/_string_as_expression.py +115 -0
  80. pyfemtet/opt/problem/variable_manager/_variable_manager.py +295 -0
  81. pyfemtet/opt/visualization/history_viewer/__main__.py +5 -0
  82. pyfemtet/opt/visualization/{_base.py → history_viewer/_base_application.py} +18 -13
  83. pyfemtet/opt/visualization/history_viewer/_common_pages.py +150 -0
  84. pyfemtet/opt/visualization/{_complex_components → history_viewer/_complex_components}/alert_region.py +10 -5
  85. pyfemtet/opt/visualization/{_complex_components → history_viewer/_complex_components}/control_femtet.py +16 -13
  86. pyfemtet/opt/visualization/{_complex_components → history_viewer/_complex_components}/main_graph.py +117 -47
  87. pyfemtet/opt/visualization/{_complex_components → history_viewer/_complex_components}/pm_graph.py +159 -138
  88. pyfemtet/opt/visualization/history_viewer/_process_monitor/_application.py +173 -0
  89. pyfemtet/opt/visualization/history_viewer/_process_monitor/_pages.py +291 -0
  90. pyfemtet/opt/visualization/{_wrapped_components → history_viewer/_wrapped_components}/dbc.py +1 -1
  91. pyfemtet/opt/visualization/{_wrapped_components → history_viewer/_wrapped_components}/dcc.py +1 -1
  92. pyfemtet/opt/visualization/{_wrapped_components → history_viewer/_wrapped_components}/html.py +1 -1
  93. pyfemtet/opt/visualization/history_viewer/result_viewer/__main__.py +5 -0
  94. pyfemtet/opt/visualization/{result_viewer/application.py → history_viewer/result_viewer/_application.py} +6 -6
  95. pyfemtet/opt/visualization/{result_viewer/pages.py → history_viewer/result_viewer/_pages.py} +106 -82
  96. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08.csv +18 -0
  97. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08.db +0 -0
  98. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8.jpg +0 -0
  99. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8.log +45 -0
  100. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8.pdt +0 -0
  101. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_1.jpg +0 -0
  102. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_1.pdt +0 -0
  103. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_10.jpg +0 -0
  104. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_10.pdt +0 -0
  105. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_11.jpg +0 -0
  106. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_11.pdt +0 -0
  107. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_12.jpg +0 -0
  108. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_12.pdt +0 -0
  109. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_13.jpg +0 -0
  110. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_13.pdt +0 -0
  111. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_14.jpg +0 -0
  112. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_14.pdt +0 -0
  113. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_15.jpg +0 -0
  114. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_15.pdt +0 -0
  115. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_16.jpg +0 -0
  116. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_16.pdt +0 -0
  117. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_17.jpg +0 -0
  118. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_17.pdt +0 -0
  119. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_18.jpg +0 -0
  120. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_18.pdt +0 -0
  121. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_19.jpg +0 -0
  122. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_19.pdt +0 -0
  123. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_2.jpg +0 -0
  124. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_2.pdt +0 -0
  125. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_20.jpg +0 -0
  126. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_20.pdt +0 -0
  127. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_3.jpg +0 -0
  128. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_3.pdt +0 -0
  129. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.bgr +0 -0
  130. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.bnd +0 -0
  131. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.btr +0 -0
  132. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.jpg +0 -0
  133. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.mtl +0 -0
  134. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.pdt +0 -0
  135. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.prm +0 -0
  136. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_5.jpg +0 -0
  137. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_5.pdt +0 -0
  138. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_6.jpg +0 -0
  139. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_6.pdt +0 -0
  140. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_7.jpg +0 -0
  141. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_7.pdt +0 -0
  142. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_8.jpg +0 -0
  143. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_8.pdt +0 -0
  144. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_9.jpg +0 -0
  145. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_9.pdt +0 -0
  146. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.femprj +0 -0
  147. pyfemtet/opt/visualization/plotter/main_figure_creator.py +536 -0
  148. pyfemtet/opt/visualization/plotter/pm_graph_creator.py +359 -0
  149. pyfemtet/opt/worker_status.py +120 -0
  150. {pyfemtet-0.9.5.dist-info → pyfemtet-1.0.0b0.dist-info}/METADATA +23 -24
  151. pyfemtet-1.0.0b0.dist-info/RECORD +172 -0
  152. pyfemtet-1.0.0b0.dist-info/entry_points.txt +3 -0
  153. pyfemtet/_femtet_config_util/exit.py +0 -59
  154. pyfemtet/_message/1. make_pot.bat +0 -11
  155. pyfemtet/_message/2. make_mo.bat +0 -6
  156. pyfemtet/_message/__init__.py +0 -5
  157. pyfemtet/_message/babel.cfg +0 -2
  158. pyfemtet/_message/locales/ja/LC_MESSAGES/messages.mo +0 -0
  159. pyfemtet/_message/locales/ja/LC_MESSAGES/messages.po +0 -570
  160. pyfemtet/_message/locales/messages.pot +0 -551
  161. pyfemtet/_warning.py +0 -87
  162. pyfemtet/brep/_impl.py +0 -18
  163. pyfemtet/opt/_femopt.py +0 -1007
  164. pyfemtet/opt/_femopt_core.py +0 -1169
  165. pyfemtet/opt/_test_utils/control_femtet.py +0 -39
  166. pyfemtet/opt/_test_utils/hyper_sphere.py +0 -24
  167. pyfemtet/opt/_test_utils/record_history.py +0 -130
  168. pyfemtet/opt/advanced_samples/excel_ui/(ref) original_project.femprj +0 -0
  169. pyfemtet/opt/advanced_samples/excel_ui/femtet-macro.xlsm +0 -0
  170. pyfemtet/opt/advanced_samples/excel_ui/pyfemtet-core.py +0 -291
  171. pyfemtet/opt/advanced_samples/excel_ui/test-pyfemtet-core.cmd +0 -22
  172. pyfemtet/opt/advanced_samples/restart/gal_ex13_parametric.femprj +0 -0
  173. pyfemtet/opt/advanced_samples/restart/gal_ex13_parametric_restart.py +0 -99
  174. pyfemtet/opt/advanced_samples/restart/gal_ex13_parametric_restart_jp.py +0 -102
  175. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_create_training_data.py +0 -60
  176. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_create_training_data_jp.py +0 -57
  177. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_optimize_with_surrogate.py +0 -100
  178. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_optimize_with_surrogate_jp.py +0 -90
  179. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_parametric.femprj +0 -0
  180. pyfemtet/opt/interface/_base.py +0 -101
  181. pyfemtet/opt/interface/_excel_interface.py +0 -984
  182. pyfemtet/opt/interface/_femtet_excel.py +0 -141
  183. pyfemtet/opt/interface/_femtet_with_nx/__init__.py +0 -3
  184. pyfemtet/opt/interface/_femtet_with_nx/_interface.py +0 -178
  185. pyfemtet/opt/interface/_femtet_with_sldworks.py +0 -298
  186. pyfemtet/opt/interface/_surrogate/__init__.py +0 -5
  187. pyfemtet/opt/interface/_surrogate/_base.py +0 -129
  188. pyfemtet/opt/interface/_surrogate/_chaospy.py +0 -71
  189. pyfemtet/opt/interface/_surrogate/_singletaskgp.py +0 -71
  190. pyfemtet/opt/interface/_surrogate_excel.py +0 -102
  191. pyfemtet/opt/optimizer/_base.py +0 -376
  192. pyfemtet/opt/optimizer/_optuna/_botorch_patch/enable_nonlinear_constraint.py +0 -220
  193. pyfemtet/opt/optimizer/_optuna/_optuna.py +0 -434
  194. pyfemtet/opt/optimizer/_optuna/_pof_botorch.py +0 -1914
  195. pyfemtet/opt/optimizer/_scipy.py +0 -159
  196. pyfemtet/opt/optimizer/_scipy_scalar.py +0 -127
  197. pyfemtet/opt/optimizer/parameter.py +0 -113
  198. pyfemtet/opt/prediction/_base.py +0 -61
  199. pyfemtet/opt/prediction/single_task_gp.py +0 -119
  200. pyfemtet/opt/samples/femprj_sample/ParametricIF.femprj +0 -0
  201. pyfemtet/opt/samples/femprj_sample/ParametricIF.py +0 -29
  202. pyfemtet/opt/samples/femprj_sample/ParametricIF_test_result.reccsv +0 -13
  203. pyfemtet/opt/samples/femprj_sample/cad_ex01_NX.femprj +0 -0
  204. pyfemtet/opt/samples/femprj_sample/cad_ex01_NX.prt +0 -0
  205. pyfemtet/opt/samples/femprj_sample/cad_ex01_NX.py +0 -135
  206. pyfemtet/opt/samples/femprj_sample/cad_ex01_NX_test_result.reccsv +0 -23
  207. pyfemtet/opt/samples/femprj_sample/cad_ex01_SW.SLDPRT +0 -0
  208. pyfemtet/opt/samples/femprj_sample/cad_ex01_SW.femprj +0 -0
  209. pyfemtet/opt/samples/femprj_sample/cad_ex01_SW.py +0 -131
  210. pyfemtet/opt/samples/femprj_sample/cad_ex01_SW_test_result.reccsv +0 -23
  211. pyfemtet/opt/samples/femprj_sample/constrained_pipe.femprj +0 -0
  212. pyfemtet/opt/samples/femprj_sample/constrained_pipe.py +0 -96
  213. pyfemtet/opt/samples/femprj_sample/constrained_pipe_test_result.reccsv +0 -13
  214. pyfemtet/opt/samples/femprj_sample/gal_ex58_parametric.femprj +0 -0
  215. pyfemtet/opt/samples/femprj_sample/gal_ex58_parametric.py +0 -74
  216. pyfemtet/opt/samples/femprj_sample/gal_ex58_parametric_test_result.reccsv +0 -13
  217. pyfemtet/opt/samples/femprj_sample/gau_ex08_parametric.femprj +0 -0
  218. pyfemtet/opt/samples/femprj_sample/gau_ex08_parametric.py +0 -58
  219. pyfemtet/opt/samples/femprj_sample/gau_ex08_parametric_test_result.reccsv +0 -23
  220. pyfemtet/opt/samples/femprj_sample/gau_ex12_parametric.femprj +0 -0
  221. pyfemtet/opt/samples/femprj_sample/gau_ex12_parametric.py +0 -52
  222. pyfemtet/opt/samples/femprj_sample/her_ex40_parametric.femprj +0 -0
  223. pyfemtet/opt/samples/femprj_sample/her_ex40_parametric.py +0 -138
  224. pyfemtet/opt/samples/femprj_sample/her_ex40_parametric_test_result.reccsv +0 -18
  225. pyfemtet/opt/samples/femprj_sample/paswat_ex1_parametric.femprj +0 -0
  226. pyfemtet/opt/samples/femprj_sample/paswat_ex1_parametric.py +0 -60
  227. pyfemtet/opt/samples/femprj_sample/paswat_ex1_parametric_parallel.py +0 -61
  228. pyfemtet/opt/samples/femprj_sample/paswat_ex1_parametric_test_result.reccsv +0 -18
  229. pyfemtet/opt/samples/femprj_sample/wat_ex14_parametric.femprj +0 -0
  230. pyfemtet/opt/samples/femprj_sample/wat_ex14_parametric.py +0 -58
  231. pyfemtet/opt/samples/femprj_sample/wat_ex14_parametric_parallel.py +0 -58
  232. pyfemtet/opt/samples/femprj_sample/wat_ex14_parametric_test_result.reccsv +0 -18
  233. pyfemtet/opt/samples/femprj_sample_jp/ParametricIF_jp.femprj +0 -0
  234. pyfemtet/opt/samples/femprj_sample_jp/ParametricIF_jp.py +0 -29
  235. pyfemtet/opt/samples/femprj_sample_jp/cad_ex01_NX_jp.femprj +0 -0
  236. pyfemtet/opt/samples/femprj_sample_jp/cad_ex01_NX_jp.py +0 -129
  237. pyfemtet/opt/samples/femprj_sample_jp/cad_ex01_SW_jp.femprj +0 -0
  238. pyfemtet/opt/samples/femprj_sample_jp/cad_ex01_SW_jp.py +0 -125
  239. pyfemtet/opt/samples/femprj_sample_jp/constrained_pipe_jp.py +0 -93
  240. pyfemtet/opt/samples/femprj_sample_jp/gal_ex58_parametric_jp.femprj +0 -0
  241. pyfemtet/opt/samples/femprj_sample_jp/gal_ex58_parametric_jp.py +0 -70
  242. pyfemtet/opt/samples/femprj_sample_jp/gau_ex08_parametric_jp.femprj +0 -0
  243. pyfemtet/opt/samples/femprj_sample_jp/gau_ex08_parametric_jp.py +0 -57
  244. pyfemtet/opt/samples/femprj_sample_jp/gau_ex12_parametric_jp.py +0 -52
  245. pyfemtet/opt/samples/femprj_sample_jp/her_ex40_parametric_jp.femprj +0 -0
  246. pyfemtet/opt/samples/femprj_sample_jp/her_ex40_parametric_jp.py +0 -138
  247. pyfemtet/opt/samples/femprj_sample_jp/paswat_ex1_parametric_jp.femprj +0 -0
  248. pyfemtet/opt/samples/femprj_sample_jp/paswat_ex1_parametric_jp.py +0 -58
  249. pyfemtet/opt/samples/femprj_sample_jp/paswat_ex1_parametric_parallel_jp.py +0 -59
  250. pyfemtet/opt/samples/femprj_sample_jp/wat_ex14_parametric_jp.py +0 -56
  251. pyfemtet/opt/samples/femprj_sample_jp/wat_ex14_parametric_parallel_jp.py +0 -56
  252. pyfemtet/opt/visualization/_complex_components/main_figure_creator.py +0 -332
  253. pyfemtet/opt/visualization/_complex_components/pm_graph_creator.py +0 -201
  254. pyfemtet/opt/visualization/_process_monitor/application.py +0 -226
  255. pyfemtet/opt/visualization/_process_monitor/pages.py +0 -406
  256. pyfemtet/opt/visualization/_wrapped_components/__init__.py +0 -0
  257. pyfemtet/opt/visualization/result_viewer/__init__.py +0 -0
  258. pyfemtet-0.9.5.dist-info/RECORD +0 -158
  259. pyfemtet-0.9.5.dist-info/entry_points.txt +0 -3
  260. /pyfemtet/{_femtet_config_util → opt/problem}/__init__.py +0 -0
  261. /pyfemtet/{brep → opt/visualization/history_viewer}/__init__.py +0 -0
  262. /pyfemtet/opt/{_test_utils → visualization/history_viewer/_complex_components}/__init__.py +0 -0
  263. /pyfemtet/opt/{optimizer/_optuna → visualization/history_viewer/_process_monitor}/__init__.py +0 -0
  264. /pyfemtet/opt/{optimizer/_optuna/_botorch_patch → visualization/history_viewer/_wrapped_components}/__init__.py +0 -0
  265. /pyfemtet/opt/visualization/{_wrapped_components → history_viewer/_wrapped_components}/str_enum.py +0 -0
  266. /pyfemtet/opt/visualization/{result_viewer → history_viewer/result_viewer}/.gitignore +0 -0
  267. /pyfemtet/opt/visualization/{_complex_components → history_viewer/result_viewer}/__init__.py +0 -0
  268. /pyfemtet/opt/visualization/{_process_monitor → plotter}/__init__.py +0 -0
  269. /pyfemtet/opt/{samples/femprj_sample_jp/wat_ex14_parametric_jp.femprj → wat_ex14_parametric_jp.femprj} +0 -0
  270. {pyfemtet-0.9.5.dist-info → pyfemtet-1.0.0b0.dist-info}/LICENSE +0 -0
  271. {pyfemtet-0.9.5.dist-info → pyfemtet-1.0.0b0.dist-info}/LICENSE_THIRD_PARTY.txt +0 -0
  272. {pyfemtet-0.9.5.dist-info → pyfemtet-1.0.0b0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,1249 @@
1
+ """This algorithm is based on BoTorchSampler of optuna_integration[1] and the paper[2].
2
+
3
+
4
+ ** LICENSE NOTICE OF [1] **
5
+
6
+ MIT License
7
+
8
+ Copyright (c) 2018 Preferred Networks, Inc.
9
+ Copyright (c) 2024 Kazuma NAITO.
10
+
11
+ Permission is hereby granted, free of charge, to any person obtaining a copy
12
+ of this software and associated documentation files (the "Software"), to deal
13
+ in the Software without restriction, including without limitation the rights
14
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15
+ copies of the Software, and to permit persons to whom the Software is
16
+ furnished to do so, subject to the following conditions:
17
+
18
+ The above copyright notice and this permission notice shall be included in all
19
+ copies or substantial portions of the Software.
20
+
21
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
22
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
23
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
24
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
25
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
26
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
27
+ SOFTWARE.
28
+
29
+
30
+ ** reference of [2] **
31
+ LEE, H., et al. Optimization subject to hidden constraints via statistical
32
+ emulation. Pacific Journal of Optimization, 2011, 7.3: 467-478
33
+
34
+ """
35
+
36
+ # import
37
+ from __future__ import annotations
38
+
39
+ import warnings
40
+ import dataclasses
41
+ from packaging import version
42
+ from typing import Callable, Sequence, Any, TYPE_CHECKING
43
+
44
+ # import optuna
45
+ from optuna.logging import get_logger
46
+ from optuna._imports import try_import
47
+ from optuna._transform import _SearchSpaceTransform
48
+ from optuna.trial import FrozenTrial, TrialState
49
+ from optuna._experimental import experimental_class
50
+ from optuna._experimental import experimental_func
51
+ from optuna.distributions import BaseDistribution
52
+ from optuna.samplers import BaseSampler
53
+ # from optuna.samplers import RandomSampler
54
+ from optuna.samplers._base import _CONSTRAINTS_KEY
55
+ # from optuna.samplers._base import _process_constraints_after_trial
56
+ # from optuna.search_space import IntersectionSearchSpace
57
+ from optuna.study import Study, StudyDirection
58
+
59
+ from optuna_integration.botorch import BoTorchSampler
60
+
61
+ # import others
62
+ import numpy
63
+ import torch
64
+ from torch.distributions import Normal
65
+
66
+ with try_import() as _imports:
67
+ from botorch.models import SingleTaskGP
68
+ from botorch.models.transforms import Normalize
69
+
70
+ from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
71
+ from botorch.acquisition.monte_carlo import qExpectedImprovement
72
+ from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
73
+ from botorch.acquisition.multi_objective import monte_carlo
74
+ from botorch.acquisition.multi_objective.analytic import ExpectedHypervolumeImprovement
75
+ from botorch.acquisition.multi_objective.objective import (
76
+ FeasibilityWeightedMCMultiOutputObjective,
77
+ )
78
+ from botorch.acquisition.multi_objective.objective import IdentityMCMultiOutputObjective
79
+ from botorch.acquisition.objective import ConstrainedMCObjective
80
+ from botorch.acquisition.objective import GenericMCObjective
81
+ from botorch.models import ModelListGP
82
+ from botorch.optim import optimize_acqf
83
+ from botorch.sampling import SobolQMCNormalSampler
84
+ from botorch.sampling.list_sampler import ListSampler
85
+ import botorch.version
86
+
87
+ if version.parse(botorch.version.version) < version.parse("0.8.0"):
88
+ # noinspection PyUnresolvedReferences
89
+ from botorch.fit import fit_gpytorch_model as fit_gpytorch_mll
90
+
91
+ def _get_sobol_qmc_normal_sampler(num_samples: int) -> SobolQMCNormalSampler:
92
+ # noinspection PyTypeChecker
93
+ return SobolQMCNormalSampler(num_samples)
94
+
95
+ else:
96
+ from botorch.fit import fit_gpytorch_mll
97
+
98
+ def _get_sobol_qmc_normal_sampler(num_samples: int) -> SobolQMCNormalSampler:
99
+ return SobolQMCNormalSampler(torch.Size((num_samples,)))
100
+
101
+ from gpytorch.mlls import ExactMarginalLogLikelihood
102
+ from gpytorch.mlls.sum_marginal_log_likelihood import SumMarginalLogLikelihood
103
+
104
+ from botorch.utils.multi_objective.box_decompositions import NondominatedPartitioning
105
+ from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
106
+ from botorch.utils.sampling import manual_seed
107
+ from botorch.utils.sampling import sample_simplex
108
+
109
+ from botorch.generation.gen import gen_candidates_scipy
110
+ from botorch.generation.gen import gen_candidates_torch
111
+
112
+ with try_import() as _imports_logei:
113
+ from botorch.acquisition.analytic import LogConstrainedExpectedImprovement
114
+ from botorch.acquisition.analytic import LogExpectedImprovement
115
+
116
+
117
+ with try_import() as _imports_qhvkg:
118
+ from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
119
+ qHypervolumeKnowledgeGradient,
120
+ )
121
+
122
+ from pyfemtet.opt.history import TrialState as PFTrialState
123
+ from pyfemtet.opt.exceptions import *
124
+ from pyfemtet.opt.optimizer.optuna_optimizer._optuna_attribute import OptunaAttribute
125
+ from pyfemtet.opt.optimizer.optuna_optimizer._pof_botorch.enable_nonlinear_constraint import (
126
+ NonlinearInequalityConstraints
127
+ )
128
+ from pyfemtet.opt.prediction._botorch_utils import *
129
+ from pyfemtet.logger import get_module_logger
130
+
131
+ # warnings to filter
132
+ from botorch.exceptions.warnings import InputDataWarning
133
+ from optuna.exceptions import ExperimentalWarning
134
+
135
+ if TYPE_CHECKING:
136
+ from pyfemtet.opt.optimizer import AbstractOptimizer
137
+
138
+ # noinspection PyTypeChecker
139
+ _logger = get_logger(False)
140
+
141
+ DEBUG = False
142
+ logger = get_module_logger('opt.PoFBoTorchSampler', DEBUG)
143
+
144
+ warnings.filterwarnings('ignore', category=InputDataWarning)
145
+ warnings.filterwarnings('ignore', category=ExperimentalWarning)
146
+
147
+ CandidateFunc = Callable[
148
+ [
149
+ torch.Tensor,
150
+ torch.Tensor,
151
+ torch.Tensor | None,
152
+ torch.Tensor,
153
+ torch.Tensor | None,
154
+ SingleTaskGP,
155
+ NonlinearInequalityConstraints | None,
156
+ 'PoFConfig',
157
+ float | str | None, # observation noise
158
+ 'PartialOptimizeACQFInput',
159
+ ],
160
+ tuple[torch.Tensor, SingleTaskGP],
161
+ ]
162
+
163
+
164
+ __all__ = [
165
+ 'PartialOptimizeACQFConfig',
166
+ 'PoFConfig',
167
+ 'PoFBoTorchSampler',
168
+ ]
169
+
170
+
171
def _validate_botorch_version_for_constrained_opt(func_name: str) -> None:
    """Raise ``ImportError`` when botorch is too old for constrained problems.

    Args:
        func_name: Name of the candidate function requiring botorch>=0.9.0;
            interpolated into the error message.

    Raises:
        ImportError: If the installed botorch version is below 0.9.0.
    """
    installed = version.parse(botorch.version.version)
    required = version.parse("0.9.0")
    if installed < required:
        raise ImportError(
            f"{func_name} requires botorch>=0.9.0 for constrained problems, but got "
            f"botorch={botorch.version.version}.\n"
            "Please run ``pip install botorch --upgrade``."
        )
178
+
179
+
180
+ def _get_constraint_funcs(n_constraints: int) -> list[Callable[[torch.Tensor], torch.Tensor]]:
181
+ return [lambda Z: Z[..., -n_constraints + i] for i in range(n_constraints)]
182
+
183
+
184
def log_sigmoid(X: torch.Tensor) -> torch.Tensor:
    """Compute ``log(sigmoid(X))`` element-wise, numerically stably.

    ``torch.log(torch.sigmoid(X))`` underflows to ``-inf`` once ``sigmoid(X)``
    rounds to 0 for large-negative ``X``; ``torch.nn.functional.logsigmoid``
    evaluates the same function without that underflow.

    Args:
        X: Input tensor.

    Returns:
        Tensor of the same shape as ``X`` holding ``log(sigmoid(X))``.
    """
    return torch.nn.functional.logsigmoid(X)
186
+
187
+
188
class PartialOptimizeACQFConfig:
    """User-tunable settings forwarded to ``optimize_acqf``.

    All arguments are keyword-only.

    Args:
        gen_candidates: Candidate-generator backend. Only ``'scipy'`` is
            supported; ``'torch'`` raises ``NotImplementedError`` and any
            other truthy value raises ``ValueError``.
        timeout_sec: Time limit for the acquisition optimization.
            Limitations: (1) it is not applied to the initial-condition
            search; (2) it is only checked in the per-iteration callback of
            ``scipy.optimize``, so a single long iteration can overrun it.
            Wrapping the objective to raise ``OptimizationTimeoutError`` (as
            in ``botorch/optim/utils/timeout.py``) would give finer-grained
            checks.
        method: ``scipy.optimize.minimize`` method — ``'COBYLA'``,
            ``'COBYQA'``, ``'SLSQP'`` or ``'trust-constr'``. Falls back to
            ``default_method``.
        scipy_minimize_kwargs: Extra method-specific options, e.g.
            ``{'maxiter': 200}`` (see ``scipy.optimize.show_options()``).
        constraint_enhancement: Constant safety margin for constraints.
            ``gen_candidates_scipy`` may propose solutions that violate the
            constraints within the tolerance (eps) of
            ``scipy.optimize.minimize``; the constraint change within eps can
            be neither predicted nor bounded, so a user-supplied constant is
            used instead. Defaults to ``0.``.
        constraint_scaling: Factor by which constraint violations are
            magnified; evaluating violations more severely mitigates the same
            tolerance problem. Falsy values fall back to ``1.``.
    """

    default_method = 'SLSQP'

    def __init__(
            self,
            *,
            gen_candidates: str = 'scipy',  # 'scipy' or 'torch'
            timeout_sec: float = None,
            method: str = None,
            scipy_minimize_kwargs: dict = None,
            constraint_enhancement: float = None,
            constraint_scaling: float = 1e6,
    ):
        # Falsy inputs fall back exactly as documented in the class docstring.
        self.constraint_enhancement = constraint_enhancement or 0.
        self.constraint_scaling = constraint_scaling or 1.

        chosen_method = method or self.default_method
        extra_scipy_kwargs = scipy_minimize_kwargs or {}

        generator = gen_candidates
        if generator:
            if generator == 'scipy':
                generator = gen_candidates_scipy
            elif generator == 'torch':
                # gen_candidates_torch is not wired up yet.
                raise NotImplementedError
            else:
                raise ValueError('`gen_candidates` must be "scipy".')

        self.kwargs = dict(
            gen_candidates=generator,
            options=dict(
                # scipy
                method=chosen_method,
                **extra_scipy_kwargs,  # method-specific; see show_options()
                # torch knobs (ExpMAStoppingCriterion, lr) would go here
            ),
            timeout_sec=timeout_sec,
        )
273
+
274
+
275
def _optimize_acqf_util(
        partial_optimize_acqf_kwargs: PartialOptimizeACQFConfig,
        botorch_nlc,
        acqf,
        bounds,
        original_options,
        original_q,
        original_num_restarts,
        original_raw_samples,
):
    """Run ``optimize_acqf`` with user overrides and optional parameter constraints.

    Args:
        partial_optimize_acqf_kwargs: user-side tuning knobs
            (see :class:`PartialOptimizeACQFConfig`).
        botorch_nlc: ``NonlinearInequalityConstraints`` or ``None``; when
            given, its kwargs are merged into the ``optimize_acqf`` call.
        acqf: acquisition function (already wrapped by ``acqf_patch_factory``).
        bounds: search-space bounds passed through to ``optimize_acqf``.
        original_options: default ``options`` dict the caller would have used.
        original_q: default number of candidates.
        original_num_restarts: default number of restarts.
        original_raw_samples: default number of raw samples.

    Returns:
        The candidates tensor returned by ``optimize_acqf``.
    """
    # Fix: copy so the caller's dict is not mutated in place.
    options = dict(original_options)

    # The ACQF patch already applies log-sigmoid, so the acquisition values
    # may safely be treated as non-negative.
    options.update({'nonnegative': True})

    # Pick up the user-side tuning arguments.
    kwargs = partial_optimize_acqf_kwargs.kwargs.copy()
    options.update(kwargs.pop('options'))

    # Add parameter constraints to the optimize_acqf search, if any.
    if botorch_nlc is not None:

        # kwargs needed to apply the parameter constraints
        nlc_kwargs = botorch_nlc.create_kwargs()
        q = nlc_kwargs.pop('q')
        batch_limit = nlc_kwargs.pop('options_batch_limit')

        # options needed to apply the parameter constraints
        options.update({"batch_limit": batch_limit})  # batch_limit must be 1
        options.update({'nonnegative': True})  # nonnegative must be True (safe regardless: the acqf is wrapped with log-sigmoid)

        candidates, _ = optimize_acqf(
            acq_function=acqf,
            bounds=bounds,
            q=q,
            num_restarts=original_num_restarts,  # =20,
            raw_samples=original_raw_samples,  # =1024,
            options=options,
            sequential=True,
            **nlc_kwargs,
            **kwargs,
        )

    # No parameter constraints.
    else:

        candidates, _ = optimize_acqf(
            acq_function=acqf,
            bounds=bounds,
            q=original_q,
            num_restarts=original_num_restarts,
            raw_samples=original_raw_samples,
            options=options,
            sequential=True,
            **kwargs,
        )

    return candidates
334
+
335
+
336
# noinspection PyUnusedLocal,PyIncorrectDocstring
@experimental_func("3.3.0")
def logei_candidates_func(
    train_x: torch.Tensor,
    train_obj: torch.Tensor,
    train_con: torch.Tensor | None,
    bounds: torch.Tensor,
    pending_x: torch.Tensor | None,
    model_c: SingleTaskGP,
    botorch_nlc: NonlinearInequalityConstraints | None,
    pof_config: 'PoFConfig',
    observation_noise: str | float | None,
    partial_optimize_acqf_kwargs: PartialOptimizeACQFConfig,
) -> tuple[torch.Tensor, SingleTaskGP]:
    """Log Expected Improvement (LogEI).

    The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
    with single-objective optimization.

    Args:
        train_x:
            Previous parameter configurations. A ``torch.Tensor`` of shape
            ``(n_trials, n_params)``. ``n_trials`` is the number of already observed trials
            and ``n_params`` is the number of parameters. ``n_params`` may be larger than the
            actual number of parameters if categorical parameters are included in the search
            space, since these parameters are one-hot encoded.
            Values are not normalized.
        train_obj:
            Previously observed objectives. A ``torch.Tensor`` of shape
            ``(n_trials, n_objectives)``. ``n_trials`` is identical to that of ``train_x``.
            ``n_objectives`` is the number of objectives. Observations are not normalized.
        train_con:
            Objective constraints. A ``torch.Tensor`` of shape ``(n_trials, n_constraints)``.
            ``n_trials`` is identical to that of ``train_x``. ``n_constraints`` is the number of
            constraints. A constraint is violated if strictly larger than 0. If no constraints are
            involved in the optimization, this argument will be :obj:`None`.
        bounds:
            Search space bounds. A ``torch.Tensor`` of shape ``(2, n_params)``. ``n_params`` is
            identical to that of ``train_x``. The first and the second rows correspond to the
            lower and upper bounds for each parameter respectively.
        pending_x:
            Pending parameter configurations. A ``torch.Tensor`` of shape
            ``(n_pending, n_params)``. ``n_pending`` is the number of the trials which are already
            suggested all their parameters but have not completed their evaluation, and
            ``n_params`` is identical to that of ``train_x``.
        model_c:
            pyfemtet extension — feasibility GP used to weight the acquisition
            function by the probability of feasibility.
        botorch_nlc:
            pyfemtet extension — optional nonlinear inequality (parameter)
            constraints forwarded to ``optimize_acqf``.
        pof_config:
            pyfemtet extension — PoF weighting settings.
        observation_noise:
            pyfemtet extension — observation noise setting forwarded to ``setup_gp``.
        partial_optimize_acqf_kwargs:
            pyfemtet extension — user-side knobs for ``optimize_acqf``.

    Returns:
        Next set of candidates (usually the return value of BoTorch's
        ``optimize_acqf``) and the fitted objective GP model.

    """

    # ===== unchanged from the upstream implementation: begin =====

    # We need botorch >=0.8.1 for LogExpectedImprovement.
    if not _imports_logei.is_successful():
        raise ImportError(
            "logei_candidates_func requires botorch >=0.8.1. "
            "Please upgrade botorch or use qei_candidates_func as candidates_func instead."
        )

    if train_obj.size(-1) != 1:
        raise ValueError("Objective may only contain single values with logEI.")
    n_constraints = train_con.size(1) if train_con is not None else 0
    if n_constraints > 0:
        assert train_con is not None
        train_y = torch.cat([train_obj, train_con], dim=-1)

        is_feas = (train_con <= 0).all(dim=-1)
        train_obj_feas = train_obj[is_feas]

        if train_obj_feas.numel() == 0:
            _logger.warning(
                "No objective values are feasible. Using 0 as the best objective in logEI."
            )
            # NOTE(review): the warning above says "0", but the fallback used
            # here is train_obj.min() — message and code disagree; confirm
            # which is intended.
            best_f = train_obj.min()
        else:
            best_f = train_obj_feas.max()

    else:
        train_y = train_obj
        best_f = train_obj.max()
    # ===== unchanged from the upstream implementation: end =====

    model = setup_gp(train_x, train_y, bounds, observation_noise)

    if n_constraints > 0:
        # Wrap constrained LogEI so its value is weighted by PoF.
        ACQF = acqf_patch_factory(
            LogConstrainedExpectedImprovement,
            is_log_acqf=True,
        )
        acqf = ACQF(
            model=model,
            best_f=best_f,
            objective_index=0,
            # model outputs 1..n_constraints must satisfy "value <= 0"
            constraints={i: (None, 0.0) for i in range(1, n_constraints + 1)},
        )
    else:
        ACQF = acqf_patch_factory(
            LogExpectedImprovement,
            is_log_acqf=True,
        )
        acqf = ACQF(
            model=model,
            best_f=best_f,
        )
    # Attach the feasibility model and PoF settings to the patched ACQF.
    acqf.set(model_c, pof_config)

    candidates = _optimize_acqf_util(
        partial_optimize_acqf_kwargs,
        botorch_nlc,
        acqf,
        bounds,
        original_options={"batch_limit": 5, "maxiter": 200},
        original_q=1,
        original_num_restarts=10,
        original_raw_samples=512,
    )

    return candidates.detach(), model
455
+
456
+
457
# noinspection PyUnusedLocal,PyIncorrectDocstring
@experimental_func("2.4.0")
def qei_candidates_func(
    train_x: torch.Tensor,
    train_obj: torch.Tensor,
    train_con: torch.Tensor | None,
    bounds: torch.Tensor,
    pending_x: torch.Tensor | None,
    model_c: SingleTaskGP,
    botorch_nlc: NonlinearInequalityConstraints | None,
    pof_config: 'PoFConfig',
    observation_noise: str | float | None,
    partial_optimize_acqf_kwargs: PartialOptimizeACQFConfig,
) -> tuple[torch.Tensor, SingleTaskGP]:
    """Quasi MC-based batch Expected Improvement (qEI).

    Args:
        train_x:
            Previous parameter configurations. A ``torch.Tensor`` of shape
            ``(n_trials, n_params)``. ``n_trials`` is the number of already observed trials
            and ``n_params`` is the number of parameters. ``n_params`` may be larger than the
            actual number of parameters if categorical parameters are included in the search
            space, since these parameters are one-hot encoded.
            Values are not normalized.
        train_obj:
            Previously observed objectives. A ``torch.Tensor`` of shape
            ``(n_trials, n_objectives)``. ``n_trials`` is identical to that of ``train_x``.
            ``n_objectives`` is the number of objectives. Observations are not normalized.
        train_con:
            Objective constraints. A ``torch.Tensor`` of shape ``(n_trials, n_constraints)``.
            ``n_trials`` is identical to that of ``train_x``. ``n_constraints`` is the number of
            constraints. A constraint is violated if strictly larger than 0. If no constraints are
            involved in the optimization, this argument will be :obj:`None`.
        bounds:
            Search space bounds. A ``torch.Tensor`` of shape ``(2, n_params)``. ``n_params`` is
            identical to that of ``train_x``. The first and the second rows correspond to the
            lower and upper bounds for each parameter respectively.
        pending_x:
            Pending parameter configurations. A ``torch.Tensor`` of shape
            ``(n_pending, n_params)``. ``n_pending`` is the number of the trials which are already
            suggested all their parameters but have not completed their evaluation, and
            ``n_params`` is identical to that of ``train_x``.
        model_c:
            pyfemtet extension — feasibility GP used to weight the acquisition
            function by the probability of feasibility.
        botorch_nlc:
            pyfemtet extension — optional nonlinear inequality (parameter)
            constraints forwarded to ``optimize_acqf``.
        pof_config:
            pyfemtet extension — PoF weighting settings.
        observation_noise:
            pyfemtet extension — observation noise setting forwarded to ``setup_gp``.
        partial_optimize_acqf_kwargs:
            pyfemtet extension — user-side knobs for ``optimize_acqf``.

    Returns:
        Next set of candidates (usually the return value of BoTorch's
        ``optimize_acqf``) and the fitted objective GP model.

    """

    # ===== unchanged from the upstream implementation: begin =====

    if train_obj.size(-1) != 1:
        raise ValueError("Objective may only contain single values with qEI.")
    if train_con is not None:
        _validate_botorch_version_for_constrained_opt("qei_candidates_func")
        train_y = torch.cat([train_obj, train_con], dim=-1)

        is_feas = (train_con <= 0).all(dim=-1)
        train_obj_feas = train_obj[is_feas]

        if train_obj_feas.numel() == 0:
            # TODO(hvy): Do not use 0 as the best observation.
            _logger.warning(
                "No objective values are feasible. Using 0 as the best objective in qEI."
            )
            best_f = torch.zeros(())
        else:
            best_f = train_obj_feas.max()

        n_constraints = train_con.size(1)
        # fix: local name was previously misspelled "additonal_qei_kwargs";
        # renamed for consistency with the sibling candidate functions.
        additional_qei_kwargs = {
            "objective": GenericMCObjective(lambda Z, X: Z[..., 0]),
            "constraints": _get_constraint_funcs(n_constraints),
        }
    else:
        train_y = train_obj

        best_f = train_obj.max()

        additional_qei_kwargs = {}

    # ===== unchanged from the upstream implementation: end =====

    # GP construction and fitting is delegated to setup_gp (replaces the
    # upstream inline SingleTaskGP + ExactMarginalLogLikelihood fitting).
    model = setup_gp(train_x, train_y, bounds, observation_noise)

    # Wrap qEI so its value is weighted by PoF.
    ACQF = acqf_patch_factory(
        qExpectedImprovement,
        is_log_acqf=False,
    )
    acqf = ACQF(
        model=model,
        best_f=best_f,
        sampler=_get_sobol_qmc_normal_sampler(256),
        X_pending=pending_x,
        **additional_qei_kwargs,
    )
    # Attach the feasibility model and PoF settings to the patched ACQF.
    acqf.set(model_c, pof_config)

    candidates = _optimize_acqf_util(
        partial_optimize_acqf_kwargs,
        botorch_nlc,
        acqf,
        bounds,
        original_options={"batch_limit": 5, "maxiter": 200},
        original_q=1,
        original_num_restarts=10,
        original_raw_samples=512,
    )

    return candidates.detach(), model
573
+
574
+
575
@experimental_func("2.4.0")
def qehvi_candidates_func(
    train_x: torch.Tensor,
    train_obj: torch.Tensor,
    train_con: torch.Tensor | None,
    bounds: torch.Tensor,
    pending_x: torch.Tensor | None,
    model_c: SingleTaskGP,
    botorch_nlc: NonlinearInequalityConstraints | None,
    pof_config: 'PoFConfig',
    observation_noise: str | float | None,
    partial_optimize_acqf_kwargs: PartialOptimizeACQFConfig,
) -> tuple[torch.Tensor, SingleTaskGP]:
    """Quasi MC-based batch Expected Hypervolume Improvement (qEHVI).

    The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
    with multi-objective optimization when the number of objectives is three or less.

    .. seealso::
        :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
        descriptions.

    pyfemtet extensions: ``model_c`` (feasibility GP), ``botorch_nlc``
    (parameter constraints for ``optimize_acqf``), ``pof_config`` (PoF
    settings), ``observation_noise`` (forwarded to ``setup_gp``) and
    ``partial_optimize_acqf_kwargs`` (knobs for ``optimize_acqf``). The
    fitted objective GP is returned alongside the candidates.
    """

    # ===== unchanged from the upstream implementation: begin =====
    n_objectives = train_obj.size(-1)

    if train_con is not None:
        train_y = torch.cat([train_obj, train_con], dim=-1)

        is_feas = (train_con <= 0).all(dim=-1)
        train_obj_feas = train_obj[is_feas]

        n_constraints = train_con.size(1)
        additional_qehvi_kwargs = {
            "objective": IdentityMCMultiOutputObjective(outcomes=list(range(n_objectives))),
            "constraints": _get_constraint_funcs(n_constraints),
        }
    else:
        train_y = train_obj

        train_obj_feas = train_obj

        additional_qehvi_kwargs = {}
    # ===== unchanged from the upstream implementation: end =====

    model = setup_gp(train_x, train_y, bounds, observation_noise)

    # Approximate box decomposition similar to Ax when the number of objectives is large.
    # https://github.com/pytorch/botorch/blob/36d09a4297c2a0ff385077b7fcdd5a9d308e40cc/botorch/acquisition/multi_objective/utils.py#L46-L63
    if n_objectives > 4:
        alpha = 10 ** (-8 + n_objectives)
    else:
        alpha = 0.0

    ref_point = train_obj.min(dim=0).values - 1e-8

    partitioning = NondominatedPartitioning(ref_point=ref_point, Y=train_obj_feas, alpha=alpha)

    ref_point_list = ref_point.tolist()

    # Wrap qEHVI so its value is weighted by PoF.
    ACQF = acqf_patch_factory(
        monte_carlo.qExpectedHypervolumeImprovement,
        is_log_acqf=False,
    )
    acqf = ACQF(
        model=model,
        ref_point=ref_point_list,
        partitioning=partitioning,
        sampler=_get_sobol_qmc_normal_sampler(256),
        X_pending=pending_x,
        **additional_qehvi_kwargs,
    )
    # Attach the feasibility model and PoF settings to the patched ACQF.
    acqf.set(model_c, pof_config)

    candidates = _optimize_acqf_util(
        partial_optimize_acqf_kwargs,
        botorch_nlc,
        acqf,
        bounds,
        original_options={"batch_limit": 5, "maxiter": 200, "nonnegative": True},
        original_q=1,
        original_num_restarts=20,
        original_raw_samples=1024,
    )

    return candidates.detach(), model
661
+
662
+
663
@experimental_func("3.5.0")
def ehvi_candidates_func(
    train_x: torch.Tensor,
    train_obj: torch.Tensor,
    train_con: torch.Tensor | None,
    bounds: torch.Tensor,
    pending_x: torch.Tensor | None,
    model_c: SingleTaskGP,
    botorch_nlc: NonlinearInequalityConstraints | None,
    pof_config: 'PoFConfig',
    observation_noise: str | float | None,
    partial_optimize_acqf_kwargs: PartialOptimizeACQFConfig,
) -> tuple[torch.Tensor, SingleTaskGP]:
    """Expected Hypervolume Improvement (EHVI).

    The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
    with multi-objective optimization without constraints.

    .. seealso::
        :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
        descriptions.

    pyfemtet extensions: ``model_c`` (feasibility GP), ``botorch_nlc``
    (parameter constraints for ``optimize_acqf``), ``pof_config`` (PoF
    settings), ``observation_noise`` (forwarded to ``setup_gp``) and
    ``partial_optimize_acqf_kwargs`` (knobs for ``optimize_acqf``). The
    fitted objective GP is returned alongside the candidates.

    Raises:
        ValueError: if ``train_con`` is given (constraints are unsupported here).
    """

    # ===== unchanged from the upstream implementation: begin =====
    n_objectives = train_obj.size(-1)
    if train_con is not None:
        raise ValueError("Constraints are not supported with ehvi_candidates_func.")

    train_y = train_obj
    # ===== unchanged from the upstream implementation: end =====

    # silence the unused-argument inspection
    # noinspection PyUnusedLocal
    pending_x = pending_x

    model = setup_gp(train_x, train_y, bounds, observation_noise)

    # Approximate box decomposition similar to Ax when the number of objectives is large.
    # https://github.com/pytorch/botorch/blob/36d09a4297c2a0ff385077b7fcdd5a9d308e40cc/botorch/acquisition/multi_objective/utils.py#L46-L63
    if n_objectives > 4:
        alpha = 10 ** (-8 + n_objectives)
    else:
        alpha = 0.0

    ref_point = train_obj.min(dim=0).values - 1e-8

    partitioning = NondominatedPartitioning(ref_point=ref_point, Y=train_y, alpha=alpha)

    ref_point_list = ref_point.tolist()

    # Wrap EHVI so its value is weighted by PoF.
    ACQF = acqf_patch_factory(
        ExpectedHypervolumeImprovement,
        is_log_acqf=False,
    )
    acqf = ACQF(
        model=model,
        ref_point=ref_point_list,
        partitioning=partitioning,
    )
    # Attach the feasibility model and PoF settings to the patched ACQF.
    acqf.set(model_c, pof_config)

    candidates = _optimize_acqf_util(
        partial_optimize_acqf_kwargs,
        botorch_nlc,
        acqf,
        bounds,
        original_options={"batch_limit": 5, "maxiter": 200},
        original_q=1,
        original_num_restarts=20,
        original_raw_samples=1024,
    )

    return candidates.detach(), model
736
+
737
+
738
@experimental_func("2.4.0")
def qparego_candidates_func(
    train_x: torch.Tensor,
    train_obj: torch.Tensor,
    train_con: torch.Tensor | None,
    bounds: torch.Tensor,
    pending_x: torch.Tensor | None,
    model_c: SingleTaskGP,
    botorch_nlc: NonlinearInequalityConstraints | None,
    pof_config: 'PoFConfig',
    observation_noise: str | float | None,
    partial_optimize_acqf_kwargs: PartialOptimizeACQFConfig,
) -> tuple[torch.Tensor, SingleTaskGP]:
    """Quasi MC-based extended ParEGO (qParEGO) for constrained multi-objective optimization.

    The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
    with multi-objective optimization when the number of objectives is larger than three.

    .. seealso::
        :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
        descriptions.

    pyfemtet extensions: ``model_c`` (feasibility GP), ``botorch_nlc``
    (parameter constraints for ``optimize_acqf``), ``pof_config`` (PoF
    settings), ``observation_noise`` (forwarded to ``setup_gp``) and
    ``partial_optimize_acqf_kwargs`` (knobs for ``optimize_acqf``). The
    fitted objective GP is returned alongside the candidates.
    """

    # ===== unchanged from the upstream implementation: begin =====
    n_objectives = train_obj.size(-1)

    weights = sample_simplex(n_objectives).squeeze()
    scalarization = get_chebyshev_scalarization(weights=weights, Y=train_obj)

    if train_con is not None:
        _validate_botorch_version_for_constrained_opt("qparego_candidates_func")
        train_y = torch.cat([train_obj, train_con], dim=-1)
        n_constraints = train_con.size(1)
        # for some reason the lambda here is not parsed correctly by the
        # inspector, so the inspection is suppressed
        # noinspection PyArgumentList
        objective = GenericMCObjective(lambda Z, X: scalarization(Z[..., :n_objectives]))
        additional_qei_kwargs = {
            "constraints": _get_constraint_funcs(n_constraints),
        }
    else:
        train_y = train_obj

        objective = GenericMCObjective(scalarization)
        additional_qei_kwargs = {}
    # ===== unchanged from the upstream implementation: end =====

    model = setup_gp(train_x, train_y, bounds, observation_noise)

    # Wrap qEI (on the scalarized objective) so its value is weighted by PoF.
    ACQF = acqf_patch_factory(
        qExpectedImprovement,
        is_log_acqf=False,
    )
    acqf = ACQF(
        model=model,
        best_f=objective(train_y).max(),
        sampler=_get_sobol_qmc_normal_sampler(256),
        objective=objective,
        X_pending=pending_x,
        **additional_qei_kwargs,
    )
    # Attach the feasibility model and PoF settings to the patched ACQF.
    acqf.set(model_c, pof_config)

    candidates = _optimize_acqf_util(
        partial_optimize_acqf_kwargs,
        botorch_nlc,
        acqf,
        bounds,
        original_options={"batch_limit": 5, "maxiter": 200},
        original_q=1,
        original_num_restarts=20,
        original_raw_samples=1024,
    )

    return candidates.detach(), model
812
+
813
+
814
def _get_default_candidates_func(
        n_objectives: int,
        has_constraint: bool,
        consider_running_trials: bool,
) -> CandidateFunc:
    """Select the default candidates function for the given problem shape.

    Mirrors optuna-integration's dispatch: many-objective (>3) problems use
    EHVI (unconstrained, no running trials) or qParEGO; multi-objective uses
    qEHVI; single-objective uses qEI when running trials are considered,
    otherwise LogEI.
    """
    many_objective = n_objectives > 3
    if many_objective and not (has_constraint or consider_running_trials):
        return ehvi_candidates_func
    if many_objective:
        return qparego_candidates_func
    if n_objectives > 1:
        return qehvi_candidates_func
    if consider_running_trials:
        return qei_candidates_func
    return logei_candidates_func
829
+
830
+
831
@dataclasses.dataclass
class PoFConfig:
    """Settings controlling how the Probability of Feasibility (PoF) is used."""

    # Multiply the acquisition function by the PoF predicted by the
    # feasibility GP model (see acqf_patch_factory).
    consider_pof: bool = True
    # Add explicit hard constraints as nonlinear inequality constraints of
    # optimize_acqf (see PoFBoTorchSampler.sample_relative).
    consider_explicit_hard_constraint: bool = True
    # Trial states treated as infeasible when training the feasibility model.
    # Non-default values trigger a deprecation-style warning in train_model_c.
    _states_to_consider_pof: list[PFTrialState] = \
        dataclasses.field(
            default_factory=lambda: [
                PFTrialState.hard_constraint_violation,
                *PFTrialState.get_hidden_constraint_violation_states()
            ]
        )
    # CDF threshold used when integrating the feasibility posterior,
    # or 'sample_mean' to use the mean of the feasibility training targets.
    feasibility_cdf_threshold: float | str = 0.5  # or 'sample_mean'
    # Observation noise of the feasibility model, forwarded to
    # setup_yvar_and_standardizer in train_model_c.
    feasibility_noise: float | str | None = None  # 'no' to fixed minimum noise
844
+
845
+
846
# TODO:
#   For log acquisition functions this used to be an addition to the base
#   acqf, but that yields -inf when the PoF is too small, so it was dropped
#   for now. Using clamp_min would be healthier if it does not cause
#   gradient problems.
def acqf_patch_factory(acqf_class, is_log_acqf=False):
    """Create a subclass of ``acqf_class`` whose value is weighted by PoF.

    The returned class evaluates ``-log_sigmoid(-base_acqf)`` (i.e.
    softplus of the base acquisition value, which is non-negative) and
    multiplies it by the probability of feasibility predicted by ``model_c``.

    Args:
        acqf_class: BoTorch acquisition function class to wrap.
        is_log_acqf: whether ``acqf_class`` is a log acquisition function.
            NOTE(review): currently unused in ``forward`` — confirm whether
            log acquisition functions need separate handling (see TODO above).
    """

    class ACQFWithPoF(acqf_class):

        model_c: SingleTaskGP  # feasibility GP, attached via set()
        config: PoFConfig      # PoF settings, attached via set()

        def set(self, model_c: SingleTaskGP, config: PoFConfig):
            # Attach the feasibility model and PoF settings after construction.
            self.model_c = model_c
            self.config = config

        def pof(self, X: torch.Tensor):
            """Probability of feasibility at ``X``.

            Assumes ``X`` carries a q-batch dimension of size 1 at dim 1,
            which is squeezed before the posterior call — TODO confirm.
            """

            # Build a normal distribution from the posterior mean and
            # standard deviation at the prediction points.
            _X = X.squeeze(1)
            posterior = self.model_c.posterior(_X)
            mean = posterior.mean
            sigma = posterior.variance.sqrt()
            normal = Normal(mean, sigma)

            # Decide the threshold.
            if isinstance(self.config.feasibility_cdf_threshold, float):
                threshold = self.config.feasibility_cdf_threshold
            elif isinstance(self.config.feasibility_cdf_threshold, str):
                if self.config.feasibility_cdf_threshold == 'sample_mean':
                    train_y: torch.Tensor = self.model_c.train_targets
                    threshold = train_y.mean()
                else:
                    raise ValueError
            else:
                raise ValueError

            # Integrate: PoF = P(predicted feasibility > threshold).
            if isinstance(threshold, float):
                threshold = torch.tensor(threshold, dtype=X.dtype, device=X.device)
            elif isinstance(threshold, torch.Tensor):
                pass
            else:
                raise ValueError
            cdf = 1. - normal.cdf(threshold)

            return cdf.squeeze(1)

        def forward(self, X: torch.Tensor) -> torch.Tensor:

            # ===== base acquisition function =====
            base_acqf: torch.Tensor = super().forward(X)

            # ===== pof =====
            pof = self.pof(X) if self.config.consider_pof else 1.

            # softplus(base_acqf) * PoF — keeps the weighted value positive.
            return -log_sigmoid(-base_acqf) * pof

    return ACQFWithPoF
904
+
905
+
906
@experimental_class("2.4.0")
class PoFBoTorchSampler(BoTorchSampler):
    """``BoTorchSampler`` extension weighting acquisition by Probability of Feasibility."""

    # observation noise of the objective GP ('no' to minimum fixed noise)
    observation_noise: float | str | None
    # PoF weighting settings
    pof_config: PoFConfig
    # set externally before sampling (asserted in sample_relative)
    pyfemtet_optimizer: AbstractOptimizer
    # user-side knobs forwarded to optimize_acqf
    partial_optimize_acqf_kwargs: PartialOptimizeACQFConfig
    # objective GP fitted by the latest sample_relative call (None until then)
    current_gp_model: SingleTaskGP | None

    _candidates_func: CandidateFunc | None
916
+
917
+ def __init__(
918
+ self,
919
+ *,
920
+ candidates_func: CandidateFunc = None,
921
+ constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None,
922
+ n_startup_trials: int = 10,
923
+ consider_running_trials: bool = False,
924
+ independent_sampler: BaseSampler | None = None,
925
+ seed: int | None = None,
926
+ device: torch.device | None = None,
927
+ # common
928
+ observation_noise: float | str | None = None, # 'no' to minimum fixed noise
929
+ # acqf_input
930
+ partial_optimize_acqf_kwargs: PartialOptimizeACQFConfig = None,
931
+ # pof_config
932
+ pof_config: PoFConfig = None,
933
+ ):
934
+ super().__init__(candidates_func=candidates_func, constraints_func=constraints_func,
935
+ n_startup_trials=n_startup_trials, consider_running_trials=consider_running_trials,
936
+ independent_sampler=independent_sampler, seed=seed, device=device)
937
+
938
+ self.partial_optimize_acqf_kwargs = \
939
+ partial_optimize_acqf_kwargs or PartialOptimizeACQFConfig()
940
+
941
+ self.observation_noise = observation_noise
942
+ self.pof_config = pof_config or PoFConfig()
943
+ self.current_gp_model = None
944
+
945
    def train_model_c(
        self,
        study,
        search_space,
        feature_dim_bound: list[float] = None,
        feature_param: float | numpy.ndarray = None
    ):
        """Train a ``SingleTaskGP`` that predicts trial feasibility (labels 1/0).

        Args:
            study: optuna Study to collect completed and pruned trials from.
            search_space: relative search space (dict of distributions).
            feature_dim_bound: optional ``[lb, ub]`` bounds of an extra leading
                feature dimension; must be given together with ``feature_param``.
            feature_param: optional value of the extra leading feature
                dimension, stored as column 0 of the training inputs.

        Returns:
            The fitted feasibility GP (``model_c``).
        """

        # ===== preparation =====
        # feature_dim_bound and feature_param must be given together.
        if feature_dim_bound is not None:
            assert feature_param is not None
        elif feature_param is not None:
            assert feature_dim_bound is not None

        trans = _SearchSpaceTransform(search_space, transform_0_1=False)

        # ===== organize trials =====
        # Trials that finished normally.
        completed_trials: list[FrozenTrial] = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))

        # Trials pruned due to hard constraint violation or model breakdown.
        pruned_trials: list[FrozenTrial] = study.get_trials(deepcopy=False, states=(TrialState.PRUNED,))

        # Soft constraints can be selected for PoF consideration, but should
        # not be selected by default, so warn on non-default settings.
        # (runtime message kept as-is; it says "changing a deprecated parameter")
        if self.pof_config._states_to_consider_pof != PoFConfig()._states_to_consider_pof:
            warnings.warn('非推奨のパラメータを変更しています。')

        # Classify trials into feasible / infeasible.
        feasible_trials: list[FrozenTrial] = []
        infeasible_trials: list[FrozenTrial] = []
        for trial in (completed_trials + pruned_trials):
            state: PFTrialState = OptunaAttribute.get_pf_state_from_trial_attr(
                trial.user_attrs[OptunaAttribute.main_fidelity_key()]
            )

            if state in self.pof_config._states_to_consider_pof:
                infeasible_trials.append(trial)

            else:
                # TODO: this is the intended invariant, but the check should
                #   only run during testing.
                assert state in (PFTrialState.succeeded, PFTrialState.soft_constraint_violation), \
                    (state, self.pof_config._states_to_consider_pof)

                feasible_trials.append(trial)

        # Merge (feasible first, then infeasible).
        trials: list[FrozenTrial] = feasible_trials + infeasible_trials

        # Corresponding feasibility labels (1: feasible, 0: infeasible).
        Feasibility = int
        corresponding_feas: list[Feasibility] = [1 for __ in feasible_trials] + [0 for __ in infeasible_trials]

        # ===== build bounds, params, feasibility =====
        # bounds: (d(+1), 2) shaped array
        # params: (n, d) shaped array
        # values: (n, m(=1)) shaped array
        bounds: numpy.ndarray = trans.bounds
        if feature_dim_bound is not None:
            bounds: numpy.ndarray = numpy.concatenate([[feature_dim_bound], bounds], axis=0)
        params: numpy.ndarray = numpy.empty((len(trials), bounds.shape[0]), dtype=numpy.float64)
        values: numpy.ndarray = numpy.empty((len(trials), 1), dtype=numpy.float64)

        # Unlike the upstream implementation, this model is not used to
        # propose the next point directly, so running trials can be ignored.
        feasibility: Feasibility
        for trial_idx, (trial, feasibility) in enumerate(zip(trials, corresponding_feas)):

            # train_x
            if feature_param is not None:
                params[trial_idx, 0] = feature_param
                params[trial_idx, 1:] = trans.transform(trial.params)

            else:
                params[trial_idx] = trans.transform(trial.params)

            # train_y
            values[trial_idx, 0] = feasibility

        # ===== build tensors =====
        # bounds: (2, d(+1)) shaped Tensor
        # train_x: (n, d) shaped Tensor
        # train_y: (n, m(=1)) shaped Tensor
        # train_y_var: (n, m(=1)) shaped Tensor or None
        bounds: torch.Tensor = torch.from_numpy(bounds).to(self._device).transpose(0, 1)
        train_x_c: torch.Tensor = torch.from_numpy(params).to(self._device)
        train_y_c: torch.Tensor = torch.from_numpy(values).to(self._device)
        # yvar
        train_yvar_c, standardizer = setup_yvar_and_standardizer(
            train_y_c, self.pof_config.feasibility_noise
        )

        # ===== build model_c =====
        # train_x and train_y must follow the upstream layout, otherwise they
        # no longer match the X argument of ACQF.forward(X).
        with manual_seed(self._seed):
            model_c = SingleTaskGP(
                train_X=train_x_c,
                train_Y=train_y_c,
                train_Yvar=train_yvar_c,
                input_transform=Normalize(d=train_x_c.shape[-1], bounds=bounds),
                outcome_transform=standardizer
            )
            mll_c = ExactMarginalLogLikelihood(
                model_c.likelihood,
                model_c
            )
            fit_gpytorch_mll(mll_c)

        return model_c
1054
+
1055
+ def sample_relative(
1056
+ self,
1057
+ study: Study,
1058
+ trial: FrozenTrial,
1059
+ search_space: dict[str, BaseDistribution],
1060
+ ) -> dict[str, Any]:
1061
+
1062
+ assert hasattr(self, 'pyfemtet_optimizer')
1063
+
1064
+ # ===== ここから変更なし =====
1065
+
1066
+ assert isinstance(search_space, dict)
1067
+
1068
+ if len(search_space) == 0:
1069
+ return {}
1070
+
1071
+ completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))
1072
+ running_trials = [
1073
+ t for t in study.get_trials(deepcopy=False, states=(TrialState.RUNNING,)) if t != trial
1074
+ ]
1075
+ trials = completed_trials + running_trials
1076
+
1077
+ n_trials = len(trials)
1078
+ n_completed_trials = len(completed_trials)
1079
+ if n_trials < self._n_startup_trials:
1080
+ return {}
1081
+
1082
+ trans = _SearchSpaceTransform(search_space)
1083
+ n_objectives = len(study.directions)
1084
+ values: numpy.ndarray | torch.Tensor = numpy.empty(
1085
+ (n_trials, n_objectives), dtype=numpy.float64
1086
+ )
1087
+ params: numpy.ndarray | torch.Tensor
1088
+ con: numpy.ndarray | torch.Tensor | None = None
1089
+ bounds: numpy.ndarray | torch.Tensor = trans.bounds
1090
+ params = numpy.empty((n_trials, trans.bounds.shape[0]), dtype=numpy.float64)
1091
+ for trial_idx, trial in enumerate(trials):
1092
+ if trial.state == TrialState.COMPLETE:
1093
+ params[trial_idx] = trans.transform(trial.params)
1094
+ assert len(study.directions) == len(trial.values)
1095
+ for obj_idx, (direction, value) in enumerate(zip(study.directions, trial.values)):
1096
+ assert value is not None
1097
+ if (
1098
+ direction == StudyDirection.MINIMIZE
1099
+ ): # BoTorch always assumes maximization.
1100
+ value *= -1
1101
+ values[trial_idx, obj_idx] = value
1102
+ if self._constraints_func is not None:
1103
+ constraints = study._storage.get_trial_system_attrs(trial._trial_id).get(
1104
+ _CONSTRAINTS_KEY
1105
+ )
1106
+ if constraints is not None:
1107
+ n_constraints = len(constraints)
1108
+
1109
+ if con is None:
1110
+ con = numpy.full(
1111
+ (n_completed_trials, n_constraints), numpy.nan, dtype=numpy.float64
1112
+ )
1113
+ elif n_constraints != con.shape[1]:
1114
+ raise RuntimeError(
1115
+ f"Expected {con.shape[1]} constraints "
1116
+ f"but received {n_constraints}."
1117
+ )
1118
+ con[trial_idx] = constraints
1119
+ elif trial.state == TrialState.RUNNING:
1120
+ if all(p in trial.params for p in search_space):
1121
+ params[trial_idx] = trans.transform(trial.params)
1122
+ else:
1123
+ params[trial_idx] = numpy.nan
1124
+ else:
1125
+ assert False, "trail.state must be TrialState.COMPLETE or TrialState.RUNNING."
1126
+
1127
+ if self._constraints_func is not None:
1128
+ if con is None:
1129
+ warnings.warn(
1130
+ "`constraints_func` was given but no call to it correctly computed "
1131
+ "constraints. Constraints passed to `candidates_func` will be `None`."
1132
+ )
1133
+ elif numpy.isnan(con).any():
1134
+ warnings.warn(
1135
+ "`constraints_func` was given but some calls to it did not correctly compute "
1136
+ "constraints. Constraints passed to `candidates_func` will contain NaN."
1137
+ )
1138
+
1139
+ values = torch.from_numpy(values).to(self._device)
1140
+ params = torch.from_numpy(params).to(self._device)
1141
+ if con is not None:
1142
+ con = torch.from_numpy(con).to(self._device)
1143
+ bounds = torch.from_numpy(bounds).to(self._device)
1144
+
1145
+ if con is not None:
1146
+ if con.dim() == 1:
1147
+ con.unsqueeze_(-1)
1148
+ bounds.transpose_(0, 1)
1149
+
1150
+ if self._candidates_func is None:
1151
+ self._candidates_func = _get_default_candidates_func(
1152
+ n_objectives=n_objectives,
1153
+ has_constraint=con is not None,
1154
+ consider_running_trials=self._consider_running_trials,
1155
+ )
1156
+
1157
+ completed_values = values[:n_completed_trials]
1158
+ completed_params = params[:n_completed_trials]
1159
+ if self._consider_running_trials:
1160
+ running_params = params[n_completed_trials:]
1161
+ running_params = running_params[~torch.isnan(running_params).any(dim=1)]
1162
+ else:
1163
+ running_params = None
1164
+
1165
+ # ===== ここまで変更なし =====
1166
+
1167
+ # TODO: ミーゼスなどの場合にこれらのシード固定法も試す
1168
+ # if self._seed is not None:
1169
+ # random.seed(self._seed)
1170
+ # numpy.random.seed(self._seed)
1171
+ # torch.manual_seed(self._seed)
1172
+ # torch.backends.cudnn.benchmark = False
1173
+ # torch.backends.cudnn.deterministic = True
1174
+
1175
+ with manual_seed(self._seed):
1176
+ # `manual_seed` makes the default candidates functions reproducible.
1177
+ # `SobolQMCNormalSampler`'s constructor has a `seed` argument, but its behavior is
1178
+ # deterministic when the BoTorch's seed is fixed.
1179
+
1180
+ # feasibility model
1181
+ model_c = self.train_model_c(study, search_space)
1182
+
1183
+ # hard constraints
1184
+ if self.pof_config.consider_explicit_hard_constraint:
1185
+ hard_constraints = [
1186
+ cns for cns in self.pyfemtet_optimizer.constraints.values()
1187
+ if cns.hard
1188
+ ]
1189
+ else:
1190
+ hard_constraints = []
1191
+
1192
+ if len(hard_constraints) > 0:
1193
+
1194
+ ce = self.partial_optimize_acqf_kwargs.constraint_enhancement
1195
+ cs = self.partial_optimize_acqf_kwargs.constraint_scaling
1196
+ botorch_nli_cons = NonlinearInequalityConstraints(
1197
+ hard_constraints,
1198
+ self.pyfemtet_optimizer,
1199
+ trans,
1200
+ ce,
1201
+ cs,
1202
+ )
1203
+ else:
1204
+ botorch_nli_cons = None
1205
+
1206
+ candidates, model = self._candidates_func(
1207
+ completed_params, completed_values, con, bounds, running_params,
1208
+ model_c, botorch_nli_cons, self.pof_config, self.observation_noise,
1209
+ self.partial_optimize_acqf_kwargs
1210
+ )
1211
+ if self._seed is not None:
1212
+ self._seed += 1
1213
+
1214
+ self.current_gp_model = model
1215
+
1216
+ if DEBUG:
1217
+ post = model_c.posterior(candidates)
1218
+ mean = post.mean.detach()
1219
+ sigma = post.variance.sqrt().detach()
1220
+ normal = Normal(mean, sigma)
1221
+ threshold = self.pof_config.feasibility_cdf_threshold
1222
+ threshold = torch.tensor(threshold)
1223
+ cdf = 1. - normal.cdf(threshold)
1224
+ logger.debug(f'PoF is {cdf}')
1225
+
1226
+ # ===== ここから変更なし =====
1227
+
1228
+ if not isinstance(candidates, torch.Tensor):
1229
+ raise TypeError("Candidates must be a torch.Tensor.")
1230
+ if candidates.dim() == 2:
1231
+ if candidates.size(0) != 1:
1232
+ raise ValueError(
1233
+ "Candidates batch optimization is not supported and the first dimension must "
1234
+ "have size 1 if candidates is a two-dimensional tensor. Actual: "
1235
+ f"{candidates.size()}."
1236
+ )
1237
+ # Batch size is one. Get rid of the batch dimension.
1238
+ candidates = candidates.squeeze(0)
1239
+ if candidates.dim() != 1:
1240
+ raise ValueError("Candidates must be one or two-dimensional.")
1241
+ if candidates.size(0) != bounds.size(1):
1242
+ raise ValueError(
1243
+ "Candidates size must match with the given bounds. Actual candidates: "
1244
+ f"{candidates.size(0)}, bounds: {bounds.size(1)}."
1245
+ )
1246
+
1247
+ # ===== ここまで変更なし =====
1248
+
1249
+ return trans.untransform(candidates.cpu().numpy())