pyfemtet 0.9.6__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (272)
  1. pyfemtet/__init__.py +6 -1
  2. pyfemtet/_i18n/1. make_pot_and_update_po.bat +8 -0
  3. pyfemtet/_i18n/2. build_mo.bat +5 -0
  4. pyfemtet/_i18n/__init__.py +4 -0
  5. pyfemtet/_i18n/babel.cfg +2 -0
  6. pyfemtet/_i18n/i18n.py +37 -0
  7. pyfemtet/_i18n/locales/ja/LC_MESSAGES/messages.mo +0 -0
  8. pyfemtet/_i18n/locales/ja/LC_MESSAGES/messages.po +1020 -0
  9. pyfemtet/_i18n/locales/messages.pot +987 -0
  10. pyfemtet/{_message → _i18n}/messages.py +128 -41
  11. pyfemtet/_util/closing.py +19 -0
  12. pyfemtet/_util/dask_util.py +89 -7
  13. pyfemtet/_util/df_util.py +46 -0
  14. pyfemtet/_util/excel_macro_util.py +8 -3
  15. pyfemtet/_util/excel_parse_util.py +43 -23
  16. pyfemtet/_util/femtet_access_inspection.py +120 -0
  17. pyfemtet/{_femtet_config_util/autosave.py → _util/femtet_autosave.py} +7 -0
  18. pyfemtet/_util/femtet_exit.py +105 -0
  19. pyfemtet/_util/femtet_version.py +20 -0
  20. pyfemtet/_util/helper.py +103 -0
  21. pyfemtet/_util/process_util.py +107 -0
  22. pyfemtet/_util/str_enum.py +44 -0
  23. pyfemtet/core.py +15 -47
  24. pyfemtet/dispatch_extensions/__init__.py +8 -11
  25. pyfemtet/dispatch_extensions/_impl.py +42 -198
  26. pyfemtet/logger/__init__.py +8 -1
  27. pyfemtet/logger/_impl.py +5 -6
  28. pyfemtet/opt/__init__.py +3 -17
  29. pyfemtet/opt/exceptions.py +45 -0
  30. pyfemtet/opt/femopt.py +621 -0
  31. pyfemtet/opt/history/__init__.py +11 -0
  32. pyfemtet/opt/history/_history.py +1416 -0
  33. pyfemtet/opt/history/_hypervolume.py +169 -0
  34. pyfemtet/opt/history/_optimality.py +79 -0
  35. pyfemtet/opt/interface/__init__.py +17 -24
  36. pyfemtet/opt/interface/_base_interface.py +222 -0
  37. pyfemtet/opt/interface/_excel_interface/__init__.py +3 -0
  38. pyfemtet/opt/interface/_excel_interface/debug-excel-interface.xlsm +0 -0
  39. pyfemtet/opt/interface/_excel_interface/excel_interface.py +997 -0
  40. pyfemtet/opt/interface/_femtet_interface/__init__.py +3 -0
  41. pyfemtet/opt/interface/{_femtet_parametric.py → _femtet_interface/_femtet_parametric.py} +20 -12
  42. pyfemtet/opt/interface/{_femtet.py → _femtet_interface/femtet_interface.py} +508 -353
  43. pyfemtet/opt/interface/_femtet_with_nx_interface/__init__.py +5 -0
  44. pyfemtet/opt/interface/_femtet_with_nx_interface/femtet_with_nx_interface.py +230 -0
  45. pyfemtet/opt/interface/_femtet_with_nx_interface/model1.prt +0 -0
  46. pyfemtet/opt/interface/_femtet_with_nx_interface/model1.x_t +98 -0
  47. pyfemtet/opt/interface/{_femtet_with_nx → _femtet_with_nx_interface}/update_model.py +1 -3
  48. pyfemtet/opt/interface/_femtet_with_solidworks/__init__.py +5 -0
  49. pyfemtet/opt/interface/_femtet_with_solidworks/femtet_with_solidworks_interface.py +142 -0
  50. pyfemtet/opt/interface/_solidworks_interface/__init__.py +5 -0
  51. pyfemtet/opt/interface/_solidworks_interface/solidworks_interface.py +227 -0
  52. pyfemtet/opt/interface/_surrogate_model_interface/__init__.py +8 -0
  53. pyfemtet/opt/interface/_surrogate_model_interface/base_surrogate_interface.py +150 -0
  54. pyfemtet/opt/interface/_surrogate_model_interface/botorch_interface.py +298 -0
  55. pyfemtet/opt/interface/_surrogate_model_interface/debug-pof-botorch.reccsv +18 -0
  56. pyfemtet/opt/interface/_with_excel_settings/__init__.py +61 -0
  57. pyfemtet/opt/interface/_with_excel_settings/with_excel_settings.py +134 -0
  58. pyfemtet/opt/meta_script/YAML_Generator.xlsm +0 -0
  59. pyfemtet/opt/meta_script/__main__.py +58 -36
  60. pyfemtet/opt/optimizer/__init__.py +7 -9
  61. pyfemtet/opt/optimizer/_base_optimizer.py +911 -0
  62. pyfemtet/opt/optimizer/optuna_optimizer/__init__.py +9 -0
  63. pyfemtet/opt/optimizer/optuna_optimizer/_optuna_attribute.py +63 -0
  64. pyfemtet/opt/optimizer/optuna_optimizer/_optuna_optimizer.py +796 -0
  65. pyfemtet/opt/optimizer/optuna_optimizer/_pof_botorch/__init__.py +7 -0
  66. pyfemtet/opt/optimizer/optuna_optimizer/_pof_botorch/debug-pof-botorch.reccsv +18 -0
  67. pyfemtet/opt/optimizer/optuna_optimizer/_pof_botorch/enable_nonlinear_constraint.py +244 -0
  68. pyfemtet/opt/optimizer/optuna_optimizer/_pof_botorch/pof_botorch_sampler.py +1249 -0
  69. pyfemtet/opt/optimizer/optuna_optimizer/wat_ex14_parametric_jp.femprj +0 -0
  70. pyfemtet/opt/optimizer/scipy_optimizer/__init__.py +1 -0
  71. pyfemtet/opt/optimizer/scipy_optimizer/_scipy_optimizer.py +383 -0
  72. pyfemtet/opt/prediction/__init__.py +7 -0
  73. pyfemtet/opt/prediction/_botorch_utils.py +133 -0
  74. pyfemtet/opt/prediction/_gpytorch_modules_extension.py +142 -0
  75. pyfemtet/opt/prediction/_helper.py +155 -0
  76. pyfemtet/opt/prediction/_model.py +118 -0
  77. pyfemtet/opt/problem/problem.py +304 -0
  78. pyfemtet/opt/problem/variable_manager/__init__.py +20 -0
  79. pyfemtet/opt/problem/variable_manager/_string_as_expression.py +115 -0
  80. pyfemtet/opt/problem/variable_manager/_variable_manager.py +295 -0
  81. pyfemtet/opt/visualization/history_viewer/__main__.py +5 -0
  82. pyfemtet/opt/visualization/{_base.py → history_viewer/_base_application.py} +18 -13
  83. pyfemtet/opt/visualization/history_viewer/_common_pages.py +150 -0
  84. pyfemtet/opt/visualization/{_complex_components → history_viewer/_complex_components}/alert_region.py +10 -5
  85. pyfemtet/opt/visualization/{_complex_components → history_viewer/_complex_components}/control_femtet.py +16 -13
  86. pyfemtet/opt/visualization/{_complex_components → history_viewer/_complex_components}/main_graph.py +117 -47
  87. pyfemtet/opt/visualization/{_complex_components → history_viewer/_complex_components}/pm_graph.py +159 -138
  88. pyfemtet/opt/visualization/history_viewer/_process_monitor/_application.py +173 -0
  89. pyfemtet/opt/visualization/history_viewer/_process_monitor/_pages.py +291 -0
  90. pyfemtet/opt/visualization/{_wrapped_components → history_viewer/_wrapped_components}/dbc.py +1 -1
  91. pyfemtet/opt/visualization/{_wrapped_components → history_viewer/_wrapped_components}/dcc.py +1 -1
  92. pyfemtet/opt/visualization/{_wrapped_components → history_viewer/_wrapped_components}/html.py +1 -1
  93. pyfemtet/opt/visualization/history_viewer/result_viewer/__main__.py +5 -0
  94. pyfemtet/opt/visualization/{result_viewer/application.py → history_viewer/result_viewer/_application.py} +6 -6
  95. pyfemtet/opt/visualization/{result_viewer/pages.py → history_viewer/result_viewer/_pages.py} +106 -82
  96. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08.csv +18 -0
  97. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08.db +0 -0
  98. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8.jpg +0 -0
  99. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8.log +45 -0
  100. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8.pdt +0 -0
  101. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_1.jpg +0 -0
  102. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_1.pdt +0 -0
  103. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_10.jpg +0 -0
  104. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_10.pdt +0 -0
  105. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_11.jpg +0 -0
  106. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_11.pdt +0 -0
  107. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_12.jpg +0 -0
  108. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_12.pdt +0 -0
  109. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_13.jpg +0 -0
  110. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_13.pdt +0 -0
  111. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_14.jpg +0 -0
  112. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_14.pdt +0 -0
  113. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_15.jpg +0 -0
  114. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_15.pdt +0 -0
  115. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_16.jpg +0 -0
  116. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_16.pdt +0 -0
  117. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_17.jpg +0 -0
  118. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_17.pdt +0 -0
  119. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_18.jpg +0 -0
  120. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_18.pdt +0 -0
  121. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_19.jpg +0 -0
  122. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_19.pdt +0 -0
  123. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_2.jpg +0 -0
  124. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_2.pdt +0 -0
  125. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_20.jpg +0 -0
  126. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_20.pdt +0 -0
  127. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_3.jpg +0 -0
  128. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_3.pdt +0 -0
  129. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.bgr +0 -0
  130. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.bnd +0 -0
  131. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.btr +0 -0
  132. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.jpg +0 -0
  133. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.mtl +0 -0
  134. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.pdt +0 -0
  135. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_4.prm +0 -0
  136. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_5.jpg +0 -0
  137. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_5.pdt +0 -0
  138. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_6.jpg +0 -0
  139. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_6.pdt +0 -0
  140. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_7.jpg +0 -0
  141. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_7.pdt +0 -0
  142. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_8.jpg +0 -0
  143. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_8.pdt +0 -0
  144. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_9.jpg +0 -0
  145. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.Results/ex8_trial_9.pdt +0 -0
  146. pyfemtet/opt/visualization/history_viewer/result_viewer/tutorial_files/tutorial_gau_ex08_parametric.femprj +0 -0
  147. pyfemtet/opt/visualization/plotter/main_figure_creator.py +536 -0
  148. pyfemtet/opt/visualization/plotter/pm_graph_creator.py +359 -0
  149. pyfemtet/opt/worker_status.py +120 -0
  150. {pyfemtet-0.9.6.dist-info → pyfemtet-1.0.0.dist-info}/METADATA +23 -24
  151. pyfemtet-1.0.0.dist-info/RECORD +172 -0
  152. pyfemtet-1.0.0.dist-info/entry_points.txt +3 -0
  153. pyfemtet/_femtet_config_util/exit.py +0 -59
  154. pyfemtet/_message/1. make_pot.bat +0 -11
  155. pyfemtet/_message/2. make_mo.bat +0 -6
  156. pyfemtet/_message/__init__.py +0 -5
  157. pyfemtet/_message/babel.cfg +0 -2
  158. pyfemtet/_message/locales/ja/LC_MESSAGES/messages.mo +0 -0
  159. pyfemtet/_message/locales/ja/LC_MESSAGES/messages.po +0 -570
  160. pyfemtet/_message/locales/messages.pot +0 -551
  161. pyfemtet/_warning.py +0 -87
  162. pyfemtet/brep/_impl.py +0 -18
  163. pyfemtet/opt/_femopt.py +0 -1007
  164. pyfemtet/opt/_femopt_core.py +0 -1169
  165. pyfemtet/opt/_test_utils/control_femtet.py +0 -39
  166. pyfemtet/opt/_test_utils/hyper_sphere.py +0 -24
  167. pyfemtet/opt/_test_utils/record_history.py +0 -130
  168. pyfemtet/opt/advanced_samples/excel_ui/(ref) original_project.femprj +0 -0
  169. pyfemtet/opt/advanced_samples/excel_ui/femtet-macro.xlsm +0 -0
  170. pyfemtet/opt/advanced_samples/excel_ui/pyfemtet-core.py +0 -291
  171. pyfemtet/opt/advanced_samples/excel_ui/test-pyfemtet-core.cmd +0 -22
  172. pyfemtet/opt/advanced_samples/restart/gal_ex13_parametric.femprj +0 -0
  173. pyfemtet/opt/advanced_samples/restart/gal_ex13_parametric_restart.py +0 -99
  174. pyfemtet/opt/advanced_samples/restart/gal_ex13_parametric_restart_jp.py +0 -102
  175. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_create_training_data.py +0 -60
  176. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_create_training_data_jp.py +0 -57
  177. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_optimize_with_surrogate.py +0 -100
  178. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_optimize_with_surrogate_jp.py +0 -90
  179. pyfemtet/opt/advanced_samples/surrogate_model/gal_ex13_parametric.femprj +0 -0
  180. pyfemtet/opt/interface/_base.py +0 -101
  181. pyfemtet/opt/interface/_excel_interface.py +0 -984
  182. pyfemtet/opt/interface/_femtet_excel.py +0 -141
  183. pyfemtet/opt/interface/_femtet_with_nx/__init__.py +0 -3
  184. pyfemtet/opt/interface/_femtet_with_nx/_interface.py +0 -178
  185. pyfemtet/opt/interface/_femtet_with_sldworks.py +0 -298
  186. pyfemtet/opt/interface/_surrogate/__init__.py +0 -5
  187. pyfemtet/opt/interface/_surrogate/_base.py +0 -129
  188. pyfemtet/opt/interface/_surrogate/_chaospy.py +0 -71
  189. pyfemtet/opt/interface/_surrogate/_singletaskgp.py +0 -71
  190. pyfemtet/opt/interface/_surrogate_excel.py +0 -102
  191. pyfemtet/opt/optimizer/_base.py +0 -376
  192. pyfemtet/opt/optimizer/_optuna/_botorch_patch/enable_nonlinear_constraint.py +0 -220
  193. pyfemtet/opt/optimizer/_optuna/_optuna.py +0 -434
  194. pyfemtet/opt/optimizer/_optuna/_pof_botorch.py +0 -1914
  195. pyfemtet/opt/optimizer/_scipy.py +0 -159
  196. pyfemtet/opt/optimizer/_scipy_scalar.py +0 -127
  197. pyfemtet/opt/optimizer/parameter.py +0 -113
  198. pyfemtet/opt/prediction/_base.py +0 -61
  199. pyfemtet/opt/prediction/single_task_gp.py +0 -119
  200. pyfemtet/opt/samples/femprj_sample/ParametricIF.femprj +0 -0
  201. pyfemtet/opt/samples/femprj_sample/ParametricIF.py +0 -29
  202. pyfemtet/opt/samples/femprj_sample/ParametricIF_test_result.reccsv +0 -13
  203. pyfemtet/opt/samples/femprj_sample/cad_ex01_NX.femprj +0 -0
  204. pyfemtet/opt/samples/femprj_sample/cad_ex01_NX.prt +0 -0
  205. pyfemtet/opt/samples/femprj_sample/cad_ex01_NX.py +0 -135
  206. pyfemtet/opt/samples/femprj_sample/cad_ex01_NX_test_result.reccsv +0 -23
  207. pyfemtet/opt/samples/femprj_sample/cad_ex01_SW.SLDPRT +0 -0
  208. pyfemtet/opt/samples/femprj_sample/cad_ex01_SW.femprj +0 -0
  209. pyfemtet/opt/samples/femprj_sample/cad_ex01_SW.py +0 -131
  210. pyfemtet/opt/samples/femprj_sample/cad_ex01_SW_test_result.reccsv +0 -23
  211. pyfemtet/opt/samples/femprj_sample/constrained_pipe.femprj +0 -0
  212. pyfemtet/opt/samples/femprj_sample/constrained_pipe.py +0 -96
  213. pyfemtet/opt/samples/femprj_sample/constrained_pipe_test_result.reccsv +0 -13
  214. pyfemtet/opt/samples/femprj_sample/gal_ex58_parametric.femprj +0 -0
  215. pyfemtet/opt/samples/femprj_sample/gal_ex58_parametric.py +0 -74
  216. pyfemtet/opt/samples/femprj_sample/gal_ex58_parametric_test_result.reccsv +0 -13
  217. pyfemtet/opt/samples/femprj_sample/gau_ex08_parametric.femprj +0 -0
  218. pyfemtet/opt/samples/femprj_sample/gau_ex08_parametric.py +0 -58
  219. pyfemtet/opt/samples/femprj_sample/gau_ex08_parametric_test_result.reccsv +0 -23
  220. pyfemtet/opt/samples/femprj_sample/gau_ex12_parametric.femprj +0 -0
  221. pyfemtet/opt/samples/femprj_sample/gau_ex12_parametric.py +0 -52
  222. pyfemtet/opt/samples/femprj_sample/her_ex40_parametric.femprj +0 -0
  223. pyfemtet/opt/samples/femprj_sample/her_ex40_parametric.py +0 -138
  224. pyfemtet/opt/samples/femprj_sample/her_ex40_parametric_test_result.reccsv +0 -18
  225. pyfemtet/opt/samples/femprj_sample/paswat_ex1_parametric.femprj +0 -0
  226. pyfemtet/opt/samples/femprj_sample/paswat_ex1_parametric.py +0 -60
  227. pyfemtet/opt/samples/femprj_sample/paswat_ex1_parametric_parallel.py +0 -61
  228. pyfemtet/opt/samples/femprj_sample/paswat_ex1_parametric_test_result.reccsv +0 -18
  229. pyfemtet/opt/samples/femprj_sample/wat_ex14_parametric.femprj +0 -0
  230. pyfemtet/opt/samples/femprj_sample/wat_ex14_parametric.py +0 -58
  231. pyfemtet/opt/samples/femprj_sample/wat_ex14_parametric_parallel.py +0 -58
  232. pyfemtet/opt/samples/femprj_sample/wat_ex14_parametric_test_result.reccsv +0 -18
  233. pyfemtet/opt/samples/femprj_sample_jp/ParametricIF_jp.femprj +0 -0
  234. pyfemtet/opt/samples/femprj_sample_jp/ParametricIF_jp.py +0 -29
  235. pyfemtet/opt/samples/femprj_sample_jp/cad_ex01_NX_jp.femprj +0 -0
  236. pyfemtet/opt/samples/femprj_sample_jp/cad_ex01_NX_jp.py +0 -129
  237. pyfemtet/opt/samples/femprj_sample_jp/cad_ex01_SW_jp.femprj +0 -0
  238. pyfemtet/opt/samples/femprj_sample_jp/cad_ex01_SW_jp.py +0 -125
  239. pyfemtet/opt/samples/femprj_sample_jp/constrained_pipe_jp.py +0 -93
  240. pyfemtet/opt/samples/femprj_sample_jp/gal_ex58_parametric_jp.femprj +0 -0
  241. pyfemtet/opt/samples/femprj_sample_jp/gal_ex58_parametric_jp.py +0 -70
  242. pyfemtet/opt/samples/femprj_sample_jp/gau_ex08_parametric_jp.femprj +0 -0
  243. pyfemtet/opt/samples/femprj_sample_jp/gau_ex08_parametric_jp.py +0 -57
  244. pyfemtet/opt/samples/femprj_sample_jp/gau_ex12_parametric_jp.py +0 -52
  245. pyfemtet/opt/samples/femprj_sample_jp/her_ex40_parametric_jp.femprj +0 -0
  246. pyfemtet/opt/samples/femprj_sample_jp/her_ex40_parametric_jp.py +0 -138
  247. pyfemtet/opt/samples/femprj_sample_jp/paswat_ex1_parametric_jp.femprj +0 -0
  248. pyfemtet/opt/samples/femprj_sample_jp/paswat_ex1_parametric_jp.py +0 -58
  249. pyfemtet/opt/samples/femprj_sample_jp/paswat_ex1_parametric_parallel_jp.py +0 -59
  250. pyfemtet/opt/samples/femprj_sample_jp/wat_ex14_parametric_jp.py +0 -56
  251. pyfemtet/opt/samples/femprj_sample_jp/wat_ex14_parametric_parallel_jp.py +0 -56
  252. pyfemtet/opt/visualization/_complex_components/main_figure_creator.py +0 -332
  253. pyfemtet/opt/visualization/_complex_components/pm_graph_creator.py +0 -201
  254. pyfemtet/opt/visualization/_process_monitor/application.py +0 -226
  255. pyfemtet/opt/visualization/_process_monitor/pages.py +0 -406
  256. pyfemtet/opt/visualization/_wrapped_components/__init__.py +0 -0
  257. pyfemtet/opt/visualization/result_viewer/__init__.py +0 -0
  258. pyfemtet-0.9.6.dist-info/RECORD +0 -158
  259. pyfemtet-0.9.6.dist-info/entry_points.txt +0 -3
  260. /pyfemtet/{_femtet_config_util → opt/problem}/__init__.py +0 -0
  261. /pyfemtet/{brep → opt/visualization/history_viewer}/__init__.py +0 -0
  262. /pyfemtet/opt/{_test_utils → visualization/history_viewer/_complex_components}/__init__.py +0 -0
  263. /pyfemtet/opt/{optimizer/_optuna → visualization/history_viewer/_process_monitor}/__init__.py +0 -0
  264. /pyfemtet/opt/{optimizer/_optuna/_botorch_patch → visualization/history_viewer/_wrapped_components}/__init__.py +0 -0
  265. /pyfemtet/opt/visualization/{_wrapped_components → history_viewer/_wrapped_components}/str_enum.py +0 -0
  266. /pyfemtet/opt/visualization/{result_viewer → history_viewer/result_viewer}/.gitignore +0 -0
  267. /pyfemtet/opt/visualization/{_complex_components → history_viewer/result_viewer}/__init__.py +0 -0
  268. /pyfemtet/opt/visualization/{_process_monitor → plotter}/__init__.py +0 -0
  269. /pyfemtet/opt/{samples/femprj_sample_jp/wat_ex14_parametric_jp.femprj → wat_ex14_parametric_jp.femprj} +0 -0
  270. {pyfemtet-0.9.6.dist-info → pyfemtet-1.0.0.dist-info}/LICENSE +0 -0
  271. {pyfemtet-0.9.6.dist-info → pyfemtet-1.0.0.dist-info}/LICENSE_THIRD_PARTY.txt +0 -0
  272. {pyfemtet-0.9.6.dist-info → pyfemtet-1.0.0.dist-info}/WHEEL +0 -0
pyfemtet/opt/optimizer/_optuna/_pof_botorch.py (deleted; entry 194 above)
@@ -1,1914 +0,0 @@
- """This algorithm is based on BoTorchSampler of optuna_integration[1] and the paper[2].
-
-
- ** LICENSE NOTICE OF [1] **
-
- MIT License
-
- Copyright (c) 2018 Preferred Networks, Inc.
- Copyright (c) 2024 Kazuma NAITO.
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in all
- copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- SOFTWARE.
-
-
- ** reference of [2] **
- LEE, H., et al. Optimization subject to hidden constraints via statistical
- emulation. Pacific Journal of Optimization, 2011, 7.3: 467-478
-
-
-
- """
-
-
- from __future__ import annotations
-
- # ===== constant =====
- _USE_FIXED_NOISE = True
- def _get_use_fixed_noise() -> bool:
-     return _USE_FIXED_NOISE
- def _set_use_fixed_noise(value: bool):
-     global _USE_FIXED_NOISE
-     _USE_FIXED_NOISE = value
-
- # ignore warnings
- import warnings
- from botorch.exceptions.warnings import InputDataWarning
- from optuna.exceptions import ExperimentalWarning
-
- warnings.filterwarnings('ignore', category=InputDataWarning)
- warnings.filterwarnings('ignore', category=ExperimentalWarning)
-
- from pyfemtet.opt.optimizer._optuna._botorch_patch.enable_nonlinear_constraint import NonlinearInequalityConstraints
-
- from collections.abc import Callable
- from collections.abc import Sequence
- from typing import Any
- import random
-
- from dataclasses import dataclass
-
- import numpy
- from optuna import logging
- from optuna._experimental import experimental_class
- from optuna._experimental import experimental_func
- from optuna._imports import try_import
- from optuna._transform import _SearchSpaceTransform
- from optuna.distributions import BaseDistribution
- from optuna.samplers import BaseSampler
- from optuna.samplers import RandomSampler
- from optuna.samplers._base import _CONSTRAINTS_KEY
- from optuna.samplers._base import _process_constraints_after_trial
- from optuna.search_space import IntersectionSearchSpace
- from optuna.study import Study
- from optuna.study import StudyDirection
- from optuna.trial import FrozenTrial
- from optuna.trial import TrialState
- from packaging import version
-
- with try_import() as _imports:
-     from botorch.acquisition.knowledge_gradient import qKnowledgeGradient
-     from botorch.acquisition.monte_carlo import qExpectedImprovement
-     from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement
-     from botorch.acquisition.multi_objective import monte_carlo
-     from botorch.acquisition.multi_objective.analytic import ExpectedHypervolumeImprovement
-     from botorch.acquisition.multi_objective.objective import (
-         FeasibilityWeightedMCMultiOutputObjective,
-     )
-     from botorch.acquisition.multi_objective.objective import IdentityMCMultiOutputObjective
-     from botorch.acquisition.objective import ConstrainedMCObjective
-     from botorch.acquisition.objective import GenericMCObjective
-     from botorch.models import ModelListGP
-     from botorch.models import SingleTaskGP
-     from botorch.models.transforms.outcome import Standardize
-     from botorch.optim import optimize_acqf
-     from botorch.sampling import SobolQMCNormalSampler
-     from botorch.sampling.list_sampler import ListSampler
-     import botorch.version
-
-     if version.parse(botorch.version.version) < version.parse("0.8.0"):
-         from botorch.fit import fit_gpytorch_model as fit_gpytorch_mll
-
-
-         def _get_sobol_qmc_normal_sampler(num_samples: int) -> SobolQMCNormalSampler:
-             return SobolQMCNormalSampler(num_samples)
-
-     else:
-         from botorch.fit import fit_gpytorch_mll
-
-
-         def _get_sobol_qmc_normal_sampler(num_samples: int) -> SobolQMCNormalSampler:
-             return SobolQMCNormalSampler(torch.Size((num_samples,)))
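
Note: the version gate above covers two botorch API changes around 0.8.0 — fit_gpytorch_model was superseded by fit_gpytorch_mll, and MC samplers switched from an integer sample count to a sample_shape given as a torch.Size. A minimal standalone sketch of the same guard, assuming only botorch, torch, and packaging are installed:

    import torch
    import botorch.version
    from packaging import version
    from botorch.sampling import SobolQMCNormalSampler

    # botorch >= 0.8 expects sample_shape as a torch.Size; older versions took an int
    if version.parse(botorch.version.version) < version.parse("0.8.0"):
        sampler = SobolQMCNormalSampler(256)
    else:
        sampler = SobolQMCNormalSampler(torch.Size((256,)))
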
-
- from gpytorch.mlls import ExactMarginalLogLikelihood
- from gpytorch.mlls.sum_marginal_log_likelihood import SumMarginalLogLikelihood
- import torch
-
- from botorch.utils.multi_objective.box_decompositions import NondominatedPartitioning
- from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
- from botorch.utils.sampling import manual_seed
- from botorch.utils.sampling import sample_simplex
- from botorch.utils.transforms import normalize
- from botorch.utils.transforms import unnormalize
-
- _logger = logging.get_logger(__name__)
-
- with try_import() as _imports_logei:
-     from botorch.acquisition.analytic import LogConstrainedExpectedImprovement
-     from botorch.acquisition.analytic import LogExpectedImprovement
-
- with try_import() as _imports_qhvkg:
-     from botorch.acquisition.multi_objective.hypervolume_knowledge_gradient import (
-         qHypervolumeKnowledgeGradient,
-     )
-
-
- def _validate_botorch_version_for_constrained_opt(func_name: str) -> None:
-     if version.parse(botorch.version.version) < version.parse("0.9.0"):
-         raise ImportError(
-             f"{func_name} requires botorch>=0.9.0 for constrained problems, but got "
-             f"botorch={botorch.version.version}.\n"
-             "Please run ``pip install botorch --upgrade``."
-         )
-
-
- def _get_constraint_funcs(n_constraints: int) -> list[Callable[["torch.Tensor"], "torch.Tensor"]]:
-     return [lambda Z: Z[..., -n_constraints + i] for i in range(n_constraints)]
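
A Python subtlety worth noting here: lambdas created in a comprehension close over the loop variable i by reference, so when they are eventually called, every one of them sees the final value of i and selects the same (last) column of Z. If one callable per constraint column is intended, the usual remedy is to bind i eagerly through a default argument; whether the shared-column behavior was intentional cannot be told from the diff alone. A hypothetical late-binding-safe variant, shown only for illustration:

    # hypothetical variant, not part of the original file: bind i at definition time
    def _get_constraint_funcs_bound(n_constraints: int):
        return [lambda Z, i=i: Z[..., -n_constraints + i] for i in range(n_constraints)]
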
-
-
- # helper function
- def symlog(x):
-     """Symmetric logarithm function.
-
-     Args:
-         x (torch.Tensor): Input tensor.
-
-     Returns:
-         torch.Tensor: The symlog of the input tensor.
-     """
-     # Apply the symlog transformation
-     return torch.where(
-         x >= 0,
-         torch.log(x + 1),
-         -torch.log(1 - x)
-     )
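
symlog is roughly linear near zero, logarithmic for large magnitudes, and odd-symmetric, which keeps values finite when acquisition values span many orders of magnitude. A quick numeric check of the definition above (plain torch, no pyfemtet imports):

    import torch

    x = torch.tensor([-10.0, -1.0, 0.0, 1.0, 10.0])
    y = torch.where(x >= 0, torch.log(x + 1), -torch.log(1 - x))
    # y == tensor([-2.3979, -0.6931, 0.0000, 0.6931, 2.3979]); symlog(-x) == -symlog(x)
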
-
-
- def get_minimum_YVar_and_standardizer(Y: torch.Tensor):
-     standardizer = Standardize(m=Y.shape[-1])
-     if _get_use_fixed_noise():
-         import gpytorch
-         min_noise = gpytorch.settings.min_fixed_noise.value(Y.dtype)
-
-         standardizer.forward(Y)  # required before calling untransform
-         _, YVar = standardizer.untransform(Y, min_noise * torch.ones_like(Y))
-
-     else:
-         YVar = None
-
-     return YVar, standardizer
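
The standardizer.forward(Y) call is what fits the Standardize transform (its per-output means and stdevs are computed on the first forward pass); only then can untransform map gpytorch's minimum fixed noise back from standardized space to the original outcome scale, producing a near-zero train_Yvar for SingleTaskGP. A minimal sketch of the same steps in isolation:

    import torch
    import gpytorch
    from botorch.models.transforms.outcome import Standardize

    Y = torch.randn(20, 2, dtype=torch.double)
    standardizer = Standardize(m=Y.shape[-1])
    standardizer.forward(Y)  # fit means/stdevs; untransform would fail before this
    min_noise = gpytorch.settings.min_fixed_noise.value(Y.dtype)
    _, YVar = standardizer.untransform(Y, min_noise * torch.ones_like(Y))  # noise on the original scale
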
-
-
- # factory that derives, from a base acquisition function class, a subclass with a PoF coefficient
- def acqf_patch_factory(acqf_class, pof_config=None):
-     """Create a class that adds the PoF coefficient computation to a base acqf class.
-
-     The returned class requires that a trained SingleTaskGP object
-     for evaluating feasibility be registered
-     via the set_model_c() method.
-     """
-     from torch.distributions import Normal
-
-     if pof_config is None:
-         pof_config = PoFConfig()
-
-     # optuna_integration.botorch.botorch.qExpectedImprovement
-     class ACQFWithPOF(acqf_class):
-         """Introduces PoF coefficients for a given class of acquisition functions."""
-         model_c: SingleTaskGP
-
-         enable_pof: bool = pof_config.enable_pof  # Specifies whether to take PoF into account.
-         gamma: float or torch.Tensor = pof_config.gamma  # Exponent applied to PoF. The larger it is, the more feasibility is emphasized; 0 means PoF is ignored.
-         threshold: float or torch.Tensor = pof_config.threshold  # Boundary value used when computing PoF via the cdf. Normally between 0 and 1, with 0.5 recommended; larger values emphasize feasibility more.
-
-         enable_log: bool = pof_config.enable_log  # Apply symlog to the base acquisition value.
-         enable_positive_only_pof: bool = pof_config.enable_positive_only_pof  # Multiply by PoF only where the base acquisition value is positive.
-
-         enable_dynamic_pof: bool = pof_config.enable_dynamic_pof  # Vary gamma dynamically. When True, gamma is ignored.
-         enable_dynamic_threshold: bool = pof_config.enable_dynamic_threshold  # Vary threshold dynamically. When True, threshold is ignored.
-
-         enable_repeat_penalty: bool = pof_config.enable_repeat_penalty  # Apply a penalty factor to the base acquisition value near already-sampled points.
-         _repeat_penalty: float or torch.Tensor = pof_config._repeat_penalty  # Internal variable used when enable_repeat_penalty is True.
-
-         enable_dynamic_repeat_penalty: bool = pof_config.enable_dynamic_repeat_penalty  # Strengthen the penalty factor when the same values are proposed repeatedly. When True, enable_repeat_penalty behaves as True.
-         repeat_watch_window: int = pof_config.repeat_watch_window  # When enable_dynamic_repeat_penalty is True, specifies how many recent proposals are examined to size the penalty.
-         repeat_watch_norm_distance: float = pof_config.repeat_watch_norm_distance  # Specifies how small the norm between proposed parameters must be, in the [0, 1]-normalized parameter space, for the penalty to be strengthened. Extreme values may cause numerical instability.
-         _repeat_penalty_gamma: float or torch.Tensor = pof_config._repeat_penalty_gamma  # Exponent for _repeat_penalty; an internal variable.
-
-
-         def set_model_c(self, model_c: SingleTaskGP):
-             self.model_c = model_c
-
-         def pof(self, X: torch.Tensor):
-             # build a normal distribution from the posterior mean and stddev at the prediction points
-             _X = X.squeeze(1)
-             posterior = self.model_c.posterior(_X)
-             mean = posterior.mean
-             sigma = posterior.variance.sqrt()
-
-             # integrate it
-             normal = Normal(mean, sigma)
-             # the closer this threshold is to the "true" value, the stricter it becomes;
-             # raising it past the "true" value probably leaves little difference
-             # between true and false
-             if isinstance(self.threshold, float):
-                 cdf = 1. - normal.cdf(torch.tensor(self.threshold, device='cpu').double())
-             else:
-                 cdf = 1. - normal.cdf(self.threshold)
-
-             return cdf.squeeze(1)
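
In other words, pof() evaluates PoF(x) = 1 - Φ((threshold - μ_c(x)) / σ_c(x)): the posterior probability, under the feasibility GP, that the output at x exceeds the threshold. A sanity check of the same formula with fixed numbers, independent of the class above:

    import torch
    from torch.distributions import Normal

    mean, sigma, threshold = torch.tensor(0.5), torch.tensor(0.1), torch.tensor(0.5)
    pof = 1.0 - Normal(mean, sigma).cdf(threshold)
    # pof == tensor(0.5000): when the threshold sits at the posterior mean, PoF is exactly 0.5
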
-
-         def forward(self, X: torch.Tensor) -> torch.Tensor:
-             # ===== base objective function =====
-             base_acqf = super().forward(X)
-
-             # ===== common processing for the various dynamic techniques =====
-             if (
-                 self.enable_dynamic_pof
-                 or self.enable_dynamic_threshold
-                 or self.enable_repeat_penalty
-                 or self.enable_dynamic_repeat_penalty
-             ):
-                 # ===== compute the normalized uncertainty =====
-                 _X = X.squeeze(1)  # batch x 1 x dim -> batch x dim
-                 # get the predictive stddev at X
-                 post = self.model_c.posterior(_X)
-                 current_stddev = post.variance.sqrt()  # batch x dim
-                 # get the stddev at the known (already sampled) points
-                 post = self.model_c.posterior(self.model_c.train_inputs[0])
-                 known_stddev = post.variance.sqrt().mean(dim=0)
-                 # known_stddev: stddev at already-sampled points, so it should be small.
-                 # current_stddev: stddev at unknown points, so it should be large; conversely, if small, the point is near a known one.
-                 # normalize by the known-point stddev and average down to one dimension
-                 buff = current_stddev / known_stddev
-                 norm_stddev = buff.mean(dim=1)  # (batch,), values of roughly 1 to 100
-
-                 # ===== dynamic gamma =====
-                 if self.enable_dynamic_pof:
-                     buff = 1000. / norm_stddev  # values of roughly 1 to 100
-                     buff = symlog(buff)  # values of roughly 1 to 4?
-                     self.gamma = buff
-
-                 # ===== dynamic threshold =====
-                 if self.enable_dynamic_threshold:
-                     # tends to be too strong?
-                     self.threshold = (1 - torch.sigmoid(norm_stddev - 1 - 4) / 2).unsqueeze(1)
-
-                 # ===== repeat penalty =====
-                 if self.enable_repeat_penalty:
-                     # the base penalty is the uncertainty:
-                     # small stddev
-                     # = near already-sampled points
-                     # = we want the acquisition value reduced there
-                     # = use the stddev itself as the factor
-                     self._repeat_penalty = norm_stddev
-
-                 # ===== dynamic repeat penalty =====
-                 if self.enable_dynamic_repeat_penalty:
-                     # the computational cost is low, so (re)define the base penalty just in case
-                     self._repeat_penalty = norm_stddev
-                     # nothing can be done while the sample count is at most watch_window
-                     if len(self.model_c.train_inputs[0]) > self.repeat_watch_window:
-                         # the smaller the spread of x over the most recent N samples,
-                         # the stronger the penalty over this entire optimize_scipy run
-                         monitor_window = self.model_c.train_inputs[0][-self.repeat_watch_window:]
-                         g = monitor_window.mean(dim=0)
-                         distance = torch.norm(monitor_window - g, dim=1).mean()
-                         self._repeat_penalty_gamma = self.repeat_watch_norm_distance / distance
-
-             # ===== PoF computation =====
-             if self.enable_pof:
-                 pof = self.pof(X)
-             else:
-                 pof = 1.
-
-             # ===== miscellaneous =====
-             if self.enable_log:
-                 base_acqf = symlog(base_acqf)
-
-             if self.enable_positive_only_pof:
-                 pof = torch.where(
-                     base_acqf >= 0,
-                     pof,
-                     torch.ones_like(pof)
-                 )
-
-             ret = -torch.log(1 - torch.sigmoid(base_acqf)) * pof ** self.gamma * self._repeat_penalty ** self._repeat_penalty_gamma
-             return ret
-
-     return ACQFWithPOF
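
The factory output is a drop-in subclass: construct it exactly like the base acquisition class, then attach the feasibility GP via set_model_c() before the first forward(). A minimal sketch, in which model and model_c (both fitted SingleTaskGP instances) and train_obj are assumed to exist:

    from botorch.acquisition.monte_carlo import qExpectedImprovement

    ACQF = acqf_patch_factory(qExpectedImprovement)   # default PoFConfig()
    acqf = ACQF(model=model, best_f=train_obj.max())  # same constructor as the base class
    acqf.set_model_c(model_c)                         # required before evaluating acqf(X)
    # acqf(X) now returns the sigmoid-log-transformed base value weighted by PoF ** gamma
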
-
-
- # noinspection PyIncorrectDocstring
- @experimental_func("3.3.0")
- def logei_candidates_func(
-     train_x: "torch.Tensor",
-     train_obj: "torch.Tensor",
-     train_con: "torch.Tensor" | None,
-     bounds: "torch.Tensor",
-     pending_x: "torch.Tensor" | None,
-     model_c: "SingleTaskGP",
-     _constraints,
-     _study,
-     _opt,
-     pof_config,
- ) -> "torch.Tensor":
-     """Log Expected Improvement (LogEI).
-
-     The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
-     with single-objective optimization.
-
-     Args:
-         train_x:
-             Previous parameter configurations. A ``torch.Tensor`` of shape
-             ``(n_trials, n_params)``. ``n_trials`` is the number of already observed trials
-             and ``n_params`` is the number of parameters. ``n_params`` may be larger than the
-             actual number of parameters if categorical parameters are included in the search
-             space, since these parameters are one-hot encoded.
-             Values are not normalized.
-         train_obj:
-             Previously observed objectives. A ``torch.Tensor`` of shape
-             ``(n_trials, n_objectives)``. ``n_trials`` is identical to that of ``train_x``.
-             ``n_objectives`` is the number of objectives. Observations are not normalized.
-         train_con:
-             Objective constraints. A ``torch.Tensor`` of shape ``(n_trials, n_constraints)``.
-             ``n_trials`` is identical to that of ``train_x``. ``n_constraints`` is the number of
-             constraints. A constraint is violated if strictly larger than 0. If no constraints are
-             involved in the optimization, this argument will be :obj:`None`.
-         bounds:
-             Search space bounds. A ``torch.Tensor`` of shape ``(2, n_params)``. ``n_params`` is
-             identical to that of ``train_x``. The first and the second rows correspond to the
-             lower and upper bounds for each parameter respectively.
-         pending_x:
-             Pending parameter configurations. A ``torch.Tensor`` of shape
-             ``(n_pending, n_params)``. ``n_pending`` is the number of the trials which are already
-             suggested all their parameters but have not completed their evaluation, and
-             ``n_params`` is identical to that of ``train_x``.
-         model_c:
-             Feasibility model.
-
-     Returns:
-         Next set of candidates. Usually the return value of BoTorch's ``optimize_acqf``.
-
-     """
-
-     # We need botorch >=0.8.1 for LogExpectedImprovement.
-     if not _imports_logei.is_successful():
-         raise ImportError(
-             "logei_candidates_func requires botorch >=0.8.1. "
-             "Please upgrade botorch or use qei_candidates_func as candidates_func instead."
-         )
-
-     if train_obj.size(-1) != 1:
-         raise ValueError("Objective may only contain single values with logEI.")
-     n_constraints = train_con.size(1) if train_con is not None else 0
-     if n_constraints > 0:
-         assert train_con is not None
-         train_y = torch.cat([train_obj, train_con], dim=-1)
-
-         is_feas = (train_con <= 0).all(dim=-1)
-         train_obj_feas = train_obj[is_feas]
-
-         if train_obj_feas.numel() == 0:
-             _logger.warning(
-                 "No objective values are feasible. Using 0 as the best objective in logEI."
-             )
-             best_f = train_obj.min()
-         else:
-             best_f = train_obj_feas.max()
-
-     else:
-         train_y = train_obj
-         best_f = train_obj.max()
-
-     train_x = normalize(train_x, bounds=bounds)
-
-     train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
-
-     model = SingleTaskGP(
-         train_x,
-         train_y,
-         train_Yvar=train_yvar,
-         outcome_transform=standardizer,
-     )
-     mll = ExactMarginalLogLikelihood(model.likelihood, model)
-     fit_gpytorch_mll(mll)
-     if n_constraints > 0:
-         ACQF = acqf_patch_factory(LogConstrainedExpectedImprovement, pof_config)
-         acqf = ACQF(
-             model=model,
-             best_f=best_f,
-             objective_index=0,
-             constraints={i: (None, 0.0) for i in range(1, n_constraints + 1)},
-         )
-     else:
-         ACQF = acqf_patch_factory(LogExpectedImprovement, pof_config)
-         acqf = ACQF(
-             model=model,
-             best_f=best_f,
-         )
-     acqf.set_model_c(model_c)
-
-     standard_bounds = torch.zeros_like(bounds)
-     standard_bounds[1] = 1
-
-     # Add parameter constraints to the optimize_acqf search.
-     if len(_constraints) > 0:
-         nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
-
-         # 1, batch_limit, nonlinear_..., ic_generator
-         kwargs = nc.create_kwargs()
-         q = kwargs.pop('q')
-         batch_limit = kwargs.pop('options')["batch_limit"]
-
-         candidates, _ = optimize_acqf(
-             acq_function=acqf,
-             bounds=standard_bounds,
-             q=q,
-             num_restarts=10,
-             raw_samples=512,
-             options={"batch_limit": batch_limit, "maxiter": 200},
-             sequential=True,
-             **kwargs
-         )
-
-     else:
-         candidates, _ = optimize_acqf(
-             acq_function=acqf,
-             bounds=standard_bounds,
-             q=1,
-             num_restarts=10,
-             raw_samples=512,
-             options={"batch_limit": 5, "maxiter": 200},
-             sequential=True,
-         )
-
-     candidates = unnormalize(candidates.detach(), bounds=bounds)
-
-     return candidates
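
All of the candidates functions in this file share the same tensor contract, mirroring optuna_integration's BoTorchSampler candidates_func signature extended with the feasibility model (model_c) and pyfemtet's constraint arguments. A shape-only sketch of that contract (dummy data; the commented call assumes a fitted model_c and a live study/optimizer pair constructed elsewhere):

    import torch

    n_trials, n_params = 8, 3
    train_x = torch.rand(n_trials, n_params, dtype=torch.double)  # observed parameters, unnormalized
    train_obj = torch.rand(n_trials, 1, dtype=torch.double)       # single objective for (log)EI
    bounds = torch.stack([torch.zeros(n_params), torch.ones(n_params)]).double()  # (2, n_params)
    # candidates = logei_candidates_func(train_x, train_obj, None, bounds, None,
    #                                    model_c, _constraints=[], _study=study, _opt=opt, pof_config=None)
    # -> shape (1, n_params), already mapped back to the unnormalized parameter space
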
475
-
476
-
477
- # noinspection PyIncorrectDocstring
478
- @experimental_func("2.4.0")
479
- def qei_candidates_func(
480
- train_x: "torch.Tensor",
481
- train_obj: "torch.Tensor",
482
- train_con: "torch.Tensor" | None,
483
- bounds: "torch.Tensor",
484
- pending_x: "torch.Tensor" | None,
485
- model_c: "SingleTaskGP",
486
- _constraints,
487
- _study,
488
- _opt,
489
- pof_config,
490
- ) -> "torch.Tensor":
491
- """Quasi MC-based batch Expected Improvement (qEI).
492
-
493
- Args:
494
- train_x:
495
- Previous parameter configurations. A ``torch.Tensor`` of shape
496
- ``(n_trials, n_params)``. ``n_trials`` is the number of already observed trials
497
- and ``n_params`` is the number of parameters. ``n_params`` may be larger than the
498
- actual number of parameters if categorical parameters are included in the search
499
- space, since these parameters are one-hot encoded.
500
- Values are not normalized.
501
- train_obj:
502
- Previously observed objectives. A ``torch.Tensor`` of shape
503
- ``(n_trials, n_objectives)``. ``n_trials`` is identical to that of ``train_x``.
504
- ``n_objectives`` is the number of objectives. Observations are not normalized.
505
- train_con:
506
- Objective constraints. A ``torch.Tensor`` of shape ``(n_trials, n_constraints)``.
507
- ``n_trials`` is identical to that of ``train_x``. ``n_constraints`` is the number of
508
- constraints. A constraint is violated if strictly larger than 0. If no constraints are
509
- involved in the optimization, this argument will be :obj:`None`.
510
- bounds:
511
- Search space bounds. A ``torch.Tensor`` of shape ``(2, n_params)``. ``n_params`` is
512
- identical to that of ``train_x``. The first and the second rows correspond to the
513
- lower and upper bounds for each parameter respectively.
514
- pending_x:
515
- Pending parameter configurations. A ``torch.Tensor`` of shape
516
- ``(n_pending, n_params)``. ``n_pending`` is the number of the trials which are already
517
- suggested all their parameters but have not completed their evaluation, and
518
- ``n_params`` is identical to that of ``train_x``.
519
- model_c:
520
- Feasibility model.
521
- Returns:
522
- Next set of candidates. Usually the return value of BoTorch's ``optimize_acqf``.
523
-
524
- """
525
-
526
- if train_obj.size(-1) != 1:
527
- raise ValueError("Objective may only contain single values with qEI.")
528
- if train_con is not None:
529
- _validate_botorch_version_for_constrained_opt("qei_candidates_func")
530
- train_y = torch.cat([train_obj, train_con], dim=-1)
531
-
532
- is_feas = (train_con <= 0).all(dim=-1)
533
- train_obj_feas = train_obj[is_feas]
534
-
535
- if train_obj_feas.numel() == 0:
536
- # TODO(hvy): Do not use 0 as the best observation.
537
- _logger.warning(
538
- "No objective values are feasible. Using 0 as the best objective in qEI."
539
- )
540
- best_f = torch.zeros(())
541
- else:
542
- best_f = train_obj_feas.max()
543
-
544
- n_constraints = train_con.size(1)
545
- additonal_qei_kwargs = {
546
- "objective": GenericMCObjective(lambda Z, X: Z[..., 0]),
547
- "constraints": _get_constraint_funcs(n_constraints),
548
- }
549
- else:
550
- train_y = train_obj
551
-
552
- best_f = train_obj.max()
553
-
554
- additonal_qei_kwargs = {}
555
-
556
- train_x = normalize(train_x, bounds=bounds)
557
- if pending_x is not None:
558
- pending_x = normalize(pending_x, bounds=bounds)
559
-
560
- train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
561
-
562
- model = SingleTaskGP(
563
- train_x,
564
- train_y,
565
- train_Yvar=train_yvar,
566
- outcome_transform=standardizer,
567
- )
568
- mll = ExactMarginalLogLikelihood(model.likelihood, model)
569
- fit_gpytorch_mll(mll)
570
-
571
- ACQF = acqf_patch_factory(qExpectedImprovement, pof_config)
572
- acqf = ACQF(
573
- model=model,
574
- best_f=best_f,
575
- sampler=_get_sobol_qmc_normal_sampler(256),
576
- X_pending=pending_x,
577
- **additonal_qei_kwargs,
578
- )
579
- acqf.set_model_c(model_c)
580
-
581
- standard_bounds = torch.zeros_like(bounds)
582
- standard_bounds[1] = 1
583
-
584
- if len(_constraints) > 0:
585
- nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
586
-
587
- # 1, batch_limit, nonlinear_..., ic_generator
588
- kwargs = nc.create_kwargs()
589
- q = kwargs.pop('q')
590
- batch_limit = kwargs.pop('options')["batch_limit"]
591
-
592
- candidates, _ = optimize_acqf(
593
- acq_function=acqf,
594
- bounds=standard_bounds,
595
- q=q,
596
- num_restarts=10,
597
- raw_samples=512,
598
- options={"batch_limit": batch_limit, "maxiter": 200},
599
- sequential=True,
600
- **kwargs
601
- )
602
-
603
- else:
604
-
605
- candidates, _ = optimize_acqf(
606
- acq_function=acqf,
607
- bounds=standard_bounds,
608
- q=1,
609
- num_restarts=10,
610
- raw_samples=512,
611
- options={"batch_limit": 5, "maxiter": 200},
612
- sequential=True,
613
- )
614
-
615
- candidates = unnormalize(candidates.detach(), bounds=bounds)
616
-
617
- return candidates
618
-
619
-
620
- # noinspection PyIncorrectDocstring
621
- @experimental_func("3.3.0")
622
- def qnei_candidates_func(
623
- train_x: "torch.Tensor",
624
- train_obj: "torch.Tensor",
625
- train_con: "torch.Tensor" | None,
626
- bounds: "torch.Tensor",
627
- pending_x: "torch.Tensor" | None,
628
- model_c: "SingleTaskGP",
629
- _constraints,
630
- _study,
631
- _opt,
632
- pof_config,
633
- ) -> "torch.Tensor":
634
- """Quasi MC-based batch Noisy Expected Improvement (qNEI).
635
-
636
- This function may perform better than qEI (`qei_candidates_func`) when
637
- the evaluated values of objective function are noisy.
638
-
639
- .. seealso::
640
- :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
641
- descriptions.
642
- """
643
- if train_obj.size(-1) != 1:
644
- raise ValueError("Objective may only contain single values with qNEI.")
645
- if train_con is not None:
646
- _validate_botorch_version_for_constrained_opt("qnei_candidates_func")
647
- train_y = torch.cat([train_obj, train_con], dim=-1)
648
-
649
- n_constraints = train_con.size(1)
650
- additional_qnei_kwargs = {
651
- "objective": GenericMCObjective(lambda Z, X: Z[..., 0]),
652
- "constraints": _get_constraint_funcs(n_constraints),
653
- }
654
- else:
655
- train_y = train_obj
656
-
657
- additional_qnei_kwargs = {}
658
-
659
- train_x = normalize(train_x, bounds=bounds)
660
- if pending_x is not None:
661
- pending_x = normalize(pending_x, bounds=bounds)
662
-
663
- train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
664
-
665
- model = SingleTaskGP(
666
- train_x,
667
- train_y,
668
- train_Yvar=train_yvar,
669
- outcome_transform=standardizer,
670
- )
671
- mll = ExactMarginalLogLikelihood(model.likelihood, model)
672
- fit_gpytorch_mll(mll)
673
-
674
- ACQF = acqf_patch_factory(qNoisyExpectedImprovement, pof_config)
675
- acqf = ACQF(
676
- model=model,
677
- X_baseline=train_x,
678
- sampler=_get_sobol_qmc_normal_sampler(256),
679
- X_pending=pending_x,
680
- **additional_qnei_kwargs,
681
- )
682
- acqf.set_model_c(model_c)
683
-
684
- standard_bounds = torch.zeros_like(bounds)
685
- standard_bounds[1] = 1
686
-
687
- # optimize_acqf の探索に parameter constraints を追加します。
688
- if len(_constraints) > 0:
689
- nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
690
-
691
- # 1, batch_limit, nonlinear_..., ic_generator
692
- kwargs = nc.create_kwargs()
693
- q = kwargs.pop('q')
694
- batch_limit = kwargs.pop('options')["batch_limit"]
695
-
696
- candidates, _ = optimize_acqf(
697
- acq_function=acqf,
698
- bounds=standard_bounds,
699
- q=q,
700
- num_restarts=10,
701
- raw_samples=512,
702
- options={"batch_limit": batch_limit, "maxiter": 200},
703
- sequential=True,
704
- **kwargs
705
- )
706
-
707
- else:
708
- candidates, _ = optimize_acqf(
709
- acq_function=acqf,
710
- bounds=standard_bounds,
711
- q=1,
712
- num_restarts=10,
713
- raw_samples=512,
714
- options={"batch_limit": 5, "maxiter": 200},
715
- sequential=True,
716
- )
717
-
718
- candidates = unnormalize(candidates.detach(), bounds=bounds)
719
-
720
- return candidates
721
-
722
-
723
- # noinspection PyIncorrectDocstring
724
- @experimental_func("2.4.0")
725
- def qehvi_candidates_func(
726
- train_x: "torch.Tensor",
727
- train_obj: "torch.Tensor",
728
- train_con: "torch.Tensor" | None,
729
- bounds: "torch.Tensor",
730
- pending_x: "torch.Tensor" | None,
731
- model_c: "SingleTaskGP",
732
- _constraints,
733
- _study,
734
- _opt,
735
- pof_config,
736
- ) -> "torch.Tensor":
737
- """Quasi MC-based batch Expected Hypervolume Improvement (qEHVI).
738
-
739
- The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
740
- with multi-objective optimization when the number of objectives is three or less.
741
-
742
- .. seealso::
743
- :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
744
- descriptions.
745
- """
746
-
747
- n_objectives = train_obj.size(-1)
748
-
749
- if train_con is not None:
750
- train_y = torch.cat([train_obj, train_con], dim=-1)
751
-
752
- is_feas = (train_con <= 0).all(dim=-1)
753
- train_obj_feas = train_obj[is_feas]
754
-
755
- n_constraints = train_con.size(1)
756
- additional_qehvi_kwargs = {
757
- "objective": IdentityMCMultiOutputObjective(outcomes=list(range(n_objectives))),
758
- "constraints": _get_constraint_funcs(n_constraints),
759
- }
760
- else:
761
- train_y = train_obj
762
-
763
- train_obj_feas = train_obj
764
-
765
- additional_qehvi_kwargs = {}
766
-
767
- train_x = normalize(train_x, bounds=bounds)
768
- if pending_x is not None:
769
- pending_x = normalize(pending_x, bounds=bounds)
770
-
771
- train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
772
-
773
- model = SingleTaskGP(
774
- train_x,
775
- train_y,
776
- train_Yvar=train_yvar,
777
- outcome_transform=standardizer,
778
- )
779
- mll = ExactMarginalLogLikelihood(model.likelihood, model)
780
- fit_gpytorch_mll(mll)
781
-
782
- # Approximate box decomposition similar to Ax when the number of objectives is large.
783
- # https://github.com/pytorch/botorch/blob/36d09a4297c2a0ff385077b7fcdd5a9d308e40cc/botorch/acquisition/multi_objective/utils.py#L46-L63
784
- if n_objectives > 4:
785
- alpha = 10 ** (-8 + n_objectives)
786
- else:
787
- alpha = 0.0
788
-
789
- ref_point = train_obj.min(dim=0).values - 1e-8
790
-
791
- partitioning = NondominatedPartitioning(ref_point=ref_point, Y=train_obj_feas, alpha=alpha)
792
-
793
- ref_point_list = ref_point.tolist()
794
-
795
- ACQF = acqf_patch_factory(monte_carlo.qExpectedHypervolumeImprovement, pof_config)
796
- acqf = ACQF(
797
- model=model,
798
- ref_point=ref_point_list,
799
- partitioning=partitioning,
800
- sampler=_get_sobol_qmc_normal_sampler(256),
801
- X_pending=pending_x,
802
- **additional_qehvi_kwargs,
803
- )
804
- acqf.set_model_c(model_c)
805
-
806
- standard_bounds = torch.zeros_like(bounds)
807
- standard_bounds[1] = 1
808
-
809
- # optimize_acqf の探索に parameter constraints を追加します。
810
- if len(_constraints) > 0:
811
- nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
812
-
813
- # 1, batch_limit, nonlinear_..., ic_generator
814
- kwargs = nc.create_kwargs()
815
- q = kwargs.pop('q')
816
- batch_limit = kwargs.pop('options')["batch_limit"]
817
-
818
- candidates, _ = optimize_acqf(
819
- acq_function=acqf,
820
- bounds=standard_bounds,
821
- q=q,
822
- num_restarts=20,
823
- raw_samples=1024,
824
- options={"batch_limit": batch_limit, "maxiter": 200, "nonnegative": True},
825
- sequential=True,
826
- **kwargs
827
- )
828
-
829
- else:
830
- candidates, _ = optimize_acqf(
831
- acq_function=acqf,
832
- bounds=standard_bounds,
833
- q=1,
834
- num_restarts=20,
835
- raw_samples=1024,
836
- options={"batch_limit": 5, "maxiter": 200, "nonnegative": True},
837
- sequential=True,
838
- )
839
-
840
- candidates = unnormalize(candidates.detach(), bounds=bounds)
841
-
842
- return candidates
843
-
844
-
845
- # noinspection PyIncorrectDocstring
846
- @experimental_func("3.5.0")
847
- def ehvi_candidates_func(
848
- train_x: "torch.Tensor",
849
- train_obj: "torch.Tensor",
850
- train_con: "torch.Tensor" | None,
851
- bounds: "torch.Tensor",
852
- pending_x: "torch.Tensor" | None,
853
- model_c: "SingleTaskGP",
854
- _constraints,
855
- _study,
856
- _opt,
857
- pof_config,
858
- ) -> "torch.Tensor":
859
- """Expected Hypervolume Improvement (EHVI).
860
-
861
- The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
862
- with multi-objective optimization without constraints.
863
-
864
- .. seealso::
865
- :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
866
- descriptions.
867
- """
868
-
869
- n_objectives = train_obj.size(-1)
870
- if train_con is not None:
871
- raise ValueError("Constraints are not supported with ehvi_candidates_func.")
872
-
873
- train_y = train_obj
874
- train_x = normalize(train_x, bounds=bounds)
875
-
876
- train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
877
-
878
- model = SingleTaskGP(
879
- train_x,
880
- train_y,
881
- train_Yvar=train_yvar,
882
- outcome_transform=standardizer,
883
- )
884
- mll = ExactMarginalLogLikelihood(model.likelihood, model)
885
- fit_gpytorch_mll(mll)
886
-
887
- # Approximate box decomposition similar to Ax when the number of objectives is large.
888
- # https://github.com/pytorch/botorch/blob/36d09a4297c2a0ff385077b7fcdd5a9d308e40cc/botorch/acquisition/multi_objective/utils.py#L46-L63
889
- if n_objectives > 4:
890
- alpha = 10 ** (-8 + n_objectives)
891
- else:
892
- alpha = 0.0
893
-
894
- ref_point = train_obj.min(dim=0).values - 1e-8
895
-
896
- partitioning = NondominatedPartitioning(ref_point=ref_point, Y=train_y, alpha=alpha)
897
-
898
- ref_point_list = ref_point.tolist()
899
-
900
- ACQF = acqf_patch_factory(ExpectedHypervolumeImprovement)
901
- acqf = ACQF(
902
- model=model,
903
- ref_point=ref_point_list,
904
- partitioning=partitioning,
905
- )
906
- acqf.set_model_c(model_c)
907
- standard_bounds = torch.zeros_like(bounds)
908
- standard_bounds[1] = 1
909
-
910
- # optimize_acqf の探索に parameter constraints を追加します。
911
- if len(_constraints) > 0:
912
- nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
913
-
914
- # 1, batch_limit, nonlinear_..., ic_generator
915
- kwargs = nc.create_kwargs()
916
- q = kwargs.pop('q')
917
- batch_limit = kwargs.pop('options')["batch_limit"]
918
-
919
- candidates, _ = optimize_acqf(
920
- acq_function=acqf,
921
- bounds=standard_bounds,
922
- q=q,
923
- num_restarts=10,
924
- raw_samples=512,
925
- options={"batch_limit": batch_limit, "maxiter": 200},
926
- sequential=True,
927
- **kwargs
928
- )
929
-
930
- else:
931
- candidates, _ = optimize_acqf(
932
- acq_function=acqf,
933
- bounds=standard_bounds,
934
- q=1,
935
- num_restarts=20,
936
- raw_samples=1024,
937
- options={"batch_limit": 5, "maxiter": 200},
938
- sequential=True,
939
- )
940
-
941
- candidates = unnormalize(candidates.detach(), bounds=bounds)
942
-
943
- return candidates
944
-
945
-
946
-# noinspection PyIncorrectDocstring
-@experimental_func("3.1.0")
-def qnehvi_candidates_func(
-    train_x: "torch.Tensor",
-    train_obj: "torch.Tensor",
-    train_con: "torch.Tensor" | None,
-    bounds: "torch.Tensor",
-    pending_x: "torch.Tensor" | None,
-    model_c: "SingleTaskGP",
-    _constraints,
-    _study,
-    _opt,
-    pof_config,
-) -> "torch.Tensor":
-    """Quasi MC-based batch Noisy Expected Hypervolume Improvement (qNEHVI).
-
-    According to the BoTorch/Ax documentation,
-    this function may perform better than qEHVI (`qehvi_candidates_func`).
-    (cf. https://botorch.org/tutorials/constrained_multi_objective_bo )
-
-    .. seealso::
-        :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
-        descriptions.
-    """
-
-    n_objectives = train_obj.size(-1)
-
-    if train_con is not None:
-        train_y = torch.cat([train_obj, train_con], dim=-1)
-
-        n_constraints = train_con.size(1)
-        additional_qnehvi_kwargs = {
-            "objective": IdentityMCMultiOutputObjective(outcomes=list(range(n_objectives))),
-            "constraints": _get_constraint_funcs(n_constraints),
-        }
-    else:
-        train_y = train_obj
-
-        additional_qnehvi_kwargs = {}
-
-    train_x = normalize(train_x, bounds=bounds)
-    if pending_x is not None:
-        pending_x = normalize(pending_x, bounds=bounds)
-
-    train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
-
-    model = SingleTaskGP(
-        train_x,
-        train_y,
-        train_Yvar=train_yvar,
-        outcome_transform=standardizer,
-    )
-    mll = ExactMarginalLogLikelihood(model.likelihood, model)
-    fit_gpytorch_mll(mll)
-
-    # Approximate box decomposition similar to Ax when the number of objectives is large.
-    # https://github.com/pytorch/botorch/blob/36d09a4297c2a0ff385077b7fcdd5a9d308e40cc/botorch/acquisition/multi_objective/utils.py#L46-L63
-    if n_objectives > 4:
-        alpha = 10 ** (-8 + n_objectives)
-    else:
-        alpha = 0.0
-
-    ref_point = train_obj.min(dim=0).values - 1e-8
-
-    ref_point_list = ref_point.tolist()
-
-    # prune_baseline=True is generally recommended by the documentation of BoTorch.
-    # cf. https://botorch.org/api/acquisition.html (accessed on 2022/11/18)
-    ACQF = acqf_patch_factory(monte_carlo.qNoisyExpectedHypervolumeImprovement, pof_config)
-    acqf = ACQF(
-        model=model,
-        ref_point=ref_point_list,
-        X_baseline=train_x,
-        alpha=alpha,
-        prune_baseline=True,
-        sampler=_get_sobol_qmc_normal_sampler(256),
-        X_pending=pending_x,
-        **additional_qnehvi_kwargs,
-    )
-    acqf.set_model_c(model_c)
-
-    standard_bounds = torch.zeros_like(bounds)
-    standard_bounds[1] = 1
-
-    # Add parameter constraints to the optimize_acqf search.
-    if len(_constraints) > 0:
-        nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
-
-        # keys: q, options (batch_limit), nonlinear_inequality_constraints, ic_generator
-        kwargs = nc.create_kwargs()
-        q = kwargs.pop('q')
-        batch_limit = kwargs.pop('options')["batch_limit"]
-
-        candidates, _ = optimize_acqf(
-            acq_function=acqf,
-            bounds=standard_bounds,
-            q=q,
-            num_restarts=20,
-            raw_samples=1024,
-            options={"batch_limit": batch_limit, "maxiter": 200, "nonnegative": True},
-            sequential=True,
-            **kwargs
-        )
-
-    else:
-        candidates, _ = optimize_acqf(
-            acq_function=acqf,
-            bounds=standard_bounds,
-            q=1,
-            num_restarts=20,
-            raw_samples=1024,
-            options={"batch_limit": 5, "maxiter": 200, "nonnegative": True},
-            sequential=True,
-        )
-
-    candidates = unnormalize(candidates.detach(), bounds=bounds)
-
-    return candidates
-
-
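For orientation, the ``nc.create_kwargs()`` call above is expected (per its surrounding comment) to bundle ``q``, ``options['batch_limit']``, ``nonlinear_inequality_constraints``, and ``ic_generator``, since BoTorch's ``optimize_acqf`` only accepts nonlinear inequality constraints together with an initial-condition generator. A purely hypothetical sketch of that shape; the real callables come from NonlinearInequalityConstraints, and the exact format of ``nonlinear_inequality_constraints`` depends on the botorch version:

    import torch

    def g(x):
        # Hypothetical constraint: feasible where g(x) >= 0 (BoTorch's convention).
        return 1.0 - x.sum(dim=-1)

    def ic_gen(acq_function, bounds, q, num_restarts, raw_samples, **kw):
        # Hypothetical generator of feasible starting points in the unit cube.
        return torch.rand(num_restarts, q, bounds.shape[-1])

    kwargs = {
        "q": 1,
        "options": {"batch_limit": 1},
        "nonlinear_inequality_constraints": [g],
        "ic_generator": ic_gen,
    }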
1066
-# noinspection PyIncorrectDocstring
-@experimental_func("2.4.0")
-def qparego_candidates_func(
-    train_x: "torch.Tensor",
-    train_obj: "torch.Tensor",
-    train_con: "torch.Tensor" | None,
-    bounds: "torch.Tensor",
-    pending_x: "torch.Tensor" | None,
-    model_c: "SingleTaskGP",
-    _constraints,
-    _study,
-    _opt,
-    pof_config,
-) -> "torch.Tensor":
-    """Quasi MC-based extended ParEGO (qParEGO) for constrained multi-objective optimization.
-
-    The default value of ``candidates_func`` in :class:`~optuna_integration.BoTorchSampler`
-    with multi-objective optimization when the number of objectives is larger than three.
-
-    .. seealso::
-        :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
-        descriptions.
-    """
-
-    n_objectives = train_obj.size(-1)
-
-    weights = sample_simplex(n_objectives).squeeze()
-    scalarization = get_chebyshev_scalarization(weights=weights, Y=train_obj)
-
-    if train_con is not None:
-        _validate_botorch_version_for_constrained_opt("qparego_candidates_func")
-        train_y = torch.cat([train_obj, train_con], dim=-1)
-        n_constraints = train_con.size(1)
-        objective = GenericMCObjective(lambda Z, X: scalarization(Z[..., :n_objectives]))
-        additional_qei_kwargs = {
-            "constraints": _get_constraint_funcs(n_constraints),
-        }
-    else:
-        train_y = train_obj
-
-        objective = GenericMCObjective(scalarization)
-        additional_qei_kwargs = {}
-
-    train_x = normalize(train_x, bounds=bounds)
-    if pending_x is not None:
-        pending_x = normalize(pending_x, bounds=bounds)
-
-    train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
-
-    model = SingleTaskGP(
-        train_x,
-        train_y,
-        train_Yvar=train_yvar,
-        outcome_transform=standardizer,
-    )
-    mll = ExactMarginalLogLikelihood(model.likelihood, model)
-    fit_gpytorch_mll(mll)
-
-    ACQF = acqf_patch_factory(qExpectedImprovement, pof_config)
-    acqf = ACQF(
-        model=model,
-        best_f=objective(train_y).max(),
-        sampler=_get_sobol_qmc_normal_sampler(256),
-        objective=objective,
-        X_pending=pending_x,
-        **additional_qei_kwargs,
-    )
-    acqf.set_model_c(model_c)
-
-    standard_bounds = torch.zeros_like(bounds)
-    standard_bounds[1] = 1
-
-    # Add parameter constraints to the optimize_acqf search.
-    if len(_constraints) > 0:
-        nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
-
-        # keys: q, options (batch_limit), nonlinear_inequality_constraints, ic_generator
-        kwargs = nc.create_kwargs()
-        q = kwargs.pop('q')
-        batch_limit = kwargs.pop('options')["batch_limit"]
-
-        candidates, _ = optimize_acqf(
-            acq_function=acqf,
-            bounds=standard_bounds,
-            q=q,
-            num_restarts=20,
-            raw_samples=1024,
-            options={"batch_limit": batch_limit, "maxiter": 200},
-            sequential=True,
-            **kwargs
-        )
-
-    else:
-        candidates, _ = optimize_acqf(
-            acq_function=acqf,
-            bounds=standard_bounds,
-            q=1,
-            num_restarts=20,
-            raw_samples=1024,
-            options={"batch_limit": 5, "maxiter": 200},
-            sequential=True,
-        )
-
-    candidates = unnormalize(candidates.detach(), bounds=bounds)
-
-    return candidates
-
-
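The scalarization step above is what turns qParEGO into a sequence of single-objective problems: each call draws a random weight vector on the simplex and collapses the objectives with an augmented Chebyshev function. A minimal standalone sketch (assuming botorch is installed):

    import torch
    from botorch.utils.sampling import sample_simplex
    from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization

    Y = torch.rand(20, 2)                  # 20 observations, 2 objectives
    weights = sample_simplex(2).squeeze()  # random non-negative weights summing to 1
    scalarization = get_chebyshev_scalarization(weights=weights, Y=Y)
    print(scalarization(Y).shape)          # torch.Size([20]): one scalar per observation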
1174
- @experimental_func("4.0.0")
1175
- def qkg_candidates_func(
1176
- train_x: "torch.Tensor",
1177
- train_obj: "torch.Tensor",
1178
- train_con: "torch.Tensor" | None,
1179
- bounds: "torch.Tensor",
1180
- pending_x: "torch.Tensor" | None,
1181
- model_c: "SingleTaskGP",
1182
- _constraints,
1183
- _study,
1184
- _opt,
1185
- pof_config,
1186
- ) -> "torch.Tensor":
1187
- """Quasi MC-based batch Knowledge Gradient (qKG).
1188
-
1189
- According to Botorch/Ax documentation,
1190
- this function may perform better than qEI (`qei_candidates_func`).
1191
- (cf. https://botorch.org/tutorials/one_shot_kg )
1192
-
1193
- .. seealso::
1194
- :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
1195
- descriptions.
1196
-
1197
- """
1198
-
1199
- if train_obj.size(-1) != 1:
1200
- raise ValueError("Objective may only contain single values with qKG.")
1201
- if train_con is not None:
1202
- train_y = torch.cat([train_obj, train_con], dim=-1)
1203
- n_constraints = train_con.size(1)
1204
- objective = ConstrainedMCObjective(
1205
- objective=lambda Z, X: Z[..., 0],
1206
- constraints=_get_constraint_funcs(n_constraints),
1207
- )
1208
- else:
1209
- train_y = train_obj
1210
- objective = None # Using the default identity objective.
1211
-
1212
- train_x = normalize(train_x, bounds=bounds)
1213
- if pending_x is not None:
1214
- pending_x = normalize(pending_x, bounds=bounds)
1215
-
1216
- train_yvar, standardizer = get_minimum_YVar_and_standardizer(train_y)
1217
-
1218
- model = SingleTaskGP(
1219
- train_x,
1220
- train_y,
1221
- train_Yvar=train_yvar,
1222
- outcome_transform=standardizer,
1223
- )
1224
- mll = ExactMarginalLogLikelihood(model.likelihood, model)
1225
- fit_gpytorch_mll(mll)
1226
-
1227
- ACQF = acqf_patch_factory(qKnowledgeGradient, pof_config)
1228
- acqf = ACQF(
1229
- model=model,
1230
- num_fantasies=256,
1231
- objective=objective,
1232
- X_pending=pending_x,
1233
- )
1234
- acqf.set_model_c(model_c)
1235
-
1236
- standard_bounds = torch.zeros_like(bounds)
1237
- standard_bounds[1] = 1
1238
-
1239
- # optimize_acqf の探索に parameter constraints を追加します。
1240
- if len(_constraints) > 0:
1241
- nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
1242
-
1243
- # 1, batch_limit, nonlinear_..., ic_generator
1244
- kwargs = nc.create_kwargs()
1245
- q = kwargs.pop('q')
1246
- batch_limit = kwargs.pop('options')["batch_limit"]
1247
-
1248
- candidates, _ = optimize_acqf(
1249
- acq_function=acqf,
1250
- bounds=standard_bounds,
1251
- q=q,
1252
- num_restarts=10,
1253
- raw_samples=512,
1254
- options={"batch_limit": batch_limit, "maxiter": 200},
1255
- sequential=True,
1256
- **kwargs
1257
- )
1258
-
1259
- else:
1260
- candidates, _ = optimize_acqf(
1261
- acq_function=acqf,
1262
- bounds=standard_bounds,
1263
- q=1,
1264
- num_restarts=10,
1265
- raw_samples=512,
1266
- options={"batch_limit": 8, "maxiter": 200},
1267
- sequential=True,
1268
- )
1269
-
1270
- candidates = unnormalize(candidates.detach(), bounds=bounds)
1271
-
1272
- return candidates
1273
-
1274
-
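``_get_constraint_funcs(n)`` (defined earlier in this module, following the optuna-integration original) is assumed to return one slicing callable per constraint, so that BoTorch acquisition objectives such as ``ConstrainedMCObjective`` can read the constraint columns out of the joint model output, where values <= 0 mean feasible. A hypothetical restatement of that convention:

    def _get_constraint_funcs(n_constraints):
        # Z has shape ... x (n_objectives + n_constraints); the trailing
        # n_constraints columns hold constraint values (<= 0 is feasible).
        return [
            (lambda Z, i=i: Z[..., -n_constraints + i])
            for i in range(n_constraints)
        ]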
1275
-# noinspection PyIncorrectDocstring,SpellCheckingInspection
-@experimental_func("4.0.0")
-def qhvkg_candidates_func(
-    train_x: "torch.Tensor",
-    train_obj: "torch.Tensor",
-    train_con: "torch.Tensor" | None,
-    bounds: "torch.Tensor",
-    pending_x: "torch.Tensor" | None,
-    model_c: "SingleTaskGP",
-    _constraints,
-    _study,
-    _opt,
-    pof_config,
-) -> "torch.Tensor":
-    """Quasi MC-based batch Hypervolume Knowledge Gradient (qHVKG).
-
-    According to the BoTorch/Ax documentation,
-    this function may perform better than qEHVI (`qehvi_candidates_func`).
-    (cf. https://botorch.org/tutorials/decoupled_mobo )
-
-    .. seealso::
-        :func:`~optuna_integration.botorch.qei_candidates_func` for argument and return value
-        descriptions.
-    """
-
-    # We need botorch >=0.9.5 for qHypervolumeKnowledgeGradient.
-    if not _imports_qhvkg.is_successful():
-        raise ImportError(
-            "qhvkg_candidates_func requires botorch >=0.9.5. "
-            "Please upgrade botorch or use qehvi_candidates_func as candidates_func instead."
-        )
-
-    if train_con is not None:
-        train_y = torch.cat([train_obj, train_con], dim=-1)
-    else:
-        train_y = train_obj
-
-    train_x = normalize(train_x, bounds=bounds)
-    if pending_x is not None:
-        pending_x = normalize(pending_x, bounds=bounds)
-
-    models = [
-        SingleTaskGP(
-            train_x,
-            train_y[..., [i]],
-            train_Yvar=get_minimum_YVar_and_standardizer(train_y[..., [i]])[0],
-            outcome_transform=Standardize(m=1),
-        )
-        for i in range(train_y.shape[-1])
-    ]
-    model = ModelListGP(*models)
-    mll = SumMarginalLogLikelihood(model.likelihood, model)
-    fit_gpytorch_mll(mll)
-
-    n_constraints = train_con.size(1) if train_con is not None else 0
-    objective = FeasibilityWeightedMCMultiOutputObjective(
-        model,
-        X_baseline=train_x,
-        constraint_idcs=[-n_constraints + i for i in range(n_constraints)],
-    )
-
-    ref_point = train_obj.min(dim=0).values - 1e-8
-
-    ACQF = acqf_patch_factory(qHypervolumeKnowledgeGradient, pof_config)
-    acqf = ACQF(
-        model=model,
-        ref_point=ref_point,
-        num_fantasies=16,
-        X_pending=pending_x,
-        objective=objective,
-        sampler=ListSampler(
-            *[
-                SobolQMCNormalSampler(sample_shape=torch.Size([16]))
-                for _ in range(model.num_outputs)
-            ]
-        ),
-        inner_sampler=SobolQMCNormalSampler(sample_shape=torch.Size([32])),
-    )
-    acqf.set_model_c(model_c)
-
-    standard_bounds = torch.zeros_like(bounds)
-    standard_bounds[1] = 1
-
-    # Add parameter constraints to the optimize_acqf search.
-    if len(_constraints) > 0:
-        nc = NonlinearInequalityConstraints(_study, _constraints, _opt)
-
-        # keys: q, options (batch_limit), nonlinear_inequality_constraints, ic_generator
-        kwargs = nc.create_kwargs()
-        q = kwargs.pop('q')
-        batch_limit = kwargs.pop('options')["batch_limit"]
-
-        candidates, _ = optimize_acqf(
-            acq_function=acqf,
-            bounds=standard_bounds,
-            q=q,
-            num_restarts=1,
-            raw_samples=1024,
-            options={"batch_limit": batch_limit, "maxiter": 200, "nonnegative": True},
-            sequential=True,
-            **kwargs
-        )
-
-    else:
-        candidates, _ = optimize_acqf(
-            acq_function=acqf,
-            bounds=standard_bounds,
-            q=1,
-            num_restarts=1,
-            raw_samples=1024,
-            options={"batch_limit": 4, "maxiter": 200, "nonnegative": True},
-            sequential=False,
-        )
-
-    candidates = unnormalize(candidates.detach(), bounds=bounds)
-
-    return candidates
-
-
1394
-def _get_default_candidates_func(
-    n_objectives: int,
-    has_constraint: bool,
-    consider_running_trials: bool,
-) -> Callable[
-    [
-        "torch.Tensor",
-        "torch.Tensor",
-        "torch.Tensor" | None,
-        "torch.Tensor",
-        "torch.Tensor" | None,
-        "SingleTaskGP",
-        "list[Constraint]",
-        "Study",
-        "OptunaOptimizer",
-        "PoFConfig",
-    ],
-    "torch.Tensor",
-]:
-    if n_objectives > 3 and not has_constraint and not consider_running_trials:
-        return ehvi_candidates_func
-    elif n_objectives > 3:
-        return qparego_candidates_func
-    elif n_objectives > 1:
-        return qehvi_candidates_func
-    elif consider_running_trials:
-        return qei_candidates_func
-    else:
-        return logei_candidates_func
-
-
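The dispatch above reproduces the optuna-integration defaults. Spelled out as positional calls of ``(n_objectives, has_constraint, consider_running_trials)``:

    assert _get_default_candidates_func(1, False, False) is logei_candidates_func
    assert _get_default_candidates_func(1, False, True) is qei_candidates_func
    assert _get_default_candidates_func(2, False, False) is qehvi_candidates_func
    assert _get_default_candidates_func(4, False, False) is ehvi_candidates_func
    assert _get_default_candidates_func(4, True, False) is qparego_candidates_func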
1425
-# ===== main re-implementation of BoTorchSampler =====
-@dataclass
-class PoFConfig:
-    """Configuration of PoFBoTorchSampler.
-
-    Args:
-        enable_pof (bool):
-            Whether to consider Probability of Feasibility (PoF).
-            Defaults to True.
-
-        gamma (float or torch.Tensor):
-            Exponent for Probability of Feasibility. A larger value places more emphasis
-            on feasibility. If 0, Probability of Feasibility is not considered.
-            Defaults to 1.
-
-        threshold (float or torch.Tensor):
-            Boundary value for calculating Probability of Feasibility with the CDF.
-            Generally between 0 and 1, with 0.5 recommended. A larger value places more
-            emphasis on feasibility.
-            Defaults to 0.5.
-
-        enable_log (bool):
-            Whether to apply symlog to the base acquisition function values.
-            Defaults to True.
-
-        enable_positive_only_pof (bool):
-            Whether to apply Probability of Feasibility only when the base acquisition
-            function is positive.
-            Defaults to False.
-
-        enable_dynamic_pof (bool):
-            Whether to change gamma dynamically. When True, the ``gamma`` argument is ignored.
-            Defaults to False.
-
-        enable_dynamic_threshold (bool):
-            Whether to change threshold dynamically. When True, the ``threshold`` argument
-            is ignored.
-            Defaults to False.
-
-        enable_repeat_penalty (bool):
-            Whether to apply a penalty coefficient to the base acquisition function values
-            near already-sampled points.
-            Defaults to False.
-
-        enable_dynamic_repeat_penalty (bool):
-            Strengthens the penalty coefficient if the same value is proposed repeatedly.
-            When True, enable_repeat_penalty behaves as if set to True.
-            Defaults to False.
-
-        repeat_watch_window (int):
-            How many recent proposals are referenced when determining the magnitude of the
-            penalty while enable_dynamic_repeat_penalty is True.
-            Defaults to 3.
-
-        repeat_watch_norm_distance (float):
-            How small the norm distance between proposed parameters must be, in the
-            normalized parameter space [0, 1], for the penalty effect to be strengthened.
-            Extreme values may cause numerical instability.
-            Defaults to 0.1.
-
-        enable_no_noise (bool):
-            Whether to treat observation errors as non-existent when training the
-            regression model on the objective function values. The default is True
-            because there is essentially no observational error in a FEM analysis.
-            This differs from the original BoTorchSampler implementation.
-
-    """
-    enable_pof: bool = True  # Whether to consider PoF.
-    gamma: float | torch.Tensor = 1.0  # Exponent applied to PoF. Larger values emphasize feasibility more; 0 ignores PoF.
-    threshold: float | torch.Tensor = 0.5  # Boundary value for computing PoF via the CDF. Typically 0 to 1; 0.5 recommended. Larger values emphasize feasibility more.
-
-    enable_log: bool = True  # Apply symlog to the base acquisition function values.
-    enable_positive_only_pof: bool = False  # Multiply by PoF only when the base acquisition function is positive.
-
-    enable_dynamic_pof: bool = False  # Change gamma dynamically. When True, gamma is ignored.
-    enable_dynamic_threshold: bool = False  # Change threshold dynamically. When True, threshold is ignored.
-
-    enable_repeat_penalty: bool = False  # Apply a penalty coefficient to base acquisition values near already-sampled points.
-    _repeat_penalty: float | torch.Tensor = 1.  # Internal variable used when enable_repeat_penalty is True.
-
-    enable_dynamic_repeat_penalty: bool = False  # Strengthen the penalty when the same value repeats. When True, enable_repeat_penalty behaves as True.
-    repeat_watch_window: int = 3  # When enable_dynamic_repeat_penalty is True, the number of recent proposals used to decide the penalty magnitude.
-    repeat_watch_norm_distance: float = 0.1  # How small the norm between proposals must be, in the normalized [0, 1] parameter space, to strengthen the penalty. Extreme values may cause numerical instability.
-    _repeat_penalty_gamma: float | torch.Tensor = 1.  # Exponent for _repeat_penalty; internal variable.
-
-    enable_no_noise: bool = True
-
-    def _disable_all_features(self):
-        # Disable everything except constraint handling so that the behavior
-        # matches the original BoTorchSampler implementation.
-        self.enable_pof = False
-        self.enable_log = False
-        self.enable_positive_only_pof = False
-        self.enable_dynamic_pof = False
-        self.enable_dynamic_threshold = False
-        self.enable_repeat_penalty = False
-        self.enable_dynamic_repeat_penalty = False
-        self.enable_no_noise = False
-
-
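In practice a config either switches the fork's extras off to recover stock BoTorchSampler behavior, or turns individual features up. A sketch (the ``symlog`` helper is only an illustration of the transform that ``enable_log`` refers to, assuming the usual sign-preserving definition; it is not part of this module):

    import torch

    plain = PoFConfig()
    plain._disable_all_features()    # behave like the original BoTorchSampler

    cautious = PoFConfig(
        gamma=2.0,                   # weigh feasibility more heavily
        enable_repeat_penalty=True,  # discourage re-sampling near known points
    )

    def symlog(x: torch.Tensor) -> torch.Tensor:
        # Sign-preserving log compression of acquisition values.
        return torch.sign(x) * torch.log1p(torch.abs(x))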
1520
- @experimental_class("2.4.0")
1521
- class PoFBoTorchSampler(BaseSampler):
1522
- """A sampler that forked from BoTorchSampler.
1523
-
1524
- This sampler improves the BoTorchSampler to account
1525
- for known/hidden constraints and repeated penalties.
1526
-
1527
- See Also:
1528
- https://optuna.readthedocs.io/en/v3.0.0-b1/reference/generated/optuna.integration.BoTorchSampler.html
1529
-
1530
- Args:
1531
- candidates_func:
1532
- An optional function that suggests the next candidates. It must take the training
1533
- data, the objectives, the constraints, the search space bounds and return the next
1534
- candidates. The arguments are of type ``torch.Tensor``. The return value must be a
1535
- ``torch.Tensor``. However, if ``constraints_func`` is omitted, constraints will be
1536
- :obj:`None`. For any constraints that failed to compute, the tensor will contain
1537
- NaN.
1538
-
1539
- If omitted, it is determined automatically based on the number of objectives and
1540
- whether a constraint is specified. If the
1541
- number of objectives is one and no constraint is specified, log-Expected Improvement
1542
- is used. If constraints are specified, quasi MC-based batch Expected Improvement
1543
- (qEI) is used.
1544
- If the number of objectives is either two or three, Quasi MC-based
1545
- batch Expected Hypervolume Improvement (qEHVI) is used. Otherwise, for a larger number
1546
- of objectives, analytic Expected Hypervolume Improvement is used if no constraints
1547
- are specified, or the faster Quasi MC-based extended ParEGO (qParEGO) is used if
1548
- constraints are present.
1549
-
1550
- The function should assume *maximization* of the objective.
1551
-
1552
- .. seealso::
1553
- See :func:`optuna_integration.botorch.qei_candidates_func` for an example.
1554
- constraints_func:
1555
- An optional function that computes the objective constraints. It must take a
1556
- :class:`~optuna.trial.FrozenTrial` and return the constraints. The return value must
1557
- be a sequence of :obj:`float` s. A value strictly larger than 0 means that a
1558
- constraint is violated. A value equal to or smaller than 0 is considered feasible.
1559
-
1560
- If omitted, no constraints will be passed to ``candidates_func`` nor taken into
1561
- account during suggestion.
1562
- n_startup_trials:
1563
- Number of initial trials, that is the number of trials to resort to independent
1564
- sampling.
1565
- consider_running_trials:
1566
- If True, the acquisition function takes into consideration the running parameters
1567
- whose evaluation has not completed. Enabling this option is considered to improve the
1568
- performance of parallel optimization.
1569
-
1570
- .. note::
1571
- Added in v3.2.0 as an experimental argument.
1572
- independent_sampler:
1573
- An independent sampler to use for the initial trials and for parameters that are
1574
- conditional.
1575
- seed:
1576
- Seed for random number generator.
1577
- device:
1578
- A ``torch.device`` to store input and output data of BoTorch. Please set a CUDA device
1579
- if you fasten sampling.
1580
- pof_config (PoFConfig or None):
1581
- Sampler settings.
1582
- """
1583
-
1584
- def __init__(
1585
- self,
1586
- *,
1587
- candidates_func: (
1588
- Callable[
1589
- [
1590
- "torch.Tensor",
1591
- "torch.Tensor",
1592
- "torch.Tensor" | None,
1593
- "torch.Tensor",
1594
- "torch.Tensor" | None,
1595
- "SingleTaskGP",
1596
- "list[Constraint]",
1597
- "Study",
1598
- "OptunaOptimizer",
1599
- "PoFConfig",
1600
- ],
1601
- "torch.Tensor",
1602
- ]
1603
- | None
1604
- ) = None,
1605
- constraints_func: Callable[[FrozenTrial], Sequence[float]] | None = None,
1606
- n_startup_trials: int = 10,
1607
- consider_running_trials: bool = False,
1608
- independent_sampler: BaseSampler | None = None,
1609
- seed: int | None = None,
1610
- device: "torch.device" | None = None,
1611
- pof_config: PoFConfig or None = None,
1612
- ):
1613
- _imports.check()
1614
-
1615
- self._candidates_func = candidates_func
1616
- self._constraints_func = constraints_func
1617
- self._consider_running_trials = consider_running_trials
1618
- self._independent_sampler = independent_sampler or RandomSampler(seed=seed)
1619
- self._n_startup_trials = n_startup_trials
1620
- self._seed = seed
1621
-
1622
- self._study_id: int | None = None
1623
- self._search_space = IntersectionSearchSpace()
1624
- self._device = device or torch.device("cpu")
1625
-
1626
- self.pof_config = pof_config or PoFConfig()
1627
- _set_use_fixed_noise(self.pof_config.enable_no_noise)
1628
-
1629
-
1630
- @property
1631
- def use_fixed_noise(self) -> bool:
1632
- return _get_use_fixed_noise()
1633
-
1634
- @use_fixed_noise.setter
1635
- def use_fixed_noise(self, value: bool):
1636
- _set_use_fixed_noise(value)
1637
-
1638
-    def infer_relative_search_space(
-        self,
-        study: Study,
-        trial: FrozenTrial,
-    ) -> dict[str, BaseDistribution]:
-        if self._study_id is None:
-            self._study_id = study._study_id
-        if self._study_id != study._study_id:
-            # Note that the check below is meaningless when `InMemoryStorage` is used
-            # because `InMemoryStorage.create_new_study` always returns the same study ID.
-            raise RuntimeError("BoTorchSampler cannot handle multiple studies.")
-
-        search_space: dict[str, BaseDistribution] = {}
-        for name, distribution in self._search_space.calculate(study).items():
-            if distribution.single():
-                # built-in `candidates_func` cannot handle distributions that contain just a
-                # single value, so we skip them. Note that the parameter values for such
-                # distributions are sampled in `Trial`.
-                continue
-            search_space[name] = distribution
-
-        return search_space
-
1661
-    def sample_relative(
-        self,
-        study: Study,
-        trial: FrozenTrial,
-        search_space: dict[str, BaseDistribution],
-    ) -> dict[str, Any]:
-        assert isinstance(search_space, dict)
-
-        if len(search_space) == 0:
-            return {}
-
-        completed_trials = study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,))
-        running_trials = [
-            t for t in study.get_trials(deepcopy=False, states=(TrialState.RUNNING,)) if t != trial
-        ]
-        trials = completed_trials + running_trials
-
-        n_trials = len(trials)
-        n_completed_trials = len(completed_trials)
-        if n_trials < self._n_startup_trials:
-            return {}
-
-        trans = _SearchSpaceTransform(search_space)
-        n_objectives = len(study.directions)
-        values: numpy.ndarray | torch.Tensor = numpy.empty(
-            (n_trials, n_objectives), dtype=numpy.float64
-        )
-        params: numpy.ndarray | torch.Tensor
-        con: numpy.ndarray | torch.Tensor | None = None
-        bounds: numpy.ndarray | torch.Tensor = trans.bounds
-        params = numpy.empty((n_trials, trans.bounds.shape[0]), dtype=numpy.float64)
-        for trial_idx, trial in enumerate(trials):
-            if trial.state == TrialState.COMPLETE:
-                params[trial_idx] = trans.transform(trial.params)
-                assert len(study.directions) == len(trial.values)
-                for obj_idx, (direction, value) in enumerate(zip(study.directions, trial.values)):
-                    assert value is not None
-                    if (
-                        direction == StudyDirection.MINIMIZE
-                    ):  # BoTorch always assumes maximization.
-                        value *= -1
-                    values[trial_idx, obj_idx] = value
-                if self._constraints_func is not None:
-                    constraints = study._storage.get_trial_system_attrs(trial._trial_id).get(
-                        _CONSTRAINTS_KEY
-                    )
-                    if constraints is not None:
-                        n_constraints = len(constraints)
-
-                        if con is None:
-                            con = numpy.full(
-                                (n_completed_trials, n_constraints), numpy.nan, dtype=numpy.float64
-                            )
-                        elif n_constraints != con.shape[1]:
-                            raise RuntimeError(
-                                f"Expected {con.shape[1]} constraints "
-                                f"but received {n_constraints}."
-                            )
-                        con[trial_idx] = constraints
-            elif trial.state == TrialState.RUNNING:
-                if all(p in trial.params for p in search_space):
-                    params[trial_idx] = trans.transform(trial.params)
-                else:
-                    params[trial_idx] = numpy.nan
-            else:
-                assert False, "trial.state must be TrialState.COMPLETE or TrialState.RUNNING."
-
-        if self._constraints_func is not None:
-            if con is None:
-                warnings.warn(
-                    "`constraints_func` was given but no call to it correctly computed "
-                    "constraints. Constraints passed to `candidates_func` will be `None`."
-                )
-            elif numpy.isnan(con).any():
-                warnings.warn(
-                    "`constraints_func` was given but some calls to it did not correctly compute "
-                    "constraints. Constraints passed to `candidates_func` will contain NaN."
-                )
-
-        values = torch.from_numpy(values).to(self._device)
-        params = torch.from_numpy(params).to(self._device)
-        if con is not None:
-            con = torch.from_numpy(con).to(self._device)
-        bounds = torch.from_numpy(bounds).to(self._device)
-
-        if con is not None:
-            if con.dim() == 1:
-                con.unsqueeze_(-1)
-        bounds.transpose_(0, 1)
-
-        if self._candidates_func is None:
-            self._candidates_func = _get_default_candidates_func(
-                n_objectives=n_objectives,
-                has_constraint=con is not None,
-                consider_running_trials=self._consider_running_trials,
-            )
-
-        completed_values = values[:n_completed_trials]
-        completed_params = params[:n_completed_trials]
-        if self._consider_running_trials:
-            running_params = params[n_completed_trials:]
-            running_params = running_params[~torch.isnan(running_params).any(dim=1)]
-        else:
-            running_params = None
-
-        # Temporarily reverted: inconsistent with TPESampler.
-        # if self._seed is not None:
-        #     random.seed(self._seed)
-        #     numpy.random.seed(self._seed)
-        #     torch.manual_seed(self._seed)
-        #     torch.backends.cudnn.benchmark = False
-        #     torch.backends.cudnn.deterministic = True
-
-        with manual_seed(self._seed):
-
-            # ===== build model_c =====
-            # ----- prepare bounds, train_x, train_y -----
-            # train_x and train_y must follow the original implementation;
-            # otherwise they will not match the X argument of ACQF.forward(X).
-
-            # Trials pruned because of a strict constraint violation or a broken model.
-            pruned_trials = study.get_trials(deepcopy=False, states=(TrialState.PRUNED,))
-            # Unlike the original implementation, this model is not used to propose
-            # the next point, so running trials need not be considered.
-            trials = completed_trials + pruned_trials
-            n_trials = len(trials)
-
-            # ----- build train_x, train_y (completed_params, completed_values) -----
-            # Collect x and y (= feasibility) from the trials.
-            trans = _SearchSpaceTransform(search_space)
-            bounds: numpy.ndarray | torch.Tensor = trans.bounds
-            params: numpy.ndarray | torch.Tensor = numpy.empty((n_trials, trans.bounds.shape[0]), dtype=numpy.float64)
-            values: numpy.ndarray | torch.Tensor = numpy.empty((n_trials, 1), dtype=numpy.float64)
-            for trial_idx, trial in enumerate(trials):
-                params[trial_idx] = trans.transform(trial.params)
-                if trial.state == TrialState.COMPLETE:
-                    # A COMPLETE trial means the FEM computation itself succeeded,
-                    # so it counts as feasible for model_c even if a weak constraint
-                    # recorded in trial.user_attrs['constraints'] is violated.
-                    values[trial_idx, 0] = 1.  # feasible
-                elif trial.state == TrialState.PRUNED:
-                    values[trial_idx, 0] = 0.  # infeasible
-                else:
-                    assert False, "trial.state must be TrialState.COMPLETE or TrialState.PRUNED."
-            bounds = torch.from_numpy(bounds).to(self._device)
-            params = torch.from_numpy(params).to(self._device)  # unnormalized, n_points x n_parameters Tensor
-            values = torch.from_numpy(values).to(self._device)  # 0 or 1, n_points x 1 Tensor
-            bounds.transpose_(0, 1)  # unnormalized, 2 x n_parameters Tensor
-
-            # ----- fit model_c -----
-            # with manual_seed(self._seed):
-            train_x_c = normalize(params, bounds=bounds)
-            train_y_c = values
-            model_c = SingleTaskGP(
-                train_x_c,  # n_data x n_prm
-                train_y_c,  # n_data x n_obj
-                # train_Yvar=1e-4 + torch.zeros_like(train_y_c),
-                outcome_transform=Standardize(
-                    m=train_y_c.shape[-1],  # The output dimension.
-                )
-            )
-            mll_c = ExactMarginalLogLikelihood(
-                model_c.likelihood,
-                model_c
-            )
-            fit_gpytorch_mll(mll_c)
-
-            # ===== pass the objects the NonlinearInequalityConstraints implementation needs =====
-            # This implementation is unavoidable as long as PyFemtet-specific constraint
-            # functions are assumed. If optuna changes how it handles constraint functions
-            # in the future, switch to that implementation (convert the Constraints so
-            # they can be used with plain optuna as well).
-            # These attributes are set inside the Optimizer.
-
-            # noinspection PyUnresolvedReferences
-            _constraints = self._pyfemtet_constraints
-            # noinspection PyUnresolvedReferences
-            _opt = self._pyfemtet_optimizer
-
-            # `manual_seed` makes the default candidates functions reproducible.
-            # `SobolQMCNormalSampler`'s constructor has a `seed` argument, but its behavior is
-            # deterministic when the BoTorch's seed is fixed.
-            candidates = self._candidates_func(
-                completed_params,
-                completed_values,
-                con,
-                bounds,
-                running_params,
-                model_c,
-                _constraints,
-                study,
-                _opt,
-                self.pof_config,
-            )
-        if self._seed is not None:
-            self._seed += 1
-
-        if not isinstance(candidates, torch.Tensor):
-            raise TypeError("Candidates must be a torch.Tensor.")
-        if candidates.dim() == 2:
-            if candidates.size(0) != 1:
-                raise ValueError(
-                    "Candidates batch optimization is not supported and the first dimension must "
-                    "have size 1 if candidates is a two-dimensional tensor. Actual: "
-                    f"{candidates.size()}."
-                )
-            # Batch size is one. Get rid of the batch dimension.
-            candidates = candidates.squeeze(0)
-        if candidates.dim() != 1:
-            raise ValueError("Candidates must be one or two-dimensional.")
-        if candidates.size(0) != bounds.size(1):
-            raise ValueError(
-                "Candidates size must match with the given bounds. Actual candidates: "
-                f"{candidates.size(0)}, bounds: {bounds.size(1)}."
-            )
-
-        return trans.untransform(candidates.cpu().numpy())
-
1886
-    def sample_independent(
-        self,
-        study: Study,
-        trial: FrozenTrial,
-        param_name: str,
-        param_distribution: BaseDistribution,
-    ) -> Any:
-        return self._independent_sampler.sample_independent(
-            study, trial, param_name, param_distribution
-        )
-
-    def reseed_rng(self) -> None:
-        self._independent_sampler.reseed_rng()
-        if self._seed is not None:
-            self._seed = numpy.random.RandomState().randint(numpy.iinfo(numpy.int32).max)
-
-    def before_trial(self, study: Study, trial: FrozenTrial) -> None:
-        self._independent_sampler.before_trial(study, trial)
-
-    def after_trial(
-        self,
-        study: Study,
-        trial: FrozenTrial,
-        state: TrialState,
-        values: Sequence[float] | None,
-    ) -> None:
-        if self._constraints_func is not None:
-            _process_constraints_after_trial(self._constraints_func, study, trial, state)
-        self._independent_sampler.after_trial(study, trial, state, values)
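End to end, the sampler plugs into optuna like any other ``BaseSampler``. A minimal sketch; note that the ``_pyfemtet_constraints`` and ``_pyfemtet_optimizer`` attributes read in ``sample_relative`` are normally set by PyFemtet's OptunaOptimizer, so they are stubbed here only to make the standalone example run:

    import optuna

    sampler = PoFBoTorchSampler(
        n_startup_trials=5,
        seed=42,
        pof_config=PoFConfig(enable_repeat_penalty=True),
    )
    sampler._pyfemtet_constraints = []   # normally set by OptunaOptimizer
    sampler._pyfemtet_optimizer = None   # normally set by OptunaOptimizer

    def objective(trial: optuna.Trial) -> float:
        x = trial.suggest_float("x", -1.0, 1.0)
        return x ** 2

    study = optuna.create_study(sampler=sampler, direction="minimize")
    study.optimize(objective, n_trials=15)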