meerk40t 0.9.3001__py2.py3-none-any.whl → 0.9.7020__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (446) hide show
  1. meerk40t/__init__.py +1 -1
  2. meerk40t/balormk/balor_params.py +167 -167
  3. meerk40t/balormk/clone_loader.py +457 -457
  4. meerk40t/balormk/controller.py +1566 -1512
  5. meerk40t/balormk/cylindermod.py +64 -0
  6. meerk40t/balormk/device.py +966 -1959
  7. meerk40t/balormk/driver.py +778 -591
  8. meerk40t/balormk/galvo_commands.py +1194 -0
  9. meerk40t/balormk/gui/balorconfig.py +237 -111
  10. meerk40t/balormk/gui/balorcontroller.py +191 -184
  11. meerk40t/balormk/gui/baloroperationproperties.py +116 -115
  12. meerk40t/balormk/gui/corscene.py +845 -0
  13. meerk40t/balormk/gui/gui.py +179 -147
  14. meerk40t/balormk/livelightjob.py +466 -382
  15. meerk40t/balormk/mock_connection.py +131 -109
  16. meerk40t/balormk/plugin.py +133 -135
  17. meerk40t/balormk/usb_connection.py +306 -301
  18. meerk40t/camera/__init__.py +1 -1
  19. meerk40t/camera/camera.py +514 -397
  20. meerk40t/camera/gui/camerapanel.py +1241 -1095
  21. meerk40t/camera/gui/gui.py +58 -58
  22. meerk40t/camera/plugin.py +441 -399
  23. meerk40t/ch341/__init__.py +27 -27
  24. meerk40t/ch341/ch341device.py +628 -628
  25. meerk40t/ch341/libusb.py +595 -589
  26. meerk40t/ch341/mock.py +171 -171
  27. meerk40t/ch341/windriver.py +157 -157
  28. meerk40t/constants.py +13 -0
  29. meerk40t/core/__init__.py +1 -1
  30. meerk40t/core/bindalias.py +550 -539
  31. meerk40t/core/core.py +47 -47
  32. meerk40t/core/cutcode/cubiccut.py +73 -73
  33. meerk40t/core/cutcode/cutcode.py +315 -312
  34. meerk40t/core/cutcode/cutgroup.py +141 -137
  35. meerk40t/core/cutcode/cutobject.py +192 -185
  36. meerk40t/core/cutcode/dwellcut.py +37 -37
  37. meerk40t/core/cutcode/gotocut.py +29 -29
  38. meerk40t/core/cutcode/homecut.py +29 -29
  39. meerk40t/core/cutcode/inputcut.py +34 -34
  40. meerk40t/core/cutcode/linecut.py +33 -33
  41. meerk40t/core/cutcode/outputcut.py +34 -34
  42. meerk40t/core/cutcode/plotcut.py +335 -335
  43. meerk40t/core/cutcode/quadcut.py +61 -61
  44. meerk40t/core/cutcode/rastercut.py +168 -148
  45. meerk40t/core/cutcode/waitcut.py +34 -34
  46. meerk40t/core/cutplan.py +1843 -1316
  47. meerk40t/core/drivers.py +330 -329
  48. meerk40t/core/elements/align.py +801 -669
  49. meerk40t/core/elements/branches.py +1858 -1507
  50. meerk40t/core/elements/clipboard.py +229 -219
  51. meerk40t/core/elements/element_treeops.py +4595 -2837
  52. meerk40t/core/elements/element_types.py +125 -105
  53. meerk40t/core/elements/elements.py +4315 -3617
  54. meerk40t/core/elements/files.py +117 -64
  55. meerk40t/core/elements/geometry.py +473 -224
  56. meerk40t/core/elements/grid.py +467 -316
  57. meerk40t/core/elements/materials.py +158 -94
  58. meerk40t/core/elements/notes.py +50 -38
  59. meerk40t/core/elements/offset_clpr.py +934 -912
  60. meerk40t/core/elements/offset_mk.py +963 -955
  61. meerk40t/core/elements/penbox.py +339 -267
  62. meerk40t/core/elements/placements.py +300 -83
  63. meerk40t/core/elements/render.py +785 -687
  64. meerk40t/core/elements/shapes.py +2618 -2092
  65. meerk40t/core/elements/testcases.py +105 -0
  66. meerk40t/core/elements/trace.py +651 -563
  67. meerk40t/core/elements/tree_commands.py +415 -409
  68. meerk40t/core/elements/undo_redo.py +116 -58
  69. meerk40t/core/elements/wordlist.py +319 -200
  70. meerk40t/core/exceptions.py +9 -9
  71. meerk40t/core/laserjob.py +220 -220
  72. meerk40t/core/logging.py +63 -63
  73. meerk40t/core/node/blobnode.py +83 -86
  74. meerk40t/core/node/bootstrap.py +105 -103
  75. meerk40t/core/node/branch_elems.py +40 -31
  76. meerk40t/core/node/branch_ops.py +45 -38
  77. meerk40t/core/node/branch_regmark.py +48 -41
  78. meerk40t/core/node/cutnode.py +29 -32
  79. meerk40t/core/node/effect_hatch.py +375 -257
  80. meerk40t/core/node/effect_warp.py +398 -0
  81. meerk40t/core/node/effect_wobble.py +441 -309
  82. meerk40t/core/node/elem_ellipse.py +404 -309
  83. meerk40t/core/node/elem_image.py +1082 -801
  84. meerk40t/core/node/elem_line.py +358 -292
  85. meerk40t/core/node/elem_path.py +259 -201
  86. meerk40t/core/node/elem_point.py +129 -102
  87. meerk40t/core/node/elem_polyline.py +310 -246
  88. meerk40t/core/node/elem_rect.py +376 -286
  89. meerk40t/core/node/elem_text.py +445 -418
  90. meerk40t/core/node/filenode.py +59 -40
  91. meerk40t/core/node/groupnode.py +138 -74
  92. meerk40t/core/node/image_processed.py +777 -766
  93. meerk40t/core/node/image_raster.py +156 -113
  94. meerk40t/core/node/layernode.py +31 -31
  95. meerk40t/core/node/mixins.py +135 -107
  96. meerk40t/core/node/node.py +1427 -1304
  97. meerk40t/core/node/nutils.py +117 -114
  98. meerk40t/core/node/op_cut.py +463 -335
  99. meerk40t/core/node/op_dots.py +296 -251
  100. meerk40t/core/node/op_engrave.py +414 -311
  101. meerk40t/core/node/op_image.py +755 -369
  102. meerk40t/core/node/op_raster.py +787 -522
  103. meerk40t/core/node/place_current.py +37 -40
  104. meerk40t/core/node/place_point.py +329 -126
  105. meerk40t/core/node/refnode.py +58 -47
  106. meerk40t/core/node/rootnode.py +225 -219
  107. meerk40t/core/node/util_console.py +48 -48
  108. meerk40t/core/node/util_goto.py +84 -65
  109. meerk40t/core/node/util_home.py +61 -61
  110. meerk40t/core/node/util_input.py +102 -102
  111. meerk40t/core/node/util_output.py +102 -102
  112. meerk40t/core/node/util_wait.py +65 -65
  113. meerk40t/core/parameters.py +709 -707
  114. meerk40t/core/planner.py +875 -785
  115. meerk40t/core/plotplanner.py +656 -652
  116. meerk40t/core/space.py +120 -113
  117. meerk40t/core/spoolers.py +706 -705
  118. meerk40t/core/svg_io.py +1836 -1549
  119. meerk40t/core/treeop.py +534 -445
  120. meerk40t/core/undos.py +278 -124
  121. meerk40t/core/units.py +784 -680
  122. meerk40t/core/view.py +393 -322
  123. meerk40t/core/webhelp.py +62 -62
  124. meerk40t/core/wordlist.py +513 -504
  125. meerk40t/cylinder/cylinder.py +247 -0
  126. meerk40t/cylinder/gui/cylindersettings.py +41 -0
  127. meerk40t/cylinder/gui/gui.py +24 -0
  128. meerk40t/device/__init__.py +1 -1
  129. meerk40t/device/basedevice.py +322 -123
  130. meerk40t/device/devicechoices.py +50 -0
  131. meerk40t/device/dummydevice.py +163 -128
  132. meerk40t/device/gui/defaultactions.py +618 -602
  133. meerk40t/device/gui/effectspanel.py +114 -0
  134. meerk40t/device/gui/formatterpanel.py +253 -290
  135. meerk40t/device/gui/warningpanel.py +337 -260
  136. meerk40t/device/mixins.py +13 -13
  137. meerk40t/dxf/__init__.py +1 -1
  138. meerk40t/dxf/dxf_io.py +766 -554
  139. meerk40t/dxf/plugin.py +47 -35
  140. meerk40t/external_plugins.py +79 -79
  141. meerk40t/external_plugins_build.py +28 -28
  142. meerk40t/extra/cag.py +112 -116
  143. meerk40t/extra/coolant.py +403 -0
  144. meerk40t/extra/encode_detect.py +204 -0
  145. meerk40t/extra/ezd.py +1165 -1165
  146. meerk40t/extra/hershey.py +834 -340
  147. meerk40t/extra/imageactions.py +322 -316
  148. meerk40t/extra/inkscape.py +628 -622
  149. meerk40t/extra/lbrn.py +424 -424
  150. meerk40t/extra/outerworld.py +283 -0
  151. meerk40t/extra/param_functions.py +1542 -1556
  152. meerk40t/extra/potrace.py +257 -253
  153. meerk40t/extra/serial_exchange.py +118 -0
  154. meerk40t/extra/updater.py +602 -453
  155. meerk40t/extra/vectrace.py +147 -146
  156. meerk40t/extra/winsleep.py +83 -83
  157. meerk40t/extra/xcs_reader.py +597 -0
  158. meerk40t/fill/fills.py +781 -335
  159. meerk40t/fill/patternfill.py +1061 -1061
  160. meerk40t/fill/patterns.py +614 -567
  161. meerk40t/grbl/control.py +87 -87
  162. meerk40t/grbl/controller.py +990 -903
  163. meerk40t/grbl/device.py +1084 -768
  164. meerk40t/grbl/driver.py +989 -771
  165. meerk40t/grbl/emulator.py +532 -497
  166. meerk40t/grbl/gcodejob.py +783 -767
  167. meerk40t/grbl/gui/grblconfiguration.py +373 -298
  168. meerk40t/grbl/gui/grblcontroller.py +485 -271
  169. meerk40t/grbl/gui/grblhardwareconfig.py +269 -153
  170. meerk40t/grbl/gui/grbloperationconfig.py +105 -0
  171. meerk40t/grbl/gui/gui.py +147 -116
  172. meerk40t/grbl/interpreter.py +44 -44
  173. meerk40t/grbl/loader.py +22 -22
  174. meerk40t/grbl/mock_connection.py +56 -56
  175. meerk40t/grbl/plugin.py +294 -264
  176. meerk40t/grbl/serial_connection.py +93 -88
  177. meerk40t/grbl/tcp_connection.py +81 -79
  178. meerk40t/grbl/ws_connection.py +112 -0
  179. meerk40t/gui/__init__.py +1 -1
  180. meerk40t/gui/about.py +2042 -296
  181. meerk40t/gui/alignment.py +1644 -1608
  182. meerk40t/gui/autoexec.py +199 -0
  183. meerk40t/gui/basicops.py +791 -670
  184. meerk40t/gui/bufferview.py +77 -71
  185. meerk40t/gui/busy.py +232 -133
  186. meerk40t/gui/choicepropertypanel.py +1662 -1469
  187. meerk40t/gui/consolepanel.py +706 -542
  188. meerk40t/gui/devicepanel.py +687 -581
  189. meerk40t/gui/dialogoptions.py +110 -107
  190. meerk40t/gui/executejob.py +316 -306
  191. meerk40t/gui/fonts.py +90 -90
  192. meerk40t/gui/functionwrapper.py +252 -0
  193. meerk40t/gui/gui_mixins.py +729 -0
  194. meerk40t/gui/guicolors.py +205 -182
  195. meerk40t/gui/help_assets/help_assets.py +218 -201
  196. meerk40t/gui/helper.py +154 -0
  197. meerk40t/gui/hersheymanager.py +1440 -846
  198. meerk40t/gui/icons.py +3422 -2747
  199. meerk40t/gui/imagesplitter.py +555 -508
  200. meerk40t/gui/keymap.py +354 -344
  201. meerk40t/gui/laserpanel.py +897 -806
  202. meerk40t/gui/laserrender.py +1470 -1232
  203. meerk40t/gui/lasertoolpanel.py +805 -793
  204. meerk40t/gui/magnetoptions.py +436 -0
  205. meerk40t/gui/materialmanager.py +2944 -0
  206. meerk40t/gui/materialtest.py +1722 -1694
  207. meerk40t/gui/mkdebug.py +646 -359
  208. meerk40t/gui/mwindow.py +163 -140
  209. meerk40t/gui/navigationpanels.py +2605 -2467
  210. meerk40t/gui/notes.py +143 -142
  211. meerk40t/gui/opassignment.py +414 -410
  212. meerk40t/gui/operation_info.py +310 -299
  213. meerk40t/gui/plugin.py +500 -328
  214. meerk40t/gui/position.py +714 -669
  215. meerk40t/gui/preferences.py +901 -650
  216. meerk40t/gui/propertypanels/attributes.py +1461 -1131
  217. meerk40t/gui/propertypanels/blobproperty.py +117 -114
  218. meerk40t/gui/propertypanels/consoleproperty.py +83 -80
  219. meerk40t/gui/propertypanels/gotoproperty.py +77 -0
  220. meerk40t/gui/propertypanels/groupproperties.py +223 -217
  221. meerk40t/gui/propertypanels/hatchproperty.py +489 -469
  222. meerk40t/gui/propertypanels/imageproperty.py +2244 -1384
  223. meerk40t/gui/propertypanels/inputproperty.py +59 -58
  224. meerk40t/gui/propertypanels/opbranchproperties.py +82 -80
  225. meerk40t/gui/propertypanels/operationpropertymain.py +1890 -1638
  226. meerk40t/gui/propertypanels/outputproperty.py +59 -58
  227. meerk40t/gui/propertypanels/pathproperty.py +389 -380
  228. meerk40t/gui/propertypanels/placementproperty.py +1214 -383
  229. meerk40t/gui/propertypanels/pointproperty.py +140 -136
  230. meerk40t/gui/propertypanels/propertywindow.py +313 -181
  231. meerk40t/gui/propertypanels/rasterwizardpanels.py +996 -912
  232. meerk40t/gui/propertypanels/regbranchproperties.py +76 -0
  233. meerk40t/gui/propertypanels/textproperty.py +770 -755
  234. meerk40t/gui/propertypanels/waitproperty.py +56 -55
  235. meerk40t/gui/propertypanels/warpproperty.py +121 -0
  236. meerk40t/gui/propertypanels/wobbleproperty.py +255 -204
  237. meerk40t/gui/ribbon.py +2471 -2210
  238. meerk40t/gui/scene/scene.py +1100 -1051
  239. meerk40t/gui/scene/sceneconst.py +22 -22
  240. meerk40t/gui/scene/scenepanel.py +439 -349
  241. meerk40t/gui/scene/scenespacewidget.py +365 -365
  242. meerk40t/gui/scene/widget.py +518 -505
  243. meerk40t/gui/scenewidgets/affinemover.py +215 -215
  244. meerk40t/gui/scenewidgets/attractionwidget.py +315 -309
  245. meerk40t/gui/scenewidgets/bedwidget.py +120 -97
  246. meerk40t/gui/scenewidgets/elementswidget.py +137 -107
  247. meerk40t/gui/scenewidgets/gridwidget.py +785 -745
  248. meerk40t/gui/scenewidgets/guidewidget.py +765 -765
  249. meerk40t/gui/scenewidgets/laserpathwidget.py +66 -66
  250. meerk40t/gui/scenewidgets/machineoriginwidget.py +86 -86
  251. meerk40t/gui/scenewidgets/nodeselector.py +28 -28
  252. meerk40t/gui/scenewidgets/rectselectwidget.py +592 -346
  253. meerk40t/gui/scenewidgets/relocatewidget.py +33 -33
  254. meerk40t/gui/scenewidgets/reticlewidget.py +83 -83
  255. meerk40t/gui/scenewidgets/selectionwidget.py +2958 -2756
  256. meerk40t/gui/simpleui.py +362 -333
  257. meerk40t/gui/simulation.py +2451 -2094
  258. meerk40t/gui/snapoptions.py +208 -203
  259. meerk40t/gui/spoolerpanel.py +1227 -1180
  260. meerk40t/gui/statusbarwidgets/defaultoperations.py +480 -353
  261. meerk40t/gui/statusbarwidgets/infowidget.py +520 -483
  262. meerk40t/gui/statusbarwidgets/opassignwidget.py +356 -355
  263. meerk40t/gui/statusbarwidgets/selectionwidget.py +172 -171
  264. meerk40t/gui/statusbarwidgets/shapepropwidget.py +754 -236
  265. meerk40t/gui/statusbarwidgets/statusbar.py +272 -260
  266. meerk40t/gui/statusbarwidgets/statusbarwidget.py +268 -270
  267. meerk40t/gui/statusbarwidgets/strokewidget.py +267 -251
  268. meerk40t/gui/themes.py +200 -78
  269. meerk40t/gui/tips.py +590 -0
  270. meerk40t/gui/toolwidgets/circlebrush.py +35 -35
  271. meerk40t/gui/toolwidgets/toolcircle.py +248 -242
  272. meerk40t/gui/toolwidgets/toolcontainer.py +82 -77
  273. meerk40t/gui/toolwidgets/tooldraw.py +97 -90
  274. meerk40t/gui/toolwidgets/toolellipse.py +219 -212
  275. meerk40t/gui/toolwidgets/toolimagecut.py +25 -132
  276. meerk40t/gui/toolwidgets/toolline.py +39 -144
  277. meerk40t/gui/toolwidgets/toollinetext.py +79 -236
  278. meerk40t/gui/toolwidgets/toollinetext_inline.py +296 -0
  279. meerk40t/gui/toolwidgets/toolmeasure.py +163 -216
  280. meerk40t/gui/toolwidgets/toolnodeedit.py +2088 -2074
  281. meerk40t/gui/toolwidgets/toolnodemove.py +92 -94
  282. meerk40t/gui/toolwidgets/toolparameter.py +754 -668
  283. meerk40t/gui/toolwidgets/toolplacement.py +108 -108
  284. meerk40t/gui/toolwidgets/toolpoint.py +68 -59
  285. meerk40t/gui/toolwidgets/toolpointlistbuilder.py +294 -0
  286. meerk40t/gui/toolwidgets/toolpointmove.py +183 -0
  287. meerk40t/gui/toolwidgets/toolpolygon.py +288 -403
  288. meerk40t/gui/toolwidgets/toolpolyline.py +38 -196
  289. meerk40t/gui/toolwidgets/toolrect.py +211 -207
  290. meerk40t/gui/toolwidgets/toolrelocate.py +72 -72
  291. meerk40t/gui/toolwidgets/toolribbon.py +598 -113
  292. meerk40t/gui/toolwidgets/tooltabedit.py +546 -0
  293. meerk40t/gui/toolwidgets/tooltext.py +98 -89
  294. meerk40t/gui/toolwidgets/toolvector.py +213 -204
  295. meerk40t/gui/toolwidgets/toolwidget.py +39 -39
  296. meerk40t/gui/usbconnect.py +98 -91
  297. meerk40t/gui/utilitywidgets/buttonwidget.py +18 -18
  298. meerk40t/gui/utilitywidgets/checkboxwidget.py +90 -90
  299. meerk40t/gui/utilitywidgets/controlwidget.py +14 -14
  300. meerk40t/gui/utilitywidgets/cyclocycloidwidget.py +343 -340
  301. meerk40t/gui/utilitywidgets/debugwidgets.py +148 -0
  302. meerk40t/gui/utilitywidgets/handlewidget.py +27 -27
  303. meerk40t/gui/utilitywidgets/harmonograph.py +450 -447
  304. meerk40t/gui/utilitywidgets/openclosewidget.py +40 -40
  305. meerk40t/gui/utilitywidgets/rotationwidget.py +54 -54
  306. meerk40t/gui/utilitywidgets/scalewidget.py +75 -75
  307. meerk40t/gui/utilitywidgets/seekbarwidget.py +183 -183
  308. meerk40t/gui/utilitywidgets/togglewidget.py +142 -142
  309. meerk40t/gui/utilitywidgets/toolbarwidget.py +8 -8
  310. meerk40t/gui/wordlisteditor.py +985 -931
  311. meerk40t/gui/wxmeerk40t.py +1447 -1169
  312. meerk40t/gui/wxmmain.py +5644 -4112
  313. meerk40t/gui/wxmribbon.py +1591 -1076
  314. meerk40t/gui/wxmscene.py +1631 -1453
  315. meerk40t/gui/wxmtree.py +2416 -2089
  316. meerk40t/gui/wxutils.py +1769 -1099
  317. meerk40t/gui/zmatrix.py +102 -102
  318. meerk40t/image/__init__.py +1 -1
  319. meerk40t/image/dither.py +429 -0
  320. meerk40t/image/imagetools.py +2793 -2269
  321. meerk40t/internal_plugins.py +150 -130
  322. meerk40t/kernel/__init__.py +63 -12
  323. meerk40t/kernel/channel.py +259 -212
  324. meerk40t/kernel/context.py +538 -538
  325. meerk40t/kernel/exceptions.py +41 -41
  326. meerk40t/kernel/functions.py +463 -414
  327. meerk40t/kernel/jobs.py +100 -100
  328. meerk40t/kernel/kernel.py +3828 -3571
  329. meerk40t/kernel/lifecycles.py +71 -71
  330. meerk40t/kernel/module.py +49 -49
  331. meerk40t/kernel/service.py +147 -147
  332. meerk40t/kernel/settings.py +383 -343
  333. meerk40t/lihuiyu/controller.py +883 -876
  334. meerk40t/lihuiyu/device.py +1181 -1069
  335. meerk40t/lihuiyu/driver.py +1466 -1372
  336. meerk40t/lihuiyu/gui/gui.py +127 -106
  337. meerk40t/lihuiyu/gui/lhyaccelgui.py +377 -363
  338. meerk40t/lihuiyu/gui/lhycontrollergui.py +741 -651
  339. meerk40t/lihuiyu/gui/lhydrivergui.py +470 -446
  340. meerk40t/lihuiyu/gui/lhyoperationproperties.py +238 -237
  341. meerk40t/lihuiyu/gui/tcpcontroller.py +226 -190
  342. meerk40t/lihuiyu/interpreter.py +53 -53
  343. meerk40t/lihuiyu/laserspeed.py +450 -450
  344. meerk40t/lihuiyu/loader.py +90 -90
  345. meerk40t/lihuiyu/parser.py +404 -404
  346. meerk40t/lihuiyu/plugin.py +101 -102
  347. meerk40t/lihuiyu/tcp_connection.py +111 -109
  348. meerk40t/main.py +231 -165
  349. meerk40t/moshi/builder.py +788 -781
  350. meerk40t/moshi/controller.py +505 -499
  351. meerk40t/moshi/device.py +495 -442
  352. meerk40t/moshi/driver.py +862 -696
  353. meerk40t/moshi/gui/gui.py +78 -76
  354. meerk40t/moshi/gui/moshicontrollergui.py +538 -522
  355. meerk40t/moshi/gui/moshidrivergui.py +87 -75
  356. meerk40t/moshi/plugin.py +43 -43
  357. meerk40t/network/console_server.py +140 -57
  358. meerk40t/network/kernelserver.py +10 -9
  359. meerk40t/network/tcp_server.py +142 -140
  360. meerk40t/network/udp_server.py +103 -77
  361. meerk40t/network/web_server.py +404 -0
  362. meerk40t/newly/controller.py +1158 -1144
  363. meerk40t/newly/device.py +874 -732
  364. meerk40t/newly/driver.py +540 -412
  365. meerk40t/newly/gui/gui.py +219 -188
  366. meerk40t/newly/gui/newlyconfig.py +116 -101
  367. meerk40t/newly/gui/newlycontroller.py +193 -186
  368. meerk40t/newly/gui/operationproperties.py +51 -51
  369. meerk40t/newly/mock_connection.py +82 -82
  370. meerk40t/newly/newly_params.py +56 -56
  371. meerk40t/newly/plugin.py +1214 -1246
  372. meerk40t/newly/usb_connection.py +322 -322
  373. meerk40t/rotary/gui/gui.py +52 -46
  374. meerk40t/rotary/gui/rotarysettings.py +240 -232
  375. meerk40t/rotary/rotary.py +202 -98
  376. meerk40t/ruida/control.py +291 -91
  377. meerk40t/ruida/controller.py +138 -1088
  378. meerk40t/ruida/device.py +676 -231
  379. meerk40t/ruida/driver.py +534 -472
  380. meerk40t/ruida/emulator.py +1494 -1491
  381. meerk40t/ruida/exceptions.py +4 -4
  382. meerk40t/ruida/gui/gui.py +71 -76
  383. meerk40t/ruida/gui/ruidaconfig.py +239 -72
  384. meerk40t/ruida/gui/ruidacontroller.py +187 -184
  385. meerk40t/ruida/gui/ruidaoperationproperties.py +48 -47
  386. meerk40t/ruida/loader.py +54 -52
  387. meerk40t/ruida/mock_connection.py +57 -109
  388. meerk40t/ruida/plugin.py +124 -87
  389. meerk40t/ruida/rdjob.py +2084 -945
  390. meerk40t/ruida/serial_connection.py +116 -0
  391. meerk40t/ruida/tcp_connection.py +146 -0
  392. meerk40t/ruida/udp_connection.py +73 -0
  393. meerk40t/svgelements.py +9671 -9669
  394. meerk40t/tools/driver_to_path.py +584 -579
  395. meerk40t/tools/geomstr.py +5583 -4680
  396. meerk40t/tools/jhfparser.py +357 -292
  397. meerk40t/tools/kerftest.py +904 -890
  398. meerk40t/tools/livinghinges.py +1168 -1033
  399. meerk40t/tools/pathtools.py +987 -949
  400. meerk40t/tools/pmatrix.py +234 -0
  401. meerk40t/tools/pointfinder.py +942 -942
  402. meerk40t/tools/polybool.py +941 -940
  403. meerk40t/tools/rasterplotter.py +1660 -547
  404. meerk40t/tools/shxparser.py +1047 -901
  405. meerk40t/tools/ttfparser.py +726 -446
  406. meerk40t/tools/zinglplotter.py +595 -593
  407. {meerk40t-0.9.3001.dist-info → meerk40t-0.9.7020.dist-info}/LICENSE +21 -21
  408. {meerk40t-0.9.3001.dist-info → meerk40t-0.9.7020.dist-info}/METADATA +150 -139
  409. meerk40t-0.9.7020.dist-info/RECORD +446 -0
  410. {meerk40t-0.9.3001.dist-info → meerk40t-0.9.7020.dist-info}/WHEEL +1 -1
  411. {meerk40t-0.9.3001.dist-info → meerk40t-0.9.7020.dist-info}/top_level.txt +0 -1
  412. {meerk40t-0.9.3001.dist-info → meerk40t-0.9.7020.dist-info}/zip-safe +1 -1
  413. meerk40t/balormk/elementlightjob.py +0 -159
  414. meerk40t-0.9.3001.dist-info/RECORD +0 -437
  415. test/bootstrap.py +0 -63
  416. test/test_cli.py +0 -12
  417. test/test_core_cutcode.py +0 -418
  418. test/test_core_elements.py +0 -144
  419. test/test_core_plotplanner.py +0 -397
  420. test/test_core_viewports.py +0 -312
  421. test/test_drivers_grbl.py +0 -108
  422. test/test_drivers_lihuiyu.py +0 -443
  423. test/test_drivers_newly.py +0 -113
  424. test/test_element_degenerate_points.py +0 -43
  425. test/test_elements_classify.py +0 -97
  426. test/test_elements_penbox.py +0 -22
  427. test/test_file_svg.py +0 -176
  428. test/test_fill.py +0 -155
  429. test/test_geomstr.py +0 -1523
  430. test/test_geomstr_nodes.py +0 -18
  431. test/test_imagetools_actualize.py +0 -306
  432. test/test_imagetools_wizard.py +0 -258
  433. test/test_kernel.py +0 -200
  434. test/test_laser_speeds.py +0 -3303
  435. test/test_length.py +0 -57
  436. test/test_lifecycle.py +0 -66
  437. test/test_operations.py +0 -251
  438. test/test_operations_hatch.py +0 -57
  439. test/test_ruida.py +0 -19
  440. test/test_spooler.py +0 -22
  441. test/test_tools_rasterplotter.py +0 -29
  442. test/test_wobble.py +0 -133
  443. test/test_zingl.py +0 -124
  444. {test → meerk40t/cylinder}/__init__.py +0 -0
  445. /meerk40t/{core/element_commands.py → cylinder/gui/__init__.py} +0 -0
  446. {meerk40t-0.9.3001.dist-info → meerk40t-0.9.7020.dist-info}/entry_points.txt +0 -0
meerk40t/core/cutplan.py CHANGED
@@ -1,1316 +1,1843 @@
1
- """
2
- CutPlan contains code to process LaserOperations into CutCode objects which are spooled.
3
-
4
- CutPlan handles the various complicated algorithms to optimising the sequence of CutObjects to:
5
- * Sort burns so that travel time is minimised
6
- * Do burns with multiple passes all at the same time (Merge Passes)
7
- * Sort burns for all operations at the same time rather than operation by operation
8
- * Ensure that elements inside closed cut paths are burned before the outside path
9
- * Group these inner burns so that one component on a sheet is completed before the next one is started
10
- * Ensure that non-closed paths start from one of the ends and burned in one continuous burn
11
- rather than being burned in 2 or more separate parts
12
- * Split raster images in to self-contained areas to avoid sweeping over large empty areas
13
- including splitting into individual small areas if burn inner first is set and then recombining
14
- those inside the same curves so that raster burns are fully optimised.
15
- """
16
-
17
- from copy import copy
18
- from math import isinf
19
- from os import times
20
- from time import perf_counter, time
21
- from typing import Optional
22
-
23
- import numpy as np
24
-
25
- from ..svgelements import Group, Polygon
26
- from ..tools.pathtools import VectorMontonizer
27
- from .cutcode.cutcode import CutCode
28
- from .cutcode.cutgroup import CutGroup
29
- from .cutcode.cutobject import CutObject
30
- from .cutcode.rastercut import RasterCut
31
- from .node.node import Node
32
- from .units import Length
33
-
34
-
35
- class CutPlanningFailedError(Exception):
36
- pass
37
-
38
-
39
- class CutPlan:
40
- """
41
- CutPlan is a centralized class to modify plans during cutplanning. It is typically is used to progress from
42
- copied operations through the stages to being properly optimized cutcode.
43
-
44
- The stages are:
45
- 1. Copy: This can be `copy-selected` or `copy` to decide which operations are moved initially into the plan.
46
- a. Copied operations are copied to real. All the reference nodes are replaced with copies of the actual elements
47
- 2. Preprocess: Convert from scene space to device space and add validation operations.
48
- 3. Validate: Run all the validation operations, this could be anything the nodes added during preprocess.
49
- a. Calls `execute` operation.
50
- 4. Blob: We convert all the operations/elements into proper cutcode. Some operations do not necessarily need to
51
- convert to cutcode. They merely need to convert to some type of spoolable operation.
52
- 5. Preopt: Preoptimize adds in the relevant optimization operations into the cutcode.
53
- 6. Optimize: This calls the added functions set during the preopt process.
54
- a. Calls `execute` operation.
55
- """
56
-
57
- def __init__(self, name, planner):
58
- self.name = name
59
- self.context = planner
60
- self.plan = list()
61
- self.spool_commands = list()
62
- self.commands = list()
63
- self.channel = self.context.channel("optimize", timestamp=True)
64
- self.outline = None
65
- self._previous_bounds = None
66
-
67
- def __str__(self):
68
- parts = list()
69
- parts.append(self.name)
70
- if len(self.plan):
71
- parts.append(f"#{len(self.plan)}")
72
- for p in self.plan:
73
- try:
74
- parts.append(p.__name__)
75
- except AttributeError:
76
- parts.append(p.__class__.__name__)
77
- else:
78
- parts.append("-- Empty --")
79
- return " ".join(parts)
80
-
81
- def execute(self):
82
- """
83
- Execute runs all the commands built during `preprocess` and `preopt` (preoptimize) stages.
84
-
85
- If a command's execution adds a command to commands, this command is also executed.
86
- @return:
87
- """
88
- # Using copy of commands, so commands can add ops.
89
- while self.commands:
90
- # Executing command can add a command, complete them all.
91
- commands = self.commands[:]
92
- self.commands.clear()
93
- for command in commands:
94
- command()
95
-
96
- def final(self):
97
- """
98
- Executes all the spool_commands built during the other stages.
99
-
100
- If a command's execution added a spool_command we run it during final.
101
-
102
- Final is called during at the time of spool. Just before the laserjob is created.
103
- @return:
104
- """
105
- # Using copy of commands, so commands can add ops.
106
- while self.spool_commands:
107
- # Executing command can add a command, complete them all.
108
- commands = self.spool_commands[:]
109
- self.spool_commands.clear()
110
- for command in commands:
111
- command()
112
-
113
- def preprocess(self):
114
- """
115
- Preprocess stage.
116
-
117
- All operation nodes are called with the current context, the matrix converting from scene to device, and
118
- commands.
119
-
120
- Nodes are expected to convert relevant properties and shapes from scene coordinates to device coordinate systems
121
- if they need operations. They are also expected to add any relevant commands to the commands list. The commands
122
- list sequentially in the next stage.
123
- """
124
- device = self.context.device
125
-
126
- scene_to_device_matrix = device.view.matrix
127
-
128
- # ==========
129
- # Determine the jobs bounds.
130
- # ==========
131
- bounds = Node.union_bounds(self.plan, bounds=self._previous_bounds)
132
- self._previous_bounds = bounds
133
- if bounds is not None:
134
- left, top, right, bottom = bounds
135
- min_x = min(right, left)
136
- min_y = min(top, bottom)
137
- max_x = max(right, left)
138
- max_y = max(top, bottom)
139
- if isinf(min_x) or isinf(min_y) or isinf(max_x) or isinf(max_y):
140
- # Infinite bounds are invalid.
141
- self.outline = None
142
- else:
143
- self.outline = (
144
- device.view.position(min_x, min_y),
145
- device.view.position(max_x, min_y),
146
- device.view.position(max_x, max_y),
147
- device.view.position(min_x, max_y),
148
- )
149
-
150
- # ==========
151
- # Query Placements
152
- # ==========
153
- placements = []
154
- for place in self.plan:
155
- if not hasattr(place, "type"):
156
- continue
157
- if place.type.startswith("place "):
158
- if hasattr(place, "output") and place.output:
159
- loops = 1
160
- if hasattr(place, "loops") and place.loops > 1:
161
- loops = place.loops
162
- for idx in range(loops):
163
- placements.extend(
164
- place.placements(
165
- self.context, self.outline, scene_to_device_matrix, self
166
- )
167
- )
168
- if not placements:
169
- # Absolute coordinates.
170
- placements.append(scene_to_device_matrix)
171
-
172
- # TODO: Correct rotary.
173
- # rotary = self.context.rotary
174
- # if rotary.rotary_enabled:
175
- # axis = rotary.axis
176
-
177
- original_ops = copy(self.plan)
178
- if self.context.opt_raster_optimisation and self.context.do_optimization:
179
- try:
180
- margin = float(Length(self.context.opt_raster_opt_margin, "0"))
181
- except (AttributeError, ValueError):
182
- margin = 0
183
- self.optimize_rasters(original_ops, "op raster", margin)
184
- # We could do this as well, but images are burnt separately anyway...
185
- # self.optimize_rasters(original_ops, "op image", margin)
186
- self.plan.clear()
187
-
188
- idx = 0
189
- self.context.elements.mywordlist.push()
190
-
191
- for placement in placements:
192
- # Adjust wordlist
193
- if idx > 0:
194
- self.context.elements.mywordlist.move_all_indices(1)
195
-
196
- for original_op in original_ops:
197
- try:
198
- op = original_op.copy_with_reified_tree()
199
- except AttributeError:
200
- op = original_op
201
- if not hasattr(op, "type") or op.type is None:
202
- self.plan.append(op)
203
- continue
204
- if op.type.startswith("place "):
205
- continue
206
- self.plan.append(op)
207
- if op.type.startswith("op"):
208
- if hasattr(op, "preprocess"):
209
- op.preprocess(self.context, placement, self)
210
- for node in op.flat():
211
- if node is op:
212
- continue
213
- if hasattr(node, "mktext") and hasattr(node, "_cache"):
214
- newtext = self.context.elements.wordlist_translate(
215
- node.mktext, elemnode=node, increment=False
216
- )
217
- oldtext = getattr(node, "_translated_text", "")
218
- # print (f"Was called inside preprocess for {node.type} with {node.mktext}, old: {oldtext}, new:{newtext}")
219
- if newtext != oldtext:
220
- node._translated_text = newtext
221
- kernel = self.context.elements.kernel
222
- for property_op in kernel.lookup_all("path_updater/.*"):
223
- property_op(kernel.root, node)
224
- if hasattr(node, "_cache"):
225
- node._cache = None
226
- if hasattr(node, "preprocess"):
227
- node.preprocess(self.context, placement, self)
228
- idx += 1
229
- self.context.elements.mywordlist.pop()
230
-
231
- def _to_grouped_plan(self, plan):
232
- """
233
- Break operations into grouped sequences of Operations and utility operations.
234
-
235
- We can only merge between contiguous groups of operations. We cannot merge util node types with op node types.
236
-
237
- Anything that does not have a type is likely able to spool, but cannot merge and are not grouped. Only grouped
238
- operations are candidates for cutcode merging.
239
- @return:
240
- """
241
- last_type = None
242
- group = list()
243
- for c in plan:
244
- c_type = (
245
- c.type
246
- if hasattr(c, "type") and c.type is not None
247
- else type(c).__name__
248
- )
249
- if c_type.startswith("effect"):
250
- # Effects should not be used here.
251
- continue
252
- if last_type is not None:
253
- if c_type.startswith("op") != last_type.startswith("op"):
254
- # This cannot merge
255
- yield group
256
- group = list()
257
- group.append(c)
258
- last_type = c_type
259
- if group:
260
- yield group
261
-
262
- def _to_blob_plan_passes_first(self, grouped_plan):
263
- """
264
- If Merge operations and not merge passes we need to iterate passes first and operations second.
265
-
266
- This function is specific to that case, when passes first operations second.
267
-
268
- Converts the operations to cutcode.
269
- @param grouped_plan:
270
- @return:
271
- """
272
- for plan in grouped_plan:
273
- pass_idx = 0
274
- while True:
275
- more_passes_possible = False
276
- for op in plan:
277
- if (
278
- not hasattr(op, "type")
279
- or op.type == "util console"
280
- or (
281
- not op.type.startswith("op")
282
- and not op.type.startswith("util")
283
- )
284
- ):
285
- # This is an irregular object and can't become cutcode.
286
- if pass_idx == 0:
287
- # irregular objects have an implicit single pass.
288
- yield op
289
- continue
290
- if pass_idx > op.implicit_passes - 1:
291
- continue
292
- more_passes_possible = True
293
- yield from self._blob_convert(op, 1, 1, force_idx=pass_idx)
294
- if not more_passes_possible:
295
- # No operation needs additional passes.
296
- break
297
- pass_idx += 1
298
-
299
- def _to_blob_plan(self, grouped_plan):
300
- """
301
- Iterate operations first and passes second. Operation first mode. Passes are done within cutcode pass value.
302
-
303
- Converts the operations to cutcode.
304
-
305
- @param grouped_plan:
306
- @return:
307
- """
308
- context = self.context
309
- for plan in grouped_plan:
310
- for op in plan:
311
- if not hasattr(op, "type") or op.type is None:
312
- yield op
313
- continue
314
- if (
315
- not op.type.startswith("op")
316
- and not op.type.startswith("util")
317
- or op.type == "util console"
318
- ):
319
- yield op
320
- continue
321
- passes = op.implicit_passes
322
- if context.opt_merge_passes and (
323
- context.opt_nearest_neighbor or context.opt_inner_first
324
- ):
325
- # Providing we do some sort of post-processing of blobs,
326
- # then merge passes is handled by the greedy or inner_first algorithms
327
-
328
- # So, we only need 1 copy and to set the passes.
329
- yield from self._blob_convert(op, copies=1, passes=passes)
330
- else:
331
- # We do passes by making copies of the cutcode.
332
- yield from self._blob_convert(op, copies=passes, passes=1)
333
-
334
- def _blob_convert(self, op, copies, passes, force_idx=None):
335
- """
336
- Converts the given op into cutcode. Provides `copies` copies of that cutcode, sets
337
- the passes to passes for each cutcode object.
338
-
339
- @param op:
340
- @param copies:
341
- @param passes:
342
- @param force_idx:
343
- @return:
344
- """
345
- context = self.context
346
- for pass_idx in range(copies):
347
- # if the settings dictionary doesn't exist we use the defined instance dictionary
348
- try:
349
- settings_dict = op.settings
350
- except AttributeError:
351
- settings_dict = op.__dict__
352
- # If passes isn't equal to implicit passes then we need a different settings to permit change
353
- settings = (
354
- settings_dict if op.implicit_passes == passes else dict(settings_dict)
355
- )
356
- cutcode = CutCode(
357
- op.as_cutobjects(
358
- closed_distance=context.opt_closed_distance,
359
- passes=passes,
360
- ),
361
- settings=settings,
362
- )
363
- if len(cutcode) == 0:
364
- break
365
- cutcode.constrained = op.type == "op cut" and context.opt_inner_first
366
- cutcode.pass_index = pass_idx if force_idx is None else force_idx
367
- cutcode.original_op = op.type
368
- yield cutcode
369
-
370
- def _to_merged_plan(self, blob_plan):
371
- """
372
- Convert the blobbed plan of cutcode (rather than operations) into a merged plan for those cutcode operations
373
- which are permitted to merge into the same cutcode object. All items within the same cutcode object are
374
- candidates for optimizations. For example, if a HomeCut was merged LineCut in the cutcode, that entire group
375
- would merge together, finding the most optimized time to home the machine (if optimization was enabled).
376
-
377
- @param blob_plan:
378
- @return:
379
- """
380
- last_item = None
381
- context = self.context
382
- for blob in blob_plan:
383
- try:
384
- blob.jog_distance = context.opt_jog_minimum
385
- blob.jog_enable = context.opt_rapid_between
386
- except AttributeError:
387
- pass
388
- if last_item and self._should_merge(context, last_item, blob):
389
- # Do not check empty plan.
390
- if blob.constrained:
391
- # if any merged object is constrained, then combined blob is also constrained.
392
- last_item.constrained = True
393
- last_item.extend(blob)
394
-
395
- else:
396
- if isinstance(blob, CutObject) and not isinstance(blob, CutCode):
397
- cc = CutCode([blob])
398
- cc.original_op = blob.original_op
399
- cc.pass_index = blob.pass_index
400
- last_item = cc
401
- yield last_item
402
- else:
403
- last_item = blob
404
- yield last_item
405
-
406
- def _should_merge(self, context, last_item, current_item):
407
- """
408
- Checks whether we should merge the blob with the current plan.
409
-
410
- We can only merge things if we have the right objects and settings.
411
- """
412
- if not isinstance(last_item, CutCode):
413
- # The last plan item is not cutcode, merge is only between cutobjects adding to cutcode.
414
- return False
415
- if not isinstance(current_item, CutObject):
416
- # The object to be merged is not a cutObject and cannot be added to Cutcode.
417
- return False
418
- last_op = last_item.original_op
419
- if last_op is None:
420
- last_op = ""
421
- current_op = current_item.original_op
422
- if current_op is None:
423
- current_op = ""
424
- if last_op.startswith("util") or current_op.startswith("util"):
425
- return False
426
-
427
- if (
428
- not context.opt_merge_passes
429
- and last_item.pass_index != current_item.pass_index
430
- ):
431
- # Do not merge if opt_merge_passes is off, and pass_index do not match
432
- return False
433
- if (
434
- not context.opt_merge_ops
435
- and last_item.settings is not current_item.settings
436
- ):
437
- # Do not merge if opt_merge_ops is off, and the original ops do not match
438
- # Same settings object implies same original operation
439
- return False
440
- if not context.opt_inner_first and last_item.original_op == "op cut":
441
- # Do not merge if opt_inner_first is off, and operation was originally a cut.
442
- return False
443
- return True # No reason these should not be merged.
444
-
445
- def blob(self):
446
- """
447
- Blob converts User operations to CutCode objects.
448
-
449
- In order to have CutCode objects in the correct sequence for merging we need to:
450
- 1. Break operations into grouped sequences of Operations and utility operations.
451
- We can only merge between contiguous groups of operations (with option set)
452
- 2. The sequence of CutObjects needs to reflect merge settings
453
- Normal sequence is to iterate operations and then passes for each operation.
454
- With Merge ops and not Merge passes, we need to iterate on passes first and then ops within.
455
- """
456
-
457
- if not self.plan:
458
- return
459
- context = self.context
460
- grouped_plan = list(self._to_grouped_plan(self.plan))
461
- if context.opt_merge_ops and not context.opt_merge_passes:
462
- blob_plan = list(self._to_blob_plan_passes_first(grouped_plan))
463
- else:
464
- blob_plan = list(self._to_blob_plan(grouped_plan))
465
- self.plan.clear()
466
- self.plan.extend(self._to_merged_plan(blob_plan))
467
-
468
- def preopt(self):
469
- """
470
- Add commands for optimize stage. This stage tends to do very little but checks the settings and adds the
471
- relevant operations.
472
-
473
- @return:
474
- """
475
- context = self.context
476
- has_cutcode = False
477
- for op in self.plan:
478
- try:
479
- if isinstance(op, CutCode):
480
- has_cutcode = True
481
- break
482
- except AttributeError:
483
- pass
484
- if not has_cutcode:
485
- return
486
-
487
- if context.opt_reduce_travel and (
488
- context.opt_nearest_neighbor or context.opt_2opt
489
- ):
490
- if context.opt_nearest_neighbor:
491
- self.commands.append(self.optimize_travel)
492
- if context.opt_2opt and not context.opt_inner_first:
493
- try:
494
- # Check for numpy before adding additional 2opt
495
- # pylint: disable=unused-import
496
- import numpy as np
497
-
498
- self.commands.append(self.optimize_travel_2opt)
499
- except ImportError:
500
- pass
501
-
502
- elif context.opt_inner_first:
503
- self.commands.append(self.optimize_cuts)
504
- self.commands.append(self.merge_cutcode)
505
- if context.opt_reduce_directions:
506
- pass
507
- if context.opt_remove_overlap:
508
- pass
509
-
510
- def optimize_travel_2opt(self):
511
- """
512
- Optimize travel 2opt at optimize stage on cutcode
513
- @return:
514
- """
515
- busy = self.context.kernel.busyinfo
516
- _ = self.context.kernel.translation
517
- if busy.shown:
518
- busy.change(msg=_("Optimize inner travel"), keep=1)
519
- busy.show()
520
- channel = self.context.channel("optimize", timestamp=True)
521
- for i, c in enumerate(self.plan):
522
- if isinstance(c, CutCode):
523
- self.plan[i] = short_travel_cutcode_2opt(self.plan[i], channel=channel)
524
-
525
- def optimize_cuts(self):
526
- """
527
- Optimize cuts at optimize stage on cutcode
528
- @return:
529
- """
530
- # Update Info-panel if displayed
531
- busy = self.context.kernel.busyinfo
532
- _ = self.context.kernel.translation
533
- if busy.shown:
534
- busy.change(msg=_("Optimize cuts"), keep=1)
535
- busy.show()
536
- tolerance = 0
537
- if self.context.opt_inner_first:
538
- stol = self.context.opt_inner_tolerance
539
- try:
540
- tolerance = (
541
- float(Length(stol))
542
- * 2
543
- / (
544
- self.context.device.view.native_scale_x
545
- + self.context.device.view.native_scale_y
546
- )
547
- )
548
- except ValueError:
549
- pass
550
- # print(f"Tolerance: {tolerance}")
551
-
552
- channel = self.context.channel("optimize", timestamp=True)
553
- grouped_inner = self.context.opt_inner_first and self.context.opt_inners_grouped
554
- for i, c in enumerate(self.plan):
555
- if busy.shown:
556
- busy.change(
557
- msg=_("Optimize cuts") + f" {i + 1}/{len(self.plan)}", keep=1
558
- )
559
- busy.show()
560
- if isinstance(c, CutCode):
561
- if c.constrained:
562
- self.plan[i] = inner_first_ident(
563
- c, channel=channel, tolerance=tolerance
564
- )
565
- c = self.plan[i]
566
- self.plan[i] = inner_selection_cutcode(
567
- c,
568
- channel=channel,
569
- grouped_inner=grouped_inner,
570
- )
571
-
572
- def optimize_travel(self):
573
- """
574
- Optimize travel at optimize stage on cutcode.
575
- @return:
576
- """
577
- # Update Info-panel if displayed
578
- busy = self.context.kernel.busyinfo
579
- _ = self.context.kernel.translation
580
- if busy.shown:
581
- busy.change(msg=_("Optimize travel"), keep=1)
582
- busy.show()
583
- try:
584
- last = self.context.device.native
585
- except AttributeError:
586
- last = None
587
- tolerance = 0
588
- if self.context.opt_inner_first:
589
- stol = self.context.opt_inner_tolerance
590
- try:
591
- tolerance = (
592
- float(Length(stol))
593
- * 2
594
- / (
595
- self.context.device.view.native_scale_x
596
- + self.context.device.view.native_scale_y
597
- )
598
- )
599
- except ValueError:
600
- pass
601
- # print(f"Tolerance: {tolerance}")
602
-
603
- channel = self.context.channel("optimize", timestamp=True)
604
- grouped_inner = self.context.opt_inner_first and self.context.opt_inners_grouped
605
- for i, c in enumerate(self.plan):
606
- if busy.shown:
607
- busy.change(
608
- msg=_("Optimize travel") + f" {i + 1}/{len(self.plan)}", keep=1
609
- )
610
- busy.show()
611
-
612
- if isinstance(c, CutCode):
613
- if c.constrained:
614
- self.plan[i] = inner_first_ident(
615
- c, channel=channel, tolerance=tolerance
616
- )
617
- c = self.plan[i]
618
- if last is not None:
619
- c._start_x, c._start_y = last
620
- self.plan[i] = short_travel_cutcode(
621
- c,
622
- channel=channel,
623
- complete_path=self.context.opt_complete_subpaths,
624
- grouped_inner=grouped_inner,
625
- )
626
- last = self.plan[i].end
627
-
628
- def merge_cutcode(self):
629
- """
630
- Merge all adjacent optimized cutcode into single cutcode objects.
631
- @return:
632
- """
633
- busy = self.context.kernel.busyinfo
634
- _ = self.context.kernel.translation
635
- if busy.shown:
636
- busy.change(msg=_("Merging cutcode"), keep=1)
637
- busy.show()
638
- for i in range(len(self.plan) - 1, 0, -1):
639
- cur = self.plan[i]
640
- prev = self.plan[i - 1]
641
- if isinstance(cur, CutCode) and isinstance(prev, CutCode):
642
- prev.extend(cur)
643
- del self.plan[i]
644
-
645
- def clear(self):
646
- self._previous_bounds = None
647
- self.plan.clear()
648
- self.commands.clear()
649
-
650
- def optimize_rasters(self, operation_list, op_type, margin):
651
- def generate_clusters(operation):
652
- def overlapping(bounds1, bounds2, margin):
653
- # The rectangles don't overlap if
654
- # one rectangle's minimum in some dimension
655
- # is greater than the other's maximum in
656
- # that dimension.
657
- flagx = (bounds1[0] > bounds2[2] + margin) or (
658
- bounds2[0] > bounds1[2] + margin
659
- )
660
- flagy = (bounds1[1] > bounds2[3] + margin) or (
661
- bounds2[1] > bounds1[3] + margin
662
- )
663
- return bool(not (flagx or flagy))
664
-
665
- clusters = list()
666
- cluster_bounds = list()
667
- for node in operation.children:
668
- try:
669
- if node.type == "reference":
670
- node = node.node
671
- bb = node.paint_bounds
672
- except AttributeError:
673
- # Either no element node or does not have bounds
674
- continue
675
- clusters.append([node])
676
- cluster_bounds.append(
677
- (
678
- bb[0],
679
- bb[1],
680
- bb[2],
681
- bb[3],
682
- )
683
- )
684
-
685
- def detail_overlap(index1, index2):
686
- # But is there a real overlap, or just one with the union bounds?
687
- for outer_node in clusters[index1]:
688
- try:
689
- bb_outer = outer_node.paint_bounds
690
- except AttributeError:
691
- continue
692
- for inner_node in clusters[index2]:
693
- try:
694
- bb_inner = inner_node.paint_bounds
695
- except AttributeError:
696
- continue
697
- if overlapping(bb_outer, bb_inner, margin):
698
- return True
699
- # We did not find anything...
700
- return False
701
-
702
- needs_repeat = True
703
- while needs_repeat:
704
- needs_repeat = False
705
- for outer_idx in range(len(clusters) - 1, -1, -1):
706
- # Loop downwards as we are manipulating the arrays
707
- bb = cluster_bounds[outer_idx]
708
- for inner_idx in range(outer_idx - 1, -1, -1):
709
- cc = cluster_bounds[inner_idx]
710
- if not overlapping(bb, cc, margin):
711
- continue
712
- # Overlap!
713
- # print (f"Reuse cluster {inner_idx} for {outer_idx}")
714
- real_overlap = detail_overlap(outer_idx, inner_idx)
715
- if real_overlap:
716
- needs_repeat = True
717
- # We need to extend the inner cluster by the outer
718
- clusters[inner_idx].extend(clusters[outer_idx])
719
- cluster_bounds[inner_idx] = (
720
- min(bb[0], cc[0]),
721
- min(bb[1], cc[1]),
722
- max(bb[2], cc[2]),
723
- max(bb[3], cc[3]),
724
- )
725
- clusters.pop(outer_idx)
726
- cluster_bounds.pop(outer_idx)
727
- # We are done with the inner loop, as we effectively
728
- # destroyed the cluster element we compared
729
- break
730
-
731
- return clusters
732
-
733
- stime = perf_counter()
734
- scount = 0
735
- ecount = 0
736
- for idx in range(len(operation_list) - 1, -1, -1):
737
- op = operation_list[idx]
738
- if (
739
- not hasattr(op, "type")
740
- or not hasattr(op, "children")
741
- or op.type != op_type
742
- ):
743
- # That's not what we are looking for
744
- continue
745
- scount += 1
746
- clusters = generate_clusters(op)
747
- ecount += len(clusters)
748
- if len(clusters) > 0:
749
- # Create cluster copies of the raster op
750
- for entry in clusters:
751
- newop = copy(op)
752
- newop._references.clear()
753
- for node in entry:
754
- newop.add_reference(node)
755
- newop.set_dirty_bounds()
756
- operation_list.insert(idx + 1, newop)
757
-
758
- # And remove the original one...
759
- operation_list.pop(idx)
760
- etime = perf_counter()
761
- if self.channel:
762
- self.channel(
763
- f"Optimise {op_type} finished after {etime-stime:.2f} seconds, inflated {scount} operations to {ecount}"
764
- )
765
-
766
-
767
- def is_inside(inner, outer, tolerance=0):
768
- """
769
- Test that path1 is inside path2.
770
- @param inner: inner path
771
- @param outer: outer path
772
- @param tolerance: 0
773
- @return: whether path1 is wholly inside path2.
774
- """
775
- # We still consider a path to be inside another path if it is
776
- # within a certain tolerance
777
- inner_path = inner
778
- outer_path = outer
779
- if outer == inner: # This is the same object.
780
- return False
781
- if hasattr(inner, "path") and inner.path is not None:
782
- inner_path = inner.path
783
- if hasattr(outer, "path") and outer.path is not None:
784
- outer_path = outer.path
785
- if not hasattr(inner, "bounding_box"):
786
- inner.bounding_box = Group.union_bbox([inner_path])
787
- if not hasattr(outer, "bounding_box"):
788
- outer.bounding_box = Group.union_bbox([outer_path])
789
- if outer.bounding_box is None:
790
- return False
791
- if inner.bounding_box is None:
792
- return False
793
- # Raster is inner if the bboxes overlap anywhere
794
- if isinstance(inner, RasterCut):
795
- return (
796
- inner.bounding_box[0] <= outer.bounding_box[2] + tolerance
797
- and inner.bounding_box[1] <= outer.bounding_box[3] + tolerance
798
- and inner.bounding_box[2] >= outer.bounding_box[0] - tolerance
799
- and inner.bounding_box[3] >= outer.bounding_box[1] - tolerance
800
- )
801
- if outer.bounding_box[0] > inner.bounding_box[0] + tolerance:
802
- # outer minx > inner minx (is not contained)
803
- return False
804
- if outer.bounding_box[1] > inner.bounding_box[1] + tolerance:
805
- # outer miny > inner miny (is not contained)
806
- return False
807
- if outer.bounding_box[2] < inner.bounding_box[2] - tolerance:
808
- # outer maxx < inner maxx (is not contained)
809
- return False
810
- if outer.bounding_box[3] < inner.bounding_box[3] - tolerance:
811
- # outer maxy < inner maxy (is not contained)
812
- return False
813
- if outer.bounding_box == inner.bounding_box:
814
- if outer == inner: # This is the same object.
815
- return False
816
-
817
- # Inner bbox is entirely inside outer bbox,
818
- # however that does not mean that inner is actually inside outer
819
- # i.e. inner could be small and between outer and the bbox corner,
820
- # or small and contained in a concave indentation.
821
- #
822
- # VectorMontonizer can determine whether a point is inside a polygon.
823
- # The code below uses a brute force approach by considering a fixed number of points,
824
- # however we should consider a future enhancement whereby we create
825
- # a polygon more intelligently based on size and curvature
826
- # i.e. larger bboxes need more points and
827
- # tighter curves need more points (i.e. compare vector directions)
828
-
829
- def vm_code(outer, outer_path, inner, inner_path):
830
- if not hasattr(outer, "vm"):
831
- outer_path = Polygon(
832
- [outer_path.point(i / 1000.0, error=1e4) for i in range(1001)]
833
- )
834
- vm = VectorMontonizer()
835
- vm.add_polyline(outer_path)
836
- outer.vm = vm
837
- for i in range(101):
838
- p = inner_path.point(
839
- i / 100.0, error=1e4
840
- ) # Point(4633.110682926033,1788.413481872459)
841
- if not outer.vm.is_point_inside(p.x, p.y, tolerance=tolerance):
842
- return False
843
- return True
844
-
845
- def sb_code(outer, outer_path, inner, inner_path):
846
- from ..tools.geomstr import Polygon as Gpoly
847
- from ..tools.geomstr import Scanbeam
848
-
849
- if not hasattr(outer, "sb"):
850
- pg = outer_path.npoint(np.linspace(0, 1, 1001), error=1e4)
851
- pg = pg[:, 0] + pg[:, 1] * 1j
852
-
853
- outer_path = Gpoly(*pg)
854
- sb = Scanbeam(outer_path.geomstr)
855
- outer.sb = sb
856
- p = inner_path.npoint(np.linspace(0, 1, 101), error=1e4)
857
- points = p[:, 0] + p[:, 1] * 1j
858
-
859
- q = outer.sb.points_in_polygon(points)
860
- return q.all()
861
-
862
- return sb_code(outer, outer_path, inner, inner_path)
863
- # return vm_code(outer, outer_path, inner, inner_path)
864
-
865
-
866
- def reify_matrix(self):
867
- """Apply the matrix to the path and reset matrix."""
868
- self.element = abs(self.element)
869
- self.scene_bounds = None
870
-
871
-
872
- # def bounding_box(elements):
873
- # if isinstance(elements, SVGElement):
874
- # elements = [elements]
875
- # elif isinstance(elements, list):
876
- # try:
877
- # elements = [e.object for e in elements if isinstance(e.object, SVGElement)]
878
- # except AttributeError:
879
- # pass
880
- # boundary_points = []
881
- # for e in elements:
882
- # box = e.bbox(False)
883
- # if box is None:
884
- # continue
885
- # top_left = e.transform.point_in_matrix_space([box[0], box[1]])
886
- # top_right = e.transform.point_in_matrix_space([box[2], box[1]])
887
- # bottom_left = e.transform.point_in_matrix_space([box[0], box[3]])
888
- # bottom_right = e.transform.point_in_matrix_space([box[2], box[3]])
889
- # boundary_points.append(top_left)
890
- # boundary_points.append(top_right)
891
- # boundary_points.append(bottom_left)
892
- # boundary_points.append(bottom_right)
893
- # if len(boundary_points) == 0:
894
- # return None
895
- # xmin = min([e[0] for e in boundary_points])
896
- # ymin = min([e[1] for e in boundary_points])
897
- # xmax = max([e[0] for e in boundary_points])
898
- # ymax = max([e[1] for e in boundary_points])
899
- # return xmin, ymin, xmax, ymax
900
-
901
-
902
- def correct_empty(context: CutGroup):
903
- """
904
- Iterates through backwards deleting any entries that are empty.
905
- """
906
- for index in range(len(context) - 1, -1, -1):
907
- c = context[index]
908
- if not isinstance(c, CutGroup):
909
- continue
910
- correct_empty(c)
911
- if len(c) == 0:
912
- del context[index]
913
-
914
-
915
- def inner_first_ident(context: CutGroup, channel=None, tolerance=0):
916
- """
917
- Identifies closed CutGroups and then identifies any other CutGroups which
918
- are entirely inside.
919
-
920
- The CutGroup candidate generator uses this information to not offer the outer CutGroup
921
- as a candidate for a burn unless all contained CutGroups are cut.
922
-
923
- The Cutcode is resequenced in either short_travel_cutcode or inner_selection_cutcode
924
- based on this information, as used in the
925
- """
926
- if channel:
927
- start_time = time()
928
- start_times = times()
929
- channel("Executing Inner-First Identification")
930
-
931
- groups = [cut for cut in context if isinstance(cut, (CutGroup, RasterCut))]
932
- closed_groups = [g for g in groups if isinstance(g, CutGroup) and g.closed]
933
- context.contains = closed_groups
934
- if channel:
935
- channel(
936
- f"Compare {len(groups)} groups against {len(closed_groups)} closed groups"
937
- )
938
-
939
- constrained = False
940
- for outer in closed_groups:
941
- for inner in groups:
942
- if outer is inner:
943
- continue
944
- # if outer is inside inner, then inner cannot be inside outer
945
- if inner.contains and outer in inner.contains:
946
- continue
947
-
948
- if is_inside(inner, outer, tolerance):
949
- constrained = True
950
- if outer.contains is None:
951
- outer.contains = list()
952
- outer.contains.append(inner)
953
-
954
- if inner.inside is None:
955
- inner.inside = list()
956
- inner.inside.append(outer)
957
-
958
- context.constrained = constrained
959
-
960
- # for g in groups:
961
- # if g.contains is not None:
962
- # for inner in g.contains:
963
- # assert inner in groups
964
- # assert inner is not g
965
- # assert g in inner.inside
966
- # if g.inside is not None:
967
- # for outer in g.inside:
968
- # assert outer in groups
969
- # assert outer is not g
970
- # assert g in outer.contains
971
-
972
- if channel:
973
- end_times = times()
974
- channel(
975
- f"Inner paths identified in {time() - start_time:.3f} elapsed seconds: {constrained} "
976
- f"using {end_times[0] - start_times[0]:.3f} seconds CPU"
977
- )
978
- return context
979
-
980
-
981
- def short_travel_cutcode(
982
- context: CutCode,
983
- channel=None,
984
- complete_path: Optional[bool] = False,
985
- grouped_inner: Optional[bool] = False,
986
- ):
987
- """
988
- Selects cutcode from candidate cutcode (burns_done < passes in this CutCode),
989
- optimizing with greedy/brute for shortest distances optimizations.
990
-
991
- For paths starting at exactly the same point forward paths are preferred over reverse paths
992
-
993
- We start at either 0,0 or the value given in `context.start`
994
-
995
- This is time-intense hyper-optimized code, so it contains several seemingly redundant
996
- checks.
997
- """
998
- if channel:
999
- start_length = context.length_travel(True)
1000
- start_time = time()
1001
- start_times = times()
1002
- channel("Executing Greedy Short-Travel optimization")
1003
- channel(f"Length at start: {start_length:.0f} steps")
1004
-
1005
- curr = context.start
1006
- if curr is None:
1007
- curr = 0
1008
- else:
1009
- curr = complex(curr[0], curr[1])
1010
-
1011
- for c in context.flat():
1012
- c.burns_done = 0
1013
-
1014
- ordered = CutCode()
1015
- while True:
1016
- closest = None
1017
- backwards = False
1018
- distance = float("inf")
1019
-
1020
- try:
1021
- last_segment = ordered[-1]
1022
- except IndexError:
1023
- pass
1024
- else:
1025
- if last_segment.normal:
1026
- # Attempt to initialize value to next segment in subpath
1027
- cut = last_segment.next
1028
- if cut and cut.burns_done < cut.passes:
1029
- closest = cut
1030
- backwards = False
1031
- start = closest.start
1032
- distance = abs(complex(start[0], start[1]) - curr)
1033
- else:
1034
- # Attempt to initialize value to previous segment in subpath
1035
- cut = last_segment.previous
1036
- if cut and cut.burns_done < cut.passes:
1037
- closest = cut
1038
- backwards = True
1039
- end = closest.end
1040
- distance = abs(complex(end[0], end[1]) - curr)
1041
- # Gap or continuing on path not permitted, try reversing
1042
- if (
1043
- distance > 50
1044
- and last_segment.burns_done < last_segment.passes
1045
- and last_segment.reversible()
1046
- and last_segment.next is not None
1047
- ):
1048
- # last_segment is a copy, so we need to get original
1049
- closest = last_segment.next.previous
1050
- backwards = last_segment.normal
1051
- distance = 0 # By definition since we are reversing and reburning
1052
-
1053
- # Stay on path in same direction if gap <= 1/20" i.e. path not quite closed
1054
- # Travel only if path is completely burned or gap > 1/20"
1055
- if distance > 50:
1056
- for cut in context.candidate(
1057
- complete_path=complete_path, grouped_inner=grouped_inner
1058
- ):
1059
- s = cut.start
1060
- if (
1061
- abs(s[0] - curr.real) <= distance
1062
- and abs(s[1] - curr.imag) <= distance
1063
- and (not complete_path or cut.closed or cut.first)
1064
- ):
1065
- d = abs(complex(s[0], s[1]) - curr)
1066
- if d < distance:
1067
- closest = cut
1068
- backwards = False
1069
- if d <= 0.1: # Distance in px is zero, we cannot improve.
1070
- break
1071
- distance = d
1072
-
1073
- if not cut.reversible():
1074
- continue
1075
- e = cut.end
1076
- if (
1077
- abs(e[0] - curr.real) <= distance
1078
- and abs(e[1] - curr.imag) <= distance
1079
- and (not complete_path or cut.closed or cut.last)
1080
- ):
1081
- d = abs(complex(e[0], e[1]) - curr)
1082
- if d < distance:
1083
- closest = cut
1084
- backwards = True
1085
- if d <= 0.1: # Distance in px is zero, we cannot improve.
1086
- break
1087
- distance = d
1088
-
1089
- if closest is None:
1090
- break
1091
-
1092
- # Change direction if other direction is coincident and has more burns remaining
1093
- if backwards:
1094
- if (
1095
- closest.next
1096
- and closest.next.burns_done <= closest.burns_done
1097
- and closest.next.start == closest.end
1098
- ):
1099
- closest = closest.next
1100
- backwards = False
1101
- elif closest.reversible():
1102
- if (
1103
- closest.previous
1104
- and closest.previous is not closest
1105
- and closest.previous.burns_done < closest.burns_done
1106
- and closest.previous.end == closest.start
1107
- ):
1108
- closest = closest.previous
1109
- backwards = True
1110
-
1111
- closest.burns_done += 1
1112
- c = copy(closest)
1113
- if backwards:
1114
- c.reverse()
1115
- end = c.end
1116
- curr = complex(end[0], end[1])
1117
- ordered.append(c)
1118
- if context.start is not None:
1119
- ordered._start_x, ordered._start_y = context.start
1120
- else:
1121
- ordered._start_x = 0
1122
- ordered._start_y = 0
1123
- if channel:
1124
- end_times = times()
1125
- end_length = ordered.length_travel(True)
1126
- try:
1127
- delta = (end_length - start_length) / start_length
1128
- except ZeroDivisionError:
1129
- delta = 0
1130
- channel(
1131
- f"Length at end: {end_length:.0f} steps "
1132
- f"({delta:+.0%}), "
1133
- f"optimized in {time() - start_time:.3f} "
1134
- f"elapsed seconds using {end_times[0] - start_times[0]:.3f} seconds CPU"
1135
- )
1136
- return ordered
1137
-
1138
-
1139
- def short_travel_cutcode_2opt(context: CutCode, passes: int = 50, channel=None):
1140
- """
1141
- This implements 2-opt algorithm using numpy.
1142
-
1143
- Skipping of the candidate code it does not perform inner first optimizations.
1144
- Due to the numpy requirement, doesn't work without numpy.
1145
- --
1146
- Uses code I wrote for vpype:
1147
- https://github.com/abey79/vpype/commit/7b1fad6bd0fcfc267473fdb8ba2166821c80d9cd
1148
-
1149
- @param context:cutcode: cutcode to be optimized
1150
- @param passes: max passes to perform 2-opt
1151
- @param channel: Channel to send data about the optimization process.
1152
- @return:
1153
- """
1154
- try:
1155
- import numpy as np
1156
- except ImportError:
1157
- return context
1158
-
1159
- if channel:
1160
- start_length = context.length_travel(True)
1161
- start_time = time()
1162
- start_times = times()
1163
- channel("Executing 2-Opt Short-Travel optimization")
1164
- channel(f"Length at start: {start_length:.0f} steps")
1165
-
1166
- ordered = CutCode(context.flat())
1167
- length = len(ordered)
1168
- if length <= 1:
1169
- if channel:
1170
- channel("2-Opt: Not enough elements to optimize.")
1171
- return ordered
1172
-
1173
- curr = context.start
1174
- if curr is None:
1175
- curr = 0
1176
- else:
1177
- curr = complex(curr)
1178
-
1179
- current_pass = 1
1180
- min_value = -1e-10 # Do not swap on rounding error.
1181
-
1182
- endpoints = np.zeros((length, 4), dtype="complex")
1183
- # start, index, reverse-index, end
1184
- for i in range(length):
1185
- endpoints[i] = complex(ordered[i].start), i, ~i, complex(ordered[i].end)
1186
- indexes0 = np.arange(0, length - 1)
1187
- indexes1 = indexes0 + 1
1188
-
1189
- def log_progress(pos):
1190
- starts = endpoints[indexes0, -1]
1191
- ends = endpoints[indexes1, 0]
1192
- dists = np.abs(starts - ends)
1193
- dist_sum = dists.sum() + abs(curr - endpoints[0][0])
1194
- channel(
1195
- f"optimize: laser-off distance is {dist_sum}. {100 * pos / length:.02f}% done with pass {current_pass}/{passes}"
1196
- )
1197
-
1198
- improved = True
1199
- while improved:
1200
- improved = False
1201
-
1202
- first = endpoints[0][0]
1203
- cut_ends = endpoints[indexes0, -1]
1204
- cut_starts = endpoints[indexes1, 0]
1205
-
1206
- # delta = np.abs(curr - first) + np.abs(first - cut_starts) - np.abs(cut_ends - cut_starts)
1207
- delta = (
1208
- np.abs(curr - cut_ends)
1209
- + np.abs(first - cut_starts)
1210
- - np.abs(cut_ends - cut_starts)
1211
- - np.abs(curr - first)
1212
- )
1213
- index = int(np.argmin(delta))
1214
- if delta[index] < min_value:
1215
- endpoints[: index + 1] = np.flip(
1216
- endpoints[: index + 1], (0, 1)
1217
- ) # top to bottom, and right to left flips.
1218
- improved = True
1219
- if channel:
1220
- log_progress(1)
1221
- for mid in range(1, length - 1):
1222
- idxs = np.arange(mid, length - 1)
1223
-
1224
- mid_source = endpoints[mid - 1, -1]
1225
- mid_dest = endpoints[mid, 0]
1226
- cut_ends = endpoints[idxs, -1]
1227
- cut_starts = endpoints[idxs + 1, 0]
1228
- delta = (
1229
- np.abs(mid_source - cut_ends)
1230
- + np.abs(mid_dest - cut_starts)
1231
- - np.abs(cut_ends - cut_starts)
1232
- - np.abs(mid_source - mid_dest)
1233
- )
1234
- index = int(np.argmin(delta))
1235
- if delta[index] < min_value:
1236
- endpoints[mid : mid + index + 1] = np.flip(
1237
- endpoints[mid : mid + index + 1], (0, 1)
1238
- )
1239
- improved = True
1240
- if channel:
1241
- log_progress(mid)
1242
-
1243
- last = endpoints[-1, -1]
1244
- cut_ends = endpoints[indexes0, -1]
1245
- cut_starts = endpoints[indexes1, 0]
1246
-
1247
- delta = np.abs(cut_ends - last) - np.abs(cut_ends - cut_starts)
1248
- index = int(np.argmin(delta))
1249
- if delta[index] < min_value:
1250
- endpoints[index + 1 :] = np.flip(
1251
- endpoints[index + 1 :], (0, 1)
1252
- ) # top to bottom, and right to left flips.
1253
- improved = True
1254
- if channel:
1255
- log_progress(length)
1256
- if current_pass >= passes:
1257
- break
1258
- current_pass += 1
1259
-
1260
- # Two-opt complete.
1261
- order = endpoints[:, 1].real.astype(int)
1262
- ordered.reordered(order)
1263
- if channel:
1264
- end_times = times()
1265
- end_length = ordered.length_travel(True)
1266
- channel(
1267
- f"Length at end: {end_length:.0f} steps "
1268
- f"({(end_length - start_length) / start_length:+.0%}), "
1269
- f"optimized in {time() - start_time:.3f} "
1270
- f"elapsed seconds using {end_times[0] - start_times[0]:.3f} seconds CPU"
1271
- )
1272
- return ordered
1273
-
1274
-
1275
- def inner_selection_cutcode(
1276
- context: CutCode, channel=None, grouped_inner: Optional[bool] = False
1277
- ):
1278
- """
1279
- Selects cutcode from candidate cutcode permitted but does nothing to optimize beyond
1280
- finding a valid solution.
1281
-
1282
- This routine runs if opt_inner first is selected and opt_greedy is not selected.
1283
- """
1284
- if channel:
1285
- start_length = context.length_travel(True)
1286
- start_time = time()
1287
- start_times = times()
1288
- channel("Executing Inner Selection-Only optimization")
1289
- channel(f"Length at start: {start_length:.0f} steps")
1290
-
1291
- for c in context.flat():
1292
- c.burns_done = 0
1293
-
1294
- ordered = CutCode()
1295
- iterations = 0
1296
- while True:
1297
- c = list(context.candidate(grouped_inner=grouped_inner))
1298
- if len(c) == 0:
1299
- break
1300
- for o in c:
1301
- o.burns_done += 1
1302
- ordered.extend(copy(c))
1303
- iterations += 1
1304
-
1305
- if channel:
1306
- end_times = times()
1307
- end_length = ordered.length_travel(True)
1308
- msg = f"Length at end: {end_length:.0f} steps "
1309
- if start_length != 0:
1310
- msg += f"({(end_length - start_length) / start_length:+.0%}), "
1311
- msg += f"optimized in {time() - start_time:.3f} "
1312
- msg += f"elapsed seconds using {end_times[0] - start_times[0]:.3f} "
1313
- msg += f"seconds CPU in {iterations} iterations"
1314
-
1315
- channel(msg)
1316
- return ordered
1
+ """
2
+ CutPlan contains code to process LaserOperations into CutCode objects which are spooled.
3
+
4
+ CutPlan handles the various complicated algorithms to optimising the sequence of CutObjects to:
5
+ * Sort burns so that travel time is minimised
6
+ * Do burns with multiple passes all at the same time (Merge Passes)
7
+ * Sort burns for all operations at the same time rather than operation by operation
8
+ * Ensure that elements inside closed cut paths are burned before the outside path
9
+ * Group these inner burns so that one component on a sheet is completed before the next one is started
10
+ * Ensure that non-closed paths start from one of the ends and burned in one continuous burn
11
+ rather than being burned in 2 or more separate parts
12
+ * Split raster images in to self-contained areas to avoid sweeping over large empty areas
13
+ including splitting into individual small areas if burn inner first is set and then recombining
14
+ those inside the same curves so that raster burns are fully optimised.
15
+ """
16
+
17
+ from copy import copy
18
+ from math import isinf
19
+ from os import times
20
+ from time import perf_counter, time
21
+ from typing import Optional
22
+ from functools import lru_cache
23
+ import numpy as np
24
+
25
+ from ..svgelements import Group, Matrix, Path, Polygon
26
+ from ..tools.geomstr import Geomstr
27
+ from ..tools.pathtools import VectorMontonizer
28
+ from .cutcode.cutcode import CutCode
29
+ from .cutcode.cutgroup import CutGroup
30
+ from .cutcode.cutobject import CutObject
31
+ from .cutcode.rastercut import RasterCut
32
+ from .node.node import Node
33
+ from .node.util_console import ConsoleOperation
34
+ from .units import Length, UNITS_PER_MM
35
+
36
+ """
37
+ The time to compile does outweigh the benefit...
38
+ try:
39
+ from numba import jit
40
+ except Exception as e:
41
+ # Jit does not exist, add a dummy decorator and continue.
42
+ # print (f"Encountered error: {e}")
43
+ def jit(*args, **kwargs):
44
+ def inner(func):
45
+ return func
46
+
47
+ return inner
48
+ """
49
+
50
+ class CutPlanningFailedError(Exception):
51
+ pass
52
+
53
+
54
+ class CutPlan:
55
+ """
56
+ CutPlan is a centralized class to modify plans during cutplanning. It is typically is used to progress from
57
+ copied operations through the stages to being properly optimized cutcode.
58
+
59
+ The stages are:
60
+ 1. Copy: This can be `copy-selected` or `copy` to decide which operations are moved initially into the plan.
61
+ a. Copied operations are copied to real. All the reference nodes are replaced with copies of the actual elements
62
+ 2. Preprocess: Convert from scene space to device space and add validation operations.
63
+ 3. Validate: Run all the validation operations, this could be anything the nodes added during preprocess.
64
+ a. Calls `execute` operation.
65
+ 4. Blob: We convert all the operations/elements into proper cutcode. Some operations do not necessarily need to
66
+ convert to cutcode. They merely need to convert to some type of spoolable operation.
67
+ 5. Preopt: Preoptimize adds in the relevant optimization operations into the cutcode.
68
+ 6. Optimize: This calls the added functions set during the preopt process.
69
+ a. Calls `execute` operation.
70
+ """
71
+
72
+ def __init__(self, name, planner):
73
+ self.name = name
74
+ self.context = planner
75
+ self.plan = list()
76
+ self.spool_commands = list()
77
+ self.commands = list()
78
+ self.channel = self.context.channel("optimize", timestamp=True)
79
+ self.outline = None
80
+ self._previous_bounds = None
81
+
82
+ def __str__(self):
83
+ parts = list()
84
+ parts.append(self.name)
85
+ if len(self.plan):
86
+ parts.append(f"#{len(self.plan)}")
87
+ for p in self.plan:
88
+ try:
89
+ parts.append(p.__name__)
90
+ except AttributeError:
91
+ parts.append(p.__class__.__name__)
92
+ else:
93
+ parts.append("-- Empty --")
94
+ return " ".join(parts)
95
+
96
+ def execute(self):
97
+ """
98
+ Execute runs all the commands built during `preprocess` and `preopt` (preoptimize) stages.
99
+
100
+ If a command's execution adds a command to commands, this command is also executed.
101
+ @return:
102
+ """
103
+ # Using copy of commands, so commands can add ops.
104
+ self._debug_me("At start of execute")
105
+
106
+ while self.commands:
107
+ # Executing command can add a command, complete them all.
108
+ commands = self.commands[:]
109
+ self.commands.clear()
110
+ for command in commands:
111
+ command()
112
+ self._debug_me(f"At end of {command.__name__}")
113
+
114
+ def final(self):
115
+ """
116
+ Executes all the spool_commands built during the other stages.
117
+
118
+ If a command's execution added a spool_command we run it during final.
119
+
120
+ Final is called during at the time of spool. Just before the laserjob is created.
121
+ @return:
122
+ """
123
+ # Using copy of commands, so commands can add ops.
124
+ while self.spool_commands:
125
+ # Executing command can add a command, complete them all.
126
+ commands = self.spool_commands[:]
127
+ self.spool_commands.clear()
128
+ for command in commands:
129
+ command()
130
+
131
+ def preprocess(self):
132
+ """
133
+ Preprocess stage.
134
+
135
+ All operation nodes are called with the current context, the matrix converting from scene to device, and
136
+ commands.
137
+
138
+ Nodes are expected to convert relevant properties and shapes from scene coordinates to device coordinate systems
139
+ if they need operations. They are also expected to add any relevant commands to the commands list. The commands
140
+ list sequentially in the next stage.
141
+ """
142
+ device = self.context.device
143
+
144
+ scene_to_device_matrix = device.view.matrix
145
+
146
+ # ==========
147
+ # Determine the jobs bounds.
148
+ # ==========
149
+ bounds = Node.union_bounds(self.plan, bounds=self._previous_bounds)
150
+ self._previous_bounds = bounds
151
+ if bounds is not None:
152
+ left, top, right, bottom = bounds
153
+ min_x = min(right, left)
154
+ min_y = min(top, bottom)
155
+ max_x = max(right, left)
156
+ max_y = max(top, bottom)
157
+ if isinf(min_x) or isinf(min_y) or isinf(max_x) or isinf(max_y):
158
+ # Infinite bounds are invalid.
159
+ self.outline = None
160
+ else:
161
+ self.outline = (
162
+ device.view.position(min_x, min_y, margins=False),
163
+ device.view.position(max_x, min_y, margins=False),
164
+ device.view.position(max_x, max_y, margins=False),
165
+ device.view.position(min_x, max_y, margins=False),
166
+ )
167
+
168
+ # ==========
169
+ # Query Placements
170
+ # ==========
171
+ placements = []
172
+ for place in self.plan:
173
+ if not hasattr(place, "type"):
174
+ continue
175
+ if place.type.startswith("place ") and (hasattr(place, "output") and place.output):
176
+ loops = 1
177
+ if hasattr(place, "loops") and place.loops > 1:
178
+ loops = place.loops
179
+ for idx in range(loops):
180
+ placements.extend(
181
+ place.placements(
182
+ self.context, self.outline, scene_to_device_matrix, self
183
+ )
184
+ )
185
+ if not placements:
186
+ # Absolute coordinates.
187
+ placements.append(scene_to_device_matrix)
188
+
189
+ original_ops = copy(self.plan)
190
+ if self.context.opt_raster_optimisation and self.context.do_optimization:
191
+ try:
192
+ margin = float(Length(self.context.opt_raster_opt_margin, "0"))
193
+ except (AttributeError, ValueError):
194
+ margin = 0
195
+ self.optimize_rasters(original_ops, "op raster", margin)
196
+ # We could do this as well, but images are burnt separately anyway...
197
+ # self.optimize_rasters(original_ops, "op image", margin)
198
+ self.plan.clear()
199
+
200
+ idx = 0
201
+ self.context.elements.mywordlist.push()
202
+
203
+ perform_simplify = (
204
+ self.context.opt_reduce_details and self.context.do_optimization
205
+ )
206
+ tolerance = self.context.opt_reduce_tolerance
207
+ for placement in placements:
208
+ # Adjust wordlist
209
+ if idx > 0:
210
+ self.context.elements.mywordlist.move_all_indices(1)
211
+
212
+ current_cool = 0
213
+ for original_op in original_ops:
214
+ # First, do we have a valid coolant aka airassist command?
215
+ # And is this relevant, as in does the device support it?
216
+ coolid = getattr(self.context.device, "device_coolant", "")
217
+ if hasattr(original_op, "coolant"):
218
+ cool = original_op.coolant
219
+ if cool is None:
220
+ cool = 0
221
+ if cool in (1, 2): # Explicit on / off
222
+ if cool != current_cool:
223
+ cmd = "coolant_on" if cool == 1 else "coolant_off"
224
+ if coolid:
225
+ coolop = ConsoleOperation(command=cmd)
226
+ self.plan.append(coolop)
227
+ else:
228
+ self.channel("The current device does not support a coolant method")
229
+ current_cool = cool
230
+ # Is there already a coolant operation?
231
+ if getattr(original_op, "type", "") == "util console":
232
+ if original_op.command == "coolant_on":
233
+ current_cool = 1
234
+ elif original_op.command == "coolant_off":
235
+ current_cool = 2
236
+
237
+ try:
238
+ op = original_op.copy_with_reified_tree()
239
+ except AttributeError:
240
+ op = original_op
241
+ if not hasattr(op, "type") or op.type is None:
242
+ self.plan.append(op)
243
+ continue
244
+ op_type = getattr(op, "type", "")
245
+ if op_type.startswith("place "):
246
+ continue
247
+ self.plan.append(op)
248
+ if (op_type.startswith("op") or op_type.startswith("util")) and hasattr(op, "preprocess"):
249
+ op.preprocess(self.context, placement, self)
250
+ if op_type.startswith("op"):
251
+ for node in op.flat():
252
+ if node is op:
253
+ continue
254
+ if hasattr(node, "geometry") and perform_simplify:
255
+ # We are still in scene reolution and not yet at device level
256
+ node.geometry = node.geometry.simplify(tolerance=tolerance)
257
+ if hasattr(node, "mktext") and hasattr(node, "_cache"):
258
+ newtext = self.context.elements.wordlist_translate(
259
+ node.mktext, elemnode=node, increment=False
260
+ )
261
+ oldtext = getattr(node, "_translated_text", "")
262
+ # print (f"Was called inside preprocess for {node.type} with {node.mktext}, old: {oldtext}, new:{newtext}")
263
+ if newtext != oldtext:
264
+ node._translated_text = newtext
265
+ kernel = self.context.elements.kernel
266
+ for property_op in kernel.lookup_all("path_updater/.*"):
267
+ property_op(kernel.root, node)
268
+ if hasattr(node, "_cache"):
269
+ node._cache = None
270
+ if hasattr(node, "preprocess"):
271
+ node.preprocess(self.context, placement, self)
272
+ idx += 1
273
+ self.context.elements.mywordlist.pop()
274
+
275
+ def _to_grouped_plan(self, plan):
276
+ """
277
+ Break operations into grouped sequences of Operations and utility operations.
278
+
279
+ We can only merge between contiguous groups of operations. We cannot merge util node types with op node types.
280
+
281
+ Anything that does not have a type is likely able to spool, but cannot merge and are not grouped. Only grouped
282
+ operations are candidates for cutcode merging.
283
+ @return:
284
+ """
285
+ last_type = None
286
+ group = list()
287
+ for c in plan:
288
+ c_type = (
289
+ c.type
290
+ if hasattr(c, "type") and c.type is not None
291
+ else type(c).__name__
292
+ )
293
+ if c_type.startswith("effect"):
294
+ # Effects should not be used here.
295
+ continue
296
+ if last_type is not None:
297
+ if c_type.startswith("op") != last_type.startswith("op"):
298
+ # This cannot merge
299
+ yield group
300
+ group = list()
301
+ group.append(c)
302
+ last_type = c_type
303
+ if group:
304
+ yield group
305
+
306
+ def _to_blob_plan_passes_first(self, grouped_plan):
307
+ """
308
+ If Merge operations and not merge passes we need to iterate passes first and operations second.
309
+
310
+ This function is specific to that case, when passes first operations second.
311
+
312
+ Converts the operations to cutcode.
313
+ @param grouped_plan:
314
+ @return:
315
+ """
316
+ for plan in grouped_plan:
317
+ pass_idx = 0
318
+ while True:
319
+ more_passes_possible = False
320
+ for op in plan:
321
+ if (
322
+ not hasattr(op, "type")
323
+ or op.type == "util console"
324
+ or (
325
+ not op.type.startswith("op")
326
+ and not op.type.startswith("util")
327
+ )
328
+ ):
329
+ # This is an irregular object and can't become cutcode.
330
+ if pass_idx == 0:
331
+ # irregular objects have an implicit single pass.
332
+ yield op
333
+ continue
334
+ if pass_idx > op.implicit_passes - 1:
335
+ continue
336
+ more_passes_possible = True
337
+ yield from self._blob_convert(op, 1, 1, force_idx=pass_idx)
338
+ if not more_passes_possible:
339
+ # No operation needs additional passes.
340
+ break
341
+ pass_idx += 1
342
+
343
+ def _to_blob_plan(self, grouped_plan):
344
+ """
345
+ Iterate operations first and passes second. Operation first mode. Passes are done within cutcode pass value.
346
+
347
+ Converts the operations to cutcode.
348
+
349
+ @param grouped_plan:
350
+ @return:
351
+ """
352
+ context = self.context
353
+ for plan in grouped_plan:
354
+ for op in plan:
355
+ if not hasattr(op, "type") or op.type is None:
356
+ yield op
357
+ continue
358
+ if (
359
+ not op.type.startswith("op")
360
+ and not op.type.startswith("util")
361
+ or op.type == "util console"
362
+ ):
363
+ yield op
364
+ continue
365
+ passes = op.implicit_passes
366
+ if context.opt_merge_passes and (
367
+ context.opt_nearest_neighbor or context.opt_inner_first
368
+ ):
369
+ # Providing we do some sort of post-processing of blobs,
370
+ # then merge passes is handled by the greedy or inner_first algorithms
371
+
372
+ # So, we only need 1 copy and to set the passes.
373
+ yield from self._blob_convert(op, copies=1, passes=passes)
374
+ else:
375
+ # We do passes by making copies of the cutcode.
376
+ yield from self._blob_convert(op, copies=passes, passes=1)
377
+
378
+ def _blob_convert(self, op, copies, passes, force_idx=None):
379
+ """
380
+ Converts the given op into cutcode. Provides `copies` copies of that cutcode, sets
381
+ the passes to passes for each cutcode object.
382
+
383
+ @param op:
384
+ @param copies:
385
+ @param passes:
386
+ @param force_idx:
387
+ @return:
388
+ """
389
+ context = self.context
390
+ for pass_idx in range(copies):
391
+ # if the settings dictionary doesn't exist we use the defined instance dictionary
392
+ try:
393
+ settings_dict = op.settings
394
+ except AttributeError:
395
+ settings_dict = op.__dict__
396
+ # If passes isn't equal to implicit passes then we need a different settings to permit change
397
+ settings = (
398
+ settings_dict if op.implicit_passes == passes else dict(settings_dict)
399
+ )
400
+ cutcode = CutCode(
401
+ op.as_cutobjects(
402
+ closed_distance=context.opt_closed_distance,
403
+ passes=passes,
404
+ ),
405
+ settings=settings,
406
+ )
407
+ if len(cutcode) == 0:
408
+ break
409
+ op_type = getattr(op, "type", "")
410
+ cutcode.constrained = op_type == "op cut" and context.opt_inner_first
411
+ cutcode.pass_index = pass_idx if force_idx is None else force_idx
412
+ cutcode.original_op = op_type
413
+ yield cutcode
414
+
415
+ def _to_merged_plan(self, blob_plan):
416
+ """
417
+ Convert the blobbed plan of cutcode (rather than operations) into a merged plan for those cutcode operations
418
+ which are permitted to merge into the same cutcode object. All items within the same cutcode object are
419
+ candidates for optimizations. For example, if a HomeCut was merged LineCut in the cutcode, that entire group
420
+ would merge together, finding the most optimized time to home the machine (if optimization was enabled).
421
+
422
+ @param blob_plan:
423
+ @return:
424
+ """
425
+ last_item = None
426
+ context = self.context
427
+ for blob in blob_plan:
428
+ try:
429
+ blob.jog_distance = context.opt_jog_minimum
430
+ blob.jog_enable = context.opt_rapid_between
431
+ except AttributeError:
432
+ pass
433
+ if last_item and self._should_merge(context, last_item, blob):
434
+ # Do not check empty plan.
435
+ if blob.constrained:
436
+ # if any merged object is constrained, then combined blob is also constrained.
437
+ last_item.constrained = True
438
+ last_item.extend(blob)
439
+
440
+ else:
441
+ if isinstance(blob, CutObject) and not isinstance(blob, CutCode):
442
+ cc = CutCode([blob])
443
+ cc.original_op = blob.original_op
444
+ cc.pass_index = blob.pass_index
445
+ last_item = cc
446
+ else:
447
+ last_item = blob
448
+ yield last_item
449
+
450
+ def _should_merge(self, context, last_item, current_item):
451
+ """
452
+ Checks whether we should merge the blob with the current plan.
453
+
454
+ We can only merge things if we have the right objects and settings.
455
+ """
456
+ if not isinstance(last_item, CutCode):
457
+ # The last plan item is not cutcode, merge is only between cutobjects adding to cutcode.
458
+ self.channel (f"last_item is no cutcode ({type(last_item).__name__}), can't merge")
459
+ return False
460
+ if not isinstance(current_item, CutObject):
461
+ # The object to be merged is not a cutObject and cannot be added to Cutcode.
462
+ self.channel (f"current_item is no cutcode ({type(current_item).__name__}), can't merge")
463
+ return False
464
+ last_op = last_item.original_op
465
+ if last_op is None:
466
+ last_op = ""
467
+ current_op = current_item.original_op
468
+ if current_op is None:
469
+ current_op = ""
470
+ if last_op.startswith("util") or current_op.startswith("util"):
471
+ self.channel (f"{last_op} / {current_op} - at least one is a util operation, can't merge")
472
+ return False
473
+
474
+ if (
475
+ not context.opt_merge_passes
476
+ and last_item.pass_index != current_item.pass_index
477
+ ):
478
+ # Do not merge if opt_merge_passes is off, and pass_index do not match
479
+ self.channel (f"{last_item.pass_index} / {current_item.pass_index} - pass indices are different, can't merge")
480
+ return False
481
+
482
+ if (
483
+ not context.opt_merge_ops
484
+ and last_item.settings is not current_item.settings
485
+ ):
486
+ # Do not merge if opt_merge_ops is off, and the original ops do not match
487
+ # Same settings object implies same original operation
488
+ self.channel (f"Settings do differ from {last_op} to {current_op} and merge ops= {context.opt_merge_ops}")
489
+ return False
490
+ if not context.opt_inner_first and last_item.original_op == "op cut":
491
+ # Do not merge if opt_inner_first is off, and operation was originally a cut.
492
+ self.channel (f"Inner first {context.opt_inner_first}, last op= {last_item.original_op} - Last op was a cut, can't merge")
493
+ return False
494
+ return True # No reason these should not be merged.
495
+
496
+ def _debug_me(self, message):
497
+ debug_level = 0
498
+ if not self.channel:
499
+ return
500
+ self.channel(f"Plan at {message}")
501
+ for pitem in self.plan:
502
+ if isinstance(pitem, (tuple, list)):
503
+ self.channel(f"-{type(pitem).__name__}: {len(pitem)} items")
504
+ if debug_level > 0:
505
+ for cut in pitem:
506
+ if isinstance(cut, (tuple, list)):
507
+ self.channel(
508
+ f"--{type(pitem).__name__}: {type(cut).__name__}: {len(cut)} items"
509
+ )
510
+ else:
511
+ self.channel(
512
+ f"--{type(pitem).__name__}: {type(cut).__name__}: --childless--"
513
+ )
514
+
515
+ elif hasattr(pitem, "children"):
516
+ self.channel(
517
+ f" {type(pitem).__name__}: {len(pitem.children)} children"
518
+ )
519
+ else:
520
+ self.channel(f" {type(pitem).__name__}: --childless--")
521
+
522
+ self.channel("------------")
523
+
524
+ def geometry(self):
525
+ """
526
+ Geometry converts User operations to naked geomstr objects.
527
+ """
528
+
529
+ if not self.plan:
530
+ return
531
+
532
+ plan = list(self.plan)
533
+ self.plan.clear()
534
+ g = Geomstr()
535
+ settings_index = 0
536
+ for c in plan:
537
+ c_type = (
538
+ c.type
539
+ if hasattr(c, "type") and c.type is not None
540
+ else type(c).__name__
541
+ )
542
+ settings_index += 1
543
+ if hasattr(c, "settings"):
544
+ settings = dict(c.settings)
545
+ else:
546
+ settings = dict(c.__dict__)
547
+ g.settings(settings_index, settings)
548
+
549
+ if c_type in ("op cut", "op engrave"):
550
+ for elem in c.children:
551
+ if hasattr(elem, "final_geometry"):
552
+ start_index = g.index
553
+ g.append(elem.final_geometry())
554
+ end_index = g.index
555
+ g.flag_settings(settings_index, start_index, end_index)
556
+ elif hasattr(elem, "as_geometry"):
557
+ start_index = g.index
558
+ g.append(elem.as_geometry())
559
+ end_index = g.index
560
+ g.flag_settings(settings_index, start_index, end_index)
561
+ elif c_type in ("op raster", "op image"):
562
+ for elem in c.children:
563
+ if hasattr(elem, "as_image"):
564
+ settings["raster"] = True
565
+ image, box = elem.as_image()
566
+ m = elem.matrix
567
+ start_index = g.index
568
+ image_geom = Geomstr.image(image)
569
+ image_geom.transform(m)
570
+ g.append(image_geom)
571
+ end_index = g.index
572
+ g.flag_settings(settings_index, start_index, end_index)
573
+ else:
574
+ if g:
575
+ self.plan.append(g)
576
+ g = Geomstr()
577
+ self.plan.append(c)
578
+ if g:
579
+ self.plan.append(g)
580
+
581
+ def blob(self):
582
+ """
583
+ Blob converts User operations to CutCode objects.
584
+
585
+ In order to have CutCode objects in the correct sequence for merging we need to:
586
+ 1. Break operations into grouped sequences of Operations and utility operations.
587
+ We can only merge between contiguous groups of operations (with option set)
588
+ 2. The sequence of CutObjects needs to reflect merge settings
589
+ Normal sequence is to iterate operations and then passes for each operation.
590
+ With Merge ops and not Merge passes, we need to iterate on passes first and then ops within.
591
+ """
592
+
593
+ if not self.plan:
594
+ return
595
+ context = self.context
596
+ grouped_plan = list(self._to_grouped_plan(self.plan))
597
+ if context.opt_merge_ops and not context.opt_merge_passes:
598
+ blob_plan = list(self._to_blob_plan_passes_first(grouped_plan))
599
+ else:
600
+ blob_plan = list(self._to_blob_plan(grouped_plan))
601
+ self.plan.clear()
602
+ self.plan.extend(self._to_merged_plan(blob_plan))
603
+
604
+ def preopt(self):
605
+ """
606
+ Add commands for optimize stage. This stage tends to do very little but checks the settings and adds the
607
+ relevant operations.
608
+
609
+ @return:
610
+ """
611
+ context = self.context
612
+ has_cutcode = False
613
+ for op in self.plan:
614
+ try:
615
+ if isinstance(op, CutCode):
616
+ has_cutcode = True
617
+ break
618
+ except AttributeError:
619
+ pass
620
+ if not has_cutcode:
621
+ return
622
+
623
+ if context.opt_effect_combine:
624
+ self.commands.append(self.combine_effects)
625
+
626
+ if context.opt_reduce_travel and (
627
+ context.opt_nearest_neighbor or context.opt_2opt
628
+ ):
629
+ if context.opt_nearest_neighbor:
630
+ self.commands.append(self.optimize_travel)
631
+ if context.opt_2opt and not context.opt_inner_first:
632
+ self.commands.append(self.optimize_travel_2opt)
633
+
634
+ elif context.opt_inner_first:
635
+ self.commands.append(self.optimize_cuts)
636
+ self.commands.append(self.merge_cutcode)
637
+ if context.opt_reduce_directions:
638
+ pass
639
+ if context.opt_remove_overlap:
640
+ pass
641
+
642
+ def combine_effects(self):
643
+ """
644
+ Will browse through the cutcode entries grouping everything together
645
+ that as a common 'source' attribute
646
+ """
647
+
648
+ def update_group_sequence(group):
649
+ if len(group) == 0:
650
+ return
651
+ glen = len(group)
652
+ for i, cut_obj in enumerate(group):
653
+ cut_obj.first = i == 0
654
+ cut_obj.last = i == glen - 1
655
+ next_idx = i + 1 if i < glen - 1 else 0
656
+ cut_obj.next = group[next_idx]
657
+ cut_obj.previous = group[i - 1]
658
+ group.path = group._geometry.as_path()
659
+
660
+ def update_busy_info(busy, idx, l_pitem, plan_idx, l_plan):
661
+ busy.change(
662
+ msg=_("Combine effect primitives")
663
+ + f" {idx + 1}/{l_pitem} ({plan_idx + 1}/{l_plan})",
664
+ keep=1,
665
+ )
666
+ busy.show()
667
+
668
+ def process_plan_item(pitem, busy, total, plan_idx, l_plan):
669
+ grouping = {}
670
+ l_pitem = len(pitem)
671
+ to_be_deleted = []
672
+ combined = 0
673
+ for idx, cut in enumerate(pitem):
674
+ total += 1
675
+ if busy.shown and total % 100 == 0:
676
+ update_busy_info(busy, idx, l_pitem, plan_idx, l_plan)
677
+ if not isinstance(cut, CutGroup) or cut.origin is None:
678
+ continue
679
+ combined += process_cut(cut, grouping, pitem, idx, to_be_deleted)
680
+ return grouping, to_be_deleted, combined, total
681
+
682
+ def process_cut(cut, grouping, pitem, idx, to_be_deleted):
683
+ if cut.origin not in grouping:
684
+ grouping[cut.origin] = idx
685
+ return 0
686
+ mastercut = grouping[cut.origin]
687
+ geom = cut._geometry
688
+ pitem[mastercut].skip = True
689
+ pitem[mastercut].extend(cut)
690
+ pitem[mastercut]._geometry.append(geom)
691
+ cut.clear()
692
+ to_be_deleted.append(idx)
693
+ return 1
694
+
695
+ busy = self.context.kernel.busyinfo
696
+ _ = self.context.kernel.translation
697
+ if busy.shown:
698
+ busy.change(msg=_("Combine effect primitives"), keep=1)
699
+ busy.show()
700
+ combined = 0
701
+ l_plan = len(self.plan)
702
+ total = -1
703
+ group_count = 0
704
+ for plan_idx, pitem in enumerate(self.plan):
705
+ # We don't combine across plan boundaries
706
+ if not isinstance(pitem, CutGroup):
707
+ continue
708
+ grouping, to_be_deleted, item_combined, total = process_plan_item(pitem, busy, total, plan_idx, l_plan)
709
+ combined += item_combined
710
+ group_count += len(grouping)
711
+
712
+ for key, item in grouping.items():
713
+ update_group_sequence(pitem[item])
714
+
715
+ for p in reversed(to_be_deleted):
716
+ pitem.pop(p)
717
+
718
+ if self.channel:
719
+ self.channel(f"Combined: {combined}, groups: {group_count}")
720
+
721
+ def optimize_travel_2opt(self):
722
+ """
723
+ Optimize travel 2opt at optimize stage on cutcode
724
+ @return:
725
+ """
726
+ busy = self.context.kernel.busyinfo
727
+ _ = self.context.kernel.translation
728
+ if busy.shown:
729
+ busy.change(msg=_("Optimize inner travel"), keep=1)
730
+ busy.show()
731
+ channel = self.context.channel("optimize", timestamp=True)
732
+ for i, c in enumerate(self.plan):
733
+ if isinstance(c, CutCode):
734
+ self.plan[i] = short_travel_cutcode_2opt(
735
+ self.plan[i], kernel=self.context.kernel, channel=channel
736
+ )
737
+
738
+ def optimize_cuts(self):
739
+ """
740
+ Optimize cuts at optimize stage on cutcode
741
+ @return:
742
+ """
743
+ # Update Info-panel if displayed
744
+ busy = self.context.kernel.busyinfo
745
+ _ = self.context.kernel.translation
746
+ if busy.shown:
747
+ busy.change(msg=_("Optimize cuts"), keep=1)
748
+ busy.show()
749
+ tolerance = 0
750
+ if self.context.opt_inner_first:
751
+ stol = self.context.opt_inner_tolerance
752
+ try:
753
+ tolerance = (
754
+ float(Length(stol))
755
+ * 2
756
+ / (
757
+ self.context.device.view.native_scale_x
758
+ + self.context.device.view.native_scale_y
759
+ )
760
+ )
761
+ except ValueError:
762
+ pass
763
+ # print(f"Tolerance: {tolerance}")
764
+
765
+ channel = self.context.channel("optimize", timestamp=True)
766
+ grouped_inner = self.context.opt_inner_first and self.context.opt_inners_grouped
767
+ for i, c in enumerate(self.plan):
768
+ if busy.shown:
769
+ busy.change(
770
+ msg=_("Optimize cuts") + f" {i + 1}/{len(self.plan)}", keep=1
771
+ )
772
+ busy.show()
773
+ if isinstance(c, CutCode):
774
+ if c.constrained:
775
+ self.plan[i] = inner_first_ident(
776
+ c,
777
+ kernel=self.context.kernel,
778
+ channel=channel,
779
+ tolerance=tolerance,
780
+ )
781
+ c = self.plan[i]
782
+ self.plan[i] = inner_selection_cutcode(
783
+ c,
784
+ channel=channel,
785
+ grouped_inner=grouped_inner,
786
+ )
787
+
788
    def optimize_travel(self):
        """
        Optimize travel at optimize stage on cutcode.

        For every CutCode entry in the plan: if the entry is constrained,
        first run inner-first identification, then reorder its cuts with the
        greedy short-travel optimizer. The start position of each entry is
        chained to the end of the previous one (seeded with the device's
        current native position, when available).
        @return:
        """
        # Update Info-panel if displayed
        busy = self.context.kernel.busyinfo
        _ = self.context.kernel.translation
        if busy.shown:
            busy.change(msg=_("Optimize travel"), keep=1)
            busy.show()
        try:
            # Seed the travel chain with the laser head's current position.
            last = self.context.device.native
        except AttributeError:
            last = None
        tolerance = 0
        if self.context.opt_inner_first:
            stol = self.context.opt_inner_tolerance
            try:
                # Convert the user tolerance (a Length) into native units by
                # dividing by the average of the device's x/y scale factors.
                tolerance = (
                    float(Length(stol))
                    * 2
                    / (
                        self.context.device.view.native_scale_x
                        + self.context.device.view.native_scale_y
                    )
                )
            except ValueError:
                # Unparsable tolerance string: keep tolerance at 0.
                pass
        # print(f"Tolerance: {tolerance}")

        channel = self.context.channel("optimize", timestamp=True)
        grouped_inner = self.context.opt_inner_first and self.context.opt_inners_grouped
        for i, c in enumerate(self.plan):
            if busy.shown:
                busy.change(
                    msg=_("Optimize travel") + f" {i + 1}/{len(self.plan)}", keep=1
                )
                busy.show()

            if isinstance(c, CutCode):
                if c.constrained:
                    # Establish inner/outer containment before reordering so
                    # the candidate generator honors inner-first burning.
                    self.plan[i] = inner_first_ident(
                        c,
                        kernel=self.context.kernel,
                        channel=channel,
                        tolerance=tolerance,
                    )
                    c = self.plan[i]
                if last is not None:
                    # Continue travel from where the previous entry ended.
                    c._start_x, c._start_y = last
                self.plan[i] = short_travel_cutcode(
                    c,
                    kernel=self.context.kernel,
                    channel=channel,
                    complete_path=self.context.opt_complete_subpaths,
                    grouped_inner=grouped_inner,
                    hatch_optimize=self.context.opt_effect_optimize,
                )
                last = self.plan[i].end
848
+
849
+ def merge_cutcode(self):
850
+ """
851
+ Merge all adjacent optimized cutcode into single cutcode objects.
852
+ @return:
853
+ """
854
+ busy = self.context.kernel.busyinfo
855
+ _ = self.context.kernel.translation
856
+ if busy.shown:
857
+ busy.change(msg=_("Merging cutcode"), keep=1)
858
+ busy.show()
859
+ for i in range(len(self.plan) - 1, 0, -1):
860
+ cur = self.plan[i]
861
+ prev = self.plan[i - 1]
862
+ if isinstance(cur, CutCode) and isinstance(prev, CutCode):
863
+ prev.extend(cur)
864
+ del self.plan[i]
865
+
866
+ def clear(self):
867
+ self._previous_bounds = None
868
+ self.plan.clear()
869
+ self.commands.clear()
870
+
871
    def optimize_rasters(self, operation_list, op_type, margin):
        """
        Split every operation of type `op_type` in `operation_list` into one
        clone per cluster of overlapping children, so that rasters covering
        disjoint areas can be burned independently.

        @param operation_list: mutable list of operations; modified in place.
        @param op_type: operation type string to process (others are skipped).
        @param margin: extra distance within which two bounding boxes are
            still considered overlapping.
        """
        def generate_clusters(operation):
            # Partition the operation's children into clusters of mutually
            # overlapping elements (by paint bounds, padded by `margin`).
            def overlapping(bounds1, bounds2, margin):
                # The rectangles don't overlap if
                # one rectangle's minimum in some dimension
                # is greater than the other's maximum in
                # that dimension.
                flagx = (bounds1[0] > bounds2[2] + margin) or (
                    bounds2[0] > bounds1[2] + margin
                )
                flagy = (bounds1[1] > bounds2[3] + margin) or (
                    bounds2[1] > bounds1[3] + margin
                )
                return bool(not (flagx or flagy))

            # Start with one singleton cluster per child that has bounds.
            clusters = list()
            cluster_bounds = list()
            for child in operation.children:
                try:
                    if child.type == "reference":
                        child = child.node
                    bb = child.paint_bounds
                except AttributeError:
                    # Either no element node or does not have bounds
                    continue
                clusters.append([child])
                cluster_bounds.append(
                    (
                        bb[0],
                        bb[1],
                        bb[2],
                        bb[3],
                    )
                )

            def detail_overlap(index1, index2):
                # But is there a real overlap, or just one with the union bounds?
                for outer_node in clusters[index1]:
                    try:
                        bb_outer = outer_node.paint_bounds
                    except AttributeError:
                        continue
                    for inner_node in clusters[index2]:
                        try:
                            bb_inner = inner_node.paint_bounds
                        except AttributeError:
                            continue
                        if overlapping(bb_outer, bb_inner, margin):
                            return True
                # We did not find anything...
                return False

            # Repeatedly merge clusters until no pair overlaps any more.
            needs_repeat = True
            while needs_repeat:
                needs_repeat = False
                for outer_idx in range(len(clusters) - 1, -1, -1):
                    # Loop downwards as we are manipulating the arrays
                    bb = cluster_bounds[outer_idx]
                    for inner_idx in range(outer_idx - 1, -1, -1):
                        cc = cluster_bounds[inner_idx]
                        if not overlapping(bb, cc, margin):
                            continue
                        # Overlap!
                        # print (f"Reuse cluster {inner_idx} for {outer_idx}")
                        real_overlap = detail_overlap(outer_idx, inner_idx)
                        if real_overlap:
                            needs_repeat = True
                            # We need to extend the inner cluster by the outer
                            clusters[inner_idx].extend(clusters[outer_idx])
                            # Union the two bounding boxes.
                            cluster_bounds[inner_idx] = (
                                min(bb[0], cc[0]),
                                min(bb[1], cc[1]),
                                max(bb[2], cc[2]),
                                max(bb[3], cc[3]),
                            )
                            clusters.pop(outer_idx)
                            cluster_bounds.pop(outer_idx)
                            # We are done with the inner loop, as we effectively
                            # destroyed the cluster element we compared
                            break

            return clusters

        stime = perf_counter()
        scount = 0  # operations examined
        ecount = 0  # clusters (i.e. resulting operations) produced
        for idx in range(len(operation_list) - 1, -1, -1):
            op = operation_list[idx]
            if (
                not hasattr(op, "type")
                or not hasattr(op, "children")
                or op.type != op_type
            ):
                # That's not what we are looking for
                continue
            scount += 1
            clusters = generate_clusters(op)
            ecount += len(clusters)
            if len(clusters) > 0:
                # Create cluster copies of the raster op
                for entry in clusters:
                    newop = copy(op)
                    newop._references.clear()
                    for node in entry:
                        newop.add_reference(node)
                    newop.set_dirty_bounds()
                    operation_list.insert(idx + 1, newop)

                # And remove the original one...
                operation_list.pop(idx)
        etime = perf_counter()
        if self.channel:
            self.channel(
                f"Optimise {op_type} finished after {etime-stime:.2f} seconds, inflated {scount} operations to {ecount}"
            )
986
+
987
+
988
@lru_cache(maxsize=128)
def _polygon_from_path(path, resolution):
    """
    Sample an SVG path string into an (n, 2) numpy array of points.

    Module-level and cached: the original nested helper re-created its
    lru_cache on every is_inside() call, so the cache never produced a hit.
    Keyed on the path's `d()` string plus resolution; bounded at 128 entries.
    """
    geom = Geomstr.svg(path)
    return np.array(
        list(
            (p.real, p.imag)
            for p in geom.as_equal_interpolated_points(distance=resolution)
        )
    )


def is_inside(inner, outer, tolerance=0, resolution=50):
    """
    Test that `inner` is wholly inside `outer`.

    @param inner: inner path, or cut object carrying a `.path`
    @param outer: outer path, or cut object carrying a `.path`
    @param tolerance: slack (native units) still considered "inside"
    @param resolution: interpolation distance used when sampling the paths
        into polygons for the containment test
    @return: whether inner is wholly inside outer.

    Side effects: caches `bounding_box`, `convex_path` and `vm` attributes on
    the supplied objects to speed up repeated comparisons.
    """

    def convex_geometry(raster) -> Geomstr:
        # Build the convex hull of the raster's non-white pixels as a closed
        # Geomstr, scaled/translated back into the raster's bounding box.
        dx = raster.bounding_box[0]
        dy = raster.bounding_box[1]
        dw = raster.bounding_box[2] - raster.bounding_box[0]
        dh = raster.bounding_box[3] - raster.bounding_box[1]
        if raster.image is None:
            return Geomstr.rect(dx, dy, dw, dh)
        image_np = np.array(raster.image.convert("L"))
        # Collect the leftmost / rightmost non-white pixel of each row; that
        # outline is a sufficient (and much smaller) input for the hull.
        left_side = []
        right_side = []
        for y in range(image_np.shape[0]):
            row = image_np[y]
            non_white_indices = np.where(row < 255)[0]
            if non_white_indices.size > 0:
                leftmost = non_white_indices[0]
                rightmost = non_white_indices[-1]
                left_side.append((leftmost, y))
                right_side.insert(0, (rightmost, y))
        left_side.extend(right_side)
        non_white_pixels = left_side
        # Compute convex hull and close it.
        pts = list(Geomstr.convex_hull(None, non_white_pixels))
        if pts:
            pts.append(pts[0])
        geom = Geomstr.lines(*pts)
        # Map pixel coordinates back into scene coordinates.
        sx = dw / raster.image.width
        sy = dh / raster.image.height
        matrix = Matrix()
        matrix.post_scale(sx, sy)
        matrix.post_translate(dx, dy)
        geom.transform(matrix)
        return geom

    # We still consider a path to be inside another path if it is
    # within a certain tolerance
    inner_path = inner
    outer_path = outer
    # NOTE(review): `==` compares by value; for list-like cut groups two
    # distinct-but-equal objects would match here. Kept as-is to preserve
    # behavior — confirm whether `is` was intended.
    if outer == inner:  # This is the same object.
        return False
    if hasattr(inner, "path") and inner.path is not None:
        inner_path = inner.path
    if hasattr(outer, "path") and outer.path is not None:
        outer_path = outer.path
    if not hasattr(inner, "bounding_box"):
        inner.bounding_box = Group.union_bbox([inner_path])
    if not hasattr(outer, "bounding_box"):
        outer.bounding_box = Group.union_bbox([outer_path])
    if outer.bounding_box is None:
        return False
    if inner.bounding_box is None:
        return False
    if isinstance(inner, RasterCut):
        # Rasters have no vector outline; approximate with the convex hull
        # of the dark pixels (cached on the cut object).
        if not hasattr(inner, "convex_path"):
            inner.convex_path = convex_geometry(inner).as_path()
        inner_path = inner.convex_path
    # Fast rejection via bounding boxes (padded by tolerance).
    if outer.bounding_box[0] > inner.bounding_box[2] + tolerance:
        # outer minx > inner maxx (is not contained)
        return False
    if outer.bounding_box[1] > inner.bounding_box[3] + tolerance:
        # outer miny > inner maxy (is not contained)
        return False
    if outer.bounding_box[2] < inner.bounding_box[0] - tolerance:
        # outer maxx < inner minx (is not contained)
        return False
    if outer.bounding_box[3] < inner.bounding_box[1] - tolerance:
        # outer maxy < inner miny (is not contained)
        return False

    # Inner bbox is entirely inside outer bbox,
    # however that does not mean that inner is actually inside outer
    # i.e. inner could be small and between outer and the bbox corner,
    # or small and contained in a concave indentation.
    # So sample both paths into polygons and run a true containment test.

    def vm_code(outer, outer_polygon, inner, inner_polygon):
        # Fallback point-in-polygon test via VectorMontonizer; the
        # monotonizer is cached on the outer object for reuse.
        if not hasattr(outer, "vm"):
            vm = VectorMontonizer()
            vm.add_pointlist(outer_polygon)
            outer.vm = vm
        for gp in inner_polygon:
            if not outer.vm.is_point_inside(gp[0], gp[1], tolerance=tolerance):
                return False
        return True

    def shapely_code(outer_polygon, inner_polygon):
        from shapely.geometry import Polygon

        # Create Shapely Polygon objects
        poly_a = Polygon(inner_polygon)
        poly_b = Polygon(outer_polygon)
        # Check for containment
        return poly_a.within(poly_b)

    inner_polygon = _polygon_from_path(inner_path.d(), resolution)
    outer_polygon = _polygon_from_path(outer_path.d(), resolution)
    try:
        import shapely

        # Preferred: robust containment test when shapely is available.
        return shapely_code(outer_polygon, inner_polygon)
    except ImportError:
        return vm_code(outer, outer_polygon, inner, inner_polygon)
1321
+
1322
def reify_matrix(self):
    """Apply the matrix to the path and reset matrix."""
    # abs(element) presumably returns the element with its transform matrix
    # applied/reified in place (svgelements convention) — TODO confirm.
    self.element = abs(self.element)
    # Invalidate cached scene bounds; they must be recomputed after reify.
    self.scene_bounds = None
1326
+
1327
+
1328
+ # def bounding_box(elements):
1329
+ # if isinstance(elements, SVGElement):
1330
+ # elements = [elements]
1331
+ # elif isinstance(elements, list):
1332
+ # try:
1333
+ # elements = [e.object for e in elements if isinstance(e.object, SVGElement)]
1334
+ # except AttributeError:
1335
+ # pass
1336
+ # boundary_points = []
1337
+ # for e in elements:
1338
+ # box = e.bbox(False)
1339
+ # if box is None:
1340
+ # continue
1341
+ # top_left = e.transform.point_in_matrix_space([box[0], box[1]])
1342
+ # top_right = e.transform.point_in_matrix_space([box[2], box[1]])
1343
+ # bottom_left = e.transform.point_in_matrix_space([box[0], box[3]])
1344
+ # bottom_right = e.transform.point_in_matrix_space([box[2], box[3]])
1345
+ # boundary_points.append(top_left)
1346
+ # boundary_points.append(top_right)
1347
+ # boundary_points.append(bottom_left)
1348
+ # boundary_points.append(bottom_right)
1349
+ # if len(boundary_points) == 0:
1350
+ # return None
1351
+ # xmin = min([e[0] for e in boundary_points])
1352
+ # ymin = min([e[1] for e in boundary_points])
1353
+ # xmax = max([e[0] for e in boundary_points])
1354
+ # ymax = max([e[1] for e in boundary_points])
1355
+ # return xmin, ymin, xmax, ymax
1356
+
1357
+
1358
def correct_empty(context: CutGroup):
    """
    Iterates through backwards deleting any entries that are empty.
    """
    # Walk in reverse so removals cannot shift entries we have yet to visit.
    index = len(context) - 1
    while index >= 0:
        entry = context[index]
        if isinstance(entry, CutGroup):
            # Prune the subtree first; it may become empty as a result.
            correct_empty(entry)
            if len(entry) == 0:
                del context[index]
        index -= 1
1369
+
1370
+
1371
def inner_first_ident(context: CutGroup, kernel=None, channel=None, tolerance=0):
    """
    Identifies closed CutGroups and then identifies any other CutGroups which
    are entirely inside.

    The CutGroup candidate generator uses this information to not offer the outer CutGroup
    as a candidate for a burn unless all contained CutGroups are cut.

    The Cutcode is resequenced in either short_travel_cutcode or inner_selection_cutcode
    based on this information, as used in the subsequent optimization stage.

    @param context: CutGroup whose members are to be classified.
    @param kernel: optional kernel, used for busy-info UI and to derive the
        sampling resolution from the device's native scale.
    @param channel: optional logging channel.
    @param tolerance: containment slack passed through to is_inside().
    @return: the same context, with `contains`/`inside` links and
        `constrained` set.
    """
    if channel:
        start_time = time()
        start_times = times()
        channel("Executing Inner-First Identification")

    groups = [cut for cut in context if isinstance(cut, (CutGroup, RasterCut))]
    closed_groups = [g for g in groups if isinstance(g, CutGroup) and g.closed]
    total_pass = len(groups) * len(closed_groups)
    context.contains = closed_groups
    if channel:
        channel(
            f"Compare {len(groups)} groups against {len(closed_groups)} closed groups"
        )

    constrained = False
    current_pass = 0
    if kernel:
        busy = kernel.busyinfo
        _ = kernel.translation
        min_res = min(kernel.device.view.native_scale_x, kernel.device.view.native_scale_y)
        # a 0.5 mm resolution is enough
        resolution = int(0.5 * UNITS_PER_MM / min_res)
        # print(f"Chosen resolution: {resolution} - minscale = {min_res}")
    else:
        busy = None
        resolution = 10
    # O(closed * all) pairwise containment testing.
    for outer in closed_groups:
        for inner in groups:
            current_pass += 1
            if outer is inner:
                continue
            # if outer is inside inner, then inner cannot be inside outer
            if inner.contains and outer in inner.contains:
                continue
            if current_pass % 50 == 0 and busy and busy.shown:
                # Can't execute without kernel, reference before assignment is safe.
                message = _("Pass {cpass}/{tpass}").format(
                    cpass=current_pass, tpass=total_pass
                )
                busy.change(msg=message, keep=2)
                busy.show()

            if is_inside(inner, outer, tolerance, resolution):
                constrained = True
                # Record the containment in both directions.
                if outer.contains is None:
                    outer.contains = []
                outer.contains.append(inner)

                if inner.inside is None:
                    inner.inside = []
                inner.inside.append(outer)

    context.constrained = constrained

    # Debug invariant checks (kept for reference):
    # for g in groups:
    #     if g.contains is not None:
    #         for inner in g.contains:
    #             assert inner in groups
    #             assert inner is not g
    #             assert g in inner.inside
    #     if g.inside is not None:
    #         for outer in g.inside:
    #             assert outer in groups
    #             assert outer is not g
    #             assert g in outer.contains

    if channel:
        end_times = times()
        channel(
            f"Inner paths identified in {time() - start_time:.3f} elapsed seconds: {constrained} "
            f"using {end_times[0] - start_times[0]:.3f} seconds CPU"
        )
        for outer in closed_groups:
            if outer is None:
                continue
            channel(
                f"Outer {type(outer).__name__} contains: {'None' if outer.contains is None else str(len(outer.contains))} cutcode elements"
            )

    return context
1462
+
1463
+
1464
def short_travel_cutcode(
    context: CutCode,
    kernel=None,
    channel=None,
    complete_path: Optional[bool] = False,
    grouped_inner: Optional[bool] = False,
    hatch_optimize: Optional[bool] = False,
):
    """
    Selects cutcode from candidate cutcode (burns_done < passes in this CutCode),
    optimizing with greedy/brute for shortest distances optimizations.

    For paths starting at exactly the same point forward paths are preferred over reverse paths

    We start at either 0,0 or the value given in `context.start`

    This is time-intense hyper-optimized code, so it contains several seemingly redundant
    checks.

    @param context: CutCode to reorder.
    @param kernel: optional kernel, used only for busy-info UI updates.
    @param channel: optional logging channel for before/after travel lengths.
    @param complete_path: prefer completing whole subpaths before traveling.
    @param grouped_inner: honor grouped inner-first constraints.
    @param hatch_optimize: also optimize skipped (hatch) groups at the end.
    @return: a new, reordered CutCode.
    """
    if channel:
        start_length = context.length_travel(True)
        start_time = time()
        start_times = times()
        channel("Executing Greedy Short-Travel optimization")
        channel(f"Length at start: {start_length:.0f} steps")
    # Pull out skipped groups (e.g. hatches); they are handled after the
    # greedy pass, either optimized recursively or appended as-is.
    unordered = []
    for idx in range(len(context) - 1, -1, -1):
        c = context[idx]
        if isinstance(c, CutGroup) and c.skip:
            unordered.append(c)
            context.pop(idx)

    curr = context.start
    curr = 0 if curr is None else complex(curr[0], curr[1])

    # Reset burn counters and count the flattened cuts for progress display.
    cutcode_len = 0
    for c in context.flat():
        cutcode_len += 1
        c.burns_done = 0

    ordered = CutCode()
    current_pass = 0
    if kernel:
        busy = kernel.busyinfo
        _ = kernel.translation
    else:
        busy = None
    # print (f"Cutcode-Len={cutcode_len}, unordered: {len(unordered)}")
    while True:
        current_pass += 1
        if current_pass % 50 == 0 and busy and busy.shown:
            # That may not be a fully correct approximation
            # in terms of the total passes required, but it
            # should give an idea...
            message = _("Pass {cpass}/{tpass}").format(
                cpass=current_pass, tpass=cutcode_len
            )
            busy.change(msg=message, keep=2)
            busy.show()
        closest = None
        backwards = False
        distance = float("inf")

        try:
            last_segment = ordered[-1]
        except IndexError:
            # First iteration: nothing burned yet, fall through to search.
            pass
        else:
            if last_segment.normal:
                # Attempt to initialize value to next segment in subpath
                cut = last_segment.next
                if cut and cut.burns_done < cut.passes:
                    closest = cut
                    backwards = False
                    start = closest.start
                    distance = abs(complex(start[0], start[1]) - curr)
            else:
                # Attempt to initialize value to previous segment in subpath
                cut = last_segment.previous
                if cut and cut.burns_done < cut.passes:
                    closest = cut
                    backwards = True
                    end = closest.end
                    distance = abs(complex(end[0], end[1]) - curr)
            # Gap or continuing on path not permitted, try reversing
            if (
                distance > 50
                and last_segment.burns_done < last_segment.passes
                and last_segment.reversible()
                and last_segment.next is not None
            ):
                # last_segment is a copy, so we need to get original
                closest = last_segment.next.previous
                backwards = last_segment.normal
                distance = 0  # By definition since we are reversing and reburning

        # Stay on path in same direction if gap <= 1/20" i.e. path not quite closed
        # Travel only if path is completely burned or gap > 1/20"
        if distance > 50:
            # Brute-force search over all remaining candidates for the
            # closest start (forward) or end (reverse) point.
            for cut in context.candidate(
                complete_path=complete_path, grouped_inner=grouped_inner
            ):
                s = cut.start
                if (
                    abs(s[0] - curr.real) <= distance
                    and abs(s[1] - curr.imag) <= distance
                    and (not complete_path or cut.closed or cut.first)
                ):
                    d = abs(complex(s[0], s[1]) - curr)
                    if d < distance:
                        closest = cut
                        backwards = False
                        if d <= 0.1:  # Distance in px is zero, we cannot improve.
                            break
                        distance = d

                if not cut.reversible():
                    continue
                e = cut.end
                if (
                    abs(e[0] - curr.real) <= distance
                    and abs(e[1] - curr.imag) <= distance
                    and (not complete_path or cut.closed or cut.last)
                ):
                    d = abs(complex(e[0], e[1]) - curr)
                    if d < distance:
                        closest = cut
                        backwards = True
                        if d <= 0.1:  # Distance in px is zero, we cannot improve.
                            break
                        distance = d

        if closest is None:
            # No candidates remain: everything is burned.
            break

        # Change direction if other direction is coincident and has more burns remaining
        if backwards:
            if (
                closest.next
                and closest.next.burns_done <= closest.burns_done
                and closest.next.start == closest.end
            ):
                closest = closest.next
                backwards = False
        elif closest.reversible():
            if (
                closest.previous
                and closest.previous is not closest
                and closest.previous.burns_done < closest.burns_done
                and closest.previous.end == closest.start
            ):
                closest = closest.previous
                backwards = True

        closest.burns_done += 1
        c = copy(closest)
        if backwards:
            c.reverse()
        end = c.end
        curr = complex(end[0], end[1])
        ordered.append(c)
    # print (f"Now we have {len(ordered)} items in list")
    if hatch_optimize:
        # Optimize each previously skipped hatch group individually.
        for idx, c in enumerate(unordered):
            if isinstance(c, CutGroup):
                c.skip = False
                unordered[idx] = short_travel_cutcode(context=c, kernel=kernel, complete_path=False, grouped_inner=False, channel=channel)
    # As these are reversed, we reverse again...
    ordered.extend(reversed(unordered))
    # print (f"And after extension {len(ordered)} items in list")
    # for c in ordered:
    #     print (f"{type(c).__name__} - {len(c) if isinstance(c, (list, tuple)) else '-childless-'}")
    if context.start is not None:
        ordered._start_x, ordered._start_y = context.start
    else:
        ordered._start_x = 0
        ordered._start_y = 0
    if channel:
        end_times = times()
        end_length = ordered.length_travel(True)
        try:
            delta = (end_length - start_length) / start_length
        except ZeroDivisionError:
            delta = 0
        channel(
            f"Length at end: {end_length:.0f} steps "
            f"({delta:+.0%}), "
            f"optimized in {time() - start_time:.3f} "
            f"elapsed seconds using {end_times[0] - start_times[0]:.3f} seconds CPU"
        )
    return ordered
1655
+
1656
+
1657
def short_travel_cutcode_2opt(
    context: CutCode, kernel=None, passes: int = 50, channel=None
):
    """
    This implements 2-opt algorithm using numpy.

    Skipping of the candidate code it does not perform inner first optimizations.
    Due to the numpy requirement, doesn't work without numpy.
    --
    Uses code I wrote for vpype:
    https://github.com/abey79/vpype/commit/7b1fad6bd0fcfc267473fdb8ba2166821c80d9cd

    @param context: cutcode to be optimized
    @param kernel: kernel value
    @param passes: max passes to perform 2-opt
    @param channel: Channel to send data about the optimization process.
    @return:
    """
    if channel:
        start_length = context.length_travel(True)
        start_time = time()
        start_times = times()
        channel("Executing 2-Opt Short-Travel optimization")
        channel(f"Length at start: {start_length:.0f} steps")

    ordered = CutCode(context.flat())
    length = len(ordered)
    if length <= 1:
        if channel:
            channel("2-Opt: Not enough elements to optimize.")
        return ordered

    curr = context.start
    if curr is None:
        curr = 0
    else:
        curr = complex(curr)

    current_pass = 1
    min_value = -1e-10  # Do not swap on rounding error.

    # Each row encodes one cut: start point, forward index, reversed index
    # (~i), end point — all stored as complex so np.flip of a row segment
    # simultaneously reverses order AND swaps start/end per cut.
    endpoints = np.zeros((length, 4), dtype="complex")
    # start, index, reverse-index, end
    for i in range(length):
        endpoints[i] = complex(ordered[i].start), i, ~i, complex(ordered[i].end)
    indexes0 = np.arange(0, length - 1)
    indexes1 = indexes0 + 1

    def log_progress(pos):
        # Report current laser-off distance and update the busy UI.
        starts = endpoints[indexes0, -1]
        ends = endpoints[indexes1, 0]
        dists = np.abs(starts - ends)
        dist_sum = dists.sum() + abs(curr - endpoints[0][0])
        channel(
            f"optimize: laser-off distance is {dist_sum}. {100 * pos / length:.02f}% done with pass {current_pass}/{passes}"
        )
        if kernel:
            busy = kernel.busyinfo
            _ = kernel.translation
            if busy.shown:
                busy.change(
                    msg=_("Pass {cpass}/{tpass}").format(
                        cpass=current_pass, tpass=passes
                    ),
                    keep=2,
                )
                busy.show()

    improved = True
    while improved:
        improved = False

        # Case 1: reversing the prefix [0..index] shortens travel from curr.
        first = endpoints[0][0]
        cut_ends = endpoints[indexes0, -1]
        cut_starts = endpoints[indexes1, 0]

        # delta = np.abs(curr - first) + np.abs(first - cut_starts) - np.abs(cut_ends - cut_starts)
        delta = (
            np.abs(curr - cut_ends)
            + np.abs(first - cut_starts)
            - np.abs(cut_ends - cut_starts)
            - np.abs(curr - first)
        )
        index = int(np.argmin(delta))
        if delta[index] < min_value:
            endpoints[: index + 1] = np.flip(
                endpoints[: index + 1], (0, 1)
            )  # top to bottom, and right to left flips.
            improved = True
            if channel:
                log_progress(1)
        # Case 2: reversing an interior segment [mid..mid+index] helps.
        for mid in range(1, length - 1):
            idxs = np.arange(mid, length - 1)

            mid_source = endpoints[mid - 1, -1]
            mid_dest = endpoints[mid, 0]
            cut_ends = endpoints[idxs, -1]
            cut_starts = endpoints[idxs + 1, 0]
            delta = (
                np.abs(mid_source - cut_ends)
                + np.abs(mid_dest - cut_starts)
                - np.abs(cut_ends - cut_starts)
                - np.abs(mid_source - mid_dest)
            )
            index = int(np.argmin(delta))
            if delta[index] < min_value:
                endpoints[mid : mid + index + 1] = np.flip(
                    endpoints[mid : mid + index + 1], (0, 1)
                )
                improved = True
                if channel:
                    log_progress(mid)

        # Case 3: reversing the suffix [index+1..end] helps.
        last = endpoints[-1, -1]
        cut_ends = endpoints[indexes0, -1]
        cut_starts = endpoints[indexes1, 0]

        delta = np.abs(cut_ends - last) - np.abs(cut_ends - cut_starts)
        index = int(np.argmin(delta))
        if delta[index] < min_value:
            endpoints[index + 1 :] = np.flip(
                endpoints[index + 1 :], (0, 1)
            )  # top to bottom, and right to left flips.
            improved = True
            if channel:
                log_progress(length)
        if current_pass >= passes:
            break
        current_pass += 1

    # Two-opt complete.
    # Column 1 holds the (possibly negated via ~) original index; its real
    # part cast to int yields the permutation to apply.
    order = endpoints[:, 1].real.astype(int)
    ordered.reordered(order)
    if channel:
        end_times = times()
        end_length = ordered.length_travel(True)
        channel(
            f"Length at end: {end_length:.0f} steps "
            f"({(end_length - start_length) / start_length:+.0%}), "
            f"optimized in {time() - start_time:.3f} "
            f"elapsed seconds using {end_times[0] - start_times[0]:.3f} seconds CPU"
        )
    return ordered
1800
+
1801
+
1802
def inner_selection_cutcode(
    context: CutCode, channel=None, grouped_inner: Optional[bool] = False
):
    """
    Select cutcode from the permitted candidates until none remain, producing a
    valid (inner-first) burn order without attempting any travel optimization.

    This routine runs if opt_inner first is selected and opt_greedy is not selected.

    @param context: CutCode whose flattened cuts are to be ordered.
    @param channel: optional logging channel for progress/statistics output.
    @param grouped_inner: forwarded to context.candidate() to control grouping.
    @return: a new CutCode containing the selected cuts in burn order.
    """
    if channel:
        # Record baseline travel length and timing so the summary at the end
        # can report the relative change and CPU cost.
        start_length = context.length_travel(True)
        start_time = time()
        start_times = times()
        channel("Executing Inner Selection-Only optimization")
        channel(f"Length at start: {start_length:.0f} steps")

    # Reset burn counters; candidate() uses burns_done to decide what remains.
    for cut in context.flat():
        cut.burns_done = 0

    ordered = CutCode()
    iterations = 0
    while True:
        # Drain whatever is currently permitted; stop once nothing is burnable.
        candidates = list(context.candidate(grouped_inner=grouped_inner))
        if not candidates:
            break
        for cut in candidates:
            cut.burns_done += 1
        ordered.extend(copy(candidates))
        iterations += 1

    if channel:
        end_times = times()
        end_length = ordered.length_travel(True)
        parts = [f"Length at end: {end_length:.0f} steps "]
        if start_length != 0:
            # Percentage change is only meaningful with a nonzero baseline.
            parts.append(f"({(end_length - start_length) / start_length:+.0%}), ")
        parts.append(f"optimized in {time() - start_time:.3f} ")
        parts.append(f"elapsed seconds using {end_times[0] - start_times[0]:.3f} ")
        parts.append(f"seconds CPU in {iterations} iterations")
        channel("".join(parts))
    return ordered