meerk40t 0.9.7020__py2.py3-none-any.whl → 0.9.7040__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98)
  1. meerk40t/balormk/clone_loader.py +3 -2
  2. meerk40t/balormk/controller.py +28 -11
  3. meerk40t/balormk/cylindermod.py +1 -0
  4. meerk40t/balormk/device.py +13 -9
  5. meerk40t/balormk/driver.py +9 -2
  6. meerk40t/balormk/galvo_commands.py +3 -1
  7. meerk40t/balormk/gui/gui.py +6 -0
  8. meerk40t/balormk/livelightjob.py +338 -321
  9. meerk40t/balormk/mock_connection.py +4 -3
  10. meerk40t/balormk/usb_connection.py +11 -2
  11. meerk40t/camera/camera.py +19 -14
  12. meerk40t/camera/gui/camerapanel.py +6 -0
  13. meerk40t/core/cutcode/cutcode.py +1 -1
  14. meerk40t/core/cutplan.py +169 -43
  15. meerk40t/core/elements/element_treeops.py +444 -147
  16. meerk40t/core/elements/elements.py +100 -9
  17. meerk40t/core/elements/grid.py +8 -1
  18. meerk40t/core/elements/offset_mk.py +2 -1
  19. meerk40t/core/elements/shapes.py +618 -279
  20. meerk40t/core/elements/tree_commands.py +10 -5
  21. meerk40t/core/node/elem_ellipse.py +18 -8
  22. meerk40t/core/node/elem_image.py +51 -19
  23. meerk40t/core/node/elem_line.py +18 -8
  24. meerk40t/core/node/elem_path.py +18 -8
  25. meerk40t/core/node/elem_point.py +10 -4
  26. meerk40t/core/node/elem_polyline.py +19 -11
  27. meerk40t/core/node/elem_rect.py +18 -8
  28. meerk40t/core/node/elem_text.py +11 -5
  29. meerk40t/core/node/filenode.py +2 -8
  30. meerk40t/core/node/groupnode.py +11 -11
  31. meerk40t/core/node/image_processed.py +11 -5
  32. meerk40t/core/node/image_raster.py +11 -5
  33. meerk40t/core/node/node.py +70 -19
  34. meerk40t/core/node/refnode.py +2 -1
  35. meerk40t/core/planner.py +23 -0
  36. meerk40t/core/svg_io.py +91 -34
  37. meerk40t/core/undos.py +1 -1
  38. meerk40t/core/wordlist.py +1 -0
  39. meerk40t/device/dummydevice.py +7 -1
  40. meerk40t/dxf/dxf_io.py +6 -0
  41. meerk40t/extra/mk_potrace.py +1959 -0
  42. meerk40t/extra/param_functions.py +1 -1
  43. meerk40t/extra/potrace.py +14 -10
  44. meerk40t/extra/vtracer.py +222 -0
  45. meerk40t/grbl/device.py +81 -8
  46. meerk40t/grbl/interpreter.py +1 -1
  47. meerk40t/gui/about.py +21 -3
  48. meerk40t/gui/basicops.py +3 -3
  49. meerk40t/gui/choicepropertypanel.py +1 -4
  50. meerk40t/gui/devicepanel.py +20 -16
  51. meerk40t/gui/gui_mixins.py +8 -1
  52. meerk40t/gui/icons.py +330 -253
  53. meerk40t/gui/laserpanel.py +8 -3
  54. meerk40t/gui/laserrender.py +41 -21
  55. meerk40t/gui/magnetoptions.py +158 -65
  56. meerk40t/gui/materialtest.py +229 -39
  57. meerk40t/gui/navigationpanels.py +229 -24
  58. meerk40t/gui/propertypanels/hatchproperty.py +2 -0
  59. meerk40t/gui/propertypanels/imageproperty.py +160 -106
  60. meerk40t/gui/ribbon.py +6 -1
  61. meerk40t/gui/scenewidgets/gridwidget.py +29 -32
  62. meerk40t/gui/scenewidgets/rectselectwidget.py +190 -192
  63. meerk40t/gui/simulation.py +75 -77
  64. meerk40t/gui/spoolerpanel.py +6 -9
  65. meerk40t/gui/statusbarwidgets/defaultoperations.py +84 -48
  66. meerk40t/gui/statusbarwidgets/infowidget.py +2 -2
  67. meerk40t/gui/themes.py +7 -1
  68. meerk40t/gui/tips.py +15 -1
  69. meerk40t/gui/toolwidgets/toolpointmove.py +3 -1
  70. meerk40t/gui/wxmeerk40t.py +26 -0
  71. meerk40t/gui/wxmmain.py +242 -114
  72. meerk40t/gui/wxmscene.py +180 -4
  73. meerk40t/gui/wxmtree.py +4 -2
  74. meerk40t/gui/wxutils.py +60 -15
  75. meerk40t/image/imagetools.py +130 -66
  76. meerk40t/internal_plugins.py +4 -0
  77. meerk40t/kernel/kernel.py +49 -22
  78. meerk40t/kernel/settings.py +29 -8
  79. meerk40t/lihuiyu/device.py +30 -12
  80. meerk40t/main.py +22 -5
  81. meerk40t/moshi/device.py +20 -6
  82. meerk40t/network/console_server.py +22 -6
  83. meerk40t/newly/device.py +10 -3
  84. meerk40t/newly/gui/gui.py +10 -0
  85. meerk40t/ruida/device.py +22 -2
  86. meerk40t/ruida/gui/gui.py +6 -6
  87. meerk40t/ruida/gui/ruidaoperationproperties.py +1 -10
  88. meerk40t/ruida/loader.py +6 -3
  89. meerk40t/ruida/rdjob.py +3 -3
  90. meerk40t/tools/geomstr.py +195 -39
  91. meerk40t/tools/rasterplotter.py +179 -93
  92. {meerk40t-0.9.7020.dist-info → meerk40t-0.9.7040.dist-info}/METADATA +1 -1
  93. {meerk40t-0.9.7020.dist-info → meerk40t-0.9.7040.dist-info}/RECORD +98 -96
  94. {meerk40t-0.9.7020.dist-info → meerk40t-0.9.7040.dist-info}/WHEEL +1 -1
  95. {meerk40t-0.9.7020.dist-info → meerk40t-0.9.7040.dist-info}/LICENSE +0 -0
  96. {meerk40t-0.9.7020.dist-info → meerk40t-0.9.7040.dist-info}/entry_points.txt +0 -0
  97. {meerk40t-0.9.7020.dist-info → meerk40t-0.9.7040.dist-info}/top_level.txt +0 -0
  98. {meerk40t-0.9.7020.dist-info → meerk40t-0.9.7040.dist-info}/zip-safe +0 -0
meerk40t/balormk/mock_connection.py CHANGED
@@ -48,6 +48,7 @@ class MockConnection:
 
     def write(self, index=0, packet=None):
         from meerk40t.balormk.controller import GetSerialNo
+
         packet_length = len(packet)
         assert packet_length == 0xC or packet_length == 0xC00
         if packet is not None:
@@ -88,6 +89,7 @@ class MockConnection:
 
     def _parse_single(self, packet):
         from meerk40t.balormk.controller import single_command_lookup
+
         b0 = packet[1] << 8 | packet[0]
         b1 = packet[3] << 8 | packet[2]
         b2 = packet[5] << 8 | packet[4]
@@ -104,12 +106,11 @@ class MockConnection:
 
         # Convert input to bytes early
         if isinstance(data, str):
-            data = data.encode('ascii')
+            data = data.encode("ascii")
 
         # Create fixed-size response with padding
         self._implied_response = bytearray(8)
-        self._implied_response[:len(data)] = data[:8]
-
+        self._implied_response[: len(data)] = data[:8]
 
     def read(self, index=0):
         if self._implied_response is None:
meerk40t/balormk/usb_connection.py CHANGED
@@ -232,10 +232,19 @@ class USBConnection:
         except usb.core.NoBackendError as e:
             self.channel(str(e))
             from platform import system
+
             osname = system()
             if osname == "Windows":
-                self.channel(_("Did you install the libusb driver via Zadig (https://zadig.akeo.ie/)?"))
-                self.channel(_("Consult the wiki: https://github.com/meerk40t/meerk40t/wiki/Install%3A-Windows"))
+                self.channel(
+                    _(
+                        "Did you install the libusb driver via Zadig (https://zadig.akeo.ie/)?"
+                    )
+                )
+                self.channel(
+                    _(
+                        "Consult the wiki: https://github.com/meerk40t/meerk40t/wiki/Install%3A-Windows"
+                    )
+                )
             self.channel(_("PyUsb detected no backend LibUSB driver."))
             return -2
         except ConnectionRefusedError:
meerk40t/camera/camera.py CHANGED
@@ -189,32 +189,33 @@ class Camera(Service):
             pass
         return actual_width, actual_height
 
-    def _get_capture(self, set_resolution = True):
+    def _get_capture(self, set_resolution=True):
         import platform
+
         # print (self.uri, type(self.uri).__name__)
         if platform.system() == "Windows":
             self.logger("Set DSHOW for Windows")
             cv2.CAP_DSHOW
-            #sets the Windows cv2 backend to DSHOW (Direct Video Input Show)
+            # sets the Windows cv2 backend to DSHOW (Direct Video Input Show)
             cap = cv2.VideoCapture(self.uri)
         elif platform.system() == "Linux":
             self.logger("Set GSTREAMER for Linux")
-            cv2.CAP_GSTREAMER # set the Linux cv2 backend to GTREAMER
-            #cv2.CAP_V4L
+            cv2.CAP_GSTREAMER  # set the Linux cv2 backend to GTREAMER
+            # cv2.CAP_V4L
             cap = cv2.VideoCapture(self.uri)
         else:
             self.logger("Try something for Darwin")
             cap = cv2.VideoCapture(self.uri)
             # For MAC please refer to link below for I/O
-            cap.set(cv2.CAP_FFMPEG, cv2.CAP_AVFOUNDATION) # not sure!
-            #please refer to reference link at bottom of page for more I/O
+            cap.set(cv2.CAP_FFMPEG, cv2.CAP_AVFOUNDATION)  # not sure!
+            # please refer to reference link at bottom of page for more I/O
         if set_resolution:
-            self.logger (f"Try to start camera with {self.width}x{self.height}")
+            self.logger(f"Try to start camera with {self.width}x{self.height}")
             cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
             cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
-            self.logger (
-                f"Capture: {str(self.capture)}\n" +
-                f"Frame resolution set to: ({cap.get(cv2.CAP_PROP_FRAME_WIDTH)}x{cap.get(cv2.CAP_PROP_FRAME_HEIGHT)})"
+            self.logger(
+                f"Capture: {str(self.capture)}\n"
+                + f"Frame resolution set to: ({cap.get(cv2.CAP_PROP_FRAME_WIDTH)}x{cap.get(cv2.CAP_PROP_FRAME_HEIGHT)})"
             )
 
         return cap
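
Note on the `_get_capture` hunk: it only reformats the comments around the bare `cv2.CAP_DSHOW` / `cv2.CAP_GSTREAMER` expressions, which by themselves do not select a backend. As an illustration only (not code from this package, `open_capture` is a hypothetical helper), OpenCV 4.x accepts the backend preference as a second argument to `cv2.VideoCapture`:

import platform

import cv2


def open_capture(uri, width=None, height=None):
    # Pick a backend hint per platform; CAP_ANY lets OpenCV decide on its own.
    backend = {
        "Windows": cv2.CAP_DSHOW,
        "Linux": cv2.CAP_GSTREAMER,
        "Darwin": cv2.CAP_AVFOUNDATION,
    }.get(platform.system(), cv2.CAP_ANY)
    cap = cv2.VideoCapture(uri, backend)
    if width and height:
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cap
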
@@ -297,14 +298,18 @@ class Camera(Service):
                 actual_height = 0
                 actual_width = 0
                 msg = f"(Fail: {e})"
-            self.logger(f"Tried {width}x{height} ({description}) - received {actual_width}x{actual_height} {msg}")
+            self.logger(
+                f"Tried {width}x{height} ({description}) - received {actual_width}x{actual_height} {msg}"
+            )
             if int(actual_width) == width and int(actual_height) == height:
                 supported_resolutions.append((width, height, description))
-
-        cap.release()
+        try:
+            # Might crash if the camera is not opened
+            cap.release()
+        except cv2.error:
+            pass
         return supported_resolutions
 
-
     def process_frame(self):
         frame = self._current_raw
         if (
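
The resolution-probing hunk sets each candidate resolution, reads back what the driver actually delivered, and now guards `cap.release()` against `cv2.error`. A condensed, self-contained sketch of that probe loop (illustration only; `CANDIDATES` and `probe_resolutions` are made up for this example):

import cv2

CANDIDATES = [(640, 480, "VGA"), (1280, 720, "HD"), (1920, 1080, "Full HD")]


def probe_resolutions(index=0):
    cap = cv2.VideoCapture(index)
    supported = []
    for width, height, description in CANDIDATES:
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        actual_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        actual_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        if (actual_w, actual_h) == (width, height):
            supported.append((width, height, description))
    try:
        cap.release()  # may raise if the device never opened
    except cv2.error:
        pass
    return supported
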
meerk40t/camera/gui/camerapanel.py CHANGED
@@ -947,35 +947,41 @@ class CameraInterface(MWindow):
                     "label": _("Camera {index}").format(index=0),
                     "action": camera_click(0),
                     "signal": "camset0",
+                    "multi_autoexec": True,
                 },
                 {
                     "identifier": "cam1",
                     "label": _("Camera {index}").format(index=1),
                     "action": camera_click(1),
                     "signal": "camset1",
+                    "multi_autoexec": True,
                 },
                 {
                     "identifier": "cam2",
                     "label": _("Camera {index}").format(index=2),
                     "action": camera_click(2),
                     "signal": "camset2",
+                    "multi_autoexec": True,
                 },
                 {
                     "identifier": "cam3",
                     "label": _("Camera {index}").format(index=3),
                     "action": camera_click(3),
                     "signal": "camset3",
+                    "multi_autoexec": True,
                 },
                 {
                     "identifier": "cam4",
                     "label": _("Camera {index}").format(index=4),
                     "action": camera_click(4),
                     "signal": "camset4",
+                    "multi_autoexec": True,
                 },
                 {
                     "identifier": "id_cam",
                     "label": _("Identify cameras"),
                     "action": detect_usb_cameras,
+                    "multi_autoexec": True,
                 },
             ],
         },
meerk40t/core/cutcode/cutcode.py CHANGED
@@ -132,7 +132,7 @@ class CutCode(CutGroup):
             length_of_previous_travel = Point.distance(prev.end, current.start)
             total_distance_travel += length_of_previous_travel
             rapid_speed = self._native_speed(cutcode)
-            if rapid_speed is not None:
+            if rapid_speed is not None and rapid_speed != 0:
                 total_duration_travel = total_distance_travel / rapid_speed
                 duration_of_this_travel = length_of_previous_travel / rapid_speed
 
meerk40t/core/cutplan.py CHANGED
@@ -15,15 +15,16 @@ CutPlan handles the various complicated algorithms to optimising the sequence of
 """
 
 from copy import copy
+from functools import lru_cache
 from math import isinf
 from os import times
 from time import perf_counter, time
 from typing import Optional
-from functools import lru_cache
+
 import numpy as np
 
 from ..svgelements import Group, Matrix, Path, Polygon
-from ..tools.geomstr import Geomstr
+from ..tools.geomstr import Geomstr, stitch_geometries
 from ..tools.pathtools import VectorMontonizer
 from .cutcode.cutcode import CutCode
 from .cutcode.cutgroup import CutGroup
@@ -31,7 +32,7 @@ from .cutcode.cutobject import CutObject
 from .cutcode.rastercut import RasterCut
 from .node.node import Node
 from .node.util_console import ConsoleOperation
-from .units import Length, UNITS_PER_MM
+from .units import UNITS_PER_MM, Length
 
 """
 The time to compile does outweigh the benefit...
@@ -47,6 +48,7 @@ except Exception as e:
             return inner
 """
 
+
 class CutPlanningFailedError(Exception):
     pass
 
@@ -172,7 +174,9 @@ class CutPlan:
         for place in self.plan:
             if not hasattr(place, "type"):
                 continue
-            if place.type.startswith("place ") and (hasattr(place, "output") and place.output):
+            if place.type.startswith("place ") and (
+                hasattr(place, "output") and place.output
+            ):
                 loops = 1
                 if hasattr(place, "loops") and place.loops > 1:
                     loops = place.loops
@@ -187,6 +191,7 @@ class CutPlan:
                     placements.append(scene_to_device_matrix)
 
         original_ops = copy(self.plan)
+
         if self.context.opt_raster_optimisation and self.context.do_optimization:
             try:
                 margin = float(Length(self.context.opt_raster_opt_margin, "0"))
@@ -225,7 +230,9 @@ class CutPlan:
                         coolop = ConsoleOperation(command=cmd)
                         self.plan.append(coolop)
                     else:
-                        self.channel("The current device does not support a coolant method")
+                        self.channel(
+                            "The current device does not support a coolant method"
+                        )
             current_cool = cool
             # Is there already a coolant operation?
             if getattr(original_op, "type", "") == "util console":
@@ -244,8 +251,90 @@ class CutPlan:
                 op_type = getattr(op, "type", "")
                 if op_type.startswith("place "):
                     continue
+                if (
+                    op_type == "op cut"
+                    and self.context.opt_stitching
+                    and self.context.do_optimization
+                ):
+                    # This isn't a lossless operation: dotted/dashed lines will be treated as solid lines
+                    try:
+                        stitch_tolerance = float(
+                            Length(self.context.opt_stitch_tolerance)
+                        )
+                    except ValueError:
+                        stitch_tolerance = 0
+                    default_stroke = None
+                    default_strokewidth = None
+
+                    def stitcheable_nodes(data, tolerance) -> list:
+                        out = []
+                        geoms = []
+                        # Store all geometries together with an indicator, to which node they belong
+                        for idx, node in enumerate(data):
+                            if not hasattr(node, "as_geometry"):
+                                continue
+                            for g1 in node.as_geometry().as_contiguous():
+                                geoms.append((idx, g1))
+                        for idx1, (nodeidx1, g1) in enumerate(geoms):
+                            for idx2 in range(idx1 + 1, len(geoms)):
+                                nodeidx2 = geoms[idx2][0]
+                                g2 = geoms[idx2][1]
+                                fp1 = g1.first_point
+                                fp2 = g2.first_point
+                                lp1 = g1.last_point
+                                lp2 = g2.last_point
+                                if fp1 is None or fp2 is None:
+                                    continue
+                                if lp1 is None or lp2 is None:
+                                    continue
+                                if (
+                                    abs(lp1 - lp2) <= tolerance
+                                    or abs(lp1 - fp2) <= tolerance
+                                    or abs(fp1 - fp2) <= tolerance
+                                    or abs(fp1 - lp2) <= tolerance
+                                ):
+                                    if nodeidx1 not in out:
+                                        out.append(nodeidx1)
+                                    if nodeidx2 not in out:
+                                        out.append(nodeidx2)
+
+                        return [data[idx] for idx in out]
+
+                    geoms = []
+                    to_be_deleted = []
+                    data = stitcheable_nodes(list(op.flat()), stitch_tolerance)
+                    for node in data:
+                        if node is op:
+                            continue
+                        if hasattr(node, "as_geometry"):
+                            geom: Geomstr = node.as_geometry()
+                            geoms.extend(iter(geom.as_contiguous()))
+                            if default_stroke is None and hasattr(node, "stroke"):
+                                default_stroke = node.stroke
+                            if default_strokewidth is None and hasattr(
+                                node, "stroke_width"
+                            ):
+                                default_strokewidth = node.stroke_width
+                            to_be_deleted.append(node)
+                    result = stitch_geometries(geoms, stitch_tolerance)
+                    if result is not None:
+                        # print (f"Paths at start of action: {len(list(op.flat()))}")
+                        for node in to_be_deleted:
+                            node.remove_node()
+                        for idx, g in enumerate(result):
+                            node = op.add(
+                                label=f"Stitch # {idx + 1}",
+                                stroke=default_stroke,
+                                stroke_width=default_strokewidth,
+                                geometry=g,
+                                type="elem path",
+                            )
+                        # print (f"Paths at start of action: {len(list(op.flat()))}")
+
                 self.plan.append(op)
-                if (op_type.startswith("op") or op_type.startswith("util")) and hasattr(op, "preprocess"):
+                if (op_type.startswith("op") or op_type.startswith("util")) and hasattr(
+                    op, "preprocess"
+                ):
                     op.preprocess(self.context, placement, self)
                 if op_type.startswith("op"):
                     for node in op.flat():
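
The new stitching pass for `op cut` operations selects nodes purely by endpoint proximity of their contiguous subpaths and then hands the geometries to `stitch_geometries`. A condensed sketch of that selection criterion (illustration only; it assumes complex-number endpoints, as `Geomstr.first_point` / `last_point` return):

def endpoints_touch(g1_first, g1_last, g2_first, g2_last, tolerance):
    # Two open subpaths are stitch candidates if any pair of their
    # endpoints lies within the configured stitch tolerance.
    pairs = (
        (g1_last, g2_first),
        (g1_last, g2_last),
        (g1_first, g2_first),
        (g1_first, g2_last),
    )
    return any(abs(a - b) <= tolerance for a, b in pairs)
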
@@ -455,11 +544,15 @@ class CutPlan:
         """
         if not isinstance(last_item, CutCode):
             # The last plan item is not cutcode, merge is only between cutobjects adding to cutcode.
-            self.channel (f"last_item is no cutcode ({type(last_item).__name__}), can't merge")
+            self.channel(
+                f"last_item is no cutcode ({type(last_item).__name__}), can't merge"
+            )
             return False
         if not isinstance(current_item, CutObject):
             # The object to be merged is not a cutObject and cannot be added to Cutcode.
-            self.channel (f"current_item is no cutcode ({type(current_item).__name__}), can't merge")
+            self.channel(
+                f"current_item is no cutcode ({type(current_item).__name__}), can't merge"
+            )
             return False
         last_op = last_item.original_op
         if last_op is None:
@@ -468,7 +561,9 @@ class CutPlan:
         if current_op is None:
             current_op = ""
         if last_op.startswith("util") or current_op.startswith("util"):
-            self.channel (f"{last_op} / {current_op} - at least one is a util operation, can't merge")
+            self.channel(
+                f"{last_op} / {current_op} - at least one is a util operation, can't merge"
+            )
             return False
 
         if (
@@ -476,7 +571,9 @@ class CutPlan:
             and last_item.pass_index != current_item.pass_index
         ):
             # Do not merge if opt_merge_passes is off, and pass_index do not match
-            self.channel (f"{last_item.pass_index} / {current_item.pass_index} - pass indices are different, can't merge")
+            self.channel(
+                f"{last_item.pass_index} / {current_item.pass_index} - pass indices are different, can't merge"
+            )
             return False
 
         if (
@@ -485,11 +582,15 @@ class CutPlan:
         ):
             # Do not merge if opt_merge_ops is off, and the original ops do not match
             # Same settings object implies same original operation
-            self.channel (f"Settings do differ from {last_op} to {current_op} and merge ops= {context.opt_merge_ops}")
+            self.channel(
+                f"Settings do differ from {last_op} to {current_op} and merge ops= {context.opt_merge_ops}"
+            )
             return False
         if not context.opt_inner_first and last_item.original_op == "op cut":
             # Do not merge if opt_inner_first is off, and operation was originally a cut.
-            self.channel (f"Inner first {context.opt_inner_first}, last op= {last_item.original_op} - Last op was a cut, can't merge")
+            self.channel(
+                f"Inner first {context.opt_inner_first}, last op= {last_item.original_op} - Last op was a cut, can't merge"
+            )
             return False
         return True  # No reason these should not be merged.
 
@@ -705,7 +806,9 @@ class CutPlan:
             # We don't combine across plan boundaries
             if not isinstance(pitem, CutGroup):
                 continue
-            grouping, to_be_deleted, item_combined, total = process_plan_item(pitem, busy, total, plan_idx, l_plan)
+            grouping, to_be_deleted, item_combined, total = process_plan_item(
+                pitem, busy, total, plan_idx, l_plan
+            )
             combined += item_combined
             group_count += len(grouping)
 
@@ -1096,7 +1199,9 @@ def is_inside(inner, outer, tolerance=0, resolution=50):
                     return False
         return True
 
-    def scanbeam_code_not_working_reliably(outer_cut, outer_path, inner_cut, inner_path):
+    def scanbeam_code_not_working_reliably(
+        outer_cut, outer_path, inner_cut, inner_path
+    ):
         from ..tools.geomstr import Polygon as Gpoly
         from ..tools.geomstr import Scanbeam
 
@@ -1127,24 +1232,25 @@ def is_inside(inner, outer, tolerance=0, resolution=50):
 
         p1x, p1y = poly[0]
         old_sq_dist = sq_length(p1x - x, p1y - y)
-        for i in range(n+1):
+        for i in range(n + 1):
             p2x, p2y = poly[i % n]
             new_sq_dist = sq_length(p2x - x, p2y - y)
             # We are approximating the edge to an extremely thin ellipse and see
             # whether our point is on that ellipse
             reldist = (
-                old_sq_dist + new_sq_dist +
-                2.0 * np.sqrt(old_sq_dist * new_sq_dist) -
-                sq_length(p2x - p1x, p2y - p1y)
+                old_sq_dist
+                + new_sq_dist
+                + 2.0 * np.sqrt(old_sq_dist * new_sq_dist)
+                - sq_length(p2x - p1x, p2y - p1y)
             )
             if reldist < tolerance_square:
                 return True
 
-            if y > min(p1y,p2y):
-                if y <= max(p1y,p2y):
-                    if x <= max(p1x,p2x):
+            if y > min(p1y, p2y):
+                if y <= max(p1y, p2y):
+                    if x <= max(p1x, p2x):
                         if p1y != p2y:
-                            xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
+                            xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                         if p1x == p2x or x <= xints:
                             inside = not inside
             p1x, p1y = p2x, p2y
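
The reformatted loop above combines the classic even-odd ray cast with a tolerance test that treats each edge as an extremely thin ellipse. A plain-Python illustration of the same idea (simplified endpoint-distance test instead of the squared-ellipse form; not the shipped code):

def point_in_polygon(x, y, poly, tolerance=0.0):
    # A horizontal ray to the right must cross the boundary an odd number of
    # times; points within `tolerance` of an edge count as inside.
    inside = False
    n = len(poly)
    p1x, p1y = poly[0]
    for i in range(1, n + 1):
        p2x, p2y = poly[i % n]
        # On-edge check: point-to-endpoint distances add up to the edge length.
        d1 = ((p1x - x) ** 2 + (p1y - y) ** 2) ** 0.5
        d2 = ((p2x - x) ** 2 + (p2y - y) ** 2) ** 0.5
        edge = ((p2x - p1x) ** 2 + (p2y - p1y) ** 2) ** 0.5
        if d1 + d2 - edge <= tolerance:
            return True
        # Parity test against the edge, ignoring horizontal edges.
        if p1y != p2y and min(p1y, p2y) < y <= max(p1y, p2y):
            xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
            if x <= xints:
                inside = not inside
        p1x, p1y = p2x, p2y
    return inside
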
@@ -1163,20 +1269,20 @@ def is_inside(inner, outer, tolerance=0, resolution=50):
 
     # Separating Axis Theorem (SAT) for Polygon Containment
 
-    # The Separating Axis Theorem (SAT) is a powerful technique for collision detection 
+    # The Separating Axis Theorem (SAT) is a powerful technique for collision detection
     # between convex polygons. It can also be adapted to determine polygon containment.
 
     # How SAT Works:
 
     # Generate Axes: For each edge of the outer polygon, create a perpendicular axis.
     # Project Polygons: Project both the inner and outer polygons onto each axis.
-    # Check Overlap: If the projections of the inner polygon are completely contained 
-    #                within the projections of the outer polygon on all axes, 
+    # Check Overlap: If the projections of the inner polygon are completely contained
+    #                within the projections of the outer polygon on all axes,
     # then the inner polygon is fully contained.
-    
-    # Convex Polygons: SAT is most efficient for convex polygons. 
+
+    # Convex Polygons: SAT is most efficient for convex polygons.
     # For concave polygons, you might need to decompose them into convex sub-polygons.
-    # Computational Cost: SAT can be computationally expensive for large numbers of polygons. 
+    # Computational Cost: SAT can be computationally expensive for large numbers of polygons.
     # In such cases, spatial indexing can be used to reduce the number of pairwise comparisons.
     def project_polygon(polygon, axis):
         # Projects a polygon onto a given axis.
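
The rewrapped comments above describe the Separating Axis Theorem, but the hunk ends before the implementation. For orientation, a minimal self-contained sketch of the projection test those comments outline (convex polygons only; an illustration, not the package's `project_polygon`):

def project(polygon, axis):
    # Scalar projections of every vertex onto the axis.
    dots = [px * axis[0] + py * axis[1] for px, py in polygon]
    return min(dots), max(dots)


def sat_contains(outer, inner):
    # One perpendicular axis per outer edge; the inner projection interval
    # must lie inside the outer interval on every axis for containment.
    n = len(outer)
    for i in range(n):
        (x1, y1), (x2, y2) = outer[i], outer[(i + 1) % n]
        axis = (-(y2 - y1), x2 - x1)
        o_min, o_max = project(outer, axis)
        i_min, i_max = project(inner, axis)
        if i_min < o_min or i_max > o_max:
            return False
    return True
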
@@ -1212,7 +1318,6 @@ def is_inside(inner, outer, tolerance=0, resolution=50):
     """
 
     def raycasting_code_new(outer_polygon, inner_polygon):
-
         def precompute_intersections(polygon):
             slopes = []
             intercepts = []
@@ -1241,9 +1346,13 @@ def is_inside(inner, outer, tolerance=0, resolution=50):
                 slope = slopes[i]
                 intercept = intercepts[i]
                 p1, p2 = polygon[i], polygon[(i + 1) % len(polygon)]
-                
+
                 if np.isnan(slope):  # Vertical line
-                    if x == intercept and y >= min(p1[1], p2[1]) and y <= max(p1[1], p2[1]):
+                    if (
+                        x == intercept
+                        and y >= min(p1[1], p2[1])
+                        and y <= max(p1[1], p2[1])
+                    ):
                         inside = not inside
                 else:
                     if y > min(p1[1], p2[1]):
@@ -1253,17 +1362,21 @@ def is_inside(inner, outer, tolerance=0, resolution=50):
                             xints = (y - intercept) / slope
                             if p1[0] == p2[0] or x <= xints:
                                 inside = not inside
-            
+
             return inside
-        
+
         def is_polygon_inside(outer_polygon, inner_polygon):
             slopes, intercepts, is_vertical = precompute_intersections(outer_polygon)
             for point in inner_polygon:
-                if not point_in_polygon(point[0], point[1], slopes, intercepts, is_vertical, outer_polygon):
+                if not point_in_polygon(
+                    point[0], point[1], slopes, intercepts, is_vertical, outer_polygon
+                ):
                     return False
             return True
 
-        return is_polygon_inside(outer_polygon=outer_polygon, inner_polygon=inner_polygon)
+        return is_polygon_inside(
+            outer_polygon=outer_polygon, inner_polygon=inner_polygon
+        )
 
     def shapely_code(outer_polygon, inner_polygon):
         from shapely.geometry import Polygon
@@ -1273,15 +1386,18 @@ def is_inside(inner, outer, tolerance=0, resolution=50):
         poly_b = Polygon(outer_polygon)
         # Check for containment
         return poly_a.within(poly_b)
-    
+
     @lru_cache(maxsize=128)
     def get_polygon(path, resolution):
         geom = Geomstr.svg(path)
         polygon = np.array(
-            list((p.real, p.imag) for p in geom.as_equal_interpolated_points(distance = resolution))
+            list(
+                (p.real, p.imag)
+                for p in geom.as_equal_interpolated_points(distance=resolution)
+            )
         )
         return polygon
-    
+
     """
     # Testroutines
     from time import perf_counter
@@ -1304,13 +1420,14 @@ def is_inside(inner, outer, tolerance=0, resolution=50):
         t5 = perf_counter()
     except ImportError:
         res4 = "Shapely missing"
-        t5 = t4 
+        t5 = t4
     print (f"Tolerance: {tolerance}, vm={res0} in {t1 - t0:.3f}s, sb={res1} in {t1 - t0:.3f}s, ray-old={res2} in {t2 - t1:.3f}s, ray-new={res3} in {t3 - t2:.3f}s, shapely={res4} in {t4 - t3:.3f}s")
     """
     inner_polygon = get_polygon(inner_path.d(), resolution)
-    outer_polygon = get_polygon(outer_path.d(), resolution) 
+    outer_polygon = get_polygon(outer_path.d(), resolution)
     try:
         import shapely
+
         return shapely_code(outer_polygon, inner_polygon)
     except ImportError:
         return vm_code(outer, outer_polygon, inner, inner_polygon)
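
The hunk above keeps the runtime choice: use Shapely's `within()` when the library imports, otherwise fall back to the pure-Python test (`vm_code`). A minimal sketch of that pattern (illustration only; it reuses the hypothetical `point_in_polygon` from the earlier sketch as the fallback):

def polygon_inside(outer_polygon, inner_polygon):
    try:
        from shapely.geometry import Polygon

        # Fast path: delegate the containment test to Shapely.
        return Polygon(inner_polygon).within(Polygon(outer_polygon))
    except ImportError:
        # Fallback: every inner vertex must pass the ray-cast test.
        return all(
            point_in_polygon(x, y, outer_polygon) for x, y in inner_polygon
        )
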
@@ -1319,6 +1436,7 @@ def is_inside(inner, outer, tolerance=0, resolution=50):
     # return vm_code(outer, outer_path, inner, inner_path)
     return
 
+
 def reify_matrix(self):
     """Apply the matrix to the path and reset matrix."""
     self.element = abs(self.element)
@@ -1398,13 +1516,15 @@ def inner_first_ident(context: CutGroup, kernel=None, channel=None, tolerance=0)
     if kernel:
         busy = kernel.busyinfo
         _ = kernel.translation
-        min_res = min(kernel.device.view.native_scale_x, kernel.device.view.native_scale_y)
+        min_res = min(
+            kernel.device.view.native_scale_x, kernel.device.view.native_scale_y
+        )
         # a 0.5 mm resolution is enough
         resolution = int(0.5 * UNITS_PER_MM / min_res)
         # print(f"Chosen resolution: {resolution} - minscale = {min_res}")
     else:
         busy = None
-        resolution = 10 
+        resolution = 10
     for outer in closed_groups:
         for inner in groups:
             current_pass += 1
@@ -1627,7 +1747,13 @@ def short_travel_cutcode(
         for idx, c in enumerate(unordered):
             if isinstance(c, CutGroup):
                 c.skip = False
-                unordered[idx] = short_travel_cutcode(context=c, kernel=kernel, complete_path=False, grouped_inner=False, channel=channel)
+                unordered[idx] = short_travel_cutcode(
+                    context=c,
+                    kernel=kernel,
+                    complete_path=False,
+                    grouped_inner=False,
+                    channel=channel,
+                )
         # As these are reversed, we reverse again...
         ordered.extend(reversed(unordered))
         # print (f"And after extension {len(ordered)} items in list")