foodforthought-cli 0.2.8-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. ate/__init__.py +6 -0
  2. ate/__main__.py +16 -0
  3. ate/auth/__init__.py +1 -0
  4. ate/auth/device_flow.py +141 -0
  5. ate/auth/token_store.py +96 -0
  6. ate/behaviors/__init__.py +12 -0
  7. ate/behaviors/approach.py +399 -0
  8. ate/cli.py +855 -4551
  9. ate/client.py +90 -0
  10. ate/commands/__init__.py +168 -0
  11. ate/commands/auth.py +389 -0
  12. ate/commands/bridge.py +448 -0
  13. ate/commands/data.py +185 -0
  14. ate/commands/deps.py +111 -0
  15. ate/commands/generate.py +384 -0
  16. ate/commands/memory.py +907 -0
  17. ate/commands/parts.py +166 -0
  18. ate/commands/primitive.py +399 -0
  19. ate/commands/protocol.py +288 -0
  20. ate/commands/recording.py +524 -0
  21. ate/commands/repo.py +154 -0
  22. ate/commands/simulation.py +291 -0
  23. ate/commands/skill.py +303 -0
  24. ate/commands/skills.py +487 -0
  25. ate/commands/team.py +147 -0
  26. ate/commands/workflow.py +271 -0
  27. ate/detection/__init__.py +38 -0
  28. ate/detection/base.py +142 -0
  29. ate/detection/color_detector.py +399 -0
  30. ate/detection/trash_detector.py +322 -0
  31. ate/drivers/__init__.py +18 -6
  32. ate/drivers/ble_transport.py +405 -0
  33. ate/drivers/mechdog.py +360 -24
  34. ate/drivers/wifi_camera.py +477 -0
  35. ate/interfaces/__init__.py +16 -0
  36. ate/interfaces/base.py +2 -0
  37. ate/interfaces/sensors.py +247 -0
  38. ate/llm_proxy.py +239 -0
  39. ate/memory/__init__.py +35 -0
  40. ate/memory/cloud.py +244 -0
  41. ate/memory/context.py +269 -0
  42. ate/memory/embeddings.py +184 -0
  43. ate/memory/export.py +26 -0
  44. ate/memory/merge.py +146 -0
  45. ate/memory/migrate/__init__.py +34 -0
  46. ate/memory/migrate/base.py +89 -0
  47. ate/memory/migrate/pipeline.py +189 -0
  48. ate/memory/migrate/sources/__init__.py +13 -0
  49. ate/memory/migrate/sources/chroma.py +170 -0
  50. ate/memory/migrate/sources/pinecone.py +120 -0
  51. ate/memory/migrate/sources/qdrant.py +110 -0
  52. ate/memory/migrate/sources/weaviate.py +160 -0
  53. ate/memory/reranker.py +353 -0
  54. ate/memory/search.py +26 -0
  55. ate/memory/store.py +548 -0
  56. ate/recording/__init__.py +42 -3
  57. ate/recording/session.py +12 -2
  58. ate/recording/visual.py +416 -0
  59. ate/robot/__init__.py +142 -0
  60. ate/robot/agentic_servo.py +856 -0
  61. ate/robot/behaviors.py +493 -0
  62. ate/robot/ble_capture.py +1000 -0
  63. ate/robot/ble_enumerate.py +506 -0
  64. ate/robot/calibration.py +88 -3
  65. ate/robot/calibration_state.py +388 -0
  66. ate/robot/commands.py +143 -11
  67. ate/robot/direction_calibration.py +554 -0
  68. ate/robot/discovery.py +104 -2
  69. ate/robot/llm_system_id.py +654 -0
  70. ate/robot/locomotion_calibration.py +508 -0
  71. ate/robot/marker_generator.py +611 -0
  72. ate/robot/perception.py +502 -0
  73. ate/robot/primitives.py +614 -0
  74. ate/robot/profiles.py +6 -0
  75. ate/robot/registry.py +5 -2
  76. ate/robot/servo_mapper.py +1153 -0
  77. ate/robot/skill_upload.py +285 -3
  78. ate/robot/target_calibration.py +500 -0
  79. ate/robot/teach.py +515 -0
  80. ate/robot/types.py +242 -0
  81. ate/robot/visual_labeler.py +9 -0
  82. ate/robot/visual_servo_loop.py +494 -0
  83. ate/robot/visual_servoing.py +570 -0
  84. ate/robot/visual_system_id.py +906 -0
  85. ate/transports/__init__.py +121 -0
  86. ate/transports/base.py +394 -0
  87. ate/transports/ble.py +405 -0
  88. ate/transports/hybrid.py +444 -0
  89. ate/transports/serial.py +345 -0
  90. ate/urdf/__init__.py +30 -0
  91. ate/urdf/capture.py +582 -0
  92. ate/urdf/cloud.py +491 -0
  93. ate/urdf/collision.py +271 -0
  94. ate/urdf/commands.py +708 -0
  95. ate/urdf/depth.py +360 -0
  96. ate/urdf/inertial.py +312 -0
  97. ate/urdf/kinematics.py +330 -0
  98. ate/urdf/lifting.py +415 -0
  99. ate/urdf/meshing.py +300 -0
  100. ate/urdf/models/__init__.py +110 -0
  101. ate/urdf/models/depth_anything.py +253 -0
  102. ate/urdf/models/sam2.py +324 -0
  103. ate/urdf/motion_analysis.py +396 -0
  104. ate/urdf/pipeline.py +468 -0
  105. ate/urdf/scale.py +256 -0
  106. ate/urdf/scan_session.py +411 -0
  107. ate/urdf/segmentation.py +299 -0
  108. ate/urdf/synthesis.py +319 -0
  109. ate/urdf/topology.py +336 -0
  110. ate/urdf/validation.py +371 -0
  111. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/METADATA +1 -1
  112. foodforthought_cli-0.3.0.dist-info/RECORD +166 -0
  113. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/WHEEL +1 -1
  114. foodforthought_cli-0.2.8.dist-info/RECORD +0 -73
  115. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/entry_points.txt +0 -0
  116. {foodforthought_cli-0.2.8.dist-info → foodforthought_cli-0.3.0.dist-info}/top_level.txt +0 -0
ate/robot/teach.py ADDED
@@ -0,0 +1,515 @@
"""
Fast Teaching Mode for Robot Skill Development.

Provides a real-time, keyboard-driven interface for:
1. Live webcam preview
2. Quick servo adjustment with visual feedback
3. One-key pose saving
4. Immediate playback testing

Usage:
    ate robot teach --port /dev/cu.usbserial-10 --name mechdog

Controls:
    ↑/↓    Select servo
    ←/→    Decrease/increase servo value
    SHIFT  Fine adjustment (±10 instead of ±50)
    SPACE  Move to current position
    S      Save current position as pose
    A      Create action from saved poses
    P      Playback last action
    R      Reset to center positions
    Q      Quit and save
"""

import sys
import time
import json
import threading
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from dataclasses import dataclass, field
from datetime import datetime

try:
    import cv2
    import numpy as np  # bundled with opencv-python; used for blank frames
    HAS_OPENCV = True
except ImportError:
    HAS_OPENCV = False

from .calibration import (
    RobotCalibration,
    ServoCalibration,
    Pose,
    JointType,
    load_calibration,
    save_calibration,
)
from .visual_labeler import (
    SkillLibrary,
    Action,
    ActionStep,
    ActionType,
    load_skill_library,
    save_skill_library,
    ROBOT_PRESETS,
    apply_preset_to_calibration,
)

@dataclass
class TeachingSession:
    """State for a teaching session."""
    robot_name: str
    calibration: RobotCalibration
    library: SkillLibrary

    # Current state
    selected_servo_idx: int = 0
    servo_ids: List[int] = field(default_factory=list)
    current_positions: Dict[int, int] = field(default_factory=dict)

    # Recorded poses for action creation
    recorded_poses: List[str] = field(default_factory=list)

    # Display
    window_name: str = "Robot Teaching Mode"

    def __post_init__(self):
        self.servo_ids = sorted(self.calibration.servos.keys())
        # Initialize current positions to center
        for sid in self.servo_ids:
            servo = self.calibration.servos[sid]
            self.current_positions[sid] = servo.center_value

class TeachingInterface:
    """
    Real-time teaching interface with live preview.

    Uses OpenCV for display and keyboard handling.
    """

    def __init__(
        self,
        serial_port: str,
        robot_name: str = "robot",
        robot_model: str = "unknown",
        webcam_id: int = 0,
        camera_url: Optional[str] = None,
    ):
        if not HAS_OPENCV:
            raise RuntimeError("OpenCV required. Install with: pip install opencv-python")

        self.serial_port = serial_port
        self.robot_name = robot_name
        self.robot_model = robot_model
        self.webcam_id = webcam_id
        self.camera_url = camera_url

        # Serial connection
        self.serial = None
        self._connect_serial()

        # Camera
        self.webcam = None
        self.frame = None
        self.frame_lock = threading.Lock()

        # Load or create calibration
        self.calibration = load_calibration(robot_name)
        if not self.calibration:
            self.calibration = RobotCalibration(
                robot_model=robot_model,
                robot_name=robot_name,
                serial_port=serial_port,
            )
            # Apply preset if available
            preset = ROBOT_PRESETS.get(robot_model)
            if preset:
                apply_preset_to_calibration(self.calibration, preset)
                print(f"Applied {preset['name']} preset")

        # Load or create skill library
        self.library = load_skill_library(robot_name)
        if not self.library:
            self.library = SkillLibrary(
                robot_name=robot_name,
                robot_model=robot_model,
            )

        # Create session
        self.session = TeachingSession(
            robot_name=robot_name,
            calibration=self.calibration,
            library=self.library,
        )

        # Discover servos if none defined
        if not self.session.servo_ids:
            self._discover_servos()

    def _connect_serial(self):
        """Connect to robot via serial."""
        try:
            import serial
            self.serial = serial.Serial(self.serial_port, 115200, timeout=1)
            time.sleep(0.5)  # Wait for connection
            # Clear any pending data
            self.serial.reset_input_buffer()
        except Exception as e:
            print(f"Serial connection failed: {e}")
            self.serial = None

    def _send_command(self, cmd: str) -> Optional[str]:
        """Send command to robot and get response."""
        if not self.serial:
            return None
        try:
            self.serial.write(f"{cmd}\n".encode())
            self.serial.flush()
            time.sleep(0.05)
            response = ""
            while self.serial.in_waiting:
                response += self.serial.read(self.serial.in_waiting).decode('utf-8', errors='ignore')
                time.sleep(0.01)
            return response.strip()
        except Exception as e:
            print(f"Command failed: {e}")
            return None

    def _discover_servos(self):
        """Discover connected servos."""
        print("Discovering servos...")
        found = []
        for sid in range(1, 16):
            response = self._send_command(f"servo_read({sid})")
            if response and "error" not in response.lower():
                try:
                    # Try to parse position
                    pos = int(response.split("=")[-1].strip().rstrip(")"))
                    found.append(sid)
                    self.session.current_positions[sid] = pos

                    # Add to calibration if not present
                    if sid not in self.calibration.servos:
                        self.calibration.servos[sid] = ServoCalibration(
                            servo_id=sid,
                            name=f"servo_{sid}",
                            joint_type=JointType.UNKNOWN,
                        )
                except Exception:
                    # Response was not a parseable position; skip this ID
                    pass

        self.session.servo_ids = sorted(found)
        print(f"Found {len(found)} servos: {found}")

    def _move_servo(self, servo_id: int, position: int, time_ms: int = 200):
        """Move a servo to position."""
        # Clamp to limits
        servo = self.calibration.servos.get(servo_id)
        if servo:
            position = max(servo.min_value, min(servo.max_value, position))
        else:
            position = max(200, min(2500, position))

        self._send_command(f"servo_move({servo_id}, {position}, {time_ms})")
        self.session.current_positions[servo_id] = position
        return position

    def _start_webcam(self):
        """Start webcam capture in background thread."""
        def capture_loop():
            cap = cv2.VideoCapture(self.webcam_id)
            cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

            while self.webcam is not None:
                ret, frame = cap.read()
                if ret:
                    with self.frame_lock:
                        self.frame = frame
                time.sleep(0.033)  # ~30 fps

            cap.release()

        self.webcam = threading.Thread(target=capture_loop, daemon=True)
        self.webcam.start()

    def _get_frame(self) -> Optional[Any]:
        """Get current webcam frame."""
        with self.frame_lock:
            return self.frame.copy() if self.frame is not None else None

    def _draw_overlay(self, frame) -> Any:
        """Draw servo info overlay on frame."""
        if frame is None:
            # No webcam frame yet: return a blank canvas instead
            return np.full((480, 640, 3), 40, dtype=np.uint8)

        h, w = frame.shape[:2]

        # Semi-transparent overlay on right side
        overlay = frame.copy()
        cv2.rectangle(overlay, (w - 250, 0), (w, h), (40, 40, 40), -1)
        frame = cv2.addWeighted(overlay, 0.7, frame, 0.3, 0)

        # Title
        cv2.putText(frame, "TEACH MODE", (w - 240, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 255), 2)

        # Servo list
        y = 70
        for i, sid in enumerate(self.session.servo_ids):
            servo = self.calibration.servos.get(sid)
            name = servo.name if servo else f"servo_{sid}"
            pos = self.session.current_positions.get(sid, 1500)

            # Highlight selected
            is_selected = (i == self.session.selected_servo_idx)
            color = (0, 255, 0) if is_selected else (200, 200, 200)
            marker = ">" if is_selected else " "

            # Draw servo info
            cv2.putText(frame, f"{marker} {name[:12]}", (w - 240, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)

            # Draw position bar
            if servo:
                pct = (pos - servo.min_value) / max(1, servo.max_value - servo.min_value)
            else:
                pct = (pos - 500) / 2000
            bar_w = int(80 * pct)
            cv2.rectangle(frame, (w - 100, y - 10), (w - 20, y), (60, 60, 60), -1)
            cv2.rectangle(frame, (w - 100, y - 10), (w - 100 + bar_w, y), color, -1)
            cv2.putText(frame, str(pos), (w - 100, y + 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, color, 1)

            y += 45

        # Controls hint
        cv2.putText(frame, "[Arrows] Move", (w - 240, h - 100),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (150, 150, 150), 1)
        cv2.putText(frame, "[S] Save pose", (w - 240, h - 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (150, 150, 150), 1)
        cv2.putText(frame, "[P] Playback", (w - 240, h - 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (150, 150, 150), 1)
        cv2.putText(frame, "[Q] Quit", (w - 240, h - 40),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.4, (150, 150, 150), 1)

        # Recorded poses
        if self.session.recorded_poses:
            cv2.putText(frame, f"Recorded: {len(self.session.recorded_poses)}", (10, h - 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)

        return frame

    def _save_pose(self, name: Optional[str] = None):
        """Save current position as a named pose."""
        if name is None:
            # Generate name
            existing = len(self.calibration.poses)
            name = f"pose_{existing + 1}"

        pose = Pose(
            name=name,
            description="",
            servo_positions=dict(self.session.current_positions),
            transition_time_ms=500,
        )
        self.calibration.poses[name] = pose
        self.session.recorded_poses.append(name)

        # Save calibration
        save_calibration(self.calibration)

        print(f"Saved pose: {name}")
        return name

    def _apply_pose(self, pose_name: str, time_ms: int = 500):
        """Apply a saved pose."""
        pose = self.calibration.poses.get(pose_name)
        if not pose:
            print(f"Pose not found: {pose_name}")
            return

        for sid, pos in pose.servo_positions.items():
            self._move_servo(sid, pos, time_ms)

        time.sleep(time_ms / 1000 + 0.1)

    def _create_action(self, name: str) -> Optional[Action]:
        """Create an action from recorded poses."""
        if not self.session.recorded_poses:
            print("No poses recorded")
            return None

        steps = []
        for pose_name in self.session.recorded_poses:
            steps.append(ActionStep(
                pose_name=pose_name,
                duration_ms=500,
                wait_after_ms=200,
            ))

        action = Action(
            name=name,
            action_type=ActionType.MANIPULATION,
            description=f"Action with {len(steps)} steps",
            steps=steps,
        )

        self.library.actions[name] = action
        save_skill_library(self.library)

        print(f"Created action: {name} ({len(steps)} steps)")
        return action

    def _playback_action(self, action_name: str):
        """Playback an action."""
        action = self.library.actions.get(action_name)
        if not action:
            print(f"Action not found: {action_name}")
            return

        print(f"Playing: {action_name}")
        for step in action.steps:
            self._apply_pose(step.pose_name, step.duration_ms)
            if step.wait_after_ms > 0:
                time.sleep(step.wait_after_ms / 1000)

    def run(self):
        """Run the teaching interface."""
        print("\n=== TEACHING MODE ===")
        print("Controls:")
        print("  ↑/↓  Select servo")
        print("  ←/→  Adjust position")
        print("  S    Save pose")
        print("  A    Create action from poses")
        print("  P    Playback last action")
        print("  C    Clear recorded poses")
        print("  Q    Quit and save")
        print()

        # Start webcam
        self._start_webcam()
        time.sleep(0.5)

        # Create window
        cv2.namedWindow(self.session.window_name, cv2.WINDOW_NORMAL)
        cv2.resizeWindow(self.session.window_name, 900, 600)

        step_size = 50  # Default step size
        last_action = None

        try:
            while True:
                # Get and display frame
                frame = self._get_frame()
                if frame is not None:
                    display = self._draw_overlay(frame)
                    cv2.imshow(self.session.window_name, display)

                # Handle keyboard
                key = cv2.waitKey(30) & 0xFF

                if key == ord('q') or key == 27:  # Q or ESC
                    break

                elif key == 82 or key == 0:  # Up arrow
                    self.session.selected_servo_idx = max(0, self.session.selected_servo_idx - 1)

                elif key == 84 or key == 1:  # Down arrow
                    self.session.selected_servo_idx = min(
                        len(self.session.servo_ids) - 1,
                        self.session.selected_servo_idx + 1
                    )

                elif key == 81 or key == 2:  # Left arrow
                    if self.session.servo_ids:
                        sid = self.session.servo_ids[self.session.selected_servo_idx]
                        pos = self.session.current_positions.get(sid, 1500)
                        self._move_servo(sid, pos - step_size)

                elif key == 83 or key == 3:  # Right arrow
                    if self.session.servo_ids:
                        sid = self.session.servo_ids[self.session.selected_servo_idx]
                        pos = self.session.current_positions.get(sid, 1500)
                        self._move_servo(sid, pos + step_size)

                elif key == ord('s'):  # Save pose
                    name = self._save_pose()
                    print(f"Pose saved: {name}")

                elif key == ord('a'):  # Create action
                    if self.session.recorded_poses:
                        action_name = f"action_{len(self.library.actions) + 1}"
                        self._create_action(action_name)
                        last_action = action_name
                        self.session.recorded_poses = []  # Clear for next action
                    else:
                        print("No poses recorded. Press S to save poses first.")

                elif key == ord('p'):  # Playback
                    if last_action:
                        self._playback_action(last_action)
                    elif self.library.actions:
                        last_action = list(self.library.actions.keys())[-1]
                        self._playback_action(last_action)
                    else:
                        print("No actions to play")

                elif key == ord('c'):  # Clear recorded
                    self.session.recorded_poses = []
                    print("Cleared recorded poses")

                elif key == ord('r'):  # Reset to center
                    for sid in self.session.servo_ids:
                        servo = self.calibration.servos.get(sid)
                        center = servo.center_value if servo else 1500
                        self._move_servo(sid, center)
                    print("Reset to center positions")

                elif key == ord('+') or key == ord('='):
                    step_size = min(200, step_size + 10)
                    print(f"Step size: {step_size}")

                elif key == ord('-'):
                    step_size = max(10, step_size - 10)
                    print(f"Step size: {step_size}")

        finally:
            # Cleanup
            self.webcam = None  # Stop capture thread
            cv2.destroyAllWindows()

            if self.serial:
                self.serial.close()

            # Final save
            save_calibration(self.calibration)
            save_skill_library(self.library)
            print("\nSession saved.")

def teach_command(
    port: str,
    name: str = "robot",
    robot_type: str = "hiwonder_mechdog",
    webcam_id: int = 0,
    camera_url: Optional[str] = None,
):
    """
    Run the teaching interface.

    ate robot teach --port /dev/cu.usbserial-10 --name mechdog
    """
    interface = TeachingInterface(
        serial_port=port,
        robot_name=name,
        robot_model=robot_type,
        webcam_id=webcam_id,
        camera_url=camera_url,
    )
    interface.run()
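
For reference, the teaching mode added in 0.3.0 can also be driven programmatically rather than through the CLI command shown in the module docstring. A minimal sketch, assuming the 0.3.0 wheel is installed; the serial port path and robot name are illustrative values taken from the docstring example, not required settings:

    from ate.robot.teach import teach_command

    # Opens the OpenCV teaching window, connects to the robot over serial,
    # and persists poses/actions via save_calibration()/save_skill_library().
    teach_command(
        port="/dev/cu.usbserial-10",   # illustrative serial port
        name="mechdog",
        robot_type="hiwonder_mechdog", # default preset key in teach_command
        webcam_id=0,
    )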