opentrons 8.7.0a5__py3-none-any.whl → 8.7.0a7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of opentrons might be problematic. Click here for more details.

Files changed (144) hide show
  1. opentrons/_version.py +2 -2
  2. opentrons/drivers/asyncio/communication/serial_connection.py +129 -52
  3. opentrons/drivers/heater_shaker/abstract.py +5 -0
  4. opentrons/drivers/heater_shaker/driver.py +10 -0
  5. opentrons/drivers/heater_shaker/simulator.py +4 -0
  6. opentrons/drivers/thermocycler/abstract.py +6 -0
  7. opentrons/drivers/thermocycler/driver.py +61 -10
  8. opentrons/drivers/thermocycler/simulator.py +6 -0
  9. opentrons/hardware_control/api.py +24 -5
  10. opentrons/hardware_control/backends/controller.py +8 -2
  11. opentrons/hardware_control/backends/ot3controller.py +3 -0
  12. opentrons/hardware_control/backends/ot3simulator.py +2 -1
  13. opentrons/hardware_control/backends/simulator.py +2 -1
  14. opentrons/hardware_control/backends/subsystem_manager.py +5 -2
  15. opentrons/hardware_control/emulation/abstract_emulator.py +6 -4
  16. opentrons/hardware_control/emulation/connection_handler.py +8 -5
  17. opentrons/hardware_control/emulation/heater_shaker.py +12 -3
  18. opentrons/hardware_control/emulation/settings.py +1 -1
  19. opentrons/hardware_control/emulation/thermocycler.py +67 -15
  20. opentrons/hardware_control/module_control.py +82 -8
  21. opentrons/hardware_control/modules/__init__.py +3 -0
  22. opentrons/hardware_control/modules/absorbance_reader.py +11 -4
  23. opentrons/hardware_control/modules/flex_stacker.py +38 -9
  24. opentrons/hardware_control/modules/heater_shaker.py +42 -5
  25. opentrons/hardware_control/modules/magdeck.py +8 -4
  26. opentrons/hardware_control/modules/mod_abc.py +13 -5
  27. opentrons/hardware_control/modules/tempdeck.py +25 -5
  28. opentrons/hardware_control/modules/thermocycler.py +68 -11
  29. opentrons/hardware_control/modules/types.py +20 -1
  30. opentrons/hardware_control/modules/utils.py +11 -4
  31. opentrons/hardware_control/nozzle_manager.py +3 -0
  32. opentrons/hardware_control/ot3api.py +26 -5
  33. opentrons/hardware_control/poller.py +22 -8
  34. opentrons/hardware_control/scripts/update_module_fw.py +5 -0
  35. opentrons/hardware_control/types.py +31 -2
  36. opentrons/legacy_commands/module_commands.py +23 -0
  37. opentrons/legacy_commands/protocol_commands.py +20 -0
  38. opentrons/legacy_commands/types.py +80 -0
  39. opentrons/motion_planning/deck_conflict.py +17 -12
  40. opentrons/motion_planning/waypoints.py +15 -29
  41. opentrons/protocol_api/__init__.py +5 -1
  42. opentrons/protocol_api/_types.py +6 -1
  43. opentrons/protocol_api/core/common.py +3 -1
  44. opentrons/protocol_api/core/engine/_default_labware_versions.py +32 -11
  45. opentrons/protocol_api/core/engine/labware.py +8 -1
  46. opentrons/protocol_api/core/engine/module_core.py +75 -8
  47. opentrons/protocol_api/core/engine/protocol.py +18 -1
  48. opentrons/protocol_api/core/engine/tasks.py +48 -0
  49. opentrons/protocol_api/core/engine/well.py +8 -0
  50. opentrons/protocol_api/core/legacy/legacy_module_core.py +24 -4
  51. opentrons/protocol_api/core/legacy/legacy_protocol_core.py +11 -1
  52. opentrons/protocol_api/core/legacy/legacy_well_core.py +4 -0
  53. opentrons/protocol_api/core/legacy/tasks.py +19 -0
  54. opentrons/protocol_api/core/legacy_simulator/legacy_protocol_core.py +14 -2
  55. opentrons/protocol_api/core/legacy_simulator/tasks.py +19 -0
  56. opentrons/protocol_api/core/module.py +37 -4
  57. opentrons/protocol_api/core/protocol.py +11 -2
  58. opentrons/protocol_api/core/tasks.py +31 -0
  59. opentrons/protocol_api/core/well.py +4 -0
  60. opentrons/protocol_api/labware.py +5 -0
  61. opentrons/protocol_api/module_contexts.py +117 -11
  62. opentrons/protocol_api/protocol_context.py +26 -4
  63. opentrons/protocol_api/robot_context.py +38 -21
  64. opentrons/protocol_api/tasks.py +48 -0
  65. opentrons/protocol_api/validation.py +6 -1
  66. opentrons/protocol_engine/actions/__init__.py +4 -2
  67. opentrons/protocol_engine/actions/actions.py +22 -9
  68. opentrons/protocol_engine/clients/sync_client.py +42 -7
  69. opentrons/protocol_engine/commands/__init__.py +42 -0
  70. opentrons/protocol_engine/commands/absorbance_reader/close_lid.py +2 -15
  71. opentrons/protocol_engine/commands/absorbance_reader/open_lid.py +2 -15
  72. opentrons/protocol_engine/commands/aspirate.py +1 -0
  73. opentrons/protocol_engine/commands/command.py +1 -0
  74. opentrons/protocol_engine/commands/command_unions.py +49 -0
  75. opentrons/protocol_engine/commands/create_timer.py +83 -0
  76. opentrons/protocol_engine/commands/dispense.py +1 -0
  77. opentrons/protocol_engine/commands/drop_tip.py +32 -8
  78. opentrons/protocol_engine/commands/heater_shaker/__init__.py +14 -0
  79. opentrons/protocol_engine/commands/heater_shaker/common.py +20 -0
  80. opentrons/protocol_engine/commands/heater_shaker/set_and_wait_for_shake_speed.py +5 -4
  81. opentrons/protocol_engine/commands/heater_shaker/set_shake_speed.py +136 -0
  82. opentrons/protocol_engine/commands/heater_shaker/set_target_temperature.py +31 -5
  83. opentrons/protocol_engine/commands/movement_common.py +2 -0
  84. opentrons/protocol_engine/commands/pick_up_tip.py +21 -11
  85. opentrons/protocol_engine/commands/set_tip_state.py +97 -0
  86. opentrons/protocol_engine/commands/temperature_module/set_target_temperature.py +38 -7
  87. opentrons/protocol_engine/commands/thermocycler/__init__.py +16 -0
  88. opentrons/protocol_engine/commands/thermocycler/run_extended_profile.py +6 -0
  89. opentrons/protocol_engine/commands/thermocycler/run_profile.py +8 -0
  90. opentrons/protocol_engine/commands/thermocycler/set_target_block_temperature.py +40 -6
  91. opentrons/protocol_engine/commands/thermocycler/set_target_lid_temperature.py +29 -5
  92. opentrons/protocol_engine/commands/thermocycler/start_run_extended_profile.py +191 -0
  93. opentrons/protocol_engine/commands/touch_tip.py +1 -1
  94. opentrons/protocol_engine/commands/unsafe/unsafe_place_labware.py +6 -22
  95. opentrons/protocol_engine/commands/wait_for_tasks.py +98 -0
  96. opentrons/protocol_engine/errors/__init__.py +4 -0
  97. opentrons/protocol_engine/errors/exceptions.py +55 -0
  98. opentrons/protocol_engine/execution/__init__.py +2 -0
  99. opentrons/protocol_engine/execution/command_executor.py +8 -0
  100. opentrons/protocol_engine/execution/create_queue_worker.py +5 -1
  101. opentrons/protocol_engine/execution/labware_movement.py +9 -12
  102. opentrons/protocol_engine/execution/movement.py +2 -0
  103. opentrons/protocol_engine/execution/queue_worker.py +4 -0
  104. opentrons/protocol_engine/execution/run_control.py +8 -0
  105. opentrons/protocol_engine/execution/task_handler.py +157 -0
  106. opentrons/protocol_engine/protocol_engine.py +75 -34
  107. opentrons/protocol_engine/resources/__init__.py +2 -0
  108. opentrons/protocol_engine/resources/concurrency_provider.py +27 -0
  109. opentrons/protocol_engine/resources/deck_configuration_provider.py +7 -0
  110. opentrons/protocol_engine/resources/labware_validation.py +10 -6
  111. opentrons/protocol_engine/state/_well_math.py +60 -18
  112. opentrons/protocol_engine/state/addressable_areas.py +2 -0
  113. opentrons/protocol_engine/state/commands.py +14 -11
  114. opentrons/protocol_engine/state/geometry.py +213 -374
  115. opentrons/protocol_engine/state/labware.py +52 -102
  116. opentrons/protocol_engine/state/labware_origin_math/errors.py +94 -0
  117. opentrons/protocol_engine/state/labware_origin_math/stackup_origin_to_labware_origin.py +1331 -0
  118. opentrons/protocol_engine/state/module_substates/thermocycler_module_substate.py +37 -0
  119. opentrons/protocol_engine/state/modules.py +21 -8
  120. opentrons/protocol_engine/state/motion.py +44 -0
  121. opentrons/protocol_engine/state/state.py +14 -0
  122. opentrons/protocol_engine/state/state_summary.py +2 -0
  123. opentrons/protocol_engine/state/tasks.py +139 -0
  124. opentrons/protocol_engine/state/tips.py +177 -258
  125. opentrons/protocol_engine/state/update_types.py +16 -9
  126. opentrons/protocol_engine/types/__init__.py +9 -3
  127. opentrons/protocol_engine/types/deck_configuration.py +5 -1
  128. opentrons/protocol_engine/types/instrument.py +8 -1
  129. opentrons/protocol_engine/types/labware.py +1 -13
  130. opentrons/protocol_engine/types/module.py +10 -0
  131. opentrons/protocol_engine/types/tasks.py +38 -0
  132. opentrons/protocol_engine/types/tip.py +9 -0
  133. opentrons/protocol_runner/create_simulating_orchestrator.py +29 -2
  134. opentrons/protocol_runner/run_orchestrator.py +18 -2
  135. opentrons/protocols/api_support/definitions.py +1 -1
  136. opentrons/protocols/api_support/types.py +2 -1
  137. opentrons/simulate.py +48 -15
  138. opentrons/system/camera.py +1 -1
  139. {opentrons-8.7.0a5.dist-info → opentrons-8.7.0a7.dist-info}/METADATA +4 -4
  140. {opentrons-8.7.0a5.dist-info → opentrons-8.7.0a7.dist-info}/RECORD +143 -127
  141. opentrons/protocol_engine/state/_labware_origin_math.py +0 -636
  142. {opentrons-8.7.0a5.dist-info → opentrons-8.7.0a7.dist-info}/WHEEL +0 -0
  143. {opentrons-8.7.0a5.dist-info → opentrons-8.7.0a7.dist-info}/entry_points.txt +0 -0
  144. {opentrons-8.7.0a5.dist-info → opentrons-8.7.0a7.dist-info}/licenses/LICENSE +0 -0
@@ -83,6 +83,7 @@ class MovementHandler:
83
83
  minimum_z_height: Optional[float] = None,
84
84
  speed: Optional[float] = None,
85
85
  operation_volume: Optional[float] = None,
86
+ offset_pipette_for_reservoir_subwells: bool = False,
86
87
  ) -> Point:
87
88
  """Move to a specific well."""
88
89
  self._state_store.geometry.raise_if_labware_inaccessible_by_pipette(
@@ -143,6 +144,7 @@ class MovementHandler:
143
144
  force_direct=force_direct,
144
145
  minimum_z_height=minimum_z_height,
145
146
  operation_volume=operation_volume,
147
+ offset_pipette_for_reservoir_subwells=offset_pipette_for_reservoir_subwells,
146
148
  )
147
149
 
148
150
  speed = self._state_store.pipettes.get_movement_speed(
@@ -51,6 +51,7 @@ class QueueWorker:
51
51
  """
52
52
  if self._worker_task:
53
53
  self._worker_task.cancel()
54
+ self._command_executor.cancel_tasks("Engine cancelled")
54
55
 
55
56
  async def join(self) -> None:
56
57
  """Wait for the worker to finish, propagating any errors."""
@@ -65,7 +66,10 @@ class QueueWorker:
65
66
  pass
66
67
  except Exception as e:
67
68
  log.error("Unhandled exception in QueueWorker job", exc_info=e)
69
+ self._command_executor.cancel_tasks("Engine failed")
68
70
  raise e
71
+ else:
72
+ self._command_executor.cancel_tasks("Engine commands complete")
69
73
 
70
74
  async def _run_commands(self) -> None:
71
75
  async for command_id in self._command_generator():
@@ -31,3 +31,11 @@ class RunControlHandler:
31
31
  """Delay protocol execution for a duration."""
32
32
  if not self._state_store.config.ignore_pause:
33
33
  await asyncio.sleep(seconds)
34
+
35
+ async def wait_for_tasks(self, tasks: list[str]) -> None:
36
+ """Wait for concurrent tasks to complete."""
37
+ await self._state_store.wait_for(
38
+ condition=lambda: self._state_store.tasks.all_tasks_finished_or_any_task_failed(
39
+ task_ids=tasks
40
+ )
41
+ )
@@ -0,0 +1,157 @@
1
+ """Task handling."""
2
+
3
+ from __future__ import annotations
4
+ import logging
5
+ from typing import Protocol, AsyncIterator
6
+ from ..state.state import StateStore
7
+ from ..resources import ModelUtils, ConcurrencyProvider
8
+ from ..types import Task
9
+ import asyncio
10
+ import contextlib
11
+ from ..actions import ActionDispatcher, FinishTaskAction, StartTaskAction
12
+ from ..errors import ErrorOccurrence
13
+ from opentrons_shared_data.errors.exceptions import EnumeratedError, PythonException
14
+
15
+ log = logging.getLogger(__name__)
16
+
17
+
18
+ class TaskFunction(Protocol):
19
+ """The function run inside a task protocol."""
20
+
21
+ async def __call__(self, task_handler: TaskHandler) -> None:
22
+ """The function called inside a task."""
23
+ ...
24
+
25
+
26
+ class TaskHandler:
27
+ """Implementation logic for fast concurrency."""
28
+
29
+ _state_store: StateStore
30
+ _model_utils: ModelUtils
31
+ _concurrency_provider: ConcurrencyProvider
32
+
33
+ def __init__(
34
+ self,
35
+ state_store: StateStore,
36
+ action_dispatcher: ActionDispatcher,
37
+ model_utils: ModelUtils | None = None,
38
+ concurrency_provider: ConcurrencyProvider | None = None,
39
+ ) -> None:
40
+ """Initialize a TaskHandler instance."""
41
+ self._state_store = state_store
42
+ self._model_utils = model_utils or ModelUtils()
43
+ self._concurrency_provider = concurrency_provider or ConcurrencyProvider()
44
+ self._action_dispatcher = action_dispatcher
45
+
46
+ async def create_task(
47
+ self, task_function: TaskFunction, id: str | None = None
48
+ ) -> Task:
49
+ """Create a task and immediately schedule it."""
50
+ task_id = self._model_utils.ensure_id(id)
51
+ asyncio_task = asyncio.create_task(
52
+ task_function(task_handler=self), name=f"engine-task-{task_id}"
53
+ )
54
+
55
+ def _done_callback(task: asyncio.Task[None]) -> None:
56
+ try:
57
+ maybe_exception = task.exception()
58
+ except asyncio.CancelledError as e:
59
+ maybe_exception = e
60
+ if isinstance(maybe_exception, EnumeratedError):
61
+ occurence: ErrorOccurrence | None = ErrorOccurrence.from_failed(
62
+ id=self._model_utils.generate_id(),
63
+ createdAt=self._model_utils.get_timestamp(),
64
+ error=maybe_exception,
65
+ )
66
+ elif isinstance(maybe_exception, BaseException):
67
+ occurence = ErrorOccurrence.from_failed(
68
+ id=self._model_utils.generate_id(),
69
+ createdAt=self._model_utils.get_timestamp(),
70
+ error=PythonException(maybe_exception),
71
+ )
72
+ else:
73
+ occurence = None
74
+ try:
75
+ self._action_dispatcher.dispatch(
76
+ FinishTaskAction(
77
+ task_id=task_id,
78
+ finished_at=self._model_utils.get_timestamp(),
79
+ error=occurence,
80
+ ),
81
+ )
82
+ except BaseException:
83
+ log.exception("Exception in task finish dispatch.")
84
+
85
+ asyncio_task.add_done_callback(_done_callback)
86
+ task = Task(
87
+ id=task_id,
88
+ createdAt=self._model_utils.get_timestamp(),
89
+ asyncioTask=asyncio_task,
90
+ )
91
+ self._action_dispatcher.dispatch(StartTaskAction(task))
92
+ return task
93
+
94
+ @staticmethod
95
+ def _empty_queue(
96
+ queue: "asyncio.Queue[asyncio.Task[None]]", this_task: asyncio.Task[None]
97
+ ) -> None:
98
+ """Empties the queue."""
99
+ try:
100
+ while True:
101
+ task = queue.get_nowait()
102
+ if task is this_task:
103
+ break
104
+ except asyncio.QueueEmpty:
105
+ pass
106
+
107
+ @contextlib.asynccontextmanager
108
+ async def synchronize_cancel_latest(self, group_id: str) -> AsyncIterator[None]:
109
+ """Cancel the incoming task if another task in this group is already running."""
110
+ lock = self._concurrency_provider.lock_for_group(group_id)
111
+ if lock.locked():
112
+ raise asyncio.CancelledError()
113
+ async with lock:
114
+ yield
115
+
116
+ @contextlib.asynccontextmanager
117
+ async def synchronize_cancel_previous(self, group_id: str) -> AsyncIterator[None]:
118
+ """Cancel any previously queued tasks in this group before running."""
119
+ queue = self._concurrency_provider.queue_for_group(group_id)
120
+ while not queue.empty():
121
+ task = queue.get_nowait()
122
+ task.cancel()
123
+ this_task = asyncio.current_task()
124
+ assert this_task is not None
125
+ queue.put_nowait(this_task)
126
+ try:
127
+ yield
128
+ except asyncio.CancelledError:
129
+ raise
130
+ except BaseException:
131
+ self._empty_queue(queue, this_task)
132
+ raise
133
+ else:
134
+ self._empty_queue(queue, this_task)
135
+
136
+ @contextlib.asynccontextmanager
137
+ async def synchronize_sequential(self, group_id: str) -> AsyncIterator[None]:
138
+ """Run tasks one after the other."""
139
+ lock = self._concurrency_provider.lock_for_group(group_id)
140
+ async with lock:
141
+ yield
142
+
143
+ @contextlib.asynccontextmanager
144
+ async def synchronize_concurrent(self, group_id: str) -> AsyncIterator[None]:
145
+ """Run a list of tasks at the same time."""
146
+ yield
147
+
148
+ def cancel_all(self, message: str | None = None) -> None:
149
+ """Cancel all asyncio tasks immediately.
150
+
151
+ Do not call this more than once synchronously because
152
+ that could lead to tasks cancelling more than once.
153
+ It can be called if there are no current tasks. In that case
154
+ nothing will happen.
155
+ """
156
+ for task in self._state_store.tasks.get_all_current():
157
+ task.asyncioTask.cancel(msg=message)
@@ -59,7 +59,6 @@ from .actions import (
59
59
  AddAddressableAreaAction,
60
60
  AddModuleAction,
61
61
  HardwareStoppedAction,
62
- ResetTipsAction,
63
62
  SetPipetteMovementSpeedAction,
64
63
  )
65
64
 
@@ -322,24 +321,10 @@ class ProtocolEngine:
322
321
  )
323
322
  return completed_command
324
323
 
325
- def estop(self) -> None:
326
- """Signal to the engine that an E-stop event occurred.
327
-
328
- If an estop happens while the robot is moving, lower layers physically stop
329
- motion and raise the event as an exception, which fails the Protocol Engine
330
- command. No action from the `ProtocolEngine` caller is needed to handle that.
331
-
332
- However, if an estop happens in between commands, or in the middle of
333
- a command like `comment` or `waitForDuration` that doesn't access the hardware,
334
- `ProtocolEngine` needs to be told about it so it can interrupt the command
335
- and stop executing any more. This method is how to do that.
336
-
337
- This acts roughly like `request_stop()`. After calling this, you should call
338
- `finish()` with an EStopActivatedError.
339
- """
324
+ def _stop_from_asynchronous_error(self) -> None:
340
325
  try:
341
326
  action = self._state_store.commands.validate_action_allowed(
342
- StopAction(from_estop=True)
327
+ StopAction(from_asynchronous_error=True)
343
328
  )
344
329
  except Exception: # todo(mm, 2024-04-16): Catch a more specific type.
345
330
  # This is likely called from some hardware API callback that doesn't care
@@ -358,9 +343,79 @@ class ProtocolEngine:
358
343
  # do this because we want to make sure non-hardware commands, like
359
344
  # `waitForDuration`, are also interrupted.
360
345
  self._get_queue_worker.cancel()
346
+
347
+ def estop(self) -> None:
348
+ """Signal to the engine that an E-stop event occurred.
349
+
350
+ If an estop happens while the robot is moving, lower layers physically stop
351
+ motion and raise the event as an exception, which fails the Protocol Engine
352
+ command. No action from the `ProtocolEngine` caller is needed to handle that.
353
+
354
+ However, if an estop happens in between commands, or in the middle of
355
+ a command like `comment` or `waitForDuration` that doesn't access the hardware,
356
+ `ProtocolEngine` needs to be told about it so it can interrupt the command
357
+ and stop executing any more. This method is how to do that.
358
+
359
+ This acts roughly like `request_stop()`. After calling this, you should call
360
+ `finish()` with an EStopActivatedError.
361
+ """
361
362
  # Unlike self.request_stop(), we don't need to do
362
363
  # self._hardware_api.cancel_execution_and_running_tasks(). Since this was an
363
364
  # E-stop event, the hardware API already knows.
365
+ self._stop_from_asynchronous_error()
366
+
367
+ async def async_module_error(
368
+ self, module_model: ModuleModel, serial: str | None
369
+ ) -> bool:
370
+ """Signal to the engine that an asynchronous module error occurred.
371
+
372
+ The return value of this function signals whether the error is relevant to the protocol
373
+ or not. If the function returns True, the error is relevant. The engine will stop, and
374
+ the caller should call `finish()` with the error object that signaled the error. If
375
+ the function returns False, the error is not relevant. The engine will not stop, and the
376
+ caller should not call `finish()`.
377
+
378
+ Asynchronous module errors are signaled when a module enters a hardware error state
379
+ - for instance, a thermocycler's thermistors fail because of condensation, or a
380
+ heater-shaker's wires fray and snap, or a module is accidentally disconnected. These
381
+ errors are not related to a particular command, even a currently-happening module
382
+ control command for the module in the error state.
383
+
384
+ Similar to an estop error, the error can occur at any time relative to the lifecycle
385
+ of the engine run or of any particular command.
386
+
387
+ Unlike an estop, the motion control hardware will not be raising an error and will not
388
+ stop on its own; the stop action derived from this call will do that.
389
+ """
390
+ if not self._state_store.modules.get_has_module_probably_matching_hardware_details(
391
+ module_model, serial
392
+ ):
393
+ return False
394
+
395
+ if self._state_store.commands.get_is_terminal():
396
+ # Do not stop multiple times; it will be common for this action to fire
397
+ # many times when a module enters an error state, and we don't want to do
398
+ # the stop behavior over and over
399
+ return False
400
+
401
+ self._stop_from_asynchronous_error()
402
+ # like self.request_stop, and unlike self.estop(), we must explicitly request that the
403
+ # hardware stops execution, since not all asynchronous errors will cause the hardware
404
+ # to know that it should stop.
405
+ await self._do_hardware_stop()
406
+ return True
407
+
408
+ async def _do_hardware_stop(self) -> None:
409
+ """Make the hardware stop now."""
410
+ if self._hardware_api.is_movement_execution_taskified():
411
+ # We 'taskify' hardware controller movement functions when running protocols
412
+ # that are not backed by the engine. Such runs cannot be stopped by cancelling
413
+ # the queue worker and hence need to be stopped via the execution manager.
414
+ # `cancel_execution_and_running_tasks()` sets the execution manager in a CANCELLED state
415
+ # and cancels the running tasks, which raises an error and gets us out of the
416
+ # run function execution, just like `_queue_worker.cancel()` does for
417
+ # engine-backed runs.
418
+ await self._hardware_api.cancel_execution_and_running_tasks()
364
419
 
365
420
  async def request_stop(self) -> None:
366
421
  """Make command execution stop soon.
@@ -378,15 +433,7 @@ class ProtocolEngine:
378
433
  action = self._state_store.commands.validate_action_allowed(StopAction())
379
434
  self._action_dispatcher.dispatch(action)
380
435
  self._get_queue_worker.cancel()
381
- if self._hardware_api.is_movement_execution_taskified():
382
- # We 'taskify' hardware controller movement functions when running protocols
383
- # that are not backed by the engine. Such runs cannot be stopped by cancelling
384
- # the queue worker and hence need to be stopped via the execution manager.
385
- # `cancel_execution_and_running_tasks()` sets the execution manager in a CANCELLED state
386
- # and cancels the running tasks, which raises an error and gets us out of the
387
- # run function execution, just like `_queue_worker.cancel()` does for
388
- # engine-backed runs.
389
- await self._hardware_api.cancel_execution_and_running_tasks()
436
+ await self._do_hardware_stop()
390
437
 
391
438
  async def wait_until_complete(self) -> None:
392
439
  """Wait until there are no more commands to execute.
@@ -429,13 +476,13 @@ class ProtocolEngine:
429
476
  post_run_hardware_state: The state in which to leave the gantry and motors in
430
477
  after the run is over.
431
478
  """
432
- if self._state_store.commands.get_is_stopped_by_estop():
479
+ if self._state_store.commands.get_is_stopped_by_async_error():
433
480
  # This handles the case where the E-stop was pressed while we were *not* in the middle
434
481
  # of some hardware interaction that would raise it as an exception. For example, imagine
435
482
  # we were paused between two commands, or imagine we were executing a waitForDuration.
436
483
  drop_tips_after_run = False
437
484
  post_run_hardware_state = PostRunHardwareState.DISENGAGE_IN_PLACE
438
- if error is None:
485
+ if error is None and self._state_store.commands.get_error() is None:
439
486
  error = EStopActivatedError()
440
487
 
441
488
  if error:
@@ -586,12 +633,6 @@ class ProtocolEngine:
586
633
  AddAddressableAreaAction(addressable_area_name)
587
634
  )
588
635
 
589
- def reset_tips(self, labware_id: str) -> None:
590
- """Reset the tip state of a given labware."""
591
- # TODO(mm, 2023-03-10): Safely raise an error if the given labware isn't a
592
- # tip rack?
593
- self._action_dispatcher.dispatch(ResetTipsAction(labware_id=labware_id))
594
-
595
636
  # TODO(mm, 2022-11-10): This is a method on ProtocolEngine instead of a command
596
637
  # as a quick hack to support Python protocols. We should consider making this a
597
638
  # command, or adding speed parameters to existing commands.
@@ -11,6 +11,7 @@ from .labware_data_provider import LabwareDataProvider
11
11
  from .module_data_provider import ModuleDataProvider
12
12
  from .file_provider import FileProvider
13
13
  from .ot3_validation import ensure_ot3_hardware
14
+ from .concurrency_provider import ConcurrencyProvider
14
15
 
15
16
 
16
17
  __all__ = [
@@ -18,6 +19,7 @@ __all__ = [
18
19
  "LabwareDataProvider",
19
20
  "DeckDataProvider",
20
21
  "DeckFixedLabware",
22
+ "ConcurrencyProvider",
21
23
  "ModuleDataProvider",
22
24
  "FileProvider",
23
25
  "ensure_ot3_hardware",
@@ -0,0 +1,27 @@
1
+ """Concurrency primitives providers."""
2
+ import asyncio
3
+
4
+
5
+ class ConcurrencyProvider:
6
+ """Concurrency primitives for engine tasks."""
7
+
8
+ def __init__(self) -> None:
9
+ """Build a concurrency provider."""
10
+ self._locks: dict[str, asyncio.Lock] = {}
11
+ self._queues: dict[str, "asyncio.Queue[asyncio.Task[None]]"] = {}
12
+
13
+ def lock_for_group(self, group_id: str) -> asyncio.Lock:
14
+ """Returns the lock for specified group id."""
15
+ try:
16
+ return self._locks[group_id]
17
+ except KeyError:
18
+ self._locks[group_id] = asyncio.Lock()
19
+ return self._locks[group_id]
20
+
21
+ def queue_for_group(self, group_id: str) -> "asyncio.Queue[asyncio.Task[None]]":
22
+ """Returns the queue for specified group id."""
23
+ try:
24
+ return self._queues[group_id]
25
+ except KeyError:
26
+ self._queues[group_id] = asyncio.Queue()
27
+ return self._queues[group_id]
@@ -2,6 +2,7 @@
2
2
 
3
3
  from typing import List, Set, Tuple
4
4
 
5
+ from opentrons_shared_data.module.types import ModuleOrientation
5
6
  from opentrons_shared_data.deck.types import (
6
7
  DeckDefinitionV5,
7
8
  CutoutFixture,
@@ -124,6 +125,11 @@ def get_addressable_area_from_name(
124
125
  z=addressable_area["boundingBox"]["zDimension"],
125
126
  )
126
127
  features = addressable_area["features"]
128
+ orientation = (
129
+ addressable_area["orientation"]
130
+ if addressable_area["orientation"]
131
+ else ModuleOrientation.NOT_APPLICABLE
132
+ )
127
133
  mating_surface_unit_vector = addressable_area.get("matingSurfaceUnitVector")
128
134
 
129
135
  return AddressableArea(
@@ -138,6 +144,7 @@ def get_addressable_area_from_name(
138
144
  "compatibleModuleTypes", []
139
145
  ),
140
146
  features=features,
147
+ orientation=orientation,
141
148
  )
142
149
  raise AddressableAreaDoesNotExistError(
143
150
  f"Could not find addressable area with name {addressable_area_name}"
@@ -2,6 +2,7 @@
2
2
 
3
3
  from opentrons_shared_data.labware.labware_definition import (
4
4
  LabwareDefinition,
5
+ LabwareDefinition2,
5
6
  LabwareRole,
6
7
  )
7
8
 
@@ -44,15 +45,18 @@ def validate_definition_is_system(definition: LabwareDefinition) -> bool:
44
45
  return LabwareRole.system in definition.allowedRoles
45
46
 
46
47
 
47
- def validate_labware_can_be_stacked(
48
- top_labware_definition: LabwareDefinition, below_labware_load_name: str
48
+ def validate_legacy_labware_can_be_stacked(
49
+ child_labware_definition: LabwareDefinition2, parent_labware_load_name: str
49
50
  ) -> bool:
50
- """Validate that the labware being loaded onto is in the above labware's stackingOffsetWithLabware definition."""
51
+ """Validate that the parent labware is in the child labware's stackingOffsetWithLabware definition.
52
+
53
+ Schema 3 Labware stacking validation is handled in locating features.
54
+ """
51
55
  return (
52
- below_labware_load_name in top_labware_definition.stackingOffsetWithLabware
56
+ parent_labware_load_name in child_labware_definition.stackingOffsetWithLabware
53
57
  or (
54
- "default" in top_labware_definition.stackingOffsetWithLabware
55
- and top_labware_definition.compatibleParentLabware is None
58
+ "default" in child_labware_definition.stackingOffsetWithLabware
59
+ and child_labware_definition.compatibleParentLabware is None
56
60
  )
57
61
  )
58
62
 
@@ -17,13 +17,47 @@ def wells_covered_by_pipette_configuration(
17
17
  """Compute the wells covered by a pipette nozzle configuration."""
18
18
  if len(labware_wells_by_column) >= 12 and len(labware_wells_by_column[0]) >= 8:
19
19
  yield from wells_covered_dense(
20
- nozzle_map,
20
+ nozzle_map.columns,
21
+ nozzle_map.rows,
22
+ nozzle_map.starting_nozzle,
21
23
  target_well,
22
24
  labware_wells_by_column,
23
25
  )
24
26
  elif len(labware_wells_by_column) < 12 and len(labware_wells_by_column[0]) < 8:
25
27
  yield from wells_covered_sparse(
26
- nozzle_map, target_well, labware_wells_by_column
28
+ nozzle_map.columns,
29
+ nozzle_map.rows,
30
+ nozzle_map.starting_nozzle,
31
+ target_well,
32
+ labware_wells_by_column,
33
+ )
34
+ else:
35
+ raise InvalidStoredData(
36
+ "Labware of non-SBS and non-reservoir format cannot be handled"
37
+ )
38
+
39
+
40
+ def wells_covered_by_physical_pipette(
41
+ nozzle_map: NozzleMap,
42
+ target_well: str,
43
+ labware_wells_by_column: list[list[str]],
44
+ ) -> Iterator[str]:
45
+ """Compute the wells covered by a pipette nozzle configuration."""
46
+ if len(labware_wells_by_column) >= 12 and len(labware_wells_by_column[0]) >= 8:
47
+ yield from wells_covered_dense(
48
+ nozzle_map.full_instrument_columns,
49
+ nozzle_map.full_instrument_rows,
50
+ nozzle_map.starting_nozzle,
51
+ target_well,
52
+ labware_wells_by_column,
53
+ )
54
+ elif len(labware_wells_by_column) < 12 and len(labware_wells_by_column[0]) < 8:
55
+ yield from wells_covered_sparse(
56
+ nozzle_map.full_instrument_columns,
57
+ nozzle_map.full_instrument_rows,
58
+ nozzle_map.starting_nozzle,
59
+ target_well,
60
+ labware_wells_by_column,
27
61
  )
28
62
  else:
29
63
  raise InvalidStoredData(
@@ -42,7 +76,11 @@ def row_col_ordinals_from_column_major_map(
42
76
 
43
77
 
44
78
  def wells_covered_dense( # noqa: C901
45
- nozzle_map: NozzleMap, target_well: str, target_wells_by_column: list[list[str]]
79
+ columns: dict[str, list[str]],
80
+ rows: dict[str, list[str]],
81
+ starting_nozzle: str,
82
+ target_well: str,
83
+ target_wells_by_column: list[list[str]],
46
84
  ) -> Iterator[str]:
47
85
  """Get the list of wells covered by a nozzle map on an SBS format labware with a specified multiplier of 96 into the number of wells.
48
86
 
@@ -66,11 +104,11 @@ def wells_covered_dense( # noqa: C901
66
104
  "This labware cannot be used with wells_covered_dense() because it is less dense than an SBS 96 standard"
67
105
  )
68
106
 
69
- for nozzle_column in range(len(nozzle_map.columns)):
107
+ for nozzle_column in range(len(columns)):
70
108
  target_column_offset = nozzle_column * column_downsample
71
- for nozzle_row in range(len(nozzle_map.rows)):
109
+ for nozzle_row in range(len(rows)):
72
110
  target_row_offset = nozzle_row * row_downsample
73
- if nozzle_map.starting_nozzle == "A1":
111
+ if starting_nozzle == "A1":
74
112
  if (
75
113
  target_column_index + target_column_offset
76
114
  < len(target_wells_by_column)
@@ -81,7 +119,7 @@ def wells_covered_dense( # noqa: C901
81
119
  yield target_wells_by_column[
82
120
  target_column_index + target_column_offset
83
121
  ][target_row_index + target_row_offset]
84
- elif nozzle_map.starting_nozzle == "A12":
122
+ elif starting_nozzle == "A12":
85
123
  if (target_column_index - target_column_offset >= 0) and (
86
124
  target_row_index + target_row_offset
87
125
  < len(target_wells_by_column[target_column_index])
@@ -89,7 +127,7 @@ def wells_covered_dense( # noqa: C901
89
127
  yield target_wells_by_column[
90
128
  target_column_index - target_column_offset
91
129
  ][target_row_index + target_row_offset]
92
- elif nozzle_map.starting_nozzle == "H1":
130
+ elif starting_nozzle == "H1":
93
131
  if (
94
132
  target_column_index + target_column_offset
95
133
  < len(target_wells_by_column)
@@ -97,7 +135,7 @@ def wells_covered_dense( # noqa: C901
97
135
  yield target_wells_by_column[
98
136
  target_column_index + target_column_offset
99
137
  ][target_row_index - target_row_offset]
100
- elif nozzle_map.starting_nozzle == "H12":
138
+ elif starting_nozzle == "H12":
101
139
  if (target_column_index - target_column_offset >= 0) and (
102
140
  target_row_index - target_row_offset >= 0
103
141
  ):
@@ -106,12 +144,16 @@ def wells_covered_dense( # noqa: C901
106
144
  ][target_row_index - target_row_offset]
107
145
  else:
108
146
  raise InvalidProtocolData(
109
- f"A pipette nozzle configuration may not having a starting nozzle of {nozzle_map.starting_nozzle}"
147
+ f"A pipette nozzle configuration may not having a starting nozzle of {starting_nozzle}"
110
148
  )
111
149
 
112
150
 
113
151
  def wells_covered_sparse( # noqa: C901
114
- nozzle_map: NozzleMap, target_well: str, target_wells_by_column: list[list[str]]
152
+ columns: dict[str, list[str]],
153
+ rows: dict[str, list[str]],
154
+ starting_nozzle: str,
155
+ target_well: str,
156
+ target_wells_by_column: list[list[str]],
115
157
  ) -> Iterator[str]:
116
158
  """Get the list of wells covered by a nozzle map on a column-oriented reservoir.
117
159
 
@@ -128,9 +170,9 @@ def wells_covered_sparse( # noqa: C901
128
170
  raise InvalidStoredData(
129
171
  "This labware cannot be used with wells_covered_sparse() because it is more dense than an SBS 96 standard."
130
172
  )
131
- for nozzle_column in range(max(1, len(nozzle_map.columns) // column_upsample)):
132
- for nozzle_row in range(max(1, len(nozzle_map.rows) // row_upsample)):
133
- if nozzle_map.starting_nozzle == "A1":
173
+ for nozzle_column in range(max(1, len(columns) // column_upsample)):
174
+ for nozzle_row in range(max(1, len(rows) // row_upsample)):
175
+ if starting_nozzle == "A1":
134
176
  if (
135
177
  target_column_index + nozzle_column < len(target_wells_by_column)
136
178
  ) and (
@@ -140,7 +182,7 @@ def wells_covered_sparse( # noqa: C901
140
182
  yield target_wells_by_column[target_column_index + nozzle_column][
141
183
  target_row_index + nozzle_row
142
184
  ]
143
- elif nozzle_map.starting_nozzle == "A12":
185
+ elif starting_nozzle == "A12":
144
186
  if (target_column_index - nozzle_column >= 0) and (
145
187
  target_row_index + nozzle_row
146
188
  < len(target_wells_by_column[target_column_index])
@@ -148,7 +190,7 @@ def wells_covered_sparse( # noqa: C901
148
190
  yield target_wells_by_column[target_column_index - nozzle_column][
149
191
  target_row_index + nozzle_row
150
192
  ]
151
- elif nozzle_map.starting_nozzle == "H1":
193
+ elif starting_nozzle == "H1":
152
194
  if (
153
195
  target_column_index + nozzle_column
154
196
  < len(target_wells_by_column[target_column_index])
@@ -156,7 +198,7 @@ def wells_covered_sparse( # noqa: C901
156
198
  yield target_wells_by_column[target_column_index + nozzle_column][
157
199
  target_row_index - nozzle_row
158
200
  ]
159
- elif nozzle_map.starting_nozzle == "H12":
201
+ elif starting_nozzle == "H12":
160
202
  if (target_column_index - nozzle_column >= 0) and (
161
203
  target_row_index - nozzle_row >= 0
162
204
  ):
@@ -165,7 +207,7 @@ def wells_covered_sparse( # noqa: C901
165
207
  ]
166
208
  else:
167
209
  raise InvalidProtocolData(
168
- f"A pipette nozzle configuration may not having a starting nozzle of {nozzle_map.starting_nozzle}"
210
+ f"A pipette nozzle configuration may not having a starting nozzle of {starting_nozzle}"
169
211
  )
170
212
 
171
213
 
@@ -5,6 +5,7 @@ from functools import cached_property
5
5
  from typing import Dict, List, Optional, Set
6
6
 
7
7
  from opentrons_shared_data.robot.types import RobotType, RobotDefinition
8
+ from opentrons_shared_data.module.types import ModuleOrientation
8
9
  from opentrons_shared_data.deck.types import (
9
10
  DeckDefinitionV5,
10
11
  SlotDefV3,
@@ -614,6 +615,7 @@ class AddressableAreaView:
614
615
  "displayName": addressable_area.display_name,
615
616
  "compatibleModuleTypes": addressable_area.compatible_module_types,
616
617
  "features": addressable_area.features,
618
+ "orientation": ModuleOrientation.NOT_APPLICABLE,
617
619
  }
618
620
 
619
621
  def get_deck_slot_definitions(self) -> Dict[str, SlotDefV3]: