opentrons 8.4.0a4__py2.py3-none-any.whl → 8.4.0a6__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of opentrons might be problematic. Click here for more details.

@@ -29,7 +29,7 @@ from ..state.update_types import StateUpdate
29
29
  from ..errors.exceptions import PipetteNotReadyToAspirateError
30
30
  from opentrons.hardware_control import HardwareControlAPI
31
31
  from ..state.update_types import CLEAR
32
- from ..types import CurrentWell, DeckPoint
32
+ from ..types import DeckPoint
33
33
 
34
34
  if TYPE_CHECKING:
35
35
  from ..execution import PipettingHandler, GantryMover, MovementHandler
@@ -104,11 +104,8 @@ class AspirateWhileTrackingImplementation(
104
104
  " The first aspirate following a blow-out must be from a specific well"
105
105
  " so the plunger can be reset in a known safe position."
106
106
  )
107
-
108
- current_position = await self._gantry_mover.get_position(params.pipetteId)
109
- current_location = self._state_view.pipettes.get_current_location()
110
-
111
107
  state_update = StateUpdate()
108
+
112
109
  move_result = await move_to_well(
113
110
  movement=self._movement,
114
111
  model_utils=self._model_utils,
@@ -132,9 +129,9 @@ class AspirateWhileTrackingImplementation(
132
129
  flow_rate=params.flowRate,
133
130
  location_if_error={
134
131
  "retryLocation": (
135
- current_position.x,
136
- current_position.y,
137
- current_position.z,
132
+ move_result.public.position.x,
133
+ move_result.public.position.y,
134
+ move_result.public.position.z,
138
135
  )
139
136
  },
140
137
  command_note_adder=self._command_note_adder,
@@ -150,58 +147,40 @@ class AspirateWhileTrackingImplementation(
150
147
  z=position_after_aspirate.z,
151
148
  )
152
149
  if isinstance(aspirate_result, DefinedErrorData):
153
- if (
154
- isinstance(current_location, CurrentWell)
155
- and current_location.pipette_id == params.pipetteId
156
- ):
157
- return DefinedErrorData(
158
- public=aspirate_result.public,
159
- state_update=aspirate_result.state_update.set_liquid_operated(
160
- labware_id=current_location.labware_id,
161
- well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well(
162
- current_location.labware_id,
163
- current_location.well_name,
164
- params.pipetteId,
165
- ),
166
- volume_added=CLEAR,
167
- ),
168
- state_update_if_false_positive=aspirate_result.state_update_if_false_positive,
169
- )
170
- else:
171
- return aspirate_result
172
- else:
173
- if (
174
- isinstance(current_location, CurrentWell)
175
- and current_location.pipette_id == params.pipetteId
176
- ):
177
- return SuccessData(
178
- public=AspirateWhileTrackingResult(
179
- volume=aspirate_result.public.volume,
180
- position=result_deck_point,
181
- ),
182
- state_update=aspirate_result.state_update.set_liquid_operated(
183
- labware_id=current_location.labware_id,
184
- well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well(
185
- current_location.labware_id,
186
- current_location.well_name,
187
- params.pipetteId,
188
- ),
189
- volume_added=-aspirate_result.public.volume
190
- * self._state_view.geometry.get_nozzles_per_well(
191
- current_location.labware_id,
192
- current_location.well_name,
193
- params.pipetteId,
194
- ),
195
- ),
196
- )
197
- else:
198
- return SuccessData(
199
- public=AspirateWhileTrackingResult(
200
- volume=aspirate_result.public.volume,
201
- position=result_deck_point,
150
+ return DefinedErrorData(
151
+ public=aspirate_result.public,
152
+ state_update=aspirate_result.state_update.set_liquid_operated(
153
+ labware_id=params.labwareId,
154
+ well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well(
155
+ params.labwareId,
156
+ params.wellName,
157
+ params.pipetteId,
202
158
  ),
203
- state_update=aspirate_result.state_update,
204
- )
159
+ volume_added=CLEAR,
160
+ ),
161
+ state_update_if_false_positive=aspirate_result.state_update_if_false_positive,
162
+ )
163
+
164
+ return SuccessData(
165
+ public=AspirateWhileTrackingResult(
166
+ volume=aspirate_result.public.volume,
167
+ position=result_deck_point,
168
+ ),
169
+ state_update=aspirate_result.state_update.set_liquid_operated(
170
+ labware_id=params.labwareId,
171
+ well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well(
172
+ params.labwareId,
173
+ params.wellName,
174
+ params.pipetteId,
175
+ ),
176
+ volume_added=-aspirate_result.public.volume
177
+ * self._state_view.geometry.get_nozzles_per_well(
178
+ params.labwareId,
179
+ params.wellName,
180
+ params.pipetteId,
181
+ ),
182
+ ),
183
+ )
205
184
 
206
185
 
207
186
  class AspirateWhileTracking(
@@ -9,7 +9,7 @@ from pydantic import Field
9
9
  from pydantic.json_schema import SkipJsonSchema
10
10
 
11
11
  from ..state.update_types import CLEAR, StateUpdate
12
- from ..types import CurrentWell, DeckPoint
12
+ from ..types import DeckPoint
13
13
  from .pipetting_common import (
14
14
  PipetteIdMixin,
15
15
  DispenseVolumeMixin,
@@ -99,9 +99,6 @@ class DispenseWhileTrackingImplementation(
99
99
 
100
100
  # TODO(pbm, 10-15-24): call self._state_view.geometry.validate_dispense_volume_into_well()
101
101
 
102
- current_location = self._state_view.pipettes.get_current_location()
103
- current_position = await self._gantry_mover.get_position(params.pipetteId)
104
-
105
102
  state_update = StateUpdate()
106
103
  move_result = await move_to_well(
107
104
  movement=self._movement,
@@ -110,7 +107,6 @@ class DispenseWhileTrackingImplementation(
110
107
  labware_id=params.labwareId,
111
108
  well_name=params.wellName,
112
109
  well_location=params.wellLocation,
113
- operation_volume=-params.volume,
114
110
  )
115
111
  state_update.append(move_result.state_update)
116
112
  if isinstance(move_result, DefinedErrorData):
@@ -127,9 +123,9 @@ class DispenseWhileTrackingImplementation(
127
123
  push_out=params.pushOut,
128
124
  location_if_error={
129
125
  "retryLocation": (
130
- current_position.x,
131
- current_position.y,
132
- current_position.z,
126
+ move_result.public.position.x,
127
+ move_result.public.position.y,
128
+ move_result.public.position.z,
133
129
  )
134
130
  },
135
131
  pipetting=self._pipetting,
@@ -145,67 +141,40 @@ class DispenseWhileTrackingImplementation(
145
141
  )
146
142
 
147
143
  if isinstance(dispense_result, DefinedErrorData):
148
- if (
149
- isinstance(current_location, CurrentWell)
150
- and current_location.pipette_id == params.pipetteId
151
- ):
152
- return DefinedErrorData(
153
- public=dispense_result.public,
154
- state_update=dispense_result.state_update.set_liquid_operated(
155
- labware_id=current_location.labware_id,
156
- well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well(
157
- current_location.labware_id,
158
- current_location.well_name,
159
- params.pipetteId,
160
- ),
161
- volume_added=CLEAR,
162
- ),
163
- state_update_if_false_positive=dispense_result.state_update_if_false_positive,
164
- )
165
- else:
166
- return dispense_result
167
- else:
168
- if (
169
- isinstance(current_location, CurrentWell)
170
- and current_location.pipette_id == params.pipetteId
171
- ):
172
- volume_added = (
173
- self._state_view.pipettes.get_liquid_dispensed_by_ejecting_volume(
174
- pipette_id=params.pipetteId,
175
- volume=dispense_result.public.volume,
176
- )
177
- )
178
- if volume_added is not None:
179
- volume_added *= self._state_view.geometry.get_nozzles_per_well(
180
- current_location.labware_id,
181
- current_location.well_name,
144
+ return DefinedErrorData(
145
+ public=dispense_result.public,
146
+ state_update=dispense_result.state_update.set_liquid_operated(
147
+ labware_id=params.labwareId,
148
+ well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well(
149
+ params.labwareId,
150
+ params.wellName,
182
151
  params.pipetteId,
183
- )
184
- return SuccessData(
185
- public=DispenseWhileTrackingResult(
186
- volume=dispense_result.public.volume,
187
- position=result_deck_point,
188
- ),
189
- state_update=dispense_result.state_update.set_liquid_operated(
190
- labware_id=current_location.labware_id,
191
- well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well(
192
- current_location.labware_id,
193
- current_location.well_name,
194
- params.pipetteId,
195
- ),
196
- volume_added=volume_added
197
- if volume_added is not None
198
- else CLEAR,
199
152
  ),
200
- )
201
- else:
202
- return SuccessData(
203
- public=DispenseWhileTrackingResult(
204
- volume=dispense_result.public.volume,
205
- position=result_deck_point,
206
- ),
207
- state_update=dispense_result.state_update,
208
- )
153
+ volume_added=CLEAR,
154
+ ),
155
+ state_update_if_false_positive=dispense_result.state_update_if_false_positive,
156
+ )
157
+
158
+ return SuccessData(
159
+ public=DispenseWhileTrackingResult(
160
+ volume=dispense_result.public.volume,
161
+ position=result_deck_point,
162
+ ),
163
+ state_update=dispense_result.state_update.set_liquid_operated(
164
+ labware_id=params.labwareId,
165
+ well_names=self._state_view.geometry.get_wells_covered_by_pipette_with_active_well(
166
+ params.labwareId,
167
+ params.wellName,
168
+ params.pipetteId,
169
+ ),
170
+ volume_added=dispense_result.public.volume
171
+ * self._state_view.geometry.get_nozzles_per_well(
172
+ params.labwareId,
173
+ params.wellName,
174
+ params.pipetteId,
175
+ ),
176
+ ),
177
+ )
209
178
 
210
179
 
211
180
  class DispenseWhileTracking(
@@ -246,6 +246,7 @@ class HardwarePipettingHandler(PipettingHandler):
246
246
  flow_rate=flow_rate,
247
247
  volume=adjusted_volume,
248
248
  push_out=push_out,
249
+ is_full_dispense=is_full_dispense,
249
250
  )
250
251
  return adjusted_volume
251
252
 
@@ -1,6 +1,6 @@
1
1
  """Tip pickup and drop procedures."""
2
2
 
3
- from typing import Optional, Dict
3
+ from typing import Optional, Dict, Tuple
4
4
  from typing_extensions import Protocol as TypingProtocol
5
5
 
6
6
  from opentrons.hardware_control import HardwareControlAPI
@@ -201,6 +201,18 @@ async def _available_for_nozzle_layout( # noqa: C901
201
201
  }
202
202
 
203
203
 
204
+ def tip_on_left_side_96(back_left_nozzle: str) -> bool:
205
+ """Return if there is a tip on the left edge of the 96 channel."""
206
+ left_most_column = int(back_left_nozzle[1:])
207
+ return left_most_column == 1
208
+
209
+
210
+ def tip_on_right_side_96(front_right_nozzle: str) -> bool:
211
+ """Return if there is a tip on the right edge of the 96 channel."""
212
+ right_most_column = int(front_right_nozzle[1:])
213
+ return right_most_column == 12
214
+
215
+
204
216
  class HardwareTipHandler(TipHandler):
205
217
  """Pick up and drop tips, using the Hardware API."""
206
218
 
@@ -237,6 +249,50 @@ class HardwareTipHandler(TipHandler):
237
249
  channels, style, primary_nozzle, front_right_nozzle, back_left_nozzle
238
250
  )
239
251
 
252
+ def get_tip_presence_config(
253
+ self, pipette_id: str
254
+ ) -> Tuple[bool, Optional[InstrumentProbeType]]:
255
+ """Return the supported settings for tip presence on a given pipette depending on its current nozzle map."""
256
+ follow_singular_sensor = None
257
+
258
+ unsupported_layout_types_96 = [NozzleConfigurationType.SINGLE]
259
+ # NOTE: (09-20-2024) Currently, on multi-channel pipettes, utilizing less than 4 nozzles risks false positives on the tip presence sensor
260
+ supported_partial_nozzle_minimum = 4
261
+
262
+ nozzle_configuration = self._state_view.pipettes.get_nozzle_configuration(
263
+ pipette_id=pipette_id
264
+ )
265
+
266
+ match self._state_view.pipettes.get_channels(pipette_id):
267
+ case 1:
268
+ tip_presence_supported = True
269
+ case 8:
270
+ tip_presence_supported = (
271
+ nozzle_configuration.tip_count >= supported_partial_nozzle_minimum
272
+ )
273
+ case 96:
274
+ tip_presence_supported = (
275
+ nozzle_configuration.configuration
276
+ not in unsupported_layout_types_96
277
+ and nozzle_configuration.tip_count
278
+ >= supported_partial_nozzle_minimum
279
+ )
280
+ if (
281
+ nozzle_configuration.configuration != NozzleConfigurationType.FULL
282
+ and tip_presence_supported
283
+ ):
284
+ use_left = tip_on_left_side_96(nozzle_configuration.back_left)
285
+ use_right = tip_on_right_side_96(nozzle_configuration.front_right)
286
+ if not (use_left and use_right):
287
+ if use_left:
288
+ follow_singular_sensor = InstrumentProbeType.PRIMARY
289
+ else:
290
+ follow_singular_sensor = InstrumentProbeType.SECONDARY
291
+ case _:
292
+ raise ValueError("Unknown pipette type.")
293
+
294
+ return (tip_presence_supported, follow_singular_sensor)
295
+
240
296
  async def pick_up_tip(
241
297
  self,
242
298
  pipette_id: str,
@@ -266,9 +322,18 @@ class HardwareTipHandler(TipHandler):
266
322
  await self._hardware_api.tip_pickup_moves(
267
323
  mount=hw_mount, presses=None, increment=None
268
324
  )
269
- if do_not_ignore_tip_presence:
325
+
326
+ tip_presence_supported, follow_singular_sensor = self.get_tip_presence_config(
327
+ pipette_id
328
+ )
329
+
330
+ if do_not_ignore_tip_presence and tip_presence_supported:
270
331
  try:
271
- await self.verify_tip_presence(pipette_id, TipPresenceStatus.PRESENT)
332
+ await self.verify_tip_presence(
333
+ pipette_id,
334
+ TipPresenceStatus.PRESENT,
335
+ follow_singular_sensor=follow_singular_sensor,
336
+ )
272
337
  except TipNotAttachedError as e:
273
338
  raise PickUpTipTipNotAttachedError(tip_geometry=tip_geometry) from e
274
339
 
@@ -350,30 +415,6 @@ class HardwareTipHandler(TipHandler):
350
415
  follow_singular_sensor: Optional[InstrumentProbeType] = None,
351
416
  ) -> None:
352
417
  """See documentation on abstract base class."""
353
- nozzle_configuration = self._state_view.pipettes.get_nozzle_configuration(
354
- pipette_id=pipette_id
355
- )
356
-
357
- # Configuration metrics by which tip presence checking is ignored
358
- unsupported_pipette_types = [8, 96]
359
- unsupported_layout_types = [
360
- NozzleConfigurationType.SINGLE,
361
- NozzleConfigurationType.COLUMN,
362
- ]
363
- # NOTE: (09-20-2024) Current on multi-channel pipettes, utilizing less than 4 nozzles risks false positives on the tip presence sensor
364
- supported_partial_nozzle_minimum = 4
365
-
366
- if (
367
- nozzle_configuration is not None
368
- and self._state_view.pipettes.get_channels(pipette_id)
369
- in unsupported_pipette_types
370
- and nozzle_configuration.configuration in unsupported_layout_types
371
- and len(nozzle_configuration.map_store) < supported_partial_nozzle_minimum
372
- ):
373
- # Tip presence sensing is not supported for single tip pick up on the 96ch Flex Pipette, nor with single and some partial layous of the 8ch Flex Pipette.
374
- # This is due in part to a press distance tolerance which creates a risk case for false positives. In the case of single tip, the mechanical tolerance
375
- # for presses with 100% success is below the minimum average achieved press distance for a given multi channel pipette in that configuration.
376
- return
377
418
  try:
378
419
  ot3api = ensure_ot3_hardware(hardware_api=self._hardware_api)
379
420
  hw_mount = self._get_hw_mount(pipette_id)
@@ -371,7 +371,7 @@ def find_volume_at_well_height(
371
371
  max_height = volumetric_capacity[-1][0]
372
372
  if target_height < 0 or target_height > max_height:
373
373
  raise InvalidLiquidHeightFound(
374
- "Invalid target height {target_height} mm; max well height is {max_height} mm."
374
+ f"Invalid target height {target_height} mm; max well height is {max_height} mm."
375
375
  )
376
376
  # volumes in volumetric_capacity are relative to each frustum,
377
377
  # so we have to find the volume of all the full sections enclosed
@@ -7,6 +7,7 @@ from numpy.typing import NDArray
7
7
  from typing import Optional, List, Tuple, Union, cast, TypeVar, Dict, Set
8
8
  from dataclasses import dataclass
9
9
  from functools import cached_property
10
+ from math import isclose
10
11
 
11
12
  from opentrons.types import (
12
13
  Point,
@@ -487,7 +488,7 @@ class GeometryView:
487
488
  raise OperationLocationNotInWellError(
488
489
  f"Specifying {well_location.origin} with an offset of {well_location.offset} results in an operation location that could be below the bottom of the well"
489
490
  )
490
- elif z_offset < 0:
491
+ elif z_offset < 0 and not isclose(z_offset, 0, abs_tol=0.0000001):
491
492
  if isinstance(well_location, LiquidHandlingWellLocation):
492
493
  raise OperationLocationNotInWellError(
493
494
  f"Specifying {well_location.origin} with an offset of {well_location.offset} and a volume offset of {well_location.volumeOffset} results in an operation location below the bottom of the well"
@@ -1,7 +1,8 @@
1
1
  import logging
2
2
  from logging.config import dictConfig
3
+ from logging.handlers import QueueListener, RotatingFileHandler
3
4
  import sys
4
- from typing import Any, Dict
5
+ from queue import Queue
5
6
 
6
7
  from opentrons.config import CONFIG, ARCHITECTURE, SystemArchitecture
7
8
 
@@ -12,11 +13,33 @@ else:
12
13
  SENSOR_LOG_NAME = "unused"
13
14
 
14
15
 
15
- def _host_config(level_value: int) -> Dict[str, Any]:
16
+ # We want this big enough to smooth over any temporary stalls in journald's ability
17
+ # to consume our records--but bounded, so if we consistently outpace journald for
18
+ # some reason, we don't leak memory or get latency from buffer bloat.
19
+ # 50000 is basically an arbitrary guess.
20
+ _LOG_QUEUE_SIZE = 50000
21
+
22
+
23
+ log_queue = Queue[logging.LogRecord](maxsize=_LOG_QUEUE_SIZE)
24
+ """A buffer through which log records will pass.
25
+
26
+ This is intended to work around problems when our logs are going to journald:
27
+ we think journald can block for a while when it flushes records to the filesystem,
28
+ and the backpressure from that will cause calls like `log.debug()` to block and
29
+ interfere with timing-sensitive hardware control.
30
+ https://github.com/Opentrons/opentrons/issues/18034
31
+
32
+ `log_init()` will configure all the logs that this package knows about to pass through
33
+ this queue. This queue is exposed so consumers of this package (i.e. robot-server)
34
+ can do the same thing with their own logs, which is important to preserve ordering.
35
+ """
36
+
37
+
38
+ def _config_for_host(level_value: int) -> None:
16
39
  serial_log_filename = CONFIG["serial_log_file"]
17
40
  api_log_filename = CONFIG["api_log_file"]
18
41
  sensor_log_filename = CONFIG["sensor_log_file"]
19
- return {
42
+ config = {
20
43
  "version": 1,
21
44
  "disable_existing_loggers": False,
22
45
  "formatters": {
@@ -90,13 +113,20 @@ def _host_config(level_value: int) -> Dict[str, Any]:
90
113
  },
91
114
  }
92
115
 
116
+ dictConfig(config)
93
117
 
94
- def _buildroot_config(level_value: int) -> Dict[str, Any]:
118
+
119
+ def _config_for_robot(level_value: int) -> None:
95
120
  # Import systemd.journald here since it is generally unavailable on non
96
121
  # linux systems and we probably don't want to use it on linux desktops
97
122
  # either
123
+ from systemd.journal import JournalHandler # type: ignore
124
+
98
125
  sensor_log_filename = CONFIG["sensor_log_file"]
99
- return {
126
+
127
+ sensor_log_queue = Queue[logging.LogRecord](maxsize=_LOG_QUEUE_SIZE)
128
+
129
+ config = {
100
130
  "version": 1,
101
131
  "disable_existing_loggers": False,
102
132
  "formatters": {
@@ -104,36 +134,38 @@ def _buildroot_config(level_value: int) -> Dict[str, Any]:
104
134
  },
105
135
  "handlers": {
106
136
  "api": {
107
- "class": "systemd.journal.JournalHandler",
137
+ "class": "opentrons.util.logging_queue_handler.CustomQueueHandler",
108
138
  "level": logging.DEBUG,
109
139
  "formatter": "message_only",
110
- "SYSLOG_IDENTIFIER": "opentrons-api",
140
+ "extra": {"SYSLOG_IDENTIFIER": "opentrons-api"},
141
+ "queue": log_queue,
111
142
  },
112
143
  "serial": {
113
- "class": "systemd.journal.JournalHandler",
144
+ "class": "opentrons.util.logging_queue_handler.CustomQueueHandler",
114
145
  "level": logging.DEBUG,
115
146
  "formatter": "message_only",
116
- "SYSLOG_IDENTIFIER": "opentrons-api-serial",
147
+ "extra": {"SYSLOG_IDENTIFIER": "opentrons-api-serial"},
148
+ "queue": log_queue,
117
149
  },
118
150
  "can_serial": {
119
- "class": "systemd.journal.JournalHandler",
151
+ "class": "opentrons.util.logging_queue_handler.CustomQueueHandler",
120
152
  "level": logging.DEBUG,
121
153
  "formatter": "message_only",
122
- "SYSLOG_IDENTIFIER": "opentrons-api-serial-can",
154
+ "extra": {"SYSLOG_IDENTIFIER": "opentrons-api-serial-can"},
155
+ "queue": log_queue,
123
156
  },
124
157
  "usbbin_serial": {
125
- "class": "systemd.journal.JournalHandler",
158
+ "class": "opentrons.util.logging_queue_handler.CustomQueueHandler",
126
159
  "level": logging.DEBUG,
127
160
  "formatter": "message_only",
128
- "SYSLOG_IDENTIFIER": "opentrons-api-serial-usbbin",
161
+ "extra": {"SYSLOG_IDENTIFIER": "opentrons-api-serial-usbbin"},
162
+ "queue": log_queue,
129
163
  },
130
164
  "sensor": {
131
- "class": "logging.handlers.RotatingFileHandler",
132
- "formatter": "message_only",
133
- "filename": sensor_log_filename,
134
- "maxBytes": 1000000,
165
+ "class": "opentrons.util.logging_queue_handler.CustomQueueHandler",
135
166
  "level": logging.DEBUG,
136
- "backupCount": 3,
167
+ "formatter": "message_only",
168
+ "queue": sensor_log_queue,
137
169
  },
138
170
  },
139
171
  "loggers": {
@@ -169,12 +201,47 @@ def _buildroot_config(level_value: int) -> Dict[str, Any]:
169
201
  },
170
202
  }
171
203
 
204
+ # Start draining from the queue and sending messages to journald.
205
+ # Then, stash the queue listener in a global variable so it doesn't get garbage-collected.
206
+ # I don't know if we actually need to do this, but let's not find out the hard way.
207
+ global _queue_listener
208
+ if _queue_listener is not None:
209
+ # In case this log init function was called multiple times for some reason.
210
+ _queue_listener.stop()
211
+ _queue_listener = QueueListener(log_queue, JournalHandler())
212
+ _queue_listener.start()
213
+
214
+ # Sensor logs are a special one-off thing that go to their own file instead of journald.
215
+ # We apply the same QueueListener performance workaround for basically the same reasons.
216
+ sensor_rotating_file_handler = RotatingFileHandler(
217
+ filename=sensor_log_filename, maxBytes=1000000, backupCount=3
218
+ )
219
+ sensor_rotating_file_handler.setLevel(logging.DEBUG)
220
+ sensor_rotating_file_handler.setFormatter(logging.Formatter(fmt="%(message)s"))
221
+ global _sensor_queue_listener
222
+ if _sensor_queue_listener is not None:
223
+ _sensor_queue_listener.stop()
224
+ _sensor_queue_listener = QueueListener(
225
+ sensor_log_queue, sensor_rotating_file_handler
226
+ )
227
+ _sensor_queue_listener.start()
228
+
229
+ dictConfig(config)
172
230
 
173
- def _config(arch: SystemArchitecture, level_value: int) -> Dict[str, Any]:
174
- return {
175
- SystemArchitecture.YOCTO: _buildroot_config,
176
- SystemArchitecture.BUILDROOT: _buildroot_config,
177
- SystemArchitecture.HOST: _host_config,
231
+ # TODO(2025-04-15): We need some kind of log_deinit() function to call
232
+ # queue_listener.stop() before the process ends. Not doing that means we're
233
+ # dropping some records when the process shuts down.
234
+
235
+
236
+ _queue_listener: QueueListener | None = None
237
+ _sensor_queue_listener: QueueListener | None = None
238
+
239
+
240
+ def _config(arch: SystemArchitecture, level_value: int) -> None:
241
+ {
242
+ SystemArchitecture.YOCTO: _config_for_robot,
243
+ SystemArchitecture.BUILDROOT: _config_for_robot,
244
+ SystemArchitecture.HOST: _config_for_host,
178
245
  }[arch](level_value)
179
246
 
180
247
 
@@ -191,6 +258,8 @@ def log_init(level_name: str) -> None:
191
258
  f"Defaulting to {fallback_log_level}\n"
192
259
  )
193
260
  ot_log_level = fallback_log_level
261
+
262
+ # todo(mm, 2025-04-14): Use logging.getLevelNamesMapping() when we have Python >=3.11.
194
263
  level_value = logging._nameToLevel[ot_log_level]
195
- logging_config = _config(ARCHITECTURE, level_value)
196
- dictConfig(logging_config)
264
+
265
+ _config(ARCHITECTURE, level_value)