motorcortex-python 1.0.0rc1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- motorcortex/__init__.py +314 -0
- motorcortex/_connection_state.py +58 -0
- motorcortex/_request_builders.py +157 -0
- motorcortex/_request_utils.py +314 -0
- motorcortex/_subscribe_dispatch.py +90 -0
- motorcortex/exceptions.py +65 -0
- motorcortex/init_threads.py +103 -0
- motorcortex/message_types.py +387 -0
- motorcortex/motorcortex_hash.json +166 -0
- motorcortex/motorcortex_pb2.py +105 -0
- motorcortex/motorcortex_pb2.pyi +1961 -0
- motorcortex/nng_url.py +49 -0
- motorcortex/parameter_tree.py +86 -0
- motorcortex/py.typed +0 -0
- motorcortex/reply.py +108 -0
- motorcortex/request.py +668 -0
- motorcortex/session.py +194 -0
- motorcortex/setup_logger.py +10 -0
- motorcortex/state_callback_handler.py +92 -0
- motorcortex/subscribe.py +400 -0
- motorcortex/subscription.py +414 -0
- motorcortex/timespec.py +173 -0
- motorcortex/version.py +1 -0
- motorcortex_python-1.0.0rc1.dist-info/LICENSE +22 -0
- motorcortex_python-1.0.0rc1.dist-info/METADATA +171 -0
- motorcortex_python-1.0.0rc1.dist-info/RECORD +28 -0
- motorcortex_python-1.0.0rc1.dist-info/WHEEL +5 -0
- motorcortex_python-1.0.0rc1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
#
|
|
2
|
+
# Developer: Alexey Zakharov (alexey.zakharov@vectioneer.com)
|
|
3
|
+
# All rights reserved. Copyright (c) 2026 VECTIONEER.
|
|
4
|
+
#
|
|
5
|
+
|
|
6
|
+
"""Pure helpers used by :class:`motorcortex.request.Request`.
|
|
7
|
+
|
|
8
|
+
None of these touch the socket — they're either kwarg massaging, thread-
|
|
9
|
+
sync waits, or filesystem cache I/O. Extracted so they can be unit-tested
|
|
10
|
+
without a live server. ``Request`` keeps thin static-method shims that
|
|
11
|
+
delegate here, so existing callers and tests stay on the public API.
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import base64
|
|
17
|
+
import glob
|
|
18
|
+
import hashlib
|
|
19
|
+
import json
|
|
20
|
+
import os
|
|
21
|
+
import tempfile
|
|
22
|
+
from threading import Event
|
|
23
|
+
from typing import Any, Callable, Optional, Tuple
|
|
24
|
+
|
|
25
|
+
from motorcortex.exceptions import McxConnectionError
|
|
26
|
+
from motorcortex.setup_logger import logger
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
_CACHE_PREFIX = "mcx-python-pt"
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def parse_connect_kwargs(
    conn_timeout_ms: int = 0,
    timeout_ms: Optional[int] = None,
    recv_timeout_ms: Optional[int] = None,
    certificate: Optional[str] = None,
    login: Optional[str] = None,
    password: Optional[str] = None,
    state_update: Optional[Callable] = None,
    **kwargs: Any,
) -> Tuple[int, Optional[int], Optional[str], Optional[Callable]]:
    """Normalise the keyword bag that ``Request.connect()`` accepts.

    The single non-trivial rule: ``timeout_ms`` is a legacy alias that
    promotes to ``conn_timeout_ms`` when the latter was left at its zero
    default. Everything else (``login``, ``password``, unrecognised
    knobs, ...) is accepted and silently dropped so callers may pass a
    superset of keys without triggering a ``TypeError``.

    Returns:
        ``(conn_timeout_ms, recv_timeout_ms, certificate, state_update)``.
    """
    effective_conn_timeout = conn_timeout_ms
    if not effective_conn_timeout and timeout_ms:
        effective_conn_timeout = timeout_ms
    return effective_conn_timeout, recv_timeout_ms, certificate, state_update
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def wait_for_connection(
    event: Event,
    timeout_sec: float,
    is_connected_fn: Optional[Callable[[], bool]] = None,
) -> bool:
    """Wait on ``event`` and decide whether the connection actually came up.

    Args:
        event: Set by the connect-side code on *any* terminal transition
            (success, failure, or close).
        timeout_sec: Upper bound for the wait; a value ``<= 0`` waits
            forever.
        is_connected_fn: Optional probe that disambiguates why the event
            fired — when given, it must return ``True`` only for a live
            connection.

    Returns:
        ``True`` on success.

    Raises:
        McxConnectionError: The event never fired within ``timeout_sec``,
            or it fired but ``is_connected_fn()`` reported the link as
            down. Both count as connect-side transport failures and
            share the one exception type.
    """
    logger.debug("[REQUEST-WAIT] waitForConnection started with timeout=%ss", timeout_sec)

    wait_forever = timeout_sec <= 0
    if wait_forever:
        logger.debug("[REQUEST-WAIT] Waiting indefinitely for connection event")
    else:
        logger.debug("[REQUEST-WAIT] Waiting up to %ss for connection event", timeout_sec)
    fired = event.wait() if wait_forever else event.wait(timeout_sec)

    if not fired:
        logger.error("[REQUEST-WAIT] Connection timeout after %ss - no event received", timeout_sec)
        raise McxConnectionError(f"Connection timeout after {timeout_sec}s")

    logger.debug("[REQUEST-WAIT] Event received - checking connection status")

    if is_connected_fn is None:
        logger.debug("[REQUEST-WAIT] No connection status check function provided")
    else:
        connected = is_connected_fn()
        logger.debug("[REQUEST-WAIT] Connection status check: connected=%s", connected)
        if not connected:
            logger.error("[REQUEST-WAIT] Event was set but connection failed or was closed")
            raise McxConnectionError("Connection failed or was closed before completion")

    logger.debug("[REQUEST-WAIT] Connection successfully established")
    return True
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
def fetch_parameter_tree(
    tree_hash: int,
    protobuf_types: Any,
    socket: Any,
    url: Optional[str] = None,
) -> Any:
    """Return the parameter tree, consulting the on-disk cache first.

    This is the executor-pool callable behind ``Request.getParameterTree``.
    It lives here (rather than on ``Request``) so the interplay of
    :func:`parameter_tree_cache_path`, :func:`load_parameter_tree_file`,
    :func:`send_and_recv`, and :func:`save_parameter_tree_file` can be
    exercised with stubs in unit tests.

    Args:
        tree_hash: Value from ``getParameterTreeHash``; part of the
            cache-file name.
        protobuf_types: ``MessageTypes`` used both to build the request
            and to decode the reply.
        socket: pynng ``Req0``-like handle handed to :func:`send_and_recv`.
        url: Connection URL, hashed into the cache filename so two
            engines sharing a ``tree_hash`` don't collide.

    Returns:
        A ``ParameterTreeMsg`` — either the cached copy or a freshly
        fetched one (which is also written back to the cache).
    """
    cache_file = parameter_tree_cache_path(url, tree_hash)
    cached_tree = load_parameter_tree_file(cache_file, protobuf_types)
    if cached_tree:
        logger.debug("[REQUEST] Found parameter tree in the cache")
        return cached_tree
    logger.debug("[REQUEST] Failed to find parameter tree in the cache")

    request = protobuf_types.createType("motorcortex.GetParameterTreeMsg")
    reply = send_and_recv(socket, protobuf_types.encode(request), protobuf_types)
    return save_parameter_tree_file(cache_file, reply)
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def send_and_recv(
    socket: Any,
    encoded_msg: Any,
    protobuf_types: Optional[Any],
) -> Any:
    """Perform one synchronous request/reply round-trip on an nng socket.

    A fresh nng context is opened on ``socket``, ``encoded_msg`` is sent,
    and the reply is awaited. nng errors are logged for diagnostics and
    re-raised so the caller's ``Future`` observes the failure. The
    context is always closed, success or not.

    Args:
        socket: A pynng ``Req0``-like object exposing ``new_context()``;
            the returned context must support ``send()``, ``recv()``,
            and ``close()``.
        encoded_msg: Bytes to put on the wire.
        protobuf_types: When truthy, the reply is run through its
            ``decode``; otherwise the raw buffer is returned.

    Returns:
        Decoded reply (non-empty buffer + ``protobuf_types``), the raw
        buffer, or ``None`` when nng handed back an empty buffer.
    """
    context = socket.new_context()
    try:
        context.send(encoded_msg)
        wire_reply = context.recv()
        if not wire_reply:
            return None
        if protobuf_types:
            return protobuf_types.decode(wire_reply)
        return wire_reply
    except Exception as exc:
        logger.error("[SEND] Error during send/recv: %s: %s", type(exc).__name__, exc)
        raise
    finally:
        context.close()
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
def parameter_tree_cache_path(url: Optional[str], tree_hash: int) -> str:
    """Return the temp-dir cache path for a ``(url, tree_hash)`` pair.

    Prior to 0.25.8 the cache key was ``tree_hash`` alone, so two
    different engines that produced the same hash would read each
    other's cached tree. The URL is therefore folded into the filename
    as a short md5 prefix, keeping the path compact while separating
    engines with different URLs.

    A ``None`` URL (e.g. before the Request finished connecting) falls
    back to a ``nourl`` placeholder, which still deduplicates by hash
    within a single process.
    """
    key_bytes = (url or "nourl").encode("utf-8")
    url_digest = hashlib.md5(key_bytes).hexdigest()[:8]
    cache_name = f"{_CACHE_PREFIX}-{url_digest}-{tree_hash}"
    return os.path.join(tempfile.gettempdir(), cache_name)
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
def _purge_stale_cache_files(path: str) -> None:
|
|
214
|
+
"""Remove previous cache files for the same URL.
|
|
215
|
+
|
|
216
|
+
Called by :func:`save_parameter_tree_file` after a successful write.
|
|
217
|
+
The filename layout is ``mcx-python-pt-<url-md5>-<tree-hash>``; this
|
|
218
|
+
helper globs the ``mcx-python-pt-<url-md5>-`` prefix of the just-
|
|
219
|
+
written file and unlinks any sibling that doesn't match the current
|
|
220
|
+
``tree-hash``. That keeps the cache directory at one file per URL
|
|
221
|
+
in steady state instead of accumulating forever as servers cycle
|
|
222
|
+
through tree-hash values.
|
|
223
|
+
|
|
224
|
+
Temp files from concurrent :func:`tempfile.mkstemp` writers are
|
|
225
|
+
skipped — their names contain a ``.`` right after the hash, which
|
|
226
|
+
canonical cache filenames never do.
|
|
227
|
+
"""
|
|
228
|
+
directory, basename = os.path.split(path)
|
|
229
|
+
last_dash = basename.rfind("-")
|
|
230
|
+
if last_dash == -1:
|
|
231
|
+
return
|
|
232
|
+
url_prefix = basename[:last_dash + 1] # "mcx-python-pt-<url-md5>-"
|
|
233
|
+
pattern = os.path.join(directory or ".", url_prefix + "*")
|
|
234
|
+
for match in glob.glob(pattern):
|
|
235
|
+
entry = os.path.basename(match)
|
|
236
|
+
if entry == basename:
|
|
237
|
+
continue
|
|
238
|
+
# mkstemp injects ".<random>.tmp" after the hash — skip so we
|
|
239
|
+
# don't race a concurrent writer to its own temp file.
|
|
240
|
+
if "." in entry[len(url_prefix):]:
|
|
241
|
+
continue
|
|
242
|
+
try:
|
|
243
|
+
os.unlink(match)
|
|
244
|
+
logger.debug("[REQUEST] Evicted stale cache file: %s", entry)
|
|
245
|
+
except OSError:
|
|
246
|
+
pass
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
def save_parameter_tree_file(path: str, parameter_tree: Any) -> Any:
    """Persist ``parameter_tree`` to the local JSON cache at ``path``.

    The on-disk format is a ``{"md5": ..., "data": base64}`` envelope.
    The md5 covers the **base64** text rather than the raw protobuf
    bytes, so editing ``data`` alone breaks verification on the next
    :func:`load_parameter_tree_file`.

    Writing is atomic: the envelope first lands in a sibling temp file
    in the same directory, then ``os.replace`` swaps it in. On POSIX
    that is one rename syscall, so a concurrent reader sees either the
    complete old file or the complete new one — never a torn write that
    would fail the md5 check. After a successful swap, stale same-URL
    cache files are swept on a best-effort basis.

    Returns:
        ``parameter_tree`` unchanged, for call-chaining.
    """
    logger.debug("[REQUEST] Saved parameter tree to the cache")
    payload = base64.b64encode(parameter_tree.SerializeToString())
    envelope = {
        "md5": hashlib.md5(payload).hexdigest(),
        "data": payload.decode("utf-8"),
    }

    target_dir = os.path.dirname(path) or "."
    fd, tmp_path = tempfile.mkstemp(
        prefix=os.path.basename(path) + ".",
        suffix=".tmp",
        dir=target_dir,
    )
    try:
        with os.fdopen(fd, "w") as handle:
            handle.write(json.dumps(envelope))
        os.replace(tmp_path, path)
    except Exception:
        # The rename never happened — don't leave the temp file behind.
        if os.path.exists(tmp_path):
            try:
                os.unlink(tmp_path)
            except OSError:
                pass
        raise
    # Durable now; evict older same-URL entries (best effort).
    _purge_stale_cache_files(path)
    return parameter_tree
|
|
290
|
+
|
|
291
|
+
|
|
292
|
+
def load_parameter_tree_file(path: str, protobuf_types: Any) -> Optional[Any]:
    """Inverse of :func:`save_parameter_tree_file` with md5 verification.

    Args:
        path: Cache file previously written by
            :func:`save_parameter_tree_file`.
        protobuf_types: ``MessageTypes`` used to create the empty
            ``ParameterTreeMsg`` that the cached bytes are parsed into.

    Returns:
        The parsed ``ParameterTreeMsg``, or ``None`` when the file is
        missing or malformed. Malformed means: not valid JSON, not a
        JSON object, ``md5``/``data`` keys missing or ``data`` not a
        string, or md5 mismatch. Any other exception propagates
        (disk errors, etc.).
    """
    if not os.path.exists(path):
        return None

    with open(path, "r") as infile:
        try:
            json_data = json.load(infile)
        except json.JSONDecodeError:
            # A truncated/corrupted cache file is a cache miss, not an
            # error (previously this propagated, contradicting the
            # documented contract) — the caller re-fetches and rewrites.
            return None

    # Guard against non-object JSON (e.g. a bare list) before key access,
    # and against a non-string "data" that would break .encode() below.
    if (
        not isinstance(json_data, dict)
        or "md5" not in json_data
        or not isinstance(json_data.get("data"), str)
    ):
        return None

    # The checksum covers the base64 text — see save_parameter_tree_file.
    if hashlib.md5(json_data["data"].encode()).hexdigest() != json_data["md5"]:
        return None

    msg = protobuf_types.createType("motorcortex.ParameterTreeMsg")
    msg.ParseFromString(base64.b64decode(json_data["data"]))
    # Log only on an actual hit; the old unconditional log claimed a
    # cache load even when this function returned None.
    logger.debug("[REQUEST] Loaded parameter tree from the cache")
    return msg
|
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
#
|
|
2
|
+
# Developer: Alexey Zakharov (alexey.zakharov@vectioneer.com)
|
|
3
|
+
# All rights reserved. Copyright (c) 2026 VECTIONEER.
|
|
4
|
+
#
|
|
5
|
+
|
|
6
|
+
"""Pure frame-parsing and dispatch helpers for :mod:`motorcortex.subscribe`.
|
|
7
|
+
|
|
8
|
+
These used to be inlined in ``Subscribe.__run``. Moving them out lets us
|
|
9
|
+
unit-test the wire format parser and the protocol-version dispatch
|
|
10
|
+
without standing up a real subscribe socket — in particular the
|
|
11
|
+
"unknown protocol version" branch that real servers never produce.
|
|
12
|
+
|
|
13
|
+
Wire frame layout:
|
|
14
|
+
bytes [0..2] little-endian 24-bit subscription id
|
|
15
|
+
byte 3 protocol version (0 = MULTI_TIMESTAMP, 1 = SINGLE_TIMESTAMP)
|
|
16
|
+
bytes [4..] payload passed to ``Subscription._updateProtocol{0,1}``
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
from typing import Any, Mapping, Tuple
|
|
22
|
+
|
|
23
|
+
from motorcortex.setup_logger import logger
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
_HEADER_SIZE = 4
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def parse_frame_header(buffer: bytes) -> Tuple[int, int]:
    """Split the leading 4-byte frame header into its two fields.

    Args:
        buffer: Raw bytes off the subscribe socket; anything shorter
            than ``_HEADER_SIZE`` raises IndexError.

    Returns:
        ``(sub_id, protocol_version)`` — a little-endian 24-bit id
        followed by the one-byte protocol version.
    """
    sub_id = int.from_bytes(buffer[:3], "little")
    protocol_version = buffer[3]
    return sub_id, protocol_version
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
def dispatch_frame(buffer: bytes, subscriptions: Mapping[int, Any]) -> None:
    """Hand one received wire frame to the ``Subscription`` it addresses.

    A frame is discarded when any of these hold:
      - it is empty or shorter than the 4-byte header,
      - its sub_id has no entry in ``subscriptions``,
      - its protocol version is neither 0 nor 1.

    Discards are logged at ``error`` level (short frame / unknown
    protocol) or ``debug`` level (unknown sub_id — expected briefly
    right after an unsubscribe).
    """
    header_size = 4
    if not buffer:
        return
    if len(buffer) < header_size:
        logger.error(
            '[SUBSCRIBE-DISPATCH] Frame shorter than header (%d bytes)',
            len(buffer),
        )
        return

    # Header layout: 24-bit little-endian subscription id, then one
    # protocol-version byte (inline equivalent of parse_frame_header).
    sub_id = buffer[0] | (buffer[1] << 8) | (buffer[2] << 16)
    protocol_version = buffer[3]
    subscription = subscriptions.get(sub_id)
    if subscription is None:
        logger.debug(
            '[SUBSCRIBE-DISPATCH] Received data for unknown subscription id: %s',
            sub_id,
        )
        return

    body = buffer[header_size:]
    body_length = len(body)

    if protocol_version == 0:
        subscription._updateProtocol0(body, body_length)
    elif protocol_version == 1:
        subscription._updateProtocol1(body, body_length)
    else:
        logger.error(
            '[SUBSCRIBE-DISPATCH] Unknown protocol version: %s for sub_id: %s',
            protocol_version, sub_id,
        )
        # Surface the silent discard through the subscription's
        # dropped-frame counter when the hook exists.
        recorder = getattr(subscription, '_recordDroppedFrame', None)
        if recorder is not None:
            recorder()
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
#
|
|
2
|
+
# Developer: Alexey Zakharov (alexey.zakharov@vectioneer.com)
|
|
3
|
+
# All rights reserved. Copyright (c) 2026 VECTIONEER.
|
|
4
|
+
#
|
|
5
|
+
|
|
6
|
+
"""Typed exceptions raised by the motorcortex library.
|
|
7
|
+
|
|
8
|
+
The library follows a deliberate split between **transport** errors
|
|
9
|
+
(which raise) and **protocol-level** status (which lives on the reply
|
|
10
|
+
message and is checked by the caller):
|
|
11
|
+
|
|
12
|
+
- **Transport errors raise** an :class:`McxError` subclass. Use when
|
|
13
|
+
the socket is closed, the server never answers, or the dial itself
|
|
14
|
+
fails. Caller can't do anything with the reply — there isn't one.
|
|
15
|
+
- **Protocol-level status stays on the reply.** ``reply.get().status``
|
|
16
|
+
carries codes like ``WRONG_PASSWORD``, ``WRONG_PARAMETER_PATH``,
|
|
17
|
+
``READ_ONLY_MODE``. Caller inspects if they care; nothing raises.
|
|
18
|
+
|
|
19
|
+
Every exception here inherits from ``RuntimeError`` so existing
|
|
20
|
+
``try: ... except RuntimeError: ...`` code keeps catching
|
|
21
|
+
motorcortex failures without modification.
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
from __future__ import annotations
|
|
25
|
+
|
|
26
|
+
from typing import Optional
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class McxError(RuntimeError):
    """Root of the motorcortex exception hierarchy.

    Deliberately subclasses ``RuntimeError``: callers written before
    typed exceptions existed used ``except RuntimeError`` and must keep
    catching library failures unchanged as more specific types appear.
    """
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class McxConnectionError(McxError):
    """Raised for transport-level failures.

    Covers a closed socket, an unreachable server, a rejected dial,
    and any request issued on a connection that never came up.
    """
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class McxLoginError(McxError):
    """Raised when the login RPC reports a non-OK status during
    :func:`motorcortex.connect` or
    :meth:`motorcortex.Session.connect`.

    The engine's numeric status code is kept on :attr:`status`, letting
    callers distinguish e.g. ``WRONG_PASSWORD`` from ``READ_ONLY_MODE``
    without parsing the message text.
    """

    def __init__(self, message: str, status: Optional[int] = None) -> None:
        super().__init__(message)
        # Numeric engine status code; None when the reply carried none.
        self.status: Optional[int] = status
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class McxTimeout(McxError, TimeoutError):
    """Raised when no reply arrived within the requested timeout.

    Carries both ``McxError`` and the builtin ``TimeoutError`` as
    bases, so it is caught equally by ``except McxError`` and by
    ``except TimeoutError``.
    """
|
|
@@ -0,0 +1,103 @@
|
|
|
1
|
+
#!/usr/bin/python3
|
|
2
|
+
|
|
3
|
+
#
|
|
4
|
+
# Developer: Alexey Zakharov (alexey.zakharov@vectioneer.com)
|
|
5
|
+
# All rights reserved. Copyright (c) 2016-2026 VECTIONEER.
|
|
6
|
+
#
|
|
7
|
+
|
|
8
|
+
"""NNG thread-count initialization.
|
|
9
|
+
|
|
10
|
+
``init_nng_threads`` is called automatically once from
|
|
11
|
+
``motorcortex/__init__.py`` — it **must** run before any pynng socket
|
|
12
|
+
is created, because NNG latches these values on its first internal
|
|
13
|
+
init and ignores subsequent ``nng_init_set_parameter`` calls. The
|
|
14
|
+
import-time auto-call is what guarantees that ordering for users who
|
|
15
|
+
only touch NNG through motorcortex.
|
|
16
|
+
|
|
17
|
+
Values are process-wide, so anything else using pynng in the same
|
|
18
|
+
process inherits them. The hardcoded defaults (task=2, expire=1,
|
|
19
|
+
poller=1, resolver=1) are tuned for the request/subscribe workload;
|
|
20
|
+
most callers never need to tune them.
|
|
21
|
+
|
|
22
|
+
Ops-side tuning is via env vars, read at import time — no code
|
|
23
|
+
change needed:
|
|
24
|
+
|
|
25
|
+
MCX_NNG_TASK_THREADS default 2
|
|
26
|
+
MCX_NNG_EXPIRE_THREADS default 1
|
|
27
|
+
MCX_NNG_POLLER_THREADS default 1
|
|
28
|
+
MCX_NNG_RESOLVER_THREADS default 1
|
|
29
|
+
|
|
30
|
+
Any explicit keyword argument to ``init_nng_threads(...)`` takes
|
|
31
|
+
precedence over the env value. Env bad-values (non-int, negative)
|
|
32
|
+
fall back to the default with a warning.
|
|
33
|
+
"""
|
|
34
|
+
|
|
35
|
+
import os
|
|
36
|
+
|
|
37
|
+
from pynng._nng import lib # type: ignore[import-untyped]
|
|
38
|
+
from motorcortex.setup_logger import logger
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
_DEFAULTS = {
|
|
42
|
+
"task": 2,
|
|
43
|
+
"expire": 1,
|
|
44
|
+
"poller": 1,
|
|
45
|
+
"resolver": 1,
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
_ENV_NAMES = {
|
|
49
|
+
"task": "MCX_NNG_TASK_THREADS",
|
|
50
|
+
"expire": "MCX_NNG_EXPIRE_THREADS",
|
|
51
|
+
"poller": "MCX_NNG_POLLER_THREADS",
|
|
52
|
+
"resolver": "MCX_NNG_RESOLVER_THREADS",
|
|
53
|
+
}
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def _resolve(name: str, override: int | None) -> int:
|
|
57
|
+
"""Pick the value for one knob: explicit arg > env var > default."""
|
|
58
|
+
if override is not None:
|
|
59
|
+
return override
|
|
60
|
+
raw = os.environ.get(_ENV_NAMES[name])
|
|
61
|
+
if raw is None:
|
|
62
|
+
return _DEFAULTS[name]
|
|
63
|
+
try:
|
|
64
|
+
value = int(raw)
|
|
65
|
+
if value < 1:
|
|
66
|
+
raise ValueError("must be >= 1")
|
|
67
|
+
return value
|
|
68
|
+
except ValueError as e:
|
|
69
|
+
logger.warning(
|
|
70
|
+
"[INIT-THREADS] %s=%r invalid (%s); using default %d",
|
|
71
|
+
_ENV_NAMES[name], raw, e, _DEFAULTS[name],
|
|
72
|
+
)
|
|
73
|
+
return _DEFAULTS[name]
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def init_nng_threads(
    task: int | None = None,
    expire: int | None = None,
    poller: int | None = None,
    resolver: int | None = None,
) -> None:
    """Configure NNG's thread-pool sizes; must run before any pynng socket.

    Every argument left as ``None`` is resolved from its matching
    ``MCX_NNG_*_THREADS`` env var and, failing that, from the hardcoded
    default. If the installed pynng doesn't expose the init-parameter
    interface, the failure is logged and the process-default pools stay
    in effect.
    """
    counts = {
        "task": _resolve("task", task),
        "expire": _resolve("expire", expire),
        "poller": _resolve("poller", poller),
        "resolver": _resolve("resolver", resolver),
    }
    try:
        lib.nng_init_set_parameter(lib.NNG_INIT_NUM_TASK_THREADS, counts["task"])
        lib.nng_init_set_parameter(lib.NNG_INIT_NUM_EXPIRE_THREADS, counts["expire"])
        lib.nng_init_set_parameter(lib.NNG_INIT_NUM_POLLER_THREADS, counts["poller"])
        lib.nng_init_set_parameter(lib.NNG_INIT_NUM_RESOLVER_THREADS, counts["resolver"])
        logger.debug(
            "[INIT-THREADS] task=%d expire=%d poller=%d resolver=%d",
            counts["task"], counts["expire"], counts["poller"], counts["resolver"],
        )
    except AttributeError:
        logger.error("[INIT-THREADS] Cannot adjust thread count: interface unavailable")
|