foxglove-sdk 0.16.2 (cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl)
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of foxglove-sdk might be problematic.
- foxglove/__init__.py +245 -0
- foxglove/_foxglove_py/__init__.pyi +211 -0
- foxglove/_foxglove_py/channels.pyi +2792 -0
- foxglove/_foxglove_py/cloud.pyi +9 -0
- foxglove/_foxglove_py/mcap.pyi +120 -0
- foxglove/_foxglove_py/schemas.pyi +1009 -0
- foxglove/_foxglove_py/schemas_wkt.pyi +85 -0
- foxglove/_foxglove_py/websocket.pyi +394 -0
- foxglove/_foxglove_py.cpython-310-aarch64-linux-gnu.so +0 -0
- foxglove/benchmarks/test_mcap_serialization.py +160 -0
- foxglove/channel.py +241 -0
- foxglove/channels/__init__.py +94 -0
- foxglove/cloud.py +61 -0
- foxglove/mcap.py +12 -0
- foxglove/notebook/__init__.py +0 -0
- foxglove/notebook/foxglove_widget.py +100 -0
- foxglove/notebook/notebook_buffer.py +114 -0
- foxglove/notebook/static/widget.js +1 -0
- foxglove/py.typed +0 -0
- foxglove/schemas/__init__.py +163 -0
- foxglove/tests/__init__.py +0 -0
- foxglove/tests/test_channel.py +243 -0
- foxglove/tests/test_context.py +10 -0
- foxglove/tests/test_logging.py +62 -0
- foxglove/tests/test_mcap.py +368 -0
- foxglove/tests/test_parameters.py +178 -0
- foxglove/tests/test_schemas.py +17 -0
- foxglove/tests/test_server.py +141 -0
- foxglove/tests/test_time.py +137 -0
- foxglove/websocket.py +220 -0
- foxglove_sdk-0.16.2.dist-info/METADATA +53 -0
- foxglove_sdk-0.16.2.dist-info/RECORD +33 -0
- foxglove_sdk-0.16.2.dist-info/WHEEL +5 -0
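Most of the files shown in the per-file diffs below are the SDK's own tests, which double as usage examples for the public API. As orientation, here is a minimal sketch of the logging pattern the MCAP tests exercise; the topic name and output path are illustrative, while `Channel`, `open_mcap`, `allow_overwrite`, and `Channel.log` are taken directly from the tests that follow.

from foxglove import Channel, open_mcap

# Illustrative topic and JSON schema, mirroring the tests below.
chan = Channel("/example", schema={"type": "object"})

# "quickstart.mcap" is an illustrative output path; closing the context
# manager finishes the file, as test_context_manager checks.
with open_mcap("quickstart.mcap", allow_overwrite=True):
    for i in range(10):
        chan.log({"foo": i})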
foxglove/tests/test_mcap.py
@@ -0,0 +1,368 @@
+from pathlib import Path
+from typing import Callable, Generator, Optional
+
+import pytest
+from foxglove import Channel, ChannelDescriptor, Context, open_mcap
+from foxglove.mcap import MCAPWriteOptions
+
+chan = Channel("test", schema={"type": "object"})
+
+
+@pytest.fixture
+def make_tmp_mcap(
+    tmp_path_factory: pytest.TempPathFactory,
+) -> Generator[Callable[[], Path], None, None]:
+    mcap: Optional[Path] = None
+    dir: Optional[Path] = None
+
+    def _make_tmp_mcap() -> Path:
+        nonlocal dir, mcap
+        dir = tmp_path_factory.mktemp("test", numbered=True)
+        mcap = dir / "test.mcap"
+        return mcap
+
+    yield _make_tmp_mcap
+
+    if mcap is not None and dir is not None:
+        try:
+            mcap.unlink()
+            dir.rmdir()
+        except FileNotFoundError:
+            pass
+
+
+@pytest.fixture
+def tmp_mcap(make_tmp_mcap: Callable[[], Path]) -> Generator[Path, None, None]:
+    yield make_tmp_mcap()
+
+
+def test_open_with_str(tmp_mcap: Path) -> None:
+    open_mcap(str(tmp_mcap))
+
+
+def test_overwrite(tmp_mcap: Path) -> None:
+    tmp_mcap.touch()
+    with pytest.raises(FileExistsError):
+        open_mcap(tmp_mcap)
+    open_mcap(tmp_mcap, allow_overwrite=True)
+
+
+def test_explicit_close(tmp_mcap: Path) -> None:
+    mcap = open_mcap(tmp_mcap)
+    for ii in range(20):
+        chan.log({"foo": ii})
+    size_before_close = tmp_mcap.stat().st_size
+    mcap.close()
+    assert tmp_mcap.stat().st_size > size_before_close
+
+
+def test_context_manager(tmp_mcap: Path) -> None:
+    with open_mcap(tmp_mcap):
+        for ii in range(20):
+            chan.log({"foo": ii})
+        size_before_close = tmp_mcap.stat().st_size
+    assert tmp_mcap.stat().st_size > size_before_close
+
+
+def test_writer_compression(make_tmp_mcap: Callable[[], Path]) -> None:
+    tmp_1 = make_tmp_mcap()
+    tmp_2 = make_tmp_mcap()
+
+    # Compression is enabled by default
+    mcap_1 = open_mcap(tmp_1)
+    mcap_2 = open_mcap(tmp_2, writer_options=MCAPWriteOptions(compression=None))
+
+    for _ in range(20):
+        chan.log({"foo": "bar"})
+
+    mcap_1.close()
+    mcap_2.close()
+
+    assert tmp_1.stat().st_size < tmp_2.stat().st_size
+
+
+def test_writer_custom_profile(tmp_mcap: Path) -> None:
+    options = MCAPWriteOptions(profile="--custom-profile-1--")
+    with open_mcap(tmp_mcap, writer_options=options):
+        chan.log({"foo": "bar"})
+
+    contents = tmp_mcap.read_bytes()
+    assert contents.find(b"--custom-profile-1--") > -1
+
+
+def test_write_to_different_contexts(make_tmp_mcap: Callable[[], Path]) -> None:
+    tmp_1 = make_tmp_mcap()
+    tmp_2 = make_tmp_mcap()
+
+    ctx1 = Context()
+    ctx2 = Context()
+
+    options = MCAPWriteOptions(compression=None)
+    mcap1 = open_mcap(tmp_1, writer_options=options, context=ctx1)
+    mcap2 = open_mcap(tmp_2, writer_options=options, context=ctx2)
+
+    ch1 = Channel("ctx1", context=ctx1)
+    ch1.log({"a": "b"})
+
+    ch2 = Channel("ctx2", context=ctx2)
+    ch2.log({"has-more-data": "true"})
+
+    mcap1.close()
+    mcap2.close()
+
+    contents1 = tmp_1.read_bytes()
+    contents2 = tmp_2.read_bytes()
+
+    assert len(contents1) < len(contents2)
+
+
+def _verify_metadata_in_file(file_path: Path, expected_metadata: dict) -> None:
+    """Helper function to verify metadata in MCAP file matches expected."""
+    import mcap.reader
+
+    with open(file_path, "rb") as f:
+        reader = mcap.reader.make_reader(f)
+
+        found_metadata = {}
+        metadata_count = 0
+
+        for record in reader.iter_metadata():
+            metadata_count += 1
+            found_metadata[record.name] = dict(record.metadata)
+
+        # Verify count
+        assert metadata_count == len(
+            expected_metadata
+        ), f"Expected {len(expected_metadata)} metadata records, found {metadata_count}"
+
+        # Verify metadata names and content
+        assert set(found_metadata.keys()) == set(
+            expected_metadata.keys()
+        ), "Metadata names don't match"
+
+        for name, expected_kv in expected_metadata.items():
+            assert (
+                found_metadata[name] == expected_kv
+            ), f"Metadata '{name}' has wrong key-value pairs"
+
+
+def _verify_attachments_in_file(
+    file_path: Path, expected_attachments: list[dict]
+) -> None:
+    """Helper function to verify attachments in MCAP file match expected."""
+    import mcap.reader
+
+    with open(file_path, "rb") as f:
+        reader = mcap.reader.make_reader(f)
+
+        found_attachments = []
+        for attachment in reader.iter_attachments():
+            found_attachments.append(
+                {
+                    "log_time": attachment.log_time,
+                    "create_time": attachment.create_time,
+                    "name": attachment.name,
+                    "media_type": attachment.media_type,
+                    "data": attachment.data,
+                }
+            )
+
+        # Verify count
+        assert len(found_attachments) == len(
+            expected_attachments
+        ), f"Expected {len(expected_attachments)} attachments, found {len(found_attachments)}"
+
+        # Verify each attachment matches expected
+        for expected in expected_attachments:
+            matching = [a for a in found_attachments if a["name"] == expected["name"]]
+            assert len(matching) == 1, f"Attachment '{expected['name']}' not found"
+            actual = matching[0]
+            assert (
+                actual["log_time"] == expected["log_time"]
+            ), f"Attachment '{expected['name']}' has wrong log_time"
+            assert (
+                actual["create_time"] == expected["create_time"]
+            ), f"Attachment '{expected['name']}' has wrong create_time"
+            assert (
+                actual["media_type"] == expected["media_type"]
+            ), f"Attachment '{expected['name']}' has wrong media_type"
+            assert (
+                actual["data"] == expected["data"]
+            ), f"Attachment '{expected['name']}' has wrong data"
+
+
+def test_write_metadata(tmp_mcap: Path) -> None:
+    """Test writing metadata to MCAP file."""
+    # Define expected metadata
+    expected_metadata = {
+        "test1": {"key1": "value1", "key2": "value2"},
+        "test2": {"a": "1", "b": "2"},
+        "test3": {"x": "y", "z": "w"},
+    }
+
+    with open_mcap(tmp_mcap) as writer:
+        # This should not raise an error
+        writer.write_metadata("empty", {})
+
+        # Write basic metadata
+        writer.write_metadata("test1", expected_metadata["test1"])
+
+        # Write multiple metadata records
+        writer.write_metadata("test2", expected_metadata["test2"])
+        writer.write_metadata("test3", expected_metadata["test3"])
+
+        # Write empty metadata (should be skipped)
+        writer.write_metadata("empty_test", {})
+
+        # Log some messages
+        for ii in range(5):
+            chan.log({"foo": ii})
+
+    # Verify metadata was written correctly
+    _verify_metadata_in_file(tmp_mcap, expected_metadata)
+
+
+def test_channel_filter(make_tmp_mcap: Callable[[], Path]) -> None:
+    tmp_1 = make_tmp_mcap()
+    tmp_2 = make_tmp_mcap()
+
+    ch1 = Channel("/1", schema={"type": "object"})
+    ch2 = Channel("/2", schema={"type": "object"})
+
+    def filter(ch: ChannelDescriptor) -> bool:
+        return ch.topic.startswith("/1")
+
+    mcap1 = open_mcap(tmp_1, channel_filter=filter)
+    mcap2 = open_mcap(tmp_2, channel_filter=None)
+
+    ch1.log({})
+    ch2.log({})
+
+    mcap1.close()
+    mcap2.close()
+
+    assert tmp_1.stat().st_size < tmp_2.stat().st_size
+
+
+def test_attach_basic(tmp_mcap: Path) -> None:
+    """Test writing a single attachment to MCAP file."""
+    expected_attachments = [
+        {
+            "log_time": 1000000000,
+            "create_time": 2000000000,
+            "name": "config.json",
+            "media_type": "application/json",
+            "data": b'{"setting": true}',
+        }
+    ]
+
+    with open_mcap(tmp_mcap) as writer:
+        writer.attach(
+            log_time=1000000000,
+            create_time=2000000000,
+            name="config.json",
+            media_type="application/json",
+            data=b'{"setting": true}',
+        )
+
+    _verify_attachments_in_file(tmp_mcap, expected_attachments)
+
+
+def test_attach_multiple(tmp_mcap: Path) -> None:
+    """Test writing multiple attachments to MCAP file."""
+    expected_attachments = [
+        {
+            "log_time": 100,
+            "create_time": 200,
+            "name": "config.json",
+            "media_type": "application/json",
+            "data": b'{"setting": true}',
+        },
+        {
+            "log_time": 300,
+            "create_time": 400,
+            "name": "calibration.yaml",
+            "media_type": "text/yaml",
+            "data": b"camera:\n fx: 500\n fy: 500",
+        },
+        {
+            "log_time": 500,
+            "create_time": 600,
+            "name": "image.png",
+            "media_type": "image/png",
+            "data": bytes([0x89, 0x50, 0x4E, 0x47]),  # PNG magic bytes
+        },
+    ]
+
+    with open_mcap(tmp_mcap) as writer:
+        writer.attach(
+            log_time=100,
+            create_time=200,
+            name="config.json",
+            media_type="application/json",
+            data=b'{"setting": true}',
+        )
+        writer.attach(
+            log_time=300,
+            create_time=400,
+            name="calibration.yaml",
+            media_type="text/yaml",
+            data=b"camera:\n fx: 500\n fy: 500",
+        )
+        writer.attach(
+            log_time=500,
+            create_time=600,
+            name="image.png",
+            media_type="image/png",
+            data=bytes([0x89, 0x50, 0x4E, 0x47]),  # PNG magic bytes
+        )
+
+    _verify_attachments_in_file(tmp_mcap, expected_attachments)
+
+
+def test_attach_with_messages(tmp_mcap: Path) -> None:
+    """Test writing attachments alongside messages."""
+    with open_mcap(tmp_mcap) as writer:
+        # Write some messages
+        for ii in range(5):
+            chan.log({"foo": ii})
+
+        # Write an attachment
+        writer.attach(
+            log_time=1000,
+            create_time=2000,
+            name="notes.txt",
+            media_type="text/plain",
+            data=b"Recording notes",
+        )
+
+        # Write more messages
+        for ii in range(5, 10):
+            chan.log({"foo": ii})
+
+    # Verify attachment was written
+    expected_attachments = [
+        {
+            "log_time": 1000,
+            "create_time": 2000,
+            "name": "notes.txt",
+            "media_type": "text/plain",
+            "data": b"Recording notes",
+        }
+    ]
+    _verify_attachments_in_file(tmp_mcap, expected_attachments)
+
+
+def test_attach_after_close(tmp_mcap: Path) -> None:
+    """Test that attaching after close raises an error."""
+    writer = open_mcap(tmp_mcap)
+    writer.close()
+
+    with pytest.raises(Exception):  # FoxgloveError for SinkClosed
+        writer.attach(
+            log_time=100,
+            create_time=200,
+            name="test.txt",
+            media_type="text/plain",
+            data=b"test",
+        )
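The _verify_* helpers above read the written file back with the separate `mcap` reader package. A condensed sketch of that read-back pattern, assuming the `mcap` package is installed alongside the SDK and using the same calls as the helpers (`make_reader`, `iter_metadata`, `iter_attachments`); the file name is illustrative.

import mcap.reader

# Open a file produced by open_mcap() and list its metadata and attachments.
with open("quickstart.mcap", "rb") as f:
    reader = mcap.reader.make_reader(f)
    for record in reader.iter_metadata():
        print(record.name, dict(record.metadata))
    for attachment in reader.iter_attachments():
        print(attachment.name, attachment.media_type, len(attachment.data))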
foxglove/tests/test_parameters.py
@@ -0,0 +1,178 @@
+import pytest
+from foxglove.websocket import (
+    AnyNativeParameterValue,
+    Parameter,
+    ParameterType,
+    ParameterValue,
+)
+
+
+def test_empty() -> None:
+    p = Parameter("empty")
+    assert p.name == "empty"
+    assert p.type is None
+    assert p.value is None
+    assert p.get_value() is None
+
+
+def test_float() -> None:
+    p = Parameter("float", value=1.234)
+    assert p.name == "float"
+    assert p.type == ParameterType.Float64
+    assert p.value == ParameterValue.Float64(1.234)
+    assert p.get_value() == 1.234
+
+
+def test_int() -> None:
+    p = Parameter("int", value=1)
+    assert p.name == "int"
+    assert p.type is None
+    assert p.value == ParameterValue.Integer(1)
+    assert type(p.get_value()) is int
+    assert p.get_value() == 1
+
+
+def test_float_array() -> None:
+    v: AnyNativeParameterValue = [1.0, 2.0, 3.0]
+    p = Parameter("float_array", value=v)
+    assert p.name == "float_array"
+    assert p.type == ParameterType.Float64Array
+    assert p.value == ParameterValue.Array(
+        [
+            ParameterValue.Float64(1.0),
+            ParameterValue.Float64(2.0),
+            ParameterValue.Float64(3.0),
+        ]
+    )
+    assert p.get_value() == v
+
+
+def test_int_array() -> None:
+    v: AnyNativeParameterValue = [1, 2, 3]
+    p = Parameter("int_array", value=v)
+    assert p.name == "int_array"
+    assert p.type is None
+    assert p.value == ParameterValue.Array(
+        [
+            ParameterValue.Integer(1),
+            ParameterValue.Integer(2),
+            ParameterValue.Integer(3),
+        ]
+    )
+    assert p.get_value() == v
+
+
+def test_parameter_value_integer() -> None:
+    p = Parameter("integer_param", value=ParameterValue.Integer(42))
+    assert p.name == "integer_param"
+    assert p.type is None
+    assert p.value == ParameterValue.Integer(42)
+    assert type(p.get_value()) is int
+    assert p.get_value() == 42
+
+
+def test_heterogeneous_array() -> None:
+    v: AnyNativeParameterValue = ["a", 2, False]
+    p = Parameter("heterogeneous_array", value=v)
+    assert p.name == "heterogeneous_array"
+    assert p.type is None
+    assert p.value == ParameterValue.Array(
+        [
+            ParameterValue.String("a"),
+            ParameterValue.Integer(2),
+            ParameterValue.Bool(False),
+        ]
+    )
+    assert p.get_value() == v
+
+
+def test_string() -> None:
+    p = Parameter("string", value="hello")
+    assert p.name == "string"
+    assert p.type is None
+    assert p.value == ParameterValue.String("hello")
+    assert p.get_value() == "hello"
+
+
+def test_bytes() -> None:
+    p = Parameter("bytes", value=b"hello")
+    assert p.name == "bytes"
+    assert p.type == ParameterType.ByteArray
+    assert p.value == ParameterValue.String("aGVsbG8=")
+    assert p.get_value() == b"hello"
+
+
+def test_dict() -> None:
+    v: AnyNativeParameterValue = {
+        "a": True,
+        "b": 2,
+        "c": "C",
+        "d": {"inner": [1, 2, 3]},
+    }
+    p = Parameter(
+        "dict",
+        value=v,
+    )
+    assert p.name == "dict"
+    assert p.type is None
+    assert p.value == ParameterValue.Dict(
+        {
+            "a": ParameterValue.Bool(True),
+            "b": ParameterValue.Integer(2),
+            "c": ParameterValue.String("C"),
+            "d": ParameterValue.Dict(
+                {
+                    "inner": ParameterValue.Array(
+                        [
+                            ParameterValue.Integer(1),
+                            ParameterValue.Integer(2),
+                            ParameterValue.Integer(3),
+                        ]
+                    )
+                }
+            ),
+        }
+    )
+    assert p.get_value() == v
+
+
+def test_explicit() -> None:
+    # Derive type from value
+    p = Parameter("float", value=ParameterValue.Float64(1))
+    assert p.type == ParameterType.Float64
+    assert p.get_value() == 1
+
+    # Override derived type.
+    p = Parameter(
+        "bad float array",
+        value=ParameterValue.Float64(1),
+        type=ParameterType.Float64Array,
+    )
+    assert p.type == ParameterType.Float64Array
+    assert p.get_value() == 1
+
+    # Override derived type in a different way.
+    p = Parameter(
+        "bad float",
+        value=ParameterValue.String("1"),
+        type=ParameterType.Float64,
+    )
+    assert p.type == ParameterType.Float64
+    assert p.get_value() == "1"
+
+    # Override derived type with None.
+    p = Parameter("underspecified float", value=ParameterValue.Float64(1), type=None)
+    assert p.type is None
+    assert p.get_value() == 1
+
+
+def test_base64_decode_error() -> None:
+    p = Parameter(
+        "bad bytes",
+        value=ParameterValue.String("!!!"),
+        type=ParameterType.ByteArray,
+    )
+    assert p.type == ParameterType.ByteArray
+    assert p.value == ParameterValue.String("!!!")
+    with pytest.raises(ValueError, match=r"Failed to decode base64"):
+        p.get_value()
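As a summary of the conversion behavior these tests pin down, a small sketch with arbitrary parameter names: native Python values are wrapped in `ParameterValue` variants at construction, `get_value()` converts back, and `bytes` values are stored as a base64-encoded `String` tagged `ParameterType.ByteArray`.

from foxglove.websocket import Parameter, ParameterType, ParameterValue

# Floats are wrapped as Float64 and round-trip through get_value().
speed = Parameter("speed", value=1.5)
assert speed.value == ParameterValue.Float64(1.5)
assert speed.get_value() == 1.5

# Bytes are base64-encoded on the wire and decoded back by get_value().
blob = Parameter("blob", value=b"hello")
assert blob.type == ParameterType.ByteArray
assert blob.value == ParameterValue.String("aGVsbG8=")  # base64 of b"hello"
assert blob.get_value() == b"hello"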
foxglove/tests/test_schemas.py
@@ -0,0 +1,17 @@
+from foxglove.schemas import Log, LogLevel, Timestamp
+
+""" Asserts that foxglove schemas can be encoded as protobuf. """
+
+
+def test_can_encode() -> None:
+    msg = Log(
+        timestamp=Timestamp(5, 10),
+        level=LogLevel.Error,
+        message="hello",
+        name="logger",
+        file="file",
+        line=123,
+    )
+    encoded = msg.encode()
+    assert isinstance(encoded, bytes)
+    assert len(encoded) == 34
foxglove/tests/test_server.py
@@ -0,0 +1,141 @@
+import time
+import typing
+from urllib.parse import parse_qs, urlparse
+
+import pytest
+from foxglove import (
+    Capability,
+    Channel,
+    Context,
+    ServerListener,
+    Service,
+    start_server,
+)
+from foxglove.websocket import PlaybackState, PlaybackStatus, ServiceSchema, StatusLevel
+
+
+def test_server_interface() -> None:
+    """
+    Exercise the server interface; will also be checked with mypy.
+    """
+    server = start_server(
+        port=0, session_id="test-session", channel_filter=lambda _: True
+    )
+    assert isinstance(server.port, int)
+    assert server.port != 0
+
+    raw_url = server.app_url()
+    assert raw_url is not None
+    url = urlparse(raw_url)
+    assert url.scheme == "https"
+    assert url.netloc == "app.foxglove.dev"
+    assert parse_qs(url.query) == {
+        "ds": ["foxglove-websocket"],
+        "ds.url": [f"ws://127.0.0.1:{server.port}"],
+    }
+
+    raw_url = server.app_url(layout_id="lay_123", open_in_desktop=True)
+    assert raw_url is not None
+    url = urlparse(raw_url)
+    assert url.scheme == "https"
+    assert url.netloc == "app.foxglove.dev"
+    assert parse_qs(url.query) == {
+        "ds": ["foxglove-websocket"],
+        "ds.url": [f"ws://127.0.0.1:{server.port}"],
+        "layoutId": ["lay_123"],
+        "openIn": ["desktop"],
+    }
+
+    server.publish_status("test message", StatusLevel.Info, "some-id")
+    server.broadcast_time(time.time_ns())
+    server.broadcast_playback_state(
+        PlaybackState(
+            status=PlaybackStatus.Paused,
+            playback_speed=1.0,
+            current_time=time.time_ns(),
+            did_seek=False,
+            request_id=None,
+        )
+    )
+    server.remove_status(["some-id"])
+    server.clear_session("new-session")
+    server.stop()
+
+
+def test_server_listener_provides_default_implementation() -> None:
+    class DefaultServerListener(ServerListener):
+        pass
+
+    listener = DefaultServerListener()
+
+    listener.on_parameters_subscribe(["test"])
+    listener.on_parameters_unsubscribe(["test"])
+
+
+def test_services_interface() -> None:
+    test_svc = Service(
+        name="test",
+        schema=ServiceSchema(name="test-schema"),
+        handler=lambda *_: b"{}",
+    )
+    test2_svc = Service(
+        name="test2",
+        schema=ServiceSchema(name="test-schema"),
+        handler=lambda *_: b"{}",
+    )
+    server = start_server(
+        port=0,
+        capabilities=[Capability.Services],
+        supported_encodings=["json"],
+        services=[test_svc],
+    )
+
+    # Add a new service.
+    server.add_services([test2_svc])
+
+    # Can't add a service with the same name.
+    with pytest.raises(RuntimeError):
+        server.add_services([test_svc])
+
+    # Remove services.
+    server.remove_services(["test", "test2"])
+
+    # Re-add a service.
+    server.add_services([test_svc])
+
+    server.stop()
+
+
+def test_context_can_be_attached_to_server() -> None:
+    ctx1 = Context()
+    ctx2 = Context()
+
+    server1 = start_server(port=0, context=ctx1)
+    server2 = start_server(port=0, context=ctx2)
+
+    ch1 = Channel("/1", context=ctx1)
+    ch2 = Channel("/2", context=ctx2)
+    ch1.log("test")
+    ch2.log("test")
+
+    server1.stop()
+    server2.stop()
+
+
+@typing.no_type_check
+def test_server_with_invalid_playback_time_range() -> None:
+    with pytest.raises(TypeError):
+        # Tuple of a single element
+        start_server(port=0, playback_time_range=(123,))
+
+    with pytest.raises(TypeError):
+        # Tuple with invalid types
+        start_server(port=0, playback_time_range=("not-a-time", None))
+
+    with pytest.raises(TypeError):
+        # Not a tuple
+        start_server(port=0, playback_time_range=23443)
+
+    with pytest.raises(TypeError):
+        # Tuple with too many elements
+        start_server(port=0, playback_time_range=(123, 456, 789))
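Taken together, these tests outline the WebSocket server lifecycle. A minimal sketch under the same assumptions as the tests (ephemeral port, default context); the session ID and topic name are illustrative.

from foxglove import Channel, start_server

# Bind to an ephemeral port; the chosen port is available as server.port.
server = start_server(port=0, session_id="demo-session")

# Link that opens app.foxglove.dev against ws://127.0.0.1:<port>.
print(server.app_url())

# Channels created in the default context are visible to this server.
chan = Channel("/demo", schema={"type": "object"})
chan.log({"foo": 1})

server.stop()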