nominal 1.109.0__py3-none-any.whl → 1.111.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +31 -0
- nominal/core/_checklist_types.py +48 -0
- nominal/core/_clientsbunch.py +0 -3
- nominal/core/_utils/api_tools.py +16 -2
- nominal/core/_video_types.py +16 -0
- nominal/core/asset.py +34 -18
- nominal/core/bounds.py +8 -1
- nominal/core/channel.py +0 -15
- nominal/core/checklist.py +11 -25
- nominal/core/client.py +25 -29
- nominal/core/data_review.py +32 -11
- nominal/core/dataset.py +41 -2
- nominal/core/dataset_file.py +6 -0
- nominal/core/datasource.py +0 -3
- nominal/core/run.py +25 -11
- nominal/core/streaming_checklist.py +25 -0
- nominal/core/video.py +71 -13
- nominal/core/video_file.py +62 -2
- nominal/experimental/migration/migration_utils.py +343 -42
- {nominal-1.109.0.dist-info → nominal-1.111.0.dist-info}/METADATA +2 -2
- {nominal-1.109.0.dist-info → nominal-1.111.0.dist-info}/RECORD +24 -21
- {nominal-1.109.0.dist-info → nominal-1.111.0.dist-info}/WHEEL +0 -0
- {nominal-1.109.0.dist-info → nominal-1.111.0.dist-info}/entry_points.txt +0 -0
- {nominal-1.109.0.dist-info → nominal-1.111.0.dist-info}/licenses/LICENSE +0 -0
nominal/core/dataset.py
CHANGED

@@ -26,6 +26,8 @@ from nominal.core.filetype import FileType, FileTypes
 from nominal.core.log import LogPoint, _write_logs
 from nominal.ts import (
     _AnyTimestampType,
+    _InferrableTimestampType,
+    _SecondsNanos,
     _to_typed_timestamp_type,
 )

@@ -93,6 +95,32 @@ class Dataset(DataSource, RefreshableMixin[scout_catalog.EnrichedDataset]):
         updated_dataset = self._clients.catalog.update_dataset_metadata(self._clients.auth_header, self.rid, request)
         return self._refresh_from_api(updated_dataset)

+    def update_bounds(
+        self,
+        *,
+        start: _InferrableTimestampType,
+        end: _InferrableTimestampType,
+    ) -> Self:
+        """Update the bounds (start and end timestamps) of the dataset.
+        Updates the current instance, and returns it.
+
+        Args:
+            start: The start timestamp of the dataset bounds. Can be a datetime, ISO 8601 string,
+                or integer nanoseconds since epoch.
+            end: The end timestamp of the dataset bounds. Can be a datetime, ISO 8601 string,
+                or integer nanoseconds since epoch.
+
+        Returns:
+            The updated Dataset instance with new bounds.
+        """
+        bounds = Bounds(
+            start=_SecondsNanos.from_flexible(start).to_nanoseconds(),
+            end=_SecondsNanos.from_flexible(end).to_nanoseconds(),
+        )
+        request = scout_catalog.UpdateBoundsRequest(bounds=bounds._to_conjure())
+        updated_dataset = self._clients.catalog.update_bounds(self._clients.auth_header, request, self.rid)
+        return self._refresh_from_api(updated_dataset)
+
     def _handle_ingest_response(self, response: ingest_api.IngestResponse) -> DatasetFile:
         if response.details.dataset is None:
             raise ValueError(f"Expected response to provide dataset details, received: {response.details.type}")

@@ -216,6 +244,8 @@ class Dataset(DataSource, RefreshableMixin[scout_catalog.EnrichedDataset]):
         API, making it useful for use cases where network connection drops during streaming and a backup file needs
         to be created.

+        For struct columns, values should be converted to JSON strings and wrapped in the JsonStruct record type.
+
         If this schema is not used, will result in a failed ingestion.
         {
             "type": "record",

@@ -234,8 +264,15 @@ class Dataset(DataSource, RefreshableMixin[scout_catalog.EnrichedDataset]):
             },
             {
                 "name": "values",
-                "type": {"type": "array", "items": [
-
+                "type": {"type": "array", "items": [
+                    "double",
+                    "string",
+                    "long",
+                    {"type": "record", "name": "DoubleArray", "fields": [{"name": "items", "type": {"type": "array", "items": "double"}}]},
+                    {"type": "record", "name": "StringArray", "fields": [{"name": "items", "type": {"type": "array", "items": "string"}}]},
+                    {"type": "record", "name": "JsonStruct", "fields": [{"name": "json", "type": "string"}]}
+                ]},
+                "doc": "Array of values. Can be doubles, longs, strings, arrays, or JSON structs",
             },
             {
                 "name": "tags",

@@ -246,6 +283,8 @@ class Dataset(DataSource, RefreshableMixin[scout_catalog.EnrichedDataset]):
             ],
         }

+        Note: The previous schema with only "double" and "string" value types is still fully supported.
+
         Args:
             path: Path to the .avro file to upload
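Usage sketch for the new Dataset.update_bounds API. The helper function and timestamp values below are illustrative and not part of this release; per the docstring above, start and end accept a datetime, an ISO 8601 string, or integer nanoseconds since epoch.

from __future__ import annotations

from datetime import datetime, timezone

from nominal.core.dataset import Dataset


def pin_bounds(dataset: Dataset) -> Dataset:
    """Illustrative: pin a dataset's bounds to a fixed UTC day."""
    return dataset.update_bounds(
        start=datetime(2024, 1, 1, tzinfo=timezone.utc),  # datetime input
        end="2024-01-02T00:00:00Z",  # ISO 8601 input; integer nanoseconds also accepted
    )

Because update_bounds refreshes the instance from the catalog API before returning, the returned handle reflects the new bounds immediately.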
nominal/core/dataset_file.py
CHANGED

@@ -280,6 +280,8 @@ class IngestStatus(Enum):
     SUCCESS = "SUCCESS"
     IN_PROGRESS = "IN_PROGRESS"
     FAILED = "FAILED"
+    DELETION_IN_PROGRESS = "DELETION_IN_PROGRESS"
+    DELETED = "DELETED"

     @classmethod
     def _from_conjure(cls, status: api.IngestStatusV2) -> IngestStatus:

@@ -289,6 +291,10 @@ class IngestStatus(Enum):
             return cls.IN_PROGRESS
         elif status.error is not None:
             return cls.FAILED
+        elif status.deletion_in_progress is not None:
+            return cls.DELETION_IN_PROGRESS
+        elif status.deleted is not None:
+            return cls.DELETED
         raise ValueError(f"Unknown ingest status: {status.type}")
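The two new members surface deletion state to code that polls file ingestion. A minimal sketch of distinguishing terminal from transitional states (the helper is illustrative, assuming DELETED is terminal like SUCCESS and FAILED):

from nominal.core.dataset_file import IngestStatus


def is_terminal(status: IngestStatus) -> bool:
    # DELETION_IN_PROGRESS, like IN_PROGRESS, may still change; the rest are final.
    return status in (IngestStatus.SUCCESS, IngestStatus.FAILED, IngestStatus.DELETED)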
nominal/core/datasource.py
CHANGED

@@ -18,7 +18,6 @@ from nominal_api import (
     storage_writer_api,
     timeseries_channelmetadata,
     timeseries_channelmetadata_api,
-    timeseries_logicalseries,
     timeseries_metadata,
     timeseries_metadata_api,
     upload_api,

@@ -53,8 +52,6 @@ class DataSource(HasRid):
         @property
         def datasource(self) -> scout_datasource.DataSourceService: ...
         @property
-        def logical_series(self) -> timeseries_logicalseries.LogicalSeriesService: ...
-        @property
         def units(self) -> scout.UnitsService: ...
         @property
         def ingest(self) -> ingest_api.IngestService: ...
nominal/core/run.py
CHANGED

@@ -3,16 +3,17 @@ from __future__ import annotations
 from dataclasses import dataclass, field
 from datetime import datetime, timedelta
 from types import MappingProxyType
-from typing import Iterable, Mapping, Protocol, Sequence, cast
+from typing import TYPE_CHECKING, Iterable, Mapping, Protocol, Sequence, cast

 from nominal_api import (
+    event,
+    scout,
     scout_asset_api,
+    scout_assets,
     scout_run_api,
 )
 from typing_extensions import Self

-from nominal.core import asset as core_asset
-from nominal.core._clientsbunch import HasScoutParams
 from nominal.core._event_types import EventType
 from nominal.core._utils.api_tools import (
     HasRid,

@@ -20,16 +21,20 @@ from nominal.core._utils.api_tools import (
     LinkDict,
     RefreshableMixin,
     create_links,
+    filter_scopes,
     rid_from_instance_or_string,
 )
-from nominal.core.asset import _filter_scopes
 from nominal.core.attachment import Attachment, _iter_get_attachments
 from nominal.core.connection import Connection, _get_connections
 from nominal.core.dataset import Dataset, _DatasetWrapper, _get_datasets
+from nominal.core.datasource import DataSource
 from nominal.core.event import Event, _create_event
 from nominal.core.video import Video, _get_video
 from nominal.ts import IntegralNanosecondsDuration, IntegralNanosecondsUTC, _SecondsNanos, _to_api_duration

+if TYPE_CHECKING:
+    from nominal.core.asset import Asset
+

 @dataclass(frozen=True)
 class Run(HasRid, RefreshableMixin[scout_run_api.Run], _DatasetWrapper):

@@ -48,11 +53,17 @@ class Run(HasRid, RefreshableMixin[scout_run_api.Run], _DatasetWrapper):
     _clients: _Clients = field(repr=False)

     class _Clients(
-
-
+        Attachment._Clients,
+        DataSource._Clients,
+        Video._Clients,
         Protocol,
     ):
-
+        @property
+        def assets(self) -> scout_assets.AssetService: ...
+        @property
+        def event(self) -> event.EventService: ...
+        @property
+        def run(self) -> scout.RunService: ...

     @property
     def nominal_url(self) -> str:

@@ -105,7 +116,7 @@ class Run(HasRid, RefreshableMixin[scout_run_api.Run], _DatasetWrapper):
         if len(api_run.assets) > 1:
             raise RuntimeError("Can't retrieve dataset scopes on multi-asset runs")

-        return
+        return filter_scopes(api_run.asset_data_scopes, "dataset")

     def _list_datasource_rids(
         self, datasource_type: str | None = None, property_name: str | None = None

@@ -350,13 +361,16 @@ class Run(HasRid, RefreshableMixin[scout_run_api.Run], _DatasetWrapper):
         """List a sequence of Attachments associated with this Run."""
         return list(self._iter_list_attachments())

-    def _iter_list_assets(self) -> Iterable[
+    def _iter_list_assets(self) -> Iterable["Asset"]:
+        from nominal.core.asset import Asset
+
+        clients = cast(Asset._Clients, self._clients)
         run = self._get_latest_api()
         assets = self._clients.assets.get_assets(self._clients.auth_header, run.assets)
         for a in assets.values():
-            yield
+            yield Asset._from_conjure(clients, a)

-    def list_assets(self) -> Sequence[
+    def list_assets(self) -> Sequence["Asset"]:
         """List assets associated with this run."""
         return list(self._iter_list_assets())
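With list_assets now annotated to return Sequence[Asset] (via the new TYPE_CHECKING import plus the deferred import inside _iter_list_assets, which breaks the run/asset import cycle), callers get typed Asset handles. An illustrative sketch; the helper function is not part of this release:

from __future__ import annotations

from nominal.core.run import Run


def print_run_assets(run: Run) -> None:
    # Each item is a typed Asset handle; printing its RID is illustrative.
    for asset in run.list_assets():
        print(asset.rid)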
nominal/core/streaming_checklist.py
ADDED

@@ -0,0 +1,25 @@
+from __future__ import annotations
+
+from typing import Iterable, Protocol
+
+from nominal_api import scout_checklistexecution_api
+
+from nominal.core._clientsbunch import HasScoutParams
+from nominal.core._utils.pagination_tools import (
+    list_streaming_checklists_for_asset_paginated,
+    list_streaming_checklists_paginated,
+)
+
+
+class _Clients(HasScoutParams, Protocol):
+    @property
+    def checklist_execution(self) -> scout_checklistexecution_api.ChecklistExecutionService: ...
+
+
+def _iter_list_streaming_checklists(
+    clients: _Clients,
+    asset_rid: str | None = None,
+) -> Iterable[str]:
+    if asset_rid is None:
+        return list_streaming_checklists_paginated(clients.checklist_execution, clients.auth_header)
+    return list_streaming_checklists_for_asset_paginated(clients.checklist_execution, clients.auth_header, asset_rid)
nominal/core/video.py
CHANGED

@@ -8,9 +8,9 @@ from dataclasses import dataclass, field
 from datetime import datetime, timedelta
 from io import BytesIO, TextIOBase, TextIOWrapper
 from types import MappingProxyType
-from typing import BinaryIO, Mapping, Protocol, Sequence
+from typing import BinaryIO, Mapping, Protocol, Sequence, overload

-from nominal_api import api, ingest_api, scout_video, scout_video_api, upload_api
+from nominal_api import api, ingest_api, scout_catalog, scout_video, scout_video_api, upload_api
 from typing_extensions import Self

 from nominal.core._clientsbunch import HasScoutParams

@@ -44,6 +44,8 @@ class Video(HasRid, RefreshableMixin[scout_video_api.Video]):
         def ingest(self) -> ingest_api.IngestService: ...
         @property
         def video_file(self) -> scout_video.VideoFileService: ...
+        @property
+        def catalog(self) -> scout_catalog.CatalogService: ...

     def poll_until_ingestion_completed(self, interval: timedelta = timedelta(seconds=1)) -> None:
         """Block until video ingestion has completed.

@@ -117,18 +119,38 @@ class Video(HasRid, RefreshableMixin[scout_video_api.Video]):
         """Unarchives this video, allowing it to show up in the 'All Videos' pane in the UI."""
         self._clients.video.unarchive(self._clients.auth_header, self.rid)

+    @overload
+    def add_file(
+        self,
+        path: PathLike,
+        *,
+        start: datetime | IntegralNanosecondsUTC,
+        description: str | None = None,
+    ) -> VideoFile: ...
+
+    @overload
     def add_file(
         self,
         path: PathLike,
+        *,
+        frame_timestamps: Sequence[IntegralNanosecondsUTC],
+        description: str | None = None,
+    ) -> VideoFile: ...
+
+    def add_file(
+        self,
+        path: PathLike,
+        *,
         start: datetime | IntegralNanosecondsUTC | None = None,
         frame_timestamps: Sequence[IntegralNanosecondsUTC] | None = None,
         description: str | None = None,
     ) -> VideoFile:
-        """Append to a video from a file-path to H264-encoded video data.
+        """Append to a video from a file-path to H264-encoded video data. Only one of start or frame_timestamps
+        is allowed.

         Args:
             path: Path to the video file to add to an existing video within Nominal
-            start: Starting timestamp of the video file in absolute UTC time
+            start: Starting timestamp of the video file in absolute UTC time.
             frame_timestamps: Per-frame absolute nanosecond timestamps. Most usecases should instead use the 'start'
                 parameter, unless precise per-frame metadata is available and desired.
             description: Description of the video file.

@@ -141,16 +163,46 @@ class Video(HasRid, RefreshableMixin[scout_video_api.Video]):
         file_type = FileType.from_video(path)

         with path.open("rb") as video_file:
-
-
-
-
-
-
-
-
+            if start is not None:
+                return self.add_from_io(
+                    video_file,
+                    name=path_upload_name(path, file_type),
+                    start=start,
+                    description=description,
+                    file_type=file_type,
+                )
+            elif frame_timestamps is not None:
+                return self.add_from_io(
+                    video_file,
+                    name=path_upload_name(path, file_type),
+                    frame_timestamps=frame_timestamps,
+                    description=description,
+                    file_type=file_type,
+                )
+            else:  # This should never be reached due to the validation above
+                raise ValueError("Either 'start' or 'frame_timestamps' must be provided")

-
+    @overload
+    def add_from_io(
+        self,
+        video: BinaryIO,
+        name: str,
+        *,
+        start: datetime | IntegralNanosecondsUTC,
+        description: str | None = None,
+        file_type: tuple[str, str] | FileType = FileTypes.MP4,
+    ) -> VideoFile: ...
+
+    @overload
+    def add_from_io(
+        self,
+        video: BinaryIO,
+        name: str,
+        *,
+        frame_timestamps: Sequence[IntegralNanosecondsUTC],
+        description: str | None = None,
+        file_type: tuple[str, str] | FileType = FileTypes.MP4,
+    ) -> VideoFile: ...

     def add_from_io(
         self,

@@ -179,6 +231,12 @@ class Video(HasRid, RefreshableMixin[scout_video_api.Video]):
         if isinstance(video, TextIOBase):
             raise TypeError(f"video {video} must be open in binary mode, rather than text mode")

+        # Validation: ensure exactly one of start or frame_timestamps is provided
+        if start is None and frame_timestamps is None:
+            raise ValueError("Either 'start' or 'frame_timestamps' must be provided")
+        if start is not None and frame_timestamps is not None:
+            raise ValueError("Only one of 'start' or 'frame_timestamps' may be provided")
+
         timestamp_manifest = _build_video_file_timestamp_manifest(
             self._clients.auth_header, self._clients.workspace_rid, self._clients.upload, start, frame_timestamps
         )
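With the new overloads, each add_file call picks exactly one timing mode; passing both or neither of start and frame_timestamps now raises ValueError. An illustrative sketch, with placeholder paths and timestamps:

from __future__ import annotations

from datetime import datetime, timezone
from pathlib import Path

from nominal.core.video import Video


def append_clip(video: Video, clip: Path) -> None:
    # Common case: one absolute start time; frame times are derived from the media.
    video.add_file(clip, start=datetime(2024, 1, 1, tzinfo=timezone.utc))


def append_clip_per_frame(video: Video, clip: Path, frame_ns: list[int]) -> None:
    # Precise case: explicit per-frame nanosecond timestamps.
    video.add_file(clip, frame_timestamps=frame_ns)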
nominal/core/video_file.py
CHANGED

@@ -4,13 +4,14 @@ import logging
 import time
 from dataclasses import dataclass, field
 from datetime import datetime, timedelta
-from typing import Protocol
+from typing import Protocol, Tuple

-from nominal_api import scout_video, scout_video_api
+from nominal_api import scout_catalog, scout_video, scout_video_api
 from typing_extensions import Self

 from nominal.core._clientsbunch import HasScoutParams
 from nominal.core._utils.api_tools import HasRid, RefreshableMixin
+from nominal.core._video_types import McapVideoDetails, TimestampOptions
 from nominal.core.exceptions import NominalIngestError, NominalIngestFailed
 from nominal.ts import IntegralNanosecondsUTC, _SecondsNanos

@@ -28,6 +29,8 @@ class VideoFile(HasRid, RefreshableMixin[scout_video_api.VideoFile]):
     class _Clients(HasScoutParams, Protocol):
         @property
         def video_file(self) -> scout_video.VideoFileService: ...
+        @property
+        def catalog(self) -> scout_catalog.CatalogService: ...

     def archive(self) -> None:
         """Archive the video file, disallowing it to appear when playing back the video"""

@@ -128,6 +131,63 @@ class VideoFile(HasRid, RefreshableMixin[scout_video_api.VideoFile]):

         time.sleep(interval.total_seconds())

+    def _get_file_ingest_options(self) -> Tuple[McapVideoDetails | None, TimestampOptions | None]:
+        """Get ingest options metadata for this video file.
+
+        Retrieves metadata about the video file (such as timestamps, frame rate, and scale factor)
+        that can be used when ingesting this video into a video channel. The returned options
+        are either MCAP or MISC metadata depending on the video file type.
+
+        Returns:
+            Video file ingest options (either McapVideoFileMetadata or MiscVideoFileMetadata).
+
+        Raises:
+            ValueError: If the video file has an unexpected timestamp manifest type.
+        """
+        api_video_file = self._get_latest_api()
+        if api_video_file.origin_metadata.timestamp_manifest.type == "mcap":
+            mcap_manifest = api_video_file.origin_metadata.timestamp_manifest.mcap
+            topic = (
+                mcap_manifest.mcap_channel_locator.topic
+                if mcap_manifest and mcap_manifest.mcap_channel_locator and mcap_manifest.mcap_channel_locator.topic
+                else ""
+            )
+            mcap_video_details = McapVideoDetails(
+                mcap_channel_locator_topic=topic,
+            )
+            return (mcap_video_details, None)
+        else:
+            # TODO(sean): We need to add support for if starting timestamp isn't present, aka we have frame timestamps
+            # from S3.
+            if api_video_file.origin_metadata.timestamp_manifest.no_manifest is None:
+                raise NotImplementedError(
+                    f"Expected no_manifest timestamp manifest for non-MCAP video file, "
+                    f"but got type: {api_video_file._origin_metadata._timestamp_manifest._type}"
+                )
+            if api_video_file.segment_metadata is None:
+                raise ValueError(
+                    "Expected segment metadata for non-MCAP video file: %s", api_video_file.segment_metadata
+                )
+            if (
+                api_video_file.segment_metadata.max_absolute_timestamp is None
+                or api_video_file.segment_metadata.scale_factor is None
+                or api_video_file.segment_metadata.media_frame_rate is None
+            ):
+                raise ValueError(
+                    "Not all timestamp metadata is populated in segment metadata: %s", api_video_file.segment_metadata
+                )
+            video_file_ingest_options = TimestampOptions(
+                starting_timestamp=_SecondsNanos.from_api(
+                    api_video_file.origin_metadata.timestamp_manifest.no_manifest.starting_timestamp
+                ).to_nanoseconds(),
+                ending_timestamp=_SecondsNanos.from_api(
+                    api_video_file.segment_metadata.max_absolute_timestamp
+                ).to_nanoseconds(),
+                scaling_factor=api_video_file.segment_metadata.scale_factor,
+                true_framerate=api_video_file.segment_metadata.media_frame_rate,
+            )
+            return (None, video_file_ingest_options)
+
     @classmethod
     def _from_conjure(cls, clients: _Clients, video_file: scout_video_api.VideoFile) -> Self:
         return cls(