@neuracore/types 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Neuracore
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,74 @@
+ # Neuracore Types
+
+ Shared type definitions for the Neuracore platform. This package maintains a single source of truth for data types in Python (Pydantic models) and automatically generates the matching TypeScript types.
+
+ ## Overview
+
+ - **Python Package**: `neuracore-types` - Pydantic models for the Python backend
+ - **NPM Package**: `@neuracore/types` - TypeScript types for the frontend
+
+ ## Installation
+
+ ### Python
+
+ ```bash
+ pip install neuracore-types
+ ```
+
+ ### TypeScript/JavaScript
+
+ ```bash
+ npm install @neuracore/types
+ # or
+ yarn add @neuracore/types
+ # or
+ pnpm add @neuracore/types
+ ```
+
+ ## Development
+
+ ### Setup
+
+ ```bash
+ # Clone the repository
+ git clone https://github.com/neuracoreai/neuracore_types.git
+ cd neuracore_types
+
+ # Install Python dependencies
+ pip install -e ".[dev]"
+
+ # Install Node dependencies
+ npm install
+ ```
+
+ ### Generate TypeScript Types
+
+ The TypeScript types are automatically generated from the Python Pydantic models:
+
+ ```bash
+ npm install json-schema-to-typescript
+ python scripts/generate_types.py
+ ```
+
+ This will:
+ 1. Read the Pydantic models from `neuracore_types/neuracore_types.py`
+ 2. Generate TypeScript definitions in `typescript/neuracore_types.ts`
+ 3. Create an index file at `typescript/index.ts`
+
+ ### Build TypeScript Package
+
+ ```bash
+ npm run build
+ ```
+
+ This compiles the TypeScript files to JavaScript and generates type declarations in the `dist/` directory.
+
+ ## CI/CD
+
+ The repository includes GitHub Actions workflows that:
+
+ 1. **On every push to `main` and on every pull request**:
+    - Automatically generate TypeScript types from the Python models
+    - Build and validate both packages
+    - Publish the Python package to PyPI
+    - Publish the NPM package to the npm registry
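The README's generation step leans on `json-schema-to-typescript`, which the published `package.json` at the end of this diff lists as a dependency. As a minimal sketch of what that step looks like, assuming the Pydantic models have first been exported as a JSON Schema file (`scripts/generate_types.py` itself is not part of the published package, so the `schema.json` path and the `NeuracoreTypes` root name here are illustrative):

```typescript
// Sketch only: mirrors what a generation pipeline built on
// json-schema-to-typescript typically does. Paths and the root type
// name are assumptions, not taken from the repository.
import { readFileSync, writeFileSync } from "node:fs";
import { compile } from "json-schema-to-typescript";

async function generate(): Promise<void> {
  // Assumed input: a JSON Schema dumped from the Pydantic models.
  const schema = JSON.parse(readFileSync("schema.json", "utf8"));
  // compile() returns the TypeScript source for the schema as a string.
  const ts = await compile(schema, "NeuracoreTypes", { bannerComment: "" });
  writeFileSync("typescript/neuracore_types.ts", ts);
  // Barrel file matching dist/index.d.ts in this package.
  writeFileSync("typescript/index.ts", "export * from './neuracore_types';\n");
}

generate().catch((err) => {
  console.error(err);
  process.exit(1);
});
```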
package/dist/index.d.ts ADDED
@@ -0,0 +1 @@
+ export * from './neuracore_types';
package/dist/index.js ADDED
@@ -0,0 +1,18 @@
+ "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+       desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __exportStar = (this && this.__exportStar) || function(m, exports) {
+     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ // Auto-generated index file
+ __exportStar(require("./neuracore_types"), exports);
package/dist/neuracore_types.d.ts ADDED
@@ -0,0 +1,578 @@
+ /**
+ /* This file was automatically generated from pydantic models by running pydantic2ts.
+ /* Do not modify it by hand - just update the pydantic models and then re-run the script
+ */
+ /**
+  * Enumerates the supported track kinds for streaming.
+  */
+ export type TrackKind = "joints" | "rgb" | "depth" | "language" | "gripper" | "end_effector_pose" | "parallel_gripper_open_amount" | "point_cloud" | "pose" | "custom";
+ /**
+  * Enumerates the types of signaling messages for WebRTC handshakes.
+  *
+  * These types are used to identify the purpose of a message sent through
+  * the signaling server during connection establishment.
+  */
+ export type MessageType = "offer" | "answer" | "ice" | "open_connection";
+ /**
+  * Enumeration of supported data types in the Neuracore system.
+  *
+  * Defines the standard data categories used for dataset organization,
+  * model training, and data processing pipelines.
+  */
+ export type DataType = "joint_positions" | "joint_velocities" | "joint_torques" | "joint_target_positions" | "end_effectors" | "end_effector_poses" | "parallel_gripper_open_amounts" | "rgb_image" | "depth_image" | "point_cloud" | "poses" | "language" | "custom";
+ /**
+  * Enumerates video format styles over a WebRTC connection.
+  */
+ export type VideoFormat = "WEB_RTC_NEGOTIATED" | "NEURACORE_CUSTOM";
+ /**
+  * Types of recording lifecycle notifications.
+  */
+ export type RecordingNotificationType = "init" | "requested" | "start" | "stop" | "saved" | "discarded" | "expired";
+ /**
+  * Represents an available robot, including all its running instances.
+  *
+  * Attributes:
+  *     robot_id: The unique identifier for the robot model/type.
+  *     instances: A dictionary of all available instances for this robot,
+  *         keyed by instance ID.
+  */
+ export interface AvailableRobot {
+   robot_id: string;
+   instances: {
+     [k: string]: AvailableRobotInstance;
+   };
+ }
+ /**
+  * Represents a single, available instance of a robot.
+  *
+  * Attributes:
+  *     robot_instance: The unique identifier for this robot instance.
+  *     tracks: A dictionary of available media stream tracks for this instance.
+  *     connections: The number of current connections to this instance.
+  */
+ export interface AvailableRobotInstance {
+   robot_instance: number;
+   tracks: {
+     [k: string]: RobotStreamTrack[];
+   };
+   connections: number;
+ }
+ /**
+  * Metadata for a robot's media stream track.
+  *
+  * This model holds all the necessary information to identify and manage
+  * a single media track (e.g., a video or audio feed) from a specific
+  * robot instance.
+  *
+  * Attributes:
+  *     robot_id: The unique identifier of the robot providing the stream.
+  *     robot_instance: The specific instance number of the robot.
+  *     stream_id: The identifier for the overall media stream session.
+  *     kind: The type of media track, typically 'audio' or 'video'.
+  *     label: A human-readable label for the track (e.g., 'front_camera').
+  *     mid: The media ID used in SDP, essential for WebRTC negotiation.
+  *     id: A unique identifier for this track metadata object.
+  *     created_at: The UTC timestamp when this track metadata was created.
+  */
+ export interface RobotStreamTrack {
+   robot_id: string;
+   robot_instance: number;
+   stream_id: string;
+   kind: TrackKind;
+   label: string;
+   mid: string;
+   id?: string;
+   created_at?: string;
+ }
+ /**
+  * Represents an update on the available capacity of all robots.
+  *
+  * This model is used to broadcast the current state of all available
+  * robots and their instances.
+  *
+  * Attributes:
+  *     robots: A list of all available robots and their instances.
+  */
+ export interface AvailableRobotCapacityUpdate {
+   robots: AvailableRobot[];
+ }
+ /**
+  * Base payload for recording update notifications.
+  *
+  * Contains the minimum information needed to identify a recording
+  * and the robot instance it belongs to.
+  */
+ export interface BaseRecodingUpdatePayload {
+   recording_id: string;
+   robot_id: string;
+   instance: number;
+ }
+ /**
+  * Camera sensor data including images and calibration information.
+  *
+  * Contains image data along with camera intrinsic and extrinsic parameters
+  * for 3D reconstruction and computer vision applications. The frame field
+  * is populated during dataset iteration for efficiency.
+  */
+ export interface CameraData {
+   timestamp?: number;
+   frame_idx?: number;
+   extrinsics?: number[][] | null;
+   intrinsics?: number[][] | null;
+   frame?: unknown;
+ }
+ /**
+  * Generic container for application-specific data types.
+  *
+  * Provides a flexible way to include custom sensor data or application-specific
+  * information that doesn't fit into the standard data categories.
+  */
+ export interface CustomData {
+   timestamp?: number;
+   data: unknown;
+ }
+ /**
+  * Statistical summary of data dimensions and distributions.
+  *
+  * Contains statistical information about data arrays including means,
+  * standard deviations, counts, and maximum lengths for normalization
+  * and model configuration purposes.
+  *
+  * Attributes:
+  *     mean: List of means for each data dimension
+  *     std: List of standard deviations for each data dimension
+  *     count: List of counts for each data dimension
+  *     max_len: Maximum length of the data arrays
+  *     robot_to_ncdata_keys: Mapping of robot ids to their associated
+  *         data keys for this data type
+  */
+ export interface DataItemStats {
+   mean?: number[];
+   std?: number[];
+   count?: number[];
+   max_len?: number;
+   robot_to_ncdata_keys?: {
+     [k: string]: string[];
+   };
+ }
+ /**
+  * Represents a dataset of robot demonstrations.
+  *
+  * A dataset groups related robot demonstrations together and maintains metadata
+  * about the collection as a whole.
+  *
+  * Attributes:
+  *     id: Unique identifier for the dataset.
+  *     name: Human-readable name for the dataset.
+  *     created_at: Unix timestamp of dataset creation.
+  *     modified_at: Unix timestamp of last modification.
+  *     description: Optional description of the dataset.
+  *     tags: List of tags for categorizing the dataset.
+  *     recording_ids: List of recording IDs in this dataset
+  *     demonstration_ids: List of demonstration IDs in this dataset.
+  *     num_demonstrations: Total number of demonstrations.
+  *     total_duration_seconds: Total duration of all demonstrations.
+  *     size_bytes: Total size of all demonstrations.
+  *     is_shared: Whether the dataset is shared with other users.
+  *     metadata: Additional arbitrary metadata.
+  *     synced_dataset_ids: List of synced dataset IDs in this dataset.
+  *         They point to synced datasets that synchronized
+  *         this dataset at a particular frequency.
+  */
+ export interface Dataset {
+   id: string;
+   name: string;
+   created_at: number;
+   modified_at: number;
+   description?: string | null;
+   tags?: string[];
+   recording_ids?: string[];
+   num_demonstrations?: number;
+   total_duration_seconds?: number;
+   size_bytes?: number;
+   is_shared?: boolean;
+   metadata?: {
+     [k: string]: unknown;
+   };
+   synced_dataset_ids?: {
+     [k: string]: unknown;
+   };
+   all_data_types?: {
+     [k: string]: number;
+   };
+   common_data_types?: {
+     [k: string]: number;
+   };
+   recording_ids_in_bucket?: boolean;
+ }
+ /**
+  * Comprehensive description of dataset contents and statistics.
+  *
+  * Provides metadata about a complete dataset including statistical summaries
+  * for all data types, maximum counts for variable-length data, and methods
+  * for determining which data types are present.
+  */
+ export interface DatasetDescription {
+   joint_positions?: DataItemStats;
+   joint_velocities?: DataItemStats;
+   joint_torques?: DataItemStats;
+   joint_target_positions?: DataItemStats;
+   end_effector_states?: DataItemStats;
+   end_effector_poses?: DataItemStats;
+   parallel_gripper_open_amounts?: DataItemStats;
+   poses?: DataItemStats;
+   rgb_images?: DataItemStats;
+   depth_images?: DataItemStats;
+   point_clouds?: DataItemStats;
+   language?: DataItemStats;
+   custom_data?: {
+     [k: string]: DataItemStats;
+   };
+ }
+ /**
+  * End-effector state data including gripper and tool configurations.
+  *
+  * Contains the state of robot end-effectors such as gripper opening amounts,
+  * tool activations, or other end-effector specific parameters.
+  */
+ export interface EndEffectorData {
+   timestamp?: number;
+   open_amounts: {
+     [k: string]: number;
+   };
+ }
+ /**
+  * End-effector pose data.
+  *
+  * Contains the pose of end-effectors as a 7-element list containing the
+  * position and unit quaternion orientation [x, y, z, qx, qy, qz, qw].
+  */
+ export interface EndEffectorPoseData {
+   timestamp?: number;
+   poses: {
+     [k: string]: number[];
+   };
+ }
+ /**
+  * Represents a signaling message for the WebRTC handshake process.
+  *
+  * This message is exchanged between two peers via a signaling server to
+  * negotiate the connection details, such as SDP offers/answers and ICE
+  * candidates.
+  *
+  * Attributes:
+  *     from_id: The unique identifier of the sender peer.
+  *     to_id: The unique identifier of the recipient peer.
+  *     data: The payload of the message, typically an SDP string or a JSON
+  *         object with ICE candidate information.
+  *     connection_id: The unique identifier for the connection session.
+  *     type: The type of the handshake message, as defined by MessageType.
+  *     id: A unique identifier for the message itself.
+  */
+ export interface HandshakeMessage {
+   from_id: string;
+   to_id: string;
+   data: string;
+   connection_id: string;
+   type: MessageType;
+   id?: string;
+ }
+ /**
+  * Robot joint state data including positions, velocities, or torques.
+  *
+  * Represents joint-space data for robotic systems with support for named
+  * joints and additional auxiliary values. Used for positions, velocities,
+  * torques, and target positions.
+  */
+ export interface JointData {
+   timestamp?: number;
+   values: {
+     [k: string]: number;
+   };
+   additional_values?: {
+     [k: string]: number;
+   } | null;
+ }
+ /**
+  * Natural language instruction or description data.
+  *
+  * Contains text-based information such as task descriptions, voice commands,
+  * or other linguistic data associated with robot demonstrations.
+  */
+ export interface LanguageData {
+   timestamp?: number;
+   text: string;
+ }
+ /**
+  * Configuration specification for initializing Neuracore models.
+  *
+  * Defines the model architecture requirements including dataset characteristics,
+  * input/output data types, and prediction horizons for model initialization
+  * and training configuration.
+  */
+ export interface ModelInitDescription {
+   dataset_description: DatasetDescription;
+   input_data_types: DataType[];
+   output_data_types: DataType[];
+   output_prediction_horizon?: number;
+ }
+ /**
+  * Model inference output containing predictions and timing information.
+  *
+  * Represents the results of model inference including predicted outputs
+  * for each configured data type and optional timing information for
+  * performance monitoring.
+  */
+ export interface ModelPrediction {
+   outputs?: {
+     [k: string]: unknown;
+   };
+   prediction_time?: number | null;
+ }
+ /**
+  * Base class for all Neuracore data with automatic timestamping.
+  *
+  * Provides a common base for all data types in the system with automatic
+  * timestamp generation for temporal synchronization and data ordering.
+  */
+ export interface NCData {
+   timestamp?: number;
+ }
+ /**
+  * The details describing properties about the new connection.
+  *
+  * Attributes:
+  *     connection_token: The token used for security to establish the connection.
+  *     robot_id: The unique identifier for the robot to connect to
+  *     robot_instance: The identifier for the instance of the robot to connect to.
+  *     video_format: The type of video the consumer expects to receive.
+  */
+ export interface OpenConnectionDetails {
+   connection_token: string;
+   robot_id: string;
+   robot_instance: number;
+   video_format: VideoFormat;
+ }
+ /**
+  * Represents a request to open a new WebRTC connection.
+  *
+  * Attributes:
+  *     from_id: The unique identifier of the consumer peer.
+  *     to_id: The unique identifier of the producer peer.
+  *     robot_id: The unique identifier for the robot to be created.
+  *     robot_instance: The identifier for the instance of the robot to connect to.
+  *     video_format: The type of video the consumer expects to receive.
+  *     id: the identifier for this connection request.
+  *     created_at: when the request was created.
+  */
+ export interface OpenConnectionRequest {
+   from_id: string;
+   to_id: string;
+   robot_id: string;
+   robot_instance: number;
+   video_format: VideoFormat;
+   id?: string;
+   created_at?: string;
+ }
+ /**
+  * Open amount data for parallel end effector gripper.
+  *
+  * Contains the state of parallel gripper opening amounts.
+  */
+ export interface ParallelGripperOpenAmountData {
+   timestamp?: number;
+   open_amounts: {
+     [k: string]: number;
+   };
+ }
+ /**
+  * 3D point cloud data with optional RGB colouring and camera parameters.
+  *
+  * Represents 3D spatial data from depth sensors or LiDAR systems with
+  * optional colour information and camera calibration for registration.
+  */
+ export interface PointCloudData {
+   timestamp?: number;
+   points?: string | null;
+   rgb_points?: string | null;
+   extrinsics?: string | null;
+   intrinsics?: string | null;
+ }
+ /**
+  * 6DOF pose data for objects, end-effectors, or coordinate frames.
+  *
+  * Represents position and orientation information for tracking objects
+  * or robot components in 3D space. Poses are stored as dictionaries
+  * mapping pose names to [x, y, z, rx, ry, rz] values.
+  */
+ export interface PoseData {
+   timestamp?: number;
+   pose: {
+     [k: string]: number[];
+   };
+ }
+ /**
+  * Payload for recording request notifications.
+  *
+  * Contains information about who requested the recording and what
+  * data types should be captured.
+  */
+ export interface RecodingRequestedPayload {
+   recording_id: string;
+   robot_id: string;
+   instance: number;
+   created_by: string;
+   dataset_ids?: string[];
+   data_types?: DataType[];
+ }
+ /**
+  * Description of a single recording episode with statistics and counts.
+  *
+  * Provides metadata about an individual recording including data statistics,
+  * sensor counts, and episode length for analysis and processing.
+  */
+ export interface RecordingDescription {
+   joint_positions?: DataItemStats;
+   joint_velocities?: DataItemStats;
+   joint_torques?: DataItemStats;
+   joint_target_positions?: DataItemStats;
+   end_effector_states?: DataItemStats;
+   end_effector_poses?: DataItemStats;
+   parallel_gripper_open_amounts?: DataItemStats;
+   poses?: DataItemStats;
+   rgb_images?: DataItemStats;
+   depth_images?: DataItemStats;
+   point_clouds?: DataItemStats;
+   language?: DataItemStats;
+   episode_length?: number;
+   custom_data?: {
+     [k: string]: DataItemStats;
+   };
+ }
+ /**
+  * Notification message for recording lifecycle events.
+  *
+  * Used to communicate recording state changes across the system,
+  * including initialization, start/stop events, and final disposition.
+  */
+ export interface RecordingNotification {
+   type: RecordingNotificationType;
+   payload: RecordingStartPayload | RecodingRequestedPayload | (RecordingStartPayload | RecodingRequestedPayload)[] | BaseRecodingUpdatePayload;
+   id?: string;
+ }
+ /**
+  * Payload for recording start notifications.
+  *
+  * Extends the request payload with the actual start timestamp
+  * when recording begins.
+  */
+ export interface RecordingStartPayload {
+   recording_id: string;
+   robot_id: string;
+   instance: number;
+   created_by: string;
+   dataset_ids?: string[];
+   data_types?: DataType[];
+   start_time: number;
+ }
+ /**
+  * Represents the response from asserting a stream is alive.
+  *
+  * This is returned when a client pings a stream to keep it active.
+  *
+  * Attributes:
+  *     resurrected: A boolean indicating if the stream was considered dead
+  *         and has been successfully resurrected by this request.
+  */
+ export interface StreamAliveResponse {
+   resurrected: boolean;
+ }
+ /**
+  * Synchronized collection of all sensor data at a single time point.
+  *
+  * Represents a complete snapshot of robot state and sensor information
+  * at a specific timestamp. Used for creating temporally aligned datasets
+  * and ensuring consistent data relationships across different sensors.
+  */
+ export interface SyncPoint {
+   timestamp?: number;
+   joint_positions?: JointData | null;
+   joint_velocities?: JointData | null;
+   joint_torques?: JointData | null;
+   joint_target_positions?: JointData | null;
+   end_effectors?: EndEffectorData | null;
+   end_effector_poses?: EndEffectorPoseData | null;
+   parallel_gripper_open_amounts?: ParallelGripperOpenAmountData | null;
+   poses?: PoseData | null;
+   rgb_images?: {
+     [k: string]: CameraData;
+   } | null;
+   depth_images?: {
+     [k: string]: CameraData;
+   } | null;
+   point_clouds?: {
+     [k: string]: PointCloudData;
+   } | null;
+   language_data?: LanguageData | null;
+   custom_data?: {
+     [k: string]: CustomData;
+   } | null;
+   robot_id?: string | null;
+ }
+ /**
+  * Complete synchronized dataset containing a sequence of data points.
+  *
+  * Represents an entire recording or demonstration as a time-ordered sequence
+  * of synchronized data points with start and end timestamps for temporal
+  * reference.
+  */
+ export interface SyncedData {
+   frames: SyncPoint[];
+   start_time: number;
+   end_time: number;
+   robot_id: string;
+ }
+ /**
+  * Represents a dataset of robot demonstrations.
+  *
+  * A Synchronized dataset groups related robot demonstrations together
+  * and maintains metadata about the collection as a whole.
+  *
+  * Attributes:
+  *     id: Unique identifier for the synced dataset.
+  *     parent_id: Unique identifier of the corresponding dataset.
+  *     freq: Frequency at which dataset was processed.
+  *     name: Human-readable name for the dataset.
+  *     created_at: Unix timestamp of dataset creation.
+  *     modified_at: Unix timestamp of last modification.
+  *     description: Optional description of the dataset.
+  *     recording_ids: List of recording IDs in this dataset
+  *     num_demonstrations: Total number of demonstrations.
+  *     total_duration_seconds: Total duration of all demonstrations.
+  *     is_shared: Whether the dataset is shared with other users.
+  *     metadata: Additional arbitrary metadata.
+  */
+ export interface SyncedDataset {
+   id: string;
+   parent_id: string;
+   freq: number;
+   name: string;
+   created_at: number;
+   modified_at: number;
+   description?: string | null;
+   recording_ids?: string[];
+   num_demonstrations?: number;
+   num_processed_demonstrations?: number;
+   total_duration_seconds?: number;
+   is_shared?: boolean;
+   metadata?: {
+     [k: string]: unknown;
+   };
+   dataset_description?: DatasetDescription;
+   all_data_types?: {
+     [k: string]: number;
+   };
+   common_data_types?: {
+     [k: string]: number;
+   };
+ }
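Since everything above is declaration-only, a consumer interacts with these types purely at compile time. A hedged sketch of how the generated interfaces compose, with invented joint names, IDs, and timestamps; note the `payload` union on `RecordingNotification`, where among the non-array members only `RecordingStartPayload` carries `start_time`:

```typescript
// Illustrative consumer code, not part of the package. All values are
// made up for the example.
import type {
  JointData,
  RecordingNotification,
  RecordingStartPayload,
  SyncPoint,
} from "@neuracore/types";

const joints: JointData = {
  timestamp: 1718000000.25,
  values: { shoulder_pan: 0.12, elbow_flex: -0.87 },
};

// A synchronized snapshot pairing the joint reading with its robot.
const frame: SyncPoint = {
  timestamp: joints.timestamp,
  joint_positions: joints,
  robot_id: "arm-01",
};

// Structural guard over RecordingNotification["payload"]: rule out the
// array case, then check for the start-only field.
function isStartPayload(
  p: RecordingNotification["payload"],
): p is RecordingStartPayload {
  return !Array.isArray(p) && "start_time" in p;
}
```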
package/dist/neuracore_types.js ADDED
@@ -0,0 +1,8 @@
+ "use strict";
+ /* tslint:disable */
+ /* eslint-disable */
+ /**
+ /* This file was automatically generated from pydantic models by running pydantic2ts.
+ /* Do not modify it by hand - just update the pydantic models and then re-run the script
+ */
+ Object.defineProperty(exports, "__esModule", { value: true });
package/package.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "name": "@neuracore/types",
+   "version": "1.0.0",
+   "description": "Shared TypeScript type definitions for Neuracore robotics platform",
+   "main": "dist/index.js",
+   "types": "dist/index.d.ts",
+   "files": [
+     "dist"
+   ],
+   "scripts": {
+     "build": "tsc",
+     "prepublishOnly": "npm run build",
+     "test": "echo \"No tests yet\" && exit 0"
+   },
+   "repository": {
+     "type": "git",
+     "url": "git+https://github.com/neuracoreai/neuracore_types.git"
+   },
+   "keywords": [
+     "neuracore",
+     "robotics",
+     "types",
+     "typescript"
+   ],
+   "author": "Neuracore Team",
+   "license": "MIT",
+   "devDependencies": {
+     "typescript": "^5.3.0"
+   },
+   "publishConfig": {
+     "access": "public"
+   },
+   "dependencies": {
+     "json-schema-to-typescript": "^15.0.4"
+   }
+ }
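Putting the pieces together, the WebRTC signaling types in `dist/neuracore_types.d.ts` can be used on a consumer peer like this (a sketch with invented identifiers and a truncated SDP string; the transport is assumed, not specified by the package):

```typescript
// Illustrative only: a first signaling message a consumer peer might send.
// All IDs and the SDP body are made up; "offer" is one of the MessageType
// literals declared above.
import type { HandshakeMessage } from "@neuracore/types";

const offer: HandshakeMessage = {
  from_id: "consumer-7f3a",
  to_id: "robot-producer-01",
  connection_id: "conn-42",
  type: "offer",
  data: "v=0\r\no=- 0 0 IN IP4 127.0.0.1\r\n...", // truncated SDP payload
};

// The object is plain JSON, so it can go straight over a WebSocket, e.g.:
// signalingSocket.send(JSON.stringify(offer));  // signalingSocket assumed
```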