@spatialwalk/avatarkit 1.0.0-beta.35 → 1.0.0-beta.36
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +8 -0
- package/README.md +39 -8
- package/dist/{StreamingAudioPlayer-eWy8xl8G.js → StreamingAudioPlayer-GTu9p5GZ.js} +54 -7
- package/dist/animation/AnimationWebSocketClient.d.ts +3 -0
- package/dist/audio/StreamingAudioPlayer.d.ts +4 -0
- package/dist/core/AvatarController.d.ts +2 -0
- package/dist/generated/common/v1/models.d.ts +29 -0
- package/dist/generated/driveningress/v2/driveningress.d.ts +81 -0
- package/dist/{index-DgkjYDp2.js → index-B7llDHtO.js} +1347 -202
- package/dist/index.js +1 -1
- package/dist/types/index.d.ts +10 -1
- package/dist/utils/id-manager.d.ts +1 -0
- package/package.json +1 -1

package/CHANGELOG.md
CHANGED

@@ -5,6 +5,14 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [1.0.0-beta.36] - 2025-12-20
+
+### ✨ New Features
+- **Audio Sample Rate Configuration** - Added configurable audio sample rate support in SDK initialization
+  - Added `audioFormat` configuration option with `channelCount` (fixed to 1) and `sampleRate` (supports: 8000, 16000, 22050, 24000, 32000, 44100, 48000 Hz, default: 16000)
+  - Audio recording and playback now automatically use the configured sample rate
+  - Vanilla demo updated with sample rate dropdown selector for easy testing
+
 ## [1.0.0-beta.35] - 2025-12-16
 
 ### 🔄 API Changes

package/README.md
CHANGED

@@ -45,6 +45,10 @@ const configuration: Configuration = {
   // - LogLevel.error: Only error logs
   // - LogLevel.warning: Warning and error logs
   // - LogLevel.all: All logs (info, warning, error)
+  audioFormat: { // Optional, default is { channelCount: 1, sampleRate: 16000 }
+    channelCount: 1, // Fixed to 1 (mono)
+    sampleRate: 16000 // Supported: 8000, 16000, 22050, 24000, 32000, 44100, 48000 Hz
+  }
 }
 
 await AvatarSDK.initialize('your-app-id', configuration)

@@ -68,8 +72,8 @@ const avatarView = new AvatarView(avatar, container)
 // 4. Start real-time communication (SDK mode only)
 await avatarView.avatarController.start()
 
-// 5. Send audio data (SDK mode, must be
-const audioData = new ArrayBuffer(1024) // Example:
+// 5. Send audio data (SDK mode, must be mono PCM16 format matching configured sample rate)
+const audioData = new ArrayBuffer(1024) // Example: PCM16 audio data at configured sample rate
 avatarView.avatarController.send(audioData, false) // Send audio data
 avatarView.avatarController.send(audioData, true) // end=true marks the end of current conversation round
 ```
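
The snippet above assumes you already have PCM16 bytes in hand. As a minimal sketch (not part of the SDK; it assumes your capture pipeline yields mono `Float32Array` blocks already at the configured sample rate), the conversion into the little-endian PCM16 `ArrayBuffer` that `send()` expects could look like this:

```typescript
// Sketch: convert Float32 samples in [-1, 1] into little-endian mono PCM16.
function floatToPcm16(samples: Float32Array): ArrayBuffer {
  const buffer = new ArrayBuffer(samples.length * 2) // 2 bytes per 16-bit sample
  const view = new DataView(buffer)
  for (let i = 0; i < samples.length; i++) {
    const s = Math.max(-1, Math.min(1, samples[i])) // clamp before scaling
    view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7fff, true) // true = little-endian
  }
  return buffer
}

// avatarView.avatarController.send(floatToPcm16(block), false)     // mid-round chunk
// avatarView.avatarController.send(floatToPcm16(lastBlock), true)  // closes the round
```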

@@ -190,23 +194,37 @@ RenderSystem → WebGPU/WebGL → Canvas rendering
 
 ### Audio Format Requirements
 
-**⚠️ Important:** The SDK requires audio data to be in **
+**⚠️ Important:** The SDK requires audio data to be in **mono PCM16** format:
 
-- **Sample Rate**:
-
+- **Sample Rate**: Configurable via `audioFormat.sampleRate` in SDK initialization (default: 16000 Hz)
+  - Supported sample rates: 8000, 16000, 22050, 24000, 32000, 44100, 48000 Hz
+  - The configured sample rate will be used for both audio recording and playback
+- **Channels**: Mono (single channel) - Fixed to 1 channel
 - **Format**: PCM16 (16-bit signed integer, little-endian)
 - **Byte Order**: Little-endian
 
 **Audio Data Format:**
 - Each sample is 2 bytes (16-bit)
 - Audio data should be provided as `ArrayBuffer` or `Uint8Array`
-- For example: 1 second of audio = 16000 samples × 2 bytes = 32000 bytes
+- For example, with 16kHz sample rate: 1 second of audio = 16000 samples × 2 bytes = 32000 bytes
+- For 48kHz sample rate: 1 second of audio = 48000 samples × 2 bytes = 96000 bytes
 
 **Resampling:**
-- If your audio source is at a different sample rate
+- If your audio source is at a different sample rate, you must resample it to match the configured sample rate before sending to the SDK
 - For high-quality resampling, we recommend using Web Audio API's `OfflineAudioContext` with anti-aliasing filtering
 - See example projects for resampling implementation
 
+**Configuration Example:**
+```typescript
+const configuration: Configuration = {
+  environment: Environment.cn,
+  audioFormat: {
+    channelCount: 1, // Fixed to 1 (mono)
+    sampleRate: 48000 // Choose from: 8000, 16000, 22050, 24000, 32000, 44100, 48000
+  }
+}
+```
+
 ## 📚 API Reference
 
 ### AvatarSDK
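
The resampling bullets above recommend `OfflineAudioContext` but stop short of code. A hedged sketch (browser-provided resampling with anti-aliasing; `targetSampleRate` should match your configured `audioFormat.sampleRate`) might look like this:

```typescript
// Sketch: resample a decoded AudioBuffer to the configured rate and down-mix to mono.
async function resampleToMono(source: AudioBuffer, targetSampleRate: number): Promise<Float32Array> {
  const frameCount = Math.ceil(source.duration * targetSampleRate)
  const offline = new OfflineAudioContext(1, frameCount, targetSampleRate) // 1 channel = mono output
  const node = offline.createBufferSource()
  node.buffer = source
  node.connect(offline.destination) // multi-channel input is down-mixed to the mono destination
  node.start()
  const rendered = await offline.startRendering()
  return rendered.getChannelData(0) // Float32 samples, ready for PCM16 conversion and send()
}
```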

@@ -431,6 +449,12 @@ interface Configuration {
   environment: Environment
   drivingServiceMode?: DrivingServiceMode // Optional, default is 'sdk' (SDK mode)
   logLevel?: LogLevel // Optional, default is 'off' (no logs)
+  audioFormat?: AudioFormat // Optional, default is { channelCount: 1, sampleRate: 16000 }
+}
+
+interface AudioFormat {
+  readonly channelCount: 1 // Fixed to 1 (mono)
+  readonly sampleRate: number // Supported: 8000, 16000, 22050, 24000, 32000, 44100, 48000 Hz, default: 16000
 }
 ```
 

@@ -459,6 +483,11 @@ enum LogLevel {
 - `LogLevel.error`: Only error logs
 - `LogLevel.warning`: Warning and error logs
 - `LogLevel.all`: All logs (info, warning, error)
+- `audioFormat`: Configures audio sample rate and channel count
+  - `channelCount`: Fixed to 1 (mono channel)
+  - `sampleRate`: Audio sample rate in Hz (default: 16000)
+    - Supported values: 8000, 16000, 22050, 24000, 32000, 44100, 48000
+  - The configured sample rate will be used for both audio recording and playback
 - `sessionToken`: Set separately via `AvatarSDK.setSessionToken()`, not in Configuration
 
 ```typescript

@@ -500,13 +529,15 @@ enum ConnectionState {
 ```typescript
 enum ConversationState {
   idle = 'idle', // Idle state (breathing animation)
-  playing = 'playing' // Playing state (active conversation)
+  playing = 'playing', // Playing state (active conversation)
+  pausing = 'pausing' // Pausing state (paused during playback)
 }
 ```
 
 **State Description:**
 - `idle`: Avatar is in idle state (breathing animation), waiting for conversation to start
 - `playing`: Avatar is playing conversation content (including during transition animations)
+- `pausing`: Avatar playback is paused (e.g., when `end=false` and waiting for more audio data)
 
 **Note:** During transition animations, the target state is notified immediately:
 - When transitioning from `idle` to `playing`, the `playing` state is notified immediately
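
To make the new `pausing` state concrete, here is an illustrative pacing sketch. Only `send(data, end)` comes from the SDK; the structural `controller` type, chunk sizes, and timing are assumptions. While a round is open (`end=false`), the avatar drops into `pausing` whenever playback catches up with the audio sent so far, and it returns to `playing` once the next chunk arrives.

```typescript
// Illustrative only: stream one conversation round as PCM16 chunks.
// Between chunks (end=false) the avatar may report `pausing`; sending the
// final chunk with end=true lets the round finish normally.
async function streamRound(
  controller: { send(data: ArrayBuffer, end: boolean): void }, // structural type, not an SDK export
  chunks: ArrayBuffer[]
): Promise<void> {
  for (let i = 0; i < chunks.length; i++) {
    const isLast = i === chunks.length - 1
    controller.send(chunks[i], isLast)
    if (!isLast) {
      await new Promise((resolve) => setTimeout(resolve, 100)) // simulate audio arriving over time
    }
  }
}
```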

package/dist/StreamingAudioPlayer-eWy8xl8G.js → package/dist/StreamingAudioPlayer-GTu9p5GZ.js
CHANGED

@@ -1,7 +1,7 @@
 var __defProp = Object.defineProperty;
 var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
 var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
-import { A as APP_CONFIG, e as errorToMessage, l as logEvent, a as logger } from "./index-DgkjYDp2.js";
+import { A as APP_CONFIG, e as errorToMessage, l as logEvent, a as logger } from "./index-B7llDHtO.js";
 class StreamingAudioPlayer {
   constructor(options) {
     __publicField(this, "audioContext", null);

@@ -17,9 +17,13 @@ class StreamingAudioPlayer {
     __publicField(this, "isPlaying", false);
     __publicField(this, "isPaused", false);
     __publicField(this, "autoStartEnabled", true);
+    __publicField(this, "autoContinue", false);
     __publicField(this, "audioChunks", []);
     __publicField(this, "scheduledChunks", 0);
     __publicField(this, "activeSources", /* @__PURE__ */ new Set());
+    __publicField(this, "lastScheduledChunkEndTime", 0);
+    __publicField(this, "lastGetCurrentTimeLog", 0);
+    __publicField(this, "scheduledChunkInfo", []);
     __publicField(this, "gainNode", null);
     __publicField(this, "volume", 1);
     __publicField(this, "onEndedCallback");

@@ -70,6 +74,13 @@ class StreamingAudioPlayer {
       isPlaying: this.isPlaying,
       scheduledChunks: this.scheduledChunks
     });
+    if (this.autoContinue && this.isPaused) {
+      this.log("[StreamingAudioPlayer] autoContinue=true, auto-resuming playback");
+      this.autoContinue = false;
+      this.resume().catch((err) => {
+        logger.errorWithError("Failed to auto-resume playback:", err);
+      });
+    }
     if (!this.isPlaying && this.autoStartEnabled && this.audioChunks.length > 0) {
       this.log("[StreamingAudioPlayer] Auto-starting playback from addChunk");
       this.startPlayback();

@@ -88,6 +99,7 @@ class StreamingAudioPlayer {
     this.pausedTimeOffset = 0;
     this.pausedAt = 0;
     this.pausedAudioContextTime = 0;
+    this.autoContinue = false;
     this.log("Starting new session", {
       chunks: audioChunks.length
     });

@@ -107,6 +119,9 @@ class StreamingAudioPlayer {
     this.isPlaying = true;
     this.sessionStartTime = this.audioContext.currentTime;
     this.scheduledTime = this.sessionStartTime;
+    this.lastScheduledChunkEndTime = 0;
+    this.scheduledChunkInfo = [];
+    this.autoContinue = false;
     this.log("[StreamingAudioPlayer] Starting playback", {
       sessionStartTime: this.sessionStartTime,
       bufferedChunks: this.audioChunks.length,

@@ -155,16 +170,30 @@ class StreamingAudioPlayer {
     const source = this.audioContext.createBufferSource();
     source.buffer = audioBuffer;
     source.connect(this.gainNode);
-
+    const chunkStartTime = this.scheduledTime;
+    source.start(chunkStartTime);
+    const actualStartTime = Math.max(chunkStartTime, this.audioContext.currentTime);
+    this.scheduledChunkInfo.push({
+      startTime: actualStartTime,
+      duration: audioBuffer.duration
+    });
     this.activeSources.add(source);
     source.onended = () => {
       this.activeSources.delete(source);
-      if (
-
-
+      if (this.activeSources.size === 0) {
+        const lastChunk = this.audioChunks[this.scheduledChunks - 1];
+        if (lastChunk && !lastChunk.isLast) {
+          this.log("All audio chunks ended but end=false, pausing and setting autoContinue");
+          this.autoContinue = true;
+          this.pause();
+        } else if (isLast) {
+          this.log("Last audio chunk ended, marking playback as ended");
+          this.markEnded();
+        }
       }
     };
     this.scheduledTime += audioBuffer.duration;
+    this.lastScheduledChunkEndTime = this.scheduledTime - this.sessionStartTime - this.pausedTimeOffset;
     this.scheduledChunks++;
     this.log(`[StreamingAudioPlayer] Scheduled chunk ${chunkIndex + 1}/${this.audioChunks.length}`, {
       startTime: this.scheduledTime - audioBuffer.duration,

@@ -225,8 +254,24 @@ class StreamingAudioPlayer {
       return this.pausedAt;
     }
     const currentAudioTime = this.audioContext.currentTime;
-
-
+    if (this.activeSources.size === 0 && this.scheduledChunks > 0) {
+      return Math.max(0, this.lastScheduledChunkEndTime);
+    }
+    let totalPlayedDuration = 0;
+    for (let i = 0; i < this.scheduledChunkInfo.length; i++) {
+      const chunkInfo = this.scheduledChunkInfo[i];
+      const chunkEndTime = chunkInfo.startTime + chunkInfo.duration;
+      if (currentAudioTime < chunkInfo.startTime) {
+        break;
+      } else if (chunkEndTime <= currentAudioTime) {
+        totalPlayedDuration += chunkInfo.duration;
+      } else {
+        const playedTime = currentAudioTime - chunkInfo.startTime;
+        totalPlayedDuration += playedTime;
+        break;
+      }
+    }
+    return Math.max(0, totalPlayedDuration);
   }
   pause() {
     if (!this.isPlaying || this.isPaused || !this.audioContext) {

@@ -251,6 +296,7 @@ class StreamingAudioPlayer {
     if (!this.isPaused || !this.audioContext || !this.isPlaying) {
       return;
     }
+    this.autoContinue = false;
     if (this.audioContext.state === "suspended") {
       try {
         await this.audioContext.resume();

@@ -300,6 +346,7 @@ class StreamingAudioPlayer {
     this.activeSources.clear();
     this.audioChunks = [];
     this.scheduledChunks = 0;
+    this.autoContinue = false;
     this.log("[StreamingAudioPlayer] Playback stopped, state reset");
   }
   setAutoStart(enabled) {

package/dist/animation/AnimationWebSocketClient.d.ts
CHANGED

@@ -18,6 +18,7 @@ export declare class AnimationWebSocketClient extends EventEmitter {
     private isConnecting;
     private isManuallyDisconnected;
     private reconnectTimer;
+    private sessionConfigured;
     constructor(options: AnimationWebSocketClientOptions);
     connect(characterId: string): Promise<void>;
     disconnect(): void;

@@ -27,6 +28,8 @@ export declare class AnimationWebSocketClient extends EventEmitter {
     getCurrentCharacterId(): string;
     private buildWebSocketUrl;
     private connectWebSocket;
+    private sanitizeUrlForLog;
+    private configureSession;
     private handleMessage;
     private scheduleReconnect;
 }

package/dist/audio/StreamingAudioPlayer.d.ts
CHANGED

@@ -17,9 +17,13 @@ export declare class StreamingAudioPlayer {
     private isPlaying;
     private isPaused;
     private autoStartEnabled;
+    private autoContinue;
     private audioChunks;
     private scheduledChunks;
     private activeSources;
+    private lastScheduledChunkEndTime;
+    private lastGetCurrentTimeLog;
+    private scheduledChunkInfo;
     private gainNode;
     private volume;
     private onEndedCallback?;

package/dist/core/AvatarController.d.ts
CHANGED

@@ -28,6 +28,8 @@ export declare class AvatarController {
     private keyframesOffset;
     private readonly MAX_KEYFRAMES;
     private readonly KEYFRAMES_CLEANUP_THRESHOLD;
+    private lastSyncLogTime;
+    private lastOutOfBoundsState;
     private isAudioOnlyMode;
     private hostModeMetrics;
     private readonly audioBytesPerSecond;

package/dist/generated/common/v1/models.d.ts
ADDED

@@ -0,0 +1,29 @@
+import { BinaryReader, BinaryWriter } from '@bufbuild/protobuf/wire';
+export declare const protobufPackage = "common.v1";
+
+export interface CustomAnimation {
+    key: string;
+    pbUrl: string;
+    wavUrl: string;
+    remark: string;
+}
+export declare const CustomAnimation: MessageFns<CustomAnimation>;
+type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
+export type DeepPartial<T> = T extends Builtin ? T : T extends globalThis.Array<infer U> ? globalThis.Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> : T extends {} ? {
+    [K in keyof T]?: DeepPartial<T[K]>;
+} : Partial<T>;
+type KeysOfUnion<T> = T extends T ? keyof T : never;
+export type Exact<P, I extends P> = P extends Builtin ? P : P & {
+    [K in keyof P]: Exact<P[K], I[K]>;
+} & {
+    [K in Exclude<keyof I, KeysOfUnion<P>>]: never;
+};
+export interface MessageFns<T> {
+    encode(message: T, writer?: BinaryWriter): BinaryWriter;
+    decode(input: BinaryReader | Uint8Array, length?: number): T;
+    fromJSON(object: any): T;
+    toJSON(message: T): unknown;
+    create<I extends Exact<DeepPartial<T>, I>>(base?: I): T;
+    fromPartial<I extends Exact<DeepPartial<T>, I>>(object: I): T;
+}
+export {};

package/dist/generated/driveningress/v2/driveningress.d.ts
CHANGED

@@ -1,6 +1,32 @@
 import { BinaryReader, BinaryWriter } from '@bufbuild/protobuf/wire';
+import { CustomAnimation } from '../../common/v1/models';
 import { Timestamp } from '../../google/protobuf/timestamp';
 export declare const protobufPackage = "driveningress.v2";
+export declare enum MessageType {
+    MESSAGE_UNSPECIFIED = 0,
+
+    MESSAGE_CLIENT_CONFIGURE_SESSION = 1,
+
+    MESSAGE_SERVER_CONFIRM_SESSION = 2,
+    MESSAGE_CLIENT_AUDIO_INPUT = 3,
+    MESSAGE_SERVER_ERROR = 4,
+    MESSAGE_SERVER_RESPONSE_ANIMATION = 5,
+    UNRECOGNIZED = -1
+}
+export declare function messageTypeFromJSON(object: any): MessageType;
+export declare function messageTypeToJSON(object: MessageType): string;
+export declare enum AudioFormat {
+    AUDIO_FORMAT_PCM_S16LE = 0,
+    UNRECOGNIZED = -1
+}
+export declare function audioFormatFromJSON(object: any): AudioFormat;
+export declare function audioFormatToJSON(object: AudioFormat): string;
+export declare enum TransportCompression {
+    TRANSPORT_COMPRESSION_NONE = 0,
+    UNRECOGNIZED = -1
+}
+export declare function transportCompressionFromJSON(object: any): TransportCompression;
+export declare function transportCompressionToJSON(object: TransportCompression): string;
 export interface GetCharacterInfoRequest {
     characterId: string;
 }

@@ -16,6 +42,7 @@ export interface CharacterAsset {
     characterSettings?: {
         [key: string]: any;
     } | undefined;
+    customAnimations: CustomAnimation[];
 }
 export interface Resource {
     type: string;

@@ -36,12 +63,66 @@ export interface Animations {
     frameMono?: ResourceHolder | undefined;
     audioMono?: ResourceHolder | undefined;
 }
+export interface ClientConfigureSession {
+    sampleRate: number;
+    bitrate: number;
+    audioFormat: AudioFormat;
+    transportCompression: TransportCompression;
+}
+export interface ServerConfirmSession {
+    connectionId: string;
+}
+export interface ClientAudioInput {
+    reqId: string;
+    end: boolean;
+    audio: Uint8Array;
+}
+export interface ServerError {
+    connectionId: string;
+    reqId: string;
+    code: number;
+    message: string;
+}
+export interface Flame {
+    translation: number[];
+    rotation: number[];
+    neckPose: number[];
+    jawPose: number[];
+    eyePose: number[];
+    eyeLid: number[];
+    expression: number[];
+}
+export interface FlameAnimation {
+    keyframes: Flame[];
+}
+export interface ServerResponseAnimation {
+    connectionId: string;
+    reqId: string;
+    end: boolean;
+    animation?: FlameAnimation | undefined;
+}
+export interface Message {
+    type: MessageType;
+    clientConfigureSession?: ClientConfigureSession | undefined;
+    serverConfirmSession?: ServerConfirmSession | undefined;
+    clientAudioInput?: ClientAudioInput | undefined;
+    serverError?: ServerError | undefined;
+    serverResponseAnimation?: ServerResponseAnimation | undefined;
+}
 export declare const GetCharacterInfoRequest: MessageFns<GetCharacterInfoRequest>;
 export declare const CharacterAsset: MessageFns<CharacterAsset>;
 export declare const Resource: MessageFns<Resource>;
 export declare const ResourceHolder: MessageFns<ResourceHolder>;
 export declare const Models: MessageFns<Models>;
 export declare const Animations: MessageFns<Animations>;
+export declare const ClientConfigureSession: MessageFns<ClientConfigureSession>;
+export declare const ServerConfirmSession: MessageFns<ServerConfirmSession>;
+export declare const ClientAudioInput: MessageFns<ClientAudioInput>;
+export declare const ServerError: MessageFns<ServerError>;
+export declare const Flame: MessageFns<Flame>;
+export declare const FlameAnimation: MessageFns<FlameAnimation>;
+export declare const ServerResponseAnimation: MessageFns<ServerResponseAnimation>;
+export declare const Message: MessageFns<Message>;
 export interface DrivenIngressService {
     GetCharacterInfo(request: GetCharacterInfoRequest): Promise<CharacterAsset>;
 }