@spatialwalk/avatarkit 1.0.0-beta.34 → 1.0.0-beta.36

package/CHANGELOG.md CHANGED
@@ -5,6 +5,19 @@ All notable changes to this project will be documented in this file.
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+ ## [1.0.0-beta.36] - 2025-12-20
+
+ ### ✨ New Features
+ - **Audio Sample Rate Configuration** - Added configurable audio sample rate support to SDK initialization
+   - Added an `audioFormat` configuration option with `channelCount` (fixed to 1) and `sampleRate` (supported: 8000, 16000, 22050, 24000, 32000, 44100, 48000 Hz; default: 16000)
+   - Audio recording and playback now automatically use the configured sample rate
+   - Vanilla demo updated with a sample rate dropdown selector for easy testing
+
+ ## [1.0.0-beta.35] - 2025-12-16
+
+ ### 🔄 API Changes
+ - **Avatar Transform API** - Replaced the `setTransform` method with a `transform` getter/setter property for a more idiomatic JavaScript API; transform values can now be both read and set.
+
  ## [1.0.0-beta.34] - 2025-12-16
 
  ### ✨ New Features
package/README.md CHANGED
@@ -45,6 +45,10 @@ const configuration: Configuration = {
    // - LogLevel.error: Only error logs
    // - LogLevel.warning: Warning and error logs
    // - LogLevel.all: All logs (info, warning, error)
+   audioFormat: { // Optional, default is { channelCount: 1, sampleRate: 16000 }
+     channelCount: 1, // Fixed to 1 (mono)
+     sampleRate: 16000 // Supported: 8000, 16000, 22050, 24000, 32000, 44100, 48000 Hz
+   }
  }
 
  await AvatarSDK.initialize('your-app-id', configuration)
@@ -68,8 +72,8 @@ const avatarView = new AvatarView(avatar, container)
  // 4. Start real-time communication (SDK mode only)
  await avatarView.avatarController.start()
 
- // 5. Send audio data (SDK mode, must be 16kHz mono PCM16 format)
- const audioData = new ArrayBuffer(1024) // Example: 16kHz PCM16 audio data
+ // 5. Send audio data (SDK mode; must be mono PCM16 at the configured sample rate)
+ const audioData = new ArrayBuffer(1024) // Example: PCM16 audio data at the configured sample rate
  avatarView.avatarController.send(audioData, false) // Send audio data
  avatarView.avatarController.send(audioData, true) // end=true marks the end of the current conversation round
  ```
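To make step 5's buffer sizing concrete, here is a minimal editor-added sketch (not from the package) that streams ten 100 ms chunks of silence at the default 16 kHz rate and marks the final chunk as the end of the round; only the `send(ArrayBuffer, boolean)` call from the snippet above is assumed:

```typescript
// 100 ms of mono PCM16 at 16 kHz: (16000 / 10) samples × 2 bytes = 3200 bytes.
const sampleRate = 16000 // must match audioFormat.sampleRate
const chunkBytes = (sampleRate / 10) * 2

for (let i = 0; i < 10; i++) {
  const chunk = new ArrayBuffer(chunkBytes) // silence here; real PCM16 samples in practice
  avatarView.avatarController.send(chunk, i === 9) // end=true only on the last chunk
}
```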
@@ -190,23 +194,37 @@ RenderSystem → WebGPU/WebGL → Canvas rendering
 
  ### Audio Format Requirements
 
- **⚠️ Important:** The SDK requires audio data to be in **16kHz mono PCM16** format:
+ **⚠️ Important:** The SDK requires audio data to be in **mono PCM16** format:
 
- - **Sample Rate**: 16kHz (16000 Hz) - This is a backend requirement
- - **Channels**: Mono (single channel)
+ - **Sample Rate**: Configurable via `audioFormat.sampleRate` at SDK initialization (default: 16000 Hz)
+   - Supported sample rates: 8000, 16000, 22050, 24000, 32000, 44100, 48000 Hz
+   - The configured sample rate is used for both audio recording and playback
+ - **Channels**: Mono - `channelCount` is fixed to 1
  - **Format**: PCM16 (16-bit signed integer, little-endian)
  - **Byte Order**: Little-endian
 
  **Audio Data Format:**
  - Each sample is 2 bytes (16-bit)
  - Audio data should be provided as `ArrayBuffer` or `Uint8Array`
- - For example: 1 second of audio = 16000 samples × 2 bytes = 32000 bytes
+ - For example, at a 16 kHz sample rate: 1 second of audio = 16000 samples × 2 bytes = 32000 bytes
+ - At a 48 kHz sample rate: 1 second of audio = 48000 samples × 2 bytes = 96000 bytes
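To make the byte math above concrete, a minimal editor-added sketch (not part of the README) that converts Web Audio `Float32Array` samples into the little-endian PCM16 `ArrayBuffer` the SDK expects:

```typescript
// Convert Float32 samples in [-1, 1] to 16-bit signed little-endian PCM.
function floatToPCM16(samples: Float32Array): ArrayBuffer {
  const buffer = new ArrayBuffer(samples.length * 2) // 2 bytes per sample
  const view = new DataView(buffer)
  for (let i = 0; i < samples.length; i++) {
    const s = Math.max(-1, Math.min(1, samples[i])) // clamp to [-1, 1]
    view.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7fff, true) // true = little-endian
  }
  return buffer
}

// 1 second of mono audio at 16 kHz → 16000 samples → 32000 bytes.
const oneSecond = floatToPCM16(new Float32Array(16000))
console.log(oneSecond.byteLength) // 32000
```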
 
  **Resampling:**
- - If your audio source is at a different sample rate (e.g., 24kHz, 48kHz), you must resample it to 16kHz before sending to the SDK
+ - If your audio source is at a different sample rate, you must resample it to match the configured sample rate before sending it to the SDK
  - For high-quality resampling, we recommend using the Web Audio API's `OfflineAudioContext` with anti-aliasing filtering
  - See the example projects for a resampling implementation
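As a sketch of that recommendation (editor-added; the package's example projects may differ in detail), resampling a decoded `AudioBuffer` to the configured rate with `OfflineAudioContext`, which applies anti-aliasing filtering internally:

```typescript
// Resample an AudioBuffer to targetRate using OfflineAudioContext.
async function resample(input: AudioBuffer, targetRate: number): Promise<Float32Array> {
  const length = Math.ceil(input.duration * targetRate)
  const offline = new OfflineAudioContext(1, length, targetRate) // 1 channel (mono)
  const source = offline.createBufferSource()
  source.buffer = input
  source.connect(offline.destination) // multi-channel input is downmixed to mono
  source.start()
  const rendered = await offline.startRendering()
  return rendered.getChannelData(0) // mono Float32 samples at targetRate
}

// e.g. 48 kHz microphone capture → 16 kHz for the SDK,
// then feed the result through the floatToPCM16 helper sketched above.
```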
 
+ **Configuration Example:**
+ ```typescript
+ const configuration: Configuration = {
+   environment: Environment.cn,
+   audioFormat: {
+     channelCount: 1, // Fixed to 1 (mono)
+     sampleRate: 48000 // Choose from: 8000, 16000, 22050, 24000, 32000, 44100, 48000
+   }
+ }
+ ```
+
  ## 📚 API Reference
 
  ### AvatarSDK
@@ -292,8 +310,12 @@ const avatarView = new AvatarView(avatar, container)
  // Wait for first frame to render
  await avatarView.ready // Promise that resolves when the first frame is rendered
 
- // Set avatar transform (position and scale)
- avatarView.setTransform(x, y, scale)
+ // Get or set avatar transform (position and scale)
+ // Get current transform
+ const currentTransform = avatarView.transform // { x: number, y: number, scale: number }
+
+ // Set transform
+ avatarView.transform = { x, y, scale }
  // - x: Horizontal offset in normalized coordinates (-1 to 1, where -1 = left edge, 0 = center, 1 = right edge)
  // - y: Vertical offset in normalized coordinates (-1 to 1, where -1 = bottom edge, 0 = center, 1 = top edge)
  // - scale: Scale factor (1.0 = original size, 2.0 = double size, 0.5 = half size)
@@ -398,14 +420,18 @@ avatarView.avatarController.onError = (error: Error) => {}
  #### Avatar Transform Methods
 
  ```typescript
- // Set avatar transform (position and scale in canvas)
- avatarView.setTransform(x, y, scale)
+ // Get or set avatar transform (position and scale in canvas)
+ // Get current transform
+ const currentTransform = avatarView.transform // { x: number, y: number, scale: number }
+
+ // Set transform
+ avatarView.transform = { x, y, scale }
  // - x: Horizontal offset in normalized coordinates (-1 to 1, where -1 = left edge, 0 = center, 1 = right edge)
  // - y: Vertical offset in normalized coordinates (-1 to 1, where -1 = bottom edge, 0 = center, 1 = top edge)
  // - scale: Scale factor (1.0 = original size, 2.0 = double size, 0.5 = half size)
  // Example:
- avatarView.setTransform(0, 0, 1.0) // Center, original size
- avatarView.setTransform(0.5, 0, 2.0) // Right half, double size
+ avatarView.transform = { x: 0, y: 0, scale: 1.0 } // Center, original size
+ avatarView.transform = { x: 0.5, y: 0, scale: 2.0 } // Right half, double size
  ```
 
  **Important Notes:**
@@ -423,6 +449,12 @@ interface Configuration {
    environment: Environment
    drivingServiceMode?: DrivingServiceMode // Optional, default is 'sdk' (SDK mode)
    logLevel?: LogLevel // Optional, default is 'off' (no logs)
+   audioFormat?: AudioFormat // Optional, default is { channelCount: 1, sampleRate: 16000 }
+ }
+
+ interface AudioFormat {
+   readonly channelCount: 1 // Fixed to 1 (mono)
+   readonly sampleRate: number // Supported: 8000, 16000, 22050, 24000, 32000, 44100, 48000 Hz; default: 16000
  }
  ```
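Note that `sampleRate` is declared as a plain `number`, so the supported set is enforced at runtime rather than by the type. A small editor-added sketch of how an application could narrow it before calling `AvatarSDK.initialize` (the `SUPPORTED_SAMPLE_RATES` constant and helper are hypothetical, not SDK API):

```typescript
// Hypothetical helper: narrow an arbitrary number to a supported sample rate.
const SUPPORTED_SAMPLE_RATES = [8000, 16000, 22050, 24000, 32000, 44100, 48000] as const
type SupportedSampleRate = (typeof SUPPORTED_SAMPLE_RATES)[number]

function isSupportedSampleRate(rate: number): rate is SupportedSampleRate {
  return (SUPPORTED_SAMPLE_RATES as readonly number[]).includes(rate)
}

// Fall back to the 16000 Hz default when the requested rate is unsupported.
const requested = 44100
const sampleRate: SupportedSampleRate = isSupportedSampleRate(requested) ? requested : 16000
```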
 
@@ -451,6 +483,11 @@ enum LogLevel {
  - `LogLevel.error`: Only error logs
  - `LogLevel.warning`: Warning and error logs
  - `LogLevel.all`: All logs (info, warning, error)
+ - `audioFormat`: Configures audio sample rate and channel count
+   - `channelCount`: Fixed to 1 (mono channel)
+   - `sampleRate`: Audio sample rate in Hz (default: 16000)
+     - Supported values: 8000, 16000, 22050, 24000, 32000, 44100, 48000
+     - The configured sample rate is used for both audio recording and playback
  - `sessionToken`: Set separately via `AvatarSDK.setSessionToken()`, not in Configuration
 
  ```typescript
@@ -492,13 +529,15 @@ enum ConnectionState {
  ```typescript
  enum ConversationState {
    idle = 'idle', // Idle state (breathing animation)
-   playing = 'playing' // Playing state (active conversation)
+   playing = 'playing', // Playing state (active conversation)
+   pausing = 'pausing' // Pausing state (paused during playback)
  }
  ```
 
  **State Description:**
  - `idle`: Avatar is in idle state (breathing animation), waiting for conversation to start
  - `playing`: Avatar is playing conversation content (including during transition animations)
+ - `pausing`: Avatar playback is paused (e.g., when `end=false` and the player is waiting for more audio data)
 
  **Note:** During transition animations, the target state is notified immediately:
  - When transitioning from `idle` to `playing`, the `playing` state is notified immediately
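To show where the new state fits, a hypothetical editor-added handler sketch; the `onConversationStateChanged` callback name is modeled on the documented `onError` assignment style and is not confirmed by this diff:

```typescript
// Hypothetical handler wiring; only onError is confirmed by this README excerpt.
avatarView.avatarController.onConversationStateChanged = (state: ConversationState) => {
  switch (state) {
    case ConversationState.idle:    /* show "ready" UI */ break
    case ConversationState.playing: /* show "speaking" UI */ break
    case ConversationState.pausing: /* buffering: more audio expected (end=false) */ break
  }
}
```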
@@ -1,7 +1,7 @@
  var __defProp = Object.defineProperty;
  var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
  var __publicField = (obj, key, value) => __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
- import { A as APP_CONFIG, e as errorToMessage, l as logEvent, a as logger } from "./index-D8QhzqfR.js";
+ import { A as APP_CONFIG, e as errorToMessage, l as logEvent, a as logger } from "./index-B7llDHtO.js";
  class StreamingAudioPlayer {
    constructor(options) {
      __publicField(this, "audioContext", null);
@@ -17,9 +17,13 @@ class StreamingAudioPlayer {
      __publicField(this, "isPlaying", false);
      __publicField(this, "isPaused", false);
      __publicField(this, "autoStartEnabled", true);
+     __publicField(this, "autoContinue", false);
      __publicField(this, "audioChunks", []);
      __publicField(this, "scheduledChunks", 0);
      __publicField(this, "activeSources", /* @__PURE__ */ new Set());
+     __publicField(this, "lastScheduledChunkEndTime", 0);
+     __publicField(this, "lastGetCurrentTimeLog", 0);
+     __publicField(this, "scheduledChunkInfo", []);
      __publicField(this, "gainNode", null);
      __publicField(this, "volume", 1);
      __publicField(this, "onEndedCallback");
@@ -70,6 +74,13 @@ class StreamingAudioPlayer {
        isPlaying: this.isPlaying,
        scheduledChunks: this.scheduledChunks
      });
+     if (this.autoContinue && this.isPaused) {
+       this.log("[StreamingAudioPlayer] autoContinue=true, auto-resuming playback");
+       this.autoContinue = false;
+       this.resume().catch((err) => {
+         logger.errorWithError("Failed to auto-resume playback:", err);
+       });
+     }
      if (!this.isPlaying && this.autoStartEnabled && this.audioChunks.length > 0) {
        this.log("[StreamingAudioPlayer] Auto-starting playback from addChunk");
        this.startPlayback();
@@ -88,6 +99,7 @@ class StreamingAudioPlayer {
      this.pausedTimeOffset = 0;
      this.pausedAt = 0;
      this.pausedAudioContextTime = 0;
+     this.autoContinue = false;
      this.log("Starting new session", {
        chunks: audioChunks.length
      });
@@ -107,6 +119,9 @@ class StreamingAudioPlayer {
      this.isPlaying = true;
      this.sessionStartTime = this.audioContext.currentTime;
      this.scheduledTime = this.sessionStartTime;
+     this.lastScheduledChunkEndTime = 0;
+     this.scheduledChunkInfo = [];
+     this.autoContinue = false;
      this.log("[StreamingAudioPlayer] Starting playback", {
        sessionStartTime: this.sessionStartTime,
        bufferedChunks: this.audioChunks.length,
@@ -155,16 +170,30 @@ class StreamingAudioPlayer {
      const source = this.audioContext.createBufferSource();
      source.buffer = audioBuffer;
      source.connect(this.gainNode);
-     source.start(this.scheduledTime);
+     const chunkStartTime = this.scheduledTime;
+     source.start(chunkStartTime);
+     const actualStartTime = Math.max(chunkStartTime, this.audioContext.currentTime);
+     this.scheduledChunkInfo.push({
+       startTime: actualStartTime,
+       duration: audioBuffer.duration
+     });
      this.activeSources.add(source);
      source.onended = () => {
        this.activeSources.delete(source);
-       if (isLast && this.activeSources.size === 0) {
-         this.log("Last audio chunk ended, marking playback as ended");
-         this.markEnded();
+       if (this.activeSources.size === 0) {
+         const lastChunk = this.audioChunks[this.scheduledChunks - 1];
+         if (lastChunk && !lastChunk.isLast) {
+           this.log("All audio chunks ended but end=false, pausing and setting autoContinue");
+           this.autoContinue = true;
+           this.pause();
+         } else if (isLast) {
+           this.log("Last audio chunk ended, marking playback as ended");
+           this.markEnded();
+         }
        }
      };
      this.scheduledTime += audioBuffer.duration;
+     this.lastScheduledChunkEndTime = this.scheduledTime - this.sessionStartTime - this.pausedTimeOffset;
      this.scheduledChunks++;
      this.log(`[StreamingAudioPlayer] Scheduled chunk ${chunkIndex + 1}/${this.audioChunks.length}`, {
        startTime: this.scheduledTime - audioBuffer.duration,
@@ -225,8 +254,24 @@ class StreamingAudioPlayer {
        return this.pausedAt;
      }
      const currentAudioTime = this.audioContext.currentTime;
-     const elapsed = currentAudioTime - this.sessionStartTime - this.pausedTimeOffset;
-     return Math.max(0, elapsed);
+     if (this.activeSources.size === 0 && this.scheduledChunks > 0) {
+       return Math.max(0, this.lastScheduledChunkEndTime);
+     }
+     let totalPlayedDuration = 0;
+     for (let i = 0; i < this.scheduledChunkInfo.length; i++) {
+       const chunkInfo = this.scheduledChunkInfo[i];
+       const chunkEndTime = chunkInfo.startTime + chunkInfo.duration;
+       if (currentAudioTime < chunkInfo.startTime) {
+         break;
+       } else if (chunkEndTime <= currentAudioTime) {
+         totalPlayedDuration += chunkInfo.duration;
+       } else {
+         const playedTime = currentAudioTime - chunkInfo.startTime;
+         totalPlayedDuration += playedTime;
+         break;
+       }
+     }
+     return Math.max(0, totalPlayedDuration);
    }
    pause() {
      if (!this.isPlaying || this.isPaused || !this.audioContext) {
@@ -251,6 +296,7 @@ class StreamingAudioPlayer {
      if (!this.isPaused || !this.audioContext || !this.isPlaying) {
        return;
      }
+     this.autoContinue = false;
      if (this.audioContext.state === "suspended") {
        try {
          await this.audioContext.resume();
@@ -300,6 +346,7 @@ class StreamingAudioPlayer {
      this.activeSources.clear();
      this.audioChunks = [];
      this.scheduledChunks = 0;
+     this.autoContinue = false;
      this.log("[StreamingAudioPlayer] Playback stopped, state reset");
    }
    setAutoStart(enabled) {
@@ -18,6 +18,7 @@ export declare class AnimationWebSocketClient extends EventEmitter {
      private isConnecting;
      private isManuallyDisconnected;
      private reconnectTimer;
+     private sessionConfigured;
      constructor(options: AnimationWebSocketClientOptions);
      connect(characterId: string): Promise<void>;
      disconnect(): void;
@@ -27,6 +28,8 @@ export declare class AnimationWebSocketClient extends EventEmitter {
      getCurrentCharacterId(): string;
      private buildWebSocketUrl;
      private connectWebSocket;
+     private sanitizeUrlForLog;
+     private configureSession;
      private handleMessage;
      private scheduleReconnect;
  }
@@ -17,9 +17,13 @@ export declare class StreamingAudioPlayer {
      private isPlaying;
      private isPaused;
      private autoStartEnabled;
+     private autoContinue;
      private audioChunks;
      private scheduledChunks;
      private activeSources;
+     private lastScheduledChunkEndTime;
+     private lastGetCurrentTimeLog;
+     private scheduledChunkInfo;
      private gainNode;
      private volume;
      private onEndedCallback?;
@@ -28,6 +28,8 @@ export declare class AvatarController {
      private keyframesOffset;
      private readonly MAX_KEYFRAMES;
      private readonly KEYFRAMES_CLEANUP_THRESHOLD;
+     private lastSyncLogTime;
+     private lastOutOfBoundsState;
      private isAudioOnlyMode;
      private hostModeMetrics;
      private readonly audioBytesPerSecond;
@@ -61,5 +61,14 @@ export declare class AvatarView {
      get isOpaque(): boolean;
      set isOpaque(value: boolean);
      setBackgroundImage(image: HTMLImageElement | string | null): void;
-     setTransform(x: number, y: number, scale: number): void;
+     get transform(): {
+         x: number;
+         y: number;
+         scale: number;
+     };
+     set transform(value: {
+         x: number;
+         y: number;
+         scale: number;
+     });
  }
@@ -0,0 +1,29 @@
+ import { BinaryReader, BinaryWriter } from '@bufbuild/protobuf/wire';
+ export declare const protobufPackage = "common.v1";
+
+ export interface CustomAnimation {
+     key: string;
+     pbUrl: string;
+     wavUrl: string;
+     remark: string;
+ }
+ export declare const CustomAnimation: MessageFns<CustomAnimation>;
+ type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined;
+ export type DeepPartial<T> = T extends Builtin ? T : T extends globalThis.Array<infer U> ? globalThis.Array<DeepPartial<U>> : T extends ReadonlyArray<infer U> ? ReadonlyArray<DeepPartial<U>> : T extends {} ? {
+     [K in keyof T]?: DeepPartial<T[K]>;
+ } : Partial<T>;
+ type KeysOfUnion<T> = T extends T ? keyof T : never;
+ export type Exact<P, I extends P> = P extends Builtin ? P : P & {
+     [K in keyof P]: Exact<P[K], I[K]>;
+ } & {
+     [K in Exclude<keyof I, KeysOfUnion<P>>]: never;
+ };
+ export interface MessageFns<T> {
+     encode(message: T, writer?: BinaryWriter): BinaryWriter;
+     decode(input: BinaryReader | Uint8Array, length?: number): T;
+     fromJSON(object: any): T;
+     toJSON(message: T): unknown;
+     create<I extends Exact<DeepPartial<T>, I>>(base?: I): T;
+     fromPartial<I extends Exact<DeepPartial<T>, I>>(object: I): T;
+ }
+ export {};
@@ -1,6 +1,32 @@
  import { BinaryReader, BinaryWriter } from '@bufbuild/protobuf/wire';
+ import { CustomAnimation } from '../../common/v1/models';
  import { Timestamp } from '../../google/protobuf/timestamp';
  export declare const protobufPackage = "driveningress.v2";
+ export declare enum MessageType {
+     MESSAGE_UNSPECIFIED = 0,
+
+     MESSAGE_CLIENT_CONFIGURE_SESSION = 1,
+
+     MESSAGE_SERVER_CONFIRM_SESSION = 2,
+     MESSAGE_CLIENT_AUDIO_INPUT = 3,
+     MESSAGE_SERVER_ERROR = 4,
+     MESSAGE_SERVER_RESPONSE_ANIMATION = 5,
+     UNRECOGNIZED = -1
+ }
+ export declare function messageTypeFromJSON(object: any): MessageType;
+ export declare function messageTypeToJSON(object: MessageType): string;
+ export declare enum AudioFormat {
+     AUDIO_FORMAT_PCM_S16LE = 0,
+     UNRECOGNIZED = -1
+ }
+ export declare function audioFormatFromJSON(object: any): AudioFormat;
+ export declare function audioFormatToJSON(object: AudioFormat): string;
+ export declare enum TransportCompression {
+     TRANSPORT_COMPRESSION_NONE = 0,
+     UNRECOGNIZED = -1
+ }
+ export declare function transportCompressionFromJSON(object: any): TransportCompression;
+ export declare function transportCompressionToJSON(object: TransportCompression): string;
  export interface GetCharacterInfoRequest {
      characterId: string;
  }
@@ -16,6 +42,7 @@ export interface CharacterAsset {
      characterSettings?: {
          [key: string]: any;
      } | undefined;
+     customAnimations: CustomAnimation[];
  }
  export interface Resource {
      type: string;
@@ -36,12 +63,66 @@ export interface Animations {
      frameMono?: ResourceHolder | undefined;
      audioMono?: ResourceHolder | undefined;
  }
+ export interface ClientConfigureSession {
+     sampleRate: number;
+     bitrate: number;
+     audioFormat: AudioFormat;
+     transportCompression: TransportCompression;
+ }
+ export interface ServerConfirmSession {
+     connectionId: string;
+ }
+ export interface ClientAudioInput {
+     reqId: string;
+     end: boolean;
+     audio: Uint8Array;
+ }
+ export interface ServerError {
+     connectionId: string;
+     reqId: string;
+     code: number;
+     message: string;
+ }
+ export interface Flame {
+     translation: number[];
+     rotation: number[];
+     neckPose: number[];
+     jawPose: number[];
+     eyePose: number[];
+     eyeLid: number[];
+     expression: number[];
+ }
+ export interface FlameAnimation {
+     keyframes: Flame[];
+ }
+ export interface ServerResponseAnimation {
+     connectionId: string;
+     reqId: string;
+     end: boolean;
+     animation?: FlameAnimation | undefined;
+ }
+ export interface Message {
+     type: MessageType;
+     clientConfigureSession?: ClientConfigureSession | undefined;
+     serverConfirmSession?: ServerConfirmSession | undefined;
+     clientAudioInput?: ClientAudioInput | undefined;
+     serverError?: ServerError | undefined;
+     serverResponseAnimation?: ServerResponseAnimation | undefined;
+ }
  export declare const GetCharacterInfoRequest: MessageFns<GetCharacterInfoRequest>;
  export declare const CharacterAsset: MessageFns<CharacterAsset>;
  export declare const Resource: MessageFns<Resource>;
  export declare const ResourceHolder: MessageFns<ResourceHolder>;
  export declare const Models: MessageFns<Models>;
  export declare const Animations: MessageFns<Animations>;
+ export declare const ClientConfigureSession: MessageFns<ClientConfigureSession>;
+ export declare const ServerConfirmSession: MessageFns<ServerConfirmSession>;
+ export declare const ClientAudioInput: MessageFns<ClientAudioInput>;
+ export declare const ServerError: MessageFns<ServerError>;
+ export declare const Flame: MessageFns<Flame>;
+ export declare const FlameAnimation: MessageFns<FlameAnimation>;
+ export declare const ServerResponseAnimation: MessageFns<ServerResponseAnimation>;
+ export declare const Message: MessageFns<Message>;
  export interface DrivenIngressService {
      GetCharacterInfo(request: GetCharacterInfoRequest): Promise<CharacterAsset>;
  }
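Taken together, these declarations describe the session protocol behind the new sample-rate support. As an editor-added illustration (the import path and field values are hypothetical; only the `fromPartial`/`encode` helpers from the `MessageFns` declarations above are assumed), a client could build and serialize the configure-session message like this:

```typescript
// Hypothetical import path; this diff shows only the generated declaration files.
import { AudioFormat, Message, MessageType, TransportCompression } from './driveningress/v2/service'

// Build the configure-session message with the generated fromPartial helper.
const configure = Message.fromPartial({
  type: MessageType.MESSAGE_CLIENT_CONFIGURE_SESSION,
  clientConfigureSession: {
    sampleRate: 48000, // should mirror the SDK's audioFormat.sampleRate
    bitrate: 0, // illustrative value
    audioFormat: AudioFormat.AUDIO_FORMAT_PCM_S16LE,
    transportCompression: TransportCompression.TRANSPORT_COMPRESSION_NONE
  }
})

// Serialize to bytes for the WebSocket; BinaryWriter.finish() yields a Uint8Array.
const bytes: Uint8Array = Message.encode(configure).finish()
```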