@spatialwalk/avatarkit 1.0.0-beta.2 → 1.0.0-beta.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +325 -0
- package/README.md +499 -194
- package/dist/StreamingAudioPlayer-Dv4D0GIN.js +334 -0
- package/dist/StreamingAudioPlayer-Dv4D0GIN.js.map +1 -0
- package/dist/animation/AnimationWebSocketClient.d.ts +7 -4
- package/dist/animation/AnimationWebSocketClient.d.ts.map +1 -1
- package/dist/audio/AnimationPlayer.d.ts +12 -0
- package/dist/audio/AnimationPlayer.d.ts.map +1 -1
- package/dist/audio/StreamingAudioPlayer.d.ts +11 -0
- package/dist/audio/StreamingAudioPlayer.d.ts.map +1 -1
- package/dist/avatar_core_wasm-BPIbbUx_.js +1664 -0
- package/dist/avatar_core_wasm-BPIbbUx_.js.map +1 -0
- package/dist/avatar_core_wasm.wasm +0 -0
- package/dist/config/app-config.d.ts +3 -7
- package/dist/config/app-config.d.ts.map +1 -1
- package/dist/config/constants.d.ts +19 -3
- package/dist/config/constants.d.ts.map +1 -1
- package/dist/config/sdk-config-loader.d.ts.map +1 -1
- package/dist/core/Avatar.d.ts +0 -8
- package/dist/core/Avatar.d.ts.map +1 -1
- package/dist/core/AvatarController.d.ts +112 -65
- package/dist/core/AvatarController.d.ts.map +1 -1
- package/dist/core/AvatarDownloader.d.ts +1 -20
- package/dist/core/AvatarDownloader.d.ts.map +1 -1
- package/dist/core/AvatarKit.d.ts +8 -15
- package/dist/core/AvatarKit.d.ts.map +1 -1
- package/dist/core/AvatarManager.d.ts +1 -4
- package/dist/core/AvatarManager.d.ts.map +1 -1
- package/dist/core/AvatarView.d.ts +65 -53
- package/dist/core/AvatarView.d.ts.map +1 -1
- package/dist/core/NetworkLayer.d.ts +8 -0
- package/dist/core/NetworkLayer.d.ts.map +1 -0
- package/dist/index-Cm5BwNVd.js +6395 -0
- package/dist/index-Cm5BwNVd.js.map +1 -0
- package/dist/index.d.ts +0 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +14 -15
- package/dist/renderer/RenderSystem.d.ts +9 -76
- package/dist/renderer/RenderSystem.d.ts.map +1 -1
- package/dist/renderer/webgl/reorderData.d.ts.map +1 -1
- package/dist/renderer/webgl/webglRenderer.d.ts.map +1 -1
- package/dist/types/character.d.ts +0 -11
- package/dist/types/character.d.ts.map +1 -1
- package/dist/types/index.d.ts +18 -6
- package/dist/types/index.d.ts.map +1 -1
- package/dist/utils/cls-tracker.d.ts +17 -0
- package/dist/utils/cls-tracker.d.ts.map +1 -0
- package/dist/utils/{reqId.d.ts → conversationId.d.ts} +6 -6
- package/dist/utils/conversationId.d.ts.map +1 -0
- package/dist/utils/logger.d.ts +2 -10
- package/dist/utils/logger.d.ts.map +1 -1
- package/dist/vanilla/vite.config.d.ts +3 -0
- package/dist/vanilla/vite.config.d.ts.map +1 -0
- package/dist/wasm/avatarCoreAdapter.d.ts +58 -9
- package/dist/wasm/avatarCoreAdapter.d.ts.map +1 -1
- package/dist/wasm/avatarCoreMemory.d.ts +5 -1
- package/dist/wasm/avatarCoreMemory.d.ts.map +1 -1
- package/package.json +10 -4
- package/dist/StreamingAudioPlayer-CMEiGwxE.js +0 -288
- package/dist/StreamingAudioPlayer-CMEiGwxE.js.map +0 -1
- package/dist/avatar_core_wasm-DmkU6dYn.js +0 -1666
- package/dist/avatar_core_wasm-DmkU6dYn.js.map +0 -1
- package/dist/index-CNhquYUE.js +0 -9712
- package/dist/index-CNhquYUE.js.map +0 -1
- package/dist/utils/posthog-tracker.d.ts +0 -82
- package/dist/utils/posthog-tracker.d.ts.map +0 -1
- package/dist/utils/reqId.d.ts.map +0 -1
package/README.md
CHANGED
@@ -1,25 +1,26 @@

# SPAvatarKit SDK

Real-time virtual avatar rendering SDK based on 3D Gaussian Splatting, supporting audio-driven animation rendering and high-quality 3D rendering.

## 🚀 Features

- **3D Gaussian Splatting Rendering** - Based on the latest point cloud rendering technology, providing high-quality 3D virtual avatars
- **Audio-Driven Real-Time Animation Rendering** - Users provide audio data; the SDK handles receiving animation data and rendering
- **Multi-Character Support** - Supports multiple avatar instances simultaneously, each with independent state and rendering
- **WebGPU/WebGL Dual Rendering Backend** - Automatically selects the best rendering backend for compatibility
- **WASM High-Performance Computing** - Uses WebAssembly modules compiled from C++ for geometric calculations
- **TypeScript Support** - Complete type definitions and IntelliSense
- **Modular Architecture** - Clear component separation, easy to integrate and extend

## 📦 Installation

```bash
npm install @spatialwalk/avatarkit
```

## 🎯 Quick Start

### Basic Usage

```typescript
import {
@@ -30,190 +31,467 @@ import {
  Environment
} from '@spatialwalk/avatarkit'

// 1. Initialize SDK
import { DrivingServiceMode } from '@spatialwalk/avatarkit'

const configuration: Configuration = {
  environment: Environment.test,
  drivingServiceMode: DrivingServiceMode.sdk, // Optional, 'sdk' is default
  // - DrivingServiceMode.sdk: SDK mode - SDK handles WebSocket communication
  // - DrivingServiceMode.host: Host mode - Host app provides audio and animation data
}

await AvatarKit.initialize('your-app-id', configuration)

// Set sessionToken (if needed, call separately)
// AvatarKit.setSessionToken('your-session-token')

// 2. Load character
const avatarManager = AvatarManager.shared
const avatar = await avatarManager.load('character-id', (progress) => {
  console.log(`Loading progress: ${progress.progress}%`)
})

// 3. Create view (automatically creates Canvas and AvatarController)
// The playback mode is determined by drivingServiceMode in the AvatarKit configuration
// - DrivingServiceMode.sdk: SDK mode - SDK handles WebSocket communication
// - DrivingServiceMode.host: Host mode - Host app provides audio and animation data
const container = document.getElementById('avatar-container')
const avatarView = new AvatarView(avatar, container)

// 4. Start real-time communication (SDK mode only)
await avatarView.avatarController.start()

// 5. Send audio data (SDK mode)
// ⚠️ Important: Audio must be 16kHz mono PCM16 format
// If audio is Uint8Array, you can use slice().buffer to convert to ArrayBuffer
const audioUint8 = new Uint8Array(1024) // Example: 16kHz PCM16 audio data (512 samples = 1024 bytes)
const audioData = audioUint8.slice().buffer // Simplified conversion, works for ArrayBuffer and SharedArrayBuffer
avatarView.avatarController.send(audioData, false) // Send audio data; playback starts automatically after enough data has accumulated
avatarView.avatarController.send(audioData, true) // end=true marks the end of the current conversation round
```

### Host Mode Example

```typescript
import { AvatarPlaybackMode } from '@spatialwalk/avatarkit'

// 1-3. Same as SDK mode (initialize SDK, load character)

// 3. Create view with Host mode
const container = document.getElementById('avatar-container')
const avatarView = new AvatarView(avatar, container)

// 4. Host Mode Workflow:
// ⚠️ IMPORTANT: In Host mode, you MUST send audio data FIRST to get a conversationId,
// then use that conversationId to send animation data.
// Animation data with a mismatched conversationId will be discarded.

// Option A: Play back existing audio and animation data (replay mode)
const initialAudioChunks = [{ data: audioData1, isLast: false }, { data: audioData2, isLast: false }]
const initialKeyframes = animationData1 // Animation keyframes from your service
// Step 1: Send audio first to get a conversationId
const conversationId = await avatarView.avatarController.playback(initialAudioChunks, initialKeyframes)

// Option B: Stream new audio and animation data (start a new session directly)
// Step 1: Send audio data first to get a conversationId (automatically generated when starting a new session)
const currentConversationId = avatarView.avatarController.yieldAudioData(audioData3, false)
// Step 2: Use the conversationId to send animation data (mismatched conversationId will be discarded)
avatarView.avatarController.yieldFramesData(animationData2, currentConversationId || conversationId)
// Note: To start playback, call playback() with the accumulated data, or ensure enough audio data has been sent
```

### Complete Examples

Check the example code in the GitHub repository for complete usage flows for both modes.

**Example Project:** [AvatarKit-Web-Demo](https://github.com/spatialwalk/AvatarKit-Web-Demo)

This repository contains complete examples for Vanilla JS, Vue 3, and React, demonstrating:
- SDK mode: Real-time audio input with automatic animation data reception
- Host mode: Custom data sources with manual audio/animation data management

## 🏗️ Architecture Overview

### Three-Layer Architecture

The SDK uses a three-layer architecture for clear separation of concerns:

1. **Rendering Layer (AvatarView)** - Responsible for 3D rendering only
2. **Playback Layer (AvatarController)** - Manages audio/animation synchronization and playback
3. **Network Layer** - Handles WebSocket communication (SDK mode only, internal implementation)

### Core Components

- **AvatarKit** - SDK initialization and management
- **AvatarManager** - Character resource loading and management
- **AvatarView** - 3D rendering view (rendering layer)
- **AvatarController** - Audio/animation playback controller (playback layer)

### Playback Modes

The SDK supports two playback modes, configured in `AvatarKit.initialize()`:

#### 1. SDK Mode (Default)
- Configured via `drivingServiceMode: DrivingServiceMode.sdk` in `AvatarKit.initialize()`
- SDK handles WebSocket communication automatically
- Send audio data via `AvatarController.send()`
- SDK receives animation data from the backend and synchronizes playback
- Best for: Real-time audio input scenarios

#### 2. Host Mode
- Configured via `drivingServiceMode: DrivingServiceMode.host` in `AvatarKit.initialize()`
- Host application manages its own network/data fetching
- Host application provides both audio and animation data
- SDK only handles synchronized playback
- Best for: Custom data sources, pre-recorded content, or custom network implementations

**Note:** The playback mode is determined by `drivingServiceMode` in the `AvatarKit.initialize()` configuration.
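
For a Host-mode setup, only the initialization configuration changes. A minimal sketch, assuming the `Configuration` fields and `DrivingServiceMode` enum shown in this README are exported by the package:

```typescript
import { AvatarKit, Configuration, DrivingServiceMode, Environment } from '@spatialwalk/avatarkit'

// Host mode: the host application supplies both audio and animation data.
const configuration: Configuration = {
  environment: Environment.test,
  drivingServiceMode: DrivingServiceMode.host,
}
await AvatarKit.initialize('your-app-id', configuration)
```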

### Fallback Mechanism

The SDK includes a fallback mechanism to ensure audio playback continues even when animation data is unavailable:

- **SDK Mode**: If the server returns an error or fails to provide animation data, the SDK automatically enters audio-only mode and continues playing audio independently
- **Host Mode**: If empty animation data is provided (empty array or undefined), the SDK automatically enters audio-only mode
- Once in audio-only mode, any subsequent animation data for that session will be ignored, and only audio will continue playing
- The fallback mode is interruptible, just like normal playback mode
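
In Host mode this fallback can also be exercised deliberately. The snippet below is a sketch based on the rules above; it assumes that calling `playback()` with audio chunks but no keyframes puts the session into audio-only mode:

```typescript
// Host mode: replay audio without any animation keyframes.
// Per the fallback rules above, the SDK enters audio-only mode for this session,
// and any keyframes yielded later for this conversationId are ignored.
const audioOnlyChunks = [
  { data: audioData1, isLast: false },
  { data: audioData2, isLast: true },
]
const conversationId = await avatarView.avatarController.playback(audioOnlyChunks)
```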

### Data Flow

#### SDK Mode Flow

```
User audio input (16kHz mono PCM16)
↓
AvatarController.send()
↓
WebSocket → Backend processing
↓
Backend returns animation data (FLAME keyframes)
↓
AvatarController → AnimationPlayer
↓
FLAME parameters → AvatarCore.computeFrameFlatFromParams() → Splat data
↓
AvatarController (playback loop) → AvatarView.renderRealtimeFrame()
↓
RenderSystem → WebGPU/WebGL → Canvas rendering
```

#### Host Mode Flow

```
External data source (audio + animation)
↓
Step 1: Send audio data FIRST to get a conversationId
↓
AvatarController.playback(initialAudio, initialKeyframes) // Returns conversationId
OR
AvatarController.yieldAudioData(audioChunk) // Returns conversationId
↓
Step 2: Use the conversationId to send animation data
↓
AvatarController.yieldFramesData(keyframes, conversationId) // Requires conversationId
↓
AvatarController → AnimationPlayer (synchronized playback)
↓
FLAME parameters → AvatarCore.computeFrameFlatFromParams() → Splat data
↓
AvatarController (playback loop) → AvatarView.renderRealtimeFrame()
↓
RenderSystem → WebGPU/WebGL → Canvas rendering
```

**Note:**
- In SDK mode, users provide audio data; the SDK handles network communication and animation data reception
- In Host mode, users provide both audio and animation data; the SDK handles synchronized playback only

### Audio Format Requirements

**⚠️ Important:** The SDK requires audio data to be in **16kHz mono PCM16** format:

- **Sample Rate**: 16kHz (16000 Hz) - This is a backend requirement
- **Channels**: Mono (single channel)
- **Format**: PCM16 (16-bit signed integer, little-endian)
- **Byte Order**: Little-endian

**Audio Data Format:**
- Each sample is 2 bytes (16-bit)
- Audio data should be provided as `ArrayBuffer` or `Uint8Array`
- For example: 1 second of audio = 16000 samples × 2 bytes = 32000 bytes

**Resampling:**
- If your audio source is at a different sample rate (e.g., 24kHz, 48kHz), you must resample it to 16kHz before sending it to the SDK
- For high-quality resampling, we recommend using the Web Audio API's `OfflineAudioContext` with anti-aliasing filtering, as sketched below
- See the example projects for a complete resampling implementation
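
The following sketch shows one way to do this in the browser. It is not part of the SDK: `toPcm16Mono16k` is a hypothetical helper that decodes an audio file, resamples it to 16 kHz mono with `OfflineAudioContext`, and packs the result into little-endian PCM16.

```typescript
// Sketch: decode → resample to 16 kHz mono → convert Float32 samples to PCM16.
// Uses only standard Web Audio APIs; none of these helpers are part of the SDK.
async function toPcm16Mono16k(input: ArrayBuffer): Promise<ArrayBuffer> {
  // Decode with a throwaway AudioContext (any sample rate).
  const decodeCtx = new AudioContext()
  const decoded = await decodeCtx.decodeAudioData(input.slice(0))
  await decodeCtx.close()

  // Resample to 16 kHz mono; OfflineAudioContext applies anti-aliasing internally.
  const targetRate = 16000
  const length = Math.ceil(decoded.duration * targetRate)
  const offline = new OfflineAudioContext(1, length, targetRate)
  const source = offline.createBufferSource()
  source.buffer = decoded
  source.connect(offline.destination) // mono destination → down-mix
  source.start()
  const resampled = await offline.startRendering()

  // Float32 [-1, 1] → little-endian signed 16-bit integers.
  const float32 = resampled.getChannelData(0)
  const pcm16 = new DataView(new ArrayBuffer(float32.length * 2))
  for (let i = 0; i < float32.length; i++) {
    const s = Math.max(-1, Math.min(1, float32[i]))
    pcm16.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7fff, true) // true = little-endian
  }
  return pcm16.buffer
}
```

In SDK mode the resulting `ArrayBuffer` can be passed straight to `send()`; in Host mode, wrap it in `Uint8Array` chunks with the `isLast` flag.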

## 📚 API Reference

### AvatarKit

The core management class of the SDK, responsible for initialization and global configuration.

```typescript
// Initialize SDK
await AvatarKit.initialize(appId: string, configuration: Configuration)

// Check initialization status
const isInitialized = AvatarKit.isInitialized

// Get initialized app ID
const appId = AvatarKit.appId

// Get configuration
const config = AvatarKit.configuration

// Set sessionToken (if needed, call separately)
AvatarKit.setSessionToken('your-session-token')

// Set userId (optional, for telemetry)
AvatarKit.setUserId('user-id')

// Get sessionToken
const sessionToken = AvatarKit.sessionToken

// Get userId
const userId = AvatarKit.userId

// Get SDK version
const version = AvatarKit.version

// Cleanup resources (must be called when no longer in use)
AvatarKit.cleanup()
```

### AvatarManager

Character resource manager, responsible for downloading, caching, and loading character data. Use the singleton instance via `AvatarManager.shared`.

```typescript
// Get singleton instance
const manager = AvatarManager.shared

// Load character
const avatar = await manager.load(
  characterId: string,
  onProgress?: (progress: LoadProgressInfo) => void
)

// Clear cache
manager.clearCache()
```

### AvatarView

3D rendering view (rendering layer), responsible for 3D rendering only. Internally creates and manages an `AvatarController` automatically.

**Playback Mode Configuration:**
- The playback mode is fixed when creating `AvatarView` and persists throughout its lifecycle
- It cannot be changed after creation

```typescript
import { AvatarPlaybackMode } from '@spatialwalk/avatarkit'

// Create view (the Canvas is automatically added to the container;
// the playback mode is determined by drivingServiceMode in the AvatarKit configuration)
const container = document.getElementById('avatar-container')
const avatarView = new AvatarView(avatar, container)

// Get playback mode
const mode = avatarView.playbackMode // 'network' | 'external'

// Wait for first frame to render
await avatarView.ready // Promise that resolves when the first frame is rendered

// Cleanup resources (must be called before switching characters)
avatarView.dispose()
```

**Character Switching Example:**

```typescript
// To switch characters, simply dispose the old view and create a new one
if (currentAvatarView) {
  currentAvatarView.dispose()
}

// Load new character
const newAvatar = await avatarManager.load('new-character-id')

// Create new AvatarView
currentAvatarView = new AvatarView(newAvatar, container)

// SDK mode: start connection
if (currentAvatarView.playbackMode === AvatarPlaybackMode.network) {
  await currentAvatarView.avatarController.start()
}
```

### AvatarController

Audio/animation playback controller (playback layer); manages synchronized playback of audio and animation. Automatically handles WebSocket communication in SDK mode.

**Two Usage Patterns:**

#### SDK Mode Methods

```typescript
// Start WebSocket service
await avatarView.avatarController.start()

// Send audio data
const conversationId = avatarView.avatarController.send(audioData: ArrayBuffer, end: boolean)
// Returns: conversationId - Conversation ID for this conversation session (used to distinguish each conversation round)
// audioData: Audio data (ArrayBuffer format, must be 16kHz mono PCM16)
//   - Sample rate: 16kHz (16000 Hz) - backend requirement
//   - Format: PCM16 (16-bit signed integer, little-endian)
//   - Channels: Mono (single channel)
//   - Example: 1 second = 16000 samples × 2 bytes = 32000 bytes
// end: false (default) - Continue sending audio data for the current conversation
// end: true - Mark the end of the current conversation round. After end=true, sending new audio data
//   will interrupt any ongoing playback from the previous conversation round

// Close WebSocket service
avatarView.avatarController.close()
```

#### Host Mode Methods

```typescript
// Play back existing audio and animation data (starts a new conversation)
const conversationId = await avatarView.avatarController.playback(
  initialAudioChunks?: Array<{ data: Uint8Array, isLast: boolean }>, // Existing audio chunks (16kHz mono PCM16)
  initialKeyframes?: any[] // Existing animation keyframes (obtained from your service)
)
// Returns: conversationId - New conversation ID for this conversation session

// Stream audio chunks (can be called directly to start a new session, or after playback() to add more data)
const conversationId = avatarView.avatarController.yieldAudioData(
  data: Uint8Array, // Audio chunk data
  isLast: boolean = false // Whether this is the last chunk
)
// Returns: conversationId - Conversation ID for this audio session
// Note: If no conversationId exists, a new one will be automatically generated

// Stream animation keyframes (requires the conversationId from the audio data)
avatarView.avatarController.yieldFramesData(
  keyframes: any[], // Animation keyframes (obtained from your service)
  conversationId: string // Conversation ID (required). Use getCurrentConversationId() or yieldAudioData() to get the conversationId.
)
```

**⚠️ Important: Conversation ID (conversationId) Management**

**SDK Mode:**
- `send()` returns a conversationId to distinguish each conversation round
- `end=true` marks the end of a conversation round. After `end=true`, sending new audio data will interrupt any ongoing playback from the previous conversation round

**Host Mode:**
For each conversation session, you **must**:
1. **First send audio data** to get a conversationId (used to distinguish each conversation round):
   - `playback()` returns a conversationId when playing back existing audio and animation data (replay mode)
   - `yieldAudioData()` returns a conversationId for streaming new audio data
2. **Then use that conversationId** to send animation data:
   - `yieldFramesData()` requires a valid conversationId parameter
   - Animation data with a mismatched conversationId will be **discarded**
   - Use `getCurrentConversationId()` to retrieve the currently active conversationId

**Example Flow (Host Mode):**
```typescript
// Option A: Play back existing complete data (replay mode)
const conversationId = await avatarView.avatarController.playback(initialAudioChunks, initialKeyframes)

// Option B: Start streaming new data directly
// Step 1: Send audio data first to get a conversationId (automatically generated when starting a new session)
const currentConversationId = avatarView.avatarController.yieldAudioData(audioChunk, false)
// Step 2: Use the conversationId to send animation data
avatarView.avatarController.yieldFramesData(keyframes, currentConversationId || conversationId)
// Note: To start playback with Option B, call playback() with accumulated data or ensure enough audio has been sent
```

**Why conversationId is required:**
- Ensures audio and animation data belong to the same conversation session
- Prevents data from different sessions from being mixed
- Automatically discards mismatched animation data for data integrity

#### Common Methods (Both Modes)

```typescript
// Pause playback (can be resumed later)
avatarView.avatarController.pause()

// Resume playback (from paused state)
await avatarView.avatarController.resume()

// Interrupt current playback (stops and clears data)
avatarView.avatarController.interrupt()

// Clear all data and resources
avatarView.avatarController.clear()

// Get current conversation ID (for Host mode)
const conversationId = avatarView.avatarController.getCurrentConversationId()
// Returns: Current conversationId for the active audio session, or null if there is no active session

// Set event callbacks
avatarView.avatarController.onConnectionState = (state: ConnectionState) => {} // SDK mode only
avatarView.avatarController.onAvatarState = (state: AvatarState) => {}
avatarView.avatarController.onError = (error: Error) => {}
```

**Important Notes:**
- `start()` and `close()` are only available in SDK mode
- `playback()`, `yieldAudioData()`, and `yieldFramesData()` are only available in Host mode
- `pause()`, `resume()`, `interrupt()`, `clear()`, and `getCurrentConversationId()` are available in both modes
- The playback mode is determined when creating `AvatarView` and cannot be changed
- **Conversation ID**: In Host mode, always send audio data first to obtain a conversationId, then use that conversationId when sending animation data. Animation data with a mismatched conversationId will be discarded. Use `getCurrentConversationId()` to retrieve the currently active conversationId.
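
A common use of the shared `pause()`/`resume()` methods is reacting to tab visibility. The following is an illustrative sketch (not an SDK requirement) that wires them to the Page Visibility API, reusing the `avatarView` created earlier:

```typescript
// Pause playback when the tab is hidden, resume when it becomes visible again.
let pausedByVisibility = false

document.addEventListener('visibilitychange', async () => {
  const controller = avatarView.avatarController
  if (document.hidden) {
    controller.pause()
    pausedByVisibility = true
  } else if (pausedByVisibility) {
    await controller.resume()
    pausedByVisibility = false
  }
})
```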

## 🔧 Configuration

### Configuration

```typescript
interface Configuration {
  environment: Environment
  drivingServiceMode?: DrivingServiceMode // Optional, default is 'sdk' (SDK mode)
}
```

**Description:**
- `environment`: Specifies the environment (cn/us/test); the SDK automatically uses the corresponding API address and WebSocket address for that environment
- `drivingServiceMode`: Specifies the driving service mode
  - `DrivingServiceMode.sdk` (default): SDK mode - SDK handles WebSocket communication automatically
  - `DrivingServiceMode.host`: Host mode - Host application provides audio and animation data
- `sessionToken`: Set separately via `AvatarKit.setSessionToken()`, not in Configuration

```typescript
enum Environment {
  cn = 'cn',     // China region
  us = 'us',     // US region
  test = 'test'  // Test environment
}
```

### AvatarView Constructor

```typescript
constructor(avatar: Avatar, container: HTMLElement)
```

**Parameters:**
- `avatar`: Avatar instance
- `container`: Canvas container element (required)
  - The Canvas automatically uses the container's full size (width and height)
  - The Canvas aspect ratio follows the container size - set the container size to control the aspect ratio
  - The Canvas is automatically added to the container
  - The SDK automatically handles resize events via ResizeObserver

**Note:** The playback mode is determined by `drivingServiceMode` in the `AvatarKit.initialize()` configuration, not by a constructor parameter.

```typescript
enum AvatarPlaybackMode {
  network = 'network',   // SDK mode: SDK handles WebSocket communication
  external = 'external'  // Host mode: Host provides data, SDK handles playback
}
```
@@ -221,17 +499,17 @@ enum Environment {

```typescript
interface CameraConfig {
  position: [number, number, number]  // Camera position
  target: [number, number, number]    // Camera target
  fov: number                         // Field of view angle
  near: number                        // Near clipping plane
  far: number                         // Far clipping plane
  up?: [number, number, number]       // Up direction
  aspect?: number                     // Aspect ratio
}
```

## 📊 State Management

### ConnectionState

@@ -248,77 +526,27 @@ enum ConnectionState {

```typescript
enum AvatarState {
  idle = 'idle',        // Idle state, showing breathing animation
  active = 'active',    // Active, waiting for playable content
  playing = 'playing',  // Playing
  paused = 'paused'     // Paused (can be resumed)
}
```
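
A typical way to consume these states is to drive UI from the `onAvatarState` and `onConnectionState` callbacks listed under AvatarController. The sketch below assumes the `AvatarState` and `ConnectionState` enums are exported by the package, as their declarations here suggest, and uses a placeholder status element:

```typescript
import { AvatarState, ConnectionState } from '@spatialwalk/avatarkit'

const statusEl = document.getElementById('avatar-status')! // hypothetical status element

avatarView.avatarController.onAvatarState = (state: AvatarState) => {
  // Map playback states to a simple status label.
  switch (state) {
    case AvatarState.idle:    statusEl.textContent = 'Idle (breathing)'; break
    case AvatarState.active:  statusEl.textContent = 'Waiting for content'; break
    case AvatarState.playing: statusEl.textContent = 'Speaking'; break
    case AvatarState.paused:  statusEl.textContent = 'Paused'; break
  }
}

// SDK mode only: surface connection changes as well.
avatarView.avatarController.onConnectionState = (state: ConnectionState) => {
  console.log('Connection state:', state)
}
```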

## 🎨 Rendering System

The SDK supports two rendering backends:

- **WebGPU** - High-performance rendering for modern browsers
- **WebGL** - Traditional rendering with broader compatibility

The rendering system automatically selects the best backend; no manual configuration is needed.

## 🚨 Error Handling

### SPAvatarError

The SDK uses custom error types, providing more detailed error information:

```typescript
import { SPAvatarError } from '@spatialwalk/avatarkit'
@@ -334,70 +562,147 @@ try {
}
```

### Error Callbacks

```typescript
avatarView.avatarController.onError = (error: Error) => {
  console.error('AvatarController error:', error)
  // Handle the error, e.g., reconnection, user notification, etc.
}
```

## 🔄 Resource Management

### Lifecycle Management

#### SDK Mode Lifecycle

```typescript
// Initialize
const container = document.getElementById('avatar-container')
const avatarView = new AvatarView(avatar, container)
await avatarView.avatarController.start()

// Use
avatarView.avatarController.send(audioData, false)

// Cleanup
avatarView.avatarController.close()
avatarView.dispose() // Automatically cleans up all resources
```

#### Host Mode Lifecycle

```typescript
// Initialize
const container = document.getElementById('avatar-container')
const avatarView = new AvatarView(avatar, container)

// Use
const initialAudioChunks = [{ data: audioData1, isLast: false }]
// Step 1: Send audio first to get a conversationId
const conversationId = await avatarView.avatarController.playback(initialAudioChunks, initialKeyframes)
// Step 2: Stream additional audio (returns the conversationId)
const currentConversationId = avatarView.avatarController.yieldAudioData(audioChunk, false)
// Step 3: Use the conversationId to send animation data (mismatched conversationId will be discarded)
avatarView.avatarController.yieldFramesData(keyframes, currentConversationId || conversationId)

// Cleanup
avatarView.avatarController.clear() // Clear all data and resources
avatarView.dispose() // Automatically cleans up all resources
```

**⚠️ Important Notes:**
- When disposing of an AvatarView instance, you must call `dispose()` to properly clean up resources
- Not cleaning up properly may cause resource leaks and rendering errors
- In SDK mode, call `close()` before `dispose()` to properly close WebSocket connections
- In Host mode, call `clear()` before `dispose()` to clear all playback data

### Memory Optimization

- The SDK automatically manages WASM memory allocation
- Supports dynamic loading/unloading of character and animation resources
- Provides a memory usage monitoring interface

### Audio Data Sending

#### SDK Mode

The `send()` method receives audio data in `ArrayBuffer` format:

**Audio Format Requirements:**
- **Sample Rate**: 16kHz (16000 Hz) - **Backend requirement, must be exactly 16kHz**
- **Format**: PCM16 (16-bit signed integer, little-endian)
- **Channels**: Mono (single channel)
- **Data Size**: Each sample is 2 bytes, so 1 second of audio = 16000 samples × 2 bytes = 32000 bytes

**Usage:**
- `audioData`: Audio data (ArrayBuffer format, must be 16kHz mono PCM16)
- `end=false` (default) - Continue sending audio data for the current conversation
- `end=true` - Mark the end of the current conversation round. After `end=true`, sending new audio data will interrupt any ongoing playback from the previous conversation round
- **Important**: There is no need to wait for `end=true` to start playing; playback starts automatically once enough audio data has accumulated (a microphone capture sketch follows below)
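
As an end-to-end illustration of these requirements, the sketch below captures microphone input and streams it via `send()`. It is not part of the SDK: the capture pipeline (getUserMedia, a 16 kHz `AudioContext`, and the deprecated-but-compact `ScriptProcessorNode`) is an assumption chosen for brevity, and `avatarView` is the view created earlier; production code would typically use an `AudioWorklet` instead.

```typescript
// Sketch: capture microphone audio and stream it to the SDK in SDK mode.
async function streamMicrophone() {
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
  const ctx = new AudioContext({ sampleRate: 16000 }) // most browsers resample capture to this rate
  const source = ctx.createMediaStreamSource(stream)
  const processor = ctx.createScriptProcessor(4096, 1, 1)

  processor.onaudioprocess = (event) => {
    const float32 = event.inputBuffer.getChannelData(0)
    // Float32 [-1, 1] → little-endian PCM16, as required by the SDK.
    const pcm16 = new DataView(new ArrayBuffer(float32.length * 2))
    for (let i = 0; i < float32.length; i++) {
      const s = Math.max(-1, Math.min(1, float32[i]))
      pcm16.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7fff, true) // true = little-endian
    }
    avatarView.avatarController.send(pcm16.buffer, false)
  }

  source.connect(processor)
  processor.connect(ctx.destination)
  // When the user finishes speaking, send the final chunk with end = true
  // to mark the end of the conversation round.
}
```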

#### Host Mode

The `playback()` method is used to play back existing audio and animation data (replay mode); it generates a new conversationId and interrupts any existing conversation.

**Two ways to start a session in Host mode:**
1. **Use `playback()`** - For replaying existing complete audio and animation data
2. **Use `yieldAudioData()` directly** - For streaming new audio data (automatically generates a conversationId if needed)

Then use `yieldAudioData()` to stream additional audio.

**Audio Format Requirements:**
- Same as SDK mode: 16kHz mono PCM16 format
- Audio data should be provided as `Uint8Array` chunks with an `isLast` flag

**Usage:**
```typescript
// Play back existing audio and animation data (starts a new conversation)
// Note: Audio and animation data should be obtained from your backend service
const initialAudioChunks = [
  { data: audioData1, isLast: false },
  { data: audioData2, isLast: false }
]
const conversationId = await avatarController.playback(initialAudioChunks, initialKeyframes)
// Returns: conversationId - New conversation ID for this conversation session

// Stream additional audio chunks
const currentConversationId = avatarController.yieldAudioData(audioChunk, isLast)
// Returns: conversationId - Conversation ID for this audio session
```

**⚠️ Conversation ID Workflow:**
1. **Start a session** → Choose one of two ways:
   - **Option A**: Use `playback(initialAudioChunks, initialKeyframes)` to replay existing complete data
   - **Option B**: Use `yieldAudioData(audioChunk)` directly to start streaming (automatically generates a conversationId)
2. **Get the conversationId** → Both methods return a conversationId
3. **Send animation with the conversationId** → Use the conversationId from step 1 in `yieldFramesData()`
4. **Data matching** → Only animation data with a matching conversationId will be accepted

**Resampling (Both Modes):**
- If your audio source is at a different sample rate (e.g., 24kHz, 48kHz), you **must** resample it to 16kHz before sending
- For high-quality resampling, use the Web Audio API's `OfflineAudioContext` with anti-aliasing filtering
- See the example projects (`vanilla`, `react`, `vue`) for a complete resampling implementation

## 🌐 Browser Compatibility

- **Chrome/Edge** 90+ (WebGPU recommended)
- **Firefox** 90+ (WebGL)
- **Safari** 14+ (WebGL)
- **Mobile** iOS 14+, Android 8+

## 📝 License

MIT License

## 🤝 Contributing

Issues and Pull Requests are welcome!

## 📞 Support

For questions, please contact:
- Email: support@spavatar.com
- Documentation: https://docs.spatialreal.ai
- GitHub: https://github.com/spavatar/sdk