@spatialwalk/avatarkit 1.0.0-beta.27 → 1.0.0-beta.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/CHANGELOG.md +37 -4
  2. package/README.md +30 -31
  3. package/dist/StreamingAudioPlayer-C-_1X8K-.js +398 -0
  4. package/dist/animation/AnimationWebSocketClient.d.ts +0 -20
  5. package/dist/animation/utils/eventEmitter.d.ts +0 -3
  6. package/dist/animation/utils/flameConverter.d.ts +3 -10
  7. package/dist/audio/AnimationPlayer.d.ts +0 -46
  8. package/dist/audio/StreamingAudioPlayer.d.ts +0 -81
  9. package/dist/avatar_core_wasm-i0Ocpx6q.js +2693 -0
  10. package/dist/config/app-config.d.ts +1 -5
  11. package/dist/config/constants.d.ts +2 -10
  12. package/dist/config/sdk-config-loader.d.ts +2 -8
  13. package/dist/core/Avatar.d.ts +0 -6
  14. package/dist/core/AvatarController.d.ts +0 -111
  15. package/dist/core/AvatarDownloader.d.ts +0 -75
  16. package/dist/core/AvatarManager.d.ts +6 -13
  17. package/dist/core/AvatarSDK.d.ts +21 -0
  18. package/dist/core/AvatarView.d.ts +4 -103
  19. package/dist/core/NetworkLayer.d.ts +0 -6
  20. package/dist/generated/driveningress/v1/driveningress.d.ts +1 -11
  21. package/dist/generated/driveningress/v2/driveningress.d.ts +0 -2
  22. package/dist/generated/google/protobuf/struct.d.ts +5 -38
  23. package/dist/generated/google/protobuf/timestamp.d.ts +1 -102
  24. package/dist/index-BpVIIm3g.js +7921 -0
  25. package/dist/index.d.ts +1 -4
  26. package/dist/index.js +17 -17
  27. package/dist/renderer/RenderSystem.d.ts +0 -8
  28. package/dist/renderer/covariance.d.ts +0 -11
  29. package/dist/renderer/sortSplats.d.ts +0 -10
  30. package/dist/renderer/webgl/reorderData.d.ts +0 -12
  31. package/dist/renderer/webgl/webglRenderer.d.ts +3 -39
  32. package/dist/renderer/webgpu/webgpuRenderer.d.ts +3 -27
  33. package/dist/types/character-settings.d.ts +0 -4
  34. package/dist/types/character.d.ts +3 -9
  35. package/dist/types/index.d.ts +14 -21
  36. package/dist/utils/animation-interpolation.d.ts +3 -12
  37. package/dist/utils/client-id.d.ts +0 -5
  38. package/dist/utils/cls-tracker.d.ts +5 -26
  39. package/dist/utils/conversationId.d.ts +0 -18
  40. package/dist/utils/error-utils.d.ts +1 -24
  41. package/dist/utils/heartbeat-manager.d.ts +0 -26
  42. package/dist/utils/id-manager.d.ts +0 -23
  43. package/dist/utils/logger.d.ts +1 -4
  44. package/dist/utils/usage-tracker.d.ts +2 -17
  45. package/dist/wasm/avatarCoreAdapter.d.ts +0 -134
  46. package/dist/wasm/avatarCoreMemory.d.ts +0 -52
  47. package/package.json +1 -1
  48. package/dist/StreamingAudioPlayer-C6v9Ed55.js +0 -352
  49. package/dist/avatar_core_wasm-BPIbbUx_.js +0 -1663
  50. package/dist/core/AvatarKit.d.ts +0 -48
  51. package/dist/index-s9KqPWVW.js +0 -6770
package/dist/wasm/avatarCoreAdapter.d.ts CHANGED
@@ -1,9 +1,3 @@
- /**
- * Avatar Core WASM adapter
- *
- * Fully replaces FlameComplete3DGSManager and provides the complete avatar_core_wasm feature set,
- * including advanced capabilities such as teeth subdivision and eye tracking
- */
  export interface WasmModuleConfig {
  baseUrl?: string;
  [key: string]: unknown;
@@ -65,172 +59,44 @@ export declare class AvatarCoreAdapter {
  private flameInfo?;
  private characterInfo?;
  constructor(options?: AvatarCoreOptions);
- /**
- * Load the WASM module and set up the API
- * A brand-new WASM instance is created every time so the C++ memory starts clean
- *
- * Note: the WASM module is imported here via dynamic import().
- * At build time, Vite automatically appends a hash to the WASM file and the JS glue code (e.g. avatar_core_wasm-CxWuw7eS.wasm),
- * which keeps the browser cache consistent with the version and avoids stale-cache issues.
- *
- * How the hash works:
- * - WASM file content changes → hash changes → URL changes → browser fetches the new version
- * - WASM file content unchanged → hash unchanged → URL unchanged → browser serves it from cache
- * - The Emscripten-generated JS internally loads the .wasm file via a hard-coded hashed path
- */
  loadWASMModule(): Promise<void>;
- /**
- * Validate WASM module functionality
- */
  private validateWASMModule;
- /**
- * Initialize memory views
- */
  private initializeMemoryViews;
- /**
- * Set up C API function wrappers
- */
  private setupCAPIFunctions;
- /**
- * Read the FlameParams of the current animation frame (for transitions or debugging)
- */
- /**
- * Get current frame parameters
- * @param frameIndex Frame index
- * @param characterId Optional character ID for multi-character support
- */
  getCurrentFrameParams(frameIndex?: number, characterId?: string): Promise<FlameParams>;
- /**
- * Initialize the Avatar Core engine
- */
  private initializeAvatarCore;
- /**
- * Load template resources (from preloaded ArrayBuffers)
- */
  loadTemplateResourcesFromBuffers(templateResources: Record<string, ArrayBuffer>): Promise<boolean>;
- /**
- * Load character data (from preloaded ArrayBuffers)
- */
- /**
- * Load character from buffers and return handle
- * @param shapeBuffer Shape data buffer
- * @param pointCloudBuffer Point cloud data buffer
- * @param characterId Optional character ID for multi-character support
- * @returns Character handle
- */
  loadCharacterFromBuffers(shapeBuffer: ArrayBuffer, pointCloudBuffer: ArrayBuffer, characterId?: string): Promise<number>;
  private loadAnimationFromData;
- /**
- * Load animation from ArrayBuffer (for CDN-based dynamic loading)
- * @param animData Animation data buffer
- * @param characterId Optional character ID for multi-character support
- * @returns Animation handle
- */
  loadAnimationFromBuffer(animData: ArrayBuffer, characterId?: string): Promise<number>;
  switchAnimationFile(animationFile: string): Promise<number>;
- /**
- * Get the total number of animation frames
- * @param animationHandle Optional animation handle (for multi-character support)
- */
  getAnimationTotalFrames(animationHandle?: number): Promise<number>;
- /**
- * Get the parameters of a given frame from the animation
- * @param frameIndex Frame index
- * @param characterId Optional character ID for multi-character support
- */
  getAnimationFrameParams(frameIndex?: number, characterId?: string): Promise<number>;
- /**
- * Set the eye-tracking configuration (aligned with the app implementation)
- */
  setEyeTrackingConfig(config: {
  enabled: boolean;
  auto_eyelid_adjustment?: boolean;
  eyelid_threshold?: number;
  }): Promise<boolean>;
- /**
- * Set the eye-tracking gaze target (advanced feature)
- */
  setGazeTarget(x: number, y: number, z: number): Promise<boolean>;
- /**
- * Reset eye tracking
- */
  resetEyeTracking(): Promise<boolean>;
- /**
- * Query FLAME model information
- */
  private queryFlameInfo;
- /**
- * Query character information
- */
  private queryCharacterInfo;
- /**
- * Check the error code and throw on failure
- */
  private checkError;
- /**
- * Update the progress callback
- */
  private updateProgress;
- /**
- * Get performance metrics
- */
  getPerformanceMetrics(): PerformanceMetrics & {
  flameInfo?: FlameInfo;
  characterInfo?: CharacterInfo;
  version?: string;
  };
- /**
- * Release the current character and animation resources (but keep the core)
- */
- /**
- * Remove a specific character by handle
- * @param characterHandle Character handle to remove
- * @param characterId Optional character ID for cleanup
- */
  removeCharacter(characterHandle: number, characterId?: string): void;
- /**
- * Release current character (legacy method, kept for backward compatibility)
- * @deprecated Use removeCharacter() instead for multi-character support
- */
  releaseCurrentCharacter(): void;
- /**
- * Release all resources (including the core)
- */
  release(): void;
- /**
- * Compatibility layer: provides the same interface as FlameComplete3DGSManager
- */
  loadFlameModel(_modelData: any): Promise<boolean>;
  load3DGSData(_original3DGSPoints: any, _binding: any, _flameFaces: any): Promise<boolean>;
- /**
- * Compute a frame and return it in a GPU-friendly flat format (zero-copy, precomputed covariance)
- * 🚀 Performance-optimized version:
- * - Covariance matrices are precomputed in C++
- * - Zero-copy direct access to WASM memory
- * - Output is emitted directly in GPU format [pos3, color4, cov6]
- */
- /**
- * Compute complete frame from animation
- * @param params Frame parameters
- * @param characterHandle Optional character handle (for multi-character support)
- */
  computeCompleteFrameFlat(params?: {
  frameIndex?: number;
  }, characterHandle?: number): Promise<Float32Array | null>;
- /**
- * Compute a frame and return it in a GPU-friendly flat format (from FLAME parameters)
- * 🔑 Used for Realtime: accepts custom FLAME parameters and computes the splats
- */
- /**
- * Compute frame from Flame parameters
- * @param flameParams Flame parameters
- * @param characterHandle Optional character handle (for multi-character support)
- */
  computeFrameFlatFromParams(flameParams: FlameParams, characterHandle?: number): Promise<Float32Array | null>;
- /**
- * Get WASM memory usage (MB)
- * @returns WASM memory usage in MB, or null if the module is not loaded
- */
  getWASMMemoryMB(): number | null;
  }
  export {};
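
The removed loadWASMModule comment describes Vite's content-hashed dynamic import of the Emscripten glue module. As a rough illustration only (the unhashed module specifier and the MODULARIZE-style default-export factory below are assumptions, not the SDK's actual code), the pattern it describes looks roughly like this:

```ts
// Hedged sketch of the hashed dynamic-import pattern the removed comment describes.
async function loadWasmModule(): Promise<unknown> {
  // Vite rewrites this specifier to the hashed chunk (e.g. avatar_core_wasm-i0Ocpx6q.js from
  // this release), so changed WASM content yields a new URL and the browser fetches a fresh
  // build, while unchanged content keeps the same URL and is served from cache.
  const { default: createModule } = await import('./avatar_core_wasm.js');
  // Each call returns a brand-new Emscripten instance, so the C++ heap starts clean.
  return createModule();
}
```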
package/dist/wasm/avatarCoreMemory.d.ts CHANGED
@@ -1,9 +1,3 @@
- /**
- * Avatar Core WASM memory management utilities
- *
- * Provide efficient memory allocation, data transfer, and zero-copy access
- * Optimized specifically for avatar_core_wasm
- */
  interface WASMModule {
  _malloc: (size: number) => number;
  _free: (ptr: number) => void;
@@ -47,26 +41,13 @@ export declare class AvatarCoreMemoryManager {
  private allocatedPointers;
  private structPointers;
  constructor(wasmModule: WASMModule);
- /**
- * Allocate WASM memory and copy data into it - pure API approach
- */
  allocateAndCopy(arrayBuffer: ArrayBuffer): MemoryAllocation;
- /**
- * Create an AvatarTemplateConfig struct
- */
  createTemplateConfig(resources: TemplateResources): number;
- /**
- * Create an AvatarCharacterData struct - new-style Emscripten approach
- */
  createCharacterData(shapeBuffer: ArrayBuffer, plyBuffer: ArrayBuffer, characterId?: string): {
  dataPtr: number;
  shapePtr: number;
  plyPtr: number;
  };
- /**
- * Read AvatarFlameParams struct data
- * Used to extract current animation frame parameters from WASM memory
- */
  readFlameParams(paramsPtr: number): {
  shape_params: number[];
  expr_params: number[];
@@ -78,9 +59,6 @@ export declare class AvatarCoreMemoryManager {
  eyelid: number[];
  has_eyelid: boolean;
  };
- /**
- * Create an AvatarFlameParams struct - new-style Emscripten approach
- */
  createFlameParams(params: {
  shape_params?: number[];
  expr_params?: number[];
@@ -92,25 +70,7 @@ export declare class AvatarCoreMemoryManager {
  eyelid?: number[];
  has_eyelid?: boolean;
  }): number;
- /**
- * Read AvatarSplatPointFlatArray struct data (precomputed covariance)
- *
- * Struct layout:
- * - AvatarSplatPointFlat* points (bytes 0-3, 32-bit pointer)
- * - uint32_t point_count (bytes 4-7)
- * - float compute_time_ms (bytes 8-11)
- *
- * Per-point layout (52 bytes):
- * - position[3] (12 bytes)
- * - color[4] (16 bytes, RGBA)
- * - covariance[6] (24 bytes, upper triangle of the precomputed covariance matrix)
- *
- * ⚠️ Read values one by one with getValue to avoid HEAPF32 detachment issues with dynamically grown memory
- */
  readSplatPointFlatArray(arrayPtr: number): Float32Array | null;
- /**
- * Read AvatarMeshData struct data
- */
  readMeshData(outputPtr: number): {
  vertices: Float32Array;
  vertexCount: number;
@@ -120,21 +80,9 @@ export declare class AvatarCoreMemoryManager {
  landmarkCount: number;
  computeTime: number;
  };
- /**
- * Free the memory at the given pointer
- */
  free(ptr: number): void;
- /**
- * Free struct memory
- */
  freeStruct(name: string): void;
- /**
- * Clean up all allocated memory
- */
  cleanup(): void;
- /**
- * Get memory usage statistics
- */
  getMemoryStats(): {
  allocatedPointers: number;
  structPointers: number;
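
The removed readSplatPointFlatArray doc comment spells out the flat layout (a 12-byte header, then 52 bytes per point: position[3], color[4], covariance[6]) and recommends Emscripten's getValue over HEAPF32 views once the WASM memory has grown. A minimal sketch of reading that documented layout; the wrapper below is illustrative and is not the SDK's own implementation:

```ts
// Illustrative reader for the documented AvatarSplatPointFlatArray layout.
// Assumes an Emscripten module exposing getValue(ptr, type); offsets follow the removed comment.
interface EmscriptenGetValue {
  getValue: (ptr: number, type: string) => number;
}

const FLOATS_PER_POINT = 13; // position[3] + color[4] + covariance[6] = 52 bytes

function readSplatPointsFlat(mod: EmscriptenGetValue, arrayPtr: number): Float32Array {
  const pointsPtr = mod.getValue(arrayPtr, 'i32');             // bytes 0-3: AvatarSplatPointFlat*
  const pointCount = mod.getValue(arrayPtr + 4, 'i32') >>> 0;  // bytes 4-7: uint32_t point_count
  const out = new Float32Array(pointCount * FLOATS_PER_POINT);
  for (let i = 0; i < out.length; i++) {
    // Reading one float at a time sidesteps HEAPF32 detachment after memory growth.
    out[i] = mod.getValue(pointsPtr + i * 4, 'float');
  }
  return out; // flat [pos3, color4, cov6] per point, ready for GPU upload
}
```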
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@spatialwalk/avatarkit",
  "type": "module",
- "version": "1.0.0-beta.27",
+ "version": "1.0.0-beta.29",
  "packageManager": "pnpm@10.18.2",
  "description": "SPAvatar SDK - 3D Gaussian Splatting Avatar Rendering SDK",
  "author": "SPAvatar Team",
package/dist/StreamingAudioPlayer-C6v9Ed55.js DELETED
@@ -1,352 +0,0 @@
- var C = Object.defineProperty;
- var g = (h, t, e) => t in h ? C(h, t, { enumerable: !0, configurable: !0, writable: !0, value: e }) : h[t] = e;
- var s = (h, t, e) => g(h, typeof t != "symbol" ? t + "" : t, e);
- import { A as m, e as f, l as c, a as n } from "./index-s9KqPWVW.js";
- class y {
- constructor(t) {
- // AudioContext is managed internally
- s(this, "audioContext", null);
- s(this, "sampleRate");
- s(this, "channelCount");
- s(this, "debug");
- // Session-level state
- s(this, "sessionId");
- s(this, "sessionStartTime", 0);
- // AudioContext time when session started
- s(this, "pausedTimeOffset", 0);
- // Accumulated paused time
- s(this, "pausedAt", 0);
- // Time when paused
- s(this, "pausedAudioContextTime", 0);
- // audioContext.currentTime when paused (for resume calculation)
- s(this, "scheduledTime", 0);
- // Next chunk schedule time in AudioContext time
- // Playback state
- s(this, "isPlaying", !1);
- s(this, "isPaused", !1);
- s(this, "autoStartEnabled", !0);
- // Control whether to auto-start when buffer is ready
- // Audio buffer queue
- s(this, "audioChunks", []);
- s(this, "scheduledChunks", 0);
- // Number of chunks already scheduled
- s(this, "activeSources", /* @__PURE__ */ new Set());
- // Volume control
- s(this, "gainNode", null);
- s(this, "volume", 1);
- // Default volume 1.0 (0.0 - 1.0)
- // Event callbacks
- s(this, "onEndedCallback");
- this.sessionId = `session_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, this.sampleRate = (t == null ? void 0 : t.sampleRate) ?? m.audio.sampleRate, this.channelCount = (t == null ? void 0 : t.channelCount) ?? 1, this.debug = (t == null ? void 0 : t.debug) ?? !1;
- }
- /**
- * Initialize audio context (create and ensure it's ready)
- */
- async initialize() {
- if (!this.audioContext)
- try {
- this.audioContext = new AudioContext({
- sampleRate: this.sampleRate
- }), this.gainNode = this.audioContext.createGain(), this.gainNode.gain.value = this.volume, this.gainNode.connect(this.audioContext.destination), this.audioContext.state === "suspended" && await this.audioContext.resume(), this.log("AudioContext initialized", {
- sessionId: this.sessionId,
- sampleRate: this.audioContext.sampleRate,
- state: this.audioContext.state
- });
- } catch (t) {
- const e = f(t);
- throw c("activeAudioSessionFailed", "warning", {
- sessionId: this.sessionId,
- reason: e
- }), n.error("Failed to initialize AudioContext:", e), t instanceof Error ? t : new Error(e);
- }
- }
- /**
- * Add audio chunk (16-bit PCM)
- */
- addChunk(t, e = !1) {
- if (!this.audioContext) {
- n.error("AudioContext not initialized");
- return;
- }
- this.audioChunks.push({ data: t, isLast: e }), this.log(`Added chunk ${this.audioChunks.length}`, {
- size: t.length,
- totalChunks: this.audioChunks.length,
- isLast: e,
- isPlaying: this.isPlaying,
- scheduledChunks: this.scheduledChunks
- }), !this.isPlaying && this.autoStartEnabled && this.audioChunks.length > 0 ? (this.log("[StreamingAudioPlayer] Auto-starting playback from addChunk"), this.startPlayback()) : this.isPlaying && !this.isPaused ? (this.log("[StreamingAudioPlayer] Already playing, scheduling next chunk"), this.scheduleNextChunk()) : this.log("[StreamingAudioPlayer] Not playing and no chunks, waiting for more chunks");
- }
- /**
- * Start new session (stop current and start fresh)
- */
- async startNewSession(t) {
- this.stop(), this.sessionId = `session_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`, this.audioChunks = [], this.scheduledChunks = 0, this.pausedTimeOffset = 0, this.pausedAt = 0, this.pausedAudioContextTime = 0, this.log("Starting new session", {
- chunks: t.length
- });
- for (const e of t)
- this.addChunk(e.data, e.isLast);
- }
- /**
- * Start playback
- */
- startPlayback() {
- if (!this.audioContext) {
- this.log("[StreamingAudioPlayer] Cannot start playback: AudioContext not initialized");
- return;
- }
- if (this.isPlaying) {
- this.log("[StreamingAudioPlayer] Cannot start playback: Already playing");
- return;
- }
- this.isPlaying = !0, this.sessionStartTime = this.audioContext.currentTime, this.scheduledTime = this.sessionStartTime, this.log("[StreamingAudioPlayer] Starting playback", {
- sessionStartTime: this.sessionStartTime,
- bufferedChunks: this.audioChunks.length,
- scheduledChunks: this.scheduledChunks,
- activeSources: this.activeSources.size
- }), this.scheduleAllChunks();
- }
- /**
- * Schedule all pending chunks
- */
- scheduleAllChunks() {
- for (; this.scheduledChunks < this.audioChunks.length; )
- this.scheduleNextChunk();
- }
- /**
- * Schedule next audio chunk
- */
- scheduleNextChunk() {
- if (!this.audioContext) {
- this.log("[StreamingAudioPlayer] Cannot schedule chunk: AudioContext not initialized");
- return;
- }
- if (!this.isPlaying || this.isPaused) {
- this.log("[StreamingAudioPlayer] Cannot schedule chunk: Not playing or paused");
- return;
- }
- const t = this.scheduledChunks;
- if (t >= this.audioChunks.length) {
- this.log(`[StreamingAudioPlayer] No more chunks to schedule (chunkIndex: ${t}, totalChunks: ${this.audioChunks.length})`);
- return;
- }
- const e = this.audioChunks[t];
- if (e.data.length === 0 && !e.isLast) {
- this.scheduledChunks++;
- return;
- }
- const r = e.data, o = e.isLast, a = this.pcmToAudioBuffer(r);
- if (!a) {
- n.error("Failed to create AudioBuffer from PCM data"), c("character_player", "error", {
- sessionId: this.sessionId,
- event: "audio_buffer_creation_failed"
- });
- return;
- }
- try {
- const i = this.audioContext.createBufferSource();
- i.buffer = a, i.connect(this.gainNode), i.start(this.scheduledTime), this.activeSources.add(i), i.onended = () => {
- this.activeSources.delete(i), o && this.activeSources.size === 0 && (this.log("Last audio chunk ended, marking playback as ended"), this.markEnded());
- }, this.scheduledTime += a.duration, this.scheduledChunks++, this.log(`[StreamingAudioPlayer] Scheduled chunk ${t + 1}/${this.audioChunks.length}`, {
- startTime: this.scheduledTime - a.duration,
- duration: a.duration,
- nextScheduleTime: this.scheduledTime,
- isLast: o,
- activeSources: this.activeSources.size
- });
- } catch (i) {
- n.errorWithError("Failed to schedule audio chunk:", i), c("character_player", "error", {
- sessionId: this.sessionId,
- event: "schedule_chunk_failed",
- reason: i instanceof Error ? i.message : String(i)
- });
- }
- }
- /**
- * Convert PCM data to AudioBuffer
- * Input: 16-bit PCM (int16), Output: AudioBuffer (float32 [-1, 1])
- */
- pcmToAudioBuffer(t) {
- if (!this.audioContext)
- return null;
- if (t.length === 0) {
- const l = Math.floor(this.sampleRate * 0.01), u = this.audioContext.createBuffer(
- this.channelCount,
- l,
- this.sampleRate
- );
- for (let d = 0; d < this.channelCount; d++)
- u.getChannelData(d).fill(0);
- return u;
- }
- const e = new Uint8Array(t), r = new Int16Array(e.buffer, 0, e.length / 2), o = r.length / this.channelCount, a = this.audioContext.createBuffer(
- this.channelCount,
- o,
- this.sampleRate
- );
- for (let i = 0; i < this.channelCount; i++) {
- const l = a.getChannelData(i);
- for (let u = 0; u < o; u++) {
- const d = u * this.channelCount + i;
- l[u] = r[d] / 32768;
- }
- }
- return a;
- }
- /**
- * Get current playback time (seconds)
- */
- getCurrentTime() {
- if (!this.audioContext || !this.isPlaying)
- return 0;
- if (this.isPaused)
- return this.pausedAt;
- const e = this.audioContext.currentTime - this.sessionStartTime - this.pausedTimeOffset;
- return Math.max(0, e);
- }
- /**
- * Pause playback
- */
- pause() {
- !this.isPlaying || this.isPaused || !this.audioContext || (this.pausedAt = this.getCurrentTime(), this.pausedAudioContextTime = this.audioContext.currentTime, this.isPaused = !0, this.audioContext.state === "running" && this.audioContext.suspend().catch((t) => {
- n.errorWithError("Failed to suspend AudioContext:", t), this.isPaused = !1;
- }), this.log("Playback paused", {
- pausedAt: this.pausedAt,
- pausedAudioContextTime: this.pausedAudioContextTime,
- audioContextState: this.audioContext.state
- }));
- }
- /**
- * Resume playback
- */
- async resume() {
- if (!this.isPaused || !this.audioContext || !this.isPlaying)
- return;
- if (this.audioContext.state === "suspended")
- try {
- await this.audioContext.resume();
- } catch (e) {
- throw n.errorWithError("Failed to resume AudioContext:", e), e;
- }
- const t = this.audioContext.currentTime;
- this.sessionStartTime = this.pausedAudioContextTime - this.pausedAt - this.pausedTimeOffset, this.isPaused = !1, this.scheduledChunks < this.audioChunks.length && this.scheduleAllChunks(), this.log("Playback resumed", {
- pausedAt: this.pausedAt,
- pausedAudioContextTime: this.pausedAudioContextTime,
- currentAudioContextTime: t,
- adjustedSessionStartTime: this.sessionStartTime,
- audioContextState: this.audioContext.state
- });
- }
- /**
- * Stop playback
- */
- stop() {
- if (this.audioContext) {
- this.isPaused && this.audioContext.state === "suspended" && (this.audioContext.resume().catch(() => {
- }), this.isPaused = !1), this.isPlaying = !1, this.isPaused = !1, this.sessionStartTime = 0, this.scheduledTime = 0;
- for (const t of this.activeSources) {
- t.onended = null;
- try {
- t.stop(0);
- } catch {
- }
- try {
- t.disconnect();
- } catch {
- }
- }
- this.activeSources.clear(), this.audioChunks = [], this.scheduledChunks = 0, this.log("[StreamingAudioPlayer] Playback stopped, state reset");
- }
- }
- /**
- * Enable or disable auto-start (for delayed start scenarios)
- */
- setAutoStart(t) {
- this.autoStartEnabled = t, this.log(`Auto-start ${t ? "enabled" : "disabled"}`);
- }
- /**
- * Start playback manually (for delayed start scenarios)
- * This allows starting playback after transition animation completes
- */
- play() {
- this.isPlaying || (this.autoStartEnabled = !0, this.startPlayback());
- }
- /**
- * Mark playback as ended
- */
- markEnded() {
- var t;
- this.log("Playback ended"), this.isPlaying = !1, (t = this.onEndedCallback) == null || t.call(this);
- }
- /**
- * Set ended callback
- */
- onEnded(t) {
- this.onEndedCallback = t;
- }
- /**
- * Check if playing
- */
- isPlayingNow() {
- return this.isPlaying && !this.isPaused;
- }
- /**
- * Get total duration of buffered audio
- */
- getBufferedDuration() {
- if (!this.audioContext)
- return 0;
- let t = 0;
- for (const e of this.audioChunks)
- t += e.data.length / 2 / this.channelCount;
- return t / this.sampleRate;
- }
- /**
- * Get remaining duration (buffered - played) in seconds
- */
- getRemainingDuration() {
- const t = this.getBufferedDuration(), e = this.getCurrentTime();
- return Math.max(0, t - e);
- }
- /**
- * Dispose and cleanup
- */
- dispose() {
- this.stop(), this.audioContext && (this.audioContext.close(), this.audioContext = null, this.gainNode = null), this.audioChunks = [], this.scheduledChunks = 0, this.sessionStartTime = 0, this.pausedTimeOffset = 0, this.pausedAt = 0, this.pausedAudioContextTime = 0, this.scheduledTime = 0, this.onEndedCallback = void 0, this.log("StreamingAudioPlayer disposed");
- }
- /**
- * Flush buffered audio
- * - hard: stops all playing sources and clears all chunks
- * - soft (default): clears UNSCHEDULED chunks only
- */
- flush(t) {
- if ((t == null ? void 0 : t.hard) === !0) {
- this.stop(), this.audioChunks = [], this.scheduledChunks = 0, this.sessionStartTime = 0, this.pausedAt = 0, this.scheduledTime = 0, this.log("Flushed (hard)");
- return;
- }
- this.scheduledChunks < this.audioChunks.length && this.audioChunks.splice(this.scheduledChunks), this.log("Flushed (soft)", { remainingScheduled: this.scheduledChunks });
- }
- /**
- * Set the volume (0.0 - 1.0)
- * Note: this only controls the avatar audio player's own volume; it does not affect the system volume
- * @param volume Volume value from 0.0 to 1.0 (0.0 = muted, 1.0 = maximum)
- */
- setVolume(t) {
- (t < 0 || t > 1) && (n.warn(`[StreamingAudioPlayer] Volume out of range: ${t}, clamping to [0, 1]`), t = Math.max(0, Math.min(1, t))), this.volume = t, this.gainNode && (this.gainNode.gain.value = t);
- }
- /**
- * Get the current volume
- * @returns Current volume value (0.0 - 1.0)
- */
- getVolume() {
- return this.volume;
- }
- /**
- * Debug logging
- */
- log(t, e) {
- this.debug && n.log(`[StreamingAudioPlayer] ${t}`, e || "");
- }
- }
- export {
- y as StreamingAudioPlayer
- };
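
For orientation, a minimal usage sketch based only on the public surface visible in this removed bundle (initialize, addChunk, setVolume, onEnded, dispose); the new build re-emits the same player under the re-hashed chunk StreamingAudioPlayer-C-_1X8K-.js. The import path, the 16 kHz sample rate, and the placeholder PCM buffers are assumptions for illustration, not SDK defaults or a documented entry point:

```ts
// Hypothetical usage of the StreamingAudioPlayer class shown above; the deep-import path
// and option values are placeholders, not the SDK's documented API.
import { StreamingAudioPlayer } from '@spatialwalk/avatarkit/dist/StreamingAudioPlayer-C-_1X8K-.js';

const player = new StreamingAudioPlayer({ sampleRate: 16000, channelCount: 1, debug: true });
await player.initialize();                 // creates the AudioContext and GainNode
player.setVolume(0.8);                     // player-local gain only, not system volume
player.onEnded(() => player.dispose());    // tear down once the last scheduled chunk finishes

const pcmChunk = new Uint8Array(3200);     // placeholder: 100 ms of 16 kHz mono 16-bit PCM
player.addChunk(pcmChunk);                 // playback auto-starts once a chunk is buffered
player.addChunk(new Uint8Array(0), true);  // isLast = true marks the end of the stream
```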