@mobileai/react-native 0.4.1 → 0.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. package/README.md +25 -34
  2. package/lib/module/components/AIAgent.js +216 -5
  3. package/lib/module/components/AIAgent.js.map +1 -1
  4. package/lib/module/components/AgentChatBar.js +358 -36
  5. package/lib/module/components/AgentChatBar.js.map +1 -1
  6. package/lib/module/core/AgentRuntime.js +122 -6
  7. package/lib/module/core/AgentRuntime.js.map +1 -1
  8. package/lib/module/core/systemPrompt.js +57 -0
  9. package/lib/module/core/systemPrompt.js.map +1 -1
  10. package/lib/module/index.js +8 -0
  11. package/lib/module/index.js.map +1 -1
  12. package/lib/module/providers/GeminiProvider.js +108 -85
  13. package/lib/module/providers/GeminiProvider.js.map +1 -1
  14. package/lib/module/services/AudioInputService.js +128 -0
  15. package/lib/module/services/AudioInputService.js.map +1 -0
  16. package/lib/module/services/AudioOutputService.js +154 -0
  17. package/lib/module/services/AudioOutputService.js.map +1 -0
  18. package/lib/module/services/VoiceService.js +362 -0
  19. package/lib/module/services/VoiceService.js.map +1 -0
  20. package/lib/module/utils/audioUtils.js +49 -0
  21. package/lib/module/utils/audioUtils.js.map +1 -0
  22. package/lib/module/utils/logger.js +21 -4
  23. package/lib/module/utils/logger.js.map +1 -1
  24. package/lib/typescript/babel.config.d.ts +10 -0
  25. package/lib/typescript/babel.config.d.ts.map +1 -0
  26. package/lib/typescript/eslint.config.d.mts +3 -0
  27. package/lib/typescript/eslint.config.d.mts.map +1 -0
  28. package/lib/typescript/fetch-models.d.mts +2 -0
  29. package/lib/typescript/fetch-models.d.mts.map +1 -0
  30. package/lib/typescript/list-all-models.d.mts +2 -0
  31. package/lib/typescript/list-all-models.d.mts.map +1 -0
  32. package/lib/typescript/list-models.d.mts +2 -0
  33. package/lib/typescript/list-models.d.mts.map +1 -0
  34. package/lib/typescript/src/components/AIAgent.d.ts +8 -2
  35. package/lib/typescript/src/components/AIAgent.d.ts.map +1 -1
  36. package/lib/typescript/src/components/AgentChatBar.d.ts +19 -2
  37. package/lib/typescript/src/components/AgentChatBar.d.ts.map +1 -1
  38. package/lib/typescript/src/core/AgentRuntime.d.ts +17 -1
  39. package/lib/typescript/src/core/AgentRuntime.d.ts.map +1 -1
  40. package/lib/typescript/src/core/systemPrompt.d.ts +8 -0
  41. package/lib/typescript/src/core/systemPrompt.d.ts.map +1 -1
  42. package/lib/typescript/src/core/types.d.ts +24 -1
  43. package/lib/typescript/src/core/types.d.ts.map +1 -1
  44. package/lib/typescript/src/index.d.ts +6 -1
  45. package/lib/typescript/src/index.d.ts.map +1 -1
  46. package/lib/typescript/src/providers/GeminiProvider.d.ts +22 -18
  47. package/lib/typescript/src/providers/GeminiProvider.d.ts.map +1 -1
  48. package/lib/typescript/src/services/AudioInputService.d.ts +31 -0
  49. package/lib/typescript/src/services/AudioInputService.d.ts.map +1 -0
  50. package/lib/typescript/src/services/AudioOutputService.d.ts +34 -0
  51. package/lib/typescript/src/services/AudioOutputService.d.ts.map +1 -0
  52. package/lib/typescript/src/services/VoiceService.d.ts +73 -0
  53. package/lib/typescript/src/services/VoiceService.d.ts.map +1 -0
  54. package/lib/typescript/src/utils/audioUtils.d.ts +17 -0
  55. package/lib/typescript/src/utils/audioUtils.d.ts.map +1 -0
  56. package/lib/typescript/src/utils/logger.d.ts +4 -0
  57. package/lib/typescript/src/utils/logger.d.ts.map +1 -1
  58. package/package.json +24 -8
  59. package/src/components/AIAgent.tsx +222 -3
  60. package/src/components/AgentChatBar.tsx +487 -42
  61. package/src/core/AgentRuntime.ts +131 -2
  62. package/src/core/systemPrompt.ts +62 -0
  63. package/src/core/types.ts +30 -0
  64. package/src/index.ts +16 -0
  65. package/src/providers/GeminiProvider.ts +105 -89
  66. package/src/services/AudioInputService.ts +141 -0
  67. package/src/services/AudioOutputService.ts +167 -0
  68. package/src/services/VoiceService.ts +409 -0
  69. package/src/utils/audioUtils.ts +54 -0
  70. package/src/utils/logger.ts +24 -7
@@ -0,0 +1,409 @@
1
+ /**
2
+ * VoiceService — WebSocket connection to Gemini Live API.
3
+ *
4
+ * Handles bidirectional audio streaming between the app and Gemini:
5
+ * - Sends PCM 16kHz 16-bit audio chunks (mic input)
6
+ * - Receives PCM 24kHz 16-bit audio chunks (AI responses)
7
+ * - Receives function calls (tap, navigate, etc.) for agentic actions
8
+ * - Sends screen context (DOM text + optional screenshot) for live mode
9
+ *
10
+ * Protocol: wss://generativelanguage.googleapis.com/ws/google.ai.generativelanguage.v1beta.GenerativeService.BidiGenerateContent
11
+ */
12
+
13
+ import { logger } from '../utils/logger';
14
+ import type { ToolDefinition } from '../core/types';
15
+
16
+ // ─── Types ─────────────────────────────────────────────────────
17
+
18
+ export interface VoiceServiceConfig {
19
+ apiKey: string;
20
+ model?: string;
21
+ systemPrompt?: string;
22
+ tools?: ToolDefinition[];
23
+ /** Audio sample rate for mic input (default: 16000) */
24
+ inputSampleRate?: number;
25
+ /** Language for Gemini speech generation (e.g., 'en', 'ar') */
26
+ language?: string;
27
+ }
28
+
29
+ export interface VoiceServiceCallbacks {
30
+ onAudioResponse?: (base64Audio: string) => void;
31
+ onToolCall?: (toolCall: { name: string; args: Record<string, any>; id: string }) => void;
32
+ onTranscript?: (text: string, isFinal: boolean, role: 'user' | 'model') => void;
33
+ onStatusChange?: (status: VoiceStatus) => void;
34
+ onError?: (error: string) => void;
35
+ /** Called when AI turn is complete (all audio sent) */
36
+ onTurnComplete?: () => void;
37
+ }
38
+
39
+ export type VoiceStatus = 'disconnected' | 'connecting' | 'connected' | 'error';
40
+
41
+ // ─── Constants ─────────────────────────────────────────────────
42
+
43
+ const WS_HOST = 'generativelanguage.googleapis.com';
44
+ const WS_PATH = '/ws/google.ai.generativelanguage.v1beta.GenerativeService.BidiGenerateContent';
45
+ const DEFAULT_MODEL = 'gemini-2.5-flash-native-audio-preview-12-2025';
46
+ const DEFAULT_INPUT_SAMPLE_RATE = 16000;
47
+
48
+ // ─── Service ───────────────────────────────────────────────────
49
+
50
+ export class VoiceService {
51
+ private ws: WebSocket | null = null;
52
+ private config: VoiceServiceConfig;
53
+ private callbacks: VoiceServiceCallbacks = {};
54
+ private setupComplete = false;
55
+ private _status: VoiceStatus = 'disconnected';
56
+
57
+ constructor(config: VoiceServiceConfig) {
58
+ this.config = config;
59
+ }
60
+
61
+ // ─── Connection ────────────────────────────────────────────
62
+
63
+ connect(callbacks: VoiceServiceCallbacks): void {
64
+ if (this.ws?.readyState === WebSocket.OPEN) {
65
+ logger.info('VoiceService', 'Already connected');
66
+ return;
67
+ }
68
+
69
+ this.callbacks = callbacks;
70
+ this.setStatus('connecting');
71
+
72
+ const model = this.config.model || DEFAULT_MODEL;
73
+ const url = `wss://${WS_HOST}${WS_PATH}?key=${this.config.apiKey}`;
74
+
75
+ logger.info('VoiceService', `Connecting to Gemini Live API (model: ${model})`);
76
+ this.ws = new WebSocket(url);
77
+
78
+ this.ws.onopen = () => {
79
+ logger.info('VoiceService', 'WebSocket connected, sending setup...');
80
+ this.sendSetup();
81
+ };
82
+
83
+ this.ws.onclose = (event) => {
84
+ logger.info('VoiceService', `WebSocket closed: ${event.code} ${event.reason}`);
85
+ this.setStatus('disconnected');
86
+ this.setupComplete = false;
87
+ };
88
+
89
+ this.ws.onerror = (error: any) => {
90
+ logger.error('VoiceService', `WebSocket error: ${error.message || 'Unknown'}`);
91
+ this.setStatus('error');
92
+ this.callbacks.onError?.(error.message || 'WebSocket connection error');
93
+ };
94
+
95
+ this.ws.onmessage = (event) => {
96
+ this.handleMessage(event);
97
+ };
98
+ }
99
+
100
+ disconnect(): void {
101
+ if (this.ws) {
102
+ logger.info('VoiceService', 'Disconnecting...');
103
+ this.ws.close();
104
+ this.ws = null;
105
+ this.setupComplete = false;
106
+ this.setStatus('disconnected');
107
+ }
108
+ }
109
+
110
+ get isConnected(): boolean {
111
+ return this.ws?.readyState === WebSocket.OPEN && this.setupComplete;
112
+ }
113
+
114
+ get currentStatus(): VoiceStatus {
115
+ return this._status;
116
+ }
117
+
118
+ // ─── Send Audio ────────────────────────────────────────────
119
+
120
+ /** Send PCM audio chunk (base64 encoded) to Gemini */
121
+ private sendCount = 0;
122
+ sendAudio(base64Audio: string): void {
123
+ this.sendCount++;
124
+ if (!this.isConnected) {
125
+ logger.warn('VoiceService', `sendAudio #${this.sendCount} DROPPED — not connected (ws=${this.ws?.readyState}, setup=${this.setupComplete})`);
126
+ return;
127
+ }
128
+
129
+ const message = {
130
+ realtimeInput: {
131
+ audio: {
132
+ mimeType: `audio/pcm;rate=${this.config.inputSampleRate || DEFAULT_INPUT_SAMPLE_RATE}`,
133
+ data: base64Audio,
134
+ },
135
+ },
136
+ };
137
+
138
+ logger.info('VoiceService', `📤 #${this.sendCount} sending ${base64Audio.length} chars (ws=${this.ws?.readyState})`);
139
+ this.ws!.send(JSON.stringify(message));
140
+ }
141
+
142
+ // ─── Send Text ─────────────────────────────────────────────
143
+
144
+ /** Send text message via realtimeInput (same channel as audio) */
145
+ sendText(text: string): void {
146
+ if (!this.isConnected) return;
147
+
148
+ const message = {
149
+ realtimeInput: { text },
150
+ };
151
+
152
+ this.ws!.send(JSON.stringify(message));
153
+ }
154
+
155
+ /** Send DOM tree as passive context during live conversation.
156
+ *
157
+ * Uses `clientContent` with `turnComplete: false` to inject context
158
+ * WITHOUT triggering a model response. This is the "incremental content
159
+ * updates" pattern from the Gemini docs for establishing session context.
160
+ *
161
+ * Called once at connect + after each tool call (not on a timer).
162
+ * Screenshots are handled separately via the capture_screenshot tool.
163
+ */
164
+ sendScreenContext(domText: string): void {
165
+ if (!this.isConnected) return;
166
+
167
+ const message = {
168
+ clientContent: {
169
+ turns: [{ role: 'user', parts: [{ text: domText }] }],
170
+ turnComplete: false, // Passive context — don't trigger a response
171
+ },
172
+ };
173
+
174
+ this.ws!.send(JSON.stringify(message));
175
+ logger.debug('VoiceService', `📤 Screen context sent (${domText.length} chars)`);
176
+ }
177
+
178
+ // ─── Send Function Response ────────────────────────────────
179
+
180
+ /** Send function call result back to Gemini */
181
+ sendFunctionResponse(name: string, id: string, result: any): void {
182
+ if (!this.isConnected) return;
183
+
184
+ const message = {
185
+ toolResponse: {
186
+ functionResponses: [{
187
+ name,
188
+ id,
189
+ response: result,
190
+ }],
191
+ },
192
+ };
193
+
194
+ logger.info('VoiceService', `📤 Sending tool response for ${name} (id=${id})`);
195
+ this.ws!.send(JSON.stringify(message));
196
+ }
197
+
198
+ // ─── Internal: Setup ───────────────────────────────────────
199
+
200
+ private sendSetup(): void {
201
+ if (!this.ws || this.ws.readyState !== WebSocket.OPEN) return;
202
+
203
+ const model = this.config.model || DEFAULT_MODEL;
204
+
205
+ const setup: any = {
206
+ model: `models/${model}`,
207
+ generationConfig: {
208
+ responseModalities: ['AUDIO'],
209
+ },
210
+ };
211
+
212
+ if (this.config.language) {
213
+ setup.generationConfig.speechConfig = {
214
+ languageCode: this.config.language === 'ar' ? 'ar-SA' : 'en-US',
215
+ };
216
+ }
217
+
218
+ // Add system instruction if provided
219
+ if (this.config.systemPrompt) {
220
+ setup.systemInstruction = {
221
+ parts: [{ text: this.config.systemPrompt }],
222
+ };
223
+ }
224
+
225
+ // Add tool declarations for function calling
226
+ if (this.config.tools?.length) {
227
+ setup.tools = [{
228
+ functionDeclarations: this.config.tools.map(tool => ({
229
+ name: tool.name,
230
+ description: tool.description,
231
+ parameters: {
232
+ type: 'OBJECT',
233
+ properties: Object.fromEntries(
234
+ Object.entries(tool.parameters).map(([key, param]) => [
235
+ key,
236
+ {
237
+ type: param.type.toUpperCase(),
238
+ description: param.description,
239
+ },
240
+ ])
241
+ ),
242
+ required: Object.entries(tool.parameters)
243
+ .filter(([, param]) => param.required)
244
+ .map(([key]) => key),
245
+ },
246
+ })),
247
+ }];
248
+ }
249
+
250
+ // Enable transcription
251
+ setup.inputAudioTranscription = {};
252
+ setup.outputAudioTranscription = {};
253
+
254
+ const setupMessage = { setup };
255
+ logger.info('VoiceService', `Sending setup (model: ${model}, tools: ${this.config.tools?.length || 0})`);
256
+ this.ws.send(JSON.stringify(setupMessage));
257
+ }
258
+
259
+ // ─── Internal: Message Handling ────────────────────────────
260
+
261
+ private handleMessage(event: WebSocketMessageEvent): void {
262
+ try {
263
+ const dataType = typeof event.data;
264
+ const dataLen = typeof event.data === 'string' ? event.data.length : (event.data?.byteLength || 'unknown');
265
+ logger.info('VoiceService', `📥 WS message received: type=${dataType}, length=${dataLen}`);
266
+
267
+ // Handle binary data (could be JSON or raw PCM)
268
+ if (typeof event.data !== 'string') {
269
+ logger.info('VoiceService', '📥 Binary message — processing...');
270
+ this.handleBinaryMessage(event.data);
271
+ return;
272
+ }
273
+
274
+ // Handle JSON text messages
275
+ const message = JSON.parse(event.data);
276
+ logger.info('VoiceService', `📥 JSON message keys: ${Object.keys(message).join(', ')}`);
277
+ this.processMessage(message);
278
+ } catch (error: any) {
279
+ logger.error('VoiceService', `Error handling message: ${error.message}`);
280
+ }
281
+ }
282
+
283
+ private handleBinaryMessage(data: any): void {
284
+ try {
285
+ // Try to decode as JSON first
286
+ let bytes: Uint8Array;
287
+ if (data instanceof ArrayBuffer) {
288
+ bytes = new Uint8Array(data);
289
+ } else if (data instanceof Blob) {
290
+ // Blob handling — read as ArrayBuffer
291
+ const reader = new FileReader();
292
+ reader.onload = () => {
293
+ if (reader.result instanceof ArrayBuffer) {
294
+ this.processBinaryBytes(new Uint8Array(reader.result));
295
+ }
296
+ };
297
+ reader.readAsArrayBuffer(data);
298
+ return;
299
+ } else {
300
+ return;
301
+ }
302
+
303
+ this.processBinaryBytes(bytes);
304
+ } catch (error: any) {
305
+ logger.error('VoiceService', `Error handling binary message: ${error.message}`);
306
+ }
307
+ }
308
+
309
+ private processBinaryBytes(bytes: Uint8Array): void {
310
+ // Check if it looks like JSON (starts with '{' or '[')
311
+ const looksLikeJson = bytes.length > 0 && (bytes[0] === 123 || bytes[0] === 91);
312
+
313
+ if (looksLikeJson) {
314
+ try {
315
+ const text = new TextDecoder('utf-8').decode(bytes);
316
+ const message = JSON.parse(text);
317
+ this.processMessage(message);
318
+ } catch {
319
+ // Not JSON — treat as raw PCM audio
320
+ this.callbacks.onAudioResponse?.(this.arrayBufferToBase64(bytes.buffer as ArrayBuffer));
321
+ }
322
+ } else {
323
+ // Raw PCM audio data
324
+ this.callbacks.onAudioResponse?.(this.arrayBufferToBase64(bytes.buffer as ArrayBuffer));
325
+ }
326
+ }
327
+
328
+ private processMessage(message: any): void {
329
+ // Setup complete acknowledgment
330
+ if (message.setupComplete !== undefined) {
331
+ logger.info('VoiceService', '✅ Setup complete — ready for audio exchange');
332
+ this.setupComplete = true;
333
+ this.setStatus('connected');
334
+ return;
335
+ }
336
+
337
+ // Server content (audio response + transcripts)
338
+ if (message.serverContent) {
339
+ const content = message.serverContent;
340
+ logger.info('VoiceService', `📥 serverContent received — turnComplete=${content.turnComplete}, hasParts=${!!content.modelTurn?.parts}, inputTranscription=${!!content.inputTranscription}, outputTranscription=${!!content.outputTranscription}`);
341
+
342
+ // Check for turn complete
343
+ if (content.turnComplete) {
344
+ this.callbacks.onTurnComplete?.();
345
+ }
346
+
347
+ // Process model output parts
348
+ if (content.modelTurn?.parts) {
349
+ for (const part of content.modelTurn.parts) {
350
+ // Audio response
351
+ if (part.inlineData?.data) {
352
+ logger.info('VoiceService', `🔊 Audio response: ${part.inlineData.data.length} chars`);
353
+ this.callbacks.onAudioResponse?.(part.inlineData.data);
354
+ }
355
+
356
+ // Text response (transcript)
357
+ if (part.text) {
358
+ logger.info('VoiceService', `💬 Text response: "${part.text}"`);
359
+ this.callbacks.onTranscript?.(part.text, true, 'model');
360
+ }
361
+ }
362
+ }
363
+
364
+ // Input transcription (user's speech)
365
+ if (content.inputTranscription?.text) {
366
+ this.callbacks.onTranscript?.(content.inputTranscription.text, true, 'user');
367
+ }
368
+
369
+ // Output transcription (model's speech-to-text)
370
+ if (content.outputTranscription?.text) {
371
+ this.callbacks.onTranscript?.(content.outputTranscription.text, true, 'model');
372
+ }
373
+ }
374
+
375
+ // Tool calls from the model
376
+ if (message.toolCall?.functionCalls) {
377
+ for (const fn of message.toolCall.functionCalls) {
378
+ logger.info('VoiceService', `Tool call: ${fn.name}(${JSON.stringify(fn.args)})`);
379
+ this.callbacks.onToolCall?.({
380
+ name: fn.name,
381
+ args: fn.args || {},
382
+ id: fn.id,
383
+ });
384
+ }
385
+ }
386
+
387
+ // Error messages
388
+ if (message.error) {
389
+ logger.error('VoiceService', `Server error: ${JSON.stringify(message.error)}`);
390
+ this.callbacks.onError?.(message.error.message || 'Server error');
391
+ }
392
+ }
393
+
394
+ // ─── Helpers ───────────────────────────────────────────────
395
+
396
+ private setStatus(newStatus: VoiceStatus): void {
397
+ this._status = newStatus;
398
+ this.callbacks.onStatusChange?.(newStatus);
399
+ }
400
+
401
+ private arrayBufferToBase64(buffer: ArrayBuffer): string {
402
+ const bytes = new Uint8Array(buffer);
403
+ let binary = '';
404
+ for (let i = 0; i < bytes.byteLength; i++) {
405
+ binary += String.fromCharCode(bytes[i]!);
406
+ }
407
+ return btoa(binary);
408
+ }
409
+ }
@@ -0,0 +1,54 @@
1
+ /**
2
+ * Audio utility functions for PCM conversion.
3
+ *
4
+ * Used by AudioInputService and AudioOutputService to convert between
5
+ * Float32 (Web Audio API) and Int16 (Gemini Live API) PCM formats.
6
+ */
7
+
8
+ /**
9
+ * Convert Float32Array PCM samples to Int16 PCM and encode as base64.
10
+ * Gemini Live API expects Int16 little-endian PCM.
11
+ */
12
+ export function float32ToInt16Base64(float32Data: Float32Array): string {
13
+ const int16Buffer = new Int16Array(float32Data.length);
14
+ for (let i = 0; i < float32Data.length; i++) {
15
+ // Clamp to [-1, 1] and scale to Int16 range
16
+ const sample = Math.max(-1, Math.min(1, float32Data[i] || 0));
17
+ int16Buffer[i] = sample < 0 ? sample * 0x8000 : sample * 0x7fff;
18
+ }
19
+
20
+ // Convert Int16Array to base64
21
+ const bytes = new Uint8Array(int16Buffer.buffer);
22
+ let binary = '';
23
+ for (let i = 0; i < bytes.length; i++) {
24
+ binary += String.fromCharCode(bytes[i] || 0);
25
+ }
26
+
27
+ return typeof global.btoa === 'function'
28
+ ? global.btoa(binary)
29
+ : Buffer.from(binary, 'binary').toString('base64');
30
+ }
31
+
32
+ /**
33
+ * Decode base64 Int16 PCM to Float32Array.
34
+ * Used for manual decoding when decodePCMInBase64 is unavailable.
35
+ */
36
+ export function base64ToFloat32(base64: string): Float32Array {
37
+ const binaryString = typeof global.atob === 'function'
38
+ ? global.atob(base64)
39
+ : Buffer.from(base64, 'base64').toString('binary');
40
+
41
+ const bytes = new Uint8Array(binaryString.length);
42
+ for (let i = 0; i < binaryString.length; i++) {
43
+ bytes[i] = binaryString.charCodeAt(i);
44
+ }
45
+
46
+ const int16Data = new Int16Array(bytes.buffer);
47
+ const float32Data = new Float32Array(int16Data.length);
48
+ for (let i = 0; i < int16Data.length; i++) {
49
+ // Scale Int16 back to Float32 [-1, 1]
50
+ float32Data[i] = (int16Data[i] || 0) / 0x8000;
51
+ }
52
+
53
+ return float32Data;
54
+ }
@@ -1,20 +1,37 @@
1
1
  /**
2
2
  * Logger utility — prefixed console output for easy filtering.
3
+ *
4
+ * Disabled by default. Enable via `logger.setEnabled(true)` or
5
+ * pass `debug={true}` to the <AIAgent> component.
3
6
  */
4
7
  const TAG = '[AIAgent]';
5
8
 
9
+ let enabled = false;
10
+
6
11
  export const logger = {
7
- info: (context: string, ...args: any[]) =>
8
- console.log(`${TAG} [${context}]`, ...args),
12
+ /** Enable or disable all SDK logging. */
13
+ setEnabled: (value: boolean) => {
14
+ enabled = value;
15
+ },
9
16
 
10
- warn: (context: string, ...args: any[]) =>
11
- console.warn(`${TAG} [${context}]`, ...args),
17
+ /** Check if logging is enabled. */
18
+ isEnabled: () => enabled,
19
+
20
+ info: (context: string, ...args: any[]) => {
21
+ if (enabled) console.log(`${TAG} [${context}]`, ...args);
22
+ },
12
23
 
13
- error: (context: string, ...args: any[]) =>
14
- console.error(`${TAG} [${context}]`, ...args),
24
+ warn: (context: string, ...args: any[]) => {
25
+ if (enabled) console.warn(`${TAG} [${context}]`, ...args);
26
+ },
27
+
28
+ error: (context: string, ...args: any[]) => {
29
+ // Errors always log regardless of enabled flag
30
+ console.error(`${TAG} [${context}]`, ...args);
31
+ },
15
32
 
16
33
  debug: (context: string, ...args: any[]) => {
17
- if (__DEV__) {
34
+ if (enabled && __DEV__) {
18
35
  console.log(`${TAG} [${context}] 🐛`, ...args);
19
36
  }
20
37
  },