react-native-ai-core 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,328 @@
1
+ package com.aicore
2
+
3
+ import android.os.Build
4
+ import com.facebook.react.bridge.Arguments
5
+ import com.facebook.react.bridge.Promise
6
+ import com.facebook.react.bridge.ReactApplicationContext
7
+ import com.facebook.react.bridge.WritableMap
8
+ import com.facebook.react.modules.core.DeviceEventManagerModule
9
+ import com.google.mediapipe.tasks.genai.llminference.LlmInference
10
+ import com.google.mediapipe.tasks.genai.llminference.LlmInferenceSession
11
+ import com.google.mediapipe.tasks.genai.llminference.ProgressListener
12
+ import com.google.mlkit.genai.common.DownloadStatus
13
+ import com.google.mlkit.genai.common.FeatureStatus
14
+ import com.google.mlkit.genai.prompt.Generation
15
+ import com.google.mlkit.genai.prompt.GenerativeModel
16
+ import com.google.mlkit.genai.prompt.TextPart
17
+ import com.google.mlkit.genai.prompt.generateContentRequest
18
+ import kotlinx.coroutines.CoroutineScope
19
+ import kotlinx.coroutines.Dispatchers
20
+ import kotlinx.coroutines.SupervisorJob
21
+ import kotlinx.coroutines.cancel
22
+ import kotlinx.coroutines.flow.catch
23
+ import kotlinx.coroutines.launch
24
+ import java.io.File
25
+ import java.util.concurrent.ExecutorService
26
+ import java.util.concurrent.Executors
27
+
28
/**
 * TurboModule that runs Gemini Nano on-device through one of two engines:
 *
 *  - ML Kit / AICore ("NPU path"): used when `initialize` is called with an
 *    empty model path; the system-managed Gemini Nano model is used.
 *  - MediaPipe LLM Inference ("local file path"): used when `initialize`
 *    receives the absolute path of a `.bin` model on disk.
 *
 * Exactly one of [mlkitModel] / [llmInference] is non-null after a successful
 * `initialize`. A rolling conversation history is prepended to every prompt.
 */
class AiCoreModule(reactContext: ReactApplicationContext) :
    NativeAiCoreSpec(reactContext) {

    // ML Kit (AICore) client — non-null after initializing with an empty path.
    @Volatile private var mlkitModel: GenerativeModel? = null

    // MediaPipe engine — non-null after initializing from an on-disk model.
    @Volatile private var llmInference: LlmInference? = null

    // Single worker thread: serializes all MediaPipe work.
    private val executor: ExecutorService = Executors.newSingleThreadExecutor()
    private val coroutineScope = CoroutineScope(Dispatchers.IO + SupervisorJob())

    // Conversation history as (user message, assistant reply) turns.
    private val conversationHistory = mutableListOf<Pair<String, String>>()

    // ── History ────────────────────────────────────────────────────────────────

    /** Renders the stored turns plus [userPrompt] as a "User:/Assistant:" transcript. */
    @Synchronized
    private fun buildContextualPrompt(userPrompt: String): String {
        if (conversationHistory.isEmpty()) return userPrompt
        return buildString {
            for ((userTurn, assistantTurn) in conversationHistory) {
                append("User: ").append(userTurn)
                append("\nAssistant: ").append(assistantTurn)
                append("\n")
            }
            append("User: ").append(userPrompt).append("\nAssistant:")
        }
    }

    /** Records a finished turn, evicting the oldest turns past [HISTORY_MAX_CHARS]. */
    @Synchronized
    private fun saveToHistory(userPrompt: String, assistantResponse: String) {
        conversationHistory.add(userPrompt to assistantResponse)
        // Drop oldest turns while the history exceeds the character budget.
        var totalChars = conversationHistory.sumOf { it.first.length + it.second.length }
        while (totalChars > HISTORY_MAX_CHARS && conversationHistory.size > 1) {
            val evicted = conversationHistory.removeAt(0)
            totalChars -= evicted.first.length + evicted.second.length
        }
    }

    @Synchronized
    private fun resetHistory() {
        conversationHistory.clear()
    }

    // ── Event plumbing ─────────────────────────────────────────────────────────

    /** Emits [name] with [params] through the JS DeviceEventEmitter. */
    private fun sendEvent(name: String, params: WritableMap?) {
        reactApplicationContext
            .getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java)
            .emit(name, params)
    }

    /** Builds the `{ code, message }` payload used by the stream-error event. */
    private fun createErrorMap(code: String, message: String): WritableMap =
        Arguments.createMap().apply {
            putString("code", code)
            putString("message", message)
        }

    /** Opens a fresh MediaPipe session with the default sampling parameters. */
    private fun createMediaPipeSession(): LlmInferenceSession {
        val engine = llmInference ?: throw IllegalStateException("LLM no inicializado.")
        val sessionOptions = LlmInferenceSession.LlmInferenceSessionOptions.builder()
            .setTemperature(DEFAULT_TEMPERATURE)
            .setTopK(DEFAULT_TOP_K)
            .build()
        return LlmInferenceSession.createFromOptions(engine, sessionOptions)
    }

    // ── Lifecycle ──────────────────────────────────────────────────────────────

    /**
     * Initializes one of the two engines. An empty [modelPath] selects the
     * system ML Kit / AICore model; a non-empty path selects MediaPipe with a
     * local `.bin` file. Any previously initialized engine is torn down first.
     */
    override fun initialize(modelPath: String, promise: Promise) {
        mlkitModel = null
        llmInference?.close()
        llmInference = null
        resetHistory()

        if (modelPath.isEmpty()) {
            initializeMlkit(promise)
        } else {
            initializeMediaPipe(modelPath, promise)
        }
    }

    /** Path A: system Gemini Nano via ML Kit, downloading the model if needed. */
    private fun initializeMlkit(promise: Promise) {
        coroutineScope.launch {
            try {
                val client = Generation.getClient()
                when (client.checkStatus()) {
                    FeatureStatus.AVAILABLE -> {
                        mlkitModel = client
                        promise.resolve(true)
                    }
                    FeatureStatus.DOWNLOADABLE -> client.download().collect { status ->
                        when (status) {
                            DownloadStatus.DownloadCompleted -> {
                                mlkitModel = client
                                promise.resolve(true)
                            }
                            is DownloadStatus.DownloadFailed ->
                                promise.reject("DOWNLOAD_FAILED", status.e.message, status.e)
                            else -> Unit
                        }
                    }
                    FeatureStatus.DOWNLOADING ->
                        promise.reject("ALREADY_DOWNLOADING", "Ya se está descargando.")
                    FeatureStatus.UNAVAILABLE ->
                        promise.reject("AICORE_UNAVAILABLE", "Gemini Nano no disponible en este dispositivo.")
                    else -> promise.reject("AICORE_UNKNOWN", "Estado desconocido.")
                }
            } catch (e: Exception) {
                promise.reject("AICORE_ERROR", e.message, e)
            }
        }
    }

    /** Path B: MediaPipe LLM inference loaded from a local model file. */
    private fun initializeMediaPipe(modelPath: String, promise: Promise) {
        executor.execute {
            try {
                if (!File(modelPath).exists()) {
                    promise.reject("MODEL_NOT_FOUND", "Modelo no encontrado en: $modelPath")
                    return@execute
                }
                val options = LlmInference.LlmInferenceOptions.builder()
                    .setModelPath(modelPath)
                    .setMaxTokens(DEFAULT_MAX_TOKENS)
                    .setPreferredBackend(LlmInference.Backend.DEFAULT)
                    .build()
                llmInference = LlmInference.createFromOptions(reactApplicationContext, options)
                promise.resolve(true)
            } catch (e: UnsupportedOperationException) {
                promise.reject("NPU_UNSUPPORTED", e.message, e)
            } catch (e: RuntimeException) {
                promise.reject("INIT_FAILED", e.message, e)
            } catch (e: Exception) {
                promise.reject("INIT_ERROR", e.message, e)
            }
        }
    }

    // ── Inference ──────────────────────────────────────────────────────────────

    /** One-shot (non-streaming) generation over whichever engine is active. */
    override fun generateResponse(prompt: String, promise: Promise) {
        val nanoClient = mlkitModel
        val edgeEngine = llmInference
        val contextualPrompt = buildContextualPrompt(prompt)
        when {
            nanoClient != null -> coroutineScope.launch {
                try {
                    val request = generateContentRequest(TextPart(contextualPrompt)) {
                        maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS
                    }
                    val reply = nanoClient.generateContent(request).candidates.firstOrNull()?.text ?: ""
                    saveToHistory(prompt, reply)
                    promise.resolve(reply)
                } catch (e: Exception) {
                    promise.reject("GENERATION_ERROR", e.message, e)
                }
            }
            edgeEngine != null -> executor.execute {
                var session: LlmInferenceSession? = null
                try {
                    session = createMediaPipeSession()
                    session.addQueryChunk(contextualPrompt)
                    val reply = session.generateResponse()
                    saveToHistory(prompt, reply)
                    promise.resolve(reply)
                } catch (e: Exception) {
                    promise.reject("GENERATION_ERROR", e.message, e)
                } finally {
                    session?.close()
                }
            }
            else -> promise.reject("NOT_INITIALIZED", "LLM no inicializado.")
        }
    }

    /**
     * Token-by-token generation. Emits EVENT_STREAM_TOKEN per chunk, then
     * EVENT_STREAM_COMPLETE (or EVENT_STREAM_ERROR); there is no promise.
     */
    override fun generateResponseStream(prompt: String) {
        val nanoClient = mlkitModel
        val edgeEngine = llmInference
        val contextualPrompt = buildContextualPrompt(prompt)
        when {
            nanoClient != null -> streamWithMlkit(prompt, contextualPrompt, nanoClient)
            edgeEngine != null -> streamWithMediaPipe(prompt, contextualPrompt)
            else -> sendEvent(EVENT_STREAM_ERROR, createErrorMap("NOT_INITIALIZED", "LLM no inicializado."))
        }
    }

    /** Streaming over ML Kit: collect the flow, then emit a final done-token. */
    private fun streamWithMlkit(prompt: String, contextualPrompt: String, client: GenerativeModel) {
        coroutineScope.launch {
            var failed = false
            val accumulated = StringBuilder()
            try {
                val request = generateContentRequest(TextPart(contextualPrompt)) {
                    maxOutputTokens = DEFAULT_MAX_OUTPUT_TOKENS
                }
                client.generateContentStream(request)
                    .catch { e ->
                        failed = true
                        sendEvent(EVENT_STREAM_ERROR, createErrorMap("STREAM_ERROR", e.message ?: "Error"))
                    }
                    .collect { chunk ->
                        val token = chunk.candidates.firstOrNull()?.text ?: ""
                        accumulated.append(token)
                        sendEvent(EVENT_STREAM_TOKEN, Arguments.createMap().apply {
                            putString("token", token)
                            putBoolean("done", false)
                        })
                    }
                if (!failed) {
                    saveToHistory(prompt, accumulated.toString())
                    // Explicit empty done-token so JS can close its loop.
                    sendEvent(EVENT_STREAM_TOKEN, Arguments.createMap().apply {
                        putString("token", "")
                        putBoolean("done", true)
                    })
                    sendEvent(EVENT_STREAM_COMPLETE, Arguments.createMap())
                }
            } catch (e: Exception) {
                if (!failed) {
                    sendEvent(EVENT_STREAM_ERROR, createErrorMap("STREAM_ERROR", e.message ?: "Error"))
                }
            }
        }
    }

    /** Streaming over MediaPipe: progress callback closes the session on done. */
    private fun streamWithMediaPipe(prompt: String, contextualPrompt: String) {
        executor.execute {
            val accumulated = StringBuilder()
            var session: LlmInferenceSession? = null
            try {
                session = createMediaPipeSession()
                val activeSession = session
                session.addQueryChunk(contextualPrompt)
                session.generateResponseAsync(ProgressListener<String> { partial, done ->
                    val token = partial ?: ""
                    accumulated.append(token)
                    sendEvent(EVENT_STREAM_TOKEN, Arguments.createMap().apply {
                        putString("token", token)
                        putBoolean("done", done)
                    })
                    if (done) {
                        saveToHistory(prompt, accumulated.toString())
                        sendEvent(EVENT_STREAM_COMPLETE, Arguments.createMap())
                        activeSession.close()
                    }
                })
            } catch (e: Exception) {
                session?.close()
                sendEvent(EVENT_STREAM_ERROR, createErrorMap("STREAM_ERROR", e.message ?: "Error"))
            }
        }
    }

    // ── Availability / teardown ────────────────────────────────────────────────

    /**
     * Resolves one of: "AVAILABLE_NPU" (ML Kit ready), "AVAILABLE" (MediaPipe
     * model present), "NEED_DOWNLOAD", or "UNSUPPORTED".
     */
    override fun checkAvailability(promise: Promise) {
        coroutineScope.launch {
            try {
                if (Build.VERSION.SDK_INT < Build.VERSION_CODES.Q) {
                    promise.resolve("UNSUPPORTED")
                    return@launch
                }
                if (mlkitModel != null) {
                    promise.resolve("AVAILABLE_NPU")
                    return@launch
                }
                if (llmInference != null) {
                    promise.resolve("AVAILABLE")
                    return@launch
                }
                when (Generation.getClient().checkStatus()) {
                    FeatureStatus.AVAILABLE -> promise.resolve("AVAILABLE_NPU")
                    FeatureStatus.DOWNLOADABLE, FeatureStatus.DOWNLOADING -> promise.resolve("NEED_DOWNLOAD")
                    FeatureStatus.UNAVAILABLE -> {
                        // Fall back to probing known on-disk model locations.
                        val appLocalPath = "${reactApplicationContext.filesDir}/gemini-nano.bin"
                        val modelOnDisk = (STANDARD_MODEL_PATHS + appLocalPath).any { File(it).exists() }
                        promise.resolve(if (modelOnDisk) "AVAILABLE" else "UNSUPPORTED")
                    }
                    else -> promise.resolve("UNSUPPORTED")
                }
            } catch (e: Exception) {
                promise.reject("AVAILABILITY_ERROR", e.message, e)
            }
        }
    }

    /** Frees whichever engine is loaded and clears the conversation history. */
    override fun release(promise: Promise) {
        executor.execute {
            try {
                mlkitModel = null
                llmInference?.close()
                llmInference = null
                resetHistory()
                promise.resolve(null)
            } catch (e: Exception) {
                promise.reject("RELEASE_ERROR", e.message, e)
            }
        }
    }

    /** Clears the history without releasing the engine. */
    override fun resetConversation(promise: Promise) {
        resetHistory()
        promise.resolve(null)
    }

    // Required by NativeEventEmitter; no-ops on the native side.
    override fun addListener(eventName: String) {}
    override fun removeListeners(count: Double) {}

    override fun invalidate() {
        super.invalidate()
        try {
            llmInference?.close()
            llmInference = null
            mlkitModel = null
        } finally {
            executor.shutdown()
            coroutineScope.cancel()
        }
    }

    companion object {
        const val NAME = NativeAiCoreSpec.NAME
        const val EVENT_STREAM_TOKEN = "AICore_streamToken"
        const val EVENT_STREAM_COMPLETE = "AICore_streamComplete"
        const val EVENT_STREAM_ERROR = "AICore_streamError"
        private const val DEFAULT_TEMPERATURE = 0.7f
        // MediaPipe context window (input + output tokens).
        private const val DEFAULT_MAX_TOKENS = 2048
        // ML Kit / AICore output cap (API limit: 1–256).
        private const val DEFAULT_MAX_OUTPUT_TOKENS = 256
        private const val DEFAULT_TOP_K = 40
        // Character cap on the history, keeping input under ~3000 tokens.
        private const val HISTORY_MAX_CHARS = 9000
        private val STANDARD_MODEL_PATHS = listOf(
            "/data/local/tmp/gemini-nano.bin",
            "/sdcard/Download/gemini-nano.bin"
        )
    }
}
@@ -0,0 +1,31 @@
1
+ package com.aicore
2
+
3
+ import com.facebook.react.BaseReactPackage
4
+ import com.facebook.react.bridge.NativeModule
5
+ import com.facebook.react.bridge.ReactApplicationContext
6
+ import com.facebook.react.module.model.ReactModuleInfo
7
+ import com.facebook.react.module.model.ReactModuleInfoProvider
8
+ import java.util.HashMap
9
+
10
/**
 * React Native package registration for the AiCore TurboModule
 * (New Architecture / BaseReactPackage).
 */
class AiCorePackage : BaseReactPackage() {

    /** Returns the AiCore module when requested by name, null otherwise. */
    override fun getModule(name: String, reactContext: ReactApplicationContext): NativeModule? =
        if (name == AiCoreModule.NAME) AiCoreModule(reactContext) else null

    /** Declares module metadata so the TurboModule system can resolve AiCore. */
    override fun getReactModuleInfoProvider() = ReactModuleInfoProvider {
        mapOf(
            AiCoreModule.NAME to ReactModuleInfo(
                name = AiCoreModule.NAME,
                className = AiCoreModule.NAME,
                canOverrideExistingModule = false,
                needsEagerInit = false,
                isCxxModule = false,
                isTurboModule = true
            )
        )
    }
}
package/ios/AiCore.h ADDED
@@ -0,0 +1,12 @@
1
/**
 * AiCore.h — iOS stub.
 *
 * On-device inference with Gemini Nano via MediaPipe is implemented on
 * Android only. This stub satisfies the NativeAiCoreSpec protocol on iOS
 * by resolving "UNSUPPORTED" or rejecting promises as appropriate.
 */
#import <AiCoreSpec/AiCoreSpec.h>

@interface AiCore : NSObject <NativeAiCoreSpec>

@end
package/ios/AiCore.mm ADDED
@@ -0,0 +1,65 @@
1
#import "AiCore.h"

// -----------------------------------------------------------------------------
// iOS stub.
//
// Local inference with Gemini Nano (MediaPipe) is only implemented for
// Android (Tensor NPU). On iOS every method resolves UNSUPPORTED or rejects
// its promise. For on-device inference on iOS, consider Core ML or
// Apple Intelligence once it is publicly available.
// -----------------------------------------------------------------------------

@implementation AiCore

+ (NSString *)moduleName {
  return @"AiCore";
}

- (void)initialize:(NSString *)modelPath
           resolve:(RCTPromiseResolveBlock)resolve
            reject:(RCTPromiseRejectBlock)reject
{
  reject(@"UNSUPPORTED",
         @"react-native-ai-core: la inferencia local con Gemini Nano no está soportada en iOS.",
         nil);
}

- (void)generateResponse:(NSString *)prompt
                 resolve:(RCTPromiseResolveBlock)resolve
                  reject:(RCTPromiseRejectBlock)reject
{
  reject(@"UNSUPPORTED",
         @"react-native-ai-core: generateResponse no está soportado en iOS.",
         nil);
}

// Streaming carries no promise; the JS layer raises the error event itself.
- (void)generateResponseStream:(NSString *)prompt {}

- (void)checkAvailability:(RCTPromiseResolveBlock)resolve
                   reject:(RCTPromiseRejectBlock)reject
{
  resolve(@"UNSUPPORTED");
}

- (void)release:(RCTPromiseResolveBlock)resolve
         reject:(RCTPromiseRejectBlock)reject
{
  resolve(nil);
}

// Required by NativeEventEmitter.
- (void)addListener:(NSString *)eventName {}
- (void)removeListeners:(double)count {}

- (std::shared_ptr<facebook::react::TurboModule>)getTurboModule:
    (const facebook::react::ObjCTurboModule::InitParams &)params
{
  return std::make_shared<facebook::react::NativeAiCoreSpecJSI>(params);
}

@end
@@ -0,0 +1,11 @@
1
"use strict";

/**
 * NativeAICore — TurboModule spec (New Architecture).
 *
 * High-performance JSI bridge to the Google AI Edge SDK (MediaPipe)
 * for running Gemini Nano locally on the device's NPU.
 */
import { TurboModuleRegistry } from 'react-native';

const spec = TurboModuleRegistry.getEnforcing('AiCore');
export default spec;
//# sourceMappingURL=NativeAiCore.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"names":["TurboModuleRegistry","getEnforcing"],"sourceRoot":"../../src","sources":["NativeAiCore.ts"],"mappings":";;AAAA;AACA;AACA;AACA;AACA;AACA;AACA,SAASA,mBAAmB,QAA0B,cAAc;AA8CpE,eAAeA,mBAAmB,CAACC,YAAY,CAAO,QAAQ,CAAC","ignoreList":[]}
@@ -0,0 +1,189 @@
1
+ "use strict";
2
+
3
+ /**
4
+ * react-native-ai-core
5
+ *
6
+ * Capa de abstracción JS sobre el TurboModule nativo.
7
+ * Proporciona una API limpia y tipada para usar Gemini Nano
8
+ * en local a través del SDK de Google AI Edge (MediaPipe).
9
+ *
10
+ * @example
11
+ * import AICore from 'react-native-ai-core';
12
+ *
13
+ * await AICore.initialize('/data/local/tmp/gemini-nano.bin');
14
+ * const answer = await AICore.generateResponse('¿Qué es JSI?');
15
+ */
16
+
17
+ import { NativeEventEmitter, Platform } from 'react-native';
18
+ import NativeAiCore from "./NativeAiCore.js";
19
+
20
+ // ── Tipos públicos ────────────────────────────────────────────────────────────
21
+
22
+ /** Estado de disponibilidad de Gemini Nano en el dispositivo */
23
+
24
+ /** Callbacks del streaming de respuesta */
25
+
26
+ /** Estructura de error normalizada */
27
+
28
// ── Event names (must match the constants in the Kotlin module) ───────────────
const EVENT_STREAM_TOKEN = 'AICore_streamToken';
const EVENT_STREAM_COMPLETE = 'AICore_streamComplete';
const EVENT_STREAM_ERROR = 'AICore_streamError';

// Native event emitter instance (null on unsupported platforms).
const emitter = NativeAiCore == null ? null : new NativeEventEmitter(NativeAiCore);
35
+
36
// ── Platform guard ────────────────────────────────────────────────────────────

/** Throws when the native module is missing on the current platform. */
function assertAvailable() {
  if (NativeAiCore) {
    return;
  }
  throw new Error(
    `react-native-ai-core: módulo nativo no disponible en ${Platform.OS}. ` +
      'Este módulo requiere Android con soporte de NPU.'
  );
}
43
+
44
// ── Public API ────────────────────────────────────────────────────────────────

/**
 * Initializes the LLM inference engine with the given model.
 *
 * @param modelPath Absolute path to the model's `.bin` file on the device.
 * @returns `true` when initialization succeeded.
 *
 * @throws `MODEL_NOT_FOUND` if no file exists at `modelPath`.
 * @throws `NPU_UNSUPPORTED` if the device's NPU is not compatible.
 * @throws `INIT_FAILED` if the engine failed to start for another reason.
 *
 * @example
 * const ok = await initialize('/data/local/tmp/gemini-nano.bin');
 */
export async function initialize(modelPath) {
  assertAvailable();
  return await NativeAiCore.initialize(modelPath);
}
63
+
64
/**
 * Generates a complete (non-streaming) response for the given prompt.
 *
 * @param prompt Input text for the model.
 * @returns The full response as a string.
 *
 * @throws `NOT_INITIALIZED` if `initialize()` was not called first.
 * @throws `GENERATION_ERROR` if the model fails during inference.
 *
 * @example
 * const response = await generateResponse('Explícame los TurboModules');
 */
export async function generateResponse(prompt) {
  assertAvailable();
  return await NativeAiCore.generateResponse(prompt);
}
80
+
81
/**
 * Generates a response token by token via streaming.
 * Tokens are delivered in real time through the callbacks.
 *
 * @param prompt Input text for the model.
 * @param callbacks `{ onToken, onComplete, onError }`.
 * @returns Cleanup function — call it to remove the event subscriptions.
 *
 * @example
 * const unsubscribe = generateResponseStream('¿Qué es MediaPipe?', {
 *   onToken: (token, done) => console.log(token),
 *   onComplete: () => console.log('done!'),
 *   onError: (err) => console.error(err),
 * });
 *
 * // On component unmount:
 * unsubscribe();
 */
export function generateResponseStream(prompt, callbacks) {
  if (!NativeAiCore || !emitter) {
    callbacks.onError({
      code: 'UNAVAILABLE',
      message: `react-native-ai-core no está disponible en ${Platform.OS}.`
    });
    return () => {};
  }

  // Subscribe to the three native stream events.
  const subscriptions = [
    emitter.addListener(EVENT_STREAM_TOKEN, event => {
      callbacks.onToken(event.token, event.done);
    }),
    emitter.addListener(EVENT_STREAM_COMPLETE, () => {
      callbacks.onComplete();
    }),
    emitter.addListener(EVENT_STREAM_ERROR, error => {
      callbacks.onError(error);
    })
  ];

  // Kick off inference on the native side.
  NativeAiCore.generateResponseStream(prompt);

  // Cleanup function: removes every stream listener.
  return () => {
    for (const subscription of subscriptions) {
      subscription.remove();
    }
  };
}
131
+
132
/**
 * Checks whether Gemini Nano is available on this device.
 *
 * @returns
 *  - `'AVAILABLE_NPU'` → The system (ML Kit / AICore) model is ready.
 *  - `'AVAILABLE'` → A local MediaPipe model is ready.
 *  - `'NEED_DOWNLOAD'` → The device is compatible but the model is not downloaded.
 *  - `'UNSUPPORTED'` → The device does not meet the minimum requirements.
 *
 * @example
 * const status = await checkAvailability();
 * if (status === 'NEED_DOWNLOAD') {
 *   // Show the model download UI
 * }
 */
export async function checkAvailability() {
  if (!NativeAiCore) {
    return 'UNSUPPORTED';
  }
  return NativeAiCore.checkAvailability();
}
150
+
151
/**
 * Releases the model from NPU memory.
 * **Recommended**: call from the root component's `useEffect` cleanup.
 *
 * @example
 * useEffect(() => {
 *   initialize(MODEL_PATH);
 *   return () => { release(); };
 * }, []);
 */
export async function release() {
  if (!NativeAiCore) {
    return;
  }
  return NativeAiCore.release();
}
165
+
166
/**
 * Clears the conversation history in the native engine without releasing the
 * model. The next `generateResponse` starts with no prior context.
 *
 * @example
 * await resetConversation(); // new conversation, same engine
 */
export async function resetConversation() {
  if (!NativeAiCore) {
    return;
  }
  return NativeAiCore.resetConversation();
}
177
+
178
// ── Default export (API object) ───────────────────────────────────────────────

const AICore = {
  initialize,
  generateResponse,
  generateResponseStream,
  checkAvailability,
  release,
  resetConversation
};

export default AICore;
//# sourceMappingURL=index.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"names":["NativeEventEmitter","Platform","NativeAiCore","EVENT_STREAM_TOKEN","EVENT_STREAM_COMPLETE","EVENT_STREAM_ERROR","emitter","assertAvailable","Error","OS","initialize","modelPath","generateResponse","prompt","generateResponseStream","callbacks","onError","code","message","tokenSub","addListener","event","onToken","token","done","completeSub","onComplete","errorSub","error","remove","checkAvailability","release","resetConversation","AICore"],"sourceRoot":"../../src","sources":["index.tsx"],"mappings":";;AAAA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;;AAEA,SAASA,kBAAkB,EAAEC,QAAQ,QAAQ,cAAc;AAC3D,OAAOC,YAAY,MAAM,mBAAgB;;AAEzC;;AAEA;;AAGA;;AAcA;;AAMA;AACA,MAAMC,kBAAkB,GAAM,oBAAoB;AAClD,MAAMC,qBAAqB,GAAG,uBAAuB;AACrD,MAAMC,kBAAkB,GAAM,oBAAoB;;AAElD;AACA,MAAMC,OAAO,GACXJ,YAAY,IAAI,IAAI,GAAG,IAAIF,kBAAkB,CAACE,YAAY,CAAC,GAAG,IAAI;;AAEpE;;AAEA,SAASK,eAAeA,CAAA,EAAS;EAC/B,IAAI,CAACL,YAAY,EAAE;IACjB,MAAM,IAAIM,KAAK,CACb,wDAAwDP,QAAQ,CAACQ,EAAE,IAAI,GACrE,kDACJ,CAAC;EACH;AACF;;AAEA;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,OAAO,eAAeC,UAAUA,CAACC,SAAiB,EAAoB;EACpEJ,eAAe,CAAC,CAAC;EACjB,OAAOL,YAAY,CAAEQ,UAAU,CAACC,SAAS,CAAC;AAC5C;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,OAAO,eAAeC,gBAAgBA,CAACC,MAAc,EAAmB;EACtEN,eAAe,CAAC,CAAC;EACjB,OAAOL,YAAY,CAAEU,gBAAgB,CAACC,MAAM,CAAC;AAC/C;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,OAAO,SAASC,sBAAsBA,CACpCD,MAAc,EACdE,SAA0B,EACd;EACZ,IAAI,CAACb,YAAY,IAAI,CAACI,OAAO,EAAE;IAC7BS,SAAS,CAACC,OAAO,CAAC;MAChBC,IAAI,EAAE,aAAa;MACnBC,OAAO,EAAE,8CAA8CjB,QAAQ,CAACQ,EAAE;IACpE,CAAC,CAAC;IACF,OAAO,MAAM,CAAC,CAAC;EACjB;EAEA,MAAMU,QAAQ,GAAGb,OAAO,CAACc,WAAW,CAClCjB,kBAAkB;EAClB;EACCkB,KAAU,IAAK;IACdN,SAAS,CAACO,OAAO,CACdD,KAAK,CAAsCE,KAAK,EAChDF,KAAK,CAAsCG,IAC9C,CAAC;EACH,CACF,CAAC;EAED,MAAMC,WAAW,GAAGnB,OAAO,CAACc,WAAW,CAAChB,qBAAqB,EAAE,MAAM;IACnEW,SAAS,CAACW,UAAU,CAAC,CAAC;EACxB,CAAC,CAAC;EAEF,MAAMC,QAAQ,
GAAGrB,OAAO,CAACc,WAAW,CAClCf,kBAAkB;EAClB;EACCuB,KAAU,IAAK;IACdb,SAAS,CAACC,OAAO,CAACY,KAAgB,CAAC;EACrC,CACF,CAAC;;EAED;EACA1B,YAAY,CAACY,sBAAsB,CAACD,MAAM,CAAC;;EAE3C;EACA,OAAO,MAAM;IACXM,QAAQ,CAACU,MAAM,CAAC,CAAC;IACjBJ,WAAW,CAACI,MAAM,CAAC,CAAC;IACpBF,QAAQ,CAACE,MAAM,CAAC,CAAC;EACnB,CAAC;AACH;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,OAAO,eAAeC,iBAAiBA,CAAA,EAAgC;EACrE,IAAI,CAAC5B,YAAY,EAAE,OAAO,aAAa;EACvC,OAAOA,YAAY,CAAC4B,iBAAiB,CAAC,CAAC;AACzC;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,OAAO,eAAeC,OAAOA,CAAA,EAAkB;EAC7C,IAAI,CAAC7B,YAAY,EAAE;EACnB,OAAOA,YAAY,CAAC6B,OAAO,CAAC,CAAC;AAC/B;;AAEA;AACA;AACA;AACA;AACA;AACA;AACA;AACA,OAAO,eAAeC,iBAAiBA,CAAA,EAAkB;EACvD,IAAI,CAAC9B,YAAY,EAAE;EACnB,OAAOA,YAAY,CAAC8B,iBAAiB,CAAC,CAAC;AACzC;;AAEA;;AAEA,MAAMC,MAAM,GAAG;EACbvB,UAAU;EACVE,gBAAgB;EAChBE,sBAAsB;EACtBgB,iBAAiB;EACjBC,OAAO;EACPC;AACF,CAAC;AAED,eAAeC,MAAM","ignoreList":[]}
@@ -0,0 +1 @@
1
+ {"type":"module"}
@@ -0,0 +1 @@
1
+ {"type":"module"}