react-native-ai-core 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/AiCore.podspec +20 -0
- package/LICENSE +20 -0
- package/README.md +273 -0
- package/android/build.gradle +76 -0
- package/android/src/main/AndroidManifest.xml +2 -0
- package/android/src/main/java/com/aicore/AiCoreModule.kt +328 -0
- package/android/src/main/java/com/aicore/AiCorePackage.kt +31 -0
- package/ios/AiCore.h +12 -0
- package/ios/AiCore.mm +65 -0
- package/lib/module/NativeAiCore.js +11 -0
- package/lib/module/NativeAiCore.js.map +1 -0
- package/lib/module/index.js +189 -0
- package/lib/module/index.js.map +1 -0
- package/lib/module/package.json +1 -0
- package/lib/typescript/package.json +1 -0
- package/lib/typescript/src/NativeAiCore.d.ts +46 -0
- package/lib/typescript/src/NativeAiCore.d.ts.map +1 -0
- package/lib/typescript/src/index.d.ts +123 -0
- package/lib/typescript/src/index.d.ts.map +1 -0
- package/package.json +164 -0
- package/src/NativeAiCore.ts +53 -0
- package/src/index.tsx +221 -0
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
/**
 * NativeAICore — TurboModule Spec (New Architecture)
 *
 * High-performance JSI bridge to the Google AI Edge SDK (MediaPipe)
 * used to run Gemini Nano on-device via the device's NPU.
 */
import { type TurboModule } from 'react-native';
export interface Spec extends TurboModule {
  /**
   * Initializes the LLM inference engine with the model at `modelPath`.
   * Resolves to `true` when initialization succeeded.
   */
  initialize(modelPath: string): Promise<boolean>;
  /**
   * Generates a complete response for the given prompt (non-streaming).
   */
  generateResponse(prompt: string): Promise<string>;
  /**
   * Starts generation in streaming mode.
   * Tokens are emitted through NativeEventEmitter events:
   * - 'AICore_streamToken' → { token: string, done: boolean }
   * - 'AICore_streamComplete' → {}
   * - 'AICore_streamError' → { code: string, message: string }
   */
  generateResponseStream(prompt: string): void;
  /**
   * Checks the availability of Gemini Nano on this device.
   * Returns: 'AVAILABLE' | 'NEED_DOWNLOAD' | 'UNSUPPORTED'
   */
  checkAvailability(): Promise<string>;
  /**
   * Releases the model from NPU memory.
   * Must be called when the component unmounts to avoid leaks.
   */
  release(): Promise<void>;
  /**
   * Clears the conversation history without releasing the model.
   * The next message starts a brand-new conversation.
   */
  resetConversation(): Promise<void>;
  // Required by NativeEventEmitter.
  addListener(eventName: string): void;
  removeListeners(count: number): void;
}
declare const _default: Spec;
export default _default;
//# sourceMappingURL=NativeAiCore.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"NativeAiCore.d.ts","sourceRoot":"","sources":["../../../src/NativeAiCore.ts"],"names":[],"mappings":"AAAA;;;;;GAKG;AACH,OAAO,EAAuB,KAAK,WAAW,EAAE,MAAM,cAAc,CAAC;AAErE,MAAM,WAAW,IAAK,SAAQ,WAAW;IACvC;;;OAGG;IACH,UAAU,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC,CAAC;IAEhD;;OAEG;IACH,gBAAgB,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC;IAElD;;;;;;OAMG;IACH,sBAAsB,CAAC,MAAM,EAAE,MAAM,GAAG,IAAI,CAAC;IAE7C;;;OAGG;IACH,iBAAiB,IAAI,OAAO,CAAC,MAAM,CAAC,CAAC;IAErC;;;OAGG;IACH,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;IAEzB;;;OAGG;IACH,iBAAiB,IAAI,OAAO,CAAC,IAAI,CAAC,CAAC;IAGnC,WAAW,CAAC,SAAS,EAAE,MAAM,GAAG,IAAI,CAAC;IACrC,eAAe,CAAC,KAAK,EAAE,MAAM,GAAG,IAAI,CAAC;CACtC;;AAED,wBAAgE"}
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
/**
 * react-native-ai-core
 *
 * JS abstraction layer over the native TurboModule.
 * Provides a clean, typed API for running Gemini Nano
 * on-device through the Google AI Edge SDK (MediaPipe).
 *
 * @example
 * import AICore from 'react-native-ai-core';
 *
 * await AICore.initialize('/data/local/tmp/gemini-nano.bin');
 * const answer = await AICore.generateResponse('¿Qué es JSI?');
 */
/** Availability status of Gemini Nano on the device */
export type AvailabilityStatus = 'AVAILABLE' | 'AVAILABLE_NPU' | 'NEED_DOWNLOAD' | 'UNSUPPORTED';
/** Streaming response callbacks */
export interface StreamCallbacks {
  /**
   * Invoked for every token received.
   * @param token Partial text fragment.
   * @param done `true` once the model has finished generating.
   */
  onToken: (token: string, done: boolean) => void;
  /** Invoked when the full generation has finished. */
  onComplete: () => void;
  /** Invoked if an error occurs during streaming. */
  onError: (error: AIError) => void;
}
/** Normalized error structure */
export interface AIError {
  code: string;
  message: string;
}
/**
 * Initializes the LLM inference engine with the given model.
 *
 * @param modelPath Absolute path to the model's `.bin` file on the device.
 * @returns `true` if initialization succeeded.
 *
 * @throws `MODEL_NOT_FOUND` if no file exists at `modelPath`.
 * @throws `NPU_UNSUPPORTED` if the device's NPU is not compatible.
 * @throws `INIT_FAILED` if the engine failed to start for any other reason.
 *
 * @example
 * const ok = await initialize('/data/local/tmp/gemini-nano.bin');
 */
export declare function initialize(modelPath: string): Promise<boolean>;
/**
 * Generates a complete (non-streaming) response for the given prompt.
 *
 * @param prompt Input text for the model.
 * @returns The complete response as a string.
 *
 * @throws `NOT_INITIALIZED` if `initialize()` was not called first.
 * @throws `GENERATION_ERROR` if the model fails during inference.
 *
 * @example
 * const response = await generateResponse('Explícame los TurboModules');
 */
export declare function generateResponse(prompt: string): Promise<string>;
/**
 * Generates a response token by token via streaming.
 * Tokens are delivered in real time through the callbacks.
 *
 * @param prompt Input text for the model.
 * @param callbacks `{ onToken, onComplete, onError }`.
 * @returns Cleanup function — call it to cancel the subscriptions.
 *
 * @example
 * const unsubscribe = generateResponseStream('¿Qué es MediaPipe?', {
 *   onToken: (token, done) => console.log(token),
 *   onComplete: () => console.log('¡Listo!'),
 *   onError: (err) => console.error(err),
 * });
 *
 * // On component unmount:
 * unsubscribe();
 */
export declare function generateResponseStream(prompt: string, callbacks: StreamCallbacks): () => void;
/**
 * Checks whether Gemini Nano is available on this device.
 *
 * @returns
 * - `'AVAILABLE'` → The model is ready to use.
 * - `'NEED_DOWNLOAD'` → The device is compatible but the model is not downloaded.
 * - `'UNSUPPORTED'` → The device does not meet the minimum requirements.
 * NOTE(review): `AvailabilityStatus` also declares `'AVAILABLE_NPU'`, which is
 * not documented here — presumably emitted by the native side when an NPU path
 * is active; confirm against the Kotlin module.
 *
 * @example
 * const status = await checkAvailability();
 * if (status === 'NEED_DOWNLOAD') {
 *   // Show model-download UI
 * }
 */
export declare function checkAvailability(): Promise<AvailabilityStatus>;
/**
 * Releases the model from NPU memory.
 * **Recommended**: call from the `useEffect` cleanup of the root component.
 *
 * @example
 * useEffect(() => {
 *   initialize(MODEL_PATH);
 *   return () => { release(); };
 * }, []);
 */
export declare function release(): Promise<void>;
/**
 * Clears the conversation history in the native engine without releasing the model.
 * The next `generateResponse` starts without any prior context.
 *
 * @example
 * await resetConversation(); // new conversation, same engine
 */
export declare function resetConversation(): Promise<void>;
declare const AICore: {
  initialize: typeof initialize;
  generateResponse: typeof generateResponse;
  generateResponseStream: typeof generateResponseStream;
  checkAvailability: typeof checkAvailability;
  release: typeof release;
  resetConversation: typeof resetConversation;
};
export default AICore;
//# sourceMappingURL=index.d.ts.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/index.tsx"],"names":[],"mappings":"AAAA;;;;;;;;;;;;GAYG;AAOH,gEAAgE;AAChE,MAAM,MAAM,kBAAkB,GAAG,WAAW,GAAG,eAAe,GAAG,eAAe,GAAG,aAAa,CAAC;AAEjG,2CAA2C;AAC3C,MAAM,WAAW,eAAe;IAC9B;;;;OAIG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,MAAM,EAAE,IAAI,EAAE,OAAO,KAAK,IAAI,CAAC;IAChD,4DAA4D;IAC5D,UAAU,EAAE,MAAM,IAAI,CAAC;IACvB,wDAAwD;IACxD,OAAO,EAAE,CAAC,KAAK,EAAE,OAAO,KAAK,IAAI,CAAC;CACnC;AAED,sCAAsC;AACtC,MAAM,WAAW,OAAO;IACtB,IAAI,EAAE,MAAM,CAAC;IACb,OAAO,EAAE,MAAM,CAAC;CACjB;AAwBD;;;;;;;;;;;;GAYG;AACH,wBAAsB,UAAU,CAAC,SAAS,EAAE,MAAM,GAAG,OAAO,CAAC,OAAO,CAAC,CAGpE;AAED;;;;;;;;;;;GAWG;AACH,wBAAsB,gBAAgB,CAAC,MAAM,EAAE,MAAM,GAAG,OAAO,CAAC,MAAM,CAAC,CAGtE;AAED;;;;;;;;;;;;;;;;;GAiBG;AACH,wBAAgB,sBAAsB,CACpC,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,eAAe,GACzB,MAAM,IAAI,CAyCZ;AAED;;;;;;;;;;;;;GAaG;AACH,wBAAsB,iBAAiB,IAAI,OAAO,CAAC,kBAAkB,CAAC,CAGrE;AAED;;;;;;;;;GASG;AACH,wBAAsB,OAAO,IAAI,OAAO,CAAC,IAAI,CAAC,CAG7C;AAED;;;;;;GAMG;AACH,wBAAsB,iBAAiB,IAAI,OAAO,CAAC,IAAI,CAAC,CAGvD;AAID,QAAA,MAAM,MAAM;;;;;;;CAOX,CAAC;AAEF,eAAe,MAAM,CAAC"}
|
package/package.json
ADDED
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "react-native-ai-core",
|
|
3
|
+
"version": "0.1.0",
|
|
4
|
+
"description": "High-performance AI bridge for React Native using Google AI Edge MediaPipe and Gemini Nano",
|
|
5
|
+
"main": "./lib/module/index.js",
|
|
6
|
+
"types": "./lib/typescript/src/index.d.ts",
|
|
7
|
+
"exports": {
|
|
8
|
+
".": {
|
|
9
|
+
"source": "./src/index.tsx",
|
|
10
|
+
"types": "./lib/typescript/src/index.d.ts",
|
|
11
|
+
"default": "./lib/module/index.js"
|
|
12
|
+
},
|
|
13
|
+
"./package.json": "./package.json"
|
|
14
|
+
},
|
|
15
|
+
"files": [
|
|
16
|
+
"src",
|
|
17
|
+
"lib",
|
|
18
|
+
"android",
|
|
19
|
+
"ios",
|
|
20
|
+
"cpp",
|
|
21
|
+
"*.podspec",
|
|
22
|
+
"react-native.config.js",
|
|
23
|
+
"!ios/build",
|
|
24
|
+
"!android/build",
|
|
25
|
+
"!android/gradle",
|
|
26
|
+
"!android/gradlew",
|
|
27
|
+
"!android/gradlew.bat",
|
|
28
|
+
"!android/local.properties",
|
|
29
|
+
"!**/__tests__",
|
|
30
|
+
"!**/__fixtures__",
|
|
31
|
+
"!**/__mocks__",
|
|
32
|
+
"!**/.*"
|
|
33
|
+
],
|
|
34
|
+
"scripts": {
|
|
35
|
+
"example": "yarn workspace react-native-ai-core-example",
|
|
36
|
+
"clean": "del-cli lib",
|
|
37
|
+
"prepare": "bob build",
|
|
38
|
+
"typecheck": "tsc",
|
|
39
|
+
"release": "release-it --only-version",
|
|
40
|
+
"lint": "eslint \"**/*.{js,ts,tsx}\""
|
|
41
|
+
},
|
|
42
|
+
"keywords": [
|
|
43
|
+
"react-native",
|
|
44
|
+
"android",
|
|
45
|
+
"ai",
|
|
46
|
+
"llm",
|
|
47
|
+
"gemini",
|
|
48
|
+
"gemini-nano",
|
|
49
|
+
"on-device",
|
|
50
|
+
"npu",
|
|
51
|
+
"mediapipe",
|
|
52
|
+
"ml-kit",
|
|
53
|
+
"aicore",
|
|
54
|
+
"inference",
|
|
55
|
+
"turbo-module",
|
|
56
|
+
"new-architecture",
|
|
57
|
+
"streaming",
|
|
58
|
+
"local-llm",
|
|
59
|
+
"privacy"
|
|
60
|
+
],
|
|
61
|
+
"repository": {
|
|
62
|
+
"type": "git",
|
|
63
|
+
"url": "git+https://github.com/albertofernandezroda/react-native-ai-core.git"
|
|
64
|
+
},
|
|
65
|
+
"author": "Alberto Fernandez <dev@example.com> (https://github.com/albertofernandezroda)",
|
|
66
|
+
"license": "MIT",
|
|
67
|
+
"bugs": {
|
|
68
|
+
"url": "https://github.com/albertofernandezroda/react-native-ai-core/issues"
|
|
69
|
+
},
|
|
70
|
+
"homepage": "https://github.com/albertofernandezroda/react-native-ai-core#readme",
|
|
71
|
+
"publishConfig": {
|
|
72
|
+
"registry": "https://registry.npmjs.org/"
|
|
73
|
+
},
|
|
74
|
+
"devDependencies": {
|
|
75
|
+
"@eslint/compat": "^2.0.3",
|
|
76
|
+
"@eslint/eslintrc": "^3.3.5",
|
|
77
|
+
"@eslint/js": "^10.0.1",
|
|
78
|
+
"@react-native/babel-preset": "0.83.0",
|
|
79
|
+
"@react-native/eslint-config": "0.83.0",
|
|
80
|
+
"@release-it/conventional-changelog": "^10.0.6",
|
|
81
|
+
"@types/react": "^19.2.0",
|
|
82
|
+
"del-cli": "^7.0.0",
|
|
83
|
+
"eslint": "^9.39.4",
|
|
84
|
+
"eslint-config-prettier": "^10.1.8",
|
|
85
|
+
"eslint-plugin-ft-flow": "^3.0.11",
|
|
86
|
+
"eslint-plugin-prettier": "^5.5.5",
|
|
87
|
+
"prettier": "^3.8.1",
|
|
88
|
+
"react": "19.1.0",
|
|
89
|
+
"react-native": "0.81.5",
|
|
90
|
+
"react-native-builder-bob": "^0.41.0",
|
|
91
|
+
"release-it": "^19.2.4",
|
|
92
|
+
"turbo": "^2.8.21",
|
|
93
|
+
"typescript": "^6.0.2"
|
|
94
|
+
},
|
|
95
|
+
"peerDependencies": {
|
|
96
|
+
"react": "*",
|
|
97
|
+
"react-native": "*"
|
|
98
|
+
},
|
|
99
|
+
"workspaces": [
|
|
100
|
+
"example"
|
|
101
|
+
],
|
|
102
|
+
"packageManager": "yarn@4.11.0",
|
|
103
|
+
"react-native-builder-bob": {
|
|
104
|
+
"source": "src",
|
|
105
|
+
"output": "lib",
|
|
106
|
+
"targets": [
|
|
107
|
+
[
|
|
108
|
+
"module",
|
|
109
|
+
{
|
|
110
|
+
"esm": true
|
|
111
|
+
}
|
|
112
|
+
],
|
|
113
|
+
[
|
|
114
|
+
"typescript",
|
|
115
|
+
{
|
|
116
|
+
"project": "tsconfig.build.json"
|
|
117
|
+
}
|
|
118
|
+
]
|
|
119
|
+
]
|
|
120
|
+
},
|
|
121
|
+
"codegenConfig": {
|
|
122
|
+
"name": "AiCoreSpec",
|
|
123
|
+
"type": "modules",
|
|
124
|
+
"jsSrcsDir": "src",
|
|
125
|
+
"android": {
|
|
126
|
+
"javaPackageName": "com.aicore"
|
|
127
|
+
}
|
|
128
|
+
},
|
|
129
|
+
"release-it": {
|
|
130
|
+
"git": {
|
|
131
|
+
"commitMessage": "chore: release ${version}",
|
|
132
|
+
"tagName": "v${version}"
|
|
133
|
+
},
|
|
134
|
+
"npm": {
|
|
135
|
+
"publish": true
|
|
136
|
+
},
|
|
137
|
+
"github": {
|
|
138
|
+
"release": true
|
|
139
|
+
},
|
|
140
|
+
"plugins": {
|
|
141
|
+
"@release-it/conventional-changelog": {
|
|
142
|
+
"preset": {
|
|
143
|
+
"name": "angular"
|
|
144
|
+
}
|
|
145
|
+
}
|
|
146
|
+
}
|
|
147
|
+
},
|
|
148
|
+
"prettier": {
|
|
149
|
+
"quoteProps": "consistent",
|
|
150
|
+
"singleQuote": true,
|
|
151
|
+
"tabWidth": 2,
|
|
152
|
+
"trailingComma": "es5",
|
|
153
|
+
"useTabs": false
|
|
154
|
+
},
|
|
155
|
+
"create-react-native-library": {
|
|
156
|
+
"type": "turbo-module",
|
|
157
|
+
"languages": "kotlin-objc",
|
|
158
|
+
"tools": [
|
|
159
|
+
"release-it",
|
|
160
|
+
"eslint"
|
|
161
|
+
],
|
|
162
|
+
"version": "0.60.0"
|
|
163
|
+
}
|
|
164
|
+
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
/**
 * NativeAICore — TurboModule Spec (New Architecture)
 *
 * High-performance JSI bridge to the Google AI Edge SDK (MediaPipe)
 * used to run Gemini Nano on-device via the device's NPU.
 */
import { TurboModuleRegistry, type TurboModule } from 'react-native';

export interface Spec extends TurboModule {
  /**
   * Initializes the LLM inference engine with the model at `modelPath`.
   * Resolves to `true` when initialization succeeded.
   */
  initialize(modelPath: string): Promise<boolean>;

  /**
   * Generates a complete response for the given prompt (non-streaming).
   */
  generateResponse(prompt: string): Promise<string>;

  /**
   * Starts generation in streaming mode.
   * Tokens are emitted through NativeEventEmitter events:
   * - 'AICore_streamToken' → { token: string, done: boolean }
   * - 'AICore_streamComplete' → {}
   * - 'AICore_streamError' → { code: string, message: string }
   */
  generateResponseStream(prompt: string): void;

  /**
   * Checks the availability of Gemini Nano on this device.
   * Returns: 'AVAILABLE' | 'NEED_DOWNLOAD' | 'UNSUPPORTED'
   */
  checkAvailability(): Promise<string>;

  /**
   * Releases the model from NPU memory.
   * Must be called when the component unmounts to avoid leaks.
   */
  release(): Promise<void>;

  /**
   * Clears the conversation history without releasing the model.
   * The next message starts a brand-new conversation.
   */
  resetConversation(): Promise<void>;

  // Required by NativeEventEmitter
  addListener(eventName: string): void;
  removeListeners(count: number): void;
}

export default TurboModuleRegistry.getEnforcing<Spec>('AiCore');
|
package/src/index.tsx
ADDED
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
/**
 * react-native-ai-core
 *
 * JS abstraction layer over the native TurboModule.
 * Provides a clean, typed API for running Gemini Nano
 * on-device through the Google AI Edge SDK (MediaPipe).
 *
 * @example
 * import AICore from 'react-native-ai-core';
 *
 * await AICore.initialize('/data/local/tmp/gemini-nano.bin');
 * const answer = await AICore.generateResponse('¿Qué es JSI?');
 */

import { NativeEventEmitter, Platform } from 'react-native';
import NativeAiCore from './NativeAiCore';

// ── Public types ──────────────────────────────────────────────────────────────

/** Availability status of Gemini Nano on the device */
export type AvailabilityStatus = 'AVAILABLE' | 'AVAILABLE_NPU' | 'NEED_DOWNLOAD' | 'UNSUPPORTED';

/** Streaming response callbacks */
export interface StreamCallbacks {
  /**
   * Invoked for every token received.
   * @param token Partial text fragment.
   * @param done `true` once the model has finished generating.
   */
  onToken: (token: string, done: boolean) => void;
  /** Invoked when the full generation has finished. */
  onComplete: () => void;
  /** Invoked if an error occurs during streaming. */
  onError: (error: AIError) => void;
}

/** Normalized error structure */
export interface AIError {
  code: string;
  message: string;
}

// ── Event names (must match those emitted by the Kotlin module) ────────────────
const EVENT_STREAM_TOKEN = 'AICore_streamToken';
const EVENT_STREAM_COMPLETE = 'AICore_streamComplete';
const EVENT_STREAM_ERROR = 'AICore_streamError';

// Native event emitter instance (null on unsupported platforms)
const emitter =
  NativeAiCore != null ? new NativeEventEmitter(NativeAiCore) : null;
|
|
51
|
+
|
|
52
|
+
// ── Guarda de plataforma ──────────────────────────────────────────────────────
|
|
53
|
+
|
|
54
|
+
function assertAvailable(): void {
|
|
55
|
+
if (!NativeAiCore) {
|
|
56
|
+
throw new Error(
|
|
57
|
+
`react-native-ai-core: módulo nativo no disponible en ${Platform.OS}. ` +
|
|
58
|
+
'Este módulo requiere Android con soporte de NPU.'
|
|
59
|
+
);
|
|
60
|
+
}
|
|
61
|
+
}
|
|
62
|
+
|
|
63
|
+
// ── API pública ───────────────────────────────────────────────────────────────
|
|
64
|
+
|
|
65
|
+
/**
|
|
66
|
+
* Inicializa el motor de inferencia LLM con el modelo indicado.
|
|
67
|
+
*
|
|
68
|
+
* @param modelPath Ruta absoluta al archivo `.bin` del modelo en el dispositivo.
|
|
69
|
+
* @returns `true` si la inicialización fue correcta.
|
|
70
|
+
*
|
|
71
|
+
* @throws `MODEL_NOT_FOUND` si el archivo no existe en `modelPath`.
|
|
72
|
+
* @throws `NPU_UNSUPPORTED` si la NPU del dispositivo no es compatible.
|
|
73
|
+
* @throws `INIT_FAILED` si el motor no pudo arrancar por otro motivo.
|
|
74
|
+
*
|
|
75
|
+
* @example
|
|
76
|
+
* const ok = await initialize('/data/local/tmp/gemini-nano.bin');
|
|
77
|
+
*/
|
|
78
|
+
export async function initialize(modelPath: string): Promise<boolean> {
|
|
79
|
+
assertAvailable();
|
|
80
|
+
return NativeAiCore!.initialize(modelPath);
|
|
81
|
+
}
|
|
82
|
+
|
|
83
|
+
/**
|
|
84
|
+
* Genera una respuesta completa (no streaming) para el prompt dado.
|
|
85
|
+
*
|
|
86
|
+
* @param prompt Texto de entrada para el modelo.
|
|
87
|
+
* @returns Respuesta completa como string.
|
|
88
|
+
*
|
|
89
|
+
* @throws `NOT_INITIALIZED` si `initialize()` no fue llamado antes.
|
|
90
|
+
* @throws `GENERATION_ERROR` si el modelo falla durante la inferencia.
|
|
91
|
+
*
|
|
92
|
+
* @example
|
|
93
|
+
* const response = await generateResponse('Explícame los TurboModules');
|
|
94
|
+
*/
|
|
95
|
+
export async function generateResponse(prompt: string): Promise<string> {
|
|
96
|
+
assertAvailable();
|
|
97
|
+
return NativeAiCore!.generateResponse(prompt);
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
/**
|
|
101
|
+
* Genera una respuesta token a token mediante streaming.
|
|
102
|
+
* Los tokens se entregan en tiempo real a través de los callbacks.
|
|
103
|
+
*
|
|
104
|
+
* @param prompt Texto de entrada para el modelo.
|
|
105
|
+
* @param callbacks `{ onToken, onComplete, onError }`.
|
|
106
|
+
* @returns Función de limpieza — llámala para cancelar las suscripciones.
|
|
107
|
+
*
|
|
108
|
+
* @example
|
|
109
|
+
* const unsubscribe = generateResponseStream('¿Qué es MediaPipe?', {
|
|
110
|
+
* onToken: (token, done) => console.log(token),
|
|
111
|
+
* onComplete: () => console.log('¡Listo!'),
|
|
112
|
+
* onError: (err) => console.error(err),
|
|
113
|
+
* });
|
|
114
|
+
*
|
|
115
|
+
* // Al desmontar el componente:
|
|
116
|
+
* unsubscribe();
|
|
117
|
+
*/
|
|
118
|
+
export function generateResponseStream(
|
|
119
|
+
prompt: string,
|
|
120
|
+
callbacks: StreamCallbacks
|
|
121
|
+
): () => void {
|
|
122
|
+
if (!NativeAiCore || !emitter) {
|
|
123
|
+
callbacks.onError({
|
|
124
|
+
code: 'UNAVAILABLE',
|
|
125
|
+
message: `react-native-ai-core no está disponible en ${Platform.OS}.`,
|
|
126
|
+
});
|
|
127
|
+
return () => {};
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
const tokenSub = emitter.addListener(
|
|
131
|
+
EVENT_STREAM_TOKEN,
|
|
132
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
133
|
+
(event: any) => {
|
|
134
|
+
callbacks.onToken(
|
|
135
|
+
(event as { token: string; done: boolean }).token,
|
|
136
|
+
(event as { token: string; done: boolean }).done
|
|
137
|
+
);
|
|
138
|
+
}
|
|
139
|
+
);
|
|
140
|
+
|
|
141
|
+
const completeSub = emitter.addListener(EVENT_STREAM_COMPLETE, () => {
|
|
142
|
+
callbacks.onComplete();
|
|
143
|
+
});
|
|
144
|
+
|
|
145
|
+
const errorSub = emitter.addListener(
|
|
146
|
+
EVENT_STREAM_ERROR,
|
|
147
|
+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
148
|
+
(error: any) => {
|
|
149
|
+
callbacks.onError(error as AIError);
|
|
150
|
+
}
|
|
151
|
+
);
|
|
152
|
+
|
|
153
|
+
// Arrancar la inferencia en el lado nativo
|
|
154
|
+
NativeAiCore.generateResponseStream(prompt);
|
|
155
|
+
|
|
156
|
+
// Devuelve función de limpieza para remover los listeners
|
|
157
|
+
return () => {
|
|
158
|
+
tokenSub.remove();
|
|
159
|
+
completeSub.remove();
|
|
160
|
+
errorSub.remove();
|
|
161
|
+
};
|
|
162
|
+
}
|
|
163
|
+
|
|
164
|
+
/**
|
|
165
|
+
* Comprueba si Gemini Nano está disponible en este dispositivo.
|
|
166
|
+
*
|
|
167
|
+
* @returns
|
|
168
|
+
* - `'AVAILABLE'` → El modelo está listo para usarse.
|
|
169
|
+
* - `'NEED_DOWNLOAD'` → El dispositivo es compatible pero el modelo no está descargado.
|
|
170
|
+
* - `'UNSUPPORTED'` → El dispositivo no cumple los requisitos mínimos.
|
|
171
|
+
*
|
|
172
|
+
* @example
|
|
173
|
+
* const status = await checkAvailability();
|
|
174
|
+
* if (status === 'NEED_DOWNLOAD') {
|
|
175
|
+
* // Mostrar UI de descarga del modelo
|
|
176
|
+
* }
|
|
177
|
+
*/
|
|
178
|
+
export async function checkAvailability(): Promise<AvailabilityStatus> {
|
|
179
|
+
if (!NativeAiCore) return 'UNSUPPORTED';
|
|
180
|
+
return NativeAiCore.checkAvailability() as Promise<AvailabilityStatus>;
|
|
181
|
+
}
|
|
182
|
+
|
|
183
|
+
/**
|
|
184
|
+
* Libera el modelo de la memoria de la NPU.
|
|
185
|
+
* **Recomendado**: llamar en el `useEffect` cleanup del componente raíz.
|
|
186
|
+
*
|
|
187
|
+
* @example
|
|
188
|
+
* useEffect(() => {
|
|
189
|
+
* initialize(MODEL_PATH);
|
|
190
|
+
* return () => { release(); };
|
|
191
|
+
* }, []);
|
|
192
|
+
*/
|
|
193
|
+
export async function release(): Promise<void> {
|
|
194
|
+
if (!NativeAiCore) return;
|
|
195
|
+
return NativeAiCore.release();
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
/**
|
|
199
|
+
* Limpia el historial de conversación en el motor nativo sin liberar el modelo.
|
|
200
|
+
* El siguiente `generateResponse` comenzará sin contexto previo.
|
|
201
|
+
*
|
|
202
|
+
* @example
|
|
203
|
+
* await resetConversation(); // nueva conversación, mismo motor
|
|
204
|
+
*/
|
|
205
|
+
export async function resetConversation(): Promise<void> {
|
|
206
|
+
if (!NativeAiCore) return;
|
|
207
|
+
return NativeAiCore.resetConversation();
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
// ── Default export (API object) ──────────────────────────────────────────────

// Convenience bundle of the module's public functions; the named exports
// above remain available for tree-shaking-friendly imports.
const AICore = {
  initialize,
  generateResponse,
  generateResponseStream,
  checkAvailability,
  release,
  resetConversation,
};

export default AICore;
|