react-native-gemma-agent 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,155 @@
1
+ import React, {
2
+ useRef,
3
+ useCallback,
4
+ useImperativeHandle,
5
+ forwardRef,
6
+ useState,
7
+ } from 'react';
8
+ import { View, StyleSheet } from 'react-native';
9
+ import { WebView, type WebViewMessageEvent } from 'react-native-webview';
10
+ import type { SkillResult } from './types';
11
+
12
/**
 * Imperative API exposed by the SkillSandbox component via ref.
 */
export type SkillSandboxHandle = {
  /**
   * Execute a JS skill inside the hidden WebView.
   *
   * @param html    Skill HTML document; an execution bridge script is injected.
   * @param params  Parameters forwarded to the skill's entry function.
   * @param timeout Max time in ms before the promise resolves with a timeout
   *                error payload (the implementation defaults to 30 000).
   * @returns The skill's outcome. Skill failures and timeouts are reported via
   *          `SkillResult.error` (resolved, not rejected); the promise only
   *          rejects when a newer execution cancels this one.
   */
  execute: (
    html: string,
    params: Record<string, unknown>,
    timeout?: number,
  ) => Promise<SkillResult>;
};
19
+
20
/**
 * Book-keeping for the single in-flight skill execution.
 * At most one execution is pending at a time (see SkillSandbox).
 */
type PendingExecution = {
  /** Settles the caller's promise with the skill's result or an error payload. */
  resolve: (result: SkillResult) => void;
  /** Rejects the caller's promise — used when a new execution cancels this one. */
  reject: (error: Error) => void;
  /** Timeout handle; cleared when a message arrives or the execution is cancelled. */
  timer: ReturnType<typeof setTimeout>;
};
25
+
26
/**
 * Hidden WebView sandbox that runs JS skills in isolation.
 *
 * The component renders an invisible WebView. `execute` (exposed via ref)
 * injects a bridge script into the skill HTML, loads it as the WebView
 * source, and resolves with whatever the skill posts back through
 * `window.ReactNativeWebView.postMessage`.
 *
 * Only one execution can be in flight at a time; starting a new one
 * cancels (rejects) the previous one.
 */
export const SkillSandbox = forwardRef<SkillSandboxHandle>(
  function SkillSandbox(_props, ref) {
    // The single in-flight execution, or null when idle.
    const pendingRef = useRef<PendingExecution | null>(null);
    // WebView source; undefined until the first execute() so the WebView
    // is not mounted at all while idle.
    const [source, setSource] = useState<{ html: string } | undefined>(
      undefined,
    );

    // Handle the single postMessage sent by the injected bridge script.
    // Clears the pending slot *before* settling so a late/duplicate message
    // cannot settle twice.
    const handleMessage = useCallback((event: WebViewMessageEvent) => {
      const pending = pendingRef.current;
      if (!pending) return;

      pendingRef.current = null;
      clearTimeout(pending.timer);

      try {
        const data = JSON.parse(event.nativeEvent.data);
        if (data.type === 'skill_error') {
          // Skill-level failures resolve with an error payload, not a rejection.
          pending.resolve({ error: data.error });
        } else if (data.type === 'skill_result') {
          pending.resolve(data.data);
        } else {
          pending.resolve({ error: 'Unknown response type from skill' });
        }
      } catch {
        pending.resolve({ error: 'Failed to parse skill response' });
      }
    }, []);

    useImperativeHandle(
      ref,
      () => ({
        execute: (
          html: string,
          params: Record<string, unknown>,
          timeout = 30_000,
        ): Promise<SkillResult> => {
          // Cancel any pending execution.
          // NOTE(review): cancellation *rejects* while timeouts and skill
          // errors *resolve* with an error payload — confirm callers expect
          // this asymmetry.
          if (pendingRef.current) {
            clearTimeout(pendingRef.current.timer);
            pendingRef.current.reject(
              new Error('Execution cancelled — new skill started'),
            );
            pendingRef.current = null;
          }

          return new Promise<SkillResult>((resolve, reject) => {
            // On timeout, release the pending slot and resolve with an
            // error payload. The WebView keeps the stale page loaded until
            // the next execute() replaces the source.
            const timer = setTimeout(() => {
              pendingRef.current = null;
              resolve({ error: `Skill timed out after ${timeout}ms` });
            }, timeout);

            pendingRef.current = { resolve, reject, timer };

            // Loading a new source (re)starts the skill page; its bridge
            // script will post exactly one message back.
            const injectedHtml = injectExecutionScript(html, params);
            setSource({ html: injectedHtml });
          });
        },
      }),
      // Safe with an empty dep list: everything used is a ref or a setState.
      [],
    );

    return (
      <View style={styles.container} pointerEvents="none">
        {source && (
          <WebView
            source={source}
            onMessage={handleMessage}
            originWhitelist={['*']}
            javaScriptEnabled
            domStorageEnabled={false}
            incognito
            style={styles.webview}
          />
        )}
      </View>
    );
  },
);
104
+
105
+ /**
106
+ * Inject the execution bridge script into skill HTML.
107
+ * Calls window['ai_edge_gallery_get_result'] with the params
108
+ * and posts the result back via ReactNativeWebView.postMessage.
109
+ */
110
+ function injectExecutionScript(
111
+ html: string,
112
+ params: Record<string, unknown>,
113
+ ): string {
114
+ const escapedParams = JSON.stringify(JSON.stringify(params));
115
+
116
+ const executionScript = `
117
+ <script>
118
+ (async () => {
119
+ await new Promise(r => setTimeout(r, 100));
120
+ try {
121
+ const fn = window['ai_edge_gallery_get_result'];
122
+ if (!fn) throw new Error('Skill function ai_edge_gallery_get_result not found');
123
+ const result = await fn(${escapedParams});
124
+ window.ReactNativeWebView.postMessage(JSON.stringify({
125
+ type: 'skill_result',
126
+ data: JSON.parse(result)
127
+ }));
128
+ } catch (e) {
129
+ window.ReactNativeWebView.postMessage(JSON.stringify({
130
+ type: 'skill_error',
131
+ error: e.message || 'Unknown skill error'
132
+ }));
133
+ }
134
+ })();
135
+ </script>`;
136
+
137
+ if (html.includes('</body>')) {
138
+ return html.replace('</body>', `${executionScript}\n</body>`);
139
+ }
140
+ return html + executionScript;
141
+ }
142
+
143
// Styles for the hidden sandbox: the container is zero-sized and clipped,
// while the WebView itself is 1x1 and fully transparent.
// NOTE(review): presumably the WebView is 1x1 (rather than 0x0) so it still
// lays out and executes scripts — confirm on both platforms.
const styles = StyleSheet.create({
  container: {
    position: 'absolute',
    width: 0,
    height: 0,
    overflow: 'hidden',
  },
  webview: {
    width: 1,
    height: 1,
    opacity: 0,
  },
});
package/src/index.ts ADDED
@@ -0,0 +1,52 @@
1
// react-native-gemma-agent SDK
/** SDK package version — keep in sync with package.json. */
export const SDK_VERSION = '0.1.0';
3
+
4
+ // Core classes
5
+ export { ModelManager } from './ModelManager';
6
+ export { InferenceEngine } from './InferenceEngine';
7
+ export { SkillRegistry } from './SkillRegistry';
8
+ export { AgentOrchestrator } from './AgentOrchestrator';
9
+ export { BM25Scorer } from './BM25Scorer';
10
+ export type { SkillExecutor } from './AgentOrchestrator';
11
+
12
+ // React hooks & provider
13
+ export { GemmaAgentProvider } from './GemmaAgentProvider';
14
+ export type { GemmaAgentProviderProps, GemmaAgentContextValue } from './GemmaAgentProvider';
15
+ export { useGemmaAgent } from './useGemmaAgent';
16
+ export type { UseGemmaAgentReturn } from './useGemmaAgent';
17
+ export { useModelDownload } from './useModelDownload';
18
+ export type { UseModelDownloadReturn } from './useModelDownload';
19
+ export { useSkillRegistry } from './useSkillRegistry';
20
+ export type { UseSkillRegistryReturn } from './useSkillRegistry';
21
+
22
+ // Skill execution
23
+ export { SkillSandbox } from './SkillSandbox';
24
+ export type { SkillSandboxHandle } from './SkillSandbox';
25
+
26
+ // Function call parsing
27
+ export { validateToolCalls, extractToolCallsFromText } from './FunctionCallParser';
28
+ export type { ParsedToolCall } from './FunctionCallParser';
29
+
30
+ // Types
31
+ export type {
32
+ ModelStatus,
33
+ ModelConfig,
34
+ ModelInfo,
35
+ DownloadProgress,
36
+ ToolDefinition,
37
+ ToolCall,
38
+ Message,
39
+ MessageRole,
40
+ CompletionResult,
41
+ CompletionTimings,
42
+ GenerateOptions,
43
+ TokenEvent,
44
+ InferenceEngineConfig,
45
+ SkillType,
46
+ SkillParameter,
47
+ SkillManifest,
48
+ SkillResult,
49
+ AgentEvent,
50
+ AgentConfig,
51
+ ContextUsage,
52
+ } from './types';
package/src/types.ts ADDED
@@ -0,0 +1,197 @@
1
+ // react-native-gemma-agent types
2
+
3
/**
 * Lifecycle state of the model.
 * 'ready' means the file is on disk but not loaded into memory;
 * 'loaded' means it is in memory and available for inference.
 */
export type ModelStatus =
  | 'not_downloaded'
  | 'downloading'
  | 'ready'
  | 'loading'
  | 'loaded'
  | 'error';
10
+
11
+ export type ModelConfig = {
12
+ /** HuggingFace repo ID (e.g., 'unsloth/gemma-4-E2B-it-GGUF') */
13
+ repoId: string;
14
+ /** GGUF filename within the repo */
15
+ filename: string;
16
+ /** Expected file size in bytes (for progress calculation) */
17
+ expectedSize?: number;
18
+ /** SHA256 checksum for verification */
19
+ checksum?: string;
20
+ };
21
+
22
+ export type DownloadProgress = {
23
+ bytesDownloaded: number;
24
+ totalBytes: number;
25
+ /** 0-100 */
26
+ percent: number;
27
+ };
28
+
29
/** Metadata about the current model; null fields mean "unknown / not available". */
export type ModelInfo = {
  status: ModelStatus;
  /** Local path to the model file, or null if not downloaded. */
  path: string | null;
  /** File size in bytes, or null if unknown. */
  sizeBytes: number | null;
  /** Human-readable model description, or null if unknown. */
  description: string | null;
  /** Parameter count, or null if unknown. */
  nParams: number | null;
  /** Embedding dimension (n_embd), or null if unknown. */
  nEmbd: number | null;
};
37
+
38
/** OpenAI-style function tool definition advertised to the model. */
export type ToolDefinition = {
  type: 'function';
  function: {
    name: string;
    description: string;
    /** JSON-Schema-like description of the accepted arguments. */
    parameters: {
      type: 'object';
      properties: Record<string, {
        type: string;
        description?: string;
        enum?: string[];
      }>;
      required?: string[];
    };
  };
};

/** A function call emitted by the model. */
export type ToolCall = {
  type: 'function';
  id?: string;
  function: {
    name: string;
    /** Serialized arguments string as produced by the model (typically JSON). */
    arguments: string;
  };
};
63
+
64
/** Chat roles, following the OpenAI-style message format. */
export type MessageRole = 'user' | 'assistant' | 'system' | 'tool';

/** A single conversation turn. */
export type Message = {
  role: MessageRole;
  content: string;
  /** Tool calls requested by the assistant, if any. */
  tool_calls?: ToolCall[];
  /** For role 'tool': id of the ToolCall this message responds to. */
  tool_call_id?: string;
  /** For role 'tool': name of the tool that produced this message. */
  name?: string;
};
73
+
74
+ export type CompletionTimings = {
75
+ promptTokens: number;
76
+ promptMs: number;
77
+ promptPerSecond: number;
78
+ predictedTokens: number;
79
+ predictedMs: number;
80
+ predictedPerSecond: number;
81
+ };
82
+
83
+ export type CompletionResult = {
84
+ /** Raw text output (includes thinking + tool call tokens) */
85
+ text: string;
86
+ /** Filtered content (thinking and tool call tokens removed) */
87
+ content: string;
88
+ /** Model's chain-of-thought reasoning, if any */
89
+ reasoning: string | null;
90
+ toolCalls: ToolCall[];
91
+ timings: CompletionTimings;
92
+ stoppedEos: boolean;
93
+ stoppedLimit: boolean;
94
+ contextFull: boolean;
95
+ };
96
+
97
+ export type GenerateOptions = {
98
+ /** Max tokens to generate */
99
+ maxTokens?: number;
100
+ /** Sampling temperature (0-2) */
101
+ temperature?: number;
102
+ /** Top-p nucleus sampling */
103
+ topP?: number;
104
+ /** Top-k sampling */
105
+ topK?: number;
106
+ /** Stop sequences */
107
+ stop?: string[];
108
+ /** Tool definitions for function calling */
109
+ tools?: ToolDefinition[];
110
+ /** Tool choice strategy */
111
+ toolChoice?: 'auto' | 'none' | string;
112
+ };
113
+
114
+ export type TokenEvent = {
115
+ token: string;
116
+ toolCalls?: ToolCall[];
117
+ };
118
+
119
+ export type InferenceEngineConfig = {
120
+ /** Context window size in tokens */
121
+ contextSize?: number;
122
+ /** Batch size for prompt processing */
123
+ batchSize?: number;
124
+ /** Number of threads for inference */
125
+ threads?: number;
126
+ /** Flash attention mode */
127
+ flashAttn?: 'auto' | 'on' | 'off';
128
+ /** Lock model in memory */
129
+ useMlock?: boolean;
130
+ /** Number of GPU layers to offload (-1 = all) */
131
+ gpuLayers?: number;
132
+ };
133
+
134
+ // --- Skill System Types ---
135
+
136
+ export type SkillType = 'js' | 'native';
137
+
138
+ export type SkillParameter = {
139
+ type: string;
140
+ description?: string;
141
+ enum?: string[];
142
+ };
143
+
144
+ export type SkillManifest = {
145
+ name: string;
146
+ description: string;
147
+ version: string;
148
+ type: SkillType;
149
+ parameters: Record<string, SkillParameter>;
150
+ requiredParameters?: string[];
151
+ /** HTML content for JS skills — loaded into hidden WebView */
152
+ html?: string;
153
+ /** Native execution function — for skills that run in RN context */
154
+ execute?: (params: Record<string, unknown>) => Promise<SkillResult>;
155
+ /** Instructions for the LLM on when/how to use this skill */
156
+ instructions?: string;
157
+ /** Whether this skill requires network access. SDK checks connectivity before execution. */
158
+ requiresNetwork?: boolean;
159
+ };
160
+
161
/**
 * Outcome of a skill execution. Typically either `result` or `error` is set;
 * `image` optionally carries binary output as base64.
 */
export type SkillResult = {
  result?: string;
  error?: string;
  image?: { base64: string };
};
166
+
167
+ // --- Agent Types ---
168
+
169
/** Streaming events emitted while the agent processes a message. */
export type AgentEvent =
  // Generation (re)started — e.g. at the beginning of each model turn.
  | { type: 'thinking' }
  // One streamed token of raw model output.
  | { type: 'token'; token: string }
  // The model requested a skill invocation.
  | { type: 'skill_called'; name: string; parameters: Record<string, unknown> }
  // A skill invocation finished.
  | { type: 'skill_result'; name: string; result: SkillResult }
  // Final answer for this message (reasoning is null when none was produced).
  | { type: 'response'; text: string; reasoning: string | null }
  | { type: 'error'; error: string };
176
+
177
+ export type AgentConfig = {
178
+ /** Max chained skill calls before stopping (prevents infinite loops). Default: 5 */
179
+ maxChainDepth?: number;
180
+ /** Timeout for each skill execution in ms. Default: 30000 */
181
+ skillTimeout?: number;
182
+ /** Base system prompt prepended to all conversations */
183
+ systemPrompt?: string;
184
+ /** Skill routing strategy. 'all' sends every skill; 'bm25' pre-filters by query relevance. Default: 'all' */
185
+ skillRouting?: 'all' | 'bm25';
186
+ /** Max skills sent to the model per invocation (only used with 'bm25' routing). Default: 5 */
187
+ maxToolsPerInvocation?: number;
188
+ };
189
+
190
+ export type ContextUsage = {
191
+ /** Tokens used so far */
192
+ used: number;
193
+ /** Total context window size in tokens */
194
+ total: number;
195
+ /** Usage percentage (0-100) */
196
+ percent: number;
197
+ };
@@ -0,0 +1,222 @@
import { useCallback, useEffect, useRef, useState } from 'react';
import { useGemmaAgentContext } from './GemmaAgentProvider';
import type { AgentEvent, ContextUsage, Message, ModelStatus } from './types';
4
+
5
/** State and actions returned by the useGemmaAgent hook. */
export type UseGemmaAgentReturn = {
  /** Send a message to the agent. Returns the final response text. */
  sendMessage: (
    text: string,
    onEvent?: (event: AgentEvent) => void,
  ) => Promise<string>;
  /** Conversation history */
  messages: ReadonlyArray<Message>;
  /** Content tokens streamed so far (thinking excluded). Empty when idle. */
  streamingText: string;
  /** Whether the agent is currently processing */
  isProcessing: boolean;
  /** Whether the model is loaded and ready for inference */
  isModelLoaded: boolean;
  /** Current model status */
  modelStatus: ModelStatus;
  /** Name of the skill currently being executed, or null */
  activeSkill: string | null;
  /** Last error message, or null */
  error: string | null;
  /** Current context window usage (updated after each generation) */
  contextUsage: ContextUsage;
  /** Load the model into memory. Must be downloaded first. */
  loadModel: (onProgress?: (percent: number) => void) => Promise<number>;
  /** Unload the model from memory */
  unloadModel: () => Promise<void>;
  /** Clear conversation history */
  reset: () => void;
};
34
+
35
+ export function useGemmaAgent(): UseGemmaAgentReturn {
36
+ const { modelManager, engine, orchestrator } = useGemmaAgentContext();
37
+
38
+ const [messages, setMessages] = useState<ReadonlyArray<Message>>([]);
39
+ const [streamingText, setStreamingText] = useState('');
40
+ const [isProcessing, setIsProcessing] = useState(false);
41
+ const [modelStatus, setModelStatus] = useState<ModelStatus>(
42
+ modelManager.status,
43
+ );
44
+ const [activeSkill, setActiveSkill] = useState<string | null>(null);
45
+ const [error, setError] = useState<string | null>(null);
46
+ const [contextUsage, setContextUsage] = useState<ContextUsage>({
47
+ used: 0,
48
+ total: 0,
49
+ percent: 0,
50
+ });
51
+
52
+ // Track model status changes
53
+ const unsubRef = useRef<(() => void) | null>(null);
54
+ if (!unsubRef.current) {
55
+ unsubRef.current = modelManager.onStatusChange((status) => {
56
+ setModelStatus(status);
57
+ });
58
+ }
59
+
60
+ // Buffer to detect special token sequences in streaming
61
+ const tokenBufferRef = useRef('');
62
+ // Whether we've seen the model produce content tokens (after thinking)
63
+ const seenContentRef = useRef(false);
64
+
65
+ const loadModel = useCallback(
66
+ async (onProgress?: (percent: number) => void): Promise<number> => {
67
+ let path = modelManager.modelPath;
68
+ if (!path) {
69
+ path = await modelManager.findModel();
70
+ }
71
+ if (!path) {
72
+ throw new Error(
73
+ 'Model not found on device. Download it first via useModelDownload().',
74
+ );
75
+ }
76
+
77
+ setModelStatus('loading');
78
+ try {
79
+ const loadTimeMs = await engine.loadModel(path, onProgress);
80
+ setModelStatus('loaded');
81
+ return loadTimeMs;
82
+ } catch (err) {
83
+ setModelStatus('error');
84
+ throw err;
85
+ }
86
+ },
87
+ [modelManager, engine],
88
+ );
89
+
90
+ const unloadModel = useCallback(async () => {
91
+ await engine.unload();
92
+ setModelStatus(modelManager.modelPath ? 'ready' : 'not_downloaded');
93
+ }, [engine, modelManager]);
94
+
95
+ const sendMessage = useCallback(
96
+ async (
97
+ text: string,
98
+ onEvent?: (event: AgentEvent) => void,
99
+ ): Promise<string> => {
100
+ if (!engine.isLoaded) {
101
+ throw new Error('Model not loaded. Call loadModel() first.');
102
+ }
103
+
104
+ setError(null);
105
+ setIsProcessing(true);
106
+ setActiveSkill(null);
107
+ setStreamingText('');
108
+ tokenBufferRef.current = '';
109
+ seenContentRef.current = false;
110
+
111
+ try {
112
+ const response = await orchestrator.sendMessage(
113
+ text,
114
+ (event: AgentEvent) => {
115
+ switch (event.type) {
116
+ case 'token': {
117
+ // Accumulate into buffer to detect thinking vs content
118
+ tokenBufferRef.current += event.token;
119
+ const buf = tokenBufferRef.current;
120
+
121
+ // Gemma 4 outputs thinking as: thought\n<reasoning>
122
+ // followed by content after tool call results or directly.
123
+ // The `content` field in the final result has thinking stripped.
124
+ // For streaming: skip tokens until we see content starting.
125
+ // Since llama.rn gives us `content` (filtered) at the end,
126
+ // streamingText is a best-effort preview. We use it only
127
+ // when the model is generating the final answer (not thinking
128
+ // before a tool call).
129
+
130
+ // Don't stream during thinking phase — the model starts with
131
+ // "thought\n" or similar when reasoning before tool calls.
132
+ // We detect this by checking if the buffer starts with "thought"
133
+ if (!seenContentRef.current) {
134
+ if (buf.length >= 7) {
135
+ if (buf.trimStart().startsWith('thought')) {
136
+ // In thinking mode — don't stream these tokens
137
+ break;
138
+ }
139
+ // Not thinking — start streaming
140
+ seenContentRef.current = true;
141
+ setStreamingText(buf);
142
+ }
143
+ // Still buffering — wait for more tokens
144
+ break;
145
+ }
146
+
147
+ // We're in content mode — stream normally
148
+ setStreamingText((prev) => prev + event.token);
149
+ break;
150
+ }
151
+ case 'thinking':
152
+ // Reset streaming state at the start of each generation loop.
153
+ // Ensures clean slate before the final answer turn (after tool results).
154
+ setStreamingText('');
155
+ tokenBufferRef.current = '';
156
+ seenContentRef.current = false;
157
+ break;
158
+ case 'skill_called':
159
+ setActiveSkill(event.name);
160
+ setStreamingText('');
161
+ tokenBufferRef.current = '';
162
+ seenContentRef.current = false;
163
+ break;
164
+ case 'skill_result':
165
+ setActiveSkill(null);
166
+ break;
167
+ case 'error':
168
+ setError(event.error);
169
+ break;
170
+ case 'response':
171
+ setStreamingText('');
172
+ tokenBufferRef.current = '';
173
+ seenContentRef.current = false;
174
+ setMessages([...orchestrator.conversation]);
175
+ break;
176
+ }
177
+ // Always forward raw events to the developer's callback
178
+ onEvent?.(event);
179
+ },
180
+ );
181
+
182
+ setMessages([...orchestrator.conversation]);
183
+ setContextUsage(engine.getContextUsage());
184
+ return response;
185
+ } catch (err) {
186
+ const msg = err instanceof Error ? err.message : 'Unknown error';
187
+ setError(msg);
188
+ throw err;
189
+ } finally {
190
+ setIsProcessing(false);
191
+ setActiveSkill(null);
192
+ setStreamingText('');
193
+ tokenBufferRef.current = '';
194
+ seenContentRef.current = false;
195
+ }
196
+ },
197
+ [engine, orchestrator],
198
+ );
199
+
200
+ const reset = useCallback(() => {
201
+ orchestrator.reset();
202
+ setMessages([]);
203
+ setStreamingText('');
204
+ setError(null);
205
+ setActiveSkill(null);
206
+ }, [orchestrator]);
207
+
208
+ return {
209
+ sendMessage,
210
+ messages,
211
+ streamingText,
212
+ isProcessing,
213
+ isModelLoaded: engine.isLoaded,
214
+ modelStatus,
215
+ activeSkill,
216
+ error,
217
+ contextUsage,
218
+ loadModel,
219
+ unloadModel,
220
+ reset,
221
+ };
222
+ }
@@ -0,0 +1,80 @@
1
+ import { useState, useCallback, useRef } from 'react';
2
+ import { useGemmaAgentContext } from './GemmaAgentProvider';
3
+ import type { ModelStatus, DownloadProgress } from './types';
4
+
5
/** State and actions returned by the useModelDownload hook. */
export type UseModelDownloadReturn = {
  /** Start downloading the model. Resolves with the local file path. */
  download: () => Promise<string>;
  /** Cancel an in-progress download */
  cancelDownload: () => void;
  /** Check if model already exists on device */
  checkModel: () => Promise<boolean>;
  /** Set a custom model path (for pre-downloaded models) */
  setModelPath: (path: string) => Promise<void>;
  /** Delete the downloaded model file */
  deleteModel: () => Promise<void>;
  /** Current download progress */
  progress: DownloadProgress | null;
  /** Current model status */
  status: ModelStatus;
  /** Check available storage */
  checkStorage: () => Promise<{ available: number; required: number; sufficient: boolean }>;
};
23
+
24
+ export function useModelDownload(): UseModelDownloadReturn {
25
+ const { modelManager } = useGemmaAgentContext();
26
+
27
+ const [status, setStatus] = useState<ModelStatus>(modelManager.status);
28
+ const [progress, setProgress] = useState<DownloadProgress | null>(null);
29
+
30
+ const unsubRef = useRef<(() => void) | null>(null);
31
+ if (!unsubRef.current) {
32
+ unsubRef.current = modelManager.onStatusChange((s) => {
33
+ setStatus(s);
34
+ });
35
+ }
36
+
37
+ const download = useCallback(async (): Promise<string> => {
38
+ setProgress(null);
39
+ const path = await modelManager.download((p) => {
40
+ setProgress(p);
41
+ });
42
+ return path;
43
+ }, [modelManager]);
44
+
45
+ const cancelDownload = useCallback(() => {
46
+ modelManager.cancelDownload();
47
+ setProgress(null);
48
+ }, [modelManager]);
49
+
50
+ const checkModel = useCallback(
51
+ () => modelManager.checkModel(),
52
+ [modelManager],
53
+ );
54
+
55
+ const setModelPath = useCallback(
56
+ (path: string) => modelManager.setModelPath(path),
57
+ [modelManager],
58
+ );
59
+
60
+ const deleteModel = useCallback(
61
+ () => modelManager.deleteModel(),
62
+ [modelManager],
63
+ );
64
+
65
+ const checkStorage = useCallback(
66
+ () => modelManager.checkStorage(),
67
+ [modelManager],
68
+ );
69
+
70
+ return {
71
+ download,
72
+ cancelDownload,
73
+ checkModel,
74
+ setModelPath,
75
+ deleteModel,
76
+ progress,
77
+ status,
78
+ checkStorage,
79
+ };
80
+ }