llama-cpp-capacitor 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 Yakub Mohammad
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
package/LlamaCpp.podspec ADDED
@@ -0,0 +1,17 @@
+ require 'json'
+
+ package = JSON.parse(File.read(File.join(__dir__, 'package.json')))
+
+ Pod::Spec.new do |s|
+   s.name = 'LlamaCpp'
+   s.version = package['version']
+   s.summary = package['description']
+   s.license = package['license']
+   s.homepage = package['repository']['url']
+   s.author = package['author']
+   s.source = { :git => package['repository']['url'], :tag => s.version.to_s }
+   s.source_files = 'ios/Sources/**/*.{swift,h,m,c,cc,mm,cpp}'
+   s.ios.deployment_target = '14.0'
+   s.dependency 'Capacitor'
+   s.swift_version = '5.1'
+ end
package/Package.swift ADDED
@@ -0,0 +1,28 @@
+ // swift-tools-version: 5.9
+ import PackageDescription
+
+ let package = Package(
+     name: "LlamaCpp",
+     platforms: [.iOS(.v14)],
+     products: [
+         .library(
+             name: "LlamaCpp",
+             targets: ["LlamaCppPlugin"])
+     ],
+     dependencies: [
+         .package(url: "https://github.com/ionic-team/capacitor-swift-pm.git", from: "7.0.0")
+     ],
+     targets: [
+         .target(
+             name: "LlamaCppPlugin",
+             dependencies: [
+                 .product(name: "Capacitor", package: "capacitor-swift-pm"),
+                 .product(name: "Cordova", package: "capacitor-swift-pm")
+             ],
+             path: "ios/Sources/LlamaCppPlugin"),
+         .testTarget(
+             name: "LlamaCppPluginTests",
+             dependencies: ["LlamaCppPlugin"],
+             path: "ios/Tests/LlamaCppPluginTests")
+     ]
+ )
package/README.md ADDED
@@ -0,0 +1,574 @@
+ # llama-cpp Capacitor Plugin
+
+ [![Actions Status](https://github.com/arusatech/llama-cpp/workflows/CI/badge.svg)](https://github.com/arusatech/llama-cpp/actions)
+ [![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](https://opensource.org/licenses/MIT)
+ [![npm](https://img.shields.io/npm/v/llama-cpp-capacitor.svg)](https://www.npmjs.com/package/llama-cpp-capacitor/)
+
+ A native Capacitor plugin that embeds [llama.cpp](https://github.com/ggerganov/llama.cpp) directly into mobile apps, enabling offline AI inference with comprehensive support for text generation, multimodal processing, TTS, LoRA adapters, and more.
+
+ [llama.cpp](https://github.com/ggerganov/llama.cpp): Inference of the [LLaMA](https://arxiv.org/abs/2302.13971) model in pure C/C++
+
+ ## 🚀 Features
+
+ - **Offline AI Inference**: Run large language models completely offline on mobile devices
+ - **Text Generation**: Complete text completion with streaming support
+ - **Chat Conversations**: Multi-turn conversations with context management
+ - **Multimodal Support**: Process images and audio alongside text
+ - **Text-to-Speech (TTS)**: Generate speech from text using vocoder models
+ - **LoRA Adapters**: Fine-tune models with LoRA adapters
+ - **Embeddings**: Generate vector embeddings for semantic search
+ - **Reranking**: Rank documents by relevance to queries
+ - **Session Management**: Save and load conversation states
+ - **Benchmarking**: Performance testing and optimization tools
+ - **Structured Output**: Generate JSON with schema validation
+ - **Cross-Platform**: iOS and Android support with native optimizations
+
+ ## ✅ **Complete Implementation Status**
+
+ This plugin is now **FULLY IMPLEMENTED** with complete native integration of llama.cpp for both iOS and Android platforms. The implementation includes:
+
+ ### **Completed Features**
+ - **Complete C++ Integration**: Full llama.cpp library integration with all core components
+ - **Native Build System**: CMake-based build system for both iOS and Android
+ - **Platform Support**: iOS (arm64, x86_64) and Android (arm64-v8a, armeabi-v7a, x86, x86_64)
+ - **TypeScript API**: Complete TypeScript interface matching llama.rn functionality
+ - **Native Methods**: All 30+ native methods implemented with proper error handling
+ - **Event System**: Capacitor event system for progress and token streaming
+ - **Documentation**: Comprehensive README and API documentation
+
+ ### **Technical Implementation**
+ - **C++ Core**: Complete llama.cpp library with GGML, GGUF, and all supporting components
+ - **iOS Framework**: Native iOS framework with Metal acceleration support
+ - **Android JNI**: Complete JNI implementation with multi-architecture support
+ - **Build Scripts**: Automated build system for both platforms
+ - **Error Handling**: Robust error handling and result types
+
+ ### **Project Structure**
+ ```
+ llama-cpp/
+ ├── cpp/                     # Complete llama.cpp C++ library
+ │   ├── ggml.c               # GGML core
+ │   ├── gguf.cpp             # GGUF format support
+ │   ├── llama.cpp            # Main llama.cpp implementation
+ │   ├── rn-llama.cpp         # React Native wrapper (adapted)
+ │   ├── rn-completion.cpp    # Completion handling
+ │   ├── rn-tts.cpp           # Text-to-speech
+ │   └── tools/mtmd/          # Multimodal support
+ ├── ios/
+ │   ├── CMakeLists.txt       # iOS build configuration
+ │   └── Sources/             # Swift implementation
+ ├── android/
+ │   ├── src/main/
+ │   │   ├── CMakeLists.txt   # Android build configuration
+ │   │   ├── jni.cpp          # JNI implementation
+ │   │   └── jni-utils.h      # JNI utilities
+ │   └── build.gradle         # Android build config
+ ├── src/
+ │   ├── definitions.ts       # Complete TypeScript interfaces
+ │   ├── index.ts             # Main plugin implementation
+ │   └── web.ts               # Web fallback
+ └── build-native.sh          # Automated build script
+ ```
+
+ ## 📦 Installation
+
+ ```sh
+ npm install llama-cpp-capacitor
+ ```
+
+ ## 🔨 **Building the Native Library**
+
+ The plugin includes a complete native implementation of llama.cpp. To build the native libraries:
+
+ ### **Prerequisites**
+
+ - **CMake** (3.16+ for iOS, 3.10+ for Android)
+ - **Xcode** (for iOS builds, macOS only)
+ - **Android Studio** with NDK (for Android builds)
+ - **Make** or **Ninja** build system
+
+ ### **Automated Build**
+
+ ```bash
+ # Build for all platforms
+ npm run build:native
+
+ # Build for specific platforms
+ npm run build:ios      # iOS only
+ npm run build:android  # Android only
+
+ # Clean native builds
+ npm run clean:native
+ ```
+
+ ### **Manual Build**
+
+ #### **iOS Build**
+ ```bash
+ cd ios
+ cmake -B build -S .
+ cmake --build build --config Release
+ ```
+
+ #### **Android Build**
+ ```bash
+ cd android
+ ./gradlew assembleRelease
+ ```
+
+ ### **Build Output**
+
+ - **iOS**: `ios/build/LlamaCpp.framework/`
+ - **Android**: `android/src/main/jniLibs/{arch}/libllama-cpp-{arch}.so`
+
+ ### iOS Setup
+
+ 1. Install the plugin:
+ ```sh
+ npm install llama-cpp-capacitor
+ ```
+
+ 2. Add to your iOS project:
+ ```sh
+ npx cap add ios
+ npx cap sync ios
+ ```
+
+ 3. Open the project in Xcode:
+ ```sh
+ npx cap open ios
+ ```
+
+ ### Android Setup
+
+ 1. Install the plugin:
+ ```sh
+ npm install llama-cpp-capacitor
+ ```
+
+ 2. Add to your Android project:
+ ```sh
+ npx cap add android
+ npx cap sync android
+ ```
+
+ 3. Open the project in Android Studio:
+ ```sh
+ npx cap open android
+ ```
+
+ ## 🎯 Quick Start
+
+ ### Basic Text Completion
+
+ ```typescript
+ import { initLlama } from 'llama-cpp-capacitor';
+
+ // Initialize a model
+ const context = await initLlama({
+   model: '/path/to/your/model.gguf',
+   n_ctx: 2048,
+   n_threads: 4,
+   n_gpu_layers: 0,
+ });
+
+ // Generate text
+ const result = await context.completion({
+   prompt: "Hello, how are you today?",
+   n_predict: 50,
+   temperature: 0.8,
+ });
+
+ console.log('Generated text:', result.text);
+ ```
+
+ ### Chat-Style Conversations
+
+ ```typescript
+ const result = await context.completion({
+   messages: [
+     { role: "system", content: "You are a helpful AI assistant." },
+     { role: "user", content: "What is the capital of France?" },
+     { role: "assistant", content: "The capital of France is Paris." },
+     { role: "user", content: "Tell me more about it." }
+   ],
+   n_predict: 100,
+   temperature: 0.7,
+ });
+
+ console.log('Chat response:', result.content);
+ ```
+
+ ### Streaming Completion
+
+ ```typescript
+ let fullText = '';
+ const result = await context.completion({
+   prompt: "Write a short story about a robot learning to paint:",
+   n_predict: 150,
+   temperature: 0.8,
+ }, (tokenData) => {
+   // Called for each token as it's generated
+   fullText += tokenData.token;
+   console.log('Token:', tokenData.token);
+ });
+
+ console.log('Final result:', result.text);
+ ```
+
+ ## 📚 API Reference
+
+ ### Core Functions
+
+ #### `initLlama(params: ContextParams, onProgress?: (progress: number) => void): Promise<LlamaContext>`
+
+ Initialize a new llama.cpp context with a model.
+
+ **Parameters:**
+ - `params`: Context initialization parameters
+ - `onProgress`: Optional progress callback (0-100)
+
+ **Returns:** Promise resolving to a `LlamaContext` instance
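+
+ For long model loads it can help to surface progress in the UI. A minimal sketch using the progress callback (the model path is a placeholder):
+
+ ```typescript
+ import { initLlama } from 'llama-cpp-capacitor';
+
+ // onProgress receives the load progress as a number from 0 to 100
+ const context = await initLlama(
+   {
+     model: '/path/to/your/model.gguf', // placeholder path
+     n_ctx: 2048,
+     n_threads: 4,
+   },
+   (progress) => {
+     console.log(`Model loading: ${progress}%`);
+   },
+ );
+ ```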
+
+ #### `releaseAllLlama(): Promise<void>`
+
+ Release all contexts and free memory.
+
+ #### `toggleNativeLog(enabled: boolean): Promise<void>`
+
+ Enable or disable native logging.
+
+ #### `addNativeLogListener(listener: (level: string, text: string) => void): { remove: () => void }`
+
+ Add a listener for native log messages.
+
+ ### LlamaContext Class
+
+ #### `completion(params: CompletionParams, callback?: (data: TokenData) => void): Promise<NativeCompletionResult>`
+
+ Generate text completion.
+
+ **Parameters:**
+ - `params`: Completion parameters including prompt or messages
+ - `callback`: Optional callback for token-by-token streaming
+
+ #### `tokenize(text: string, options?: { media_paths?: string[] }): Promise<NativeTokenizeResult>`
+
+ Tokenize text or text with images.
+
+ #### `detokenize(tokens: number[]): Promise<string>`
+
+ Convert tokens back to text.
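+
+ As a quick sanity check, `tokenize` and `detokenize` should round-trip plain text. A sketch assuming `NativeTokenizeResult` exposes the token ids as a `tokens` array (as in llama.rn):
+
+ ```typescript
+ const { tokens } = await context.tokenize('The quick brown fox');
+ console.log('Token ids:', tokens);
+
+ // Detokenizing the same ids should reproduce the original text
+ const text = await context.detokenize(tokens);
+ console.log('Round trip:', text); // "The quick brown fox"
+ ```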
+
+ #### `embedding(text: string, params?: EmbeddingParams): Promise<NativeEmbeddingResult>`
+
+ Generate embeddings for text.
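+
+ A sketch of semantic similarity with embeddings, assuming the context was initialized with `embedding: true` and that `NativeEmbeddingResult` exposes an `embedding` number array (as in llama.rn):
+
+ ```typescript
+ const a = await context.embedding('How do I reset my password?');
+ const b = await context.embedding('Steps to recover a forgotten password');
+
+ // Cosine similarity between the two embedding vectors
+ const dot = a.embedding.reduce((s, v, i) => s + v * b.embedding[i], 0);
+ const norm = (v: number[]) => Math.sqrt(v.reduce((s, x) => s + x * x, 0));
+ console.log('Similarity:', dot / (norm(a.embedding) * norm(b.embedding)));
+ ```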
+
+ #### `rerank(query: string, documents: string[], params?: RerankParams): Promise<RerankResult[]>`
+
+ Rank documents by relevance to a query.
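+
+ A sketch of reranking a small document set, assuming each `RerankResult` carries a relevance `score` and the source `index` (as in llama.rn):
+
+ ```typescript
+ const docs = [
+   'Paris is the capital of France.',
+   'The Eiffel Tower is in Paris.',
+   'Bananas are rich in potassium.',
+ ];
+
+ const results = await context.rerank('What is the capital of France?', docs);
+
+ // Sort by descending relevance and map back to the original documents
+ results
+   .sort((x, y) => y.score - x.score)
+   .forEach((r) => console.log(r.score.toFixed(3), docs[r.index]));
+ ```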
+
+ #### `bench(pp: number, tg: number, pl: number, nr: number): Promise<BenchResult>`
+
+ Benchmark model performance.
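+
+ A sketch of a quick benchmark run; the argument order follows the signature above (prompt-processing tokens `pp`, tokens to generate `tg`, parallel sequences `pl`, repetitions `nr`), and the exact shape of `BenchResult` is left to inspection:
+
+ ```typescript
+ // Process a 512-token prompt, generate 128 tokens, 1 sequence, 3 repetitions
+ const bench = await context.bench(512, 128, 1, 3);
+ console.log('Benchmark result:', bench); // e.g. tokens/sec for each phase
+ ```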
+
+ ### Multimodal Support
+
+ #### `initMultimodal(params: { path: string; use_gpu?: boolean }): Promise<boolean>`
+
+ Initialize multimodal support with a projector file.
+
+ #### `isMultimodalEnabled(): Promise<boolean>`
+
+ Check if multimodal support is enabled.
+
+ #### `getMultimodalSupport(): Promise<{ vision: boolean; audio: boolean }>`
+
+ Get multimodal capabilities.
+
+ #### `releaseMultimodal(): Promise<void>`
+
+ Release multimodal resources.
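+
+ Before sending images or audio, it is worth probing what the loaded projector actually supports. A short sketch (the projector path is a placeholder):
+
+ ```typescript
+ const ok = await context.initMultimodal({
+   path: '/path/to/mmproj.gguf', // placeholder path
+   use_gpu: true,
+ });
+
+ if (ok && (await context.isMultimodalEnabled())) {
+   const support = await context.getMultimodalSupport();
+   console.log('Vision:', support.vision, 'Audio:', support.audio);
+ }
+
+ // Free the projector once multimodal input is no longer needed
+ await context.releaseMultimodal();
+ ```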
+
+ ### TTS (Text-to-Speech)
+
+ #### `initVocoder(params: { path: string; n_batch?: number }): Promise<boolean>`
+
+ Initialize TTS with a vocoder model.
+
+ #### `isVocoderEnabled(): Promise<boolean>`
+
+ Check if TTS is enabled.
+
+ #### `getFormattedAudioCompletion(speaker: object | null, textToSpeak: string): Promise<{ prompt: string; grammar?: string }>`
+
+ Get formatted audio completion prompt.
+
+ #### `getAudioCompletionGuideTokens(textToSpeak: string): Promise<Array<number>>`
+
+ Get guide tokens for audio completion.
+
+ #### `decodeAudioTokens(tokens: number[]): Promise<Array<number>>`
+
+ Decode audio tokens to audio data.
+
+ #### `releaseVocoder(): Promise<void>`
+
+ Release TTS resources.
+
+ ### LoRA Adapters
+
+ #### `applyLoraAdapters(loraList: Array<{ path: string; scaled?: number }>): Promise<void>`
+
+ Apply LoRA adapters to the model.
+
+ #### `removeLoraAdapters(): Promise<void>`
+
+ Remove all LoRA adapters.
+
+ #### `getLoadedLoraAdapters(): Promise<Array<{ path: string; scaled?: number }>>`
+
+ Get list of loaded LoRA adapters.
+
+ ### Session Management
+
+ #### `saveSession(filepath: string, options?: { tokenSize: number }): Promise<number>`
+
+ Save current session to a file.
+
+ #### `loadSession(filepath: string): Promise<NativeSessionLoadResult>`
+
+ Load session from a file.
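+
+ A sketch of persisting a conversation across app launches (the file path is a placeholder):
+
+ ```typescript
+ // After some completions, persist up to 1024 tokens of the session state
+ const tokensSaved = await context.saveSession('/path/to/session.bin', { tokenSize: 1024 });
+ console.log('Tokens saved:', tokensSaved);
+
+ // Later (e.g. after an app restart), restore the state before continuing
+ const loaded = await context.loadSession('/path/to/session.bin');
+ console.log('Session restored:', loaded);
+ ```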
+
+ ## 🔧 Configuration
+
+ ### Context Parameters
+
+ ```typescript
+ interface ContextParams {
+   model: string;          // Path to GGUF model file
+   n_ctx?: number;         // Context size (default: 512)
+   n_threads?: number;     // Number of threads (default: 4)
+   n_gpu_layers?: number;  // GPU layers (iOS only)
+   use_mlock?: boolean;    // Lock memory (default: false)
+   use_mmap?: boolean;     // Use memory mapping (default: true)
+   embedding?: boolean;    // Embedding mode (default: false)
+   cache_type_k?: string;  // KV cache type for K
+   cache_type_v?: string;  // KV cache type for V
+   pooling_type?: string;  // Pooling type
+   // ... more parameters
+ }
+ ```
+
+ ### Completion Parameters
+
+ ```typescript
+ interface CompletionParams {
+   prompt?: string;        // Text prompt
+   messages?: Message[];   // Chat messages
+   n_predict?: number;     // Max tokens to generate
+   temperature?: number;   // Sampling temperature
+   top_p?: number;         // Top-p sampling
+   top_k?: number;         // Top-k sampling
+   stop?: string[];        // Stop sequences
+   // ... more parameters
+ }
+ ```
+
+ ## 📱 Platform Support
+
+ | Feature | iOS | Android | Web |
+ |---------|-----|---------|-----|
+ | Text Generation | ✅ | ✅ | ❌ |
+ | Chat Conversations | ✅ | ✅ | ❌ |
+ | Streaming | ✅ | ✅ | ❌ |
+ | Multimodal | ✅ | ✅ | ❌ |
+ | TTS | ✅ | ✅ | ❌ |
+ | LoRA Adapters | ✅ | ✅ | ❌ |
+ | Embeddings | ✅ | ✅ | ❌ |
+ | Reranking | ✅ | ✅ | ❌ |
+ | Session Management | ✅ | ✅ | ❌ |
+ | Benchmarking | ✅ | ✅ | ❌ |
+
+ ## 🎨 Advanced Examples
+
+ ### Multimodal Processing
+
+ ```typescript
+ // Initialize multimodal support
+ await context.initMultimodal({
+   path: '/path/to/mmproj.gguf',
+   use_gpu: true,
+ });
+
+ // Process image with text
+ const result = await context.completion({
+   messages: [
+     {
+       role: "user",
+       content: [
+         { type: "text", text: "What do you see in this image?" },
+         { type: "image_url", image_url: { url: "file:///path/to/image.jpg" } }
+       ]
+     }
+   ],
+   n_predict: 100,
+ });
+
+ console.log('Image analysis:', result.content);
+ ```
+
+ ### Text-to-Speech
+
+ ```typescript
+ // Initialize TTS
+ await context.initVocoder({
+   path: '/path/to/vocoder.gguf',
+   n_batch: 512,
+ });
+
+ // Generate audio
+ const audioCompletion = await context.getFormattedAudioCompletion(
+   null, // Speaker configuration
+   "Hello, this is a test of text-to-speech functionality."
+ );
+
+ const guideTokens = await context.getAudioCompletionGuideTokens(
+   "Hello, this is a test of text-to-speech functionality."
+ );
+
+ const audioResult = await context.completion({
+   prompt: audioCompletion.prompt,
+   grammar: audioCompletion.grammar,
+   guide_tokens: guideTokens,
+   n_predict: 1000,
+ });
+
+ const audioData = await context.decodeAudioTokens(audioResult.audio_tokens);
+ ```
+
+ ### LoRA Adapters
+
+ ```typescript
+ // Apply LoRA adapters
+ await context.applyLoraAdapters([
+   { path: '/path/to/adapter1.gguf', scaled: 1.0 },
+   { path: '/path/to/adapter2.gguf', scaled: 0.5 }
+ ]);
+
+ // Check loaded adapters
+ const adapters = await context.getLoadedLoraAdapters();
+ console.log('Loaded adapters:', adapters);
+
+ // Generate with adapters
+ const result = await context.completion({
+   prompt: "Test prompt with LoRA adapters:",
+   n_predict: 50,
+ });
+
+ // Remove adapters
+ await context.removeLoraAdapters();
+ ```
+
+ ### Structured Output
+
+ ```typescript
+ const result = await context.completion({
+   prompt: "Generate a JSON object with a person's name, age, and favorite color:",
+   n_predict: 100,
+   response_format: {
+     type: 'json_schema',
+     json_schema: {
+       strict: true,
+       schema: {
+         type: 'object',
+         properties: {
+           name: { type: 'string' },
+           age: { type: 'number' },
+           favorite_color: { type: 'string' }
+         },
+         required: ['name', 'age', 'favorite_color']
+       }
+     }
+   }
+ });
+
+ console.log('Structured output:', result.content);
+ ```
+
+ ## 🔍 Model Compatibility
+
+ This plugin supports GGUF format models, which are compatible with llama.cpp. You can find GGUF models on Hugging Face by searching for the "GGUF" tag.
+
+ ### Recommended Models
+
+ - **Llama 2**: Meta's widely used open language model
+ - **Mistral**: High-performance open model
+ - **Code Llama**: Specialized for code generation
+ - **Phi-2**: Microsoft's efficient model
+ - **Gemma**: Google's open model
+
+ ### Model Quantization
+
+ For mobile devices, consider using quantized models (Q4_K_M, Q5_K_M, etc.) to reduce memory usage and improve performance.
+
+ ## ⚡ Performance Considerations
+
+ ### Memory Management
+
+ - Use quantized models for better memory efficiency
+ - Adjust `n_ctx` based on your use case
+ - Monitor memory usage with `use_mlock: false`
+
+ ### GPU Acceleration
+
+ - iOS: Set `n_gpu_layers` to use Metal GPU acceleration
+ - Android: GPU acceleration is automatically enabled when available
+
+ ### Threading
+
+ - Adjust `n_threads` based on device capabilities
+ - More threads may improve performance but increase memory usage
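+
+ Putting these knobs together, a starting configuration for a mid-range phone might look like the sketch below; the specific values are assumptions to tune per device:
+
+ ```typescript
+ const context = await initLlama({
+   model: '/path/to/model-q4_k_m.gguf', // quantized model keeps memory low
+   n_ctx: 1024,        // smaller context fits mobile RAM budgets
+   n_threads: 4,       // roughly match the device's performance cores
+   n_gpu_layers: 99,   // iOS: offload as many layers as possible to Metal
+   use_mmap: true,     // memory-map weights instead of copying them
+   use_mlock: false,   // let the OS page memory as needed
+ });
+ ```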
+
+ ## 🐛 Troubleshooting
+
+ ### Common Issues
+
+ 1. **Model not found**: Ensure the model path is correct and the file exists
+ 2. **Out of memory**: Try using a quantized model or reducing `n_ctx`
+ 3. **Slow performance**: Enable GPU acceleration or increase `n_threads`
+ 4. **Multimodal not working**: Ensure the mmproj file is compatible with your model
+
+ ### Debugging
+
+ Enable native logging to see detailed information:
+
+ ```typescript
+ import { toggleNativeLog, addNativeLogListener } from 'llama-cpp-capacitor';
+
+ await toggleNativeLog(true);
+
+ const logListener = addNativeLogListener((level, text) => {
+   console.log(`[${level}] ${text}`);
+ });
+ ```
+
+ ## 🤝 Contributing
+
+ We welcome contributions! Please see our [Contributing Guide](CONTRIBUTING.md) for details.
+
+ ## 📄 License
+
+ This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
+
+ ## 🙏 Acknowledgments
+
+ - [llama.cpp](https://github.com/ggerganov/llama.cpp) - The core inference engine
+ - [Capacitor](https://capacitorjs.com/) - The cross-platform runtime
+ - [llama.rn](https://github.com/mybigday/llama.rn) - The React Native implementation that inspired this plugin
+
+ ## 📞 Support
+
+ - 📧 Email: support@arusatech.com
+ - 🐛 Issues: [GitHub Issues](https://github.com/arusatech/llama-cpp/issues)
+ - 📖 Documentation: [GitHub Wiki](https://github.com/arusatech/llama-cpp/wiki)
package/android/build.gradle ADDED
@@ -0,0 +1,58 @@
+ ext {
+     junitVersion = project.hasProperty('junitVersion') ? rootProject.ext.junitVersion : '4.13.2'
+     androidxAppCompatVersion = project.hasProperty('androidxAppCompatVersion') ? rootProject.ext.androidxAppCompatVersion : '1.7.0'
+     androidxJunitVersion = project.hasProperty('androidxJunitVersion') ? rootProject.ext.androidxJunitVersion : '1.2.1'
+     androidxEspressoCoreVersion = project.hasProperty('androidxEspressoCoreVersion') ? rootProject.ext.androidxEspressoCoreVersion : '3.6.1'
+ }
+
+ buildscript {
+     repositories {
+         google()
+         mavenCentral()
+     }
+     dependencies {
+         classpath 'com.android.tools.build:gradle:8.7.2'
+     }
+ }
+
+ apply plugin: 'com.android.library'
+
+ android {
+     namespace "ai.annadata.plugin.capacitor"
+     compileSdk project.hasProperty('compileSdkVersion') ? rootProject.ext.compileSdkVersion : 35
+     defaultConfig {
+         minSdkVersion project.hasProperty('minSdkVersion') ? rootProject.ext.minSdkVersion : 23
+         targetSdkVersion project.hasProperty('targetSdkVersion') ? rootProject.ext.targetSdkVersion : 35
+         versionCode 1
+         versionName "1.0"
+         testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
+     }
+     buildTypes {
+         release {
+             minifyEnabled false
+             proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
+         }
+     }
+     lintOptions {
+         abortOnError false
+     }
+     compileOptions {
+         sourceCompatibility JavaVersion.VERSION_21
+         targetCompatibility JavaVersion.VERSION_21
+     }
+ }
+
+ repositories {
+     google()
+     mavenCentral()
+ }
+
+
+ dependencies {
+     implementation fileTree(dir: 'libs', include: ['*.jar'])
+     implementation project(':capacitor-android')
+     implementation "androidx.appcompat:appcompat:$androidxAppCompatVersion"
+     testImplementation "junit:junit:$junitVersion"
+     androidTestImplementation "androidx.test.ext:junit:$androidxJunitVersion"
+     androidTestImplementation "androidx.test.espresso:espresso-core:$androidxEspressoCoreVersion"
+ }
package/android/src/main/AndroidManifest.xml ADDED
@@ -0,0 +1,2 @@
+ <manifest xmlns:android="http://schemas.android.com/apk/res/android">
+ </manifest>