cui-llama.rn 1.6.1 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (196)
  1. package/android/src/main/CMakeLists.txt +6 -0
  2. package/android/src/main/java/com/rnllama/LlamaContext.java +38 -5
  3. package/android/src/main/java/com/rnllama/RNLlama.java +139 -4
  4. package/android/src/main/jni.cpp +153 -14
  5. package/android/src/main/jniLibs/arm64-v8a/librnllama.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod.so +0 -0
  9. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_dotprod_i8mm.so +0 -0
  10. package/android/src/main/jniLibs/arm64-v8a/librnllama_v8_2_i8mm.so +0 -0
  11. package/android/src/main/jniLibs/x86_64/librnllama.so +0 -0
  12. package/android/src/main/jniLibs/x86_64/librnllama_x86_64.so +0 -0
  13. package/android/src/newarch/java/com/rnllama/RNLlamaModule.java +24 -4
  14. package/android/src/oldarch/java/com/rnllama/RNLlamaModule.java +22 -2
  15. package/cpp/chat.cpp +128 -106
  16. package/cpp/chat.h +2 -0
  17. package/cpp/common.cpp +41 -76
  18. package/cpp/common.h +23 -19
  19. package/cpp/ggml-backend.cpp +9 -5
  20. package/cpp/ggml-backend.h +4 -4
  21. package/cpp/ggml-cpu/ggml-cpu-aarch64.cpp +0 -2
  22. package/cpp/ggml-cpu/ggml-cpu-quants.c +306 -6
  23. package/cpp/ggml-cpu/ggml-cpu.c +5 -13
  24. package/cpp/ggml-cpu/ggml-cpu.cpp +29 -16
  25. package/cpp/ggml-cpu/ops.cpp +107 -13
  26. package/cpp/ggml-cpu/vec.cpp +0 -6
  27. package/cpp/ggml-cpu/vec.h +16 -0
  28. package/cpp/ggml-llama-sim.metallib +0 -0
  29. package/cpp/ggml-llama.metallib +0 -0
  30. package/cpp/ggml-metal-impl.h +36 -11
  31. package/cpp/ggml-metal.m +321 -132
  32. package/cpp/ggml-opt.cpp +373 -190
  33. package/cpp/ggml-opt.h +49 -28
  34. package/cpp/ggml-quants.c +0 -6
  35. package/cpp/ggml.c +93 -38
  36. package/cpp/ggml.h +21 -7
  37. package/cpp/gguf.cpp +33 -33
  38. package/cpp/llama-adapter.cpp +6 -0
  39. package/cpp/llama-arch.cpp +3 -0
  40. package/cpp/llama-batch.cpp +3 -1
  41. package/cpp/llama-chat.cpp +8 -6
  42. package/cpp/llama-chat.h +1 -0
  43. package/cpp/llama-context.cpp +349 -135
  44. package/cpp/llama-context.h +30 -3
  45. package/cpp/llama-cparams.h +1 -0
  46. package/cpp/llama-graph.cpp +150 -234
  47. package/cpp/llama-graph.h +52 -7
  48. package/cpp/llama-hparams.cpp +17 -1
  49. package/cpp/llama-hparams.h +34 -5
  50. package/cpp/llama-kv-cache.cpp +662 -321
  51. package/cpp/llama-kv-cache.h +203 -93
  52. package/cpp/llama-memory.h +3 -2
  53. package/cpp/llama-model-loader.cpp +24 -15
  54. package/cpp/llama-model-saver.cpp +281 -0
  55. package/cpp/llama-model-saver.h +37 -0
  56. package/cpp/llama-model.cpp +536 -132
  57. package/cpp/llama-model.h +7 -1
  58. package/cpp/llama-sampling.cpp +18 -6
  59. package/cpp/llama-vocab.cpp +46 -8
  60. package/cpp/llama-vocab.h +6 -0
  61. package/cpp/llama.cpp +14 -0
  62. package/cpp/llama.h +72 -131
  63. package/cpp/minja/chat-template.hpp +9 -5
  64. package/cpp/minja/minja.hpp +69 -36
  65. package/cpp/rn-llama.cpp +611 -47
  66. package/cpp/rn-llama.h +33 -3
  67. package/cpp/sampling.cpp +57 -50
  68. package/cpp/tools/mtmd/clip-impl.h +462 -0
  69. package/cpp/tools/mtmd/clip.cpp +4024 -0
  70. package/cpp/tools/mtmd/clip.h +101 -0
  71. package/cpp/tools/mtmd/miniaudio.h +93468 -0
  72. package/cpp/tools/mtmd/mtmd-audio.cpp +855 -0
  73. package/cpp/tools/mtmd/mtmd-audio.h +62 -0
  74. package/cpp/tools/mtmd/mtmd-helper.cpp +297 -0
  75. package/cpp/tools/mtmd/mtmd.cpp +942 -0
  76. package/cpp/tools/mtmd/mtmd.h +362 -0
  77. package/cpp/tools/mtmd/stb_image.h +7988 -0
  78. package/ios/CMakeLists.txt +7 -0
  79. package/ios/RNLlama.mm +77 -3
  80. package/ios/RNLlamaContext.h +5 -1
  81. package/ios/RNLlamaContext.mm +105 -10
  82. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/chat.h +2 -0
  83. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/common.h +23 -19
  84. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
  85. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  86. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
  87. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/ggml.h +21 -7
  88. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
  89. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-context.h +30 -3
  90. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
  91. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
  92. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
  93. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  94. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
  95. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
  96. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-model.h +7 -1
  97. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
  98. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/llama.h +72 -131
  99. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  100. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
  101. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
  102. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/Info.plist +0 -0
  103. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  104. package/ios/rnllama.xcframework/ios-arm64/rnllama.framework/rnllama +0 -0
  105. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
  106. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
  107. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
  108. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  109. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
  110. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
  111. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
  112. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
  113. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
  114. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
  115. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
  116. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  117. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
  118. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
  119. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
  120. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
  121. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
  122. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  123. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
  124. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
  125. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  126. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
  127. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  128. package/ios/rnllama.xcframework/ios-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  129. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/chat.h +2 -0
  130. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/common.h +23 -19
  131. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-backend.h +4 -4
  132. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  133. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml-opt.h +49 -28
  134. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/ggml.h +21 -7
  135. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-chat.h +1 -0
  136. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-context.h +30 -3
  137. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-cparams.h +1 -0
  138. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-graph.h +52 -7
  139. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-hparams.h +34 -5
  140. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  141. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-memory.h +3 -2
  142. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model-saver.h +37 -0
  143. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-model.h +7 -1
  144. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama-vocab.h +6 -0
  145. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/llama.h +72 -131
  146. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  147. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/minja/minja.hpp +69 -36
  148. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Headers/rn-llama.h +33 -3
  149. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/Info.plist +0 -0
  150. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/ggml-llama.metallib +0 -0
  151. package/ios/rnllama.xcframework/tvos-arm64/rnllama.framework/rnllama +0 -0
  152. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/chat.h +2 -0
  153. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/common.h +23 -19
  154. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-backend.h +4 -4
  155. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-metal-impl.h +36 -11
  156. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml-opt.h +49 -28
  157. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/ggml.h +21 -7
  158. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-chat.h +1 -0
  159. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-context.h +30 -3
  160. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-cparams.h +1 -0
  161. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-graph.h +52 -7
  162. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-hparams.h +34 -5
  163. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-kv-cache.h +203 -93
  164. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-memory.h +3 -2
  165. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model-saver.h +37 -0
  166. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-model.h +7 -1
  167. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama-vocab.h +6 -0
  168. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/llama.h +72 -131
  169. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/chat-template.hpp +9 -5
  170. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/minja/minja.hpp +69 -36
  171. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Headers/rn-llama.h +33 -3
  172. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/Info.plist +0 -0
  173. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/_CodeSignature/CodeResources +1 -1
  174. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/ggml-llama-sim.metallib +0 -0
  175. package/ios/rnllama.xcframework/tvos-arm64_x86_64-simulator/rnllama.framework/rnllama +0 -0
  176. package/jest/mock.js +33 -7
  177. package/lib/commonjs/NativeRNLlama.js.map +1 -1
  178. package/lib/commonjs/index.js +153 -21
  179. package/lib/commonjs/index.js.map +1 -1
  180. package/lib/module/NativeRNLlama.js.map +1 -1
  181. package/lib/module/index.js +152 -20
  182. package/lib/module/index.js.map +1 -1
  183. package/lib/typescript/NativeRNLlama.d.ts +50 -4
  184. package/lib/typescript/NativeRNLlama.d.ts.map +1 -1
  185. package/lib/typescript/index.d.ts +72 -6
  186. package/lib/typescript/index.d.ts.map +1 -1
  187. package/package.json +1 -1
  188. package/src/NativeRNLlama.ts +67 -4
  189. package/src/index.ts +212 -38
  190. package/lib/commonjs/chat.js +0 -37
  191. package/lib/commonjs/chat.js.map +0 -1
  192. package/lib/module/chat.js +0 -33
  193. package/lib/module/chat.js.map +0 -1
  194. package/lib/typescript/chat.d.ts +0 -10
  195. package/lib/typescript/chat.d.ts.map +0 -1
  196. package/src/chat.ts +0 -44
package/src/NativeRNLlama.ts CHANGED
@@ -104,6 +104,12 @@ export type NativeCompletionParams = {
   }>
   preserved_tokens?: Array<string>
   chat_format?: number
+  /**
+   * Path to an image file to process before generating text.
+   * When provided, the image will be processed and added to the context.
+   * Requires multimodal support to be enabled via initMultimodal.
+   */
+  media_paths?: Array<string>
   /**
    * Specify a JSON array of stopping strings.
    * These words will not be included in the completion, so make sure to add them to the prompt for the next iteration. Default: `[]`
@@ -283,6 +289,22 @@ export type NativeCompletionResult = {
 
 export type NativeTokenizeResult = {
   tokens: Array<number>
+  /**
+   * Whether the tokenization contains images
+   */
+  has_images: boolean
+  /**
+   * Bitmap hashes of the images
+   */
+  bitmap_hashes: Array<number>
+  /**
+   * Chunk positions of the text and images
+   */
+  chunk_pos: Array<number>
+  /**
+   * Chunk positions of the images
+   */
+  chunk_pos_images: Array<number>
 }
 
 export type NativeEmbeddingResult = {
@@ -336,9 +358,14 @@ export type NativeSessionLoadResult = {
   prompt: string
 }
 
+export type NativeLlamaMessagePart = {
+  type: 'text'
+  text: string
+}
+
 export type NativeLlamaChatMessage = {
   role: string
-  content: string
+  content: string | Array<NativeLlamaMessagePart>
 }
 
 export type NativeCPUFeatures = {
@@ -347,8 +374,14 @@ export type NativeCPUFeatures = {
   dotprod: boolean
 }
 
-export type JinjaFormattedChatResult = {
+export type FormattedChatResult = {
+  type: 'jinja' | 'llama-chat'
   prompt: string
+  has_media: boolean
+  media_paths?: Array<string>
+}
+
+export type JinjaFormattedChatResult = FormattedChatResult & {
   chat_format?: number
   grammar?: string
   grammar_lazy?: boolean
@@ -361,6 +394,12 @@ export type JinjaFormattedChatResult = {
   additional_stops?: Array<string>
 }
 
+export type NativeImageProcessingResult = {
+  success: boolean
+  prompt: string
+  error?: string
+}
+
 export interface Spec extends TurboModule {
   toggleNativeLog(enabled: boolean): Promise<void>
   setContextLimit(limit: number): Promise<void>
@@ -397,8 +436,8 @@ export interface Spec extends TurboModule {
     params: NativeCompletionParams,
   ): Promise<NativeCompletionResult>
   stopCompletion(contextId: number): Promise<void>
-  tokenizeAsync(contextId: number, text: string): Promise<NativeTokenizeResult>
-  tokenizeSync(contextId: number, text: string): NativeTokenizeResult
+  tokenizeAsync(contextId: number, text: string, imagePaths?: Array<string>): Promise<NativeTokenizeResult>
+  tokenizeSync(contextId: number, text: string, imagePaths?: Array<string>): NativeTokenizeResult
   getCpuFeatures() : Promise<NativeCPUFeatures>
   detokenize(contextId: number, tokens: number[]): Promise<string>
   embedding(
@@ -423,6 +462,30 @@ export interface Spec extends TurboModule {
     contextId: number,
   ): Promise<Array<{ path: string; scaled?: number }>>
 
+  // Multimodal methods
+  initMultimodal(
+    contextId: number,
+    params: {
+      path: string
+      use_gpu: boolean
+    },
+  ): Promise<boolean>
+
+  isMultimodalEnabled(
+    contextId: number,
+  ): Promise<boolean>
+
+  getMultimodalSupport(
+    contextId: number,
+  ): Promise<{
+    vision: boolean
+    audio: boolean
+  }>
+
+  releaseMultimodal(
+    contextId: number,
+  ): Promise<void>
+
   releaseContext(contextId: number): Promise<void>
 
   releaseAllContexts(): Promise<void>
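
The new Spec surface above defines the native contract for multimodal input: the JS layer sends a prompt containing media markers together with a parallel list of file paths, and tokenization can report where media chunks land. A minimal sketch of the params shape the JS side ends up passing (the file path is hypothetical; context creation and the remaining completion fields are outside this hunk):

```ts
// Sketch only: the marker text matches RNLLAMA_MTMD_DEFAULT_MEDIA_MARKER,
// exported by src/index.ts later in this diff.
const nativeCompletionParams = {
  prompt: 'USER: <__media__>\nDescribe the photo.\nASSISTANT:',
  // One path per media marker in the prompt, in order of appearance.
  media_paths: ['/data/user/0/com.example/files/photo.jpg'],
  emit_partial_completion: false,
}
```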
package/src/index.ts CHANGED
@@ -15,14 +15,33 @@ import type {
   NativeCompletionTokenProbItem,
   NativeCompletionResultTimings,
   JinjaFormattedChatResult,
+  FormattedChatResult,
+  NativeImageProcessingResult,
+  NativeLlamaChatMessage,
 } from './NativeRNLlama'
 import type {
   SchemaGrammarConverterPropOrder,
   SchemaGrammarConverterBuiltinRule,
 } from './grammar'
 import { SchemaGrammarConverter, convertJsonSchemaToGrammar } from './grammar'
-import type { RNLlamaMessagePart, RNLlamaOAICompatibleMessage } from './chat'
-import { formatChat } from './chat'
+
+export type RNLlamaMessagePart = {
+  type: string
+  text?: string
+  image_url?: {
+    url?: string
+  }
+  input_audio?: {
+    format: string
+    data?: string
+    url?: string
+  }
+}
+
+export type RNLlamaOAICompatibleMessage = {
+  role: string
+  content?: string | RNLlamaMessagePart[]
+}
 
 export type {
   NativeContextParams,
@@ -36,15 +55,17 @@ export type {
   NativeEmbeddingParams,
   NativeCompletionTokenProbItem,
   NativeCompletionResultTimings,
-  RNLlamaMessagePart,
-  RNLlamaOAICompatibleMessage,
+  FormattedChatResult,
   JinjaFormattedChatResult,
+  NativeImageProcessingResult,
 
   // Deprecated
   SchemaGrammarConverterPropOrder,
   SchemaGrammarConverterBuiltinRule,
 }
 
+export const RNLLAMA_MTMD_DEFAULT_MEDIA_MARKER = '<__media__>'
+
 export { SchemaGrammarConverter, convertJsonSchemaToGrammar }
 
 const EVENT_ON_INIT_CONTEXT_PROGRESS = '@RNLlama_onInitContextProgress'
@@ -126,6 +147,7 @@ export type CompletionBaseParams = {
   parallel_tool_calls?: object
   tool_choice?: string
   response_format?: CompletionResponseFormat
+  media_paths?: string | string[]
 }
 export type CompletionParams = Omit<
   NativeCompletionParams,
@@ -207,23 +229,94 @@ export class LlamaContext {
       parallel_tool_calls?: object
       tool_choice?: string
     },
-  ): Promise<JinjaFormattedChatResult | string> {
-    const chat = formatChat(messages)
+  ): Promise<FormattedChatResult | JinjaFormattedChatResult> {
+    const mediaPaths: string[] = []
+    const chat = messages.map((msg) => {
+      if (Array.isArray(msg.content)) {
+        const content = msg.content.map((part) => {
+          // Handle multimodal content
+          if (part.type === 'image_url') {
+            let path = part.image_url?.url || ''
+            if (path?.startsWith('file://')) path = path.slice(7)
+            mediaPaths.push(path)
+            return {
+              type: 'text',
+              text: RNLLAMA_MTMD_DEFAULT_MEDIA_MARKER,
+            }
+          } else if (part.type === 'input_audio') {
+            const { input_audio: audio } = part
+            if (!audio) throw new Error('input_audio is required')
+
+            const { format } = audio
+            if (format != 'wav' && format != 'mp3') {
+              throw new Error(`Unsupported audio format: ${format}`)
+            }
+            if (audio.url) {
+              const path = audio.url.replace(/file:\/\//, '')
+              mediaPaths.push(path)
+            } else if (audio.data) {
+              mediaPaths.push(audio.data)
+            }
+            return {
+              type: 'text',
+              text: RNLLAMA_MTMD_DEFAULT_MEDIA_MARKER,
+            }
+          }
+          return part
+        })
+
+        return {
+          ...msg,
+          content,
+        }
+      }
+      return msg
+    }) as NativeLlamaChatMessage[]
+
     const useJinja = this.isJinjaSupported() && params?.jinja
-    let tmpl = this.isLlamaChatSupported() || useJinja ? undefined : 'chatml'
+    let tmpl
     if (template) tmpl = template // Force replace if provided
     const jsonSchema = getJsonSchema(params?.response_format)
-    return RNLlama.getFormattedChat(this.id, JSON.stringify(chat), tmpl, {
-      jinja: useJinja,
-      json_schema: jsonSchema ? JSON.stringify(jsonSchema) : undefined,
-      tools: params?.tools ? JSON.stringify(params.tools) : undefined,
-      parallel_tool_calls: params?.parallel_tool_calls
-        ? JSON.stringify(params.parallel_tool_calls)
-        : undefined,
-      tool_choice: params?.tool_choice,
-    })
+
+    const result = await RNLlama.getFormattedChat(
+      this.id,
+      JSON.stringify(chat),
+      tmpl,
+      {
+        jinja: useJinja,
+        json_schema: jsonSchema ? JSON.stringify(jsonSchema) : undefined,
+        tools: params?.tools ? JSON.stringify(params.tools) : undefined,
+        parallel_tool_calls: params?.parallel_tool_calls
+          ? JSON.stringify(params.parallel_tool_calls)
+          : undefined,
+        tool_choice: params?.tool_choice,
+      },
+    )
+    if (!useJinja) {
+      return {
+        type: 'llama-chat',
+        prompt: result as string,
+        has_media: mediaPaths.length > 0,
+        media_paths: mediaPaths,
+      }
+    }
+    const jinjaResult = result as JinjaFormattedChatResult
+    jinjaResult.type = 'jinja'
+    jinjaResult.has_media = mediaPaths.length > 0
+    jinjaResult.media_paths = mediaPaths
+    return jinjaResult
   }
 
+  /**
+   * Generate a completion based on the provided parameters
+   * @param params Completion parameters including prompt or messages
+   * @param callback Optional callback for token-by-token streaming
+   * @returns Promise resolving to the completion result
+   *
+   * Note: For multimodal support, you can include a media_paths parameter.
+   * This will process the images and add them to the context before generating text.
+   * Multimodal support must be enabled via initMultimodal() first.
+   */
   async completion(
     params: CompletionParams,
     callback?: (data: TokenData) => void,
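
As a usage sketch (not part of the diff), the rewritten getFormattedChat accepts OpenAI-style message parts directly, replaces image and audio parts with the media marker, and reports the collected paths; the model and file names below are placeholders:

```ts
import { initLlama } from 'cui-llama.rn'

// Assumes a model that ships a chat template; paths are hypothetical.
const context = await initLlama({ model: 'file:///models/model.gguf' })

const formatted = await context.getFormattedChat(
  [
    {
      role: 'user',
      content: [
        { type: 'text', text: 'What is in this picture?' },
        { type: 'image_url', image_url: { url: 'file:///tmp/cat.png' } },
      ],
    },
  ],
  null, // template: fall back to the model's own chat template
  { jinja: true },
)

// formatted.type is 'jinja' or 'llama-chat'; either way the image part was
// swapped for the media marker and its path collected:
console.log(formatted.has_media, formatted.media_paths) // true, ['/tmp/cat.png']
```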
@@ -233,8 +326,8 @@ export class LlamaContext {
       prompt: params.prompt || '',
       emit_partial_completion: !!callback,
     }
+
     if (params.messages) {
-      // messages always win
       const formattedResult = await this.getFormattedChat(
         params.messages,
         params.chat_template || params.chatTemplate,
@@ -245,29 +338,42 @@ export class LlamaContext {
           tool_choice: params.tool_choice,
         },
       )
-      if (typeof formattedResult === 'string') {
-        nativeParams.prompt = formattedResult || ''
-      } else {
-        nativeParams.prompt = formattedResult.prompt || ''
-        if (typeof formattedResult.chat_format === 'number')
-          nativeParams.chat_format = formattedResult.chat_format
-        if (formattedResult.grammar)
-          nativeParams.grammar = formattedResult.grammar
-        if (typeof formattedResult.grammar_lazy === 'boolean')
-          nativeParams.grammar_lazy = formattedResult.grammar_lazy
-        if (formattedResult.grammar_triggers)
-          nativeParams.grammar_triggers = formattedResult.grammar_triggers
-        if (formattedResult.preserved_tokens)
-          nativeParams.preserved_tokens = formattedResult.preserved_tokens
-        if (formattedResult.additional_stops) {
+      if (formattedResult.type === 'jinja') {
+        const jinjaResult = formattedResult as JinjaFormattedChatResult
+
+        nativeParams.prompt = jinjaResult.prompt || ''
+        if (typeof jinjaResult.chat_format === 'number')
+          nativeParams.chat_format = jinjaResult.chat_format
+        if (jinjaResult.grammar) nativeParams.grammar = jinjaResult.grammar
+        if (typeof jinjaResult.grammar_lazy === 'boolean')
+          nativeParams.grammar_lazy = jinjaResult.grammar_lazy
+        if (jinjaResult.grammar_triggers)
+          nativeParams.grammar_triggers = jinjaResult.grammar_triggers
+        if (jinjaResult.preserved_tokens)
+          nativeParams.preserved_tokens = jinjaResult.preserved_tokens
+        if (jinjaResult.additional_stops) {
           if (!nativeParams.stop) nativeParams.stop = []
-          nativeParams.stop.push(...formattedResult.additional_stops)
+          nativeParams.stop.push(...jinjaResult.additional_stops)
+        }
+        if (jinjaResult.has_media) {
+          nativeParams.media_paths = jinjaResult.media_paths
+        }
+      } else if (formattedResult.type === 'llama-chat') {
+        const llamaChatResult = formattedResult as FormattedChatResult
+        nativeParams.prompt = llamaChatResult.prompt || ''
+        if (llamaChatResult.has_media) {
+          nativeParams.media_paths = llamaChatResult.media_paths
        }
       }
     } else {
       nativeParams.prompt = params.prompt || ''
     }
 
+    // If media_paths were explicitly provided or extracted from messages, use them
+    if (!nativeParams.media_paths && params.media_paths) {
+      nativeParams.media_paths = params.media_paths
+    }
+
     if (nativeParams.response_format && !nativeParams.grammar) {
       const jsonSchema = getJsonSchema(params.response_format)
       if (jsonSchema) nativeParams.json_schema = JSON.stringify(jsonSchema)
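
The practical effect in completion() is that image or audio parts inside messages are routed into nativeParams.media_paths automatically, while an explicit top-level media_paths still applies when the messages did not set any. A hedged end-to-end sketch, assuming multimodal has already been initialized on the context (see the initMultimodal hunk below) and hypothetical file paths:

```ts
const result = await context.completion(
  {
    messages: [
      {
        role: 'user',
        content: [
          { type: 'text', text: 'Describe this image briefly.' },
          { type: 'image_url', image_url: { url: 'file:///tmp/cat.png' } },
        ],
      },
    ],
    n_predict: 128,
  },
  (data) => {
    // Streaming callback: partial tokens as they are generated.
    console.log(data.token)
  },
)
console.log(result.text)
```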
@@ -301,12 +407,32 @@ export class LlamaContext {
     return RNLlama.stopCompletion(this.id)
   }
 
-  tokenizeAsync(text: string): Promise<NativeTokenizeResult> {
-    return RNLlama.tokenizeAsync(this.id, text)
+  /**
+   * Tokenize text or text with images
+   * @param text Text to tokenize
+   * @param params.media_paths Array of image paths to tokenize (if multimodal is enabled)
+   * @returns Promise resolving to the tokenize result
+   */
+  tokenizeAsync(
+    text: string,
+    {
+      media_paths: mediaPaths,
+    }: {
+      media_paths?: string[]
+    } = {},
+  ): Promise<NativeTokenizeResult> {
+    return RNLlama.tokenizeAsync(this.id, text, mediaPaths)
   }
 
-  tokenizeSync(text: string): NativeTokenizeResult {
-    return RNLlama.tokenizeSync(this.id, text)
+  tokenizeSync(
+    text: string,
+    {
+      media_paths: mediaPaths,
+    }: {
+      media_paths?: string[]
+    } = {},
+  ): NativeTokenizeResult {
+    return RNLlama.tokenizeSync(this.id, text, mediaPaths)
   }
 
   detokenize(tokens: number[]): Promise<string> {
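
A short sketch of the extended tokenizers (paths hypothetical); when a media path is supplied and multimodal is enabled, the result also describes where the image chunks fall:

```ts
const tok = await context.tokenizeAsync('Describe <__media__> please.', {
  media_paths: ['/tmp/cat.png'],
})
console.log(tok.tokens.length)
console.log(tok.has_images)       // true when an image chunk was produced
console.log(tok.chunk_pos_images) // positions of the image chunks
```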
@@ -362,6 +488,54 @@ export class LlamaContext {
     return RNLlama.getLoadedLoraAdapters(this.id)
   }
 
+  /**
+   * Initialize multimodal support with a mmproj file
+   * @param params Parameters for multimodal support
+   * @param params.path Path to the multimodal projector file
+   * @param params.use_gpu Whether to use GPU
+   * @returns Promise resolving to true if initialization was successful
+   */
+  async initMultimodal({
+    path,
+    use_gpu: useGpu,
+  }: {
+    path: string
+    use_gpu?: boolean
+  }): Promise<boolean> {
+    if (path.startsWith('file://')) path = path.slice(7)
+    return RNLlama.initMultimodal(this.id, {
+      path,
+      use_gpu: useGpu ?? true,
+    })
+  }
+
+  /**
+   * Check if multimodal support is enabled
+   * @returns Promise resolving to true if multimodal is enabled
+   */
+  async isMultimodalEnabled(): Promise<boolean> {
+    return await RNLlama.isMultimodalEnabled(this.id)
+  }
+
+  /**
+   * Check multimodal support
+   * @returns Promise resolving to an object with vision and audio support
+   */
+  async getMultimodalSupport(): Promise<{
+    vision: boolean
+    audio: boolean
+  }> {
+    return await RNLlama.getMultimodalSupport(this.id)
+  }
+
+  /**
+   * Release multimodal support
+   * @returns Promise resolving to void
+   */
+  async releaseMultimodal(): Promise<void> {
+    return await RNLlama.releaseMultimodal(this.id)
+  }
+
   async release(): Promise<void> {
     return RNLlama.releaseContext(this.id)
   }
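
Sketch of the multimodal lifecycle these helpers enable, assuming an mmproj projector file that matches the loaded model (the path is a placeholder):

```ts
// Attach the multimodal projector to an existing context.
const ok = await context.initMultimodal({
  path: 'file:///models/mmproj-model-f16.gguf',
  use_gpu: true,
})

if (ok && (await context.isMultimodalEnabled())) {
  const support = await context.getMultimodalSupport()
  console.log('vision:', support.vision, 'audio:', support.audio)
}

// Free the projector once media input is no longer needed.
await context.releaseMultimodal()
```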
@@ -407,7 +581,7 @@ const modelInfoSkip = [
   'tokenizer.ggml.tokens',
   'tokenizer.ggml.token_type',
   'tokenizer.ggml.merges',
-  'tokenizer.ggml.scores'
+  'tokenizer.ggml.scores',
 ]
 export async function loadLlamaModelInfo(model: string): Promise<Object> {
   let path = model
package/lib/commonjs/chat.js DELETED
@@ -1,37 +0,0 @@
-"use strict";
-
-Object.defineProperty(exports, "__esModule", {
-  value: true
-});
-exports.formatChat = formatChat;
-function formatChat(messages) {
-  const chat = [];
-  messages.forEach(currMsg => {
-    const role = currMsg.role || '';
-    let content = '';
-    if ('content' in currMsg) {
-      if (typeof currMsg.content === 'string') {
-        ;
-        ({
-          content
-        } = currMsg);
-      } else if (Array.isArray(currMsg.content)) {
-        currMsg.content.forEach(part => {
-          if ('text' in part) {
-            content += `${content ? '\n' : ''}${part.text}`;
-          }
-        });
-      } else {
-        throw new TypeError("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
-      }
-    } else {
-      throw new Error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
-    }
-    chat.push({
-      role,
-      content
-    });
-  });
-  return chat;
-}
-//# sourceMappingURL=chat.js.map
package/lib/commonjs/chat.js.map DELETED
@@ -1 +0,0 @@
- {"version":3,"names":["formatChat","messages","chat","forEach","currMsg","role","content","Array","isArray","part","text","TypeError","Error","push"],"sourceRoot":"../../src","sources":["chat.ts"],"mappings":";;;;;;AAWO,SAASA,UAAUA,CACxBC,QAAuC,EACb;EAC1B,MAAMC,IAA8B,GAAG,EAAE;EAEzCD,QAAQ,CAACE,OAAO,CAAEC,OAAO,IAAK;IAC5B,MAAMC,IAAY,GAAGD,OAAO,CAACC,IAAI,IAAI,EAAE;IAEvC,IAAIC,OAAe,GAAG,EAAE;IACxB,IAAI,SAAS,IAAIF,OAAO,EAAE;MACxB,IAAI,OAAOA,OAAO,CAACE,OAAO,KAAK,QAAQ,EAAE;QACvC;QAAC,CAAC;UAAEA;QAAQ,CAAC,GAAGF,OAAO;MACzB,CAAC,MAAM,IAAIG,KAAK,CAACC,OAAO,CAACJ,OAAO,CAACE,OAAO,CAAC,EAAE;QACzCF,OAAO,CAACE,OAAO,CAACH,OAAO,CAAEM,IAAI,IAAK;UAChC,IAAI,MAAM,IAAIA,IAAI,EAAE;YAClBH,OAAO,IAAI,GAAGA,OAAO,GAAG,IAAI,GAAG,EAAE,GAAGG,IAAI,CAACC,IAAI,EAAE;UACjD;QACF,CAAC,CAAC;MACJ,CAAC,MAAM;QACL,MAAM,IAAIC,SAAS,CACjB,kFACF,CAAC;MACH;IACF,CAAC,MAAM;MACL,MAAM,IAAIC,KAAK,CACb,6EACF,CAAC;IACH;IAEAV,IAAI,CAACW,IAAI,CAAC;MAAER,IAAI;MAAEC;IAAQ,CAAC,CAAC;EAC9B,CAAC,CAAC;EACF,OAAOJ,IAAI;AACb","ignoreList":[]}
package/lib/module/chat.js DELETED
@@ -1,33 +0,0 @@
-"use strict";
-
-export function formatChat(messages) {
-  const chat = [];
-  messages.forEach(currMsg => {
-    const role = currMsg.role || '';
-    let content = '';
-    if ('content' in currMsg) {
-      if (typeof currMsg.content === 'string') {
-        ;
-        ({
-          content
-        } = currMsg);
-      } else if (Array.isArray(currMsg.content)) {
-        currMsg.content.forEach(part => {
-          if ('text' in part) {
-            content += `${content ? '\n' : ''}${part.text}`;
-          }
-        });
-      } else {
-        throw new TypeError("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
-      }
-    } else {
-      throw new Error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
-    }
-    chat.push({
-      role,
-      content
-    });
-  });
-  return chat;
-}
-//# sourceMappingURL=chat.js.map
package/lib/module/chat.js.map DELETED
@@ -1 +0,0 @@
- {"version":3,"names":["formatChat","messages","chat","forEach","currMsg","role","content","Array","isArray","part","text","TypeError","Error","push"],"sourceRoot":"../../src","sources":["chat.ts"],"mappings":";;AAWA,OAAO,SAASA,UAAUA,CACxBC,QAAuC,EACb;EAC1B,MAAMC,IAA8B,GAAG,EAAE;EAEzCD,QAAQ,CAACE,OAAO,CAAEC,OAAO,IAAK;IAC5B,MAAMC,IAAY,GAAGD,OAAO,CAACC,IAAI,IAAI,EAAE;IAEvC,IAAIC,OAAe,GAAG,EAAE;IACxB,IAAI,SAAS,IAAIF,OAAO,EAAE;MACxB,IAAI,OAAOA,OAAO,CAACE,OAAO,KAAK,QAAQ,EAAE;QACvC;QAAC,CAAC;UAAEA;QAAQ,CAAC,GAAGF,OAAO;MACzB,CAAC,MAAM,IAAIG,KAAK,CAACC,OAAO,CAACJ,OAAO,CAACE,OAAO,CAAC,EAAE;QACzCF,OAAO,CAACE,OAAO,CAACH,OAAO,CAAEM,IAAI,IAAK;UAChC,IAAI,MAAM,IAAIA,IAAI,EAAE;YAClBH,OAAO,IAAI,GAAGA,OAAO,GAAG,IAAI,GAAG,EAAE,GAAGG,IAAI,CAACC,IAAI,EAAE;UACjD;QACF,CAAC,CAAC;MACJ,CAAC,MAAM;QACL,MAAM,IAAIC,SAAS,CACjB,kFACF,CAAC;MACH;IACF,CAAC,MAAM;MACL,MAAM,IAAIC,KAAK,CACb,6EACF,CAAC;IACH;IAEAV,IAAI,CAACW,IAAI,CAAC;MAAER,IAAI;MAAEC;IAAQ,CAAC,CAAC;EAC9B,CAAC,CAAC;EACF,OAAOJ,IAAI;AACb","ignoreList":[]}
package/lib/typescript/chat.d.ts DELETED
@@ -1,10 +0,0 @@
-import type { NativeLlamaChatMessage } from './NativeRNLlama';
-export type RNLlamaMessagePart = {
-    text?: string;
-};
-export type RNLlamaOAICompatibleMessage = {
-    role: string;
-    content?: string | RNLlamaMessagePart[] | any;
-};
-export declare function formatChat(messages: RNLlamaOAICompatibleMessage[]): NativeLlamaChatMessage[];
-//# sourceMappingURL=chat.d.ts.map
package/lib/typescript/chat.d.ts.map DELETED
@@ -1 +0,0 @@
- {"version":3,"file":"chat.d.ts","sourceRoot":"","sources":["../../src/chat.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,sBAAsB,EAAE,MAAM,iBAAiB,CAAA;AAE7D,MAAM,MAAM,kBAAkB,GAAG;IAC/B,IAAI,CAAC,EAAE,MAAM,CAAA;CACd,CAAA;AAED,MAAM,MAAM,2BAA2B,GAAG;IACxC,IAAI,EAAE,MAAM,CAAA;IACZ,OAAO,CAAC,EAAE,MAAM,GAAG,kBAAkB,EAAE,GAAG,GAAG,CAAA;CAC9C,CAAA;AAED,wBAAgB,UAAU,CACxB,QAAQ,EAAE,2BAA2B,EAAE,GACtC,sBAAsB,EAAE,CA8B1B"}
package/src/chat.ts DELETED
@@ -1,44 +0,0 @@
-import type { NativeLlamaChatMessage } from './NativeRNLlama'
-
-export type RNLlamaMessagePart = {
-  text?: string
-}
-
-export type RNLlamaOAICompatibleMessage = {
-  role: string
-  content?: string | RNLlamaMessagePart[] | any // any for check invalid content type
-}
-
-export function formatChat(
-  messages: RNLlamaOAICompatibleMessage[],
-): NativeLlamaChatMessage[] {
-  const chat: NativeLlamaChatMessage[] = []
-
-  messages.forEach((currMsg) => {
-    const role: string = currMsg.role || ''
-
-    let content: string = ''
-    if ('content' in currMsg) {
-      if (typeof currMsg.content === 'string') {
-        ;({ content } = currMsg)
-      } else if (Array.isArray(currMsg.content)) {
-        currMsg.content.forEach((part) => {
-          if ('text' in part) {
-            content += `${content ? '\n' : ''}${part.text}`
-          }
-        })
-      } else {
-        throw new TypeError(
-          "Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)",
-        )
-      }
-    } else {
-      throw new Error(
-        "Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)",
-      )
-    }
-
-    chat.push({ role, content })
-  })
-  return chat
-}
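
With chat.ts removed, formatChat is no longer part of the package; RNLlamaMessagePart and RNLlamaOAICompatibleMessage now live in src/index.ts, and message flattening happens inside getFormattedChat/completion. A hedged migration sketch for callers that previously pre-formatted messages themselves:

```ts
// 1.7.0: pass OpenAI-style messages straight through; both plain-string and
// part-array content are handled internally now.
import type { RNLlamaOAICompatibleMessage } from 'cui-llama.rn'

const messages: RNLlamaOAICompatibleMessage[] = [
  { role: 'system', content: 'You are a helpful assistant.' },
  { role: 'user', content: [{ type: 'text', text: 'Hello!' }] },
]
// Assuming an initialized LlamaContext `context`:
// await context.completion({ messages, n_predict: 64 })
```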