cactus-react-native 0.1.1 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. package/README.md +872 -146
  2. package/android/src/main/CMakeLists.txt +1 -1
  3. package/android/src/main/jniLibs/arm64-v8a/libcactus.so +0 -0
  4. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8.so +0 -0
  5. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod_i8mm.so +0 -0
  8. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_i8mm.so +0 -0
  9. package/android/src/main/jniLibs/x86_64/libcactus.so +0 -0
  10. package/android/src/main/jniLibs/x86_64/libcactus_x86_64.so +0 -0
  11. package/ios/CMakeLists.txt +6 -6
  12. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/cactus.h +12 -0
  13. package/ios/cactus.xcframework/ios-arm64/cactus.framework/cactus +0 -0
  14. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/cactus.h +12 -0
  15. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
  16. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/cactus.h +12 -0
  17. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/cactus +0 -0
  18. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/cactus.h +12 -0
  19. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
  20. package/lib/commonjs/index.js.map +1 -1
  21. package/lib/commonjs/lm.js.map +1 -0
  22. package/lib/commonjs/tts.js.map +1 -0
  23. package/lib/commonjs/vlm.js.map +0 -0
  24. package/lib/module/index.js.map +1 -1
  25. package/lib/module/lm.js.map +0 -0
  26. package/lib/module/tts.js.map +1 -0
  27. package/lib/module/vlm.js.map +1 -0
  28. package/lib/typescript/index.d.ts +5 -1
  29. package/lib/typescript/index.d.ts.map +1 -1
  30. package/lib/typescript/lm.d.ts +41 -0
  31. package/lib/typescript/lm.d.ts.map +1 -0
  32. package/lib/typescript/tts.d.ts +10 -0
  33. package/lib/typescript/tts.d.ts.map +1 -0
  34. package/lib/typescript/vlm.d.ts +44 -0
  35. package/lib/typescript/vlm.d.ts.map +1 -0
  36. package/package.json +2 -1
  37. package/src/index.ts +11 -1
  38. package/src/lm.ts +49 -0
  39. package/src/tts.ts +45 -0
  40. package/src/vlm.ts +70 -0
  41. package/lib/commonjs/NativeCactus.js +0 -10
  42. package/lib/commonjs/chat.js +0 -37
  43. package/lib/commonjs/grammar.js +0 -560
  44. package/lib/commonjs/index.js +0 -412
  45. package/lib/commonjs/tools.js +0 -118
  46. package/lib/commonjs/tools.js.map +0 -1
  47. package/lib/module/NativeCactus.js +0 -8
  48. package/lib/module/chat.js +0 -33
  49. package/lib/module/grammar.js +0 -553
  50. package/lib/module/index.js +0 -363
  51. package/lib/module/tools.js +0 -110
  52. package/lib/module/tools.js.map +0 -1
package/src/lm.ts ADDED
@@ -0,0 +1,49 @@
+ import { initLlama, LlamaContext } from './index'
+ import type {
+   ContextParams,
+   CompletionParams,
+   CactusOAICompatibleMessage,
+   NativeCompletionResult,
+   EmbeddingParams,
+   NativeEmbeddingResult,
+ } from './index'
+
+ export class CactusLM {
+   private context: LlamaContext
+
+   private constructor(context: LlamaContext) {
+     this.context = context
+   }
+
+   static async init(
+     params: ContextParams,
+     onProgress?: (progress: number) => void,
+   ): Promise<CactusLM> {
+     const context = await initLlama(params, onProgress)
+     return new CactusLM(context)
+   }
+
+   async completion(
+     messages: CactusOAICompatibleMessage[],
+     params: CompletionParams = {},
+     callback?: (data: any) => void,
+   ): Promise<NativeCompletionResult> {
+     return this.context.completion({ messages, ...params }, callback)
+   }
+
+   async embedding(
+     text: string,
+     params?: EmbeddingParams,
+   ): Promise<NativeEmbeddingResult> {
+     return this.context.embedding(text, params)
+   }
+
+   async rewind(): Promise<void> {
+     // @ts-ignore
+     return this.context?.rewind()
+   }
+
+   async release(): Promise<void> {
+     return this.context.release()
+   }
+ }
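
The new CactusLM class wraps initLlama behind a static factory so a context always exists before completion or embedding calls. A minimal usage sketch based on the source above (the package import, model path, and n_predict field are assumptions in the style of llama.rn-like bindings, not values taken from this diff):

import { CactusLM } from 'cactus-react-native'

async function demo() {
  // Hypothetical on-device model path; ContextParams is assumed to
  // accept a `model` path as in llama.rn-style APIs.
  const lm = await CactusLM.init(
    { model: '/data/local/models/qwen-0.5b.gguf' },
    (progress) => console.log(`load progress: ${progress}`),
  )

  const result = await lm.completion(
    [{ role: 'user', content: 'Summarize what CactusLM does.' }],
    { n_predict: 128 }, // assumed CompletionParams field
  )
  console.log(result)

  await lm.release()
}
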
package/src/tts.ts ADDED
@@ -0,0 +1,45 @@
+ import {
+   LlamaContext,
+   initVocoder,
+   getFormattedAudioCompletion,
+   decodeAudioTokens,
+   releaseVocoder,
+ } from './index'
+ import type { NativeAudioDecodeResult } from './index'
+
+ export class CactusTTS {
+   private context: LlamaContext
+
+   private constructor(context: LlamaContext) {
+     this.context = context
+   }
+
+   static async init(
+     context: LlamaContext,
+     vocoderModelPath: string,
+   ): Promise<CactusTTS> {
+     await initVocoder(context.id, vocoderModelPath)
+     return new CactusTTS(context)
+   }
+
+   async generate(
+     textToSpeak: string,
+     speakerJsonStr: string,
+   ): Promise<NativeAudioDecodeResult> {
+     const { formatted_prompt } = await getFormattedAudioCompletion(
+       this.context.id,
+       speakerJsonStr,
+       textToSpeak,
+     )
+     // This part is simplified. In a real scenario, the tokens from
+     // the main model would be generated and passed to decodeAudioTokens.
+     // For now, we are assuming a direct path which may not be fully functional
+     // without the main model's token output for TTS.
+     const tokens = (await this.context.tokenize(formatted_prompt)).tokens
+     return decodeAudioTokens(this.context.id, tokens)
+   }
+
+   async release(): Promise<void> {
+     return releaseVocoder(this.context.id)
+   }
+ }
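
Unlike CactusLM, CactusTTS.init takes an already-created LlamaContext plus a vocoder model path, so the caller owns the context's lifecycle. A hedged sketch of the intended flow (paths and the speaker JSON are placeholders, and as the inline comments in generate note, tokenizing the formatted prompt directly is a simplification):

import { initLlama, CactusTTS } from 'cactus-react-native'

async function speak() {
  // The caller creates and owns the LlamaContext.
  const context = await initLlama({ model: '/path/to/tts-model.gguf' })
  const tts = await CactusTTS.init(context, '/path/to/vocoder.gguf')

  // The speaker JSON shape depends on the vocoder; '{}' is a placeholder.
  const audio = await tts.generate('Hello from Cactus.', '{}')
  console.log(audio)

  await tts.release()      // releases only the vocoder
  await context.release()  // the context is released separately
}
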
package/src/vlm.ts ADDED
@@ -0,0 +1,70 @@
+ import {
+   initLlama,
+   initMultimodal,
+   multimodalCompletion,
+   LlamaContext,
+ } from './index'
+ import type {
+   ContextParams,
+   CompletionParams,
+   CactusOAICompatibleMessage,
+   NativeCompletionResult,
+ } from './index'
+
+ export type VLMContextParams = ContextParams & {
+   mmproj: string
+ }
+
+ export type VLMCompletionParams = Omit<CompletionParams, 'prompt'> & {
+   images?: string[]
+ }
+
+ export class CactusVLM {
+   private context: LlamaContext
+
+   private constructor(context: LlamaContext) {
+     this.context = context
+   }
+
+   static async init(
+     params: VLMContextParams,
+     onProgress?: (progress: number) => void,
+   ): Promise<CactusVLM> {
+     const context = await initLlama(params, onProgress)
+
+     // Explicitly disable GPU for the multimodal projector for stability.
+     await initMultimodal(context.id, params.mmproj, false)
+
+     return new CactusVLM(context)
+   }
+
+   async completion(
+     messages: CactusOAICompatibleMessage[],
+     params: VLMCompletionParams = {},
+     callback?: (data: any) => void,
+   ): Promise<NativeCompletionResult> {
+     if (params.images && params.images.length > 0) {
+       const formattedPrompt = await this.context.getFormattedChat(messages)
+       const prompt =
+         typeof formattedPrompt === 'string'
+           ? formattedPrompt
+           : formattedPrompt.prompt
+       return multimodalCompletion(
+         this.context.id,
+         prompt,
+         params.images,
+         { ...params, prompt, emit_partial_completion: !!callback },
+       )
+     }
+     return this.context.completion({ messages, ...params }, callback)
+   }
+
+   async rewind(): Promise<void> {
+     // @ts-ignore
+     return this.context?.rewind()
+   }
+
+   async release(): Promise<void> {
+     return this.context.release()
+   }
+ }
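
CactusVLM branches on params.images: when images are supplied it formats the chat into a raw prompt and routes through multimodalCompletion, otherwise it falls back to the ordinary completion path. A usage sketch under the same assumptions as above (file paths are illustrative):

import { CactusVLM } from 'cactus-react-native'

async function describe() {
  const vlm = await CactusVLM.init({
    model: '/path/to/vlm.gguf',     // assumed ContextParams field
    mmproj: '/path/to/mmproj.gguf', // projector path required by VLMContextParams
  })

  // With images: routed through multimodalCompletion.
  const withImage = await vlm.completion(
    [{ role: 'user', content: 'Describe this image.' }],
    { images: ['/path/to/photo.jpg'] },
  )

  // Without images: falls back to context.completion.
  const textOnly = await vlm.completion([
    { role: 'user', content: 'What model are you?' },
  ])

  console.log(withImage, textOnly)
  await vlm.release()
}
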
package/lib/commonjs/NativeCactus.js DELETED
@@ -1,10 +0,0 @@
- "use strict";
-
- Object.defineProperty(exports, "__esModule", {
-   value: true
- });
- exports.default = void 0;
- var _reactNative = require("react-native");
- // New TTS/Audio types
- var _default = exports.default = _reactNative.TurboModuleRegistry.get('Cactus');
- //# sourceMappingURL=NativeCactus.js.map
package/lib/commonjs/chat.js DELETED
@@ -1,37 +0,0 @@
- "use strict";
-
- Object.defineProperty(exports, "__esModule", {
-   value: true
- });
- exports.formatChat = formatChat;
- function formatChat(messages) {
-   const chat = [];
-   messages.forEach(currMsg => {
-     const role = currMsg.role || '';
-     let content = '';
-     if ('content' in currMsg) {
-       if (typeof currMsg.content === 'string') {
-         ;
-         ({
-           content
-         } = currMsg);
-       } else if (Array.isArray(currMsg.content)) {
-         currMsg.content.forEach(part => {
-           if ('text' in part) {
-             content += `${content ? '\n' : ''}${part.text}`;
-           }
-         });
-       } else {
-         throw new TypeError("Invalid 'content' type (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
-       }
-     } else {
-       throw new Error("Missing 'content' (ref: https://github.com/ggerganov/llama.cpp/issues/8367)");
-     }
-     chat.push({
-       role,
-       content
-     });
-   });
-   return chat;
- }
- //# sourceMappingURL=chat.js.map
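
For reference, the removed formatChat normalized OpenAI-style messages whose content is an array of text parts into plain strings, joining the parts with newlines. A standalone TypeScript sketch of the same behavior (the Message and TextPart types are reconstructed for illustration, not taken from the package):

type TextPart = { type?: string; text: string }
type Message = { role?: string; content?: string | TextPart[] }

function formatChat(messages: Message[]): { role: string; content: string }[] {
  return messages.map((msg) => {
    const role = msg.role || ''
    let content = ''
    if (!('content' in msg)) {
      throw new Error("Missing 'content'")
    }
    if (typeof msg.content === 'string') {
      content = msg.content
    } else if (Array.isArray(msg.content)) {
      // Newline-join the text parts, as the removed implementation did.
      for (const part of msg.content) {
        if ('text' in part) content += `${content ? '\n' : ''}${part.text}`
      }
    } else {
      throw new TypeError("Invalid 'content' type")
    }
    return { role, content }
  })
}

// formatChat([{ role: 'user', content: [{ text: 'a' }, { text: 'b' }] }])
// returns [{ role: 'user', content: 'a\nb' }]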