cactus-react-native 0.2.8 → 0.2.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. package/README.md +1 -10
  2. package/android/src/main/jniLibs/arm64-v8a/libcactus.so +0 -0
  3. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8.so +0 -0
  4. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2.so +0 -0
  5. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod.so +0 -0
  6. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_dotprod_i8mm.so +0 -0
  7. package/android/src/main/jniLibs/arm64-v8a/libcactus_v8_2_i8mm.so +0 -0
  8. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Headers/minja/chat-template.hpp +1 -0
  9. package/ios/cactus.xcframework/ios-arm64/cactus.framework/Info.plist +0 -0
  10. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Headers/minja/chat-template.hpp +1 -0
  11. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/Info.plist +0 -0
  12. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/_CodeSignature/CodeResources +1 -1
  13. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
  14. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Headers/minja/chat-template.hpp +1 -0
  15. package/ios/cactus.xcframework/tvos-arm64/cactus.framework/Info.plist +0 -0
  16. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Headers/minja/chat-template.hpp +1 -0
  17. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/Info.plist +0 -0
  18. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/_CodeSignature/CodeResources +1 -1
  19. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
  20. package/lib/commonjs/agent.js +3 -0
  21. package/lib/commonjs/agent.js.map +1 -1
  22. package/lib/commonjs/lm.js +73 -0
  23. package/lib/commonjs/lm.js.map +1 -1
  24. package/lib/commonjs/remote.js +36 -89
  25. package/lib/commonjs/remote.js.map +1 -1
  26. package/lib/commonjs/tools.js.map +1 -1
  27. package/lib/commonjs/vlm.js +5 -2
  28. package/lib/commonjs/vlm.js.map +1 -1
  29. package/lib/module/agent.js +3 -0
  30. package/lib/module/agent.js.map +1 -1
  31. package/lib/module/lm.js +74 -1
  32. package/lib/module/lm.js.map +1 -1
  33. package/lib/module/remote.js +36 -89
  34. package/lib/module/remote.js.map +1 -1
  35. package/lib/module/tools.js.map +1 -1
  36. package/lib/module/vlm.js +5 -2
  37. package/lib/module/vlm.js.map +1 -1
  38. package/lib/typescript/agent.d.ts +1 -0
  39. package/lib/typescript/agent.d.ts.map +1 -1
  40. package/lib/typescript/lm.d.ts +6 -1
  41. package/lib/typescript/lm.d.ts.map +1 -1
  42. package/lib/typescript/remote.d.ts +5 -4
  43. package/lib/typescript/remote.d.ts.map +1 -1
  44. package/lib/typescript/tools.d.ts +15 -14
  45. package/lib/typescript/tools.d.ts.map +1 -1
  46. package/lib/typescript/vlm.d.ts +1 -0
  47. package/lib/typescript/vlm.d.ts.map +1 -1
  48. package/package.json +1 -1
  49. package/src/agent.ts +3 -0
  50. package/src/lm.ts +88 -2
  51. package/src/remote.ts +43 -105
  52. package/src/tools.ts +14 -1
  53. package/src/vlm.ts +6 -2
package/src/lm.ts CHANGED
@@ -11,7 +11,7 @@ import type {
11
11
  } from './index'
12
12
 
13
13
  import { Telemetry } from './telemetry'
14
- import { setCactusToken, getVertexAIEmbedding } from './remote'
14
+ import { setCactusToken, getVertexAIEmbedding, getTextCompletion } from './remote'
15
15
  import { ConversationHistoryManager } from './chat'
16
16
 
17
17
  interface CactusLMReturn {
@@ -139,7 +139,50 @@ export class CactusLM {
139
139
 
140
140
  completion = async (
141
141
  messages: CactusOAICompatibleMessage[],
142
- params: CompletionParams = {},
142
+ params: CompletionParams & { mode?: string } = {},
143
+ callback?: (data: any) => void,
144
+ ): Promise<NativeCompletionResult> => {
145
+ const mode = params.mode || 'local';
146
+
147
+ let result: NativeCompletionResult;
148
+ let lastError: Error | null = null;
149
+
150
+ if (mode === 'remote') {
151
+ result = await this._handleRemoteCompletion(messages, callback);
152
+ } else if (mode === 'local') {
153
+ result = await this._handleLocalCompletion(messages, params, callback);
154
+ } else if (mode === 'localfirst') {
155
+ try {
156
+ result = await this._handleLocalCompletion(messages, params, callback);
157
+ } catch (e) {
158
+ lastError = e as Error;
159
+ try {
160
+ result = await this._handleRemoteCompletion(messages, callback);
161
+ } catch (remoteError) {
162
+ throw lastError;
163
+ }
164
+ }
165
+ } else if (mode === 'remotefirst') {
166
+ try {
167
+ result = await this._handleRemoteCompletion(messages, callback);
168
+ } catch (e) {
169
+ lastError = e as Error;
170
+ try {
171
+ result = await this._handleLocalCompletion(messages, params, callback);
172
+ } catch (localError) {
173
+ throw lastError;
174
+ }
175
+ }
176
+ } else {
177
+ throw new Error('Invalid mode: ' + mode + '. Must be "local", "remote", "localfirst", or "remotefirst"');
178
+ }
179
+
180
+ return result;
181
+ }
182
+
183
+ private _handleLocalCompletion = async(
184
+ messages: CactusOAICompatibleMessage[],
185
+ params: CompletionParams,
143
186
  callback?: (data: any) => void,
144
187
  ): Promise<NativeCompletionResult> => {
145
188
  const { newMessages, requiresReset } =
@@ -166,6 +209,46 @@ export class CactusLM {
166
209
  return result;
167
210
  }
168
211
 
212
+ private async _handleRemoteCompletion(
213
+ messages: CactusOAICompatibleMessage[],
214
+ callback?: (data: any) => void,
215
+ ): Promise<NativeCompletionResult> {
216
+ const prompt = messages.map((m) => `${m.role}: ${m.content}`).join('\n');
217
+
218
+ const responseText = await getTextCompletion(messages);
219
+
220
+ if (callback) {
221
+ for (let i = 0; i < responseText.length; i++) {
222
+ callback({ token: responseText[i] });
223
+ }
224
+ }
225
+
226
+ return {
227
+ text: responseText,
228
+ reasoning_content: '',
229
+ tool_calls: [],
230
+ content: responseText,
231
+ tokens_predicted: responseText.split(' ').length,
232
+ tokens_evaluated: prompt.split(' ').length,
233
+ truncated: false,
234
+ stopped_eos: true,
235
+ stopped_word: '',
236
+ stopped_limit: 0,
237
+ stopping_word: '',
238
+ tokens_cached: 0,
239
+ timings: {
240
+ prompt_n: prompt.split(' ').length,
241
+ prompt_ms: 0,
242
+ prompt_per_token_ms: 0,
243
+ prompt_per_second: 0,
244
+ predicted_n: responseText.split(' ').length,
245
+ predicted_ms: 0,
246
+ predicted_per_token_ms: 0,
247
+ predicted_per_second: 0,
248
+ },
249
+ };
250
+ }
251
+
169
252
  async embedding(
170
253
  text: string,
171
254
  params?: EmbeddingParams,
@@ -235,4 +318,7 @@ export class CactusLM {
235
318
  return await this.run(() => this.context.stopCompletion())
236
319
  }
237
320
 
321
+ isJinjaSupported(): boolean {
322
+ return this.context.isJinjaSupported();
323
+ }
238
324
  }
package/src/remote.ts CHANGED
@@ -1,3 +1,5 @@
1
+ import type { CactusOAICompatibleMessage } from "./chat";
2
+
1
3
  let _cactusToken: string | null = null;
2
4
 
3
5
  export function setCactusToken(token: string | null): void {
@@ -5,58 +7,12 @@ export function setCactusToken(token: string | null): void {
5
7
  }
6
8
 
7
9
  export async function getVertexAIEmbedding(text: string): Promise<number[]> {
8
- if (_cactusToken === null) {
9
- throw new Error('CactusToken not set. Please call CactusLM.init with cactusToken parameter.');
10
- }
11
-
12
- const projectId = 'cactus-v1-452518';
13
- const location = 'us-central1';
14
- const modelId = 'text-embedding-005';
15
-
16
- const endpoint = `https://${location}-aiplatform.googleapis.com/v1/projects/${projectId}/locations/${location}/publishers/google/models/${modelId}:predict`;
17
-
18
- const headers = {
19
- 'Authorization': `Bearer ${_cactusToken}`,
20
- 'Content-Type': 'application/json',
21
- };
22
-
23
- const requestBody = {
24
- instances: [{ content: text }]
25
- };
26
-
27
- const response = await fetch(endpoint, {
28
- method: 'POST',
29
- headers,
30
- body: JSON.stringify(requestBody),
31
- });
32
-
33
- if (response.status === 401) {
34
- _cactusToken = null;
35
- throw new Error('Authentication failed. Please update your cactusToken.');
36
- } else if (!response.ok) {
37
- const errorText = await response.text();
38
- throw new Error(`HTTP ${response.status}: ${errorText}`);
39
- }
40
-
41
- const responseBody = await response.json();
42
-
43
- if (responseBody.error) {
44
- throw new Error(`API Error: ${responseBody.error.message}`);
45
- }
46
-
47
- const predictions = responseBody.predictions;
48
- if (!predictions || predictions.length === 0) {
49
- throw new Error('No predictions in response');
50
- }
51
-
52
- const embeddings = predictions[0].embeddings;
53
- const values = embeddings.values;
54
-
55
- return values;
10
+ text = text
11
+ throw new Error('Remote embedding is not currently supported. The Cactus library is in active development - if you need this functionality, please contact us at founders@cactuscompute.com');
56
12
  }
57
13
 
58
14
  export async function getVertexAICompletion(
59
- textPrompt: string,
15
+ messages: CactusOAICompatibleMessage[],
60
16
  imageData?: string,
61
17
  imagePath?: string,
62
18
  mimeType?: string,
@@ -64,48 +20,48 @@ export async function getVertexAICompletion(
64
20
  if (_cactusToken === null) {
65
21
  throw new Error('CactusToken not set. Please call CactusVLM.init with cactusToken parameter.');
66
22
  }
67
-
68
- const projectId = 'cactus-v1-452518';
69
- const location = 'global';
70
- const modelId = 'gemini-2.5-flash-lite-preview-06-17';
71
-
72
- const endpoint = `https://aiplatform.googleapis.com/v1/projects/${projectId}/locations/${location}/publishers/google/models/${modelId}:generateContent`;
23
+ const endpoint = 'https://openrouter.ai/api/v1/chat/completions';
73
24
 
74
25
  const headers = {
75
26
  'Authorization': `Bearer ${_cactusToken}`,
76
27
  'Content-Type': 'application/json',
77
28
  };
78
29
 
79
- const parts: any[] = [];
80
-
30
+ const requestBody = {
31
+ model: 'google/gemini-2.5-flash-lite',
32
+ messages: messages,
33
+ };
34
+
35
+ let imageUrl = ''
81
36
  if (imageData) {
82
- const detectedMimeType = mimeType || 'image/jpeg';
83
- parts.push({
84
- inlineData: {
85
- mimeType: detectedMimeType,
86
- data: imageData
87
- }
88
- });
37
+ imageUrl = `data:${mimeType || 'image/jpeg'};base64,${imageData}`
89
38
  } else if (imagePath) {
90
- const detectedMimeType = mimeType || detectMimeType(imagePath);
91
39
  const RNFS = require('react-native-fs');
92
40
  const base64Data = await RNFS.readFile(imagePath, 'base64');
93
- parts.push({
94
- inlineData: {
95
- mimeType: detectedMimeType,
96
- data: base64Data
97
- }
98
- });
41
+ imageUrl = `data:${mimeType || detectMimeType(imagePath)};base64,${base64Data}`
99
42
  }
100
-
101
- parts.push({ text: textPrompt });
102
43
 
103
- const requestBody = {
104
- contents: {
105
- role: 'user',
106
- parts: parts,
44
+ if (imageUrl) {
45
+ if (requestBody.messages[requestBody.messages.length - 1]?.role === 'user') {
46
+ requestBody.messages[requestBody.messages.length - 1] = {
47
+ role: 'user',
48
+ content: [
49
+ {
50
+ type: 'text',
51
+ text: requestBody.messages[requestBody.messages.length - 1]?.content || ''
52
+ },
53
+ {
54
+ type: 'image_url',
55
+ image_url: {
56
+ url: imageUrl
57
+ }
58
+ }
59
+ ]
60
+ }
61
+ }else{
62
+ console.warn('Image data provided but message is not a user message: ', requestBody.messages);
107
63
  }
108
- };
64
+ }
109
65
 
110
66
  const response = await fetch(endpoint, {
111
67
  method: 'POST',
@@ -122,39 +78,21 @@ export async function getVertexAICompletion(
122
78
  }
123
79
 
124
80
  const responseBody = await response.json();
125
-
126
- if (Array.isArray(responseBody)) {
127
- throw new Error('Unexpected response format: received array instead of object');
128
- }
129
-
130
- if (responseBody.error) {
131
- throw new Error(`API Error: ${responseBody.error.message}`);
132
- }
133
-
134
- const candidates = responseBody.candidates;
135
- if (!candidates || candidates.length === 0) {
136
- throw new Error('No candidates in response');
137
- }
138
-
139
- const content = candidates[0].content;
140
- const responseParts = content.parts;
141
- if (!responseParts || responseParts.length === 0) {
142
- throw new Error('No parts in response');
143
- }
144
-
145
- return responseParts[0].text || '';
81
+ const responseText = responseBody.choices[0].message.content;
82
+
83
+ return responseText;
146
84
  }
147
85
 
148
- export async function getTextCompletion(prompt: string): Promise<string> {
149
- return getVertexAICompletion(prompt);
86
+ export async function getTextCompletion(messages: CactusOAICompatibleMessage[]): Promise<string> {
87
+ return getVertexAICompletion(messages);
150
88
  }
151
89
 
152
- export async function getVisionCompletion(prompt: string, imagePath: string): Promise<string> {
153
- return getVertexAICompletion(prompt, undefined, imagePath);
90
+ export async function getVisionCompletion(messages: CactusOAICompatibleMessage[], imagePath: string): Promise<string> {
91
+ return getVertexAICompletion(messages, undefined, imagePath);
154
92
  }
155
93
 
156
- export async function getVisionCompletionFromData(prompt: string, imageData: string, mimeType?: string): Promise<string> {
157
- return getVertexAICompletion(prompt, imageData, undefined, mimeType);
94
+ export async function getVisionCompletionFromData(messages: CactusOAICompatibleMessage[], imageData: string, mimeType?: string): Promise<string> {
95
+ return getVertexAICompletion(messages, imageData, undefined, mimeType);
158
96
  }
159
97
 
160
98
  function detectMimeType(filePath: string): string {
package/src/tools.ts CHANGED
@@ -1,5 +1,18 @@
1
1
  import type { NativeCompletionResult } from "./NativeCactus";
2
2
 
3
+ export type OpenAIToolSchema = {
4
+ type: "function",
5
+ function: {
6
+ name: string,
7
+ description: string,
8
+ parameters: {
9
+ type: "object",
10
+ properties: {[key: string]: Parameter},
11
+ required: string[]
12
+ }
13
+ }
14
+ }
15
+
3
16
  interface Parameter {
4
17
  type: string,
5
18
  description: string,
@@ -32,7 +45,7 @@ export class Tools {
32
45
  return func;
33
46
  }
34
47
 
35
- getSchemas() {
48
+ getSchemas(): OpenAIToolSchema[] {
36
49
  return Array.from(this.tools.entries()).map(([name, { description, parameters, required }]) => ({
37
50
  type: "function",
38
51
  function: {
package/src/vlm.ts CHANGED
@@ -221,9 +221,9 @@ export class CactusVLM {
221
221
 
222
222
  let responseText: string;
223
223
  if (imagePath) {
224
- responseText = await getVisionCompletion(prompt, imagePath);
224
+ responseText = await getVisionCompletion(messages, imagePath);
225
225
  } else {
226
- responseText = await getTextCompletion(prompt);
226
+ responseText = await getTextCompletion(messages);
227
227
  }
228
228
 
229
229
  if (callback) {
@@ -269,4 +269,8 @@ export class CactusVLM {
269
269
  async stopCompletion(): Promise<void> {
270
270
  return await this.context.stopCompletion()
271
271
  }
272
+
273
+ isJinjaSupported(): boolean {
274
+ return this.context.isJinjaSupported();
275
+ }
272
276
  }