cactus-react-native 0.2.5 → 0.2.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/README.md +48 -0
  2. package/ios/cactus.xcframework/ios-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
  3. package/ios/cactus.xcframework/tvos-arm64_x86_64-simulator/cactus.framework/cactus +0 -0
  4. package/lib/commonjs/agent.js +74 -0
  5. package/lib/commonjs/agent.js.map +1 -0
  6. package/lib/commonjs/chat.js.map +1 -1
  7. package/lib/commonjs/index.js +17 -16
  8. package/lib/commonjs/index.js.map +1 -1
  9. package/lib/commonjs/lm.js +1 -4
  10. package/lib/commonjs/lm.js.map +1 -1
  11. package/lib/commonjs/telemetry.js +0 -1
  12. package/lib/commonjs/telemetry.js.map +1 -1
  13. package/lib/commonjs/vlm.js +0 -1
  14. package/lib/commonjs/vlm.js.map +1 -1
  15. package/lib/module/agent.js +69 -0
  16. package/lib/module/agent.js.map +1 -0
  17. package/lib/module/chat.js.map +1 -1
  18. package/lib/module/index.js +4 -17
  19. package/lib/module/index.js.map +1 -1
  20. package/lib/module/lm.js +1 -4
  21. package/lib/module/lm.js.map +1 -1
  22. package/lib/module/telemetry.js +0 -1
  23. package/lib/module/telemetry.js.map +1 -1
  24. package/lib/module/vlm.js +0 -1
  25. package/lib/module/vlm.js.map +1 -1
  26. package/lib/typescript/agent.d.ts +31 -0
  27. package/lib/typescript/agent.d.ts.map +1 -0
  28. package/lib/typescript/chat.d.ts +2 -0
  29. package/lib/typescript/chat.d.ts.map +1 -1
  30. package/lib/typescript/index.d.ts +2 -7
  31. package/lib/typescript/index.d.ts.map +1 -1
  32. package/lib/typescript/lm.d.ts.map +1 -1
  33. package/lib/typescript/telemetry.d.ts.map +1 -1
  34. package/lib/typescript/vlm.d.ts.map +1 -1
  35. package/package.json +1 -1
  36. package/src/agent.ts +114 -0
  37. package/src/chat.ts +3 -1
  38. package/src/index.ts +10 -16
  39. package/src/lm.ts +1 -4
  40. package/src/telemetry.ts +0 -1
  41. package/src/tools.ts +1 -1
  42. package/src/vlm.ts +0 -1
package/src/index.ts CHANGED
@@ -27,7 +27,6 @@ import type { CactusMessagePart, CactusOAICompatibleMessage } from './chat'
 import { formatChat } from './chat'
 import { Tools, parseAndExecuteTool } from './tools'
 import { Telemetry, type TelemetryParams } from './telemetry'
-
 export type {
   NativeContextParams,
   NativeLlamaContext,
@@ -45,9 +44,11 @@ export type {
   JinjaFormattedChatResult,
   NativeAudioDecodeResult,
 }
-
-export {Tools }
 export * from './remote'
+export {
+  Tools,
+  parseAndExecuteTool
+} from './tools'
 
 const EVENT_ON_INIT_CONTEXT_PROGRESS = '@Cactus_onInitContextProgress'
 const EVENT_ON_TOKEN = '@Cactus_onToken'
@@ -72,7 +73,6 @@ if (EventEmitter) {
       logListeners.forEach((listener) => listener(evt.level, evt.text))
     },
   )
-  // Trigger unset to use default log callback
   Cactus?.toggleNativeLog?.(false)?.catch?.(() => {})
 }
 
@@ -181,18 +181,12 @@ export class LlamaContext {
     this.model = model
   }
 
-  /**
-   * Load cached prompt & completion state from a file.
-   */
   async loadSession(filepath: string): Promise<NativeSessionLoadResult> {
     let path = filepath
     if (path.startsWith('file://')) path = path.slice(7)
     return Cactus.loadSession(this.id, path)
   }
 
-  /**
-   * Save current cached prompt & completion state to a file.
-   */
   async saveSession(
     filepath: string,
     options?: { tokenSize: number },
@@ -242,10 +236,10 @@ export class LlamaContext {
     recursionCount: number = 0,
     recursionLimit: number = 3
   ): Promise<NativeCompletionResult> {
-    if (!params.messages) { // tool calling only works with messages
+    if (!params.messages) {
       return this.completion(params, callback);
     }
-    if (!params.tools) { // no tools => default completion
+    if (!params.tools) {
       return this.completion(params, callback);
     }
     if (recursionCount >= recursionLimit) {
@@ -256,7 +250,7 @@ export class LlamaContext {
       }, callback);
     }
 
-    const messages = [...params.messages]; // avoid mutating the original messages
+    const messages = [...params.messages];
 
     const result = await this.completion({
       ...params,
@@ -307,7 +301,7 @@ export class LlamaContext {
       emit_partial_completion: !!callback,
     }
     if (params.messages) {
-      // messages always win
+
       const formattedResult = await this.getFormattedChat(
         params.messages,
         params.chat_template || params.chatTemplate,
@@ -502,7 +496,6 @@ export async function loadLlamaModelInfo(model: string): Promise<Object> {
 }
 
 const poolTypeMap = {
-  // -1 is unspecified as undefined
   none: 0,
   mean: 1,
   cls: 2,
@@ -665,4 +658,5 @@ export const getDeviceInfo = async (contextId: number): Promise<NativeDeviceInfo
 
 export { CactusLM } from './lm';
 export { CactusVLM } from './vlm';
-export { CactusTTS } from './tts';
+export { CactusTTS } from './tts';
+export { CactusAgent } from './agent';
package/src/lm.ts CHANGED
@@ -23,8 +23,6 @@ export class CactusLM {
   protected context: LlamaContext
   protected conversationHistoryManager: ConversationHistoryManager
 
-  // the initPromise enables a "async singleton" initialization pattern which
-  // protects against a race condition in the event of multiple init attempts
   private static _initCache: Map<string, Promise<CactusLMReturn>> = new Map();
 
   private static getCacheKey(params: ContextParams, cactusToken?: string, retryOptions?: { maxRetries?: number; delayMs?: number }): string {
@@ -49,7 +47,6 @@ export class CactusLM {
 
     const key = CactusLM.getCacheKey(params, cactusToken, retryOptions);
     if (CactusLM._initCache.has(key)) {
-      // concurrent initialization calls with the same params all get the same cached Promise
      return CactusLM._initCache.get(key)!;
     }
 
@@ -114,7 +111,7 @@ export class CactusLM {
 
     const result = await initPromise;
     if (result.error) {
-      CactusLM._initCache.delete(key); // Reset on failure to allow retries
+      CactusLM._initCache.delete(key);
     }
     return result;
   }
package/src/telemetry.ts CHANGED
@@ -1,5 +1,4 @@
 import { Platform } from 'react-native'
-// Import package.json to get version
 const packageJson = require('../package.json');
 import { PROJECT_ID } from './projectId';
 
package/src/tools.ts CHANGED
@@ -3,7 +3,7 @@ import type { NativeCompletionResult } from "./NativeCactus";
 interface Parameter {
   type: string,
   description: string,
-  required?: boolean // parameter is optional if not specified
+  required?: boolean
 }
 
 interface Tool {
package/src/vlm.ts CHANGED
@@ -31,7 +31,6 @@ export class CactusVLM {
   private context: LlamaContext
   protected conversationHistoryManager: ConversationHistoryManager
 
-  // see CactusLM for detailed docs
   private static _initCache: Map<string, Promise<CactusVLMReturn>> = new Map();
 
   private static getCacheKey(params: VLMContextParams, cactusToken?: string, retryOptions?: { maxRetries?: number; delayMs?: number }): string {