@reverbia/sdk 1.0.0-next.20251205183506 → 1.0.0-next.20251208094446

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
@@ -0,0 +1,236 @@
+ // src/expo/useChat.ts
+ import { useCallback, useEffect, useRef, useState } from "react";
+
+ // src/clientConfig.ts
+ var BASE_URL = "https://ai-portal-dev.zetachain.com";
+
+ // src/expo/useChat.ts
+ function useChat(options) {
+   const {
+     getToken,
+     baseUrl = BASE_URL,
+     onData: globalOnData,
+     onFinish,
+     onError
+   } = options || {};
+   const [isLoading, setIsLoading] = useState(false);
+   const abortControllerRef = useRef(null);
+   const stop = useCallback(() => {
+     if (abortControllerRef.current) {
+       abortControllerRef.current.abort();
+       abortControllerRef.current = null;
+     }
+   }, []);
+   useEffect(() => {
+     return () => {
+       if (abortControllerRef.current) {
+         abortControllerRef.current.abort();
+         abortControllerRef.current = null;
+       }
+     };
+   }, []);
+   const sendMessage = useCallback(
+     async ({
+       messages,
+       model,
+       onData
+     }) => {
+       if (!messages?.length) {
+         const errorMsg = "messages are required to call sendMessage.";
+         if (onError) onError(new Error(errorMsg));
+         return { data: null, error: errorMsg };
+       }
+       if (!model) {
+         const errorMsg = "model is required to call sendMessage.";
+         if (onError) onError(new Error(errorMsg));
+         return { data: null, error: errorMsg };
+       }
+       if (!getToken) {
+         const errorMsg = "Token getter function is required.";
+         if (onError) onError(new Error(errorMsg));
+         return { data: null, error: errorMsg };
+       }
+       if (abortControllerRef.current) {
+         abortControllerRef.current.abort();
+       }
+       const abortController = new AbortController();
+       abortControllerRef.current = abortController;
+       setIsLoading(true);
+       try {
+         const token = await getToken();
+         if (!token) {
+           const errorMsg = "No access token available.";
+           setIsLoading(false);
+           if (onError) onError(new Error(errorMsg));
+           return { data: null, error: errorMsg };
+         }
+         const result = await new Promise((resolve) => {
+           const xhr = new XMLHttpRequest();
+           const url = `${baseUrl}/api/v1/chat/completions`;
+           let accumulatedContent = "";
+           let completionId = "";
+           let completionModel = "";
+           let accumulatedUsage = {};
+           let finishReason;
+           let lastProcessedIndex = 0;
+           let incompleteLineBuffer = "";
+           const abortHandler = () => {
+             xhr.abort();
+           };
+           abortController.signal.addEventListener("abort", abortHandler);
+           xhr.open("POST", url, true);
+           xhr.setRequestHeader("Content-Type", "application/json");
+           xhr.setRequestHeader("Authorization", `Bearer ${token}`);
+           xhr.setRequestHeader("Accept", "text/event-stream");
+           xhr.onprogress = () => {
+             const newData = xhr.responseText.substring(lastProcessedIndex);
+             lastProcessedIndex = xhr.responseText.length;
+             const dataToProcess = incompleteLineBuffer + newData;
+             incompleteLineBuffer = "";
+             const lines = dataToProcess.split("\n");
+             if (!newData.endsWith("\n") && lines.length > 0) {
+               incompleteLineBuffer = lines.pop() || "";
+             }
+             for (const line of lines) {
+               if (line.startsWith("data: ")) {
+                 const data = line.substring(6).trim();
+                 if (data === "[DONE]") continue;
+                 try {
+                   const chunk = JSON.parse(data);
+                   if (chunk.id && !completionId) {
+                     completionId = chunk.id;
+                   }
+                   if (chunk.model && !completionModel) {
+                     completionModel = chunk.model;
+                   }
+                   if (chunk.usage) {
+                     accumulatedUsage = { ...accumulatedUsage, ...chunk.usage };
+                   }
+                   if (chunk.choices?.[0]) {
+                     const choice = chunk.choices[0];
+                     if (choice.delta?.content) {
+                       const content = choice.delta.content;
+                       accumulatedContent += content;
+                       if (onData) onData(content);
+                       if (globalOnData) globalOnData(content);
+                     }
+                     if (choice.finish_reason) {
+                       finishReason = choice.finish_reason;
+                     }
+                   }
+                 } catch {
+                 }
+               }
+             }
+           };
+           xhr.onload = () => {
+             abortController.signal.removeEventListener("abort", abortHandler);
+             if (incompleteLineBuffer) {
+               const line = incompleteLineBuffer.trim();
+               if (line.startsWith("data: ")) {
+                 const data = line.substring(6).trim();
+                 if (data !== "[DONE]") {
+                   try {
+                     const chunk = JSON.parse(data);
+                     if (chunk.id && !completionId) {
+                       completionId = chunk.id;
+                     }
+                     if (chunk.model && !completionModel) {
+                       completionModel = chunk.model;
+                     }
+                     if (chunk.usage) {
+                       accumulatedUsage = {
+                         ...accumulatedUsage,
+                         ...chunk.usage
+                       };
+                     }
+                     if (chunk.choices?.[0]) {
+                       const choice = chunk.choices[0];
+                       if (choice.delta?.content) {
+                         const content = choice.delta.content;
+                         accumulatedContent += content;
+                         if (onData) onData(content);
+                         if (globalOnData) globalOnData(content);
+                       }
+                       if (choice.finish_reason) {
+                         finishReason = choice.finish_reason;
+                       }
+                     }
+                   } catch {
+                   }
+                 }
+               }
+               incompleteLineBuffer = "";
+             }
+             if (xhr.status >= 200 && xhr.status < 300) {
+               const completion = {
+                 id: completionId,
+                 model: completionModel,
+                 choices: [
+                   {
+                     index: 0,
+                     message: {
+                       role: "assistant",
+                       content: [{ type: "text", text: accumulatedContent }]
+                     },
+                     finish_reason: finishReason
+                   }
+                 ],
+                 usage: Object.keys(accumulatedUsage).length > 0 ? accumulatedUsage : void 0
+               };
+               setIsLoading(false);
+               if (onFinish) onFinish(completion);
+               resolve({ data: completion, error: null });
+             } else {
+               const errorMsg = `Request failed with status ${xhr.status}`;
+               setIsLoading(false);
+               if (onError) onError(new Error(errorMsg));
+               resolve({ data: null, error: errorMsg });
+             }
+           };
+           xhr.onerror = () => {
+             abortController.signal.removeEventListener("abort", abortHandler);
+             const errorMsg = "Network error";
+             setIsLoading(false);
+             if (onError) onError(new Error(errorMsg));
+             resolve({ data: null, error: errorMsg });
+           };
+           xhr.onabort = () => {
+             abortController.signal.removeEventListener("abort", abortHandler);
+             setIsLoading(false);
+             resolve({ data: null, error: "Request aborted" });
+           };
+           xhr.send(
+             JSON.stringify({
+               messages,
+               model,
+               stream: true
+             })
+           );
+         });
+         return result;
+       } catch (err) {
+         const errorMsg = err instanceof Error ? err.message : "Failed to send message.";
+         const errorObj = err instanceof Error ? err : new Error(errorMsg);
+         setIsLoading(false);
+         if (onError) {
+           onError(errorObj);
+         }
+         return { data: null, error: errorMsg };
+       } finally {
+         if (abortControllerRef.current === abortController) {
+           abortControllerRef.current = null;
+         }
+       }
+     },
+     [getToken, baseUrl, globalOnData, onFinish, onError]
+   );
+   return {
+     isLoading,
+     sendMessage,
+     stop
+   };
+ }
+ export {
+   useChat
+ };
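Note: the hook streams over XMLHttpRequest and parses SSE frames in onprogress, presumably because React Native's fetch does not expose streaming response bodies. A minimal consumption sketch follows; the import subpath, fetchToken helper, and model id are illustrative assumptions, not API confirmed by this diff.

// Hypothetical usage sketch (assumed: import path, fetchToken, model id).
import { useChat } from "@reverbia/sdk/expo";

declare function fetchToken(): Promise<string>; // assumed auth helper

export function useAssistant() {
  const { isLoading, sendMessage, stop } = useChat({
    getToken: fetchToken,
    onError: (err: Error) => console.error("[chat]", err.message),
  });

  const ask = async (question: string) => {
    const { data, error } = await sendMessage({
      messages: [{ role: "user", content: question }],
      model: "example-model", // placeholder model id
      onData: (delta: string) => console.log("delta:", delta), // streamed chunks
    });
    return error ?? data?.choices[0]?.message;
  };

  return { isLoading, ask, stop };
}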
@@ -0,0 +1,61 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/lib/polyfills/index.ts
+ var index_exports = {};
+ __export(index_exports, {
+   needsTextDecoderStreamPolyfill: () => needsPolyfill
+ });
+ module.exports = __toCommonJS(index_exports);
+
+ // src/lib/polyfills/textDecoderStream.ts
+ var needsPolyfill = typeof globalThis.TextDecoderStream === "undefined";
+ if (needsPolyfill && typeof globalThis.TransformStream !== "undefined") {
+   class TextDecoderStreamPolyfill {
+     constructor(label = "utf-8", options) {
+       this.decoder = new TextDecoder(label, options);
+       const decoder = this.decoder;
+       this.transform = new TransformStream({
+         transform(chunk, controller) {
+           const text = decoder.decode(chunk, { stream: true });
+           if (text) {
+             controller.enqueue(text);
+           }
+         },
+         flush(controller) {
+           const text = decoder.decode();
+           if (text) {
+             controller.enqueue(text);
+           }
+         }
+       });
+     }
+     get readable() {
+       return this.transform.readable;
+     }
+     get writable() {
+       return this.transform.writable;
+     }
+   }
+   globalThis.TextDecoderStream = TextDecoderStreamPolyfill;
+ }
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   needsTextDecoderStreamPolyfill
+ });
@@ -0,0 +1,9 @@
+ /**
+  * TextDecoderStream polyfill for React Native compatibility.
+  *
+  * React Native doesn't have TextDecoderStream, but it does have TextDecoder.
+  * This polyfill creates a TransformStream that uses TextDecoder internally.
+  */
+ declare const needsPolyfill: boolean;
+
+ export { needsPolyfill as needsTextDecoderStreamPolyfill };
@@ -0,0 +1,9 @@
+ /**
+  * TextDecoderStream polyfill for React Native compatibility.
+  *
+  * React Native doesn't have TextDecoderStream, but it does have TextDecoder.
+  * This polyfill creates a TransformStream that uses TextDecoder internally.
+  */
+ declare const needsPolyfill: boolean;
+
+ export { needsPolyfill as needsTextDecoderStreamPolyfill };
@@ -0,0 +1,34 @@
+ // src/lib/polyfills/textDecoderStream.ts
+ var needsPolyfill = typeof globalThis.TextDecoderStream === "undefined";
+ if (needsPolyfill && typeof globalThis.TransformStream !== "undefined") {
+   class TextDecoderStreamPolyfill {
+     constructor(label = "utf-8", options) {
+       this.decoder = new TextDecoder(label, options);
+       const decoder = this.decoder;
+       this.transform = new TransformStream({
+         transform(chunk, controller) {
+           const text = decoder.decode(chunk, { stream: true });
+           if (text) {
+             controller.enqueue(text);
+           }
+         },
+         flush(controller) {
+           const text = decoder.decode();
+           if (text) {
+             controller.enqueue(text);
+           }
+         }
+       });
+     }
+     get readable() {
+       return this.transform.readable;
+     }
+     get writable() {
+       return this.transform.writable;
+     }
+   }
+   globalThis.TextDecoderStream = TextDecoderStreamPolyfill;
+ }
+ export {
+   needsPolyfill as needsTextDecoderStreamPolyfill
+ };
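The polyfill installs itself as a side effect of importing the module; the exported flag only reports whether the environment needed it. A short sketch, assuming the module is reachable via a polyfills subpath export (not confirmed by this diff):

// Hypothetical import path; the import's side effect is what matters.
import { needsTextDecoderStreamPolyfill } from "@reverbia/sdk/polyfills";

// After the import, TextDecoderStream is defined wherever TransformStream
// exists, so streaming/SSE decoding code can use it unconditionally.
console.log("polyfill was needed:", needsTextDecoderStreamPolyfill);
const decoder = new TextDecoderStream("utf-8"); // safe after the import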
@@ -0,0 +1,182 @@
+ import {
+   getTextGenerationPipeline
+ } from "./chunk-Q6FVPTTV.mjs";
+
+ // src/lib/tools/selector.ts
+ var DEFAULT_TOOL_SELECTOR_MODEL = "Xenova/LaMini-GPT-124M";
+ function buildToolSelectionPrompt(userMessage, tools) {
+   const toolList = tools.map((t) => `${t.name} (${t.description})`).join("\n");
+   return `Pick the best tool for the task. Reply with ONLY the tool name.
+
+ Available tools:
+ ${toolList}
+ none (no tool needed)
+
+ Task: "${userMessage}"
+
+ Best tool:`;
+ }
+ function buildParamExtractionPrompt(userMessage, paramName, paramDescription) {
+   const desc = paramDescription ? ` (${paramDescription})` : "";
+   return `Extract the value for "${paramName}"${desc} from the user message. Reply with ONLY the extracted value, nothing else.
+
+ User message: "${userMessage}"
+
+ Value for ${paramName}:`;
+ }
+ async function extractParams(userMessage, tool, options) {
+   const params = {};
+   if (!tool.parameters || tool.parameters.length === 0) return params;
+   const { model, device } = options;
+   try {
+     const pipeline = await getTextGenerationPipeline({
+       model,
+       device,
+       dtype: "q4"
+     });
+     for (const param of tool.parameters) {
+       const prompt = buildParamExtractionPrompt(
+         userMessage,
+         param.name,
+         param.description
+       );
+       const output = await pipeline(prompt, {
+         max_new_tokens: 32,
+         // Allow reasonable length for parameter values
+         temperature: 0,
+         do_sample: false,
+         return_full_text: false
+       });
+       const generatedText = output?.[0]?.generated_text || output?.generated_text || "";
+       const extractedValue = generatedText.trim().split("\n")[0].trim();
+       console.log(
+         `[Tool Selector] Extracted param "${param.name}":`,
+         extractedValue
+       );
+       params[param.name] = extractedValue || userMessage;
+     }
+   } catch (error) {
+     console.error("[Tool Selector] Error extracting params:", error);
+     for (const param of tool.parameters) {
+       params[param.name] = userMessage;
+     }
+   }
+   return params;
+ }
+ async function parseToolSelectionResponse(response, tools, userMessage, options) {
+   console.log("[Tool Selector] Raw response:", response);
+   const cleaned = response.toLowerCase().trim().split(/[\s\n,.]+/)[0].replace(/[^a-z0-9_-]/g, "");
+   console.log("[Tool Selector] Parsed tool name:", cleaned);
+   if (cleaned === "none" || cleaned === "null" || cleaned === "") {
+     console.log("[Tool Selector] No tool selected");
+     return { toolSelected: false };
+   }
+   const selectedTool = tools.find((t) => t.name.toLowerCase() === cleaned);
+   if (!selectedTool) {
+     const fuzzyTool = tools.find(
+       (t) => t.name.toLowerCase().includes(cleaned) || cleaned.includes(t.name.toLowerCase())
+     );
+     if (fuzzyTool) {
+       console.log(`[Tool Selector] Fuzzy matched tool: ${fuzzyTool.name}`);
+       const params2 = await extractParams(userMessage, fuzzyTool, options);
+       return {
+         toolSelected: true,
+         toolName: fuzzyTool.name,
+         parameters: params2,
+         confidence: 0.6
+       };
+     }
+     console.warn(`[Tool Selector] Unknown tool: ${cleaned}`);
+     return { toolSelected: false };
+   }
+   const params = await extractParams(userMessage, selectedTool, options);
+   console.log(`[Tool Selector] Selected tool: ${selectedTool.name}`, params);
+   return {
+     toolSelected: true,
+     toolName: selectedTool.name,
+     parameters: params,
+     confidence: 0.9
+   };
+ }
+ async function selectTool(userMessage, tools, options = {}) {
+   const {
+     model = DEFAULT_TOOL_SELECTOR_MODEL,
+     signal,
+     device = "wasm"
+   } = options;
+   if (!tools.length) {
+     return { toolSelected: false };
+   }
+   console.log(
+     `[Tool Selector] analyzing message: "${userMessage}" with model ${model}`
+   );
+   try {
+     const selectorPipeline = await getTextGenerationPipeline({
+       model,
+       device,
+       dtype: "q4"
+       // Aggressive quantization for speed
+     });
+     const prompt = buildToolSelectionPrompt(userMessage, tools);
+     const output = await selectorPipeline(prompt, {
+       max_new_tokens: 4,
+       // Just need the tool name
+       temperature: 0,
+       // Deterministic
+       do_sample: false,
+       return_full_text: false
+     });
+     if (signal?.aborted) {
+       return { toolSelected: false };
+     }
+     const generatedText = output?.[0]?.generated_text || output?.generated_text || "";
+     return await parseToolSelectionResponse(generatedText, tools, userMessage, {
+       model,
+       device
+     });
+   } catch (error) {
+     console.error("[Tool Selector] Error:", error);
+     return { toolSelected: false };
+   }
+ }
+ var preloadPromise = null;
+ async function preloadToolSelectorModel(options = {}) {
+   if (preloadPromise) {
+     return preloadPromise;
+   }
+   const { model = DEFAULT_TOOL_SELECTOR_MODEL, device = "wasm" } = options;
+   console.log(`[Tool Selector] Preloading model: ${model}`);
+   preloadPromise = getTextGenerationPipeline({
+     model,
+     device,
+     dtype: "q4"
+   }).then(() => {
+     console.log(`[Tool Selector] Model preloaded: ${model}`);
+   }).catch((error) => {
+     console.warn("[Tool Selector] Failed to preload model:", error);
+     preloadPromise = null;
+   });
+   return preloadPromise;
+ }
+ async function executeTool(tool, params) {
+   try {
+     console.log(
+       `[Tool Selector] Executing tool ${tool.name} with params:`,
+       params
+     );
+     const result = await tool.execute(params);
+     console.log(`[Tool Selector] Tool ${tool.name} execution result:`, result);
+     return { success: true, result };
+   } catch (error) {
+     const errorMessage = error instanceof Error ? error.message : "Tool execution failed";
+     console.error(`[Tool Selector] Tool ${tool.name} failed:`, errorMessage);
+     return { success: false, error: errorMessage };
+   }
+ }
+
+ export {
+   DEFAULT_TOOL_SELECTOR_MODEL,
+   selectTool,
+   preloadToolSelectorModel,
+   executeTool
+ };
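Taken together: selectTool picks a tool name with a small local LLM, extractParams fills each declared parameter (falling back to the raw user message), and executeTool wraps execution with success/error reporting. A usage sketch under an assumed import path and an illustrative tool definition:

// Hypothetical tool and import path — shapes inferred from the chunk above.
import { selectTool, executeTool } from "@reverbia/sdk";

const tools = [
  {
    name: "get_weather",
    description: "Look up the current weather for a city",
    parameters: [{ name: "city", description: "City name" }],
    execute: async (params: Record<string, string>) => `Sunny in ${params.city}`,
  },
];

const selection = await selectTool("What's the weather in Lisbon?", tools);
if (selection.toolSelected && selection.toolName) {
  const tool = tools.find((t) => t.name === selection.toolName);
  if (tool) {
    const outcome = await executeTool(tool, selection.parameters ?? {});
    console.log(outcome); // { success: true, result: "Sunny in Lisbon" } on the happy path
  }
}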
@@ -0,0 +1,6 @@
+ // src/lib/chat/constants.ts
+ var DEFAULT_LOCAL_CHAT_MODEL = "onnx-community/Qwen2.5-0.5B-Instruct";
+
+ export {
+   DEFAULT_LOCAL_CHAT_MODEL
+ };
@@ -0,0 +1,28 @@
+ // src/lib/chat/pipeline.ts
+ var sharedPipeline = null;
+ var currentModel = null;
+ var currentDevice = null;
+ async function getTextGenerationPipeline(options) {
+   const { model, device = "wasm", dtype = "q4" } = options;
+   if (sharedPipeline && currentModel === model && currentDevice === device) {
+     return sharedPipeline;
+   }
+   const { pipeline, env } = await import("./transformers.node-LUTOZWVQ.mjs");
+   env.allowLocalModels = false;
+   if (env.backends?.onnx) {
+     env.backends.onnx.logLevel = "fatal";
+   }
+   console.log(`[Pipeline] Loading model: ${model} on ${device}...`);
+   sharedPipeline = await pipeline("text-generation", model, {
+     dtype,
+     device
+   });
+   currentModel = model;
+   currentDevice = device;
+   console.log(`[Pipeline] Model loaded: ${model}`);
+   return sharedPipeline;
+ }
+
+ export {
+   getTextGenerationPipeline
+ };
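This chunk keeps a single cached pipeline keyed by model and device, so repeated calls are cheap but requesting a different model evicts the previous one. An illustration of the cache semantics, read directly from the code above:

// Same model + device hits the cache and returns the same instance.
const a = await getTextGenerationPipeline({ model: "onnx-community/Qwen2.5-0.5B-Instruct" });
const b = await getTextGenerationPipeline({ model: "onnx-community/Qwen2.5-0.5B-Instruct" });
// a === b. A different model (or device) reloads and replaces the shared slot,
// so alternating between two models pays the load cost every time.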
@@ -0,0 +1,7 @@
+ import {
+   DEFAULT_LOCAL_CHAT_MODEL
+ } from "./chunk-LVWIZZZP.mjs";
+ import "./chunk-FBCDBTKJ.mjs";
+ export {
+   DEFAULT_LOCAL_CHAT_MODEL
+ };
@@ -0,0 +1,52 @@
+ import {
+   DEFAULT_LOCAL_CHAT_MODEL
+ } from "./chunk-LVWIZZZP.mjs";
+ import {
+   getTextGenerationPipeline
+ } from "./chunk-Q6FVPTTV.mjs";
+ import "./chunk-FBCDBTKJ.mjs";
+
+ // src/lib/chat/generation.ts
+ async function generateLocalChatCompletion(messages, options = {}) {
+   const {
+     model = DEFAULT_LOCAL_CHAT_MODEL,
+     temperature = 0.7,
+     max_tokens = 1024,
+     top_p = 0.9,
+     onToken,
+     signal
+   } = options;
+   const { TextStreamer } = await import("./transformers.node-LUTOZWVQ.mjs");
+   const chatPipeline = await getTextGenerationPipeline({
+     model,
+     device: "wasm",
+     dtype: "q4"
+   });
+   class CallbackStreamer extends TextStreamer {
+     constructor(tokenizer, cb) {
+       super(tokenizer, {
+         skip_prompt: true,
+         skip_special_tokens: true
+       });
+       this.cb = cb;
+     }
+     on_finalized_text(text) {
+       if (signal?.aborted) {
+         throw new Error("AbortError");
+       }
+       this.cb(text);
+     }
+   }
+   const streamer = onToken ? new CallbackStreamer(chatPipeline.tokenizer, onToken) : void 0;
+   const output = await chatPipeline(messages, {
+     max_new_tokens: max_tokens,
+     temperature,
+     top_p,
+     streamer,
+     return_full_text: false
+   });
+   return output;
+ }
+ export {
+   generateLocalChatCompletion
+ };
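Cancellation here is token-granular: the streamer checks signal.aborted only in on_finalized_text, so an abort surfaces as a thrown "AbortError" on the next finalized token rather than immediately. A usage sketch, assuming generateLocalChatCompletion is re-exported from the package root (not confirmed by this diff):

// Hypothetical usage — fully local generation via transformers.js on WASM.
const controller = new AbortController();
const output = await generateLocalChatCompletion(
  [{ role: "user", content: "Summarize SSE in one sentence." }],
  {
    max_tokens: 128,
    onToken: (text) => console.log(text), // called as tokens finalize
    signal: controller.signal, // controller.abort() -> throws "AbortError"
  }
);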