@reverbia/sdk 1.0.0-next.20251205130522 → 1.0.0-next.20251208093930

This diff shows the contents of publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
@@ -0,0 +1,263 @@
+ "use strict";
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __export = (target, all) => {
+   for (var name in all)
+     __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/expo/index.ts
+ var index_exports = {};
+ __export(index_exports, {
+   useChat: () => useChat
+ });
+ module.exports = __toCommonJS(index_exports);
+
+ // src/expo/useChat.ts
+ var import_react = require("react");
+
+ // src/clientConfig.ts
+ var BASE_URL = "https://ai-portal-dev.zetachain.com";
+
+ // src/expo/useChat.ts
+ function useChat(options) {
+   const {
+     getToken,
+     baseUrl = BASE_URL,
+     onData: globalOnData,
+     onFinish,
+     onError
+   } = options || {};
+   const [isLoading, setIsLoading] = (0, import_react.useState)(false);
+   const abortControllerRef = (0, import_react.useRef)(null);
+   const stop = (0, import_react.useCallback)(() => {
+     if (abortControllerRef.current) {
+       abortControllerRef.current.abort();
+       abortControllerRef.current = null;
+     }
+   }, []);
+   (0, import_react.useEffect)(() => {
+     return () => {
+       if (abortControllerRef.current) {
+         abortControllerRef.current.abort();
+         abortControllerRef.current = null;
+       }
+     };
+   }, []);
+   const sendMessage = (0, import_react.useCallback)(
+     async ({
+       messages,
+       model,
+       onData
+     }) => {
+       if (!messages?.length) {
+         const errorMsg = "messages are required to call sendMessage.";
+         if (onError) onError(new Error(errorMsg));
+         return { data: null, error: errorMsg };
+       }
+       if (!model) {
+         const errorMsg = "model is required to call sendMessage.";
+         if (onError) onError(new Error(errorMsg));
+         return { data: null, error: errorMsg };
+       }
+       if (!getToken) {
+         const errorMsg = "Token getter function is required.";
+         if (onError) onError(new Error(errorMsg));
+         return { data: null, error: errorMsg };
+       }
+       if (abortControllerRef.current) {
+         abortControllerRef.current.abort();
+       }
+       const abortController = new AbortController();
+       abortControllerRef.current = abortController;
+       setIsLoading(true);
+       try {
+         const token = await getToken();
+         if (!token) {
+           const errorMsg = "No access token available.";
+           setIsLoading(false);
+           if (onError) onError(new Error(errorMsg));
+           return { data: null, error: errorMsg };
+         }
+         const result = await new Promise((resolve) => {
+           const xhr = new XMLHttpRequest();
+           const url = `${baseUrl}/api/v1/chat/completions`;
+           let accumulatedContent = "";
+           let completionId = "";
+           let completionModel = "";
+           let accumulatedUsage = {};
+           let finishReason;
+           let lastProcessedIndex = 0;
+           let incompleteLineBuffer = "";
+           const abortHandler = () => {
+             xhr.abort();
+           };
+           abortController.signal.addEventListener("abort", abortHandler);
+           xhr.open("POST", url, true);
+           xhr.setRequestHeader("Content-Type", "application/json");
+           xhr.setRequestHeader("Authorization", `Bearer ${token}`);
+           xhr.setRequestHeader("Accept", "text/event-stream");
+           xhr.onprogress = () => {
+             const newData = xhr.responseText.substring(lastProcessedIndex);
+             lastProcessedIndex = xhr.responseText.length;
+             const dataToProcess = incompleteLineBuffer + newData;
+             incompleteLineBuffer = "";
+             const lines = dataToProcess.split("\n");
+             if (!newData.endsWith("\n") && lines.length > 0) {
+               incompleteLineBuffer = lines.pop() || "";
+             }
+             for (const line of lines) {
+               if (line.startsWith("data: ")) {
+                 const data = line.substring(6).trim();
+                 if (data === "[DONE]") continue;
+                 try {
+                   const chunk = JSON.parse(data);
+                   if (chunk.id && !completionId) {
+                     completionId = chunk.id;
+                   }
+                   if (chunk.model && !completionModel) {
+                     completionModel = chunk.model;
+                   }
+                   if (chunk.usage) {
+                     accumulatedUsage = { ...accumulatedUsage, ...chunk.usage };
+                   }
+                   if (chunk.choices?.[0]) {
+                     const choice = chunk.choices[0];
+                     if (choice.delta?.content) {
+                       const content = choice.delta.content;
+                       accumulatedContent += content;
+                       if (onData) onData(content);
+                       if (globalOnData) globalOnData(content);
+                     }
+                     if (choice.finish_reason) {
+                       finishReason = choice.finish_reason;
+                     }
+                   }
+                 } catch {
+                 }
+               }
+             }
+           };
+           xhr.onload = () => {
+             abortController.signal.removeEventListener("abort", abortHandler);
+             if (incompleteLineBuffer) {
+               const line = incompleteLineBuffer.trim();
+               if (line.startsWith("data: ")) {
+                 const data = line.substring(6).trim();
+                 if (data !== "[DONE]") {
+                   try {
+                     const chunk = JSON.parse(data);
+                     if (chunk.id && !completionId) {
+                       completionId = chunk.id;
+                     }
+                     if (chunk.model && !completionModel) {
+                       completionModel = chunk.model;
+                     }
+                     if (chunk.usage) {
+                       accumulatedUsage = {
+                         ...accumulatedUsage,
+                         ...chunk.usage
+                       };
+                     }
+                     if (chunk.choices?.[0]) {
+                       const choice = chunk.choices[0];
+                       if (choice.delta?.content) {
+                         const content = choice.delta.content;
+                         accumulatedContent += content;
+                         if (onData) onData(content);
+                         if (globalOnData) globalOnData(content);
+                       }
+                       if (choice.finish_reason) {
+                         finishReason = choice.finish_reason;
+                       }
+                     }
+                   } catch {
+                   }
+                 }
+               }
+               incompleteLineBuffer = "";
+             }
+             if (xhr.status >= 200 && xhr.status < 300) {
+               const completion = {
+                 id: completionId,
+                 model: completionModel,
+                 choices: [
+                   {
+                     index: 0,
+                     message: {
+                       role: "assistant",
+                       content: [{ type: "text", text: accumulatedContent }]
+                     },
+                     finish_reason: finishReason
+                   }
+                 ],
+                 usage: Object.keys(accumulatedUsage).length > 0 ? accumulatedUsage : void 0
+               };
+               setIsLoading(false);
+               if (onFinish) onFinish(completion);
+               resolve({ data: completion, error: null });
+             } else {
+               const errorMsg = `Request failed with status ${xhr.status}`;
+               setIsLoading(false);
+               if (onError) onError(new Error(errorMsg));
+               resolve({ data: null, error: errorMsg });
+             }
+           };
+           xhr.onerror = () => {
+             abortController.signal.removeEventListener("abort", abortHandler);
+             const errorMsg = "Network error";
+             setIsLoading(false);
+             if (onError) onError(new Error(errorMsg));
+             resolve({ data: null, error: errorMsg });
+           };
+           xhr.onabort = () => {
+             abortController.signal.removeEventListener("abort", abortHandler);
+             setIsLoading(false);
+             resolve({ data: null, error: "Request aborted" });
+           };
+           xhr.send(
+             JSON.stringify({
+               messages,
+               model,
+               stream: true
+             })
+           );
+         });
+         return result;
+       } catch (err) {
+         const errorMsg = err instanceof Error ? err.message : "Failed to send message.";
+         const errorObj = err instanceof Error ? err : new Error(errorMsg);
+         setIsLoading(false);
+         if (onError) {
+           onError(errorObj);
+         }
+         return { data: null, error: errorMsg };
+       } finally {
+         if (abortControllerRef.current === abortController) {
+           abortControllerRef.current = null;
+         }
+       }
+     },
+     [getToken, baseUrl, globalOnData, onFinish, onError]
+   );
+   return {
+     isLoading,
+     sendMessage,
+     stop
+   };
+ }
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+   useChat
+ });
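
A note on the bundle above: streaming is read from `XMLHttpRequest.onprogress` rather than `fetch` (React Native's `fetch` does not expose a readable response stream), partial SSE lines are carried over between progress events in `incompleteLineBuffer`, and `sendMessage` always resolves to a `{ data, error }` pair instead of throwing. A minimal consumption sketch follows; the `@reverbia/sdk/expo` import path and the `getAuthToken` helper are assumptions for illustration, not confirmed by this diff:

```tsx
import { useCallback, useState } from "react";
// Assumed entry point; the diff shows src/expo/index.ts compiled to a CJS bundle.
import { useChat } from "@reverbia/sdk/expo";

// Hypothetical auth helper -- substitute your provider's token call.
declare function getAuthToken(): Promise<string | null>;

export function useStreamingReply() {
  const [reply, setReply] = useState("");
  const { isLoading, sendMessage, stop } = useChat({ getToken: getAuthToken });

  const send = useCallback(
    async (text: string) => {
      setReply("");
      const { data, error } = await sendMessage({
        messages: [{ role: "user", content: [{ type: "text", text }] }],
        model: "gpt-4o-mini",
        // Per-request callback: fires once per streamed content delta.
        onData: (chunk) => setReply((prev) => prev + chunk),
      });
      if (error === "Request aborted") return null; // stop() or unmount; onError is not called
      if (error) return null; // validation, HTTP, or network failure, already reported via onError
      return data; // full completion, also delivered to onFinish
    },
    [sendMessage]
  );

  return { reply, isLoading, send, stop };
}
```

Because `sendMessage` reports every failure through the returned `error` string (and the `onError` callback), the caller never needs a try/catch around it.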
@@ -0,0 +1,199 @@
+ /**
+  * ExtraFields contains additional metadata
+  */
+ type LlmapiChatCompletionExtraFields = {
+   /**
+    * Latency is the request latency in milliseconds
+    */
+   latency?: number;
+   /**
+    * ModelRequested is the model that was requested
+    */
+   model_requested?: string;
+   /**
+    * Provider is the LLM provider used (e.g., "openai", "anthropic")
+    */
+   provider?: string;
+   /**
+    * RequestType is always "chat_completion"
+    */
+   request_type?: string;
+ };
+ type LlmapiChatCompletionResponse = {
+   /**
+    * Choices contains the completion choices
+    */
+   choices?: Array<LlmapiChoice>;
+   extra_fields?: LlmapiChatCompletionExtraFields;
+   /**
+    * ID is the completion ID
+    */
+   id?: string;
+   /**
+    * Model is the model used
+    */
+   model?: string;
+   usage?: LlmapiChatCompletionUsage;
+ };
+ /**
+  * Usage contains token usage information
+  */
+ type LlmapiChatCompletionUsage = {
+   /**
+    * CompletionTokens is the number of tokens in the completion
+    */
+   completion_tokens?: number;
+   /**
+    * CostMicroUSD is the cost of this completion in micro-dollars (USD × 1,000,000)
+    */
+   cost_micro_usd?: number;
+   /**
+    * PromptTokens is the number of tokens in the prompt
+    */
+   prompt_tokens?: number;
+   /**
+    * TotalTokens is the total number of tokens used
+    */
+   total_tokens?: number;
+ };
+ type LlmapiChoice = {
+   /**
+    * FinishReason indicates why the completion stopped
+    */
+   finish_reason?: string;
+   /**
+    * Index is the choice index
+    */
+   index?: number;
+   message?: LlmapiMessage;
+ };
+ /**
+  * Message is the generated message
+  */
+ type LlmapiMessage = {
+   /**
+    * Content is the message content
+    */
+   content?: Array<LlmapiMessageContentPart>;
+   role?: LlmapiRole;
+ };
+ /**
+  * ImageURL is used when Type=image_url
+  */
+ type LlmapiMessageContentImage = {
+   /**
+    * Detail is the OpenAI detail hint (auto|low|high)
+    */
+   detail?: string;
+   /**
+    * URL is the image URL or data URI
+    */
+   url?: string;
+ };
+ type LlmapiMessageContentPart = {
+   image_url?: LlmapiMessageContentImage;
+   /**
+    * Text holds the text content when Type=text
+    */
+   text?: string;
+   /**
+    * Type is the block type (`text` or `image_url`)
+    */
+   type?: string;
+ };
+ /**
+  * Role is the message role (system, user, assistant)
+  */
+ type LlmapiRole = string;
+
+ type SendMessageArgs = {
+   messages: LlmapiMessage[];
+   model?: string;
+   /**
+    * Per-request callback for data chunks. Called in addition to the global
+    * `onData` callback if provided in `useChat` options.
+    *
+    * @param chunk - The content delta from the current chunk
+    */
+   onData?: (chunk: string) => void;
+ };
+ type SendMessageResult = {
+   data: LlmapiChatCompletionResponse;
+   error: null;
+ } | {
+   data: null;
+   error: string;
+ };
+ type UseChatOptions = {
+   getToken?: () => Promise<string | null>;
+   baseUrl?: string;
+   /**
+    * Callback function to be called when a new data chunk is received.
+    */
+   onData?: (chunk: string) => void;
+   /**
+    * Callback function to be called when the chat completion finishes successfully.
+    */
+   onFinish?: (response: LlmapiChatCompletionResponse) => void;
+   /**
+    * Callback function to be called when an unexpected error is encountered.
+    *
+    * **Note:** This callback is NOT called for aborted requests (via `stop()` or
+    * component unmount). Aborts are intentional actions and are not considered
+    * errors. To detect aborts, check the `error` field in the `sendMessage` result:
+    * `result.error === "Request aborted"`.
+    *
+    * @param error - The error that occurred (never an AbortError)
+    */
+   onError?: (error: Error) => void;
+ };
+ type UseChatResult = {
+   isLoading: boolean;
+   sendMessage: (args: SendMessageArgs) => Promise<SendMessageResult>;
+   /**
+    * Aborts the current streaming request if one is in progress.
+    *
+    * When a request is aborted, `sendMessage` will return with
+    * `{ data: null, error: "Request aborted" }`. The `onError` callback
+    * will NOT be called, as aborts are intentional actions, not errors.
+    */
+   stop: () => void;
+ };
+ /**
+  * A React hook for managing chat completions with authentication.
+  *
+  * **React Native version** - This is a lightweight version that only supports
+  * API-based chat completions. Local chat and client-side tools are not available
+  * in React Native.
+  *
+  * @param options - Optional configuration object
+  * @param options.getToken - An async function that returns an authentication token.
+  * @param options.baseUrl - Optional base URL for the API requests.
+  * @param options.onData - Callback function to be called when a new data chunk is received.
+  * @param options.onFinish - Callback function to be called when the chat completion finishes successfully.
+  * @param options.onError - Callback function to be called when an unexpected error is encountered.
+  *
+  * @returns An object containing:
+  * - `isLoading`: A boolean indicating whether a request is currently in progress
+  * - `sendMessage`: An async function to send chat messages
+  * - `stop`: A function to abort the current request
+  *
+  * @example
+  * ```tsx
+  * const { isLoading, sendMessage, stop } = useChat({
+  *   getToken: async () => await getAuthToken(),
+  *   onFinish: (response) => console.log("Chat finished:", response),
+  *   onError: (error) => console.error("Chat error:", error)
+  * });
+  *
+  * const handleSend = async () => {
+  *   const result = await sendMessage({
+  *     messages: [{ role: 'user', content: [{ type: 'text', text: 'Hello!' }] }],
+  *     model: 'gpt-4o-mini'
+  *   });
+  * };
+  * ```
+  */
+ declare function useChat(options?: UseChatOptions): UseChatResult;
+
+ export { useChat };
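
The declaration file exports only `useChat`, so the `Llmapi*` shapes above are purely structural: a message literal just has to match them. One thing the JSDoc example does not show is the `image_url` content part declared in `LlmapiMessageContentPart`. A hedged sketch (the URL is a placeholder, and whether a given model actually accepts image input is not established by these types):

```tsx
const question = {
  role: "user",
  content: [
    { type: "text", text: "What is in this picture?" },
    {
      type: "image_url",
      image_url: {
        url: "https://example.com/photo.jpg", // image URL or data: URI, per LlmapiMessageContentImage
        detail: "auto", // OpenAI detail hint: auto | low | high
      },
    },
  ],
};

// Passed like any text-only message:
// await sendMessage({ messages: [question], model: "gpt-4o-mini" });
```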