@reverbia/sdk 1.0.0-next.20251202130234 → 1.0.0-next.20251203130707
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/react/index.cjs +192 -2
- package/dist/react/index.d.mts +242 -1
- package/dist/react/index.d.ts +242 -1
- package/dist/react/index.mjs +189 -1
- package/package.json +2 -2
package/dist/react/index.cjs
CHANGED
|
@@ -46680,8 +46680,10 @@ __export(index_exports, {
|
|
|
46680
46680
|
selectTool: () => selectTool,
|
|
46681
46681
|
useChat: () => useChat,
|
|
46682
46682
|
useEncryption: () => useEncryption,
|
|
46683
|
+
useImageGeneration: () => useImageGeneration,
|
|
46683
46684
|
useMemory: () => useMemory,
|
|
46684
|
-
useModels: () => useModels
|
|
46685
|
+
useModels: () => useModels,
|
|
46686
|
+
useSearch: () => useSearch
|
|
46685
46687
|
});
|
|
46686
46688
|
module.exports = __toCommonJS(index_exports);
|
|
46687
46689
|
|
|
@@ -48430,12 +48432,32 @@ var postApiV1Embeddings = (options) => {
|
|
|
48430
48432
|
}
|
|
48431
48433
|
});
|
|
48432
48434
|
};
|
|
48435
|
+
var postApiV1ImagesGenerations = (options) => {
|
|
48436
|
+
return (options.client ?? client).post({
|
|
48437
|
+
url: "/api/v1/images/generations",
|
|
48438
|
+
...options,
|
|
48439
|
+
headers: {
|
|
48440
|
+
"Content-Type": "application/json",
|
|
48441
|
+
...options.headers
|
|
48442
|
+
}
|
|
48443
|
+
});
|
|
48444
|
+
};
|
|
48433
48445
|
var getApiV1Models = (options) => {
|
|
48434
48446
|
return (options?.client ?? client).get({
|
|
48435
48447
|
url: "/api/v1/models",
|
|
48436
48448
|
...options
|
|
48437
48449
|
});
|
|
48438
48450
|
};
|
|
48451
|
+
var postApiV1Search = (options) => {
|
|
48452
|
+
return (options.client ?? client).post({
|
|
48453
|
+
url: "/api/v1/search",
|
|
48454
|
+
...options,
|
|
48455
|
+
headers: {
|
|
48456
|
+
"Content-Type": "application/json",
|
|
48457
|
+
...options.headers
|
|
48458
|
+
}
|
|
48459
|
+
});
|
|
48460
|
+
};
|
|
48439
48461
|
|
|
48440
48462
|
// src/lib/memory/constants.ts
|
|
48441
48463
|
var DEFAULT_LOCAL_EMBEDDING_MODEL = "Snowflake/snowflake-arctic-embed-xs";
|
|
@@ -48913,6 +48935,172 @@ function useModels(options = {}) {
|
|
|
48913
48935
|
};
|
|
48914
48936
|
}
|
|
48915
48937
|
|
|
48938
|
+
// src/react/useSearch.ts
|
|
48939
|
+
var import_react5 = require("react");
|
|
48940
|
+
function useSearch(options = {}) {
|
|
48941
|
+
const { getToken, baseUrl = BASE_URL, onError } = options;
|
|
48942
|
+
const [isLoading, setIsLoading] = (0, import_react5.useState)(false);
|
|
48943
|
+
const [results, setResults] = (0, import_react5.useState)(null);
|
|
48944
|
+
const [response, setResponse] = (0, import_react5.useState)(null);
|
|
48945
|
+
const [error, setError] = (0, import_react5.useState)(null);
|
|
48946
|
+
const abortControllerRef = (0, import_react5.useRef)(null);
|
|
48947
|
+
(0, import_react5.useEffect)(() => {
|
|
48948
|
+
return () => {
|
|
48949
|
+
if (abortControllerRef.current) {
|
|
48950
|
+
abortControllerRef.current.abort();
|
|
48951
|
+
abortControllerRef.current = null;
|
|
48952
|
+
}
|
|
48953
|
+
};
|
|
48954
|
+
}, []);
|
|
48955
|
+
const search = (0, import_react5.useCallback)(
|
|
48956
|
+
async (query, searchOptions = {}) => {
|
|
48957
|
+
if (abortControllerRef.current) {
|
|
48958
|
+
abortControllerRef.current.abort();
|
|
48959
|
+
}
|
|
48960
|
+
const abortController = new AbortController();
|
|
48961
|
+
abortControllerRef.current = abortController;
|
|
48962
|
+
setIsLoading(true);
|
|
48963
|
+
setError(null);
|
|
48964
|
+
setResults(null);
|
|
48965
|
+
setResponse(null);
|
|
48966
|
+
try {
|
|
48967
|
+
let token;
|
|
48968
|
+
if (getToken) {
|
|
48969
|
+
token = await getToken() ?? void 0;
|
|
48970
|
+
}
|
|
48971
|
+
if (abortController.signal.aborted) return null;
|
|
48972
|
+
const queryArray = Array.isArray(query) ? query : [query];
|
|
48973
|
+
const res = await postApiV1Search({
|
|
48974
|
+
baseUrl,
|
|
48975
|
+
body: {
|
|
48976
|
+
query: queryArray,
|
|
48977
|
+
...searchOptions
|
|
48978
|
+
},
|
|
48979
|
+
headers: token ? {
|
|
48980
|
+
Authorization: `Bearer ${token}`
|
|
48981
|
+
} : void 0,
|
|
48982
|
+
signal: abortController.signal
|
|
48983
|
+
});
|
|
48984
|
+
if (res.error) {
|
|
48985
|
+
const errorMsg = res.error.error || res.error.message || "Failed to perform search";
|
|
48986
|
+
throw new Error(errorMsg);
|
|
48987
|
+
}
|
|
48988
|
+
if (res.data) {
|
|
48989
|
+
setResponse(res.data);
|
|
48990
|
+
setResults(res.data.results || []);
|
|
48991
|
+
return res.data;
|
|
48992
|
+
}
|
|
48993
|
+
return null;
|
|
48994
|
+
} catch (err) {
|
|
48995
|
+
if (err instanceof Error && err.name === "AbortError") {
|
|
48996
|
+
return null;
|
|
48997
|
+
}
|
|
48998
|
+
const errorObj = err instanceof Error ? err : new Error(String(err));
|
|
48999
|
+
setError(errorObj);
|
|
49000
|
+
if (onError) {
|
|
49001
|
+
onError(errorObj);
|
|
49002
|
+
}
|
|
49003
|
+
return null;
|
|
49004
|
+
} finally {
|
|
49005
|
+
if (abortControllerRef.current === abortController) {
|
|
49006
|
+
setIsLoading(false);
|
|
49007
|
+
abortControllerRef.current = null;
|
|
49008
|
+
}
|
|
49009
|
+
}
|
|
49010
|
+
},
|
|
49011
|
+
[baseUrl, getToken, onError]
|
|
49012
|
+
);
|
|
49013
|
+
return {
|
|
49014
|
+
isLoading,
|
|
49015
|
+
search,
|
|
49016
|
+
results,
|
|
49017
|
+
response,
|
|
49018
|
+
error
|
|
49019
|
+
};
|
|
49020
|
+
}
|
|
49021
|
+
|
|
49022
|
+
// src/react/useImageGeneration.ts
|
|
49023
|
+
var import_react6 = require("react");
|
|
49024
|
+
function useImageGeneration(options = {}) {
|
|
49025
|
+
const { getToken, baseUrl = BASE_URL, onFinish, onError } = options;
|
|
49026
|
+
const [isLoading, setIsLoading] = (0, import_react6.useState)(false);
|
|
49027
|
+
const abortControllerRef = (0, import_react6.useRef)(null);
|
|
49028
|
+
(0, import_react6.useEffect)(() => {
|
|
49029
|
+
return () => {
|
|
49030
|
+
if (abortControllerRef.current) {
|
|
49031
|
+
abortControllerRef.current.abort();
|
|
49032
|
+
abortControllerRef.current = null;
|
|
49033
|
+
}
|
|
49034
|
+
};
|
|
49035
|
+
}, []);
|
|
49036
|
+
const stop = (0, import_react6.useCallback)(() => {
|
|
49037
|
+
if (abortControllerRef.current) {
|
|
49038
|
+
abortControllerRef.current.abort();
|
|
49039
|
+
abortControllerRef.current = null;
|
|
49040
|
+
}
|
|
49041
|
+
}, []);
|
|
49042
|
+
const generateImage = (0, import_react6.useCallback)(
|
|
49043
|
+
async (args) => {
|
|
49044
|
+
if (abortControllerRef.current) {
|
|
49045
|
+
abortControllerRef.current.abort();
|
|
49046
|
+
}
|
|
49047
|
+
const abortController = new AbortController();
|
|
49048
|
+
abortControllerRef.current = abortController;
|
|
49049
|
+
setIsLoading(true);
|
|
49050
|
+
try {
|
|
49051
|
+
if (!getToken) {
|
|
49052
|
+
throw new Error("Token getter function is required.");
|
|
49053
|
+
}
|
|
49054
|
+
const token = await getToken();
|
|
49055
|
+
if (!token) {
|
|
49056
|
+
throw new Error("No access token available.");
|
|
49057
|
+
}
|
|
49058
|
+
const response = await postApiV1ImagesGenerations({
|
|
49059
|
+
baseUrl,
|
|
49060
|
+
body: args,
|
|
49061
|
+
headers: {
|
|
49062
|
+
Authorization: `Bearer ${token}`
|
|
49063
|
+
},
|
|
49064
|
+
signal: abortController.signal
|
|
49065
|
+
});
|
|
49066
|
+
if (response.error) {
|
|
49067
|
+
const errorMsg = response.error.error || "Failed to generate image";
|
|
49068
|
+
throw new Error(errorMsg);
|
|
49069
|
+
}
|
|
49070
|
+
if (!response.data) {
|
|
49071
|
+
throw new Error("No data received from image generation API");
|
|
49072
|
+
}
|
|
49073
|
+
const result = response.data;
|
|
49074
|
+
if (onFinish) {
|
|
49075
|
+
onFinish(result);
|
|
49076
|
+
}
|
|
49077
|
+
return { data: result, error: null };
|
|
49078
|
+
} catch (err) {
|
|
49079
|
+
if (err instanceof Error && err.name === "AbortError") {
|
|
49080
|
+
return { data: null, error: "Request aborted" };
|
|
49081
|
+
}
|
|
49082
|
+
const errorMsg = err instanceof Error ? err.message : "Failed to generate image.";
|
|
49083
|
+
const errorObj = err instanceof Error ? err : new Error(errorMsg);
|
|
49084
|
+
if (onError) {
|
|
49085
|
+
onError(errorObj);
|
|
49086
|
+
}
|
|
49087
|
+
return { data: null, error: errorMsg };
|
|
49088
|
+
} finally {
|
|
49089
|
+
if (abortControllerRef.current === abortController) {
|
|
49090
|
+
setIsLoading(false);
|
|
49091
|
+
abortControllerRef.current = null;
|
|
49092
|
+
}
|
|
49093
|
+
}
|
|
49094
|
+
},
|
|
49095
|
+
[getToken, baseUrl, onFinish, onError]
|
|
49096
|
+
);
|
|
49097
|
+
return {
|
|
49098
|
+
isLoading,
|
|
49099
|
+
generateImage,
|
|
49100
|
+
stop
|
|
49101
|
+
};
|
|
49102
|
+
}
|
|
49103
|
+
|
|
48916
49104
|
// src/lib/memory/chat.ts
|
|
48917
49105
|
var formatMemoriesForChat = (memories, format = "compact") => {
|
|
48918
49106
|
if (memories.length === 0) {
|
|
@@ -48980,8 +49168,10 @@ var extractConversationContext = (messages, maxMessages = 3) => {
|
|
|
48980
49168
|
selectTool,
|
|
48981
49169
|
useChat,
|
|
48982
49170
|
useEncryption,
|
|
49171
|
+
useImageGeneration,
|
|
48983
49172
|
useMemory,
|
|
48984
|
-
useModels
|
|
49173
|
+
useModels,
|
|
49174
|
+
useSearch
|
|
48985
49175
|
});
|
|
48986
49176
|
/*! Bundled license information:
|
|
48987
49177
|
|
package/dist/react/index.d.mts
CHANGED
|
@@ -67,6 +67,96 @@ type LlmapiChoice = {
|
|
|
67
67
|
index?: number;
|
|
68
68
|
message?: LlmapiMessage;
|
|
69
69
|
};
|
|
70
|
+
/**
|
|
71
|
+
* ExtraFields contains additional metadata such as provider/model information.
|
|
72
|
+
*/
|
|
73
|
+
type LlmapiImageGenerationExtraFields = {
|
|
74
|
+
/**
|
|
75
|
+
* ModelRequested is the model identifier that the client asked for.
|
|
76
|
+
*/
|
|
77
|
+
model_requested?: string;
|
|
78
|
+
/**
|
|
79
|
+
* Provider is the gateway that serviced this request.
|
|
80
|
+
*/
|
|
81
|
+
provider?: string;
|
|
82
|
+
/**
|
|
83
|
+
* RequestType is always "image_generation".
|
|
84
|
+
*/
|
|
85
|
+
request_type?: string;
|
|
86
|
+
};
|
|
87
|
+
type LlmapiImageGenerationImage = {
|
|
88
|
+
/**
|
|
89
|
+
* B64JSON is the base64 payload for models that can only return binary.
|
|
90
|
+
*/
|
|
91
|
+
b64_json?: string;
|
|
92
|
+
/**
|
|
93
|
+
* URL is the signed URL to download the image.
|
|
94
|
+
*/
|
|
95
|
+
url?: string;
|
|
96
|
+
};
|
|
97
|
+
type LlmapiImageGenerationRequest = {
|
|
98
|
+
/**
|
|
99
|
+
* Model is the model identifier to use for generation (e.g., "gpt-image-1").
|
|
100
|
+
*/
|
|
101
|
+
model?: string;
|
|
102
|
+
/**
|
|
103
|
+
* Prompt is the text description of the desired image.
|
|
104
|
+
*/
|
|
105
|
+
prompt?: string;
|
|
106
|
+
/**
|
|
107
|
+
* Quality targets a quality preset (e.g., "auto", "high").
|
|
108
|
+
*/
|
|
109
|
+
quality?: string;
|
|
110
|
+
/**
|
|
111
|
+
* ResponseFormat controls how the generated image is returned (e.g., "url" or "b64_json").
|
|
112
|
+
*/
|
|
113
|
+
response_format?: string;
|
|
114
|
+
/**
|
|
115
|
+
* Size controls the dimensions of the generated image (e.g., "1024x1024").
|
|
116
|
+
*/
|
|
117
|
+
size?: string;
|
|
118
|
+
};
|
|
119
|
+
type LlmapiImageGenerationResponse = {
|
|
120
|
+
/**
|
|
121
|
+
* Created is the Unix timestamp when the image was generated.
|
|
122
|
+
*/
|
|
123
|
+
created?: number;
|
|
124
|
+
extra_fields?: LlmapiImageGenerationExtraFields;
|
|
125
|
+
/**
|
|
126
|
+
* Images contains the generated images.
|
|
127
|
+
*/
|
|
128
|
+
images?: Array<LlmapiImageGenerationImage>;
|
|
129
|
+
/**
|
|
130
|
+
* Model is the model identifier that generated the image.
|
|
131
|
+
*/
|
|
132
|
+
model?: string;
|
|
133
|
+
/**
|
|
134
|
+
* Provider is the gateway that produced the image.
|
|
135
|
+
*/
|
|
136
|
+
provider?: string;
|
|
137
|
+
usage?: LlmapiImageGenerationUsage;
|
|
138
|
+
};
|
|
139
|
+
/**
|
|
140
|
+
* Usage documents token usage (when available).
|
|
141
|
+
*/
|
|
142
|
+
type LlmapiImageGenerationUsage = {
|
|
143
|
+
/**
|
|
144
|
+
* CostMicroUSD is the inference cost for this image generation request
|
|
145
|
+
*/
|
|
146
|
+
cost_micro_usd?: number;
|
|
147
|
+
/**
|
|
148
|
+
* InputTokens is the number of tokens sent in the prompt.
|
|
149
|
+
*/
|
|
150
|
+
input_tokens?: number;
|
|
151
|
+
/**
|
|
152
|
+
* OutputTokens is the number of tokens returned by the model.
|
|
153
|
+
*/
|
|
154
|
+
output_tokens?: number;
|
|
155
|
+
/**
|
|
156
|
+
* TotalTokens is the total number of tokens consumed.
|
|
157
|
+
*/
|
|
158
|
+
total_tokens?: number;
|
|
159
|
+
};
|
|
70
160
|
/**
|
|
71
161
|
* Message is the generated message
|
|
72
162
|
*/
|
|
@@ -198,6 +288,80 @@ type LlmapiModelTopProvider = {
|
|
|
198
288
|
* Role is the message role (system, user, assistant)
|
|
199
289
|
*/
|
|
200
290
|
type LlmapiRole = string;
|
|
291
|
+
/**
|
|
292
|
+
* ExtraFields contains additional metadata.
|
|
293
|
+
*/
|
|
294
|
+
type LlmapiSearchExtraFields = {
|
|
295
|
+
/**
|
|
296
|
+
* RequestType is always "search".
|
|
297
|
+
*/
|
|
298
|
+
request_type?: string;
|
|
299
|
+
/**
|
|
300
|
+
* SearchProvider is the search provider used (e.g., "perplexity", "google-pse").
|
|
301
|
+
*/
|
|
302
|
+
search_provider?: string;
|
|
303
|
+
};
|
|
304
|
+
type LlmapiSearchRequest = {
|
|
305
|
+
/**
|
|
306
|
+
* Country code filter (e.g., "US", "GB", "DE").
|
|
307
|
+
*/
|
|
308
|
+
country?: string;
|
|
309
|
+
/**
|
|
310
|
+
* Maximum number of results to return (1-20). Default: 10.
|
|
311
|
+
*/
|
|
312
|
+
max_results?: number;
|
|
313
|
+
/**
|
|
314
|
+
* Maximum tokens per page to process. Default: 1024.
|
|
315
|
+
*/
|
|
316
|
+
max_tokens_per_page?: number;
|
|
317
|
+
/**
|
|
318
|
+
* Search query. Can be a single string or array of strings.
|
|
319
|
+
*/
|
|
320
|
+
query?: Array<string>;
|
|
321
|
+
/**
|
|
322
|
+
* List of domains to filter results (max 20 domains).
|
|
323
|
+
*/
|
|
324
|
+
search_domain_filter?: Array<string>;
|
|
325
|
+
/**
|
|
326
|
+
* The search provider to use.
|
|
327
|
+
*/
|
|
328
|
+
search_tool_name?: string;
|
|
329
|
+
};
|
|
330
|
+
type LlmapiSearchResponse = {
|
|
331
|
+
extra_fields?: LlmapiSearchExtraFields;
|
|
332
|
+
/**
|
|
333
|
+
* List of search results.
|
|
334
|
+
*/
|
|
335
|
+
results?: Array<LlmapiSearchResult>;
|
|
336
|
+
usage?: LlmapiSearchUsage;
|
|
337
|
+
};
|
|
338
|
+
type LlmapiSearchResult = {
|
|
339
|
+
/**
|
|
340
|
+
* Optional publication or last updated date.
|
|
341
|
+
*/
|
|
342
|
+
date?: string;
|
|
343
|
+
/**
|
|
344
|
+
* Text snippet from the result.
|
|
345
|
+
*/
|
|
346
|
+
snippet?: string;
|
|
347
|
+
/**
|
|
348
|
+
* Title of the search result.
|
|
349
|
+
*/
|
|
350
|
+
title?: string;
|
|
351
|
+
/**
|
|
352
|
+
* URL of the search result.
|
|
353
|
+
*/
|
|
354
|
+
url?: string;
|
|
355
|
+
};
|
|
356
|
+
/**
|
|
357
|
+
* Usage contains usage information.
|
|
358
|
+
*/
|
|
359
|
+
type LlmapiSearchUsage = {
|
|
360
|
+
/**
|
|
361
|
+
* CostMicroUSD is the cost of this search in micro-dollars (USD × 1,000,000).
|
|
362
|
+
*/
|
|
363
|
+
cost_micro_usd?: number;
|
|
364
|
+
};
|
|
201
365
|
|
|
202
366
|
/**
|
|
203
367
|
* Parameter definition for a client-side tool
|
|
@@ -554,6 +718,83 @@ type UseModelsResult = {
|
|
|
554
718
|
*/
|
|
555
719
|
declare function useModels(options?: UseModelsOptions): UseModelsResult;
|
|
556
720
|
|
|
721
|
+
type UseSearchOptions = {
|
|
722
|
+
/**
|
|
723
|
+
* Custom function to get auth token for API calls
|
|
724
|
+
*/
|
|
725
|
+
getToken?: () => Promise<string | null>;
|
|
726
|
+
/**
|
|
727
|
+
* Optional base URL for the API requests.
|
|
728
|
+
*/
|
|
729
|
+
baseUrl?: string;
|
|
730
|
+
/**
|
|
731
|
+
* Callback function to be called when an error is encountered.
|
|
732
|
+
*/
|
|
733
|
+
onError?: (error: Error) => void;
|
|
734
|
+
};
|
|
735
|
+
type SearchOptions = Omit<LlmapiSearchRequest, "query">;
|
|
736
|
+
type UseSearchResult = {
|
|
737
|
+
isLoading: boolean;
|
|
738
|
+
search: (query: string | string[], options?: SearchOptions) => Promise<LlmapiSearchResponse | null>;
|
|
739
|
+
results: LlmapiSearchResult[] | null;
|
|
740
|
+
response: LlmapiSearchResponse | null;
|
|
741
|
+
error: Error | null;
|
|
742
|
+
};
|
|
743
|
+
/**
|
|
744
|
+
* React hook for performing search operations using the AI SDK.
|
|
745
|
+
*
|
|
746
|
+
* @param options - Configuration options for the search hook
|
|
747
|
+
* @returns Object containing search function, results, loading state, and error
|
|
748
|
+
*
|
|
749
|
+
* @example
|
|
750
|
+
* ```tsx
|
|
751
|
+
* const { search, results, isLoading } = useSearch({
|
|
752
|
+
* getToken: async () => "my-token"
|
|
753
|
+
* });
|
|
754
|
+
*
|
|
755
|
+
* const handleSearch = async () => {
|
|
756
|
+
* await search("What is ZetaChain?");
|
|
757
|
+
* };
|
|
758
|
+
* ```
|
|
759
|
+
*/
|
|
760
|
+
declare function useSearch(options?: UseSearchOptions): UseSearchResult;
|
|
761
|
+
|
|
762
|
+
type UseImageGenerationOptions = {
|
|
763
|
+
/**
|
|
764
|
+
* Custom function to get auth token for API calls
|
|
765
|
+
*/
|
|
766
|
+
getToken?: () => Promise<string | null>;
|
|
767
|
+
/**
|
|
768
|
+
* Optional base URL for the API requests.
|
|
769
|
+
*/
|
|
770
|
+
baseUrl?: string;
|
|
771
|
+
/**
|
|
772
|
+
* Callback function to be called when the generation finishes successfully.
|
|
773
|
+
*/
|
|
774
|
+
onFinish?: (response: LlmapiImageGenerationResponse) => void;
|
|
775
|
+
/**
|
|
776
|
+
* Callback function to be called when an unexpected error is encountered.
|
|
777
|
+
*/
|
|
778
|
+
onError?: (error: Error) => void;
|
|
779
|
+
};
|
|
780
|
+
type GenerateImageArgs = LlmapiImageGenerationRequest;
|
|
781
|
+
type GenerateImageResult = {
|
|
782
|
+
data: LlmapiImageGenerationResponse;
|
|
783
|
+
error: null;
|
|
784
|
+
} | {
|
|
785
|
+
data: null;
|
|
786
|
+
error: string;
|
|
787
|
+
};
|
|
788
|
+
type UseImageGenerationResult = {
|
|
789
|
+
isLoading: boolean;
|
|
790
|
+
generateImage: (args: GenerateImageArgs) => Promise<GenerateImageResult>;
|
|
791
|
+
stop: () => void;
|
|
792
|
+
};
|
|
793
|
+
/**
|
|
794
|
+
* React hook for generating images using the LLM API.
|
|
795
|
+
*/
|
|
796
|
+
declare function useImageGeneration(options?: UseImageGenerationOptions): UseImageGenerationResult;
|
|
797
|
+
|
|
557
798
|
/**
|
|
558
799
|
* Format memories into a context string that can be included in chat messages
|
|
559
800
|
* @param memories Array of memories with similarity scores
|
|
@@ -605,4 +846,4 @@ declare function executeTool(tool: ClientTool, params: Record<string, unknown>):
|
|
|
605
846
|
error?: string;
|
|
606
847
|
}>;
|
|
607
848
|
|
|
608
|
-
export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, selectTool, useChat, useEncryption, useMemory, useModels };
|
|
849
|
+
export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, selectTool, useChat, useEncryption, useImageGeneration, useMemory, useModels, useSearch };
|
package/dist/react/index.d.ts
CHANGED
|
@@ -67,6 +67,96 @@ type LlmapiChoice = {
|
|
|
67
67
|
index?: number;
|
|
68
68
|
message?: LlmapiMessage;
|
|
69
69
|
};
|
|
70
|
+
/**
|
|
71
|
+
* ExtraFields contains additional metadata such as provider/model information.
|
|
72
|
+
*/
|
|
73
|
+
type LlmapiImageGenerationExtraFields = {
|
|
74
|
+
/**
|
|
75
|
+
* ModelRequested is the model identifier that the client asked for.
|
|
76
|
+
*/
|
|
77
|
+
model_requested?: string;
|
|
78
|
+
/**
|
|
79
|
+
* Provider is the gateway that serviced this request.
|
|
80
|
+
*/
|
|
81
|
+
provider?: string;
|
|
82
|
+
/**
|
|
83
|
+
* RequestType is always "image_generation".
|
|
84
|
+
*/
|
|
85
|
+
request_type?: string;
|
|
86
|
+
};
|
|
87
|
+
type LlmapiImageGenerationImage = {
|
|
88
|
+
/**
|
|
89
|
+
* B64JSON is the base64 payload for models that can only return binary.
|
|
90
|
+
*/
|
|
91
|
+
b64_json?: string;
|
|
92
|
+
/**
|
|
93
|
+
* URL is the signed URL to download the image.
|
|
94
|
+
*/
|
|
95
|
+
url?: string;
|
|
96
|
+
};
|
|
97
|
+
type LlmapiImageGenerationRequest = {
|
|
98
|
+
/**
|
|
99
|
+
* Model is the model identifier to use for generation (e.g., "gpt-image-1").
|
|
100
|
+
*/
|
|
101
|
+
model?: string;
|
|
102
|
+
/**
|
|
103
|
+
* Prompt is the text description of the desired image.
|
|
104
|
+
*/
|
|
105
|
+
prompt?: string;
|
|
106
|
+
/**
|
|
107
|
+
* Quality targets a quality preset (e.g., "auto", "high").
|
|
108
|
+
*/
|
|
109
|
+
quality?: string;
|
|
110
|
+
/**
|
|
111
|
+
* ResponseFormat controls how the generated image is returned (e.g., "url" or "b64_json").
|
|
112
|
+
*/
|
|
113
|
+
response_format?: string;
|
|
114
|
+
/**
|
|
115
|
+
* Size controls the dimensions of the generated image (e.g., "1024x1024").
|
|
116
|
+
*/
|
|
117
|
+
size?: string;
|
|
118
|
+
};
|
|
119
|
+
type LlmapiImageGenerationResponse = {
|
|
120
|
+
/**
|
|
121
|
+
* Created is the Unix timestamp when the image was generated.
|
|
122
|
+
*/
|
|
123
|
+
created?: number;
|
|
124
|
+
extra_fields?: LlmapiImageGenerationExtraFields;
|
|
125
|
+
/**
|
|
126
|
+
* Images contains the generated images.
|
|
127
|
+
*/
|
|
128
|
+
images?: Array<LlmapiImageGenerationImage>;
|
|
129
|
+
/**
|
|
130
|
+
* Model is the model identifier that generated the image.
|
|
131
|
+
*/
|
|
132
|
+
model?: string;
|
|
133
|
+
/**
|
|
134
|
+
* Provider is the gateway that produced the image.
|
|
135
|
+
*/
|
|
136
|
+
provider?: string;
|
|
137
|
+
usage?: LlmapiImageGenerationUsage;
|
|
138
|
+
};
|
|
139
|
+
/**
|
|
140
|
+
* Usage documents token usage (when available).
|
|
141
|
+
*/
|
|
142
|
+
type LlmapiImageGenerationUsage = {
|
|
143
|
+
/**
|
|
144
|
+
* CostMicroUSD is the inference cost for this image generation request
|
|
145
|
+
*/
|
|
146
|
+
cost_micro_usd?: number;
|
|
147
|
+
/**
|
|
148
|
+
* InputTokens is the number of tokens sent in the prompt.
|
|
149
|
+
*/
|
|
150
|
+
input_tokens?: number;
|
|
151
|
+
/**
|
|
152
|
+
* OutputTokens is the number of tokens returned by the model.
|
|
153
|
+
*/
|
|
154
|
+
output_tokens?: number;
|
|
155
|
+
/**
|
|
156
|
+
* TotalTokens is the total number of tokens consumed.
|
|
157
|
+
*/
|
|
158
|
+
total_tokens?: number;
|
|
159
|
+
};
|
|
70
160
|
/**
|
|
71
161
|
* Message is the generated message
|
|
72
162
|
*/
|
|
@@ -198,6 +288,80 @@ type LlmapiModelTopProvider = {
|
|
|
198
288
|
* Role is the message role (system, user, assistant)
|
|
199
289
|
*/
|
|
200
290
|
type LlmapiRole = string;
|
|
291
|
+
/**
|
|
292
|
+
* ExtraFields contains additional metadata.
|
|
293
|
+
*/
|
|
294
|
+
type LlmapiSearchExtraFields = {
|
|
295
|
+
/**
|
|
296
|
+
* RequestType is always "search".
|
|
297
|
+
*/
|
|
298
|
+
request_type?: string;
|
|
299
|
+
/**
|
|
300
|
+
* SearchProvider is the search provider used (e.g., "perplexity", "google-pse").
|
|
301
|
+
*/
|
|
302
|
+
search_provider?: string;
|
|
303
|
+
};
|
|
304
|
+
type LlmapiSearchRequest = {
|
|
305
|
+
/**
|
|
306
|
+
* Country code filter (e.g., "US", "GB", "DE").
|
|
307
|
+
*/
|
|
308
|
+
country?: string;
|
|
309
|
+
/**
|
|
310
|
+
* Maximum number of results to return (1-20). Default: 10.
|
|
311
|
+
*/
|
|
312
|
+
max_results?: number;
|
|
313
|
+
/**
|
|
314
|
+
* Maximum tokens per page to process. Default: 1024.
|
|
315
|
+
*/
|
|
316
|
+
max_tokens_per_page?: number;
|
|
317
|
+
/**
|
|
318
|
+
* Search query. Can be a single string or array of strings.
|
|
319
|
+
*/
|
|
320
|
+
query?: Array<string>;
|
|
321
|
+
/**
|
|
322
|
+
* List of domains to filter results (max 20 domains).
|
|
323
|
+
*/
|
|
324
|
+
search_domain_filter?: Array<string>;
|
|
325
|
+
/**
|
|
326
|
+
* The search provider to use.
|
|
327
|
+
*/
|
|
328
|
+
search_tool_name?: string;
|
|
329
|
+
};
|
|
330
|
+
type LlmapiSearchResponse = {
|
|
331
|
+
extra_fields?: LlmapiSearchExtraFields;
|
|
332
|
+
/**
|
|
333
|
+
* List of search results.
|
|
334
|
+
*/
|
|
335
|
+
results?: Array<LlmapiSearchResult>;
|
|
336
|
+
usage?: LlmapiSearchUsage;
|
|
337
|
+
};
|
|
338
|
+
type LlmapiSearchResult = {
|
|
339
|
+
/**
|
|
340
|
+
* Optional publication or last updated date.
|
|
341
|
+
*/
|
|
342
|
+
date?: string;
|
|
343
|
+
/**
|
|
344
|
+
* Text snippet from the result.
|
|
345
|
+
*/
|
|
346
|
+
snippet?: string;
|
|
347
|
+
/**
|
|
348
|
+
* Title of the search result.
|
|
349
|
+
*/
|
|
350
|
+
title?: string;
|
|
351
|
+
/**
|
|
352
|
+
* URL of the search result.
|
|
353
|
+
*/
|
|
354
|
+
url?: string;
|
|
355
|
+
};
|
|
356
|
+
/**
|
|
357
|
+
* Usage contains usage information.
|
|
358
|
+
*/
|
|
359
|
+
type LlmapiSearchUsage = {
|
|
360
|
+
/**
|
|
361
|
+
* CostMicroUSD is the cost of this search in micro-dollars (USD × 1,000,000).
|
|
362
|
+
*/
|
|
363
|
+
cost_micro_usd?: number;
|
|
364
|
+
};
|
|
201
365
|
|
|
202
366
|
/**
|
|
203
367
|
* Parameter definition for a client-side tool
|
|
@@ -554,6 +718,83 @@ type UseModelsResult = {
|
|
|
554
718
|
*/
|
|
555
719
|
declare function useModels(options?: UseModelsOptions): UseModelsResult;
|
|
556
720
|
|
|
721
|
+
type UseSearchOptions = {
|
|
722
|
+
/**
|
|
723
|
+
* Custom function to get auth token for API calls
|
|
724
|
+
*/
|
|
725
|
+
getToken?: () => Promise<string | null>;
|
|
726
|
+
/**
|
|
727
|
+
* Optional base URL for the API requests.
|
|
728
|
+
*/
|
|
729
|
+
baseUrl?: string;
|
|
730
|
+
/**
|
|
731
|
+
* Callback function to be called when an error is encountered.
|
|
732
|
+
*/
|
|
733
|
+
onError?: (error: Error) => void;
|
|
734
|
+
};
|
|
735
|
+
type SearchOptions = Omit<LlmapiSearchRequest, "query">;
|
|
736
|
+
type UseSearchResult = {
|
|
737
|
+
isLoading: boolean;
|
|
738
|
+
search: (query: string | string[], options?: SearchOptions) => Promise<LlmapiSearchResponse | null>;
|
|
739
|
+
results: LlmapiSearchResult[] | null;
|
|
740
|
+
response: LlmapiSearchResponse | null;
|
|
741
|
+
error: Error | null;
|
|
742
|
+
};
|
|
743
|
+
/**
|
|
744
|
+
* React hook for performing search operations using the AI SDK.
|
|
745
|
+
*
|
|
746
|
+
* @param options - Configuration options for the search hook
|
|
747
|
+
* @returns Object containing search function, results, loading state, and error
|
|
748
|
+
*
|
|
749
|
+
* @example
|
|
750
|
+
* ```tsx
|
|
751
|
+
* const { search, results, isLoading } = useSearch({
|
|
752
|
+
* getToken: async () => "my-token"
|
|
753
|
+
* });
|
|
754
|
+
*
|
|
755
|
+
* const handleSearch = async () => {
|
|
756
|
+
* await search("What is ZetaChain?");
|
|
757
|
+
* };
|
|
758
|
+
* ```
|
|
759
|
+
*/
|
|
760
|
+
declare function useSearch(options?: UseSearchOptions): UseSearchResult;
|
|
761
|
+
|
|
762
|
+
type UseImageGenerationOptions = {
|
|
763
|
+
/**
|
|
764
|
+
* Custom function to get auth token for API calls
|
|
765
|
+
*/
|
|
766
|
+
getToken?: () => Promise<string | null>;
|
|
767
|
+
/**
|
|
768
|
+
* Optional base URL for the API requests.
|
|
769
|
+
*/
|
|
770
|
+
baseUrl?: string;
|
|
771
|
+
/**
|
|
772
|
+
* Callback function to be called when the generation finishes successfully.
|
|
773
|
+
*/
|
|
774
|
+
onFinish?: (response: LlmapiImageGenerationResponse) => void;
|
|
775
|
+
/**
|
|
776
|
+
* Callback function to be called when an unexpected error is encountered.
|
|
777
|
+
*/
|
|
778
|
+
onError?: (error: Error) => void;
|
|
779
|
+
};
|
|
780
|
+
type GenerateImageArgs = LlmapiImageGenerationRequest;
|
|
781
|
+
type GenerateImageResult = {
|
|
782
|
+
data: LlmapiImageGenerationResponse;
|
|
783
|
+
error: null;
|
|
784
|
+
} | {
|
|
785
|
+
data: null;
|
|
786
|
+
error: string;
|
|
787
|
+
};
|
|
788
|
+
type UseImageGenerationResult = {
|
|
789
|
+
isLoading: boolean;
|
|
790
|
+
generateImage: (args: GenerateImageArgs) => Promise<GenerateImageResult>;
|
|
791
|
+
stop: () => void;
|
|
792
|
+
};
|
|
793
|
+
/**
|
|
794
|
+
* React hook for generating images using the LLM API.
|
|
795
|
+
*/
|
|
796
|
+
declare function useImageGeneration(options?: UseImageGenerationOptions): UseImageGenerationResult;
|
|
797
|
+
|
|
557
798
|
/**
|
|
558
799
|
* Format memories into a context string that can be included in chat messages
|
|
559
800
|
* @param memories Array of memories with similarity scores
|
|
@@ -605,4 +846,4 @@ declare function executeTool(tool: ClientTool, params: Record<string, unknown>):
|
|
|
605
846
|
error?: string;
|
|
606
847
|
}>;
|
|
607
848
|
|
|
608
|
-
export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, selectTool, useChat, useEncryption, useMemory, useModels };
|
|
849
|
+
export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, selectTool, useChat, useEncryption, useImageGeneration, useMemory, useModels, useSearch };
|
package/dist/react/index.mjs
CHANGED
|
@@ -1745,12 +1745,32 @@ var postApiV1Embeddings = (options) => {
|
|
|
1745
1745
|
}
|
|
1746
1746
|
});
|
|
1747
1747
|
};
|
|
1748
|
+
var postApiV1ImagesGenerations = (options) => {
|
|
1749
|
+
return (options.client ?? client).post({
|
|
1750
|
+
url: "/api/v1/images/generations",
|
|
1751
|
+
...options,
|
|
1752
|
+
headers: {
|
|
1753
|
+
"Content-Type": "application/json",
|
|
1754
|
+
...options.headers
|
|
1755
|
+
}
|
|
1756
|
+
});
|
|
1757
|
+
};
|
|
1748
1758
|
var getApiV1Models = (options) => {
|
|
1749
1759
|
return (options?.client ?? client).get({
|
|
1750
1760
|
url: "/api/v1/models",
|
|
1751
1761
|
...options
|
|
1752
1762
|
});
|
|
1753
1763
|
};
|
|
1764
|
+
var postApiV1Search = (options) => {
|
|
1765
|
+
return (options.client ?? client).post({
|
|
1766
|
+
url: "/api/v1/search",
|
|
1767
|
+
...options,
|
|
1768
|
+
headers: {
|
|
1769
|
+
"Content-Type": "application/json",
|
|
1770
|
+
...options.headers
|
|
1771
|
+
}
|
|
1772
|
+
});
|
|
1773
|
+
};
|
|
1754
1774
|
|
|
1755
1775
|
// src/lib/memory/constants.ts
|
|
1756
1776
|
var DEFAULT_LOCAL_EMBEDDING_MODEL = "Snowflake/snowflake-arctic-embed-xs";
|
|
@@ -2228,6 +2248,172 @@ function useModels(options = {}) {
|
|
|
2228
2248
|
};
|
|
2229
2249
|
}
|
|
2230
2250
|
|
|
2251
|
+
// src/react/useSearch.ts
import { useCallback as useCallback4, useEffect as useEffect4, useRef as useRef5, useState as useState3 } from "react";
// React hook wrapping POST /api/v1/search.
// Exposes `search(query, searchOptions)` plus loading/result/error state.
// Only one request is in flight at a time: starting a new search aborts the
// previous one, and unmounting aborts any pending request.
// `options.getToken` (optional) supplies a bearer token; `options.baseUrl`
// defaults to the module-level BASE_URL constant (defined elsewhere in this
// bundle); `options.onError` is invoked with non-abort failures.
function useSearch(options = {}) {
  const { getToken, baseUrl = BASE_URL, onError } = options;
  const [isLoading, setIsLoading] = useState3(false);
  const [results, setResults] = useState3(null);
  const [response, setResponse] = useState3(null);
  const [error, setError] = useState3(null);
  // Holds the AbortController of the in-flight request, if any.
  const abortControllerRef = useRef5(null);
  // Abort any pending request when the component unmounts.
  useEffect4(() => {
    return () => {
      if (abortControllerRef.current) {
        abortControllerRef.current.abort();
        abortControllerRef.current = null;
      }
    };
  }, []);
  const search = useCallback4(
    async (query, searchOptions = {}) => {
      // Cancel any previous in-flight search before starting a new one.
      if (abortControllerRef.current) {
        abortControllerRef.current.abort();
      }
      const abortController = new AbortController();
      abortControllerRef.current = abortController;
      // Reset all state for the new request.
      setIsLoading(true);
      setError(null);
      setResults(null);
      setResponse(null);
      try {
        let token;
        if (getToken) {
          // Coerce a null token to undefined so the Authorization header is omitted.
          token = await getToken() ?? void 0;
        }
        // The token fetch may have outlived an abort (e.g. unmount); bail out quietly.
        if (abortController.signal.aborted) return null;
        // API accepts an array of query strings; normalize a single string.
        const queryArray = Array.isArray(query) ? query : [query];
        const res = await postApiV1Search({
          baseUrl,
          body: {
            query: queryArray,
            ...searchOptions
          },
          headers: token ? {
            Authorization: `Bearer ${token}`
          } : void 0,
          signal: abortController.signal
        });
        if (res.error) {
          const errorMsg = res.error.error || res.error.message || "Failed to perform search";
          throw new Error(errorMsg);
        }
        if (res.data) {
          setResponse(res.data);
          setResults(res.data.results || []);
          return res.data;
        }
        return null;
      } catch (err) {
        // Aborted requests are expected (superseded or unmounted) — not an error.
        if (err instanceof Error && err.name === "AbortError") {
          return null;
        }
        const errorObj = err instanceof Error ? err : new Error(String(err));
        setError(errorObj);
        if (onError) {
          onError(errorObj);
        }
        return null;
      } finally {
        // Only clear loading state if this request is still the current one;
        // a newer search may already own abortControllerRef.
        if (abortControllerRef.current === abortController) {
          setIsLoading(false);
          abortControllerRef.current = null;
        }
      }
    },
    [baseUrl, getToken, onError]
  );
  return {
    isLoading,
    search,
    results,
    response,
    error
  };
}
|
|
2334
|
+
|
|
2335
|
+
// src/react/useImageGeneration.ts
import { useCallback as useCallback5, useEffect as useEffect5, useRef as useRef6, useState as useState4 } from "react";
// React hook wrapping POST /api/v1/images/generations.
// Exposes `generateImage(args)` (returns { data, error }), `stop()` to cancel
// the in-flight request, and `isLoading`. Unlike useSearch, authentication is
// mandatory here: a missing getToken or token is reported as an error.
// `options.onFinish` receives the successful result; `options.onError`
// receives non-abort failures as Error objects.
function useImageGeneration(options = {}) {
  const { getToken, baseUrl = BASE_URL, onFinish, onError } = options;
  const [isLoading, setIsLoading] = useState4(false);
  // Holds the AbortController of the in-flight request, if any.
  const abortControllerRef = useRef6(null);
  // Abort any pending request when the component unmounts.
  useEffect5(() => {
    return () => {
      if (abortControllerRef.current) {
        abortControllerRef.current.abort();
        abortControllerRef.current = null;
      }
    };
  }, []);
  // Manual cancellation of the current request.
  const stop = useCallback5(() => {
    if (abortControllerRef.current) {
      abortControllerRef.current.abort();
      abortControllerRef.current = null;
    }
  }, []);
  const generateImage = useCallback5(
    async (args) => {
      // Cancel any previous in-flight generation before starting a new one.
      if (abortControllerRef.current) {
        abortControllerRef.current.abort();
      }
      const abortController = new AbortController();
      abortControllerRef.current = abortController;
      setIsLoading(true);
      try {
        // Auth is required for this endpoint: fail fast without a token source.
        if (!getToken) {
          throw new Error("Token getter function is required.");
        }
        const token = await getToken();
        if (!token) {
          throw new Error("No access token available.");
        }
        const response = await postApiV1ImagesGenerations({
          baseUrl,
          body: args,
          headers: {
            Authorization: `Bearer ${token}`
          },
          signal: abortController.signal
        });
        if (response.error) {
          const errorMsg = response.error.error || "Failed to generate image";
          throw new Error(errorMsg);
        }
        if (!response.data) {
          throw new Error("No data received from image generation API");
        }
        const result = response.data;
        if (onFinish) {
          onFinish(result);
        }
        return { data: result, error: null };
      } catch (err) {
        // Aborts (via stop(), a newer request, or unmount) are reported in the
        // return value but are not routed through onError.
        if (err instanceof Error && err.name === "AbortError") {
          return { data: null, error: "Request aborted" };
        }
        const errorMsg = err instanceof Error ? err.message : "Failed to generate image.";
        const errorObj = err instanceof Error ? err : new Error(errorMsg);
        if (onError) {
          onError(errorObj);
        }
        // Callers get a string error message; onError gets the Error object.
        return { data: null, error: errorMsg };
      } finally {
        // Only clear loading state if this request is still the current one;
        // a newer generateImage call may already own abortControllerRef.
        if (abortControllerRef.current === abortController) {
          setIsLoading(false);
          abortControllerRef.current = null;
        }
      }
    },
    [getToken, baseUrl, onFinish, onError]
  );
  return {
    isLoading,
    generateImage,
    stop
  };
}
|
|
2416
|
+
|
|
2231
2417
|
// src/lib/memory/chat.ts
|
|
2232
2418
|
var formatMemoriesForChat = (memories, format = "compact") => {
|
|
2233
2419
|
if (memories.length === 0) {
|
|
@@ -2294,6 +2480,8 @@ export {
|
|
|
2294
2480
|
selectTool,
|
|
2295
2481
|
useChat,
|
|
2296
2482
|
useEncryption,
|
|
2483
|
+
useImageGeneration,
|
|
2297
2484
|
useMemory,
|
|
2298
|
-
useModels
|
|
2485
|
+
useModels,
|
|
2486
|
+
useSearch
|
|
2299
2487
|
};
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@reverbia/sdk",
|
|
3
|
-
"version": "1.0.0-next.
|
|
3
|
+
"version": "1.0.0-next.20251203130707",
|
|
4
4
|
"description": "",
|
|
5
5
|
"main": "./dist/index.cjs",
|
|
6
6
|
"module": "./dist/index.mjs",
|
|
@@ -57,7 +57,7 @@
|
|
|
57
57
|
"homepage": "https://github.com/zeta-chain/ai-sdk#readme",
|
|
58
58
|
"dependencies": {
|
|
59
59
|
"@huggingface/transformers": "^3.8.0",
|
|
60
|
-
"@reverbia/portal": "1.0.0-next.
|
|
60
|
+
"@reverbia/portal": "1.0.0-next.20251202220311",
|
|
61
61
|
"ai": "5.0.93"
|
|
62
62
|
},
|
|
63
63
|
"devDependencies": {
|