@reverbia/sdk 1.0.0-next.20251202085655 → 1.0.0-next.20251202085701
This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- package/dist/react/index.cjs +114 -2
- package/dist/react/index.d.mts +124 -1
- package/dist/react/index.d.ts +124 -1
- package/dist/react/index.mjs +112 -1
- package/package.json +1 -1
package/dist/react/index.cjs
CHANGED
@@ -38,7 +38,8 @@ __export(index_exports, {
   formatMemoriesForChat: () => formatMemoriesForChat,
   useChat: () => useChat,
   useEncryption: () => useEncryption,
-  useMemory: () => useMemory
+  useMemory: () => useMemory,
+  useModels: () => useModels
 });
 module.exports = __toCommonJS(index_exports);
 
@@ -1409,6 +1410,12 @@ var postApiV1Embeddings = (options) => {
     }
   });
 };
+var getApiV1Models = (options) => {
+  return (options?.client ?? client).get({
+    url: "/api/v1/models",
+    ...options
+  });
+};
 
 // src/lib/memory/embeddings.ts
 var generateEmbeddingForText = async (text, options = {}) => {
@@ -1744,6 +1751,110 @@ function useMemory(options = {}) {
   };
 }
 
+// src/react/useModels.ts
+var import_react4 = require("react");
+function useModels(options = {}) {
+  const { getToken, baseUrl = BASE_URL, provider, autoFetch = true } = options;
+  const [models, setModels] = (0, import_react4.useState)([]);
+  const [isLoading, setIsLoading] = (0, import_react4.useState)(false);
+  const [error, setError] = (0, import_react4.useState)(null);
+  const getTokenRef = (0, import_react4.useRef)(getToken);
+  const baseUrlRef = (0, import_react4.useRef)(baseUrl);
+  const providerRef = (0, import_react4.useRef)(provider);
+  const abortControllerRef = (0, import_react4.useRef)(null);
+  (0, import_react4.useEffect)(() => {
+    getTokenRef.current = getToken;
+    baseUrlRef.current = baseUrl;
+    providerRef.current = provider;
+  });
+  (0, import_react4.useEffect)(() => {
+    return () => {
+      if (abortControllerRef.current) {
+        abortControllerRef.current.abort();
+        abortControllerRef.current = null;
+      }
+    };
+  }, []);
+  const fetchModels = (0, import_react4.useCallback)(async () => {
+    if (abortControllerRef.current) {
+      abortControllerRef.current.abort();
+    }
+    const abortController = new AbortController();
+    abortControllerRef.current = abortController;
+    const signal = abortController.signal;
+    setIsLoading(true);
+    setError(null);
+    try {
+      let token;
+      if (getTokenRef.current) {
+        token = await getTokenRef.current() ?? void 0;
+      }
+      if (signal.aborted) return;
+      const headers = {};
+      if (token) {
+        headers["Authorization"] = `Bearer ${token}`;
+      }
+      let allModels = [];
+      let nextPageToken;
+      do {
+        if (signal.aborted) return;
+        const response = await getApiV1Models({
+          baseUrl: baseUrlRef.current,
+          headers,
+          query: {
+            provider: providerRef.current,
+            page_token: nextPageToken
+          },
+          signal
+        });
+        if (response.error) {
+          const errorMsg = response.error.error ?? "Failed to fetch models";
+          throw new Error(errorMsg);
+        }
+        if (response.data) {
+          const newModels = response.data.data || [];
+          allModels = [...allModels, ...newModels];
+          nextPageToken = response.data.next_page_token;
+        }
+      } while (nextPageToken);
+      if (signal.aborted) return;
+      setModels(allModels);
+    } catch (err) {
+      if (err instanceof Error && err.name === "AbortError") {
+        return;
+      }
+      setError(err instanceof Error ? err : new Error(String(err)));
+    } finally {
+      if (!signal.aborted) {
+        setIsLoading(false);
+      }
+      if (abortControllerRef.current === abortController) {
+        abortControllerRef.current = null;
+      }
+    }
+  }, []);
+  const refetch = (0, import_react4.useCallback)(async () => {
+    setModels([]);
+    await fetchModels();
+  }, [fetchModels]);
+  const hasFetchedRef = (0, import_react4.useRef)(false);
+  (0, import_react4.useEffect)(() => {
+    if (autoFetch && !hasFetchedRef.current) {
+      hasFetchedRef.current = true;
+      fetchModels();
+    }
+    if (!autoFetch) {
+      hasFetchedRef.current = false;
+    }
+  }, [autoFetch, fetchModels]);
+  return {
+    models,
+    isLoading,
+    error,
+    refetch
+  };
+}
+
 // src/lib/memory/chat.ts
 var formatMemoriesForChat = (memories, format = "compact") => {
   if (memories.length === 0) {
@@ -1808,5 +1919,6 @@ var extractConversationContext = (messages, maxMessages = 3) => {
   formatMemoriesForChat,
   useChat,
   useEncryption,
-  useMemory
+  useMemory,
+  useModels
 });
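For context, a minimal usage sketch of the newly added useModels hook, based on the hook body above. The component, the fetchAuthToken stand-in, and the "@reverbia/sdk/react" import path are illustrative assumptions, not taken from this diff.

// Usage sketch (assumed: the react build is importable as "@reverbia/sdk/react";
// fetchAuthToken is an app-specific stand-in, not part of the SDK).
import { useModels } from "@reverbia/sdk/react";

const fetchAuthToken = async () => window.localStorage.getItem("auth_token");

export function ModelPicker() {
  const { models, isLoading, error, refetch } = useModels({
    getToken: fetchAuthToken, // optional; omit for unauthenticated calls
    provider: "openai",       // optional provider filter
  });

  if (isLoading) return <p>Loading models…</p>;
  if (error) return <p>Failed to load models: {error.message}</p>;

  return (
    <div>
      <ul>
        {models.map((m) => (
          <li key={m.id ?? m.name}>{m.name ?? m.id}</li>
        ))}
      </ul>
      <button onClick={() => refetch()}>Refresh</button>
    </div>
  );
}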
package/dist/react/index.d.mts
CHANGED
@@ -77,6 +77,99 @@ type LlmapiMessage = {
     content?: string;
     role?: LlmapiRole;
 };
+type LlmapiModel = {
+    architecture?: LlmapiModelArchitecture;
+    /**
+     * CanonicalSlug is the canonical slug for the model
+     */
+    canonical_slug?: string;
+    /**
+     * ContextLength is the maximum context length in tokens
+     */
+    context_length?: number;
+    /**
+     * Created is the Unix timestamp of when the model was created
+     */
+    created?: number;
+    /**
+     * DefaultParameters contains default parameter values
+     */
+    default_parameters?: {
+        [key: string]: unknown;
+    };
+    /**
+     * Description describes the model and its capabilities
+     */
+    description?: string;
+    /**
+     * HuggingFaceID is the Hugging Face model identifier
+     */
+    hugging_face_id?: string;
+    /**
+     * ID is the model identifier (e.g., "openai/gpt-4")
+     */
+    id?: string;
+    /**
+     * MaxInputTokens is the maximum input tokens
+     */
+    max_input_tokens?: number;
+    /**
+     * MaxOutputTokens is the maximum output tokens
+     */
+    max_output_tokens?: number;
+    /**
+     * Name is the human-readable model name (optional)
+     */
+    name?: string;
+    /**
+     * OwnedBy is the organization that owns the model
+     */
+    owned_by?: string;
+    per_request_limits?: LlmapiModelPerRequestLimits;
+    pricing?: LlmapiModelPricing;
+    /**
+     * SupportedMethods is a list of supported API methods
+     */
+    supported_methods?: Array<string>;
+    /**
+     * SupportedParameters is a list of supported parameter names
+     */
+    supported_parameters?: Array<string>;
+    top_provider?: LlmapiModelTopProvider;
+};
+/**
+ * Architecture describes the model's technical capabilities
+ */
+type LlmapiModelArchitecture = {
+    instruct_type?: string;
+    modality?: string;
+    prompt_formatting?: string;
+    tokenizer?: string;
+};
+/**
+ * PerRequestLimits contains rate limiting information
+ */
+type LlmapiModelPerRequestLimits = {
+    completion_tokens?: number;
+    prompt_tokens?: number;
+};
+/**
+ * Pricing contains the pricing structure for using this model
+ */
+type LlmapiModelPricing = {
+    completion?: string;
+    image?: string;
+    prompt?: string;
+    request?: string;
+};
+/**
+ * TopProvider contains configuration details for the primary provider
+ */
+type LlmapiModelTopProvider = {
+    context_length?: number;
+    is_moderated?: boolean;
+    max_completion_tokens?: number;
+};
 /**
  * Role is the message role (system, user, assistant)
  */
@@ -296,6 +389,36 @@ type UseMemoryResult = {
  */
 declare function useMemory(options?: UseMemoryOptions): UseMemoryResult;
 
+type UseModelsOptions = {
+    /**
+     * Custom function to get auth token for API calls
+     */
+    getToken?: () => Promise<string | null>;
+    /**
+     * Optional base URL for the API requests.
+     */
+    baseUrl?: string;
+    /**
+     * Optional filter for specific provider (e.g. "openai")
+     */
+    provider?: string;
+    /**
+     * Whether to fetch models automatically on mount (default: true)
+     */
+    autoFetch?: boolean;
+};
+type UseModelsResult = {
+    models: LlmapiModel[];
+    isLoading: boolean;
+    error: Error | null;
+    refetch: () => Promise<void>;
+};
+/**
+ * React hook for fetching available LLM models.
+ * Automatically fetches all available models.
+ */
+declare function useModels(options?: UseModelsOptions): UseModelsResult;
+
 /**
  * Format memories into a context string that can be included in chat messages
  * @param memories Array of memories with similarity scores
@@ -325,4 +448,4 @@ declare const extractConversationContext: (messages: Array<{
     content: string;
 }>, maxMessages?: number) => string;
 
-export { createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, extractConversationContext, formatMemoriesForChat, useChat, useEncryption, useMemory };
+export { createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, extractConversationContext, formatMemoriesForChat, useChat, useEncryption, useMemory, useModels };
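A small sketch of how the model fields declared above might be consumed. Since LlmapiModel itself is not in the export list, the element type is derived from the hook's return value; the pickModel helper and the import path are illustrative assumptions.

// Illustrative helper; LlmapiModel is not re-exported above, so the element
// type is derived from useModels' declared return type.
import { useModels } from "@reverbia/sdk/react"; // assumed entry point

type Model = ReturnType<typeof useModels>["models"][number];

// Cheapest model (by prompt price) that satisfies a minimum context length.
function pickModel(models: Model[], minContext: number): Model | undefined {
  return models
    .filter((m) => (m.context_length ?? 0) >= minContext)
    .sort(
      (a, b) =>
        Number(a.pricing?.prompt ?? Infinity) -
        Number(b.pricing?.prompt ?? Infinity)
    )[0];
}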
package/dist/react/index.d.ts
CHANGED
@@ -77,6 +77,99 @@ type LlmapiMessage = {
     content?: string;
     role?: LlmapiRole;
 };
+type LlmapiModel = {
+    architecture?: LlmapiModelArchitecture;
+    /**
+     * CanonicalSlug is the canonical slug for the model
+     */
+    canonical_slug?: string;
+    /**
+     * ContextLength is the maximum context length in tokens
+     */
+    context_length?: number;
+    /**
+     * Created is the Unix timestamp of when the model was created
+     */
+    created?: number;
+    /**
+     * DefaultParameters contains default parameter values
+     */
+    default_parameters?: {
+        [key: string]: unknown;
+    };
+    /**
+     * Description describes the model and its capabilities
+     */
+    description?: string;
+    /**
+     * HuggingFaceID is the Hugging Face model identifier
+     */
+    hugging_face_id?: string;
+    /**
+     * ID is the model identifier (e.g., "openai/gpt-4")
+     */
+    id?: string;
+    /**
+     * MaxInputTokens is the maximum input tokens
+     */
+    max_input_tokens?: number;
+    /**
+     * MaxOutputTokens is the maximum output tokens
+     */
+    max_output_tokens?: number;
+    /**
+     * Name is the human-readable model name (optional)
+     */
+    name?: string;
+    /**
+     * OwnedBy is the organization that owns the model
+     */
+    owned_by?: string;
+    per_request_limits?: LlmapiModelPerRequestLimits;
+    pricing?: LlmapiModelPricing;
+    /**
+     * SupportedMethods is a list of supported API methods
+     */
+    supported_methods?: Array<string>;
+    /**
+     * SupportedParameters is a list of supported parameter names
+     */
+    supported_parameters?: Array<string>;
+    top_provider?: LlmapiModelTopProvider;
+};
+/**
+ * Architecture describes the model's technical capabilities
+ */
+type LlmapiModelArchitecture = {
+    instruct_type?: string;
+    modality?: string;
+    prompt_formatting?: string;
+    tokenizer?: string;
+};
+/**
+ * PerRequestLimits contains rate limiting information
+ */
+type LlmapiModelPerRequestLimits = {
+    completion_tokens?: number;
+    prompt_tokens?: number;
+};
+/**
+ * Pricing contains the pricing structure for using this model
+ */
+type LlmapiModelPricing = {
+    completion?: string;
+    image?: string;
+    prompt?: string;
+    request?: string;
+};
+/**
+ * TopProvider contains configuration details for the primary provider
+ */
+type LlmapiModelTopProvider = {
+    context_length?: number;
+    is_moderated?: boolean;
+    max_completion_tokens?: number;
+};
 /**
  * Role is the message role (system, user, assistant)
  */
@@ -296,6 +389,36 @@ type UseMemoryResult = {
  */
 declare function useMemory(options?: UseMemoryOptions): UseMemoryResult;
 
+type UseModelsOptions = {
+    /**
+     * Custom function to get auth token for API calls
+     */
+    getToken?: () => Promise<string | null>;
+    /**
+     * Optional base URL for the API requests.
+     */
+    baseUrl?: string;
+    /**
+     * Optional filter for specific provider (e.g. "openai")
+     */
+    provider?: string;
+    /**
+     * Whether to fetch models automatically on mount (default: true)
+     */
+    autoFetch?: boolean;
+};
+type UseModelsResult = {
+    models: LlmapiModel[];
+    isLoading: boolean;
+    error: Error | null;
+    refetch: () => Promise<void>;
+};
+/**
+ * React hook for fetching available LLM models.
+ * Automatically fetches all available models.
+ */
+declare function useModels(options?: UseModelsOptions): UseModelsResult;
+
 /**
  * Format memories into a context string that can be included in chat messages
  * @param memories Array of memories with similarity scores
@@ -325,4 +448,4 @@ declare const extractConversationContext: (messages: Array<{
     content: string;
 }>, maxMessages?: number) => string;
 
-export { createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, extractConversationContext, formatMemoriesForChat, useChat, useEncryption, useMemory };
+export { createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, extractConversationContext, formatMemoriesForChat, useChat, useEncryption, useMemory, useModels };
package/dist/react/index.mjs
CHANGED
@@ -1365,6 +1365,12 @@ var postApiV1Embeddings = (options) => {
     }
   });
 };
+var getApiV1Models = (options) => {
+  return (options?.client ?? client).get({
+    url: "/api/v1/models",
+    ...options
+  });
+};
 
 // src/lib/memory/embeddings.ts
 var generateEmbeddingForText = async (text, options = {}) => {
@@ -1700,6 +1706,110 @@ function useMemory(options = {}) {
   };
 }
 
+// src/react/useModels.ts
+import { useCallback as useCallback3, useEffect as useEffect3, useRef as useRef4, useState as useState2 } from "react";
+function useModels(options = {}) {
+  const { getToken, baseUrl = BASE_URL, provider, autoFetch = true } = options;
+  const [models, setModels] = useState2([]);
+  const [isLoading, setIsLoading] = useState2(false);
+  const [error, setError] = useState2(null);
+  const getTokenRef = useRef4(getToken);
+  const baseUrlRef = useRef4(baseUrl);
+  const providerRef = useRef4(provider);
+  const abortControllerRef = useRef4(null);
+  useEffect3(() => {
+    getTokenRef.current = getToken;
+    baseUrlRef.current = baseUrl;
+    providerRef.current = provider;
+  });
+  useEffect3(() => {
+    return () => {
+      if (abortControllerRef.current) {
+        abortControllerRef.current.abort();
+        abortControllerRef.current = null;
+      }
+    };
+  }, []);
+  const fetchModels = useCallback3(async () => {
+    if (abortControllerRef.current) {
+      abortControllerRef.current.abort();
+    }
+    const abortController = new AbortController();
+    abortControllerRef.current = abortController;
+    const signal = abortController.signal;
+    setIsLoading(true);
+    setError(null);
+    try {
+      let token;
+      if (getTokenRef.current) {
+        token = await getTokenRef.current() ?? void 0;
+      }
+      if (signal.aborted) return;
+      const headers = {};
+      if (token) {
+        headers["Authorization"] = `Bearer ${token}`;
+      }
+      let allModels = [];
+      let nextPageToken;
+      do {
+        if (signal.aborted) return;
+        const response = await getApiV1Models({
+          baseUrl: baseUrlRef.current,
+          headers,
+          query: {
+            provider: providerRef.current,
+            page_token: nextPageToken
+          },
+          signal
+        });
+        if (response.error) {
+          const errorMsg = response.error.error ?? "Failed to fetch models";
+          throw new Error(errorMsg);
+        }
+        if (response.data) {
+          const newModels = response.data.data || [];
+          allModels = [...allModels, ...newModels];
+          nextPageToken = response.data.next_page_token;
+        }
+      } while (nextPageToken);
+      if (signal.aborted) return;
+      setModels(allModels);
+    } catch (err) {
+      if (err instanceof Error && err.name === "AbortError") {
+        return;
+      }
+      setError(err instanceof Error ? err : new Error(String(err)));
+    } finally {
+      if (!signal.aborted) {
+        setIsLoading(false);
+      }
+      if (abortControllerRef.current === abortController) {
+        abortControllerRef.current = null;
+      }
+    }
+  }, []);
+  const refetch = useCallback3(async () => {
+    setModels([]);
+    await fetchModels();
+  }, [fetchModels]);
+  const hasFetchedRef = useRef4(false);
+  useEffect3(() => {
+    if (autoFetch && !hasFetchedRef.current) {
+      hasFetchedRef.current = true;
+      fetchModels();
+    }
+    if (!autoFetch) {
+      hasFetchedRef.current = false;
+    }
+  }, [autoFetch, fetchModels]);
+  return {
+    models,
+    isLoading,
+    error,
+    refetch
+  };
+}
+
 // src/lib/memory/chat.ts
 var formatMemoriesForChat = (memories, format = "compact") => {
   if (memories.length === 0) {
@@ -1763,5 +1873,6 @@ export {
   formatMemoriesForChat,
   useChat,
   useEncryption,
-  useMemory
+  useMemory,
+  useModels
 };
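As the hook body shows, fetchModels pages through /api/v1/models until next_page_token is exhausted and accumulates every page, and refetch clears the list before fetching again. A sketch of deferring that work with autoFetch: false; the component and the "@reverbia/sdk/react" import path are illustrative assumptions.

// Sketch: skip the fetch-on-mount effect and load models on demand.
import { useModels } from "@reverbia/sdk/react"; // assumed entry point

export function LazyModelList() {
  // autoFetch: false means nothing is requested until refetch() is called.
  const { models, isLoading, refetch } = useModels({ autoFetch: false });

  return (
    <div>
      <button onClick={() => refetch()} disabled={isLoading}>
        {isLoading ? "Loading…" : "Load models"}
      </button>
      <p>{models.length} models loaded</p>
    </div>
  );
}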