@reverbia/sdk 1.0.0-next.20251208094446 → 1.0.0-next.20251208111334
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +8 -8
- package/dist/index.d.ts +8 -8
- package/dist/react/index.cjs +147 -29
- package/dist/react/index.d.mts +36 -30
- package/dist/react/index.d.ts +36 -30
- package/dist/react/index.mjs +134 -17
- package/package.json +4 -3
package/dist/index.d.mts
CHANGED
@@ -40,11 +40,11 @@ type LlmapiChatCompletionRequest = {
     /**
      * Messages is the conversation history
      */
-    messages
+    messages: Array<LlmapiMessage>;
     /**
      * Model is the model identifier
      */
-    model
+    model: string;
     /**
      * Stream indicates if response should be streamed
      */
@@ -149,11 +149,11 @@ type LlmapiEmbeddingRequest = {
     /**
      * Input text or tokens to embed (can be string, []string, []int, or [][]int)
      */
-    input
+    input: unknown;
     /**
      * Model identifier in 'provider/model' format
      */
-    model
+    model: string;
 };
 type LlmapiEmbeddingResponse = {
     /**
@@ -219,11 +219,11 @@ type LlmapiImageGenerationRequest = {
     /**
      * Model is the model identifier to use for generation (e.g., "gpt-image-1").
      */
-    model
+    model: string;
     /**
      * Prompt is the text description of the desired image.
      */
-    prompt
+    prompt: string;
     /**
      * Quality targets a quality preset (e.g., "auto", "high").
      */
@@ -466,7 +466,7 @@ type LlmapiSearchRequest = {
     /**
      * Search query. Can be a single string or array of strings.
      */
-    query
+    query: Array<string>;
     /**
      * List of domains to filter results (max 20 domains).
      */
@@ -474,7 +474,7 @@ type LlmapiSearchRequest = {
     /**
      * The search provider to use.
      */
-    search_tool_name
+    search_tool_name: string;
 };
 type LlmapiSearchResponse = {
     extra_fields?: LlmapiSearchExtraFields;
package/dist/index.d.ts
CHANGED
@@ -40,11 +40,11 @@ type LlmapiChatCompletionRequest = {
     /**
      * Messages is the conversation history
      */
-    messages
+    messages: Array<LlmapiMessage>;
     /**
      * Model is the model identifier
      */
-    model
+    model: string;
     /**
      * Stream indicates if response should be streamed
      */
@@ -149,11 +149,11 @@ type LlmapiEmbeddingRequest = {
     /**
      * Input text or tokens to embed (can be string, []string, []int, or [][]int)
      */
-    input
+    input: unknown;
     /**
      * Model identifier in 'provider/model' format
      */
-    model
+    model: string;
 };
 type LlmapiEmbeddingResponse = {
     /**
@@ -219,11 +219,11 @@ type LlmapiImageGenerationRequest = {
     /**
      * Model is the model identifier to use for generation (e.g., "gpt-image-1").
      */
-    model
+    model: string;
     /**
      * Prompt is the text description of the desired image.
      */
-    prompt
+    prompt: string;
     /**
      * Quality targets a quality preset (e.g., "auto", "high").
      */
@@ -466,7 +466,7 @@ type LlmapiSearchRequest = {
     /**
      * Search query. Can be a single string or array of strings.
      */
-    query
+    query: Array<string>;
     /**
      * List of domains to filter results (max 20 domains).
      */
@@ -474,7 +474,7 @@ type LlmapiSearchRequest = {
     /**
      * The search provider to use.
      */
-    search_tool_name
+    search_tool_name: string;
 };
 type LlmapiSearchResponse = {
     extra_fields?: LlmapiSearchExtraFields;
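
Both root declaration bundles make the same change: the regenerated request types now carry explicit, required annotations on fields such as messages, model, input, prompt, query, and search_tool_name (the previous declarations are shown truncated by the diff viewer). A minimal sketch of what a caller now has to supply, assuming LlmapiChatCompletionRequest is exported from the package root and that LlmapiMessage has role/content fields (both assumptions, not confirmed by this diff):

import type { LlmapiChatCompletionRequest } from "@reverbia/sdk"; // assumed export path

// Placeholder values for illustration only.
const request: LlmapiChatCompletionRequest = {
  model: "openai/gpt-4o-mini",                    // now typed as a required string
  messages: [{ role: "user", content: "Hello" }], // now typed as a required Array<LlmapiMessage>
};
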
package/dist/react/index.cjs
CHANGED
@@ -47400,6 +47400,7 @@ __export(index_exports, {
   useImageGeneration: () => useImageGeneration,
   useMemory: () => useMemory,
   useModels: () => useModels,
+  useOCR: () => useOCR,
   usePdf: () => usePdf,
   useSearch: () => useSearch
 });
@@ -49064,7 +49065,7 @@ var generateEmbeddingForText = async (text, options = {}) => {
     baseUrl,
     body: {
       input: text,
-      model: model2
+      model: model2 ?? DEFAULT_API_EMBEDDING_MODEL
     },
     headers: {
       Authorization: `Bearer ${token}`
@@ -49439,6 +49440,31 @@ async function extractTextFromPdf(pdfDataUrl) {
     throw error;
   }
 }
+async function convertPdfToImages(pdfDataUrl) {
+  const images = [];
+  try {
+    const loadingTask = pdfjs.getDocument(pdfDataUrl);
+    const pdf = await loadingTask.promise;
+    for (let i = 1; i <= pdf.numPages; i++) {
+      const page = await pdf.getPage(i);
+      const viewport = page.getViewport({ scale: 1.5 });
+      const canvas = document.createElement("canvas");
+      const context = canvas.getContext("2d");
+      if (!context) continue;
+      canvas.height = viewport.height;
+      canvas.width = viewport.width;
+      await page.render({
+        canvasContext: context,
+        viewport
+      }).promise;
+      images.push(canvas.toDataURL("image/png"));
+    }
+  } catch (error) {
+    console.error("Error converting PDF to images:", error);
+    throw error;
+  }
+  return images;
+}
 
 // src/react/usePdf.ts
 var PDF_MIME_TYPE = "application/pdf";
@@ -49491,23 +49517,114 @@ ${text}`;
   };
 }
 
-// src/react/
+// src/react/useOCR.ts
 var import_react4 = require("react");
+var import_tesseract = __toESM(require("tesseract.js"));
+function useOCR() {
+  const [isProcessing, setIsProcessing] = (0, import_react4.useState)(false);
+  const [error, setError] = (0, import_react4.useState)(null);
+  const extractOCRContext = (0, import_react4.useCallback)(
+    async (files) => {
+      setIsProcessing(true);
+      setError(null);
+      try {
+        if (files.length === 0) {
+          return null;
+        }
+        const contexts = await Promise.all(
+          files.map(async (file) => {
+            try {
+              let imagesToProcess = [];
+              const language = file.language || "eng";
+              const filename = file.filename || (file.url instanceof File ? file.url.name : "");
+              let isPdf = false;
+              if (typeof file.url === "string") {
+                isPdf = file.url.toLowerCase().endsWith(".pdf") || (filename?.toLowerCase().endsWith(".pdf") ?? false);
+              } else if (file.url instanceof Blob) {
+                isPdf = file.url.type === "application/pdf" || (filename?.toLowerCase().endsWith(".pdf") ?? false);
+              }
+              if (isPdf) {
+                let pdfUrl;
+                let shouldRevoke = false;
+                if (typeof file.url === "string") {
+                  pdfUrl = file.url;
+                } else {
+                  pdfUrl = URL.createObjectURL(file.url);
+                  shouldRevoke = true;
+                }
+                try {
+                  const pdfImages = await convertPdfToImages(pdfUrl);
+                  imagesToProcess = pdfImages;
+                } catch (e) {
+                  console.error("Failed to convert PDF to images", e);
+                  throw e;
+                } finally {
+                  if (shouldRevoke) {
+                    URL.revokeObjectURL(pdfUrl);
+                  }
+                }
+              } else {
+                imagesToProcess = [file.url];
+              }
+              const pageTexts = [];
+              for (const image of imagesToProcess) {
+                const result = await import_tesseract.default.recognize(image, language);
+                pageTexts.push(result.data.text);
+              }
+              const text = pageTexts.join("\n\n");
+              if (!text.trim()) {
+                console.warn(
+                  `No text found in OCR source ${filename || "unknown"}`
+                );
+                return null;
+              }
+              return `[Context from OCR attachment ${filename || "unknown"}]:
+${text}`;
+            } catch (err) {
+              console.error(
+                `Failed to process OCR for ${file.filename || "unknown"}:`,
+                err
+              );
+              return null;
+            }
+          })
+        );
+        const mergedContext = contexts.filter(Boolean).join("\n\n");
+        return mergedContext || null;
+      } catch (err) {
+        const processedError = err instanceof Error ? err : new Error(String(err));
+        setError(processedError);
+        throw processedError;
+      } finally {
+        setIsProcessing(false);
+      }
+    },
+    []
+  );
+  return {
+    extractOCRContext,
+    isProcessing,
+    error
+  };
+}
+
+// src/react/useModels.ts
+var import_react5 = require("react");
 function useModels(options = {}) {
   const { getToken, baseUrl = BASE_URL, provider, autoFetch = true } = options;
-  const [models, setModels] = (0,
-  const [isLoading, setIsLoading] = (0,
-  const [error, setError] = (0,
-  const getTokenRef = (0,
-  const baseUrlRef = (0,
-  const providerRef = (0,
-  const abortControllerRef = (0,
-  (0,
+  const [models, setModels] = (0, import_react5.useState)([]);
+  const [isLoading, setIsLoading] = (0, import_react5.useState)(false);
+  const [error, setError] = (0, import_react5.useState)(null);
+  const getTokenRef = (0, import_react5.useRef)(getToken);
+  const baseUrlRef = (0, import_react5.useRef)(baseUrl);
+  const providerRef = (0, import_react5.useRef)(provider);
+  const abortControllerRef = (0, import_react5.useRef)(null);
+  (0, import_react5.useEffect)(() => {
     getTokenRef.current = getToken;
     baseUrlRef.current = baseUrl;
     providerRef.current = provider;
   });
-  (0,
+  (0, import_react5.useEffect)(() => {
     return () => {
       if (abortControllerRef.current) {
         abortControllerRef.current.abort();
@@ -49515,7 +49632,7 @@ function useModels(options = {}) {
       }
     };
   }, []);
-  const fetchModels = (0,
+  const fetchModels = (0, import_react5.useCallback)(async () => {
     if (abortControllerRef.current) {
       abortControllerRef.current.abort();
     }
@@ -49573,12 +49690,12 @@ function useModels(options = {}) {
       }
     }
   }, []);
-  const refetch = (0,
+  const refetch = (0, import_react5.useCallback)(async () => {
     setModels([]);
     await fetchModels();
   }, [fetchModels]);
-  const hasFetchedRef = (0,
-  (0,
+  const hasFetchedRef = (0, import_react5.useRef)(false);
+  (0, import_react5.useEffect)(() => {
     if (autoFetch && !hasFetchedRef.current) {
       hasFetchedRef.current = true;
       fetchModels();
@@ -49596,15 +49713,15 @@ function useModels(options = {}) {
 }
 
 // src/react/useSearch.ts
-var
+var import_react6 = require("react");
 function useSearch(options = {}) {
   const { getToken, baseUrl = BASE_URL, onError } = options;
-  const [isLoading, setIsLoading] = (0,
-  const [results, setResults] = (0,
-  const [response, setResponse] = (0,
-  const [error, setError] = (0,
-  const abortControllerRef = (0,
-  (0,
+  const [isLoading, setIsLoading] = (0, import_react6.useState)(false);
+  const [results, setResults] = (0, import_react6.useState)(null);
+  const [response, setResponse] = (0, import_react6.useState)(null);
+  const [error, setError] = (0, import_react6.useState)(null);
+  const abortControllerRef = (0, import_react6.useRef)(null);
+  (0, import_react6.useEffect)(() => {
     return () => {
       if (abortControllerRef.current) {
         abortControllerRef.current.abort();
@@ -49612,7 +49729,7 @@ function useSearch(options = {}) {
       }
     };
   }, []);
-  const search = (0,
+  const search = (0, import_react6.useCallback)(
     async (query, searchOptions = {}) => {
       if (abortControllerRef.current) {
         abortControllerRef.current.abort();
@@ -49680,12 +49797,12 @@ function useSearch(options = {}) {
 }
 
 // src/react/useImageGeneration.ts
-var
+var import_react7 = require("react");
 function useImageGeneration(options = {}) {
   const { getToken, baseUrl = BASE_URL, onFinish, onError } = options;
-  const [isLoading, setIsLoading] = (0,
-  const abortControllerRef = (0,
-  (0,
+  const [isLoading, setIsLoading] = (0, import_react7.useState)(false);
+  const abortControllerRef = (0, import_react7.useRef)(null);
+  (0, import_react7.useEffect)(() => {
     return () => {
       if (abortControllerRef.current) {
         abortControllerRef.current.abort();
@@ -49693,13 +49810,13 @@ function useImageGeneration(options = {}) {
       }
     };
   }, []);
-  const stop = (0,
+  const stop = (0, import_react7.useCallback)(() => {
     if (abortControllerRef.current) {
       abortControllerRef.current.abort();
       abortControllerRef.current = null;
     }
   }, []);
-  const generateImage = (0,
+  const generateImage = (0, import_react7.useCallback)(
     async (args) => {
       if (abortControllerRef.current) {
         abortControllerRef.current.abort();
@@ -49836,6 +49953,7 @@ init_selector();
   useImageGeneration,
   useMemory,
   useModels,
+  useOCR,
   usePdf,
   useSearch
 });
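
The CJS React bundle adds a useOCR hook built on tesseract.js, together with a convertPdfToImages helper that rasterizes PDF pages through pdfjs before recognition; useModels, useSearch, and useImageGeneration are rebound to the renumbered React imports. A rough usage sketch, assuming the hook is imported from the package's react entry point (an assumption; only the export name is confirmed by this diff) and that the inputs are browser File objects:

import { useOCR } from "@reverbia/sdk/react"; // assumed entry point

function useAttachmentContext() {
  const { extractOCRContext, isProcessing, error } = useOCR();

  // Images are recognized directly; PDFs are first rendered to PNG pages
  // via pdfjs and then each page is passed to tesseract.js.
  const scan = (files: File[]) =>
    extractOCRContext(files.map((file) => ({ url: file, language: "eng" })));

  return { scan, isProcessing, error };
}

The hook resolves to a merged "[Context from OCR attachment ...]" string, or null when no text is found.
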
package/dist/react/index.d.mts
CHANGED
@@ -98,11 +98,11 @@ type LlmapiImageGenerationRequest = {
     /**
      * Model is the model identifier to use for generation (e.g., "gpt-image-1").
      */
-    model
+    model: string;
     /**
      * Prompt is the text description of the desired image.
      */
-    prompt
+    prompt: string;
     /**
      * Quality targets a quality preset (e.g., "auto", "high").
      */
@@ -301,32 +301,6 @@ type LlmapiSearchExtraFields = {
      */
     search_provider?: string;
 };
-type LlmapiSearchRequest = {
-    /**
-     * Country code filter (e.g., "US", "GB", "DE").
-     */
-    country?: string;
-    /**
-     * Maximum number of results to return (1-20). Default: 10.
-     */
-    max_results?: number;
-    /**
-     * Maximum tokens per page to process. Default: 1024.
-     */
-    max_tokens_per_page?: number;
-    /**
-     * Search query. Can be a single string or array of strings.
-     */
-    query?: Array<string>;
-    /**
-     * List of domains to filter results (max 20 domains).
-     */
-    search_domain_filter?: Array<string>;
-    /**
-     * The search provider to use.
-     */
-    search_tool_name?: string;
-};
 type LlmapiSearchResponse = {
     extra_fields?: LlmapiSearchExtraFields;
     /**
@@ -722,6 +696,17 @@ declare function usePdf(): {
     error: Error | null;
 };
 
+interface OCRFile {
+    url: string | File | Blob;
+    filename?: string;
+    language?: string;
+}
+declare function useOCR(): {
+    extractOCRContext: (files: OCRFile[]) => Promise<string | null>;
+    isProcessing: boolean;
+    error: Error | null;
+};
+
 type UseModelsOptions = {
     /**
      * Custom function to get auth token for API calls
@@ -766,7 +751,28 @@ type UseSearchOptions = {
      */
     onError?: (error: Error) => void;
 };
-type SearchOptions =
+type SearchOptions = {
+    /**
+     * Country code for search results (e.g., "us", "gb")
+     */
+    country?: string;
+    /**
+     * Maximum number of results to return
+     */
+    max_results?: number;
+    /**
+     * Maximum tokens per page
+     */
+    max_tokens_per_page?: number;
+    /**
+     * List of domains to filter results (max 20 domains)
+     */
+    search_domain_filter?: string[];
+    /**
+     * The search provider to use
+     */
+    search_tool_name?: string;
+};
 type UseSearchResult = {
     isLoading: boolean;
     search: (query: string | string[], options?: SearchOptions) => Promise<LlmapiSearchResponse | null>;
@@ -880,4 +886,4 @@ declare function executeTool(tool: ClientTool, params: Record<string, unknown>):
     error?: string;
 }>;
 
-export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type PdfFile, type SignMessageFn, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, hasEncryptionKey, requestEncryptionKey, selectTool, useChat, useEncryption, useImageGeneration, useMemory, useModels, usePdf, useSearch };
+export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type OCRFile, type PdfFile, type SignMessageFn, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, hasEncryptionKey, requestEncryptionKey, selectTool, useChat, useEncryption, useImageGeneration, useMemory, useModels, useOCR, usePdf, useSearch };
package/dist/react/index.d.ts
CHANGED
@@ -98,11 +98,11 @@ type LlmapiImageGenerationRequest = {
     /**
      * Model is the model identifier to use for generation (e.g., "gpt-image-1").
      */
-    model
+    model: string;
     /**
      * Prompt is the text description of the desired image.
      */
-    prompt
+    prompt: string;
     /**
      * Quality targets a quality preset (e.g., "auto", "high").
      */
@@ -301,32 +301,6 @@ type LlmapiSearchExtraFields = {
      */
     search_provider?: string;
 };
-type LlmapiSearchRequest = {
-    /**
-     * Country code filter (e.g., "US", "GB", "DE").
-     */
-    country?: string;
-    /**
-     * Maximum number of results to return (1-20). Default: 10.
-     */
-    max_results?: number;
-    /**
-     * Maximum tokens per page to process. Default: 1024.
-     */
-    max_tokens_per_page?: number;
-    /**
-     * Search query. Can be a single string or array of strings.
-     */
-    query?: Array<string>;
-    /**
-     * List of domains to filter results (max 20 domains).
-     */
-    search_domain_filter?: Array<string>;
-    /**
-     * The search provider to use.
-     */
-    search_tool_name?: string;
-};
 type LlmapiSearchResponse = {
     extra_fields?: LlmapiSearchExtraFields;
     /**
@@ -722,6 +696,17 @@ declare function usePdf(): {
     error: Error | null;
 };
 
+interface OCRFile {
+    url: string | File | Blob;
+    filename?: string;
+    language?: string;
+}
+declare function useOCR(): {
+    extractOCRContext: (files: OCRFile[]) => Promise<string | null>;
+    isProcessing: boolean;
+    error: Error | null;
+};
+
 type UseModelsOptions = {
     /**
      * Custom function to get auth token for API calls
@@ -766,7 +751,28 @@ type UseSearchOptions = {
      */
     onError?: (error: Error) => void;
 };
-type SearchOptions =
+type SearchOptions = {
+    /**
+     * Country code for search results (e.g., "us", "gb")
+     */
+    country?: string;
+    /**
+     * Maximum number of results to return
+     */
+    max_results?: number;
+    /**
+     * Maximum tokens per page
+     */
+    max_tokens_per_page?: number;
+    /**
+     * List of domains to filter results (max 20 domains)
+     */
+    search_domain_filter?: string[];
+    /**
+     * The search provider to use
+     */
+    search_tool_name?: string;
+};
 type UseSearchResult = {
     isLoading: boolean;
     search: (query: string | string[], options?: SearchOptions) => Promise<LlmapiSearchResponse | null>;
@@ -880,4 +886,4 @@ declare function executeTool(tool: ClientTool, params: Record<string, unknown>):
     error?: string;
 }>;
 
-export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type PdfFile, type SignMessageFn, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, hasEncryptionKey, requestEncryptionKey, selectTool, useChat, useEncryption, useImageGeneration, useMemory, useModels, usePdf, useSearch };
+export { type ClientTool, DEFAULT_TOOL_SELECTOR_MODEL, type OCRFile, type PdfFile, type SignMessageFn, type ToolExecutionResult, type ToolParameter, type ToolSelectionResult, createMemoryContextSystemMessage, decryptData, decryptDataBytes, encryptData, executeTool, extractConversationContext, formatMemoriesForChat, hasEncryptionKey, requestEncryptionKey, selectTool, useChat, useEncryption, useImageGeneration, useMemory, useModels, useOCR, usePdf, useSearch };
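
In both React declaration files, LlmapiSearchRequest is dropped and SearchOptions becomes a concrete object type, while search keeps its string | string[] query signature and OCRFile/useOCR join the exports. A hedged sketch of a call site under the new options shape (the import path, token getter, and option values are placeholders, not confirmed by this diff):

import { useSearch } from "@reverbia/sdk/react"; // assumed entry point

export function useDocsSearch(getToken: () => Promise<string>) {
  const { search, isLoading, error } = useSearch({ getToken });

  // query may be a single string or an array; options follow the new SearchOptions type.
  const run = () =>
    search(["zetachain docs", "reverbia sdk"], {
      country: "us",
      max_results: 5,
      search_domain_filter: ["zetachain.com"],
    });

  return { run, isLoading, error };
}
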
package/dist/react/index.mjs
CHANGED
@@ -1665,7 +1665,7 @@ var generateEmbeddingForText = async (text, options = {}) => {
     baseUrl,
     body: {
       input: text,
-      model: model2
+      model: model2 ?? DEFAULT_API_EMBEDDING_MODEL
     },
     headers: {
       Authorization: `Bearer ${token}`
@@ -2040,6 +2040,31 @@ async function extractTextFromPdf(pdfDataUrl) {
     throw error;
   }
 }
+async function convertPdfToImages(pdfDataUrl) {
+  const images = [];
+  try {
+    const loadingTask = pdfjs.getDocument(pdfDataUrl);
+    const pdf = await loadingTask.promise;
+    for (let i = 1; i <= pdf.numPages; i++) {
+      const page = await pdf.getPage(i);
+      const viewport = page.getViewport({ scale: 1.5 });
+      const canvas = document.createElement("canvas");
+      const context = canvas.getContext("2d");
+      if (!context) continue;
+      canvas.height = viewport.height;
+      canvas.width = viewport.width;
+      await page.render({
+        canvasContext: context,
+        viewport
+      }).promise;
+      images.push(canvas.toDataURL("image/png"));
+    }
+  } catch (error) {
+    console.error("Error converting PDF to images:", error);
+    throw error;
+  }
+  return images;
+}
 
 // src/react/usePdf.ts
 var PDF_MIME_TYPE = "application/pdf";
@@ -2092,13 +2117,104 @@ ${text}`;
   };
 }
 
+// src/react/useOCR.ts
+import { useCallback as useCallback4, useState as useState3 } from "react";
+import Tesseract from "tesseract.js";
+function useOCR() {
+  const [isProcessing, setIsProcessing] = useState3(false);
+  const [error, setError] = useState3(null);
+  const extractOCRContext = useCallback4(
+    async (files) => {
+      setIsProcessing(true);
+      setError(null);
+      try {
+        if (files.length === 0) {
+          return null;
+        }
+        const contexts = await Promise.all(
+          files.map(async (file) => {
+            try {
+              let imagesToProcess = [];
+              const language = file.language || "eng";
+              const filename = file.filename || (file.url instanceof File ? file.url.name : "");
+              let isPdf = false;
+              if (typeof file.url === "string") {
+                isPdf = file.url.toLowerCase().endsWith(".pdf") || (filename?.toLowerCase().endsWith(".pdf") ?? false);
+              } else if (file.url instanceof Blob) {
+                isPdf = file.url.type === "application/pdf" || (filename?.toLowerCase().endsWith(".pdf") ?? false);
+              }
+              if (isPdf) {
+                let pdfUrl;
+                let shouldRevoke = false;
+                if (typeof file.url === "string") {
+                  pdfUrl = file.url;
+                } else {
+                  pdfUrl = URL.createObjectURL(file.url);
+                  shouldRevoke = true;
+                }
+                try {
+                  const pdfImages = await convertPdfToImages(pdfUrl);
+                  imagesToProcess = pdfImages;
+                } catch (e) {
+                  console.error("Failed to convert PDF to images", e);
+                  throw e;
+                } finally {
+                  if (shouldRevoke) {
+                    URL.revokeObjectURL(pdfUrl);
+                  }
+                }
+              } else {
+                imagesToProcess = [file.url];
+              }
+              const pageTexts = [];
+              for (const image of imagesToProcess) {
+                const result = await Tesseract.recognize(image, language);
+                pageTexts.push(result.data.text);
+              }
+              const text = pageTexts.join("\n\n");
+              if (!text.trim()) {
+                console.warn(
+                  `No text found in OCR source ${filename || "unknown"}`
+                );
+                return null;
+              }
+              return `[Context from OCR attachment ${filename || "unknown"}]:
+${text}`;
+            } catch (err) {
+              console.error(
+                `Failed to process OCR for ${file.filename || "unknown"}:`,
+                err
+              );
+              return null;
+            }
+          })
+        );
+        const mergedContext = contexts.filter(Boolean).join("\n\n");
+        return mergedContext || null;
+      } catch (err) {
+        const processedError = err instanceof Error ? err : new Error(String(err));
+        setError(processedError);
+        throw processedError;
+      } finally {
+        setIsProcessing(false);
+      }
+    },
+    []
+  );
+  return {
+    extractOCRContext,
+    isProcessing,
+    error
+  };
+}
+
 // src/react/useModels.ts
-import { useCallback as
+import { useCallback as useCallback5, useEffect as useEffect2, useRef as useRef3, useState as useState4 } from "react";
 function useModels(options = {}) {
   const { getToken, baseUrl = BASE_URL, provider, autoFetch = true } = options;
-  const [models, setModels] =
-  const [isLoading, setIsLoading] =
-  const [error, setError] =
+  const [models, setModels] = useState4([]);
+  const [isLoading, setIsLoading] = useState4(false);
+  const [error, setError] = useState4(null);
   const getTokenRef = useRef3(getToken);
   const baseUrlRef = useRef3(baseUrl);
   const providerRef = useRef3(provider);
@@ -2116,7 +2232,7 @@ function useModels(options = {}) {
       }
     };
   }, []);
-  const fetchModels =
+  const fetchModels = useCallback5(async () => {
     if (abortControllerRef.current) {
       abortControllerRef.current.abort();
     }
@@ -2174,7 +2290,7 @@ function useModels(options = {}) {
      }
    }
  }, []);
-  const refetch =
+  const refetch = useCallback5(async () => {
    setModels([]);
    await fetchModels();
  }, [fetchModels]);
@@ -2197,13 +2313,13 @@ function useModels(options = {}) {
 }
 
 // src/react/useSearch.ts
-import { useCallback as
+import { useCallback as useCallback6, useEffect as useEffect3, useRef as useRef4, useState as useState5 } from "react";
 function useSearch(options = {}) {
   const { getToken, baseUrl = BASE_URL, onError } = options;
-  const [isLoading, setIsLoading] =
-  const [results, setResults] =
-  const [response, setResponse] =
-  const [error, setError] =
+  const [isLoading, setIsLoading] = useState5(false);
+  const [results, setResults] = useState5(null);
+  const [response, setResponse] = useState5(null);
+  const [error, setError] = useState5(null);
   const abortControllerRef = useRef4(null);
   useEffect3(() => {
     return () => {
@@ -2213,7 +2329,7 @@ function useSearch(options = {}) {
       }
     };
   }, []);
-  const search =
+  const search = useCallback6(
     async (query, searchOptions = {}) => {
       if (abortControllerRef.current) {
         abortControllerRef.current.abort();
@@ -2281,10 +2397,10 @@ function useSearch(options = {}) {
 }
 
 // src/react/useImageGeneration.ts
-import { useCallback as
+import { useCallback as useCallback7, useEffect as useEffect4, useRef as useRef5, useState as useState6 } from "react";
 function useImageGeneration(options = {}) {
   const { getToken, baseUrl = BASE_URL, onFinish, onError } = options;
-  const [isLoading, setIsLoading] =
+  const [isLoading, setIsLoading] = useState6(false);
   const abortControllerRef = useRef5(null);
   useEffect4(() => {
     return () => {
@@ -2294,13 +2410,13 @@ function useImageGeneration(options = {}) {
       }
     };
   }, []);
-  const stop =
+  const stop = useCallback7(() => {
     if (abortControllerRef.current) {
       abortControllerRef.current.abort();
       abortControllerRef.current = null;
     }
   }, []);
-  const generateImage =
+  const generateImage = useCallback7(
     async (args) => {
       if (abortControllerRef.current) {
         abortControllerRef.current.abort();
@@ -2433,6 +2549,7 @@ export {
   useImageGeneration,
   useMemory,
   useModels,
+  useOCR,
   usePdf,
   useSearch
 };
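
The ESM bundle mirrors the CJS changes: the same convertPdfToImages helper, the same useOCR hook (here via plain import statements and Tesseract.recognize), the DEFAULT_API_EMBEDDING_MODEL fallback when no embedding model is supplied, and the new useOCR export. For PDF inputs the hook takes the pdfjs rendering path before OCR; a small sketch of that branch, continuing the useOCR example above (the Blob source and filename are placeholders):

// Sketch of an async handler in a component that already called useOCR().
async function handlePdf(
  extractOCRContext: (files: { url: Blob; filename?: string; language?: string }[]) => Promise<string | null>,
  pdfBlob: Blob
) {
  // An "application/pdf" MIME type or a ".pdf" filename selects the PDF branch:
  // pages are rendered to PNG data URLs via pdfjs, then recognized one by one.
  return extractOCRContext([{ url: pdfBlob, filename: "scan.pdf", language: "eng" }]);
}
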
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@reverbia/sdk",
-  "version": "1.0.0-next.
+  "version": "1.0.0-next.20251208111334",
   "description": "",
   "main": "./dist/index.cjs",
   "module": "./dist/index.mjs",
@@ -72,9 +72,10 @@
   "homepage": "https://github.com/zeta-chain/ai-sdk#readme",
   "dependencies": {
     "@huggingface/transformers": "^3.8.0",
+    "@reverbia/portal": "1.0.0-next.20251208093751",
+    "ai": "5.0.93",
     "pdfjs-dist": "^4.10.38",
-    "
-    "ai": "5.0.93"
+    "tesseract.js": "^6.0.1"
   },
   "devDependencies": {
     "@hey-api/openapi-ts": "0.87.2",