react-native-executorch 0.5.4 → 0.5.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/common/rnexecutorch/models/speech_to_text/SpeechToText.cpp +5 -5
- package/common/rnexecutorch/models/speech_to_text/SpeechToText.h +3 -7
- package/common/rnexecutorch/models/speech_to_text/asr/ASR.cpp +3 -0
- package/common/rnexecutorch/models/speech_to_text/asr/ASR.h +12 -11
- package/common/rnexecutorch/models/speech_to_text/stream/HypothesisBuffer.cpp +2 -0
- package/common/rnexecutorch/models/speech_to_text/stream/HypothesisBuffer.h +6 -8
- package/common/rnexecutorch/models/speech_to_text/stream/OnlineASRProcessor.cpp +3 -0
- package/common/rnexecutorch/models/speech_to_text/stream/OnlineASRProcessor.h +6 -9
- package/package.json +1 -1
- package/lib/Error.js +0 -53
- package/lib/ThreadPool.d.ts +0 -10
- package/lib/ThreadPool.js +0 -28
- package/lib/common/Logger.d.ts +0 -8
- package/lib/common/Logger.js +0 -19
- package/lib/constants/directories.js +0 -2
- package/lib/constants/llmDefaults.d.ts +0 -6
- package/lib/constants/llmDefaults.js +0 -16
- package/lib/constants/modelUrls.d.ts +0 -223
- package/lib/constants/modelUrls.js +0 -322
- package/lib/constants/ocr/models.d.ts +0 -882
- package/lib/constants/ocr/models.js +0 -182
- package/lib/constants/ocr/symbols.js +0 -139
- package/lib/constants/sttDefaults.d.ts +0 -28
- package/lib/constants/sttDefaults.js +0 -68
- package/lib/controllers/LLMController.d.ts +0 -47
- package/lib/controllers/LLMController.js +0 -213
- package/lib/controllers/OCRController.js +0 -67
- package/lib/controllers/SpeechToTextController.d.ts +0 -56
- package/lib/controllers/SpeechToTextController.js +0 -349
- package/lib/controllers/VerticalOCRController.js +0 -70
- package/lib/hooks/computer_vision/useClassification.d.ts +0 -15
- package/lib/hooks/computer_vision/useClassification.js +0 -7
- package/lib/hooks/computer_vision/useImageEmbeddings.d.ts +0 -15
- package/lib/hooks/computer_vision/useImageEmbeddings.js +0 -7
- package/lib/hooks/computer_vision/useImageSegmentation.d.ts +0 -38
- package/lib/hooks/computer_vision/useImageSegmentation.js +0 -7
- package/lib/hooks/computer_vision/useOCR.d.ts +0 -20
- package/lib/hooks/computer_vision/useOCR.js +0 -41
- package/lib/hooks/computer_vision/useObjectDetection.d.ts +0 -15
- package/lib/hooks/computer_vision/useObjectDetection.js +0 -7
- package/lib/hooks/computer_vision/useStyleTransfer.d.ts +0 -15
- package/lib/hooks/computer_vision/useStyleTransfer.js +0 -7
- package/lib/hooks/computer_vision/useVerticalOCR.d.ts +0 -21
- package/lib/hooks/computer_vision/useVerticalOCR.js +0 -43
- package/lib/hooks/general/useExecutorchModule.d.ts +0 -13
- package/lib/hooks/general/useExecutorchModule.js +0 -7
- package/lib/hooks/natural_language_processing/useLLM.d.ts +0 -10
- package/lib/hooks/natural_language_processing/useLLM.js +0 -78
- package/lib/hooks/natural_language_processing/useSpeechToText.d.ts +0 -27
- package/lib/hooks/natural_language_processing/useSpeechToText.js +0 -49
- package/lib/hooks/natural_language_processing/useTextEmbeddings.d.ts +0 -16
- package/lib/hooks/natural_language_processing/useTextEmbeddings.js +0 -7
- package/lib/hooks/natural_language_processing/useTokenizer.d.ts +0 -17
- package/lib/hooks/natural_language_processing/useTokenizer.js +0 -52
- package/lib/hooks/useModule.js +0 -45
- package/lib/hooks/useNonStaticModule.d.ts +0 -20
- package/lib/hooks/useNonStaticModule.js +0 -49
- package/lib/index.d.ts +0 -48
- package/lib/index.js +0 -58
- package/lib/modules/BaseModule.js +0 -25
- package/lib/modules/BaseNonStaticModule.js +0 -14
- package/lib/modules/computer_vision/ClassificationModule.d.ts +0 -8
- package/lib/modules/computer_vision/ClassificationModule.js +0 -17
- package/lib/modules/computer_vision/ImageEmbeddingsModule.d.ts +0 -8
- package/lib/modules/computer_vision/ImageEmbeddingsModule.js +0 -17
- package/lib/modules/computer_vision/ImageSegmentationModule.d.ts +0 -11
- package/lib/modules/computer_vision/ImageSegmentationModule.js +0 -27
- package/lib/modules/computer_vision/OCRModule.d.ts +0 -14
- package/lib/modules/computer_vision/OCRModule.js +0 -17
- package/lib/modules/computer_vision/ObjectDetectionModule.d.ts +0 -9
- package/lib/modules/computer_vision/ObjectDetectionModule.js +0 -17
- package/lib/modules/computer_vision/StyleTransferModule.d.ts +0 -8
- package/lib/modules/computer_vision/StyleTransferModule.js +0 -17
- package/lib/modules/computer_vision/VerticalOCRModule.d.ts +0 -14
- package/lib/modules/computer_vision/VerticalOCRModule.js +0 -19
- package/lib/modules/general/ExecutorchModule.d.ts +0 -7
- package/lib/modules/general/ExecutorchModule.js +0 -14
- package/lib/modules/natural_language_processing/LLMModule.d.ts +0 -28
- package/lib/modules/natural_language_processing/LLMModule.js +0 -45
- package/lib/modules/natural_language_processing/SpeechToTextModule.d.ts +0 -24
- package/lib/modules/natural_language_processing/SpeechToTextModule.js +0 -36
- package/lib/modules/natural_language_processing/TextEmbeddingsModule.d.ts +0 -9
- package/lib/modules/natural_language_processing/TextEmbeddingsModule.js +0 -21
- package/lib/modules/natural_language_processing/TokenizerModule.d.ts +0 -12
- package/lib/modules/natural_language_processing/TokenizerModule.js +0 -30
- package/lib/native/NativeETInstaller.js +0 -2
- package/lib/native/NativeOCR.js +0 -2
- package/lib/native/NativeVerticalOCR.js +0 -2
- package/lib/native/RnExecutorchModules.d.ts +0 -7
- package/lib/native/RnExecutorchModules.js +0 -18
- package/lib/tsconfig.tsbuildinfo +0 -1
- package/lib/types/common.d.ts +0 -32
- package/lib/types/common.js +0 -25
- package/lib/types/imageSegmentation.js +0 -26
- package/lib/types/llm.d.ts +0 -46
- package/lib/types/llm.js +0 -9
- package/lib/types/objectDetection.js +0 -94
- package/lib/types/ocr.js +0 -1
- package/lib/types/stt.d.ts +0 -94
- package/lib/types/stt.js +0 -85
- package/lib/utils/ResourceFetcher.d.ts +0 -24
- package/lib/utils/ResourceFetcher.js +0 -305
- package/lib/utils/ResourceFetcherUtils.d.ts +0 -54
- package/lib/utils/ResourceFetcherUtils.js +0 -127
- package/lib/utils/llm.d.ts +0 -6
- package/lib/utils/llm.js +0 -72
- package/lib/utils/stt.js +0 -21
package/lib/controllers/VerticalOCRController.js
@@ -1,70 +0,0 @@
-import { symbols } from '../constants/ocr/symbols';
-import { ETError, getError } from '../Error';
-import { VerticalOCRNativeModule } from '../native/RnExecutorchModules';
-import { ResourceFetcher } from '../utils/ResourceFetcher';
-export class VerticalOCRController {
-    ocrNativeModule;
-    isReady = false;
-    isGenerating = false;
-    error = null;
-    modelDownloadProgressCallback;
-    isReadyCallback;
-    isGeneratingCallback;
-    errorCallback;
-    constructor({ modelDownloadProgressCallback = (_downloadProgress) => { }, isReadyCallback = (_isReady) => { }, isGeneratingCallback = (_isGenerating) => { }, errorCallback = (_error) => { }, }) {
-        this.ocrNativeModule = VerticalOCRNativeModule;
-        this.modelDownloadProgressCallback = modelDownloadProgressCallback;
-        this.isReadyCallback = isReadyCallback;
-        this.isGeneratingCallback = isGeneratingCallback;
-        this.errorCallback = errorCallback;
-    }
-    loadModel = async (detectorSources, recognizerSources, language, independentCharacters) => {
-        try {
-            if (Object.keys(detectorSources).length !== 2 ||
-                Object.keys(recognizerSources).length !== 2)
-                return;
-            if (!symbols[language]) {
-                throw new Error(getError(ETError.LanguageNotSupported));
-            }
-            this.isReady = false;
-            this.isReadyCallback(this.isReady);
-            const paths = await ResourceFetcher.fetch(this.modelDownloadProgressCallback, detectorSources.detectorLarge, detectorSources.detectorNarrow, independentCharacters
-                ? recognizerSources.recognizerSmall
-                : recognizerSources.recognizerLarge);
-            if (paths === null || paths.length < 3) {
-                throw new Error('Download interrupted');
-            }
-            await this.ocrNativeModule.loadModule(paths[0], paths[1], paths[2], symbols[language], independentCharacters);
-            this.isReady = true;
-            this.isReadyCallback(this.isReady);
-        }
-        catch (e) {
-            if (this.errorCallback) {
-                this.errorCallback(getError(e));
-            }
-            else {
-                throw new Error(getError(e));
-            }
-        }
-    };
-    forward = async (input) => {
-        if (!this.isReady) {
-            throw new Error(getError(ETError.ModuleNotLoaded));
-        }
-        if (this.isGenerating) {
-            throw new Error(getError(ETError.ModelGenerating));
-        }
-        try {
-            this.isGenerating = true;
-            this.isGeneratingCallback(this.isGenerating);
-            return await this.ocrNativeModule.forward(input);
-        }
-        catch (e) {
-            throw new Error(getError(e));
-        }
-        finally {
-            this.isGenerating = false;
-            this.isGeneratingCallback(this.isGenerating);
-        }
-    };
-}
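For orientation, a minimal sketch of how this callback-driven controller can be wired up, based only on the constructor, loadModel, and forward shown in the removed file above. The deep import path, the model URIs, and the 'en' language key are illustrative assumptions, not values taken from this diff.

```ts
// Import path assumes the lib/ layout that this diff removes; adjust to the new layout.
import { VerticalOCRController } from 'react-native-executorch/lib/controllers/VerticalOCRController';

const controller = new VerticalOCRController({
  modelDownloadProgressCallback: (progress: number) => console.log('download', progress),
  isReadyCallback: (ready: boolean) => console.log('ready', ready),
  isGeneratingCallback: (generating: boolean) => console.log('generating', generating),
  errorCallback: (error: unknown) => console.error(error),
});

export async function runVerticalOcr(imageUri: string) {
  // Placeholder .pte sources; 'en' assumes a key present in the symbols map.
  await controller.loadModel(
    {
      detectorLarge: 'https://example.com/detector_large.pte',
      detectorNarrow: 'https://example.com/detector_narrow.pte',
    },
    {
      recognizerLarge: 'https://example.com/recognizer_large.pte',
      recognizerSmall: 'https://example.com/recognizer_small.pte',
    },
    'en',
    false // independentCharacters: false selects the large recognizer
  );
  return controller.forward(imageUri);
}
```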
package/lib/hooks/computer_vision/useClassification.d.ts
@@ -1,15 +0,0 @@
-import { ResourceSource } from '../../types/common';
-interface Props {
-    model: {
-        modelSource: ResourceSource;
-    };
-    preventLoad?: boolean;
-}
-export declare const useClassification: ({ model, preventLoad }: Props) => {
-    error: string | null;
-    isReady: boolean;
-    isGenerating: boolean;
-    downloadProgress: number;
-    forward: (imageSource: string) => Promise<any>;
-};
-export {};
package/lib/hooks/computer_vision/useClassification.js
@@ -1,7 +0,0 @@
-import { useNonStaticModule } from '../useNonStaticModule';
-import { ClassificationModule } from '../../modules/computer_vision/ClassificationModule';
-export const useClassification = ({ model, preventLoad = false }) => useNonStaticModule({
-    module: ClassificationModule,
-    model,
-    preventLoad: preventLoad,
-});
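A usage sketch for the hook whose compiled output is removed above, following the useClassification signature in the preceding .d.ts hunk. The root re-export and the model URL are assumptions.

```ts
import { useEffect } from 'react';
// Assumes the hook is re-exported from the package root, as the removed lib/index.js suggests.
import { useClassification } from 'react-native-executorch';

// Hypothetical model location; substitute a real ExecuTorch .pte export.
const CLASSIFIER_URL = 'https://example.com/classifier.pte';

export function ClassificationExample({ imageUri }: { imageUri: string }) {
  const { isReady, isGenerating, forward } = useClassification({
    model: { modelSource: CLASSIFIER_URL },
  });

  useEffect(() => {
    if (!isReady || isGenerating) return;
    forward(imageUri).then((scores) => console.log(scores), console.error);
  }, [isReady, isGenerating, imageUri, forward]);

  // The hook also returns `error` and `downloadProgress` for rendering load state.
  return null;
}
```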
package/lib/hooks/computer_vision/useImageEmbeddings.d.ts
@@ -1,15 +0,0 @@
-import { ResourceSource } from '../../types/common';
-interface Props {
-    model: {
-        modelSource: ResourceSource;
-    };
-    preventLoad?: boolean;
-}
-export declare const useImageEmbeddings: ({ model, preventLoad }: Props) => {
-    error: string | null;
-    isReady: boolean;
-    isGenerating: boolean;
-    downloadProgress: number;
-    forward: (imageSource: string) => Promise<Float32Array<ArrayBufferLike>>;
-};
-export {};
package/lib/hooks/computer_vision/useImageEmbeddings.js
@@ -1,7 +0,0 @@
-import { ImageEmbeddingsModule } from '../../modules/computer_vision/ImageEmbeddingsModule';
-import { useNonStaticModule } from '../useNonStaticModule';
-export const useImageEmbeddings = ({ model, preventLoad = false }) => useNonStaticModule({
-    module: ImageEmbeddingsModule,
-    model,
-    preventLoad,
-});
package/lib/hooks/computer_vision/useImageSegmentation.d.ts
@@ -1,38 +0,0 @@
-import { ResourceSource } from '../../types/common';
-interface Props {
-    model: {
-        modelSource: ResourceSource;
-    };
-    preventLoad?: boolean;
-}
-export declare const useImageSegmentation: ({ model, preventLoad }: Props) => {
-    error: string | null;
-    isReady: boolean;
-    isGenerating: boolean;
-    downloadProgress: number;
-    forward: (imageSource: string, classesOfInterest?: import("../..").DeeplabLabel[] | undefined, resize?: boolean | undefined) => Promise<{
-        0?: number[] | undefined;
-        1?: number[] | undefined;
-        2?: number[] | undefined;
-        3?: number[] | undefined;
-        4?: number[] | undefined;
-        5?: number[] | undefined;
-        6?: number[] | undefined;
-        7?: number[] | undefined;
-        8?: number[] | undefined;
-        9?: number[] | undefined;
-        10?: number[] | undefined;
-        11?: number[] | undefined;
-        12?: number[] | undefined;
-        13?: number[] | undefined;
-        14?: number[] | undefined;
-        15?: number[] | undefined;
-        16?: number[] | undefined;
-        17?: number[] | undefined;
-        18?: number[] | undefined;
-        19?: number[] | undefined;
-        20?: number[] | undefined;
-        21?: number[] | undefined;
-    }>;
-};
-export {};
package/lib/hooks/computer_vision/useImageSegmentation.js
@@ -1,7 +0,0 @@
-import { useNonStaticModule } from '../useNonStaticModule';
-import { ImageSegmentationModule } from '../../modules/computer_vision/ImageSegmentationModule';
-export const useImageSegmentation = ({ model, preventLoad = false }) => useNonStaticModule({
-    module: ImageSegmentationModule,
-    model,
-    preventLoad,
-});
package/lib/hooks/computer_vision/useOCR.d.ts
@@ -1,20 +0,0 @@
-import { ResourceSource } from '../../types/common';
-import { OCRDetection, OCRLanguage } from '../../types/ocr';
-interface OCRModule {
-    error: string | null;
-    isReady: boolean;
-    isGenerating: boolean;
-    forward: (input: string) => Promise<OCRDetection[]>;
-    downloadProgress: number;
-}
-export declare const useOCR: ({ model, preventLoad, }: {
-    model: {
-        detectorSource: ResourceSource;
-        recognizerLarge: ResourceSource;
-        recognizerMedium: ResourceSource;
-        recognizerSmall: ResourceSource;
-        language: OCRLanguage;
-    };
-    preventLoad?: boolean;
-}) => OCRModule;
-export {};
package/lib/hooks/computer_vision/useOCR.js
@@ -1,41 +0,0 @@
-import { useEffect, useMemo, useState } from 'react';
-import { OCRController } from '../../controllers/OCRController';
-export const useOCR = ({ model, preventLoad = false, }) => {
-    const [error, setError] = useState(null);
-    const [isReady, setIsReady] = useState(false);
-    const [isGenerating, setIsGenerating] = useState(false);
-    const [downloadProgress, setDownloadProgress] = useState(0);
-    const controllerInstance = useMemo(() => new OCRController({
-        modelDownloadProgressCallback: setDownloadProgress,
-        isReadyCallback: setIsReady,
-        isGeneratingCallback: setIsGenerating,
-        errorCallback: setError,
-    }), []);
-    useEffect(() => {
-        const loadModel = async () => {
-            await controllerInstance.loadModel(model.detectorSource, {
-                recognizerLarge: model.recognizerLarge,
-                recognizerMedium: model.recognizerMedium,
-                recognizerSmall: model.recognizerSmall,
-            }, model.language);
-        };
-        if (!preventLoad) {
-            loadModel();
-        }
-    }, [
-        controllerInstance,
-        model.detectorSource,
-        model.recognizerLarge,
-        model.recognizerMedium,
-        model.recognizerSmall,
-        model.language,
-        preventLoad,
-    ]);
-    return {
-        error,
-        isReady,
-        isGenerating,
-        forward: controllerInstance.forward,
-        downloadProgress,
-    };
-};
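A sketch of calling the removed useOCR hook, following the option and return shapes in the .d.ts hunk above. The model sources and the 'en' language value are illustrative placeholders.

```ts
import { useEffect } from 'react';
import { useOCR } from 'react-native-executorch'; // assumed root re-export

export function OcrExample({ imageUri }: { imageUri: string }) {
  const { isReady, isGenerating, forward } = useOCR({
    model: {
      // Placeholder detector/recognizer exports.
      detectorSource: 'https://example.com/detector.pte',
      recognizerLarge: 'https://example.com/recognizer_large.pte',
      recognizerMedium: 'https://example.com/recognizer_medium.pte',
      recognizerSmall: 'https://example.com/recognizer_small.pte',
      language: 'en', // assumed to be a valid OCRLanguage value
    },
  });

  useEffect(() => {
    if (!isReady || isGenerating) return;
    // forward resolves with OCRDetection[] per the declaration above.
    forward(imageUri).then((detections) => console.log(detections), console.error);
  }, [isReady, isGenerating, imageUri, forward]);

  return null; // downloadProgress and error from the hook can drive the UI
}
```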
package/lib/hooks/computer_vision/useObjectDetection.d.ts
@@ -1,15 +0,0 @@
-import { ResourceSource } from '../../types/common';
-interface Props {
-    model: {
-        modelSource: ResourceSource;
-    };
-    preventLoad?: boolean;
-}
-export declare const useObjectDetection: ({ model, preventLoad }: Props) => {
-    error: string | null;
-    isReady: boolean;
-    isGenerating: boolean;
-    downloadProgress: number;
-    forward: (imageSource: string, detectionThreshold?: number | undefined) => Promise<import("../..").Detection[]>;
-};
-export {};
package/lib/hooks/computer_vision/useObjectDetection.js
@@ -1,7 +0,0 @@
-import { useNonStaticModule } from '../useNonStaticModule';
-import { ObjectDetectionModule } from '../../modules/computer_vision/ObjectDetectionModule';
-export const useObjectDetection = ({ model, preventLoad = false }) => useNonStaticModule({
-    module: ObjectDetectionModule,
-    model,
-    preventLoad: preventLoad,
-});
package/lib/hooks/computer_vision/useStyleTransfer.d.ts
@@ -1,15 +0,0 @@
-import { ResourceSource } from '../../types/common';
-interface Props {
-    model: {
-        modelSource: ResourceSource;
-    };
-    preventLoad?: boolean;
-}
-export declare const useStyleTransfer: ({ model, preventLoad }: Props) => {
-    error: string | null;
-    isReady: boolean;
-    isGenerating: boolean;
-    downloadProgress: number;
-    forward: (imageSource: string) => Promise<string>;
-};
-export {};
package/lib/hooks/computer_vision/useStyleTransfer.js
@@ -1,7 +0,0 @@
-import { useNonStaticModule } from '../useNonStaticModule';
-import { StyleTransferModule } from '../../modules/computer_vision/StyleTransferModule';
-export const useStyleTransfer = ({ model, preventLoad = false }) => useNonStaticModule({
-    module: StyleTransferModule,
-    model,
-    preventLoad: preventLoad,
-});
package/lib/hooks/computer_vision/useVerticalOCR.d.ts
@@ -1,21 +0,0 @@
-import { ResourceSource } from '../../types/common';
-import { OCRDetection, OCRLanguage } from '../../types/ocr';
-interface OCRModule {
-    error: string | null;
-    isReady: boolean;
-    isGenerating: boolean;
-    forward: (input: string) => Promise<OCRDetection[]>;
-    downloadProgress: number;
-}
-export declare const useVerticalOCR: ({ model, independentCharacters, preventLoad, }: {
-    model: {
-        detectorLarge: ResourceSource;
-        detectorNarrow: ResourceSource;
-        recognizerLarge: ResourceSource;
-        recognizerSmall: ResourceSource;
-        language: OCRLanguage;
-    };
-    independentCharacters?: boolean;
-    preventLoad?: boolean;
-}) => OCRModule;
-export {};
package/lib/hooks/computer_vision/useVerticalOCR.js
@@ -1,43 +0,0 @@
-import { useEffect, useMemo, useState } from 'react';
-import { VerticalOCRController } from '../../controllers/VerticalOCRController';
-export const useVerticalOCR = ({ model, independentCharacters = false, preventLoad = false, }) => {
-    const [error, setError] = useState(null);
-    const [isReady, setIsReady] = useState(false);
-    const [isGenerating, setIsGenerating] = useState(false);
-    const [downloadProgress, setDownloadProgress] = useState(0);
-    const controllerInstance = useMemo(() => new VerticalOCRController({
-        modelDownloadProgressCallback: setDownloadProgress,
-        isReadyCallback: setIsReady,
-        isGeneratingCallback: setIsGenerating,
-        errorCallback: setError,
-    }), []);
-    useEffect(() => {
-        if (preventLoad)
-            return;
-        (async () => {
-            await controllerInstance.loadModel({
-                detectorLarge: model.detectorLarge,
-                detectorNarrow: model.detectorNarrow,
-            }, {
-                recognizerLarge: model.recognizerLarge,
-                recognizerSmall: model.recognizerSmall,
-            }, model.language, independentCharacters);
-        })();
-    }, [
-        controllerInstance,
-        model.detectorLarge,
-        model.detectorNarrow,
-        model.recognizerLarge,
-        model.recognizerSmall,
-        model.language,
-        independentCharacters,
-        preventLoad,
-    ]);
-    return {
-        error,
-        isReady,
-        isGenerating,
-        forward: controllerInstance.forward,
-        downloadProgress,
-    };
-};
package/lib/hooks/general/useExecutorchModule.d.ts
@@ -1,13 +0,0 @@
-import { ResourceSource } from '../../types/common';
-interface Props {
-    modelSource: ResourceSource;
-    preventLoad?: boolean;
-}
-export declare const useExecutorchModule: ({ modelSource, preventLoad, }: Props) => {
-    error: string | null;
-    isReady: boolean;
-    isGenerating: boolean;
-    downloadProgress: number;
-    forward: (inputTensor: import("../../types/common").TensorPtr[]) => Promise<import("../../types/common").TensorPtr[]>;
-};
-export {};
package/lib/hooks/general/useExecutorchModule.js
@@ -1,7 +0,0 @@
-import { ExecutorchModule } from '../../modules/general/ExecutorchModule';
-import { useNonStaticModule } from '../useNonStaticModule';
-export const useExecutorchModule = ({ modelSource, preventLoad = false, }) => useNonStaticModule({
-    module: ExecutorchModule,
-    model: { modelSource },
-    preventLoad,
-});
package/lib/hooks/natural_language_processing/useLLM.d.ts
@@ -1,10 +0,0 @@
-import { ResourceSource } from '../../types/common';
-import { LLMType } from '../../types/llm';
-export declare const useLLM: ({ model, preventLoad, }: {
-    model: {
-        modelSource: ResourceSource;
-        tokenizerSource: ResourceSource;
-        tokenizerConfigSource: ResourceSource;
-    };
-    preventLoad?: boolean;
-}) => LLMType;
package/lib/hooks/natural_language_processing/useLLM.js
@@ -1,78 +0,0 @@
-import { useCallback, useEffect, useMemo, useState } from 'react';
-import { LLMController } from '../../controllers/LLMController';
-/*
- Hook version of LLMModule
- */
-export const useLLM = ({ model, preventLoad = false, }) => {
-    const [token, setToken] = useState('');
-    const [response, setResponse] = useState('');
-    const [messageHistory, setMessageHistory] = useState([]);
-    const [isReady, setIsReady] = useState(false);
-    const [isGenerating, setIsGenerating] = useState(false);
-    const [downloadProgress, setDownloadProgress] = useState(0);
-    const [error, setError] = useState(null);
-    const tokenCallback = useCallback((newToken) => {
-        setToken(newToken);
-        setResponse((prevResponse) => prevResponse + newToken);
-    }, []);
-    const controllerInstance = useMemo(() => new LLMController({
-        tokenCallback: tokenCallback,
-        messageHistoryCallback: setMessageHistory,
-        isReadyCallback: setIsReady,
-        isGeneratingCallback: setIsGenerating,
-    }), [tokenCallback]);
-    useEffect(() => {
-        setDownloadProgress(0);
-        setError(null);
-        if (preventLoad)
-            return;
-        (async () => {
-            try {
-                await controllerInstance.load({
-                    modelSource: model.modelSource,
-                    tokenizerSource: model.tokenizerSource,
-                    tokenizerConfigSource: model.tokenizerConfigSource,
-                    onDownloadProgressCallback: setDownloadProgress,
-                });
-            }
-            catch (e) {
-                setError(e);
-            }
-        })();
-        return () => {
-            controllerInstance.delete();
-        };
-    }, [
-        controllerInstance,
-        model.modelSource,
-        model.tokenizerSource,
-        model.tokenizerConfigSource,
-        preventLoad,
-    ]);
-    // memoization of returned functions
-    const configure = useCallback(({ chatConfig, toolsConfig, }) => controllerInstance.configure({ chatConfig, toolsConfig }), [controllerInstance]);
-    const generate = useCallback((messages, tools) => {
-        setResponse('');
-        return controllerInstance.generate(messages, tools);
-    }, [controllerInstance]);
-    const sendMessage = useCallback((message) => {
-        setResponse('');
-        return controllerInstance.sendMessage(message);
-    }, [controllerInstance]);
-    const deleteMessage = useCallback((index) => controllerInstance.deleteMessage(index), [controllerInstance]);
-    const interrupt = useCallback(() => controllerInstance.interrupt(), [controllerInstance]);
-    return {
-        messageHistory,
-        response,
-        token,
-        isReady,
-        isGenerating,
-        downloadProgress,
-        error,
-        configure: configure,
-        generate: generate,
-        sendMessage: sendMessage,
-        deleteMessage: deleteMessage,
-        interrupt: interrupt,
-    };
-};
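A sketch of the removed useLLM hook in a component, based on the model shape in the .d.ts hunk and the return object assembled above. The sources are placeholders, and sendMessage is assumed to take the user's message text.

```ts
import { useEffect } from 'react';
import { useLLM } from 'react-native-executorch'; // assumed root re-export

// Placeholder sources; point these at a real model and tokenizer export.
const MODEL = {
  modelSource: 'https://example.com/llm.pte',
  tokenizerSource: 'https://example.com/tokenizer.json',
  tokenizerConfigSource: 'https://example.com/tokenizer_config.json',
};

export function ChatExample() {
  const { isReady, sendMessage } = useLLM({ model: MODEL });

  useEffect(() => {
    if (!isReady) return;
    // Generated tokens stream into `response` via the token callback shown above.
    void sendMessage('Hello!');
  }, [isReady, sendMessage]);

  return null; // render `response`, `messageHistory`, and `isGenerating` in a real UI
}
```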
package/lib/hooks/natural_language_processing/useSpeechToText.d.ts
@@ -1,27 +0,0 @@
-import { SpeechToTextController } from '../../controllers/SpeechToTextController';
-import { ResourceSource } from '../../types/common';
-import { STREAMING_ACTION } from '../../constants/sttDefaults';
-import { AvailableModels, SpeechToTextLanguage } from '../../types/stt';
-interface SpeechToTextModule {
-    isReady: boolean;
-    isGenerating: boolean;
-    sequence: string;
-    downloadProgress: number;
-    configureStreaming: SpeechToTextController['configureStreaming'];
-    error: Error | undefined;
-    transcribe: (input: number[], audioLanguage?: SpeechToTextLanguage) => ReturnType<SpeechToTextController['transcribe']>;
-    streamingTranscribe: (streamAction: STREAMING_ACTION, input?: number[], audioLanguage?: SpeechToTextLanguage) => ReturnType<SpeechToTextController['streamingTranscribe']>;
-}
-export declare const useSpeechToText: ({ model, overlapSeconds, windowSize, streamingConfig, preventLoad, }: {
-    model: {
-        modelName: AvailableModels;
-        encoderSource: ResourceSource;
-        decoderSource: ResourceSource;
-        tokenizerSource: ResourceSource;
-    };
-    overlapSeconds?: ConstructorParameters<typeof SpeechToTextController>["0"]["overlapSeconds"];
-    windowSize?: ConstructorParameters<typeof SpeechToTextController>["0"]["windowSize"];
-    streamingConfig?: ConstructorParameters<typeof SpeechToTextController>["0"]["streamingConfig"];
-    preventLoad?: boolean;
-}) => SpeechToTextModule;
-export {};
package/lib/hooks/natural_language_processing/useSpeechToText.js
@@ -1,49 +0,0 @@
-import { useEffect, useMemo, useState } from 'react';
-import { SpeechToTextController } from '../../controllers/SpeechToTextController';
-export const useSpeechToText = ({ model, overlapSeconds, windowSize, streamingConfig, preventLoad = false, }) => {
-    const [sequence, setSequence] = useState('');
-    const [isReady, setIsReady] = useState(false);
-    const [downloadProgress, setDownloadProgress] = useState(0);
-    const [isGenerating, setIsGenerating] = useState(false);
-    const [error, setError] = useState();
-    const controllerInstance = useMemo(() => new SpeechToTextController({
-        transcribeCallback: setSequence,
-        isReadyCallback: setIsReady,
-        isGeneratingCallback: setIsGenerating,
-        onErrorCallback: setError,
-    }), []);
-    useEffect(() => {
-        controllerInstance.configureStreaming(overlapSeconds, windowSize, streamingConfig);
-    }, [controllerInstance, overlapSeconds, windowSize, streamingConfig]);
-    useEffect(() => {
-        const loadModel = async () => {
-            await controllerInstance.load({
-                modelName: model.modelName,
-                encoderSource: model.encoderSource,
-                decoderSource: model.decoderSource,
-                tokenizerSource: model.tokenizerSource,
-                onDownloadProgressCallback: setDownloadProgress,
-            });
-        };
-        if (!preventLoad) {
-            loadModel();
-        }
-    }, [
-        controllerInstance,
-        model.modelName,
-        model.encoderSource,
-        model.decoderSource,
-        model.tokenizerSource,
-        preventLoad,
-    ]);
-    return {
-        isReady,
-        isGenerating,
-        downloadProgress,
-        configureStreaming: controllerInstance.configureStreaming,
-        sequence,
-        error,
-        transcribe: (waveform, audioLanguage) => controllerInstance.transcribe(waveform, audioLanguage),
-        streamingTranscribe: (streamAction, waveform, audioLanguage) => controllerInstance.streamingTranscribe(streamAction, waveform, audioLanguage),
-    };
-};
package/lib/hooks/natural_language_processing/useTextEmbeddings.d.ts
@@ -1,16 +0,0 @@
-import { ResourceSource } from '../../types/common';
-interface Props {
-    model: {
-        modelSource: ResourceSource;
-        tokenizerSource: ResourceSource;
-    };
-    preventLoad?: boolean;
-}
-export declare const useTextEmbeddings: ({ model, preventLoad }: Props) => {
-    error: string | null;
-    isReady: boolean;
-    isGenerating: boolean;
-    downloadProgress: number;
-    forward: (input: string) => Promise<Float32Array<ArrayBufferLike>>;
-};
-export {};
package/lib/hooks/natural_language_processing/useTextEmbeddings.js
@@ -1,7 +0,0 @@
-import { TextEmbeddingsModule } from '../../modules/natural_language_processing/TextEmbeddingsModule';
-import { useNonStaticModule } from '../useNonStaticModule';
-export const useTextEmbeddings = ({ model, preventLoad = false }) => useNonStaticModule({
-    module: TextEmbeddingsModule,
-    model,
-    preventLoad,
-});
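A sketch of the removed useTextEmbeddings hook; per the declaration above, forward resolves with a Float32Array. The model and tokenizer URLs are placeholders.

```ts
import { useEffect } from 'react';
import { useTextEmbeddings } from 'react-native-executorch'; // assumed root re-export

// Placeholder sources for an embeddings model and its tokenizer.
const MODEL = {
  modelSource: 'https://example.com/text_embeddings.pte',
  tokenizerSource: 'https://example.com/tokenizer.json',
};

export function EmbeddingExample() {
  const { isReady, forward } = useTextEmbeddings({ model: MODEL });

  useEffect(() => {
    if (!isReady) return;
    forward('Hello world').then(
      (embedding) => console.log('dimensions:', embedding.length),
      console.error
    );
  }, [isReady, forward]);

  return null;
}
```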
package/lib/hooks/natural_language_processing/useTokenizer.d.ts
@@ -1,17 +0,0 @@
-import { ResourceSource } from '../../types/common';
-export declare const useTokenizer: ({ tokenizer, preventLoad, }: {
-    tokenizer: {
-        tokenizerSource: ResourceSource;
-    };
-    preventLoad?: boolean;
-}) => {
-    error: string | null;
-    isReady: boolean;
-    isGenerating: boolean;
-    downloadProgress: number;
-    decode: (tokens: number[], skipSpecialTokens?: boolean | undefined) => Promise<Promise<any>>;
-    encode: (s: string) => Promise<Promise<any>>;
-    getVocabSize: () => Promise<Promise<number>>;
-    idToToken: (tokenId: number) => Promise<Promise<string>>;
-    tokenToId: (token: string) => Promise<Promise<number>>;
-};
package/lib/hooks/natural_language_processing/useTokenizer.js
@@ -1,52 +0,0 @@
-import { useEffect, useMemo, useState } from 'react';
-import { TokenizerModule } from '../../modules/natural_language_processing/TokenizerModule';
-import { ETError, getError } from '../../Error';
-export const useTokenizer = ({ tokenizer, preventLoad = false, }) => {
-    const [error, setError] = useState(null);
-    const [isReady, setIsReady] = useState(false);
-    const [isGenerating, setIsGenerating] = useState(false);
-    const [downloadProgress, setDownloadProgress] = useState(0);
-    const _tokenizer = useMemo(() => new TokenizerModule(), []);
-    useEffect(() => {
-        if (preventLoad)
-            return;
-        (async () => {
-            setDownloadProgress(0);
-            setError(null);
-            try {
-                setIsReady(false);
-                await _tokenizer.load({ tokenizerSource: tokenizer.tokenizerSource }, setDownloadProgress);
-                setIsReady(true);
-            }
-            catch (err) {
-                setError(err.message);
-            }
-        })();
-    }, [_tokenizer, tokenizer.tokenizerSource, preventLoad]);
-    const stateWrapper = (fn) => {
-        return (...args) => {
-            if (!isReady)
-                throw new Error(getError(ETError.ModuleNotLoaded));
-            if (isGenerating)
-                throw new Error(getError(ETError.ModelGenerating));
-            try {
-                setIsGenerating(true);
-                return fn.apply(_tokenizer, args);
-            }
-            finally {
-                setIsGenerating(false);
-            }
-        };
-    };
-    return {
-        error,
-        isReady,
-        isGenerating,
-        downloadProgress,
-        decode: stateWrapper(TokenizerModule.prototype.decode),
-        encode: stateWrapper(TokenizerModule.prototype.encode),
-        getVocabSize: stateWrapper(TokenizerModule.prototype.getVocabSize),
-        idToToken: stateWrapper(TokenizerModule.prototype.idToToken),
-        tokenToId: stateWrapper(TokenizerModule.prototype.tokenToId),
-    };
-};
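Finally, a sketch of the removed useTokenizer hook. The hook guards every call through the stateWrapper shown above, so callers should wait for isReady. The tokenizer URL is a placeholder.

```ts
import { useEffect } from 'react';
import { useTokenizer } from 'react-native-executorch'; // assumed root re-export

export function TokenizerExample() {
  const { isReady, encode, decode, getVocabSize } = useTokenizer({
    // Placeholder tokenizer source; use a real tokenizer.json export.
    tokenizer: { tokenizerSource: 'https://example.com/tokenizer.json' },
  });

  useEffect(() => {
    if (!isReady) return; // stateWrapper throws ModuleNotLoaded before this point
    (async () => {
      const ids = await encode('hello world');
      console.log(ids, await decode(ids), await getVocabSize());
    })();
  }, [isReady, encode, decode, getVocabSize]);

  return null;
}
```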