modelfusion 0.74.1 → 0.76.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +55 -33
- package/guard/fixStructure.cjs +1 -1
- package/guard/fixStructure.d.ts +1 -1
- package/guard/fixStructure.js +1 -1
- package/model-function/embed/EmbeddingModel.d.ts +1 -1
- package/model-function/embed/embed.cjs +1 -1
- package/model-function/embed/embed.d.ts +2 -2
- package/model-function/embed/embed.js +1 -1
- package/model-function/generate-image/generateImage.d.ts +1 -1
- package/model-function/generate-speech/generateSpeech.d.ts +1 -1
- package/model-function/generate-speech/streamSpeech.d.ts +1 -1
- package/model-function/generate-structure/generateStructure.d.ts +1 -1
- package/model-function/generate-structure/streamStructure.d.ts +1 -1
- package/model-function/generate-text/generateText.d.ts +1 -1
- package/model-function/generate-text/streamText.d.ts +1 -1
- package/model-function/generate-transcription/generateTranscription.d.ts +1 -1
- package/model-provider/anthropic/AnthropicFacade.cjs +15 -0
- package/model-provider/anthropic/AnthropicFacade.d.ts +9 -0
- package/model-provider/anthropic/AnthropicFacade.js +11 -0
- package/model-provider/anthropic/AnthropicPromptFormat.cjs +2 -5
- package/model-provider/anthropic/AnthropicPromptFormat.js +2 -5
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +4 -1
- package/model-provider/anthropic/AnthropicTextGenerationModel.d.ts +4 -1
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +4 -1
- package/model-provider/anthropic/index.cjs +2 -1
- package/model-provider/anthropic/index.d.ts +1 -0
- package/model-provider/anthropic/index.js +1 -0
- package/model-provider/automatic1111/Automatic1111Facade.cjs +15 -0
- package/model-provider/automatic1111/Automatic1111Facade.d.ts +9 -0
- package/model-provider/automatic1111/Automatic1111Facade.js +11 -0
- package/model-provider/automatic1111/index.cjs +14 -1
- package/model-provider/automatic1111/index.d.ts +1 -0
- package/model-provider/automatic1111/index.js +1 -0
- package/model-provider/cohere/CohereFacade.cjs +71 -0
- package/model-provider/cohere/CohereFacade.d.ts +59 -0
- package/model-provider/cohere/CohereFacade.js +65 -0
- package/model-provider/cohere/CohereTextEmbeddingModel.cjs +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.js +1 -1
- package/model-provider/cohere/index.cjs +14 -1
- package/model-provider/cohere/index.d.ts +1 -0
- package/model-provider/cohere/index.js +1 -0
- package/model-provider/elevenlabs/ElevenLabsFacade.cjs +18 -0
- package/model-provider/elevenlabs/ElevenLabsFacade.d.ts +12 -0
- package/model-provider/elevenlabs/ElevenLabsFacade.js +14 -0
- package/model-provider/elevenlabs/index.cjs +14 -0
- package/model-provider/elevenlabs/index.d.ts +1 -0
- package/model-provider/elevenlabs/index.js +1 -0
- package/model-provider/huggingface/HuggingFaceFacade.cjs +55 -0
- package/model-provider/huggingface/HuggingFaceFacade.d.ts +46 -0
- package/model-provider/huggingface/HuggingFaceFacade.js +50 -0
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
- package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +1 -1
- package/model-provider/huggingface/index.cjs +14 -2
- package/model-provider/huggingface/index.d.ts +1 -1
- package/model-provider/huggingface/index.js +1 -1
- package/model-provider/llamacpp/LlamaCppFacade.cjs +19 -0
- package/model-provider/llamacpp/LlamaCppFacade.d.ts +7 -0
- package/model-provider/llamacpp/LlamaCppFacade.js +13 -0
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +2 -2
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +2 -2
- package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +2 -2
- package/model-provider/llamacpp/index.cjs +2 -1
- package/model-provider/llamacpp/index.d.ts +1 -0
- package/model-provider/llamacpp/index.js +1 -0
- package/model-provider/lmnt/LmntFacade.cjs +15 -0
- package/model-provider/lmnt/LmntFacade.d.ts +9 -0
- package/model-provider/lmnt/LmntFacade.js +11 -0
- package/model-provider/lmnt/index.cjs +14 -0
- package/model-provider/lmnt/index.d.ts +1 -0
- package/model-provider/lmnt/index.js +1 -0
- package/model-provider/ollama/OllamaFacade.cjs +13 -0
- package/model-provider/ollama/OllamaFacade.d.ts +4 -0
- package/model-provider/ollama/OllamaFacade.js +8 -0
- package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +2 -2
- package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +2 -2
- package/model-provider/ollama/OllamaTextEmbeddingModel.js +2 -2
- package/model-provider/ollama/index.cjs +14 -1
- package/model-provider/ollama/index.d.ts +1 -0
- package/model-provider/ollama/index.js +1 -0
- package/model-provider/openai/OpenAIFacade.cjs +148 -0
- package/model-provider/openai/OpenAIFacade.d.ts +124 -0
- package/model-provider/openai/OpenAIFacade.js +138 -0
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
- package/model-provider/openai/OpenAITextEmbeddingModel.js +1 -1
- package/model-provider/openai/TikTokenTokenizer.cjs +2 -2
- package/model-provider/openai/TikTokenTokenizer.d.ts +4 -3
- package/model-provider/openai/TikTokenTokenizer.js +2 -2
- package/model-provider/openai/index.cjs +2 -1
- package/model-provider/openai/index.d.ts +1 -0
- package/model-provider/openai/index.js +1 -0
- package/model-provider/stability/StabilityFacade.cjs +32 -0
- package/model-provider/stability/StabilityFacade.d.ts +26 -0
- package/model-provider/stability/StabilityFacade.js +28 -0
- package/model-provider/stability/index.cjs +14 -1
- package/model-provider/stability/index.d.ts +1 -0
- package/model-provider/stability/index.js +1 -0
- package/package.json +1 -1
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +0 -94
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +0 -44
- package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +0 -90
package/model-provider/openai/OpenAITextEmbeddingModel.cjs
@@ -57,7 +57,7 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
             writable: true,
             value: 2048
         });
-        Object.defineProperty(this, "
+        Object.defineProperty(this, "isParallelizable", {
             enumerable: true,
             configurable: true,
             writable: true,
package/model-provider/openai/OpenAITextEmbeddingModel.d.ts
@@ -41,7 +41,7 @@ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEm
     readonly provider: "openai";
     get modelName(): "text-embedding-ada-002";
     readonly maxValuesPerCall = 2048;
-    readonly
+    readonly isParallelizable = true;
     readonly embeddingDimensions: number;
     readonly tokenizer: TikTokenTokenizer;
     readonly contextWindowSize: number;
package/model-provider/openai/OpenAITextEmbeddingModel.js
@@ -52,7 +52,7 @@ export class OpenAITextEmbeddingModel extends AbstractModel {
             writable: true,
             value: 2048
         });
-        Object.defineProperty(this, "
+        Object.defineProperty(this, "isParallelizable", {
             enumerable: true,
             configurable: true,
             writable: true,
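These hunks add an isParallelizable flag to the OpenAI text embedding model (the .d.ts hunk pins it to true). The diff does not show how the library consumes the flag, but a plausible reading is that batched embedding calls may be issued concurrently when it is true. The sketch below is only an illustration of that idea: the SimpleEmbeddingModel shape and embedAll helper are invented for this example and are not modelfusion's real interfaces.

// Illustrative sketch, not the package's internal code.
interface SimpleEmbeddingModel {
  maxValuesPerCall: number;
  isParallelizable: boolean;
  embedBatch(values: string[]): Promise<number[][]>;
}

async function embedAll(model: SimpleEmbeddingModel, values: string[]): Promise<number[][]> {
  // Split the input into batches no larger than maxValuesPerCall.
  const batches: string[][] = [];
  for (let i = 0; i < values.length; i += model.maxValuesPerCall) {
    batches.push(values.slice(i, i + model.maxValuesPerCall));
  }
  if (model.isParallelizable) {
    // The provider tolerates concurrent requests, so send all batches at once.
    const perBatch = await Promise.all(batches.map((batch) => model.embedBatch(batch)));
    return perBatch.flat();
  }
  // Otherwise call the API one batch at a time.
  const embeddings: number[][] = [];
  for (const batch of batches) {
    embeddings.push(...(await model.embedBatch(batch)));
  }
  return embeddings;
}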
package/model-provider/openai/TikTokenTokenizer.cjs
@@ -28,14 +28,14 @@ class TikTokenTokenizer {
     /**
      * Get a TikToken tokenizer for a specific model or encoding.
      */
-    constructor(
+    constructor(settings) {
         Object.defineProperty(this, "tiktoken", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: void 0
         });
-        this.tiktoken = new lite_1.Tiktoken(getTiktokenBPE(
+        this.tiktoken = new lite_1.Tiktoken(getTiktokenBPE(settings.model));
     }
     async tokenize(text) {
         return this.tiktoken.encode(text);
package/model-provider/openai/TikTokenTokenizer.d.ts
@@ -2,6 +2,9 @@ import { FullTokenizer } from "../../model-function/tokenize-text/Tokenizer.js";
 import { OpenAITextEmbeddingModelType } from "./OpenAITextEmbeddingModel.js";
 import { OpenAICompletionBaseModelType } from "./OpenAICompletionModel.js";
 import { OpenAIChatBaseModelType } from "./chat/OpenAIChatModel.js";
+export type TikTokenTokenizerSettings = {
+    model: OpenAIChatBaseModelType | OpenAICompletionBaseModelType | OpenAITextEmbeddingModelType;
+};
 /**
  * TikToken tokenizer for OpenAI language models.
  *
@@ -21,9 +24,7 @@ export declare class TikTokenTokenizer implements FullTokenizer {
     /**
      * Get a TikToken tokenizer for a specific model or encoding.
      */
-    constructor(
-        model: OpenAIChatBaseModelType | OpenAICompletionBaseModelType | OpenAITextEmbeddingModelType;
-    });
+    constructor(settings: TikTokenTokenizerSettings);
     private readonly tiktoken;
     tokenize(text: string): Promise<number[]>;
     tokenizeWithTexts(text: string): Promise<{
package/model-provider/openai/TikTokenTokenizer.js
@@ -22,14 +22,14 @@ export class TikTokenTokenizer {
     /**
      * Get a TikToken tokenizer for a specific model or encoding.
      */
-    constructor(
+    constructor(settings) {
         Object.defineProperty(this, "tiktoken", {
             enumerable: true,
             configurable: true,
             writable: true,
             value: void 0
         });
-        this.tiktoken = new Tiktoken(getTiktokenBPE(
+        this.tiktoken = new Tiktoken(getTiktokenBPE(settings.model));
     }
     async tokenize(text) {
         return this.tiktoken.encode(text);
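Per the .d.ts hunk above, the TikTokenTokenizer constructor now takes a TikTokenTokenizerSettings object instead of positional arguments. A minimal usage sketch under assumptions: the "modelfusion" import path and the "gpt-3.5-turbo" model name are illustrative; any value of the union type shown in the hunk is accepted.

// Sketch only: import path and model name are assumptions, not taken from this diff.
import { TikTokenTokenizer } from "modelfusion";

const tokenizer = new TikTokenTokenizer({ model: "gpt-3.5-turbo" });

async function countTokens(text: string): Promise<number> {
  // tokenize returns Promise<number[]> per the declaration above.
  const tokens = await tokenizer.tokenize(text);
  return tokens.length;
}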
package/model-provider/openai/index.cjs
@@ -26,13 +26,14 @@ var __importStar = (this && this.__importStar) || function (mod) {
     return result;
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.OpenAIChatPromptFormat = exports.OpenAIError = void 0;
+exports.OpenAIChatPromptFormat = exports.openai = exports.OpenAIError = void 0;
 __exportStar(require("./AzureOpenAIApiConfiguration.cjs"), exports);
 __exportStar(require("./OpenAIApiConfiguration.cjs"), exports);
 __exportStar(require("./OpenAICompletionModel.cjs"), exports);
 __exportStar(require("./OpenAICostCalculator.cjs"), exports);
 var OpenAIError_js_1 = require("./OpenAIError.cjs");
 Object.defineProperty(exports, "OpenAIError", { enumerable: true, get: function () { return OpenAIError_js_1.OpenAIError; } });
+exports.openai = __importStar(require("./OpenAIFacade.cjs"));
 __exportStar(require("./OpenAIImageGenerationModel.cjs"), exports);
 __exportStar(require("./OpenAISpeechModel.cjs"), exports);
 __exportStar(require("./OpenAITextEmbeddingModel.cjs"), exports);
package/model-provider/openai/index.d.ts
@@ -3,6 +3,7 @@ export * from "./OpenAIApiConfiguration.js";
 export * from "./OpenAICompletionModel.js";
 export * from "./OpenAICostCalculator.js";
 export { OpenAIError, OpenAIErrorData } from "./OpenAIError.js";
+export * as openai from "./OpenAIFacade.js";
 export * from "./OpenAIImageGenerationModel.js";
 export * from "./OpenAISpeechModel.js";
 export * from "./OpenAITextEmbeddingModel.js";
package/model-provider/openai/index.js
@@ -3,6 +3,7 @@ export * from "./OpenAIApiConfiguration.js";
 export * from "./OpenAICompletionModel.js";
 export * from "./OpenAICostCalculator.js";
 export { OpenAIError } from "./OpenAIError.js";
+export * as openai from "./OpenAIFacade.js";
 export * from "./OpenAIImageGenerationModel.js";
 export * from "./OpenAISpeechModel.js";
 export * from "./OpenAITextEmbeddingModel.js";
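The openai provider index now re-exports the new OpenAIFacade module as an openai namespace. The facade's member functions are not shown in this diff (only the OpenAIFacade file addition is listed above), so the call below is an assumed illustration of the namespace pattern; the same pattern appears with stability.ImageGenerator later in this diff.

// Sketch only: the "TextEmbedder" member name, the embedMany call, and the
// "modelfusion" import path are assumptions; OpenAIFacade's actual exports
// are not visible in this diff.
import { embedMany, openai } from "modelfusion";

async function embedExamples() {
  return embedMany(
    openai.TextEmbedder({ model: "text-embedding-ada-002" }),
    ["sunny beach", "rainy forest"]
  );
}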
package/model-provider/stability/StabilityFacade.cjs
@@ -0,0 +1,32 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.ImageGenerator = void 0;
+const StabilityImageGenerationModel_js_1 = require("./StabilityImageGenerationModel.cjs");
+/**
+ * Create an image generation model that calls the Stability AI image generation API.
+ *
+ * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
+ *
+ * @example
+ * const image = await generateImage(
+ *   stability.ImageGenerator({
+ *     model: "stable-diffusion-512-v2-1",
+ *     cfgScale: 7,
+ *     clipGuidancePreset: "FAST_BLUE",
+ *     height: 512,
+ *     width: 512,
+ *     samples: 1,
+ *     steps: 30,
+ *   })
+ *   [
+ *     { text: "the wicked witch of the west" },
+ *     { text: "style of early 19th century painting", weight: 0.5 },
+ *   ]
+ * );
+ *
+ * @returns A new instance of {@link StabilityImageGenerationModel}.
+ */
+function ImageGenerator(settings) {
+    return new StabilityImageGenerationModel_js_1.StabilityImageGenerationModel(settings);
+}
+exports.ImageGenerator = ImageGenerator;
package/model-provider/stability/StabilityFacade.d.ts
@@ -0,0 +1,26 @@
+import { StabilityImageGenerationModel, StabilityImageGenerationSettings } from "./StabilityImageGenerationModel.js";
+/**
+ * Create an image generation model that calls the Stability AI image generation API.
+ *
+ * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
+ *
+ * @example
+ * const image = await generateImage(
+ *   stability.ImageGenerator({
+ *     model: "stable-diffusion-512-v2-1",
+ *     cfgScale: 7,
+ *     clipGuidancePreset: "FAST_BLUE",
+ *     height: 512,
+ *     width: 512,
+ *     samples: 1,
+ *     steps: 30,
+ *   })
+ *   [
+ *     { text: "the wicked witch of the west" },
+ *     { text: "style of early 19th century painting", weight: 0.5 },
+ *   ]
+ * );
+ *
+ * @returns A new instance of {@link StabilityImageGenerationModel}.
+ */
+export declare function ImageGenerator(settings: StabilityImageGenerationSettings): StabilityImageGenerationModel;
package/model-provider/stability/StabilityFacade.js
@@ -0,0 +1,28 @@
+import { StabilityImageGenerationModel, } from "./StabilityImageGenerationModel.js";
+/**
+ * Create an image generation model that calls the Stability AI image generation API.
+ *
+ * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
+ *
+ * @example
+ * const image = await generateImage(
+ *   stability.ImageGenerator({
+ *     model: "stable-diffusion-512-v2-1",
+ *     cfgScale: 7,
+ *     clipGuidancePreset: "FAST_BLUE",
+ *     height: 512,
+ *     width: 512,
+ *     samples: 1,
+ *     steps: 30,
+ *   })
+ *   [
+ *     { text: "the wicked witch of the west" },
+ *     { text: "style of early 19th century painting", weight: 0.5 },
+ *   ]
+ * );
+ *
+ * @returns A new instance of {@link StabilityImageGenerationModel}.
+ */
+export function ImageGenerator(settings) {
+    return new StabilityImageGenerationModel(settings);
+}
package/model-provider/stability/index.cjs
@@ -10,13 +10,26 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
 }));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
 var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.StabilityError = void 0;
+exports.stability = exports.StabilityError = void 0;
 __exportStar(require("./StabilityApiConfiguration.cjs"), exports);
 var StabilityError_js_1 = require("./StabilityError.cjs");
 Object.defineProperty(exports, "StabilityError", { enumerable: true, get: function () { return StabilityError_js_1.StabilityError; } });
+exports.stability = __importStar(require("./StabilityFacade.cjs"));
 __exportStar(require("./StabilityImageGenerationModel.cjs"), exports);
 __exportStar(require("./StabilityImageGenerationPrompt.cjs"), exports);
package/model-provider/stability/index.d.ts
@@ -1,4 +1,5 @@
 export * from "./StabilityApiConfiguration.js";
 export { StabilityError, StabilityErrorData } from "./StabilityError.js";
+export * as stability from "./StabilityFacade.js";
 export * from "./StabilityImageGenerationModel.js";
 export * from "./StabilityImageGenerationPrompt.js";
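The JSDoc in the new StabilityFacade shows the intended call pattern: the facade builds a StabilityImageGenerationModel that is passed to generateImage together with the weighted text prompts. A usage sketch follows; note that, unlike the JSDoc example reproduced above, the model and prompt arguments need a separating comma, and the "modelfusion" import path is an assumption.

// Based on the facade's own @example; import path assumed.
import { generateImage, stability } from "modelfusion";

async function renderWitch() {
  return generateImage(
    stability.ImageGenerator({
      model: "stable-diffusion-512-v2-1",
      cfgScale: 7,
      clipGuidancePreset: "FAST_BLUE",
      height: 512,
      width: 512,
      samples: 1,
      steps: 30,
    }),
    [
      { text: "the wicked witch of the west" },
      { text: "style of early 19th century painting", weight: 0.5 },
    ]
  );
}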
package/package.json CHANGED
package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs
@@ -1,94 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.HuggingFaceImageDescriptionModel = void 0;
-const zod_1 = require("zod");
-const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
-const postToApi_js_1 = require("../../core/api/postToApi.cjs");
-const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
-const PromptFormatTextGenerationModel_js_1 = require("../../model-function/generate-text/PromptFormatTextGenerationModel.cjs");
-const HuggingFaceApiConfiguration_js_1 = require("./HuggingFaceApiConfiguration.cjs");
-const HuggingFaceError_js_1 = require("./HuggingFaceError.cjs");
-/**
- * Create an image to text model that calls a Hugging Face Image-to-Text Inference API.
- *
- * @see https://huggingface.co/tasks/image-to-text
- */
-class HuggingFaceImageDescriptionModel extends AbstractModel_js_1.AbstractModel {
-    constructor(settings) {
-        super({ settings });
-        Object.defineProperty(this, "provider", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: "huggingface"
-        });
-        Object.defineProperty(this, "contextWindowSize", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: undefined
-        });
-        Object.defineProperty(this, "tokenizer", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: undefined
-        });
-        Object.defineProperty(this, "countPromptTokens", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: undefined
-        });
-    }
-    get modelName() {
-        return this.settings.model;
-    }
-    async callAPI(data, options) {
-        return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
-            retry: this.settings.api?.retry,
-            throttle: this.settings.api?.throttle,
-            call: async () => callHuggingFaceImageDescriptionAPI({
-                ...this.settings,
-                abortSignal: options?.run?.abortSignal,
-                data,
-            }),
-        });
-    }
-    get settingsForEvent() {
-        return {};
-    }
-    async doGenerateText(data, options) {
-        const response = await this.callAPI(data, options);
-        return {
-            response,
-            text: response[0].generated_text,
-        };
-    }
-    withPromptFormat(promptFormat) {
-        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
-            model: this,
-            promptFormat,
-        });
-    }
-    withSettings(additionalSettings) {
-        return new HuggingFaceImageDescriptionModel(Object.assign({}, this.settings, additionalSettings));
-    }
-}
-exports.HuggingFaceImageDescriptionModel = HuggingFaceImageDescriptionModel;
-const huggingFaceImageDescriptionResponseSchema = zod_1.z.array(zod_1.z.object({
-    generated_text: zod_1.z.string(),
-}));
-async function callHuggingFaceImageDescriptionAPI({ api = new HuggingFaceApiConfiguration_js_1.HuggingFaceApiConfiguration(), abortSignal, model, data, }) {
-    return (0, postToApi_js_1.postToApi)({
-        url: api.assembleUrl(`/${model}`),
-        headers: api.headers,
-        body: {
-            content: data,
-            values: {},
-        },
-        failedResponseHandler: HuggingFaceError_js_1.failedHuggingFaceCallResponseHandler,
-        successfulResponseHandler: (0, postToApi_js_1.createJsonResponseHandler)(huggingFaceImageDescriptionResponseSchema),
-        abortSignal,
-    });
-}
package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts
@@ -1,44 +0,0 @@
-/// <reference types="node" />
-import { z } from "zod";
-import { FunctionOptions } from "../../core/FunctionOptions.js";
-import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
-import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { PromptFormatTextGenerationModel } from "../../model-function/generate-text/PromptFormatTextGenerationModel.js";
-import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
-import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
-export interface HuggingFaceImageDescriptionModelSettings extends TextGenerationModelSettings {
-    api?: ApiConfiguration;
-    model: string;
-}
-/**
- * Create an image to text model that calls a Hugging Face Image-to-Text Inference API.
- *
- * @see https://huggingface.co/tasks/image-to-text
- */
-export declare class HuggingFaceImageDescriptionModel extends AbstractModel<HuggingFaceImageDescriptionModelSettings> implements TextGenerationModel<Buffer, HuggingFaceImageDescriptionModelSettings> {
-    constructor(settings: HuggingFaceImageDescriptionModelSettings);
-    readonly provider = "huggingface";
-    get modelName(): string;
-    callAPI(data: Buffer, options?: FunctionOptions): Promise<HuggingFaceImageDescriptionResponse>;
-    get settingsForEvent(): Partial<HuggingFaceImageDescriptionModelSettings>;
-    readonly contextWindowSize: undefined;
-    readonly tokenizer: undefined;
-    readonly countPromptTokens: undefined;
-    doGenerateText(data: Buffer, options?: FunctionOptions): Promise<{
-        response: {
-            generated_text: string;
-        }[];
-        text: string;
-    }>;
-    withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, Buffer>): PromptFormatTextGenerationModel<INPUT_PROMPT, Buffer, HuggingFaceImageDescriptionModelSettings, this>;
-    withSettings(additionalSettings: Partial<HuggingFaceImageDescriptionModelSettings>): this;
-}
-declare const huggingFaceImageDescriptionResponseSchema: z.ZodArray<z.ZodObject<{
-    generated_text: z.ZodString;
-}, "strip", z.ZodTypeAny, {
-    generated_text: string;
-}, {
-    generated_text: string;
-}>, "many">;
-export type HuggingFaceImageDescriptionResponse = z.infer<typeof huggingFaceImageDescriptionResponseSchema>;
-export {};
package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js
@@ -1,90 +0,0 @@
-import { z } from "zod";
-import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
-import { createJsonResponseHandler, postToApi, } from "../../core/api/postToApi.js";
-import { AbstractModel } from "../../model-function/AbstractModel.js";
-import { PromptFormatTextGenerationModel } from "../../model-function/generate-text/PromptFormatTextGenerationModel.js";
-import { HuggingFaceApiConfiguration } from "./HuggingFaceApiConfiguration.js";
-import { failedHuggingFaceCallResponseHandler } from "./HuggingFaceError.js";
-/**
- * Create an image to text model that calls a Hugging Face Image-to-Text Inference API.
- *
- * @see https://huggingface.co/tasks/image-to-text
- */
-export class HuggingFaceImageDescriptionModel extends AbstractModel {
-    constructor(settings) {
-        super({ settings });
-        Object.defineProperty(this, "provider", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: "huggingface"
-        });
-        Object.defineProperty(this, "contextWindowSize", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: undefined
-        });
-        Object.defineProperty(this, "tokenizer", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: undefined
-        });
-        Object.defineProperty(this, "countPromptTokens", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: undefined
-        });
-    }
-    get modelName() {
-        return this.settings.model;
-    }
-    async callAPI(data, options) {
-        return callWithRetryAndThrottle({
-            retry: this.settings.api?.retry,
-            throttle: this.settings.api?.throttle,
-            call: async () => callHuggingFaceImageDescriptionAPI({
-                ...this.settings,
-                abortSignal: options?.run?.abortSignal,
-                data,
-            }),
-        });
-    }
-    get settingsForEvent() {
-        return {};
-    }
-    async doGenerateText(data, options) {
-        const response = await this.callAPI(data, options);
-        return {
-            response,
-            text: response[0].generated_text,
-        };
-    }
-    withPromptFormat(promptFormat) {
-        return new PromptFormatTextGenerationModel({
-            model: this,
-            promptFormat,
-        });
-    }
-    withSettings(additionalSettings) {
-        return new HuggingFaceImageDescriptionModel(Object.assign({}, this.settings, additionalSettings));
-    }
-}
-const huggingFaceImageDescriptionResponseSchema = z.array(z.object({
-    generated_text: z.string(),
-}));
-async function callHuggingFaceImageDescriptionAPI({ api = new HuggingFaceApiConfiguration(), abortSignal, model, data, }) {
-    return postToApi({
-        url: api.assembleUrl(`/${model}`),
-        headers: api.headers,
-        body: {
-            content: data,
-            values: {},
-        },
-        failedResponseHandler: failedHuggingFaceCallResponseHandler,
-        successfulResponseHandler: createJsonResponseHandler(huggingFaceImageDescriptionResponseSchema),
-        abortSignal,
-    });
-}