modelfusion 0.74.0 → 0.75.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. package/README.md +40 -32
  2. package/guard/fixStructure.cjs +1 -1
  3. package/guard/fixStructure.d.ts +1 -1
  4. package/guard/fixStructure.js +1 -1
  5. package/model-function/embed/EmbeddingModel.d.ts +1 -1
  6. package/model-function/embed/embed.cjs +1 -1
  7. package/model-function/embed/embed.d.ts +2 -2
  8. package/model-function/embed/embed.js +1 -1
  9. package/model-function/generate-image/generateImage.d.ts +1 -1
  10. package/model-function/generate-speech/generateSpeech.d.ts +1 -1
  11. package/model-function/generate-speech/streamSpeech.d.ts +1 -1
  12. package/model-function/generate-structure/generateStructure.d.ts +1 -1
  13. package/model-function/generate-structure/streamStructure.d.ts +1 -1
  14. package/model-function/generate-text/generateText.d.ts +1 -1
  15. package/model-function/generate-text/streamText.d.ts +1 -1
  16. package/model-function/generate-transcription/generateTranscription.d.ts +1 -1
  17. package/model-provider/anthropic/AnthropicFacade.cjs +15 -0
  18. package/model-provider/anthropic/AnthropicFacade.d.ts +9 -0
  19. package/model-provider/anthropic/AnthropicFacade.js +11 -0
  20. package/model-provider/anthropic/index.cjs +2 -1
  21. package/model-provider/anthropic/index.d.ts +1 -0
  22. package/model-provider/anthropic/index.js +1 -0
  23. package/model-provider/automatic1111/Automatic1111Facade.cjs +15 -0
  24. package/model-provider/automatic1111/Automatic1111Facade.d.ts +9 -0
  25. package/model-provider/automatic1111/Automatic1111Facade.js +11 -0
  26. package/model-provider/automatic1111/index.cjs +14 -1
  27. package/model-provider/automatic1111/index.d.ts +1 -0
  28. package/model-provider/automatic1111/index.js +1 -0
  29. package/model-provider/cohere/CohereFacade.cjs +71 -0
  30. package/model-provider/cohere/CohereFacade.d.ts +59 -0
  31. package/model-provider/cohere/CohereFacade.js +65 -0
  32. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +1 -1
  33. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +1 -1
  34. package/model-provider/cohere/CohereTextEmbeddingModel.js +1 -1
  35. package/model-provider/cohere/index.cjs +14 -1
  36. package/model-provider/cohere/index.d.ts +1 -0
  37. package/model-provider/cohere/index.js +1 -0
  38. package/model-provider/elevenlabs/ElevenLabsFacade.cjs +18 -0
  39. package/model-provider/elevenlabs/ElevenLabsFacade.d.ts +12 -0
  40. package/model-provider/elevenlabs/ElevenLabsFacade.js +14 -0
  41. package/model-provider/elevenlabs/index.cjs +14 -0
  42. package/model-provider/elevenlabs/index.d.ts +1 -0
  43. package/model-provider/elevenlabs/index.js +1 -0
  44. package/model-provider/huggingface/HuggingFaceFacade.cjs +55 -0
  45. package/model-provider/huggingface/HuggingFaceFacade.d.ts +46 -0
  46. package/model-provider/huggingface/HuggingFaceFacade.js +50 -0
  47. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +1 -1
  48. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +1 -1
  49. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +1 -1
  50. package/model-provider/huggingface/index.cjs +14 -2
  51. package/model-provider/huggingface/index.d.ts +1 -1
  52. package/model-provider/huggingface/index.js +1 -1
  53. package/model-provider/llamacpp/LlamaCppFacade.cjs +19 -0
  54. package/model-provider/llamacpp/LlamaCppFacade.d.ts +7 -0
  55. package/model-provider/llamacpp/LlamaCppFacade.js +13 -0
  56. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +2 -2
  57. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +2 -2
  58. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +2 -2
  59. package/model-provider/llamacpp/index.cjs +2 -1
  60. package/model-provider/llamacpp/index.d.ts +1 -0
  61. package/model-provider/llamacpp/index.js +1 -0
  62. package/model-provider/lmnt/LmntFacade.cjs +15 -0
  63. package/model-provider/lmnt/LmntFacade.d.ts +9 -0
  64. package/model-provider/lmnt/LmntFacade.js +11 -0
  65. package/model-provider/lmnt/index.cjs +14 -0
  66. package/model-provider/lmnt/index.d.ts +1 -0
  67. package/model-provider/lmnt/index.js +1 -0
  68. package/model-provider/ollama/OllamaFacade.cjs +13 -0
  69. package/model-provider/ollama/OllamaFacade.d.ts +4 -0
  70. package/model-provider/ollama/OllamaFacade.js +8 -0
  71. package/model-provider/ollama/OllamaTextEmbeddingModel.cjs +2 -2
  72. package/model-provider/ollama/OllamaTextEmbeddingModel.d.ts +2 -2
  73. package/model-provider/ollama/OllamaTextEmbeddingModel.js +2 -2
  74. package/model-provider/ollama/index.cjs +14 -1
  75. package/model-provider/ollama/index.d.ts +1 -0
  76. package/model-provider/ollama/index.js +1 -0
  77. package/model-provider/openai/OpenAIFacade.cjs +148 -0
  78. package/model-provider/openai/OpenAIFacade.d.ts +124 -0
  79. package/model-provider/openai/OpenAIFacade.js +138 -0
  80. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +1 -1
  81. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +1 -1
  82. package/model-provider/openai/OpenAITextEmbeddingModel.js +1 -1
  83. package/model-provider/openai/TikTokenTokenizer.cjs +2 -2
  84. package/model-provider/openai/TikTokenTokenizer.d.ts +4 -3
  85. package/model-provider/openai/TikTokenTokenizer.js +2 -2
  86. package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +22 -7
  87. package/model-provider/openai/chat/OpenAIChatStreamIterable.js +22 -7
  88. package/model-provider/openai/index.cjs +2 -1
  89. package/model-provider/openai/index.d.ts +1 -0
  90. package/model-provider/openai/index.js +1 -0
  91. package/model-provider/stability/StabilityFacade.cjs +32 -0
  92. package/model-provider/stability/StabilityFacade.d.ts +26 -0
  93. package/model-provider/stability/StabilityFacade.js +28 -0
  94. package/model-provider/stability/index.cjs +14 -1
  95. package/model-provider/stability/index.d.ts +1 -0
  96. package/model-provider/stability/index.js +1 -0
  97. package/package.json +1 -1
  98. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +0 -94
  99. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +0 -44
  100. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +0 -90
@@ -22,14 +22,14 @@ export class TikTokenTokenizer {
22
22
  /**
23
23
  * Get a TikToken tokenizer for a specific model or encoding.
24
24
  */
25
- constructor(options) {
25
+ constructor(settings) {
26
26
  Object.defineProperty(this, "tiktoken", {
27
27
  enumerable: true,
28
28
  configurable: true,
29
29
  writable: true,
30
30
  value: void 0
31
31
  });
32
- this.tiktoken = new Tiktoken(getTiktokenBPE(options.model));
32
+ this.tiktoken = new Tiktoken(getTiktokenBPE(settings.model));
33
33
  }
34
34
  async tokenize(text) {
35
35
  return this.tiktoken.encode(text);
@@ -6,7 +6,8 @@ const AsyncQueue_js_1 = require("../../../util/AsyncQueue.cjs");
6
6
  const parseEventSourceStream_js_1 = require("../../../util/streaming/parseEventSourceStream.cjs");
7
7
  const parseJSON_js_1 = require("../../../core/schema/parseJSON.cjs");
8
8
  const ZodSchema_js_1 = require("../../../core/schema/ZodSchema.cjs");
9
- const chatResponseStreamEventSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
9
+ const chatCompletionChunkSchema = zod_1.z.object({
10
+ object: zod_1.z.literal("chat.completion.chunk"),
10
11
  id: zod_1.z.string(),
11
12
  choices: zod_1.z.array(zod_1.z.object({
12
13
  delta: zod_1.z.object({
@@ -44,8 +45,15 @@ const chatResponseStreamEventSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.objec
44
45
  created: zod_1.z.number(),
45
46
  model: zod_1.z.string(),
46
47
  system_fingerprint: zod_1.z.string().optional(),
47
- object: zod_1.z.literal("chat.completion.chunk"),
48
- }));
48
+ });
49
+ const chatResponseStreamEventSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.union([
50
+ chatCompletionChunkSchema,
51
+ zod_1.z.object({
52
+ object: zod_1.z.string().refine((obj) => obj !== "chat.completion.chunk", {
53
+ message: "Object must not be 'chat.completion.chunk'",
54
+ }),
55
+ }),
56
+ ]));
49
57
  async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
50
58
  const queue = new AsyncQueue_js_1.AsyncQueue();
51
59
  const streamDelta = [];
@@ -68,12 +76,19 @@ async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
68
76
  type: "error",
69
77
  error: parseResult.error,
70
78
  });
71
- queue.close();
72
- return;
79
+ // Note: the queue is not closed on purpose. Some providers might add additional
80
+ // chunks that are not parsable, and ModelFusion should be resilient to that.
81
+ continue;
73
82
  }
74
83
  const eventData = parseResult.data;
75
- for (let i = 0; i < eventData.choices.length; i++) {
76
- const eventChoice = eventData.choices[i];
84
+ // ignore objects that are not "chat.completion.chunk" events.
85
+ // Such additional objects are e.g. sent by Azure OpenAI.
86
+ if (eventData.object !== "chat.completion.chunk") {
87
+ continue;
88
+ }
89
+ const completionChunk = eventData;
90
+ for (let i = 0; i < completionChunk.choices.length; i++) {
91
+ const eventChoice = completionChunk.choices[i];
77
92
  const delta = eventChoice.delta;
78
93
  if (streamDelta[i] == null) {
79
94
  streamDelta[i] = {
@@ -3,7 +3,8 @@ import { AsyncQueue } from "../../../util/AsyncQueue.js";
3
3
  import { parseEventSourceStream } from "../../../util/streaming/parseEventSourceStream.js";
4
4
  import { safeParseJSON } from "../../../core/schema/parseJSON.js";
5
5
  import { ZodSchema } from "../../../core/schema/ZodSchema.js";
6
- const chatResponseStreamEventSchema = new ZodSchema(z.object({
6
+ const chatCompletionChunkSchema = z.object({
7
+ object: z.literal("chat.completion.chunk"),
7
8
  id: z.string(),
8
9
  choices: z.array(z.object({
9
10
  delta: z.object({
@@ -41,8 +42,15 @@ const chatResponseStreamEventSchema = new ZodSchema(z.object({
41
42
  created: z.number(),
42
43
  model: z.string(),
43
44
  system_fingerprint: z.string().optional(),
44
- object: z.literal("chat.completion.chunk"),
45
- }));
45
+ });
46
+ const chatResponseStreamEventSchema = new ZodSchema(z.union([
47
+ chatCompletionChunkSchema,
48
+ z.object({
49
+ object: z.string().refine((obj) => obj !== "chat.completion.chunk", {
50
+ message: "Object must not be 'chat.completion.chunk'",
51
+ }),
52
+ }),
53
+ ]));
46
54
  export async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
47
55
  const queue = new AsyncQueue();
48
56
  const streamDelta = [];
@@ -65,12 +73,19 @@ export async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaVal
65
73
  type: "error",
66
74
  error: parseResult.error,
67
75
  });
68
- queue.close();
69
- return;
76
+ // Note: the queue is not closed on purpose. Some providers might add additional
77
+ // chunks that are not parsable, and ModelFusion should be resilient to that.
78
+ continue;
70
79
  }
71
80
  const eventData = parseResult.data;
72
- for (let i = 0; i < eventData.choices.length; i++) {
73
- const eventChoice = eventData.choices[i];
81
+ // ignore objects that are not "chat.completion.chunk" events.
82
+ // Such additional objects are e.g. sent by Azure OpenAI.
83
+ if (eventData.object !== "chat.completion.chunk") {
84
+ continue;
85
+ }
86
+ const completionChunk = eventData;
87
+ for (let i = 0; i < completionChunk.choices.length; i++) {
88
+ const eventChoice = completionChunk.choices[i];
74
89
  const delta = eventChoice.delta;
75
90
  if (streamDelta[i] == null) {
76
91
  streamDelta[i] = {
@@ -26,13 +26,14 @@ var __importStar = (this && this.__importStar) || function (mod) {
26
26
  return result;
27
27
  };
28
28
  Object.defineProperty(exports, "__esModule", { value: true });
29
- exports.OpenAIChatPromptFormat = exports.OpenAIError = void 0;
29
+ exports.OpenAIChatPromptFormat = exports.openai = exports.OpenAIError = void 0;
30
30
  __exportStar(require("./AzureOpenAIApiConfiguration.cjs"), exports);
31
31
  __exportStar(require("./OpenAIApiConfiguration.cjs"), exports);
32
32
  __exportStar(require("./OpenAICompletionModel.cjs"), exports);
33
33
  __exportStar(require("./OpenAICostCalculator.cjs"), exports);
34
34
  var OpenAIError_js_1 = require("./OpenAIError.cjs");
35
35
  Object.defineProperty(exports, "OpenAIError", { enumerable: true, get: function () { return OpenAIError_js_1.OpenAIError; } });
36
+ exports.openai = __importStar(require("./OpenAIFacade.cjs"));
36
37
  __exportStar(require("./OpenAIImageGenerationModel.cjs"), exports);
37
38
  __exportStar(require("./OpenAISpeechModel.cjs"), exports);
38
39
  __exportStar(require("./OpenAITextEmbeddingModel.cjs"), exports);
@@ -3,6 +3,7 @@ export * from "./OpenAIApiConfiguration.js";
3
3
  export * from "./OpenAICompletionModel.js";
4
4
  export * from "./OpenAICostCalculator.js";
5
5
  export { OpenAIError, OpenAIErrorData } from "./OpenAIError.js";
6
+ export * as openai from "./OpenAIFacade.js";
6
7
  export * from "./OpenAIImageGenerationModel.js";
7
8
  export * from "./OpenAISpeechModel.js";
8
9
  export * from "./OpenAITextEmbeddingModel.js";
@@ -3,6 +3,7 @@ export * from "./OpenAIApiConfiguration.js";
3
3
  export * from "./OpenAICompletionModel.js";
4
4
  export * from "./OpenAICostCalculator.js";
5
5
  export { OpenAIError } from "./OpenAIError.js";
6
+ export * as openai from "./OpenAIFacade.js";
6
7
  export * from "./OpenAIImageGenerationModel.js";
7
8
  export * from "./OpenAISpeechModel.js";
8
9
  export * from "./OpenAITextEmbeddingModel.js";
@@ -0,0 +1,32 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.ImageGenerator = void 0;
4
+ const StabilityImageGenerationModel_js_1 = require("./StabilityImageGenerationModel.cjs");
5
+ /**
6
+ * Create an image generation model that calls the Stability AI image generation API.
7
+ *
8
+ * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
9
+ *
10
+ * @example
11
+ * const image = await generateImage(
12
+ * stability.ImageGenerator({
13
+ * model: "stable-diffusion-512-v2-1",
14
+ * cfgScale: 7,
15
+ * clipGuidancePreset: "FAST_BLUE",
16
+ * height: 512,
17
+ * width: 512,
18
+ * samples: 1,
19
+ * steps: 30,
20
+ * })
21
+ * [
22
+ * { text: "the wicked witch of the west" },
23
+ * { text: "style of early 19th century painting", weight: 0.5 },
24
+ * ]
25
+ * );
26
+ *
27
+ * @returns A new instance of {@link StabilityImageGenerationModel}.
28
+ */
29
+ function ImageGenerator(settings) {
30
+ return new StabilityImageGenerationModel_js_1.StabilityImageGenerationModel(settings);
31
+ }
32
+ exports.ImageGenerator = ImageGenerator;
@@ -0,0 +1,26 @@
1
+ import { StabilityImageGenerationModel, StabilityImageGenerationSettings } from "./StabilityImageGenerationModel.js";
2
+ /**
3
+ * Create an image generation model that calls the Stability AI image generation API.
4
+ *
5
+ * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
6
+ *
7
+ * @example
8
+ * const image = await generateImage(
9
+ * stability.ImageGenerator({
10
+ * model: "stable-diffusion-512-v2-1",
11
+ * cfgScale: 7,
12
+ * clipGuidancePreset: "FAST_BLUE",
13
+ * height: 512,
14
+ * width: 512,
15
+ * samples: 1,
16
+ * steps: 30,
17
+ * })
18
+ * [
19
+ * { text: "the wicked witch of the west" },
20
+ * { text: "style of early 19th century painting", weight: 0.5 },
21
+ * ]
22
+ * );
23
+ *
24
+ * @returns A new instance of {@link StabilityImageGenerationModel}.
25
+ */
26
+ export declare function ImageGenerator(settings: StabilityImageGenerationSettings): StabilityImageGenerationModel;
@@ -0,0 +1,28 @@
1
+ import { StabilityImageGenerationModel, } from "./StabilityImageGenerationModel.js";
2
+ /**
3
+ * Create an image generation model that calls the Stability AI image generation API.
4
+ *
5
+ * @see https://api.stability.ai/docs#tag/v1generation/operation/textToImage
6
+ *
7
+ * @example
8
+ * const image = await generateImage(
9
+ * stability.ImageGenerator({
10
+ * model: "stable-diffusion-512-v2-1",
11
+ * cfgScale: 7,
12
+ * clipGuidancePreset: "FAST_BLUE",
13
+ * height: 512,
14
+ * width: 512,
15
+ * samples: 1,
16
+ * steps: 30,
17
+ * })
18
+ * [
19
+ * { text: "the wicked witch of the west" },
20
+ * { text: "style of early 19th century painting", weight: 0.5 },
21
+ * ]
22
+ * );
23
+ *
24
+ * @returns A new instance of {@link StabilityImageGenerationModel}.
25
+ */
26
+ export function ImageGenerator(settings) {
27
+ return new StabilityImageGenerationModel(settings);
28
+ }
@@ -10,13 +10,26 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
10
10
  if (k2 === undefined) k2 = k;
11
11
  o[k2] = m[k];
12
12
  }));
13
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
14
+ Object.defineProperty(o, "default", { enumerable: true, value: v });
15
+ }) : function(o, v) {
16
+ o["default"] = v;
17
+ });
13
18
  var __exportStar = (this && this.__exportStar) || function(m, exports) {
14
19
  for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
15
20
  };
21
+ var __importStar = (this && this.__importStar) || function (mod) {
22
+ if (mod && mod.__esModule) return mod;
23
+ var result = {};
24
+ if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
25
+ __setModuleDefault(result, mod);
26
+ return result;
27
+ };
16
28
  Object.defineProperty(exports, "__esModule", { value: true });
17
- exports.StabilityError = void 0;
29
+ exports.stability = exports.StabilityError = void 0;
18
30
  __exportStar(require("./StabilityApiConfiguration.cjs"), exports);
19
31
  var StabilityError_js_1 = require("./StabilityError.cjs");
20
32
  Object.defineProperty(exports, "StabilityError", { enumerable: true, get: function () { return StabilityError_js_1.StabilityError; } });
33
+ exports.stability = __importStar(require("./StabilityFacade.cjs"));
21
34
  __exportStar(require("./StabilityImageGenerationModel.cjs"), exports);
22
35
  __exportStar(require("./StabilityImageGenerationPrompt.cjs"), exports);
@@ -1,4 +1,5 @@
1
1
  export * from "./StabilityApiConfiguration.js";
2
2
  export { StabilityError, StabilityErrorData } from "./StabilityError.js";
3
+ export * as stability from "./StabilityFacade.js";
3
4
  export * from "./StabilityImageGenerationModel.js";
4
5
  export * from "./StabilityImageGenerationPrompt.js";
@@ -1,4 +1,5 @@
1
1
  export * from "./StabilityApiConfiguration.js";
2
2
  export { StabilityError } from "./StabilityError.js";
3
+ export * as stability from "./StabilityFacade.js";
3
4
  export * from "./StabilityImageGenerationModel.js";
4
5
  export * from "./StabilityImageGenerationPrompt.js";
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "modelfusion",
3
3
  "description": "The TypeScript library for building multi-modal AI applications.",
4
- "version": "0.74.0",
4
+ "version": "0.75.0",
5
5
  "author": "Lars Grammel",
6
6
  "license": "MIT",
7
7
  "keywords": [
@@ -1,94 +0,0 @@
1
- "use strict";
2
- Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.HuggingFaceImageDescriptionModel = void 0;
4
- const zod_1 = require("zod");
5
- const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
6
- const postToApi_js_1 = require("../../core/api/postToApi.cjs");
7
- const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
8
- const PromptFormatTextGenerationModel_js_1 = require("../../model-function/generate-text/PromptFormatTextGenerationModel.cjs");
9
- const HuggingFaceApiConfiguration_js_1 = require("./HuggingFaceApiConfiguration.cjs");
10
- const HuggingFaceError_js_1 = require("./HuggingFaceError.cjs");
11
- /**
12
- * Create an image to text model that calls a Hugging Face Image-to-Text Inference API.
13
- *
14
- * @see https://huggingface.co/tasks/image-to-text
15
- */
16
- class HuggingFaceImageDescriptionModel extends AbstractModel_js_1.AbstractModel {
17
- constructor(settings) {
18
- super({ settings });
19
- Object.defineProperty(this, "provider", {
20
- enumerable: true,
21
- configurable: true,
22
- writable: true,
23
- value: "huggingface"
24
- });
25
- Object.defineProperty(this, "contextWindowSize", {
26
- enumerable: true,
27
- configurable: true,
28
- writable: true,
29
- value: undefined
30
- });
31
- Object.defineProperty(this, "tokenizer", {
32
- enumerable: true,
33
- configurable: true,
34
- writable: true,
35
- value: undefined
36
- });
37
- Object.defineProperty(this, "countPromptTokens", {
38
- enumerable: true,
39
- configurable: true,
40
- writable: true,
41
- value: undefined
42
- });
43
- }
44
- get modelName() {
45
- return this.settings.model;
46
- }
47
- async callAPI(data, options) {
48
- return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
49
- retry: this.settings.api?.retry,
50
- throttle: this.settings.api?.throttle,
51
- call: async () => callHuggingFaceImageDescriptionAPI({
52
- ...this.settings,
53
- abortSignal: options?.run?.abortSignal,
54
- data,
55
- }),
56
- });
57
- }
58
- get settingsForEvent() {
59
- return {};
60
- }
61
- async doGenerateText(data, options) {
62
- const response = await this.callAPI(data, options);
63
- return {
64
- response,
65
- text: response[0].generated_text,
66
- };
67
- }
68
- withPromptFormat(promptFormat) {
69
- return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
70
- model: this,
71
- promptFormat,
72
- });
73
- }
74
- withSettings(additionalSettings) {
75
- return new HuggingFaceImageDescriptionModel(Object.assign({}, this.settings, additionalSettings));
76
- }
77
- }
78
- exports.HuggingFaceImageDescriptionModel = HuggingFaceImageDescriptionModel;
79
- const huggingFaceImageDescriptionResponseSchema = zod_1.z.array(zod_1.z.object({
80
- generated_text: zod_1.z.string(),
81
- }));
82
- async function callHuggingFaceImageDescriptionAPI({ api = new HuggingFaceApiConfiguration_js_1.HuggingFaceApiConfiguration(), abortSignal, model, data, }) {
83
- return (0, postToApi_js_1.postToApi)({
84
- url: api.assembleUrl(`/${model}`),
85
- headers: api.headers,
86
- body: {
87
- content: data,
88
- values: {},
89
- },
90
- failedResponseHandler: HuggingFaceError_js_1.failedHuggingFaceCallResponseHandler,
91
- successfulResponseHandler: (0, postToApi_js_1.createJsonResponseHandler)(huggingFaceImageDescriptionResponseSchema),
92
- abortSignal,
93
- });
94
- }
@@ -1,44 +0,0 @@
1
- /// <reference types="node" />
2
- import { z } from "zod";
3
- import { FunctionOptions } from "../../core/FunctionOptions.js";
4
- import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
5
- import { AbstractModel } from "../../model-function/AbstractModel.js";
6
- import { PromptFormatTextGenerationModel } from "../../model-function/generate-text/PromptFormatTextGenerationModel.js";
7
- import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
8
- import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
9
- export interface HuggingFaceImageDescriptionModelSettings extends TextGenerationModelSettings {
10
- api?: ApiConfiguration;
11
- model: string;
12
- }
13
- /**
14
- * Create an image to text model that calls a Hugging Face Image-to-Text Inference API.
15
- *
16
- * @see https://huggingface.co/tasks/image-to-text
17
- */
18
- export declare class HuggingFaceImageDescriptionModel extends AbstractModel<HuggingFaceImageDescriptionModelSettings> implements TextGenerationModel<Buffer, HuggingFaceImageDescriptionModelSettings> {
19
- constructor(settings: HuggingFaceImageDescriptionModelSettings);
20
- readonly provider = "huggingface";
21
- get modelName(): string;
22
- callAPI(data: Buffer, options?: FunctionOptions): Promise<HuggingFaceImageDescriptionResponse>;
23
- get settingsForEvent(): Partial<HuggingFaceImageDescriptionModelSettings>;
24
- readonly contextWindowSize: undefined;
25
- readonly tokenizer: undefined;
26
- readonly countPromptTokens: undefined;
27
- doGenerateText(data: Buffer, options?: FunctionOptions): Promise<{
28
- response: {
29
- generated_text: string;
30
- }[];
31
- text: string;
32
- }>;
33
- withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, Buffer>): PromptFormatTextGenerationModel<INPUT_PROMPT, Buffer, HuggingFaceImageDescriptionModelSettings, this>;
34
- withSettings(additionalSettings: Partial<HuggingFaceImageDescriptionModelSettings>): this;
35
- }
36
- declare const huggingFaceImageDescriptionResponseSchema: z.ZodArray<z.ZodObject<{
37
- generated_text: z.ZodString;
38
- }, "strip", z.ZodTypeAny, {
39
- generated_text: string;
40
- }, {
41
- generated_text: string;
42
- }>, "many">;
43
- export type HuggingFaceImageDescriptionResponse = z.infer<typeof huggingFaceImageDescriptionResponseSchema>;
44
- export {};
@@ -1,90 +0,0 @@
1
- import { z } from "zod";
2
- import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
3
- import { createJsonResponseHandler, postToApi, } from "../../core/api/postToApi.js";
4
- import { AbstractModel } from "../../model-function/AbstractModel.js";
5
- import { PromptFormatTextGenerationModel } from "../../model-function/generate-text/PromptFormatTextGenerationModel.js";
6
- import { HuggingFaceApiConfiguration } from "./HuggingFaceApiConfiguration.js";
7
- import { failedHuggingFaceCallResponseHandler } from "./HuggingFaceError.js";
8
- /**
9
- * Create an image to text model that calls a Hugging Face Image-to-Text Inference API.
10
- *
11
- * @see https://huggingface.co/tasks/image-to-text
12
- */
13
- export class HuggingFaceImageDescriptionModel extends AbstractModel {
14
- constructor(settings) {
15
- super({ settings });
16
- Object.defineProperty(this, "provider", {
17
- enumerable: true,
18
- configurable: true,
19
- writable: true,
20
- value: "huggingface"
21
- });
22
- Object.defineProperty(this, "contextWindowSize", {
23
- enumerable: true,
24
- configurable: true,
25
- writable: true,
26
- value: undefined
27
- });
28
- Object.defineProperty(this, "tokenizer", {
29
- enumerable: true,
30
- configurable: true,
31
- writable: true,
32
- value: undefined
33
- });
34
- Object.defineProperty(this, "countPromptTokens", {
35
- enumerable: true,
36
- configurable: true,
37
- writable: true,
38
- value: undefined
39
- });
40
- }
41
- get modelName() {
42
- return this.settings.model;
43
- }
44
- async callAPI(data, options) {
45
- return callWithRetryAndThrottle({
46
- retry: this.settings.api?.retry,
47
- throttle: this.settings.api?.throttle,
48
- call: async () => callHuggingFaceImageDescriptionAPI({
49
- ...this.settings,
50
- abortSignal: options?.run?.abortSignal,
51
- data,
52
- }),
53
- });
54
- }
55
- get settingsForEvent() {
56
- return {};
57
- }
58
- async doGenerateText(data, options) {
59
- const response = await this.callAPI(data, options);
60
- return {
61
- response,
62
- text: response[0].generated_text,
63
- };
64
- }
65
- withPromptFormat(promptFormat) {
66
- return new PromptFormatTextGenerationModel({
67
- model: this,
68
- promptFormat,
69
- });
70
- }
71
- withSettings(additionalSettings) {
72
- return new HuggingFaceImageDescriptionModel(Object.assign({}, this.settings, additionalSettings));
73
- }
74
- }
75
- const huggingFaceImageDescriptionResponseSchema = z.array(z.object({
76
- generated_text: z.string(),
77
- }));
78
- async function callHuggingFaceImageDescriptionAPI({ api = new HuggingFaceApiConfiguration(), abortSignal, model, data, }) {
79
- return postToApi({
80
- url: api.assembleUrl(`/${model}`),
81
- headers: api.headers,
82
- body: {
83
- content: data,
84
- values: {},
85
- },
86
- failedResponseHandler: failedHuggingFaceCallResponseHandler,
87
- successfulResponseHandler: createJsonResponseHandler(huggingFaceImageDescriptionResponseSchema),
88
- abortSignal,
89
- });
90
- }