modelfusion 0.76.0 → 0.78.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -124,7 +124,7 @@ import { generateSpeech, lmnt } from "modelfusion";
 
 // `speech` is a Buffer with MP3 audio data
 const speech = await generateSpeech(
-  lmnt.Speech({
+  lmnt.SpeechGenerator({
     voice: "034b632b-df71-46c8-b440-86a42ffc3cf3", // Henry
   }),
   "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
@@ -146,7 +146,7 @@ import { streamSpeech, elevenlabs } from "modelfusion";
 const textStream: AsyncIterable<string>;
 
 const speechStream = await streamSpeech(
-  elevenlabs.Speech({
+  elevenlabs.SpeechGenerator({
     model: "eleven_turbo_v2",
     voice: "pNInz6obpgDQGcFmaJgB", // Adam
     optimizeStreamingLatency: 1,
@@ -173,7 +173,7 @@ Transcribe speech (audio) data into text. Also called speech-to-text (STT).
 import { generateTranscription, openai } from "modelfusion";
 
 const transcription = await generateTranscription(
-  openai.Transcription({ model: "whisper-1" }),
+  openai.Transcriber({ model: "whisper-1" }),
   {
     type: "mp3",
     data: await fs.promises.readFile("data/test.mp3"),
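Taken together, the README hunks above document a rename of the provider facade factory functions. A hedged recap sketch (API names and the voice id are taken from this diff; nothing beyond it is implied):

```ts
// 0.76.0                    → 0.78.0 (names as they appear in this diff)
// lmnt.Speech(...)          → lmnt.SpeechGenerator(...)
// elevenlabs.Speech(...)    → elevenlabs.SpeechGenerator(...)
// openai.Transcription(...) → openai.Transcriber(...)
import { generateSpeech, lmnt } from "modelfusion";

// `speech` is a Buffer with MP3 audio data (voice id is the README's own example)
const speech = await generateSpeech(
  lmnt.SpeechGenerator({ voice: "034b632b-df71-46c8-b440-86a42ffc3cf3" }),
  "Good evening, ladies and gentlemen!"
);
```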
@@ -9,7 +9,7 @@ import { SpeechGenerationModel, SpeechGenerationModelSettings } from "./SpeechGe
  *
  * @example
  * const speech = await generateSpeech(
- *   lmnt.Speech(...),
+ *   lmnt.SpeechGenerator(...),
  *   "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
  *   "as The Rolling Stones unveil 'Hackney Diamonds.'
  * );
@@ -12,7 +12,7 @@ import { SpeechGenerationModelSettings, StreamingSpeechGenerationModel } from ".
  * const textStream = await streamText(...);
  *
  * const speechStream = await streamSpeech(
- *   elevenlabs.Speech(...),
+ *   elevenlabs.SpeechGenerator(...),
  *   textStream
  * );
  *
@@ -10,7 +10,7 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
  * const data = await fs.promises.readFile("data/test.mp3");
  *
  * const transcription = await generateTranscription(
- *   openai.Transcription({ model: "whisper-1" }),
+ *   openai.Transcriber({ model: "whisper-1" }),
  *   { type: "mp3", data }
  * );
  *
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Speech = void 0;
+exports.SpeechGenerator = void 0;
 const ElevenLabsSpeechModel_js_1 = require("./ElevenLabsSpeechModel.cjs");
 /**
  * Synthesize speech using the ElevenLabs Text to Speech API.
@@ -12,7 +12,7 @@ const ElevenLabsSpeechModel_js_1 = require("./ElevenLabsSpeechModel.cjs");
  *
  * @returns A new instance of {@link ElevenLabsSpeechModel}.
  */
-function Speech(settings) {
+function SpeechGenerator(settings) {
     return new ElevenLabsSpeechModel_js_1.ElevenLabsSpeechModel(settings);
 }
-exports.Speech = Speech;
+exports.SpeechGenerator = SpeechGenerator;
@@ -9,4 +9,4 @@ import { ElevenLabsSpeechModel, ElevenLabsSpeechModelSettings } from "./ElevenLa
  *
  * @returns A new instance of {@link ElevenLabsSpeechModel}.
  */
-export declare function Speech(settings: ElevenLabsSpeechModelSettings): ElevenLabsSpeechModel;
+export declare function SpeechGenerator(settings: ElevenLabsSpeechModelSettings): ElevenLabsSpeechModel;
@@ -9,6 +9,6 @@ import { ElevenLabsSpeechModel, } from "./ElevenLabsSpeechModel.js";
  *
  * @returns A new instance of {@link ElevenLabsSpeechModel}.
  */
-export function Speech(settings) {
+export function SpeechGenerator(settings) {
     return new ElevenLabsSpeechModel(settings);
 }
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Speech = void 0;
+exports.SpeechGenerator = void 0;
 const LmntSpeechModel_js_1 = require("./LmntSpeechModel.cjs");
 /**
  * Synthesize speech using the LMNT API.
@@ -9,7 +9,7 @@ const LmntSpeechModel_js_1 = require("./LmntSpeechModel.cjs");
  *
  * @returns A new instance of {@link LmntSpeechModel}.
  */
-function Speech(settings) {
+function SpeechGenerator(settings) {
     return new LmntSpeechModel_js_1.LmntSpeechModel(settings);
 }
-exports.Speech = Speech;
+exports.SpeechGenerator = SpeechGenerator;
@@ -6,4 +6,4 @@ import { LmntSpeechModel, LmntSpeechModelSettings } from "./LmntSpeechModel.js";
  *
  * @returns A new instance of {@link LmntSpeechModel}.
  */
-export declare function Speech(settings: LmntSpeechModelSettings): LmntSpeechModel;
+export declare function SpeechGenerator(settings: LmntSpeechModelSettings): LmntSpeechModel;
@@ -6,6 +6,6 @@ import { LmntSpeechModel } from "./LmntSpeechModel.js";
  *
  * @returns A new instance of {@link LmntSpeechModel}.
  */
-export function Speech(settings) {
+export function SpeechGenerator(settings) {
     return new LmntSpeechModel(settings);
 }
@@ -97,7 +97,7 @@ class OllamaTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     }
     asToolCallGenerationModel(promptFormat) {
         return new TextGenerationToolCallModel_js_1.TextGenerationToolCallModel({
-            model: this.withSettings({ format: "json" }),
+            model: this,
             format: promptFormat,
         });
     }
@@ -94,7 +94,7 @@ export class OllamaTextGenerationModel extends AbstractModel {
     }
     asToolCallGenerationModel(promptFormat) {
         return new TextGenerationToolCallModel({
-            model: this.withSettings({ format: "json" }),
+            model: this,
             format: promptFormat,
         });
     }
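The two Ollama hunks above drop the implicit `format: "json"` setting when the model is wrapped for tool-call generation. As a hedged sketch (an inference from this diff, not from release notes; it assumes `OllamaTextGenerationModel` is re-exported from the package root), a caller that still wants JSON-constrained output could apply the setting explicitly before wrapping:

```ts
// Minimal sketch only; `model` and `promptFormat` are assumed to exist elsewhere.
import { OllamaTextGenerationModel } from "modelfusion";

declare const model: OllamaTextGenerationModel; // constructed elsewhere
declare const promptFormat: Parameters<
  OllamaTextGenerationModel["asToolCallGenerationModel"]
>[0]; // whatever tool-call prompt format the caller already uses

// 0.76.0 forced format: "json" inside asToolCallGenerationModel();
// in 0.78.0 the caller can presumably opt in via withSettings() instead:
const toolCallModel = model
  .withSettings({ format: "json" })
  .asToolCallGenerationModel(promptFormat);
```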
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Tokenizer = exports.ImageGenerator = exports.Transcription = exports.Speech = exports.TextEmbedder = exports.ChatTextGenerator = exports.CompletionTextGenerator = void 0;
+exports.Tokenizer = exports.ImageGenerator = exports.Transcriber = exports.SpeechGenerator = exports.TextEmbedder = exports.ChatTextGenerator = exports.CompletionTextGenerator = void 0;
 const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
 const OpenAIImageGenerationModel_js_1 = require("./OpenAIImageGenerationModel.cjs");
 const OpenAISpeechModel_js_1 = require("./OpenAISpeechModel.cjs");
@@ -82,10 +82,10 @@ exports.TextEmbedder = TextEmbedder;
  *
  * @returns A new instance of {@link OpenAISpeechModel}.
  */
-function Speech(settings) {
+function SpeechGenerator(settings) {
     return new OpenAISpeechModel_js_1.OpenAISpeechModel(settings);
 }
-exports.Speech = Speech;
+exports.SpeechGenerator = SpeechGenerator;
 /**
  * Create a transcription model that calls the OpenAI transcription API.
  *
@@ -95,7 +95,7 @@ exports.Speech = Speech;
  * const data = await fs.promises.readFile("data/test.mp3");
  *
  * const transcription = await transcribe(
- *   openai.Transcription({ model: "whisper-1" }),
+ *   openai.Transcriber({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
  *     data,
@@ -104,10 +104,10 @@ exports.Speech = Speech;
  *
  * @returns A new instance of {@link OpenAITranscriptionModel}.
  */
-function Transcription(settings) {
+function Transcriber(settings) {
     return new OpenAITranscriptionModel_js_1.OpenAITranscriptionModel(settings);
 }
-exports.Transcription = Transcription;
+exports.Transcriber = Transcriber;
 /**
  * Create an image generation model that calls the OpenAI AI image creation API.
  *
@@ -70,7 +70,7 @@ export declare function TextEmbedder(settings: OpenAITextEmbeddingModelSettings)
  *
  * @returns A new instance of {@link OpenAISpeechModel}.
  */
-export declare function Speech(settings: OpenAISpeechModelSettings): OpenAISpeechModel;
+export declare function SpeechGenerator(settings: OpenAISpeechModelSettings): OpenAISpeechModel;
 /**
  * Create a transcription model that calls the OpenAI transcription API.
  *
@@ -80,7 +80,7 @@ export declare function Speech(settings: OpenAISpeechModelSettings): OpenAISpeec
  * const data = await fs.promises.readFile("data/test.mp3");
  *
  * const transcription = await transcribe(
- *   openai.Transcription({ model: "whisper-1" }),
+ *   openai.Transcriber({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
  *     data,
@@ -89,7 +89,7 @@ export declare function Speech(settings: OpenAISpeechModelSettings): OpenAISpeec
  *
  * @returns A new instance of {@link OpenAITranscriptionModel}.
  */
-export declare function Transcription(settings: OpenAITranscriptionModelSettings): OpenAITranscriptionModel;
+export declare function Transcriber(settings: OpenAITranscriptionModelSettings): OpenAITranscriptionModel;
 /**
  * Create an image generation model that calls the OpenAI AI image creation API.
  *
@@ -76,7 +76,7 @@ export function TextEmbedder(settings) {
  *
  * @returns A new instance of {@link OpenAISpeechModel}.
  */
-export function Speech(settings) {
+export function SpeechGenerator(settings) {
     return new OpenAISpeechModel(settings);
 }
 /**
@@ -88,7 +88,7 @@ export function Speech(settings) {
  * const data = await fs.promises.readFile("data/test.mp3");
  *
  * const transcription = await transcribe(
- *   openai.Transcription({ model: "whisper-1" }),
+ *   openai.Transcriber({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
  *     data,
@@ -97,7 +97,7 @@ export function Speech(settings) {
  *
  * @returns A new instance of {@link OpenAITranscriptionModel}.
  */
-export function Transcription(settings) {
+export function Transcriber(settings) {
     return new OpenAITranscriptionModel(settings);
 }
 /**
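The OpenAI facade hunks above mirror the LMNT and ElevenLabs renames. A minimal usage sketch against 0.78.0, hedged rather than taken from the docs: the function names and the `whisper-1` model id come from this diff, while the speech model id and voice are illustrative placeholders.

```ts
import fs from "node:fs";
import { generateSpeech, generateTranscription, openai } from "modelfusion";

// Renamed in this diff: openai.SpeechGenerator (was openai.Speech)
// and openai.Transcriber (was openai.Transcription).
const speech = await generateSpeech(
  openai.SpeechGenerator({ model: "tts-1", voice: "alloy" }), // model/voice are illustrative
  "Hello from modelfusion 0.78.0!"
);

const transcription = await generateTranscription(
  openai.Transcriber({ model: "whisper-1" }),
  { type: "mp3", data: await fs.promises.readFile("data/test.mp3") }
);
```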
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "The TypeScript library for building multi-modal AI applications.",
-  "version": "0.76.0",
+  "version": "0.78.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [
@@ -10,24 +10,10 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
 }));
-var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
-    Object.defineProperty(o, "default", { enumerable: true, value: v });
-}) : function(o, v) {
-    o["default"] = v;
-});
-var __importStar = (this && this.__importStar) || function (mod) {
-    if (mod && mod.__esModule) return mod;
-    var result = {};
-    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
-    __setModuleDefault(result, mod);
-    return result;
-};
 var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.FunctionListToolCallPromptFormat = void 0;
-exports.FunctionListToolCallPromptFormat = __importStar(require("./FunctionListToolCallPromptFormat.cjs"));
 __exportStar(require("./TextGenerationToolCallModel.cjs"), exports);
 __exportStar(require("./ToolCallGenerationEvent.cjs"), exports);
 __exportStar(require("./ToolCallGenerationModel.cjs"), exports);
@@ -1,4 +1,3 @@
-export * as FunctionListToolCallPromptFormat from "./FunctionListToolCallPromptFormat.js";
 export * from "./TextGenerationToolCallModel.js";
 export * from "./ToolCallGenerationEvent.js";
 export * from "./ToolCallGenerationModel.js";
@@ -1,4 +1,3 @@
-export * as FunctionListToolCallPromptFormat from "./FunctionListToolCallPromptFormat.js";
 export * from "./TextGenerationToolCallModel.js";
 export * from "./ToolCallGenerationEvent.js";
 export * from "./ToolCallGenerationModel.js";
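The two index hunks above drop the `FunctionListToolCallPromptFormat` namespace export, and the deleted-file hunks below remove its implementation. As an inference from these export changes alone (hedged; whether the namespace was also surfaced at the package root is an assumption), 0.76.0-style code like this would stop resolving in 0.78.0:

```ts
// Worked in 0.76.0 via the namespace re-export removed in this diff.
import { FunctionListToolCallPromptFormat } from "modelfusion";

// text() is the overload declared in the removed .d.ts shown below;
// it is no longer available in 0.78.0.
const promptFormat = FunctionListToolCallPromptFormat.text();
```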
@@ -1,70 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.instruction = exports.text = void 0;
-const nanoid_1 = require("nanoid");
-const zod_1 = require("zod");
-const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
-const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
-const functionSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.object({
-    function: zod_1.z.string(),
-    parameters: zod_1.z.any(),
-}));
-const DEFAULT_FUNCTION_CALL_PROMPT = [
-    ``,
-    `Select the most suitable function and parameters ` +
-        `from the list of available functions below, based on the user's input. ` +
-        `Provide your response in JSON format.`,
-    ``,
-    `Available functions:`,
-].join("\n");
-function text({ functionCallPrompt = DEFAULT_FUNCTION_CALL_PROMPT, baseFormat, } = {}) {
-    return {
-        createPrompt(instruction, tool) {
-            const instructionWithFunctionCall = [
-                instruction,
-                functionCallPrompt,
-                `${tool.name}:`,
-                ` description: ${tool.description ?? ""}`,
-                ` parameters: ${JSON.stringify(tool.parameters.getJsonSchema())}`,
-                ``,
-            ].join("\n");
-            return (baseFormat?.format(instructionWithFunctionCall) ??
-                // handled by signature overloading:
-                instructionWithFunctionCall); // eslint-disable-line @typescript-eslint/no-explicit-any
-        },
-        extractToolCall(response) {
-            const json = (0, parseJSON_js_1.parseJSON)({ text: response, schema: functionSchema });
-            return {
-                id: (0, nanoid_1.nanoid)(),
-                args: json.parameters,
-            };
-        },
-    };
-}
-exports.text = text;
-function instruction({ functionCallPrompt = DEFAULT_FUNCTION_CALL_PROMPT, baseFormat, }) {
-    return {
-        createPrompt(instruction, tool) {
-            const instructionWithFunctionCall = [
-                instruction.instruction,
-                functionCallPrompt,
-                `${tool.name}:`,
-                ` description: ${tool.description ?? ""}`,
-                ` parameters: ${JSON.stringify(tool.parameters.getJsonSchema())}`,
-                ``,
-            ].join("\n");
-            return baseFormat.format({
-                ...instruction,
-                instruction: instructionWithFunctionCall,
-            });
-        },
-        extractToolCall(response) {
-            const json = (0, parseJSON_js_1.parseJSON)({ text: response, schema: functionSchema });
-            return {
-                id: (0, nanoid_1.nanoid)(),
-                args: json.parameters,
-            };
-        },
-    };
-}
-exports.instruction = instruction;
@@ -1,14 +0,0 @@
-import { PromptFormat } from "../../model-function/PromptFormat.js";
-import { InstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
-import { ToolCallPromptFormat } from "./TextGenerationToolCallModel.js";
-export declare function text(options?: {
-    functionCallPrompt?: string;
-}): ToolCallPromptFormat<string, string>;
-export declare function text<TARGET_PROMPT>({ functionCallPrompt, baseFormat, }: {
-    functionCallPrompt?: string;
-    baseFormat: PromptFormat<string, TARGET_PROMPT>;
-}): ToolCallPromptFormat<string, TARGET_PROMPT>;
-export declare function instruction<TARGET_PROMPT>({ functionCallPrompt, baseFormat, }: {
-    functionCallPrompt?: string;
-    baseFormat: PromptFormat<InstructionPrompt, TARGET_PROMPT>;
-}): ToolCallPromptFormat<InstructionPrompt, TARGET_PROMPT>;
@@ -1,65 +0,0 @@
-import { nanoid } from "nanoid";
-import { z } from "zod";
-import { ZodSchema } from "../../core/schema/ZodSchema.js";
-import { parseJSON } from "../../core/schema/parseJSON.js";
-const functionSchema = new ZodSchema(z.object({
-    function: z.string(),
-    parameters: z.any(),
-}));
-const DEFAULT_FUNCTION_CALL_PROMPT = [
-    ``,
-    `Select the most suitable function and parameters ` +
-        `from the list of available functions below, based on the user's input. ` +
-        `Provide your response in JSON format.`,
-    ``,
-    `Available functions:`,
-].join("\n");
-export function text({ functionCallPrompt = DEFAULT_FUNCTION_CALL_PROMPT, baseFormat, } = {}) {
-    return {
-        createPrompt(instruction, tool) {
-            const instructionWithFunctionCall = [
-                instruction,
-                functionCallPrompt,
-                `${tool.name}:`,
-                ` description: ${tool.description ?? ""}`,
-                ` parameters: ${JSON.stringify(tool.parameters.getJsonSchema())}`,
-                ``,
-            ].join("\n");
-            return (baseFormat?.format(instructionWithFunctionCall) ??
-                // handled by signature overloading:
-                instructionWithFunctionCall); // eslint-disable-line @typescript-eslint/no-explicit-any
-        },
-        extractToolCall(response) {
-            const json = parseJSON({ text: response, schema: functionSchema });
-            return {
-                id: nanoid(),
-                args: json.parameters,
-            };
-        },
-    };
-}
-export function instruction({ functionCallPrompt = DEFAULT_FUNCTION_CALL_PROMPT, baseFormat, }) {
-    return {
-        createPrompt(instruction, tool) {
-            const instructionWithFunctionCall = [
-                instruction.instruction,
-                functionCallPrompt,
-                `${tool.name}:`,
-                ` description: ${tool.description ?? ""}`,
-                ` parameters: ${JSON.stringify(tool.parameters.getJsonSchema())}`,
-                ``,
-            ].join("\n");
-            return baseFormat.format({
-                ...instruction,
-                instruction: instructionWithFunctionCall,
-            });
-        },
-        extractToolCall(response) {
-            const json = parseJSON({ text: response, schema: functionSchema });
-            return {
-                id: nanoid(),
-                args: json.parameters,
-            };
-        },
-    };
-}