modelfusion 0.76.0 → 0.77.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
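The substantive change in this release is an API rename across the provider facades: the `Speech` factory functions become `SpeechGenerator`, and `Transcription` becomes `Transcriber` (LMNT, ElevenLabs, and OpenAI facades). Below is a minimal before/after migration sketch assembled from the README examples visible in this diff; the import lines and top-level `await` (ES module context) are assumptions, while the voice ID, model name, and file path are the ones used in the README.

```ts
import fs from "node:fs";
import { generateSpeech, generateTranscription, lmnt, openai } from "modelfusion";

// 0.76.0: lmnt.Speech({...})           → 0.77.0: lmnt.SpeechGenerator({...})
// 0.76.0: openai.Transcription({...})  → 0.77.0: openai.Transcriber({...})

// `speech` is a Buffer with MP3 audio data
const speech = await generateSpeech(
  lmnt.SpeechGenerator({
    voice: "034b632b-df71-46c8-b440-86a42ffc3cf3", // Henry
  }),
  "Good evening, ladies and gentlemen!"
);

const transcription = await generateTranscription(
  openai.Transcriber({ model: "whisper-1" }),
  { type: "mp3", data: await fs.promises.readFile("data/test.mp3") }
);
```

The hunks that follow apply the same rename to the README, the library's JSDoc examples, and the generated `.cjs`/`.js`/`.d.ts` facades for LMNT, ElevenLabs, and OpenAI, plus the `package.json` version bump.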
package/README.md CHANGED
@@ -124,7 +124,7 @@ import { generateSpeech, lmnt } from "modelfusion";
 
 // `speech` is a Buffer with MP3 audio data
 const speech = await generateSpeech(
-  lmnt.Speech({
+  lmnt.SpeechGenerator({
     voice: "034b632b-df71-46c8-b440-86a42ffc3cf3", // Henry
   }),
   "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
@@ -146,7 +146,7 @@ import { streamSpeech, elevenlabs } from "modelfusion";
 const textStream: AsyncIterable<string>;
 
 const speechStream = await streamSpeech(
-  elevenlabs.Speech({
+  elevenlabs.SpeechGenerator({
     model: "eleven_turbo_v2",
     voice: "pNInz6obpgDQGcFmaJgB", // Adam
     optimizeStreamingLatency: 1,
@@ -173,7 +173,7 @@ Transcribe speech (audio) data into text. Also called speech-to-text (STT).
 import { generateTranscription, openai } from "modelfusion";
 
 const transcription = await generateTranscription(
-  openai.Transcription({ model: "whisper-1" }),
+  openai.Transcriber({ model: "whisper-1" }),
   {
     type: "mp3",
     data: await fs.promises.readFile("data/test.mp3"),
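The streaming speech facade is renamed in the same way. A minimal sketch based on the README example above, assuming `textStream` comes from a prior step such as `streamText(...)` and that the stream yields audio buffers:

```ts
import { streamSpeech, elevenlabs } from "modelfusion";

// assumed to be produced elsewhere, e.g. by streamText(...)
declare const textStream: AsyncIterable<string>;

const speechStream = await streamSpeech(
  elevenlabs.SpeechGenerator({
    model: "eleven_turbo_v2",
    voice: "pNInz6obpgDQGcFmaJgB", // Adam
    optimizeStreamingLatency: 1,
  }),
  textStream
);

for await (const part of speechStream) {
  // each `part` is expected to be a Buffer of audio data
}
```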
@@ -9,7 +9,7 @@ import { SpeechGenerationModel, SpeechGenerationModelSettings } from "./SpeechGe
  *
  * @example
  * const speech = await generateSpeech(
- *   lmnt.Speech(...),
+ *   lmnt.SpeechGenerator(...),
  *   "Good evening, ladies and gentlemen! Exciting news on the airwaves tonight " +
  *     "as The Rolling Stones unveil 'Hackney Diamonds.'
  * );
@@ -12,7 +12,7 @@ import { SpeechGenerationModelSettings, StreamingSpeechGenerationModel } from ".
  * const textStream = await streamText(...);
  *
  * const speechStream = await streamSpeech(
- *   elevenlabs.Speech(...),
+ *   elevenlabs.SpeechGenerator(...),
  *   textStream
  * );
  *
@@ -10,7 +10,7 @@ import { TranscriptionModel, TranscriptionModelSettings } from "./TranscriptionM
  * const data = await fs.promises.readFile("data/test.mp3");
  *
  * const transcription = await generateTranscription(
- *   openai.Transcription({ model: "whisper-1" }),
+ *   openai.Transcriber({ model: "whisper-1" }),
  *   { type: "mp3", data }
  * );
  *
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Speech = void 0;
+exports.SpeechGenerator = void 0;
 const ElevenLabsSpeechModel_js_1 = require("./ElevenLabsSpeechModel.cjs");
 /**
  * Synthesize speech using the ElevenLabs Text to Speech API.
@@ -12,7 +12,7 @@ const ElevenLabsSpeechModel_js_1 = require("./ElevenLabsSpeechModel.cjs");
  *
  * @returns A new instance of {@link ElevenLabsSpeechModel}.
  */
-function Speech(settings) {
+function SpeechGenerator(settings) {
     return new ElevenLabsSpeechModel_js_1.ElevenLabsSpeechModel(settings);
 }
-exports.Speech = Speech;
+exports.SpeechGenerator = SpeechGenerator;
@@ -9,4 +9,4 @@ import { ElevenLabsSpeechModel, ElevenLabsSpeechModelSettings } from "./ElevenLa
  *
  * @returns A new instance of {@link ElevenLabsSpeechModel}.
  */
-export declare function Speech(settings: ElevenLabsSpeechModelSettings): ElevenLabsSpeechModel;
+export declare function SpeechGenerator(settings: ElevenLabsSpeechModelSettings): ElevenLabsSpeechModel;
@@ -9,6 +9,6 @@ import { ElevenLabsSpeechModel, } from "./ElevenLabsSpeechModel.js";
  *
  * @returns A new instance of {@link ElevenLabsSpeechModel}.
  */
-export function Speech(settings) {
+export function SpeechGenerator(settings) {
     return new ElevenLabsSpeechModel(settings);
 }
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Speech = void 0;
+exports.SpeechGenerator = void 0;
 const LmntSpeechModel_js_1 = require("./LmntSpeechModel.cjs");
 /**
  * Synthesize speech using the LMNT API.
@@ -9,7 +9,7 @@ const LmntSpeechModel_js_1 = require("./LmntSpeechModel.cjs");
  *
  * @returns A new instance of {@link LmntSpeechModel}.
  */
-function Speech(settings) {
+function SpeechGenerator(settings) {
     return new LmntSpeechModel_js_1.LmntSpeechModel(settings);
 }
-exports.Speech = Speech;
+exports.SpeechGenerator = SpeechGenerator;
@@ -6,4 +6,4 @@ import { LmntSpeechModel, LmntSpeechModelSettings } from "./LmntSpeechModel.js";
  *
  * @returns A new instance of {@link LmntSpeechModel}.
  */
-export declare function Speech(settings: LmntSpeechModelSettings): LmntSpeechModel;
+export declare function SpeechGenerator(settings: LmntSpeechModelSettings): LmntSpeechModel;
@@ -6,6 +6,6 @@ import { LmntSpeechModel } from "./LmntSpeechModel.js";
  *
  * @returns A new instance of {@link LmntSpeechModel}.
  */
-export function Speech(settings) {
+export function SpeechGenerator(settings) {
     return new LmntSpeechModel(settings);
 }
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Tokenizer = exports.ImageGenerator = exports.Transcription = exports.Speech = exports.TextEmbedder = exports.ChatTextGenerator = exports.CompletionTextGenerator = void 0;
+exports.Tokenizer = exports.ImageGenerator = exports.Transcriber = exports.SpeechGenerator = exports.TextEmbedder = exports.ChatTextGenerator = exports.CompletionTextGenerator = void 0;
 const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
 const OpenAIImageGenerationModel_js_1 = require("./OpenAIImageGenerationModel.cjs");
 const OpenAISpeechModel_js_1 = require("./OpenAISpeechModel.cjs");
@@ -82,10 +82,10 @@ exports.TextEmbedder = TextEmbedder;
  *
  * @returns A new instance of {@link OpenAISpeechModel}.
  */
-function Speech(settings) {
+function SpeechGenerator(settings) {
     return new OpenAISpeechModel_js_1.OpenAISpeechModel(settings);
 }
-exports.Speech = Speech;
+exports.SpeechGenerator = SpeechGenerator;
 /**
  * Create a transcription model that calls the OpenAI transcription API.
  *
@@ -95,7 +95,7 @@ exports.Speech = Speech;
  * const data = await fs.promises.readFile("data/test.mp3");
  *
  * const transcription = await transcribe(
- *   openai.Transcription({ model: "whisper-1" }),
+ *   openai.Transcriber({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
  *     data,
@@ -104,10 +104,10 @@ exports.Speech = Speech;
  *
  * @returns A new instance of {@link OpenAITranscriptionModel}.
  */
-function Transcription(settings) {
+function Transcriber(settings) {
     return new OpenAITranscriptionModel_js_1.OpenAITranscriptionModel(settings);
 }
-exports.Transcription = Transcription;
+exports.Transcriber = Transcriber;
 /**
  * Create an image generation model that calls the OpenAI AI image creation API.
  *
@@ -70,7 +70,7 @@ export declare function TextEmbedder(settings: OpenAITextEmbeddingModelSettings)
  *
  * @returns A new instance of {@link OpenAISpeechModel}.
  */
-export declare function Speech(settings: OpenAISpeechModelSettings): OpenAISpeechModel;
+export declare function SpeechGenerator(settings: OpenAISpeechModelSettings): OpenAISpeechModel;
 /**
  * Create a transcription model that calls the OpenAI transcription API.
  *
@@ -80,7 +80,7 @@ export declare function Speech(settings: OpenAISpeechModelSettings): OpenAISpeec
  * const data = await fs.promises.readFile("data/test.mp3");
  *
  * const transcription = await transcribe(
- *   openai.Transcription({ model: "whisper-1" }),
+ *   openai.Transcriber({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
  *     data,
@@ -89,7 +89,7 @@ export declare function Speech(settings: OpenAISpeechModelSettings): OpenAISpeec
  *
  * @returns A new instance of {@link OpenAITranscriptionModel}.
  */
-export declare function Transcription(settings: OpenAITranscriptionModelSettings): OpenAITranscriptionModel;
+export declare function Transcriber(settings: OpenAITranscriptionModelSettings): OpenAITranscriptionModel;
 /**
  * Create an image generation model that calls the OpenAI AI image creation API.
  *
@@ -76,7 +76,7 @@ export function TextEmbedder(settings) {
  *
  * @returns A new instance of {@link OpenAISpeechModel}.
  */
-export function Speech(settings) {
+export function SpeechGenerator(settings) {
     return new OpenAISpeechModel(settings);
 }
 /**
@@ -88,7 +88,7 @@ export function Speech(settings) {
  * const data = await fs.promises.readFile("data/test.mp3");
  *
  * const transcription = await transcribe(
- *   openai.Transcription({ model: "whisper-1" }),
+ *   openai.Transcriber({ model: "whisper-1" }),
  *   {
  *     type: "mp3",
  *     data,
@@ -97,7 +97,7 @@ export function Speech(settings) {
  *
  * @returns A new instance of {@link OpenAITranscriptionModel}.
  */
-export function Transcription(settings) {
+export function Transcriber(settings) {
     return new OpenAITranscriptionModel(settings);
 }
 /**
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "The TypeScript library for building multi-modal AI applications.",
-  "version": "0.76.0",
+  "version": "0.77.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [