modelfusion 0.48.0 → 0.50.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149):
  1. package/README.md +186 -183
  2. package/core/FunctionEvent.d.ts +9 -1
  3. package/core/FunctionOptions.d.ts +4 -0
  4. package/core/executeFunctionCall.cjs +85 -0
  5. package/core/executeFunctionCall.d.ts +10 -0
  6. package/core/executeFunctionCall.js +81 -0
  7. package/guard/GuardEvent.d.ts +7 -0
  8. package/guard/guard.cjs +60 -54
  9. package/guard/guard.d.ts +2 -1
  10. package/guard/guard.js +60 -54
  11. package/guard/index.cjs +1 -0
  12. package/guard/index.d.ts +1 -0
  13. package/guard/index.js +1 -0
  14. package/model-function/AsyncIterableResultPromise.cjs +5 -5
  15. package/model-function/AsyncIterableResultPromise.d.ts +3 -3
  16. package/model-function/AsyncIterableResultPromise.js +5 -5
  17. package/model-function/Model.d.ts +1 -1
  18. package/model-function/ModelCallEvent.d.ts +5 -7
  19. package/model-function/embed/EmbeddingEvent.d.ts +2 -2
  20. package/model-function/embed/embed.cjs +5 -5
  21. package/model-function/embed/embed.js +5 -5
  22. package/model-function/{executeCall.cjs → executeStandardCall.cjs} +5 -3
  23. package/model-function/{executeCall.d.ts → executeStandardCall.d.ts} +1 -1
  24. package/model-function/{executeCall.js → executeStandardCall.js} +3 -1
  25. package/model-function/executeStreamCall.cjs +134 -0
  26. package/model-function/executeStreamCall.d.ts +20 -0
  27. package/model-function/executeStreamCall.js +130 -0
  28. package/model-function/generate-image/ImageGenerationEvent.d.ts +2 -2
  29. package/model-function/generate-image/generateImage.cjs +3 -3
  30. package/model-function/generate-image/generateImage.js +3 -3
  31. package/model-function/generate-speech/SpeechGenerationEvent.d.ts +27 -0
  32. package/model-function/generate-speech/SpeechGenerationModel.d.ts +15 -0
  33. package/model-function/generate-speech/generateSpeech.cjs +24 -0
  34. package/model-function/generate-speech/generateSpeech.d.ts +8 -0
  35. package/model-function/generate-speech/generateSpeech.js +20 -0
  36. package/model-function/generate-speech/index.cjs +20 -0
  37. package/model-function/generate-speech/index.d.ts +4 -0
  38. package/model-function/generate-speech/index.js +4 -0
  39. package/model-function/generate-speech/streamSpeech.cjs +34 -0
  40. package/model-function/generate-speech/streamSpeech.d.ts +8 -0
  41. package/model-function/generate-speech/streamSpeech.js +30 -0
  42. package/model-function/generate-structure/StructureGenerationEvent.d.ts +2 -2
  43. package/model-function/generate-structure/StructureStreamingEvent.d.ts +2 -2
  44. package/model-function/generate-structure/generateStructure.cjs +3 -3
  45. package/model-function/generate-structure/generateStructure.js +3 -3
  46. package/model-function/generate-structure/generateStructureOrText.cjs +3 -3
  47. package/model-function/generate-structure/generateStructureOrText.js +3 -3
  48. package/model-function/generate-structure/index.cjs +27 -0
  49. package/model-function/generate-structure/index.d.ts +11 -0
  50. package/model-function/generate-structure/index.js +11 -0
  51. package/model-function/generate-structure/streamStructure.cjs +29 -137
  52. package/model-function/generate-structure/streamStructure.js +28 -136
  53. package/model-function/generate-text/TextGenerationEvent.d.ts +8 -2
  54. package/model-function/generate-text/generateText.cjs +4 -4
  55. package/model-function/generate-text/generateText.d.ts +1 -1
  56. package/model-function/generate-text/generateText.js +4 -4
  57. package/model-function/generate-text/index.cjs +0 -1
  58. package/model-function/generate-text/index.d.ts +0 -1
  59. package/model-function/generate-text/index.js +0 -1
  60. package/model-function/generate-text/streamText.cjs +22 -129
  61. package/model-function/generate-text/streamText.js +21 -128
  62. package/model-function/generate-text/trimChatPrompt.cjs +1 -1
  63. package/model-function/generate-text/trimChatPrompt.d.ts +1 -1
  64. package/model-function/generate-text/trimChatPrompt.js +1 -1
  65. package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.d.ts +2 -2
  66. package/model-function/{transcribe-speech/transcribe.cjs → generate-transcription/generateTranscription.cjs} +7 -7
  67. package/model-function/{transcribe-speech/transcribe.d.ts → generate-transcription/generateTranscription.d.ts} +2 -2
  68. package/model-function/{transcribe-speech/transcribe.js → generate-transcription/generateTranscription.js} +5 -5
  69. package/model-function/index.cjs +5 -20
  70. package/model-function/index.d.ts +5 -20
  71. package/model-function/index.js +5 -20
  72. package/model-provider/elevenlabs/{ElevenLabsSpeechSynthesisModel.cjs → ElevenLabsSpeechModel.cjs} +6 -6
  73. package/model-provider/elevenlabs/{ElevenLabsSpeechSynthesisModel.d.ts → ElevenLabsSpeechModel.d.ts} +8 -8
  74. package/model-provider/elevenlabs/{ElevenLabsSpeechSynthesisModel.js → ElevenLabsSpeechModel.js} +4 -4
  75. package/model-provider/elevenlabs/index.cjs +1 -1
  76. package/model-provider/elevenlabs/index.d.ts +1 -1
  77. package/model-provider/elevenlabs/index.js +1 -1
  78. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +21 -2
  79. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +11 -6
  80. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +21 -2
  81. package/model-provider/lmnt/{LmntSpeechSynthesisModel.cjs → LmntSpeechModel.cjs} +5 -5
  82. package/model-provider/lmnt/LmntSpeechModel.d.ts +26 -0
  83. package/model-provider/lmnt/{LmntSpeechSynthesisModel.js → LmntSpeechModel.js} +3 -3
  84. package/model-provider/lmnt/index.cjs +1 -1
  85. package/model-provider/lmnt/index.d.ts +1 -1
  86. package/model-provider/lmnt/index.js +1 -1
  87. package/model-provider/openai/{OpenAITextGenerationModel.cjs → OpenAICompletionModel.cjs} +17 -17
  88. package/model-provider/openai/{OpenAITextGenerationModel.d.ts → OpenAICompletionModel.d.ts} +29 -29
  89. package/model-provider/openai/{OpenAITextGenerationModel.js → OpenAICompletionModel.js} +12 -12
  90. package/model-provider/openai/OpenAICostCalculator.cjs +8 -8
  91. package/model-provider/openai/OpenAICostCalculator.js +8 -8
  92. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +3 -3
  93. package/model-provider/openai/OpenAITranscriptionModel.d.ts +1 -1
  94. package/model-provider/openai/TikTokenTokenizer.d.ts +2 -2
  95. package/model-provider/openai/chat/OpenAIChatModel.d.ts +7 -7
  96. package/model-provider/openai/index.cjs +1 -1
  97. package/model-provider/openai/index.d.ts +1 -1
  98. package/model-provider/openai/index.js +1 -1
  99. package/package.json +1 -1
  100. package/retriever/retrieve.cjs +7 -75
  101. package/retriever/retrieve.js +7 -75
  102. package/tool/UseToolEvent.d.ts +7 -0
  103. package/tool/UseToolOrGenerateTextEvent.d.ts +7 -0
  104. package/tool/executeTool.cjs +2 -0
  105. package/tool/executeTool.js +2 -0
  106. package/tool/index.cjs +2 -0
  107. package/tool/index.d.ts +2 -0
  108. package/tool/index.js +2 -0
  109. package/tool/useTool.cjs +18 -10
  110. package/tool/useTool.js +18 -10
  111. package/tool/useToolOrGenerateText.cjs +34 -26
  112. package/tool/useToolOrGenerateText.js +34 -26
  113. package/vector-index/UpsertIntoVectorIndexEvent.cjs +2 -0
  114. package/vector-index/UpsertIntoVectorIndexEvent.d.ts +9 -0
  115. package/vector-index/UpsertIntoVectorIndexEvent.js +1 -0
  116. package/vector-index/VectorIndexRetriever.cjs +1 -4
  117. package/vector-index/VectorIndexRetriever.js +1 -4
  118. package/vector-index/index.cjs +1 -0
  119. package/vector-index/index.d.ts +1 -0
  120. package/vector-index/index.js +1 -0
  121. package/vector-index/upsertIntoVectorIndex.cjs +16 -7
  122. package/vector-index/upsertIntoVectorIndex.js +16 -7
  123. package/model-function/describe-image/ImageDescriptionEvent.d.ts +0 -18
  124. package/model-function/describe-image/ImageDescriptionModel.d.ts +0 -10
  125. package/model-function/describe-image/describeImage.cjs +0 -26
  126. package/model-function/describe-image/describeImage.d.ts +0 -9
  127. package/model-function/describe-image/describeImage.js +0 -22
  128. package/model-function/generate-text/TextStreamingEvent.d.ts +0 -7
  129. package/model-function/synthesize-speech/SpeechSynthesisEvent.d.ts +0 -21
  130. package/model-function/synthesize-speech/SpeechSynthesisModel.d.ts +0 -15
  131. package/model-function/synthesize-speech/synthesizeSpeech.cjs +0 -67
  132. package/model-function/synthesize-speech/synthesizeSpeech.d.ts +0 -14
  133. package/model-function/synthesize-speech/synthesizeSpeech.js +0 -63
  134. package/model-provider/lmnt/LmntSpeechSynthesisModel.d.ts +0 -26
  135. /package/{model-function/describe-image/ImageDescriptionEvent.cjs → guard/GuardEvent.cjs} +0 -0
  136. /package/{model-function/describe-image/ImageDescriptionEvent.js → guard/GuardEvent.js} +0 -0
  137. /package/model-function/{describe-image/ImageDescriptionModel.cjs → generate-speech/SpeechGenerationEvent.cjs} +0 -0
  138. /package/model-function/{describe-image/ImageDescriptionModel.js → generate-speech/SpeechGenerationEvent.js} +0 -0
  139. /package/model-function/{generate-text/TextStreamingEvent.cjs → generate-speech/SpeechGenerationModel.cjs} +0 -0
  140. /package/model-function/{generate-text/TextStreamingEvent.js → generate-speech/SpeechGenerationModel.js} +0 -0
  141. /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.cjs +0 -0
  142. /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionEvent.js +0 -0
  143. /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.cjs +0 -0
  144. /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.d.ts +0 -0
  145. /package/model-function/{transcribe-speech → generate-transcription}/TranscriptionModel.js +0 -0
  146. /package/{model-function/synthesize-speech/SpeechSynthesisEvent.cjs → tool/UseToolEvent.cjs} +0 -0
  147. /package/{model-function/synthesize-speech/SpeechSynthesisEvent.js → tool/UseToolEvent.js} +0 -0
  148. /package/{model-function/synthesize-speech/SpeechSynthesisModel.cjs → tool/UseToolOrGenerateTextEvent.cjs} +0 -0
  149. /package/{model-function/synthesize-speech/SpeechSynthesisModel.js → tool/UseToolOrGenerateTextEvent.js} +0 -0
@@ -18,7 +18,7 @@ const defaultModel = "eleven_multilingual_v2";
18
18
  *
19
19
  * @see https://api.elevenlabs.io/docs#/text-to-speech/Text_to_speech_v1_text_to_speech__voice_id__post
20
20
  */
21
- export class ElevenLabsSpeechSynthesisModel extends AbstractModel {
21
+ export class ElevenLabsSpeechModel extends AbstractModel {
22
22
  constructor(settings) {
23
23
  super({ settings });
24
24
  Object.defineProperty(this, "provider", {
@@ -52,10 +52,10 @@ export class ElevenLabsSpeechSynthesisModel extends AbstractModel {
52
52
  voiceSettings: this.settings.voiceSettings,
53
53
  };
54
54
  }
55
- doSynthesizeSpeechStandard(text, options) {
55
+ doGenerateSpeechStandard(text, options) {
56
56
  return this.callAPI(text, options);
57
57
  }
58
- async doSynthesizeSpeechStreamDuplex(textStream
58
+ async doGenerateSpeechStreamDuplex(textStream
59
59
  // options?: FunctionOptions | undefined
60
60
  ) {
61
61
  const responseSchema = z.union([
@@ -148,7 +148,7 @@ export class ElevenLabsSpeechSynthesisModel extends AbstractModel {
148
148
  return queue;
149
149
  }
150
150
  withSettings(additionalSettings) {
151
- return new ElevenLabsSpeechSynthesisModel({
151
+ return new ElevenLabsSpeechModel({
152
152
  ...this.settings,
153
153
  ...additionalSettings,
154
154
  });
@@ -15,4 +15,4 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
15
15
  };
16
16
  Object.defineProperty(exports, "__esModule", { value: true });
17
17
  __exportStar(require("./ElevenLabsApiConfiguration.cjs"), exports);
18
- __exportStar(require("./ElevenLabsSpeechSynthesisModel.cjs"), exports);
18
+ __exportStar(require("./ElevenLabsSpeechModel.cjs"), exports);
@@ -1,2 +1,2 @@
1
1
  export * from "./ElevenLabsApiConfiguration.js";
2
- export * from "./ElevenLabsSpeechSynthesisModel.js";
2
+ export * from "./ElevenLabsSpeechModel.js";
@@ -1,2 +1,2 @@
1
1
  export * from "./ElevenLabsApiConfiguration.js";
2
- export * from "./ElevenLabsSpeechSynthesisModel.js";
2
+ export * from "./ElevenLabsSpeechModel.js";
@@ -5,6 +5,7 @@ const zod_1 = require("zod");
5
5
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
6
6
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
7
7
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
8
+ const PromptFormatTextGenerationModel_js_1 = require("../../model-function/generate-text/PromptFormatTextGenerationModel.cjs");
8
9
  const HuggingFaceApiConfiguration_js_1 = require("./HuggingFaceApiConfiguration.cjs");
9
10
  const HuggingFaceError_js_1 = require("./HuggingFaceError.cjs");
10
11
  /**
@@ -21,6 +22,18 @@ class HuggingFaceImageDescriptionModel extends AbstractModel_js_1.AbstractModel
21
22
  writable: true,
22
23
  value: "huggingface"
23
24
  });
25
+ Object.defineProperty(this, "contextWindowSize", {
26
+ enumerable: true,
27
+ configurable: true,
28
+ writable: true,
29
+ value: undefined
30
+ });
31
+ Object.defineProperty(this, "tokenizer", {
32
+ enumerable: true,
33
+ configurable: true,
34
+ writable: true,
35
+ value: undefined
36
+ });
24
37
  Object.defineProperty(this, "countPromptTokens", {
25
38
  enumerable: true,
26
39
  configurable: true,
@@ -45,13 +58,19 @@ class HuggingFaceImageDescriptionModel extends AbstractModel_js_1.AbstractModel
45
58
  get settingsForEvent() {
46
59
  return {};
47
60
  }
48
- async doDescribeImage(data, options) {
61
+ async doGenerateText(data, options) {
49
62
  const response = await this.callAPI(data, options);
50
63
  return {
51
64
  response,
52
- description: response[0].generated_text,
65
+ text: response[0].generated_text,
53
66
  };
54
67
  }
68
+ withPromptFormat(promptFormat) {
69
+ return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
70
+ model: this,
71
+ promptFormat,
72
+ });
73
+ }
55
74
  withSettings(additionalSettings) {
56
75
  return new HuggingFaceImageDescriptionModel(Object.assign({}, this.settings, additionalSettings));
57
76
  }
@@ -1,10 +1,12 @@
1
1
  /// <reference types="node" />
2
2
  import { z } from "zod";
3
+ import { FunctionOptions } from "../../core/FunctionOptions.js";
3
4
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
4
5
  import { AbstractModel } from "../../model-function/AbstractModel.js";
5
- import { FunctionOptions } from "../../core/FunctionOptions.js";
6
- import { ImageDescriptionModel, ImageDescriptionModelSettings } from "../../model-function/describe-image/ImageDescriptionModel.js";
7
- export interface HuggingFaceImageDescriptionModelSettings extends ImageDescriptionModelSettings {
6
+ import { PromptFormatTextGenerationModel } from "../../model-function/generate-text/PromptFormatTextGenerationModel.js";
7
+ import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
8
+ import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
9
+ export interface HuggingFaceImageDescriptionModelSettings extends TextGenerationModelSettings {
8
10
  api?: ApiConfiguration;
9
11
  model: string;
10
12
  }
@@ -13,19 +15,22 @@ export interface HuggingFaceImageDescriptionModelSettings extends ImageDescripti
13
15
  *
14
16
  * @see https://huggingface.co/tasks/image-to-text
15
17
  */
16
- export declare class HuggingFaceImageDescriptionModel extends AbstractModel<HuggingFaceImageDescriptionModelSettings> implements ImageDescriptionModel<Buffer, HuggingFaceImageDescriptionModelSettings> {
18
+ export declare class HuggingFaceImageDescriptionModel extends AbstractModel<HuggingFaceImageDescriptionModelSettings> implements TextGenerationModel<Buffer, HuggingFaceImageDescriptionModelSettings> {
17
19
  constructor(settings: HuggingFaceImageDescriptionModelSettings);
18
20
  readonly provider = "huggingface";
19
21
  get modelName(): string;
20
22
  callAPI(data: Buffer, options?: FunctionOptions): Promise<HuggingFaceImageDescriptionResponse>;
21
23
  get settingsForEvent(): Partial<HuggingFaceImageDescriptionModelSettings>;
24
+ readonly contextWindowSize: undefined;
25
+ readonly tokenizer: undefined;
22
26
  readonly countPromptTokens: undefined;
23
- doDescribeImage(data: Buffer, options?: FunctionOptions): Promise<{
27
+ doGenerateText(data: Buffer, options?: FunctionOptions): Promise<{
24
28
  response: {
25
29
  generated_text: string;
26
30
  }[];
27
- description: string;
31
+ text: string;
28
32
  }>;
33
+ withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, Buffer>): PromptFormatTextGenerationModel<INPUT_PROMPT, Buffer, HuggingFaceImageDescriptionModelSettings, this>;
29
34
  withSettings(additionalSettings: Partial<HuggingFaceImageDescriptionModelSettings>): this;
30
35
  }
31
36
  declare const huggingFaceImageDescriptionResponseSchema: z.ZodArray<z.ZodObject<{
@@ -2,6 +2,7 @@ import { z } from "zod";
2
2
  import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
3
3
  import { createJsonResponseHandler, postToApi, } from "../../core/api/postToApi.js";
4
4
  import { AbstractModel } from "../../model-function/AbstractModel.js";
5
+ import { PromptFormatTextGenerationModel } from "../../model-function/generate-text/PromptFormatTextGenerationModel.js";
5
6
  import { HuggingFaceApiConfiguration } from "./HuggingFaceApiConfiguration.js";
6
7
  import { failedHuggingFaceCallResponseHandler } from "./HuggingFaceError.js";
7
8
  /**
@@ -18,6 +19,18 @@ export class HuggingFaceImageDescriptionModel extends AbstractModel {
18
19
  writable: true,
19
20
  value: "huggingface"
20
21
  });
22
+ Object.defineProperty(this, "contextWindowSize", {
23
+ enumerable: true,
24
+ configurable: true,
25
+ writable: true,
26
+ value: undefined
27
+ });
28
+ Object.defineProperty(this, "tokenizer", {
29
+ enumerable: true,
30
+ configurable: true,
31
+ writable: true,
32
+ value: undefined
33
+ });
21
34
  Object.defineProperty(this, "countPromptTokens", {
22
35
  enumerable: true,
23
36
  configurable: true,
@@ -42,13 +55,19 @@ export class HuggingFaceImageDescriptionModel extends AbstractModel {
42
55
  get settingsForEvent() {
43
56
  return {};
44
57
  }
45
- async doDescribeImage(data, options) {
58
+ async doGenerateText(data, options) {
46
59
  const response = await this.callAPI(data, options);
47
60
  return {
48
61
  response,
49
- description: response[0].generated_text,
62
+ text: response[0].generated_text,
50
63
  };
51
64
  }
65
+ withPromptFormat(promptFormat) {
66
+ return new PromptFormatTextGenerationModel({
67
+ model: this,
68
+ promptFormat,
69
+ });
70
+ }
52
71
  withSettings(additionalSettings) {
53
72
  return new HuggingFaceImageDescriptionModel(Object.assign({}, this.settings, additionalSettings));
54
73
  }
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.LmntSpeechSynthesisModel = void 0;
3
+ exports.LmntSpeechModel = void 0;
4
4
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
5
5
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
6
6
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
@@ -11,7 +11,7 @@ const LmntError_js_1 = require("./LmntError.cjs");
11
11
  *
12
12
  * @see https://www.lmnt.com/docs/rest/#synthesize-speech
13
13
  */
14
- class LmntSpeechSynthesisModel extends AbstractModel_js_1.AbstractModel {
14
+ class LmntSpeechModel extends AbstractModel_js_1.AbstractModel {
15
15
  constructor(settings) {
16
16
  super({ settings });
17
17
  Object.defineProperty(this, "provider", {
@@ -43,17 +43,17 @@ class LmntSpeechSynthesisModel extends AbstractModel_js_1.AbstractModel {
43
43
  length: this.settings.length,
44
44
  };
45
45
  }
46
- doSynthesizeSpeechStandard(text, options) {
46
+ doGenerateSpeechStandard(text, options) {
47
47
  return this.callAPI(text, options);
48
48
  }
49
49
  withSettings(additionalSettings) {
50
- return new LmntSpeechSynthesisModel({
50
+ return new LmntSpeechModel({
51
51
  ...this.settings,
52
52
  ...additionalSettings,
53
53
  });
54
54
  }
55
55
  }
56
- exports.LmntSpeechSynthesisModel = LmntSpeechSynthesisModel;
56
+ exports.LmntSpeechModel = LmntSpeechModel;
57
57
  async function callLmntTextToSpeechAPI({ api = new LmntApiConfiguration_js_1.LmntApiConfiguration(), abortSignal, text, voice, speed, seed, length, }) {
58
58
  const formData = new FormData();
59
59
  formData.append("text", text);
@@ -0,0 +1,26 @@
1
+ /// <reference types="node" />
2
+ import { AbstractModel } from "../../model-function/AbstractModel.js";
3
+ import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
4
+ import { FunctionOptions } from "../../core/FunctionOptions.js";
5
+ import { SpeechGenerationModel, SpeechGenerationModelSettings } from "../../model-function/generate-speech/SpeechGenerationModel.js";
6
+ export interface LmntSpeechModelSettings extends SpeechGenerationModelSettings {
7
+ api?: ApiConfiguration;
8
+ voice: string;
9
+ speed?: number;
10
+ seed?: number;
11
+ length?: number;
12
+ }
13
+ /**
14
+ * Synthesize speech using the LMNT API.
15
+ *
16
+ * @see https://www.lmnt.com/docs/rest/#synthesize-speech
17
+ */
18
+ export declare class LmntSpeechModel extends AbstractModel<LmntSpeechModelSettings> implements SpeechGenerationModel<LmntSpeechModelSettings> {
19
+ constructor(settings: LmntSpeechModelSettings);
20
+ readonly provider = "lmnt";
21
+ get modelName(): string;
22
+ private callAPI;
23
+ get settingsForEvent(): Partial<LmntSpeechModelSettings>;
24
+ doGenerateSpeechStandard(text: string, options?: FunctionOptions): Promise<Buffer>;
25
+ withSettings(additionalSettings: Partial<LmntSpeechModelSettings>): this;
26
+ }
@@ -8,7 +8,7 @@ import { failedLmntCallResponseHandler } from "./LmntError.js";
8
8
  *
9
9
  * @see https://www.lmnt.com/docs/rest/#synthesize-speech
10
10
  */
11
- export class LmntSpeechSynthesisModel extends AbstractModel {
11
+ export class LmntSpeechModel extends AbstractModel {
12
12
  constructor(settings) {
13
13
  super({ settings });
14
14
  Object.defineProperty(this, "provider", {
@@ -40,11 +40,11 @@ export class LmntSpeechSynthesisModel extends AbstractModel {
40
40
  length: this.settings.length,
41
41
  };
42
42
  }
43
- doSynthesizeSpeechStandard(text, options) {
43
+ doGenerateSpeechStandard(text, options) {
44
44
  return this.callAPI(text, options);
45
45
  }
46
46
  withSettings(additionalSettings) {
47
- return new LmntSpeechSynthesisModel({
47
+ return new LmntSpeechModel({
48
48
  ...this.settings,
49
49
  ...additionalSettings,
50
50
  });
@@ -15,4 +15,4 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
15
15
  };
16
16
  Object.defineProperty(exports, "__esModule", { value: true });
17
17
  __exportStar(require("./LmntApiConfiguration.cjs"), exports);
18
- __exportStar(require("./LmntSpeechSynthesisModel.cjs"), exports);
18
+ __exportStar(require("./LmntSpeechModel.cjs"), exports);
@@ -1,2 +1,2 @@
1
1
  export * from "./LmntApiConfiguration.js";
2
- export * from "./LmntSpeechSynthesisModel.js";
2
+ export * from "./LmntSpeechModel.js";
@@ -1,2 +1,2 @@
1
1
  export * from "./LmntApiConfiguration.js";
2
- export * from "./LmntSpeechSynthesisModel.js";
2
+ export * from "./LmntSpeechModel.js";
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.OpenAITextResponseFormat = exports.OpenAITextGenerationModel = exports.calculateOpenAITextGenerationCostInMillicents = exports.isOpenAITextGenerationModel = exports.getOpenAITextGenerationModelInformation = exports.OPENAI_TEXT_GENERATION_MODELS = void 0;
3
+ exports.OpenAITextResponseFormat = exports.OpenAICompletionModel = exports.calculateOpenAICompletionCostInMillicents = exports.isOpenAICompletionModel = exports.getOpenAICompletionModelInformation = exports.OPENAI_TEXT_GENERATION_MODELS = void 0;
4
4
  const zod_1 = require("zod");
5
5
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
6
6
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
@@ -87,7 +87,7 @@ exports.OPENAI_TEXT_GENERATION_MODELS = {
87
87
  completionTokenCostInMillicents: 0.04,
88
88
  },
89
89
  };
90
- function getOpenAITextGenerationModelInformation(model) {
90
+ function getOpenAICompletionModelInformation(model) {
91
91
  // Model is already a base model:
92
92
  if (model in exports.OPENAI_TEXT_GENERATION_MODELS) {
93
93
  const baseModelInformation = exports.OPENAI_TEXT_GENERATION_MODELS[model];
@@ -114,26 +114,26 @@ function getOpenAITextGenerationModelInformation(model) {
114
114
  }
115
115
  throw new Error(`Unknown OpenAI chat base model ${baseModel}.`);
116
116
  }
117
- exports.getOpenAITextGenerationModelInformation = getOpenAITextGenerationModelInformation;
118
- const isOpenAITextGenerationModel = (model) => model in exports.OPENAI_TEXT_GENERATION_MODELS ||
117
+ exports.getOpenAICompletionModelInformation = getOpenAICompletionModelInformation;
118
+ const isOpenAICompletionModel = (model) => model in exports.OPENAI_TEXT_GENERATION_MODELS ||
119
119
  model.startsWith("ft:davinci-002:") ||
120
120
  model.startsWith("ft:babbage-002:");
121
- exports.isOpenAITextGenerationModel = isOpenAITextGenerationModel;
122
- const calculateOpenAITextGenerationCostInMillicents = ({ model, response, }) => {
123
- const modelInformation = getOpenAITextGenerationModelInformation(model);
121
+ exports.isOpenAICompletionModel = isOpenAICompletionModel;
122
+ const calculateOpenAICompletionCostInMillicents = ({ model, response, }) => {
123
+ const modelInformation = getOpenAICompletionModelInformation(model);
124
124
  return (response.usage.prompt_tokens *
125
125
  modelInformation.promptTokenCostInMillicents +
126
126
  response.usage.completion_tokens *
127
127
  modelInformation.completionTokenCostInMillicents);
128
128
  };
129
- exports.calculateOpenAITextGenerationCostInMillicents = calculateOpenAITextGenerationCostInMillicents;
129
+ exports.calculateOpenAICompletionCostInMillicents = calculateOpenAICompletionCostInMillicents;
130
130
  /**
131
131
  * Create a text generation model that calls the OpenAI text completion API.
132
132
  *
133
133
  * @see https://platform.openai.com/docs/api-reference/completions/create
134
134
  *
135
135
  * @example
136
- * const model = new OpenAITextGenerationModel({
136
+ * const model = new OpenAICompletionModel({
137
137
  * model: "gpt-3.5-turbo-instruct",
138
138
  * temperature: 0.7,
139
139
  * maxCompletionTokens: 500,
@@ -145,7 +145,7 @@ exports.calculateOpenAITextGenerationCostInMillicents = calculateOpenAITextGener
145
145
  * "Write a short story about a robot learning to love:\n\n"
146
146
  * );
147
147
  */
148
- class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
148
+ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
149
149
  constructor(settings) {
150
150
  super({ settings });
151
151
  Object.defineProperty(this, "provider", {
@@ -166,7 +166,7 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
166
166
  writable: true,
167
167
  value: void 0
168
168
  });
169
- const modelInformation = getOpenAITextGenerationModelInformation(this.settings.model);
169
+ const modelInformation = getOpenAICompletionModelInformation(this.settings.model);
170
170
  this.tokenizer = new TikTokenTokenizer_js_1.TikTokenTokenizer({
171
171
  model: modelInformation.baseModel,
172
172
  });
@@ -195,7 +195,7 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
195
195
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
196
196
  retry: callSettings.api?.retry,
197
197
  throttle: callSettings.api?.throttle,
198
- call: async () => callOpenAITextGenerationAPI(callSettings),
198
+ call: async () => callOpenAICompletionAPI(callSettings),
199
199
  });
200
200
  }
201
201
  get settingsForEvent() {
@@ -260,11 +260,11 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
260
260
  });
261
261
  }
262
262
  withSettings(additionalSettings) {
263
- return new OpenAITextGenerationModel(Object.assign({}, this.settings, additionalSettings));
263
+ return new OpenAICompletionModel(Object.assign({}, this.settings, additionalSettings));
264
264
  }
265
265
  }
266
- exports.OpenAITextGenerationModel = OpenAITextGenerationModel;
267
- const openAITextGenerationResponseSchema = zod_1.z.object({
266
+ exports.OpenAICompletionModel = OpenAICompletionModel;
267
+ const OpenAICompletionResponseSchema = zod_1.z.object({
268
268
  id: zod_1.z.string(),
269
269
  object: zod_1.z.literal("text_completion"),
270
270
  created: zod_1.z.number(),
@@ -281,7 +281,7 @@ const openAITextGenerationResponseSchema = zod_1.z.object({
281
281
  total_tokens: zod_1.z.number(),
282
282
  }),
283
283
  });
284
- async function callOpenAITextGenerationAPI({ api = new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration(), abortSignal, responseFormat, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, logitBias, user, }) {
284
+ async function callOpenAICompletionAPI({ api = new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration(), abortSignal, responseFormat, model, prompt, suffix, maxTokens, temperature, topP, n, logprobs, echo, stop, presencePenalty, frequencyPenalty, bestOf, logitBias, user, }) {
285
285
  // empty arrays are not allowed for stop:
286
286
  if (stop != null && Array.isArray(stop) && stop.length === 0) {
287
287
  stop = undefined;
@@ -318,7 +318,7 @@ exports.OpenAITextResponseFormat = {
318
318
  */
319
319
  json: {
320
320
  stream: false,
321
- handler: (0, postToApi_js_1.createJsonResponseHandler)(openAITextGenerationResponseSchema),
321
+ handler: (0, postToApi_js_1.createJsonResponseHandler)(OpenAICompletionResponseSchema),
322
322
  },
323
323
  /**
324
324
  * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
@@ -81,25 +81,25 @@ export declare const OPENAI_TEXT_GENERATION_MODELS: {
81
81
  completionTokenCostInMillicents: number;
82
82
  };
83
83
  };
84
- export declare function getOpenAITextGenerationModelInformation(model: OpenAITextGenerationModelType): {
85
- baseModel: OpenAITextGenerationBaseModelType;
84
+ export declare function getOpenAICompletionModelInformation(model: OpenAICompletionModelType): {
85
+ baseModel: OpenAICompletionBaseModelType;
86
86
  isFineTuned: boolean;
87
87
  contextWindowSize: number;
88
88
  promptTokenCostInMillicents: number;
89
89
  completionTokenCostInMillicents: number;
90
90
  };
91
- type FineTuneableOpenAITextGenerationModelType = "davinci-002" | "babbage-002";
92
- type FineTunedOpenAITextGenerationModelType = `ft:${FineTuneableOpenAITextGenerationModelType}:${string}:${string}:${string}`;
93
- export type OpenAITextGenerationBaseModelType = keyof typeof OPENAI_TEXT_GENERATION_MODELS;
94
- export type OpenAITextGenerationModelType = OpenAITextGenerationBaseModelType | FineTunedOpenAITextGenerationModelType;
95
- export declare const isOpenAITextGenerationModel: (model: string) => model is OpenAITextGenerationModelType;
96
- export declare const calculateOpenAITextGenerationCostInMillicents: ({ model, response, }: {
97
- model: OpenAITextGenerationModelType;
98
- response: OpenAITextGenerationResponse;
91
+ type FineTuneableOpenAICompletionModelType = "davinci-002" | "babbage-002";
92
+ type FineTunedOpenAICompletionModelType = `ft:${FineTuneableOpenAICompletionModelType}:${string}:${string}:${string}`;
93
+ export type OpenAICompletionBaseModelType = keyof typeof OPENAI_TEXT_GENERATION_MODELS;
94
+ export type OpenAICompletionModelType = OpenAICompletionBaseModelType | FineTunedOpenAICompletionModelType;
95
+ export declare const isOpenAICompletionModel: (model: string) => model is OpenAICompletionModelType;
96
+ export declare const calculateOpenAICompletionCostInMillicents: ({ model, response, }: {
97
+ model: OpenAICompletionModelType;
98
+ response: OpenAICompletionResponse;
99
99
  }) => number;
100
- export interface OpenAITextGenerationCallSettings {
100
+ export interface OpenAICompletionCallSettings {
101
101
  api?: ApiConfiguration;
102
- model: OpenAITextGenerationModelType;
102
+ model: OpenAICompletionModelType;
103
103
  suffix?: string;
104
104
  maxTokens?: number;
105
105
  temperature?: number;
@@ -113,7 +113,7 @@ export interface OpenAITextGenerationCallSettings {
113
113
  bestOf?: number;
114
114
  logitBias?: Record<number, number>;
115
115
  }
116
- export interface OpenAITextGenerationModelSettings extends TextGenerationModelSettings, Omit<OpenAITextGenerationCallSettings, "stop" | "maxTokens"> {
116
+ export interface OpenAICompletionModelSettings extends TextGenerationModelSettings, Omit<OpenAICompletionCallSettings, "stop" | "maxTokens"> {
117
117
  isUserIdForwardingEnabled?: boolean;
118
118
  }
119
119
  /**
@@ -122,7 +122,7 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
122
122
  * @see https://platform.openai.com/docs/api-reference/completions/create
123
123
  *
124
124
  * @example
125
- * const model = new OpenAITextGenerationModel({
125
+ * const model = new OpenAICompletionModel({
126
126
  * model: "gpt-3.5-turbo-instruct",
127
127
  * temperature: 0.7,
128
128
  * maxCompletionTokens: 500,
@@ -134,26 +134,26 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
134
134
  * "Write a short story about a robot learning to love:\n\n"
135
135
  * );
136
136
  */
137
- export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextGenerationModelSettings> implements TextStreamingModel<string, OpenAITextGenerationModelSettings> {
138
- constructor(settings: OpenAITextGenerationModelSettings);
137
+ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletionModelSettings> implements TextStreamingModel<string, OpenAICompletionModelSettings> {
138
+ constructor(settings: OpenAICompletionModelSettings);
139
139
  readonly provider: "openai";
140
- get modelName(): OpenAITextGenerationModelType;
140
+ get modelName(): OpenAICompletionModelType;
141
141
  readonly contextWindowSize: number;
142
142
  readonly tokenizer: TikTokenTokenizer;
143
143
  countPromptTokens(input: string): Promise<number>;
144
144
  callAPI<RESULT>(prompt: string, options: {
145
145
  responseFormat: OpenAITextResponseFormatType<RESULT>;
146
146
  } & FunctionOptions): Promise<RESULT>;
147
- get settingsForEvent(): Partial<OpenAITextGenerationModelSettings>;
147
+ get settingsForEvent(): Partial<OpenAICompletionModelSettings>;
148
148
  doGenerateText(prompt: string, options?: FunctionOptions): Promise<{
149
149
  response: {
150
150
  object: "text_completion";
151
- model: string;
152
151
  usage: {
153
152
  prompt_tokens: number;
154
153
  completion_tokens: number;
155
154
  total_tokens: number;
156
155
  };
156
+ model: string;
157
157
  id: string;
158
158
  created: number;
159
159
  choices: {
@@ -174,18 +174,18 @@ export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextG
174
174
  /**
175
175
  * Returns this model with an instruction prompt format.
176
176
  */
177
- withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").InstructionPrompt, string, OpenAITextGenerationModelSettings, this>;
177
+ withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").InstructionPrompt, string, OpenAICompletionModelSettings, this>;
178
178
  /**
179
179
  * Returns this model with a chat prompt format.
180
180
  */
181
181
  withChatPrompt(options?: {
182
182
  user?: string;
183
183
  ai?: string;
184
- }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, OpenAITextGenerationModelSettings, this>;
185
- withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OpenAITextGenerationModelSettings, this>;
186
- withSettings(additionalSettings: Partial<OpenAITextGenerationModelSettings>): this;
184
+ }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, OpenAICompletionModelSettings, this>;
185
+ withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
186
+ withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
187
187
  }
188
- declare const openAITextGenerationResponseSchema: z.ZodObject<{
188
+ declare const OpenAICompletionResponseSchema: z.ZodObject<{
189
189
  id: z.ZodString;
190
190
  object: z.ZodLiteral<"text_completion">;
191
191
  created: z.ZodNumber;
@@ -221,12 +221,12 @@ declare const openAITextGenerationResponseSchema: z.ZodObject<{
221
221
  }>;
222
222
  }, "strip", z.ZodTypeAny, {
223
223
  object: "text_completion";
224
- model: string;
225
224
  usage: {
226
225
  prompt_tokens: number;
227
226
  completion_tokens: number;
228
227
  total_tokens: number;
229
228
  };
229
+ model: string;
230
230
  id: string;
231
231
  created: number;
232
232
  choices: {
@@ -237,12 +237,12 @@ declare const openAITextGenerationResponseSchema: z.ZodObject<{
237
237
  }[];
238
238
  }, {
239
239
  object: "text_completion";
240
- model: string;
241
240
  usage: {
242
241
  prompt_tokens: number;
243
242
  completion_tokens: number;
244
243
  total_tokens: number;
245
244
  };
245
+ model: string;
246
246
  id: string;
247
247
  created: number;
248
248
  choices: {
@@ -252,7 +252,7 @@ declare const openAITextGenerationResponseSchema: z.ZodObject<{
252
252
  logprobs?: any;
253
253
  }[];
254
254
  }>;
255
- export type OpenAITextGenerationResponse = z.infer<typeof openAITextGenerationResponseSchema>;
255
+ export type OpenAICompletionResponse = z.infer<typeof OpenAICompletionResponseSchema>;
256
256
  export type OpenAITextResponseFormatType<T> = {
257
257
  stream: boolean;
258
258
  handler: ResponseHandler<T>;
@@ -265,12 +265,12 @@ export declare const OpenAITextResponseFormat: {
265
265
  stream: false;
266
266
  handler: ResponseHandler<{
267
267
  object: "text_completion";
268
- model: string;
269
268
  usage: {
270
269
  prompt_tokens: number;
271
270
  completion_tokens: number;
272
271
  total_tokens: number;
273
272
  };
273
+ model: string;
274
274
  id: string;
275
275
  created: number;
276
276
  choices: {
@@ -292,7 +292,7 @@ export declare const OpenAITextResponseFormat: {
292
292
  }) => Promise<AsyncIterable<Delta<string>>>;
293
293
  };
294
294
  };
295
- export type OpenAITextGenerationDelta = Array<{
295
+ export type OpenAICompletionDelta = Array<{
296
296
  content: string;
297
297
  isComplete: boolean;
298
298
  delta: string;