modelfusion 0.40.0 → 0.41.0

This diff compares the contents of two publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
Files changed (146)
  1. package/README.md +14 -7
  2. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.d.ts +3 -3
  3. package/core/FunctionEvent.d.ts +1 -1
  4. package/model-function/AsyncIterableResultPromise.d.ts +1 -1
  5. package/model-function/Delta.d.ts +8 -0
  6. package/model-function/ModelCallEvent.d.ts +1 -1
  7. package/model-function/ModelCallMetadata.d.ts +13 -0
  8. package/model-function/describe-image/ImageDescriptionEvent.d.ts +1 -1
  9. package/model-function/describe-image/ImageDescriptionModel.d.ts +6 -4
  10. package/model-function/describe-image/describeImage.cjs +7 -2
  11. package/model-function/describe-image/describeImage.d.ts +2 -2
  12. package/model-function/describe-image/describeImage.js +7 -2
  13. package/model-function/embed/EmbeddingEvent.d.ts +1 -1
  14. package/model-function/embed/EmbeddingModel.d.ts +6 -4
  15. package/model-function/embed/embed.cjs +16 -11
  16. package/model-function/embed/embed.d.ts +3 -3
  17. package/model-function/embed/embed.js +16 -11
  18. package/model-function/executeCall.cjs +26 -30
  19. package/model-function/executeCall.d.ts +19 -28
  20. package/model-function/executeCall.js +26 -30
  21. package/model-function/generate-image/ImageGenerationEvent.d.ts +1 -1
  22. package/model-function/generate-image/ImageGenerationModel.d.ts +6 -4
  23. package/model-function/generate-image/generateImage.cjs +7 -2
  24. package/model-function/generate-image/generateImage.d.ts +2 -2
  25. package/model-function/generate-image/generateImage.js +7 -2
  26. package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +6 -5
  27. package/model-function/generate-structure/StructureFromTextGenerationModel.d.ts +7 -5
  28. package/model-function/generate-structure/StructureFromTextGenerationModel.js +6 -5
  29. package/model-function/generate-structure/StructureGenerationEvent.d.ts +1 -1
  30. package/model-function/generate-structure/StructureGenerationModel.d.ts +15 -18
  31. package/model-function/generate-structure/StructureOrTextGenerationModel.d.ts +19 -17
  32. package/model-function/generate-structure/generateStructure.cjs +10 -8
  33. package/model-function/generate-structure/generateStructure.d.ts +2 -2
  34. package/model-function/generate-structure/generateStructure.js +10 -8
  35. package/model-function/generate-structure/generateStructureOrText.cjs +15 -8
  36. package/model-function/generate-structure/generateStructureOrText.d.ts +4 -4
  37. package/model-function/generate-structure/generateStructureOrText.js +15 -8
  38. package/model-function/generate-structure/streamStructure.cjs +4 -16
  39. package/model-function/generate-structure/streamStructure.d.ts +3 -7
  40. package/model-function/generate-structure/streamStructure.js +4 -16
  41. package/model-function/generate-text/TextGenerationEvent.d.ts +1 -1
  42. package/model-function/generate-text/TextGenerationModel.d.ts +18 -19
  43. package/model-function/generate-text/generateText.cjs +8 -9
  44. package/model-function/generate-text/generateText.d.ts +2 -2
  45. package/model-function/generate-text/generateText.js +8 -9
  46. package/model-function/generate-text/streamText.cjs +8 -21
  47. package/model-function/generate-text/streamText.d.ts +3 -7
  48. package/model-function/generate-text/streamText.js +8 -21
  49. package/model-function/index.cjs +2 -2
  50. package/model-function/index.d.ts +2 -2
  51. package/model-function/index.js +2 -2
  52. package/model-function/synthesize-speech/SpeechSynthesisEvent.d.ts +1 -1
  53. package/model-function/synthesize-speech/SpeechSynthesisModel.d.ts +3 -3
  54. package/model-function/synthesize-speech/synthesizeSpeech.cjs +7 -2
  55. package/model-function/synthesize-speech/synthesizeSpeech.d.ts +2 -2
  56. package/model-function/synthesize-speech/synthesizeSpeech.js +7 -2
  57. package/model-function/transcribe-speech/TranscriptionEvent.d.ts +1 -1
  58. package/model-function/transcribe-speech/TranscriptionModel.d.ts +6 -4
  59. package/model-function/transcribe-speech/transcribe.cjs +7 -2
  60. package/model-function/transcribe-speech/transcribe.d.ts +2 -2
  61. package/model-function/transcribe-speech/transcribe.js +7 -2
  62. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.cjs +14 -18
  63. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.d.ts +11 -9
  64. package/model-provider/automatic1111/Automatic1111ImageGenerationModel.js +14 -18
  65. package/model-provider/cohere/CohereTextEmbeddingModel.cjs +13 -16
  66. package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +12 -10
  67. package/model-provider/cohere/CohereTextEmbeddingModel.js +13 -16
  68. package/model-provider/cohere/CohereTextGenerationModel.cjs +29 -29
  69. package/model-provider/cohere/CohereTextGenerationModel.d.ts +24 -22
  70. package/model-provider/cohere/CohereTextGenerationModel.js +29 -29
  71. package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.cjs +10 -17
  72. package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.d.ts +2 -2
  73. package/model-provider/elevenlabs/ElevenLabsSpeechSynthesisModel.js +10 -17
  74. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.cjs +13 -16
  75. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.d.ts +9 -7
  76. package/model-provider/huggingface/HuggingFaceImageDescriptionModel.js +13 -16
  77. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.cjs +19 -25
  78. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.d.ts +8 -6
  79. package/model-provider/huggingface/HuggingFaceTextEmbeddingModel.js +19 -25
  80. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +18 -24
  81. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +10 -8
  82. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +18 -24
  83. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.cjs +13 -16
  84. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.d.ts +8 -6
  85. package/model-provider/llamacpp/LlamaCppTextEmbeddingModel.js +13 -16
  86. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +31 -34
  87. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +62 -60
  88. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +31 -34
  89. package/model-provider/lmnt/LmntSpeechSynthesisModel.cjs +7 -12
  90. package/model-provider/lmnt/LmntSpeechSynthesisModel.d.ts +2 -2
  91. package/model-provider/lmnt/LmntSpeechSynthesisModel.js +7 -12
  92. package/model-provider/openai/OpenAIImageGenerationModel.cjs +8 -16
  93. package/model-provider/openai/OpenAIImageGenerationModel.d.ts +11 -11
  94. package/model-provider/openai/OpenAIImageGenerationModel.js +8 -16
  95. package/model-provider/openai/OpenAITextEmbeddingModel.cjs +18 -24
  96. package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +18 -16
  97. package/model-provider/openai/OpenAITextEmbeddingModel.js +18 -24
  98. package/model-provider/openai/OpenAITextGenerationModel.cjs +23 -27
  99. package/model-provider/openai/OpenAITextGenerationModel.d.ts +31 -33
  100. package/model-provider/openai/OpenAITextGenerationModel.js +23 -27
  101. package/model-provider/openai/OpenAITranscriptionModel.cjs +19 -28
  102. package/model-provider/openai/OpenAITranscriptionModel.d.ts +27 -7
  103. package/model-provider/openai/OpenAITranscriptionModel.js +19 -28
  104. package/model-provider/openai/chat/OpenAIChatModel.cjs +82 -86
  105. package/model-provider/openai/chat/OpenAIChatModel.d.ts +127 -50
  106. package/model-provider/openai/chat/OpenAIChatModel.js +83 -87
  107. package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +4 -3
  108. package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +2 -2
  109. package/model-provider/openai/chat/OpenAIChatStreamIterable.js +2 -1
  110. package/model-provider/stability/StabilityImageGenerationModel.cjs +16 -21
  111. package/model-provider/stability/StabilityImageGenerationModel.d.ts +13 -11
  112. package/model-provider/stability/StabilityImageGenerationModel.js +16 -21
  113. package/package.json +1 -1
  114. package/prompt/PromptFormatTextGenerationModel.cjs +6 -19
  115. package/prompt/PromptFormatTextGenerationModel.d.ts +14 -10
  116. package/prompt/PromptFormatTextGenerationModel.js +6 -19
  117. package/prompt/PromptFormatTextStreamingModel.cjs +31 -0
  118. package/prompt/PromptFormatTextStreamingModel.d.ts +13 -0
  119. package/prompt/PromptFormatTextStreamingModel.js +27 -0
  120. package/prompt/chat/trimChatPrompt.d.ts +2 -2
  121. package/prompt/index.cjs +1 -0
  122. package/prompt/index.d.ts +1 -0
  123. package/prompt/index.js +1 -0
  124. package/retriever/Retriever.d.ts +3 -6
  125. package/retriever/retrieve.cjs +2 -2
  126. package/retriever/retrieve.d.ts +3 -3
  127. package/retriever/retrieve.js +2 -2
  128. package/tool/executeTool.cjs +2 -2
  129. package/tool/executeTool.js +2 -2
  130. package/tool/useTool.cjs +2 -4
  131. package/tool/useTool.d.ts +2 -2
  132. package/tool/useTool.js +2 -4
  133. package/tool/useToolOrGenerateText.d.ts +2 -2
  134. package/util/SafeResult.d.ts +1 -1
  135. package/util/runSafe.cjs +1 -1
  136. package/util/runSafe.js +1 -1
  137. package/vector-index/VectorIndexRetriever.cjs +0 -7
  138. package/vector-index/VectorIndexRetriever.d.ts +5 -5
  139. package/vector-index/VectorIndexRetriever.js +0 -7
  140. package/vector-index/upsertIntoVectorIndex.d.ts +4 -4
  141. package/model-function/DeltaEvent.d.ts +0 -7
  142. package/model-function/ModelFunctionOptions.d.ts +0 -4
  143. /package/model-function/{DeltaEvent.cjs → Delta.cjs} +0 -0
  144. /package/model-function/{DeltaEvent.js → Delta.js} +0 -0
  145. /package/model-function/{ModelFunctionOptions.cjs → ModelCallMetadata.cjs} +0 -0
  146. /package/model-function/{ModelFunctionOptions.js → ModelCallMetadata.js} +0 -0

package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts

@@ -1,12 +1,12 @@
  import z from "zod";
+ import { FunctionOptions } from "../../core/FunctionOptions.js";
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { ResponseHandler } from "../../core/api/postToApi.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
- import { DeltaEvent } from "../../model-function/DeltaEvent.js";
- import { ModelFunctionOptions } from "../../model-function/ModelFunctionOptions.js";
- import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
+ import { Delta } from "../../model-function/Delta.js";
+ import { TextGenerationModelSettings, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
  import { PromptFormat } from "../../prompt/PromptFormat.js";
- import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
+ import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
  import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
  export interface LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends number | undefined> extends TextGenerationModelSettings {
  api?: ApiConfiguration;
@@ -31,7 +31,7 @@ export interface LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends
  ignoreEos?: boolean;
  logitBias?: Array<[number, number | false]>;
  }
- export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> implements TextGenerationModel<string, LlamaCppTextGenerationResponse, LlamaCppTextGenerationDelta, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> {
+ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<string, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> {
  constructor(settings?: LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>);
  readonly provider = "llamacpp";
  get modelName(): null;
@@ -39,67 +39,69 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
  readonly tokenizer: LlamaCppTokenizer;
  callAPI<RESPONSE>(prompt: string, options: {
  responseFormat: LlamaCppTextGenerationResponseFormatType<RESPONSE>;
- } & ModelFunctionOptions<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): Promise<RESPONSE>;
+ } & FunctionOptions): Promise<RESPONSE>;
  get settingsForEvent(): Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>;
  countPromptTokens(prompt: string): Promise<number>;
- generateTextResponse(prompt: string, options?: ModelFunctionOptions<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): Promise<{
- model: string;
- prompt: string;
- content: string;
- stop: true;
- generation_settings: {
+ doGenerateText(prompt: string, options?: FunctionOptions): Promise<{
+ response: {
  model: string;
- stream: boolean;
- seed: number;
- mirostat: number;
- stop: string[];
- frequency_penalty: number;
- ignore_eos: boolean;
- logit_bias: number[];
- mirostat_eta: number;
- mirostat_tau: number;
- n_ctx: number;
- n_keep: number;
- n_predict: number;
- n_probs: number;
- penalize_nl: boolean;
- presence_penalty: number;
- repeat_last_n: number;
- repeat_penalty: number;
- temp: number;
- tfs_z: number;
- top_k: number;
- top_p: number;
- typical_p: number;
+ prompt: string;
+ content: string;
+ stop: true;
+ generation_settings: {
+ model: string;
+ stream: boolean;
+ seed: number;
+ mirostat: number;
+ stop: string[];
+ frequency_penalty: number;
+ ignore_eos: boolean;
+ logit_bias: number[];
+ mirostat_eta: number;
+ mirostat_tau: number;
+ n_ctx: number;
+ n_keep: number;
+ n_predict: number;
+ n_probs: number;
+ penalize_nl: boolean;
+ presence_penalty: number;
+ repeat_last_n: number;
+ repeat_penalty: number;
+ temp: number;
+ tfs_z: number;
+ top_k: number;
+ top_p: number;
+ typical_p: number;
+ };
+ stopped_eos: boolean;
+ stopped_limit: boolean;
+ stopped_word: boolean;
+ stopping_word: string;
+ timings: {
+ predicted_ms: number;
+ predicted_n: number;
+ predicted_per_second: number | null;
+ predicted_per_token_ms: number | null;
+ prompt_ms: number | null;
+ prompt_n: number;
+ prompt_per_second: number | null;
+ prompt_per_token_ms: number | null;
+ };
+ tokens_cached: number;
+ tokens_evaluated: number;
+ tokens_predicted: number;
+ truncated: boolean;
  };
- stopped_eos: boolean;
- stopped_limit: boolean;
- stopped_word: boolean;
- stopping_word: string;
- timings: {
- predicted_ms: number;
- predicted_n: number;
- predicted_per_second: number | null;
- predicted_per_token_ms: number | null;
- prompt_ms: number | null;
- prompt_n: number;
- prompt_per_second: number | null;
- prompt_per_token_ms: number | null;
+ text: string;
+ usage: {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
  };
- tokens_cached: number;
- tokens_evaluated: number;
- tokens_predicted: number;
- truncated: boolean;
  }>;
- extractText(response: LlamaCppTextGenerationResponse): string;
- generateDeltaStreamResponse(prompt: string, options?: ModelFunctionOptions<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): Promise<AsyncIterable<DeltaEvent<LlamaCppTextGenerationDelta>>>;
+ doStreamText(prompt: string, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
  extractTextDelta(fullDelta: LlamaCppTextGenerationDelta): string | undefined;
- withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, LlamaCppTextGenerationResponse, LlamaCppTextGenerationDelta, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
- extractUsage(response: LlamaCppTextGenerationResponse): {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
+ withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
  withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
  }
  declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
@@ -387,7 +389,7 @@ export declare const LlamaCppTextGenerationResponseFormat: {
  stream: true;
  handler: ({ response }: {
  response: Response;
- }) => Promise<AsyncIterable<DeltaEvent<LlamaCppTextGenerationDelta>>>;
+ }) => Promise<AsyncIterable<Delta<string>>>;
  };
  };
  export {};
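
The declaration changes above fold the old generateTextResponse / extractText / extractUsage trio into a single doGenerateText that returns the raw response, the extracted text, and the token usage together, and replace generateDeltaStreamResponse with doStreamText returning Delta<string>. A minimal TypeScript sketch of the new result shape, using local type aliases mirrored from the declarations above (nothing below is imported from the package):

// Local aliases for illustration only; the real types live in modelfusion.
type TextGenerationUsage = {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
};

type DoGenerateTextResult<RESPONSE> = {
  response: RESPONSE;         // full provider response (previously the return value itself)
  text: string;               // previously obtained via extractText(response)
  usage: TextGenerationUsage; // previously obtained via extractUsage(response)
};

// 0.40.0: const response = await model.generateTextResponse(prompt);
//         const text = model.extractText(response);
// 0.41.0: const { text, usage, response } = await model.doGenerateText(prompt);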

package/model-provider/llamacpp/LlamaCppTextGenerationModel.js

@@ -5,7 +5,7 @@ import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postTo
  import { AsyncQueue } from "../../event-source/AsyncQueue.js";
  import { parseEventSourceStream } from "../../event-source/parseEventSourceStream.js";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
- import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
+ import { PromptFormatTextStreamingModel } from "../../prompt/PromptFormatTextStreamingModel.js";
  import { LlamaCppApiConfiguration } from "./LlamaCppApiConfiguration.js";
  import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
  import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
@@ -33,25 +33,19 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
  return this.settings.contextWindowSize;
  }
  async callAPI(prompt, options) {
- const { run, settings, responseFormat } = options;
- const combinedSettings = {
- ...this.settings,
- ...settings,
- };
- const callSettings = {
- ...combinedSettings,
- // mapping
- nPredict: combinedSettings.maxCompletionTokens,
- stop: combinedSettings.stopSequences,
- // other
- abortSignal: run?.abortSignal,
- prompt,
- responseFormat,
- };
  return callWithRetryAndThrottle({
- retry: callSettings.api?.retry,
- throttle: callSettings.api?.throttle,
- call: async () => callLlamaCppTextGenerationAPI(callSettings),
+ retry: this.settings.api?.retry,
+ throttle: this.settings.api?.throttle,
+ call: async () => callLlamaCppTextGenerationAPI({
+ ...this.settings,
+ // mapping
+ nPredict: this.settings.maxCompletionTokens,
+ stop: this.settings.stopSequences,
+ // other
+ abortSignal: options.run?.abortSignal,
+ prompt,
+ responseFormat: options.responseFormat,
+ }),
  });
  }
  get settingsForEvent() {
@@ -81,16 +75,22 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
  const tokens = await this.tokenizer.tokenize(prompt);
  return tokens.length;
  }
- generateTextResponse(prompt, options) {
- return this.callAPI(prompt, {
+ async doGenerateText(prompt, options) {
+ const response = await this.callAPI(prompt, {
  ...options,
  responseFormat: LlamaCppTextGenerationResponseFormat.json,
  });
+ return {
+ response,
+ text: response.content,
+ usage: {
+ promptTokens: response.tokens_evaluated,
+ completionTokens: response.tokens_predicted,
+ totalTokens: response.tokens_evaluated + response.tokens_predicted,
+ },
+ };
  }
- extractText(response) {
- return response.content;
- }
- generateDeltaStreamResponse(prompt, options) {
+ doStreamText(prompt, options) {
  return this.callAPI(prompt, {
  ...options,
  responseFormat: LlamaCppTextGenerationResponseFormat.deltaIterable,
@@ -100,20 +100,16 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
  return fullDelta.delta;
  }
  withPromptFormat(promptFormat) {
- return new PromptFormatTextGenerationModel({
+ return new PromptFormatTextStreamingModel({
  model: this.withSettings({
- stopSequences: promptFormat.stopSequences,
+ stopSequences: [
+ ...(this.settings.stopSequences ?? []),
+ ...promptFormat.stopSequences,
+ ],
  }),
  promptFormat,
  });
  }
- extractUsage(response) {
- return {
- promptTokens: response.tokens_evaluated,
- completionTokens: response.tokens_predicted,
- totalTokens: response.tokens_evaluated + response.tokens_predicted,
- };
- }
  withSettings(additionalSettings) {
  return new LlamaCppTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
  }
@@ -232,6 +228,7 @@ async function createLlamaCppFullDeltaIterableQueue(stream) {
  isComplete: eventData.stop,
  delta: eventData.content,
  },
+ valueDelta: eventData.content,
  });
  if (eventData.stop) {
  queue.close();
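
One behavioral change worth noting in the implementation above: withPromptFormat() now appends the prompt format's stop sequences to any stopSequences already configured on the model, where 0.40.0 simply replaced them. A self-contained sketch of the merge (the helper name is illustrative, not a library export):

// Illustrative helper mirroring the stopSequences merge shown above.
function mergeStopSequences(
  modelStopSequences: string[] | undefined,
  promptFormatStopSequences: string[]
): string[] {
  return [...(modelStopSequences ?? []), ...promptFormatStopSequences];
}

// A model configured with stopSequences: ["###"] combined with a prompt format
// that stops on "\nUser:" now stops on both markers, not only the format's.
console.log(mergeStopSequences(["###"], ["\nUser:"])); // ["###", "\nUser:"]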

package/model-provider/lmnt/LmntSpeechSynthesisModel.cjs

@@ -25,19 +25,14 @@ class LmntSpeechSynthesisModel extends AbstractModel_js_1.AbstractModel {
  return this.settings.voice;
  }
  async callAPI(text, options) {
- const run = options?.run;
- const settings = options?.settings;
- const callSettings = {
- // copied settings:
- ...this.settings,
- ...settings,
- abortSignal: run?.abortSignal,
- text,
- };
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
- retry: callSettings.api?.retry,
- throttle: callSettings.api?.throttle,
- call: async () => callLmntTextToSpeechAPI(callSettings),
+ retry: this.settings.api?.retry,
+ throttle: this.settings.api?.throttle,
+ call: async () => callLmntTextToSpeechAPI({
+ ...this.settings,
+ abortSignal: options?.run?.abortSignal,
+ text,
+ }),
  });
  }
  get settingsForEvent() {

package/model-provider/lmnt/LmntSpeechSynthesisModel.d.ts

@@ -1,7 +1,7 @@
  /// <reference types="node" />
  import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
- import { ModelFunctionOptions } from "../../model-function/ModelFunctionOptions.js";
+ import { FunctionOptions } from "../../core/FunctionOptions.js";
  import { SpeechSynthesisModel, SpeechSynthesisModelSettings } from "../../model-function/synthesize-speech/SpeechSynthesisModel.js";
  export interface LmntSpeechSynthesisModelSettings extends SpeechSynthesisModelSettings {
  api?: ApiConfiguration;
@@ -21,6 +21,6 @@ export declare class LmntSpeechSynthesisModel extends AbstractModel<LmntSpeechSy
  get modelName(): string;
  private callAPI;
  get settingsForEvent(): Partial<LmntSpeechSynthesisModelSettings>;
- generateSpeechResponse(text: string, options?: ModelFunctionOptions<LmntSpeechSynthesisModelSettings> | undefined): Promise<Buffer>;
+ generateSpeechResponse(text: string, options?: FunctionOptions): Promise<Buffer>;
  withSettings(additionalSettings: Partial<LmntSpeechSynthesisModelSettings>): this;
  }

package/model-provider/lmnt/LmntSpeechSynthesisModel.js

@@ -22,19 +22,14 @@ export class LmntSpeechSynthesisModel extends AbstractModel {
  return this.settings.voice;
  }
  async callAPI(text, options) {
- const run = options?.run;
- const settings = options?.settings;
- const callSettings = {
- // copied settings:
- ...this.settings,
- ...settings,
- abortSignal: run?.abortSignal,
- text,
- };
  return callWithRetryAndThrottle({
- retry: callSettings.api?.retry,
- throttle: callSettings.api?.throttle,
- call: async () => callLmntTextToSpeechAPI(callSettings),
+ retry: this.settings.api?.retry,
+ throttle: this.settings.api?.throttle,
+ call: async () => callLmntTextToSpeechAPI({
+ ...this.settings,
+ abortSignal: options?.run?.abortSignal,
+ text,
+ }),
  });
  }
  get settingsForEvent() {
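
The LMNT changes above follow the same pattern as the other providers in this release: callAPI no longer merges a per-call options.settings object into this.settings, so FunctionOptions carries only run-scoped data (e.g. the abort signal) and per-call setting overrides have to go through withSettings() first. A hedged sketch of what that means at a call site (the interface below is a stand-in for illustration, not the package's actual export; the real model returns a Node Buffer):

// Stand-in interface for illustration; the real class is LmntSpeechSynthesisModel.
interface SpeechModelLike {
  withSettings(additionalSettings: { voice?: string }): SpeechModelLike;
  generateSpeechResponse(
    text: string,
    options?: { run?: { abortSignal?: AbortSignal } }
  ): Promise<Uint8Array>;
}

async function speakWithVoice(model: SpeechModelLike, text: string, voice: string) {
  // 0.40.0 style (removed): pass per-call overrides via options.settings.
  // 0.41.0 style: derive a configured model first, then call it.
  return model.withSettings({ voice }).generateSpeechResponse(text);
}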

package/model-provider/openai/OpenAIImageGenerationModel.cjs

@@ -46,17 +46,10 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
  }
  async callAPI(prompt, options) {
  const run = options?.run;
- const settings = options?.settings;
  const responseFormat = options?.responseFormat;
- const combinedSettings = {
- ...this.settings,
- ...settings,
- };
  const callSettings = {
+ ...this.settings,
  user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
- // Copied settings:
- ...combinedSettings,
- // other settings:
  abortSignal: run?.abortSignal,
  responseFormat,
  prompt,
@@ -74,16 +67,15 @@ class OpenAIImageGenerationModel extends AbstractModel_js_1.AbstractModel {
  ];
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
  }
- generateImageResponse(prompt, options) {
- return this.callAPI(prompt, {
+ async doGenerateImage(prompt, options) {
+ const response = await this.callAPI(prompt, {
  responseFormat: exports.OpenAIImageGenerationResponseFormat.base64Json,
- functionId: options?.functionId,
- settings: options?.settings,
- run: options?.run,
+ ...options,
  });
- }
- extractBase64Image(response) {
- return response.data[0].b64_json;
+ return {
+ response,
+ base64Image: response.data[0].b64_json,
+ };
  }
  withSettings(additionalSettings) {
  return new OpenAIImageGenerationModel(Object.assign({}, this.settings, additionalSettings));

package/model-provider/openai/OpenAIImageGenerationModel.d.ts

@@ -1,9 +1,9 @@
  import { z } from "zod";
  import { AbstractModel } from "../../model-function/AbstractModel.js";
- import { ModelFunctionOptions } from "../../model-function/ModelFunctionOptions.js";
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
  import { ImageGenerationModel, ImageGenerationModelSettings } from "../../model-function/generate-image/ImageGenerationModel.js";
  import { ResponseHandler } from "../../core/api/postToApi.js";
+ import { FunctionOptions } from "../../core/FunctionOptions.js";
  export interface OpenAIImageGenerationCallSettings {
  n?: number;
  size?: "256x256" | "512x512" | "1024x1024";
@@ -26,23 +26,23 @@ export interface OpenAIImageGenerationSettings extends ImageGenerationModelSetti
  * "the wicked witch of the west in the style of early 19th century painting"
  * );
  */
- export declare class OpenAIImageGenerationModel extends AbstractModel<OpenAIImageGenerationSettings> implements ImageGenerationModel<string, OpenAIImageGenerationBase64JsonResponse, OpenAIImageGenerationSettings> {
+ export declare class OpenAIImageGenerationModel extends AbstractModel<OpenAIImageGenerationSettings> implements ImageGenerationModel<string, OpenAIImageGenerationSettings> {
  constructor(settings: OpenAIImageGenerationSettings);
  readonly provider: "openai";
  readonly modelName: null;
  callAPI<RESULT>(prompt: string, options: {
  responseFormat: OpenAIImageGenerationResponseFormatType<RESULT>;
- } & ModelFunctionOptions<Partial<OpenAIImageGenerationCallSettings & {
- user?: string;
- }>>): Promise<RESULT>;
+ } & FunctionOptions): Promise<RESULT>;
  get settingsForEvent(): Partial<OpenAIImageGenerationSettings>;
- generateImageResponse(prompt: string, options?: ModelFunctionOptions<OpenAIImageGenerationSettings>): Promise<{
- data: {
- b64_json: string;
- }[];
- created: number;
+ doGenerateImage(prompt: string, options?: FunctionOptions): Promise<{
+ response: {
+ data: {
+ b64_json: string;
+ }[];
+ created: number;
+ };
+ base64Image: string;
  }>;
- extractBase64Image(response: OpenAIImageGenerationBase64JsonResponse): string;
  withSettings(additionalSettings: Partial<OpenAIImageGenerationSettings>): this;
  }
  export type OpenAIImageGenerationResponseFormatType<T> = {
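
As with text generation, the image model's generateImageResponse / extractBase64Image pair becomes a single doGenerateImage that returns the raw response together with the extracted base64 image. A small sketch of the new shape, with local types mirrored from the declarations above rather than imported:

// Local aliases for illustration; mirrored from the doGenerateImage declaration above.
type OpenAIImageGenerationResponseShape = {
  created: number;
  data: { b64_json: string }[];
};

type DoGenerateImageResult = {
  response: OpenAIImageGenerationResponseShape;
  base64Image: string; // previously obtained via extractBase64Image(response)
};

// 0.40.0: const response = await model.generateImageResponse(prompt);
//         const image = model.extractBase64Image(response);
// 0.41.0: const { base64Image, response } = await model.doGenerateImage(prompt);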

package/model-provider/openai/OpenAIImageGenerationModel.js

@@ -42,17 +42,10 @@ export class OpenAIImageGenerationModel extends AbstractModel {
  }
  async callAPI(prompt, options) {
  const run = options?.run;
- const settings = options?.settings;
  const responseFormat = options?.responseFormat;
- const combinedSettings = {
- ...this.settings,
- ...settings,
- };
  const callSettings = {
+ ...this.settings,
  user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
- // Copied settings:
- ...combinedSettings,
- // other settings:
  abortSignal: run?.abortSignal,
  responseFormat,
  prompt,
@@ -70,16 +63,15 @@ export class OpenAIImageGenerationModel extends AbstractModel {
  ];
  return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
  }
- generateImageResponse(prompt, options) {
- return this.callAPI(prompt, {
+ async doGenerateImage(prompt, options) {
+ const response = await this.callAPI(prompt, {
  responseFormat: OpenAIImageGenerationResponseFormat.base64Json,
- functionId: options?.functionId,
- settings: options?.settings,
- run: options?.run,
+ ...options,
  });
- }
- extractBase64Image(response) {
- return response.data[0].b64_json;
+ return {
+ response,
+ base64Image: response.data[0].b64_json,
+ };
  }
  withSettings(additionalSettings) {
  return new OpenAIImageGenerationModel(Object.assign({}, this.settings, additionalSettings));

package/model-provider/openai/OpenAITextEmbeddingModel.cjs

@@ -5,10 +5,10 @@ var __importDefault = (this && this.__importDefault) || function (mod) {
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.OpenAITextEmbeddingModel = exports.calculateOpenAIEmbeddingCostInMillicents = exports.isOpenAIEmbeddingModel = exports.OPENAI_TEXT_EMBEDDING_MODELS = void 0;
  const zod_1 = __importDefault(require("zod"));
- const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
- const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
  const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
  const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+ const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+ const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
  const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
  const OpenAIError_js_1 = require("./OpenAIError.cjs");
  const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
@@ -91,37 +91,31 @@ class OpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
  return (0, countTokens_js_1.countTokens)(this.tokenizer, input);
  }
  async callAPI(texts, options) {
- const run = options?.run;
- const settings = options?.settings;
- const combinedSettings = {
- ...this.settings,
- ...settings,
- };
- const callSettings = {
- user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
- // Copied settings:
- ...combinedSettings,
- // other settings:
- abortSignal: run?.abortSignal,
- input: texts,
- };
  return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
- retry: callSettings.api?.retry,
- throttle: callSettings.api?.throttle,
- call: async () => callOpenAITextEmbeddingAPI(callSettings),
+ retry: this.settings.api?.retry,
+ throttle: this.settings.api?.throttle,
+ call: async () => callOpenAITextEmbeddingAPI({
+ ...this.settings,
+ user: this.settings.isUserIdForwardingEnabled
+ ? options?.run?.userId
+ : undefined,
+ abortSignal: options?.run?.abortSignal,
+ input: texts,
+ }),
  });
  }
  get settingsForEvent() {
  return {};
  }
- generateEmbeddingResponse(texts, options) {
+ async doEmbedValues(texts, options) {
  if (texts.length > this.maxValuesPerCall) {
  throw new Error(`The OpenAI embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
  }
- return this.callAPI(texts, options);
- }
- extractEmbeddings(response) {
- return response.data.map((data) => data.embedding);
+ const response = await this.callAPI(texts, options);
+ return {
+ response,
+ embeddings: response.data.map((data) => data.embedding),
+ };
  }
  withSettings(additionalSettings) {
  return new OpenAITextEmbeddingModel(Object.assign({}, this.settings, additionalSettings));

package/model-provider/openai/OpenAITextEmbeddingModel.d.ts

@@ -1,7 +1,7 @@
  import z from "zod";
- import { AbstractModel } from "../../model-function/AbstractModel.js";
- import { ModelFunctionOptions } from "../../model-function/ModelFunctionOptions.js";
+ import { FunctionOptions } from "../../core/FunctionOptions.js";
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
+ import { AbstractModel } from "../../model-function/AbstractModel.js";
  import { EmbeddingModel, EmbeddingModelSettings } from "../../model-function/embed/EmbeddingModel.js";
  import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
  export declare const OPENAI_TEXT_EMBEDDING_MODELS: {
@@ -36,7 +36,7 @@ export interface OpenAITextEmbeddingModelSettings extends EmbeddingModelSettings
  * ]
  * );
  */
- export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEmbeddingModelSettings> implements EmbeddingModel<string, OpenAITextEmbeddingResponse, OpenAITextEmbeddingModelSettings> {
+ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEmbeddingModelSettings> implements EmbeddingModel<string, OpenAITextEmbeddingModelSettings> {
  constructor(settings: OpenAITextEmbeddingModelSettings);
  readonly provider: "openai";
  get modelName(): "text-embedding-ada-002";
@@ -45,22 +45,24 @@ export declare class OpenAITextEmbeddingModel extends AbstractModel<OpenAITextEm
  readonly tokenizer: TikTokenTokenizer;
  readonly contextWindowSize: number;
  countTokens(input: string): Promise<number>;
- callAPI(texts: Array<string>, options?: ModelFunctionOptions<OpenAITextEmbeddingModelSettings>): Promise<OpenAITextEmbeddingResponse>;
+ callAPI(texts: Array<string>, options?: FunctionOptions): Promise<OpenAITextEmbeddingResponse>;
  get settingsForEvent(): Partial<OpenAITextEmbeddingModelSettings>;
- generateEmbeddingResponse(texts: string[], options?: ModelFunctionOptions<OpenAITextEmbeddingModelSettings>): Promise<{
- object: "list";
- model: string;
- usage: {
- prompt_tokens: number;
- total_tokens: number;
+ doEmbedValues(texts: string[], options?: FunctionOptions): Promise<{
+ response: {
+ object: "list";
+ model: string;
+ usage: {
+ prompt_tokens: number;
+ total_tokens: number;
+ };
+ data: {
+ object: "embedding";
+ embedding: number[];
+ index: number;
+ }[];
  };
- data: {
- object: "embedding";
- embedding: number[];
- index: number;
- }[];
+ embeddings: number[][];
  }>;
- extractEmbeddings(response: OpenAITextEmbeddingResponse): number[][];
  withSettings(additionalSettings: OpenAITextEmbeddingModelSettings): this;
  }
  declare const openAITextEmbeddingResponseSchema: z.ZodObject<{
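
The embedding model follows the same convention: doEmbedValues replaces generateEmbeddingResponse / extractEmbeddings and returns the extracted vectors alongside the raw response, while the maxValuesPerCall guard is unchanged. A type-level sketch mirroring the declarations above (local aliases only, not package imports):

// Local aliases for illustration; mirrored from the doEmbedValues declaration above.
type OpenAITextEmbeddingResponseShape = {
  object: "list";
  model: string;
  usage: { prompt_tokens: number; total_tokens: number };
  data: { object: "embedding"; embedding: number[]; index: number }[];
};

type DoEmbedValuesResult = {
  response: OpenAITextEmbeddingResponseShape;
  embeddings: number[][]; // previously obtained via extractEmbeddings(response)
};

// 0.40.0: const response = await model.generateEmbeddingResponse(texts);
//         const embeddings = model.extractEmbeddings(response);
// 0.41.0: const { embeddings, response } = await model.doEmbedValues(texts);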