modelfusion 0.123.0 → 0.125.0
- package/CHANGELOG.md +47 -1
- package/README.md +9 -22
- package/model-function/generate-text/PromptTemplateFullTextModel.cjs +0 -11
- package/model-function/generate-text/PromptTemplateFullTextModel.d.ts +0 -1
- package/model-function/generate-text/PromptTemplateFullTextModel.js +0 -11
- package/model-function/generate-text/PromptTemplateTextGenerationModel.cjs +0 -11
- package/model-function/generate-text/PromptTemplateTextGenerationModel.d.ts +0 -1
- package/model-function/generate-text/PromptTemplateTextGenerationModel.js +0 -11
- package/model-function/generate-text/PromptTemplateTextStreamingModel.cjs +0 -11
- package/model-function/generate-text/PromptTemplateTextStreamingModel.d.ts +0 -1
- package/model-function/generate-text/PromptTemplateTextStreamingModel.js +0 -11
- package/model-function/generate-text/TextGenerationModel.d.ts +31 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.cjs +6 -9
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -9
- package/model-provider/cohere/CohereTextGenerationModel.js +7 -10
- package/model-provider/cohere/CohereTokenizer.d.ts +3 -3
- package/model-provider/llamacpp/LlamaCppCompletionModel.d.ts +2 -2
- package/model-provider/mistral/MistralChatModel.cjs +0 -9
- package/model-provider/mistral/MistralChatModel.d.ts +2 -11
- package/model-provider/mistral/MistralChatModel.js +0 -9
- package/model-provider/mistral/index.cjs +1 -2
- package/model-provider/mistral/index.d.ts +0 -1
- package/model-provider/mistral/index.js +0 -1
- package/model-provider/ollama/OllamaChatModel.cjs +0 -9
- package/model-provider/ollama/OllamaChatModel.d.ts +2 -11
- package/model-provider/ollama/OllamaChatModel.js +0 -9
- package/model-provider/ollama/OllamaCompletionModel.d.ts +2 -2
- package/model-provider/ollama/index.cjs +0 -1
- package/model-provider/ollama/index.d.ts +0 -1
- package/model-provider/ollama/index.js +0 -1
- package/model-provider/openai/AbstractOpenAIChatModel.cjs +5 -3
- package/model-provider/openai/AbstractOpenAIChatModel.d.ts +5 -5
- package/model-provider/openai/AbstractOpenAIChatModel.js +5 -3
- package/model-provider/openai/AbstractOpenAITextEmbeddingModel.cjs +82 -0
- package/model-provider/openai/AbstractOpenAITextEmbeddingModel.d.ts +91 -0
- package/model-provider/openai/AbstractOpenAITextEmbeddingModel.js +78 -0
- package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts +1 -1
- package/model-provider/openai/OpenAIChatModel.cjs +0 -9
- package/model-provider/openai/OpenAIChatModel.d.ts +2 -11
- package/model-provider/openai/OpenAIChatModel.js +0 -9
- package/model-provider/openai/OpenAICompletionModel.cjs +3 -6
- package/model-provider/openai/OpenAICompletionModel.d.ts +3 -8
- package/model-provider/openai/OpenAICompletionModel.js +4 -7
- package/model-provider/openai/OpenAIFacade.cjs +18 -18
- package/model-provider/openai/OpenAIFacade.d.ts +18 -18
- package/model-provider/openai/OpenAIFacade.js +18 -18
- package/model-provider/openai/OpenAITextEmbeddingModel.cjs +3 -68
- package/model-provider/openai/OpenAITextEmbeddingModel.d.ts +4 -82
- package/model-provider/openai/OpenAITextEmbeddingModel.js +3 -68
- package/model-provider/openai/index.cjs +2 -2
- package/model-provider/openai/index.d.ts +1 -1
- package/model-provider/openai/index.js +1 -1
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.cjs +0 -9
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.d.ts +4 -11
- package/model-provider/openai-compatible/OpenAICompatibleChatModel.js +0 -9
- package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.cjs +10 -0
- package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.d.ts +10 -2
- package/model-provider/openai-compatible/OpenAICompatibleCompletionModel.js +10 -0
- package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +40 -7
- package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +35 -6
- package/model-provider/openai-compatible/OpenAICompatibleFacade.js +37 -6
- package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.cjs +27 -0
- package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.d.ts +18 -0
- package/model-provider/openai-compatible/OpenAICompatibleTextEmbeddingModel.js +23 -0
- package/model-provider/openai-compatible/PerplexityApiConfiguration.cjs +33 -0
- package/model-provider/openai-compatible/PerplexityApiConfiguration.d.ts +13 -0
- package/model-provider/openai-compatible/PerplexityApiConfiguration.js +29 -0
- package/model-provider/openai-compatible/index.cjs +2 -0
- package/model-provider/openai-compatible/index.d.ts +2 -0
- package/model-provider/openai-compatible/index.js +2 -0
- package/package.json +1 -1
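The summary also lists brand-new openai-compatible provider files (PerplexityApiConfiguration, OpenAICompatibleTextEmbeddingModel, and a much larger OpenAICompatibleFacade) whose hunks are not included below. A hedged sketch of how those pieces plausibly fit together, based only on the file names and the facade pattern the package uses elsewhere; the facade function, configuration constructor, and model id are assumptions, not taken from this diff:

```ts
import {
  openaicompatible,
  PerplexityApiConfiguration, // assumption: re-exported via the new openai-compatible index files
  streamText,
} from "modelfusion";

const textStream = await streamText({
  model: openaicompatible
    .ChatTextGenerator({
      api: new PerplexityApiConfiguration(), // assumed to read its API key from the environment
      model: "pplx-70b-online", // hypothetical Perplexity model id
    })
    .withTextPrompt(),
  prompt: "Explain why this package ships .cjs and .js builds side by side.",
});
```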
package/model-provider/ollama/OllamaChatModel.cjs

```diff
@@ -172,21 +172,12 @@ class OllamaChatModel extends AbstractModel_js_1.AbstractModel {
             template: promptTemplate,
         });
     }
-    /**
-     * Returns this model with a text prompt template.
-     */
     withTextPrompt() {
         return this.withPromptTemplate((0, OllamaChatPromptTemplate_js_1.text)());
     }
-    /**
-     * Returns this model with an instruction prompt template.
-     */
     withInstructionPrompt() {
         return this.withPromptTemplate((0, OllamaChatPromptTemplate_js_1.instruction)());
     }
-    /**
-     * Returns this model with a chat prompt template.
-     */
     withChatPrompt() {
         return this.withPromptTemplate((0, OllamaChatPromptTemplate_js_1.chat)());
     }
```
package/model-provider/ollama/OllamaChatModel.d.ts

```diff
@@ -6,7 +6,7 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
 import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
-import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+import { TextStreamingBaseModel, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
 import { TextGenerationToolCallModel, ToolCallPromptTemplate } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
 import { TextGenerationToolCallsModel } from "../../tool/generate-tool-calls/TextGenerationToolCallsModel.js";
@@ -27,7 +27,7 @@ export interface OllamaChatModelSettings extends OllamaTextGenerationSettings {
 /**
  * Text generation model that uses the Ollama chat API.
  */
-export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettings> implements TextStreamingModel<OllamaChatPrompt, OllamaChatModelSettings> {
+export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettings> implements TextStreamingBaseModel<OllamaChatPrompt, OllamaChatModelSettings> {
     constructor(settings: OllamaChatModelSettings);
     readonly provider = "ollama";
     get modelName(): string;
@@ -104,17 +104,8 @@ export declare class OllamaChatModel extends AbstractModel<OllamaChatModelSettin
     asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaChatPrompt, this>;
     asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): TextGenerationToolCallsModel<INPUT_PROMPT, OllamaChatPrompt, this>;
     asStructureGenerationModel<INPUT_PROMPT, OllamaChatPrompt>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, OllamaChatPrompt> | FlexibleStructureFromTextPromptTemplate<INPUT_PROMPT, unknown>): StructureFromTextStreamingModel<INPUT_PROMPT, unknown, TextStreamingModel<unknown, import("../../model-function/generate-text/TextGenerationModel.js").TextGenerationModelSettings>> | StructureFromTextStreamingModel<INPUT_PROMPT, OllamaChatPrompt, TextStreamingModel<OllamaChatPrompt, import("../../model-function/generate-text/TextGenerationModel.js").TextGenerationModelSettings>>;
-    /**
-     * Returns this model with a text prompt template.
-     */
     withTextPrompt(): PromptTemplateTextStreamingModel<string, OllamaChatPrompt, OllamaChatModelSettings, this>;
-    /**
-     * Returns this model with an instruction prompt template.
-     */
     withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, OllamaChatPrompt, OllamaChatModelSettings, this>;
-    /**
-     * Returns this model with a chat prompt template.
-     */
     withChatPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").ChatPrompt, OllamaChatPrompt, OllamaChatModelSettings, this>;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OllamaChatPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OllamaChatPrompt, OllamaChatModelSettings, this>;
     withJsonOutput(): this;
```
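Taken together with the TextGenerationModel.d.ts change in the summary (+31/-1), the pattern in these hunks is: the per-class JSDoc on withTextPrompt()/withInstructionPrompt()/withChatPrompt() is dropped and each class now implements the new TextStreamingBaseModel interface, which becomes the shared home for those method declarations and their documentation. Call sites are unaffected; a hedged usage sketch (model id and prompt are examples, not from this diff):

```ts
import { ollama, streamText } from "modelfusion";

// The prompt-template methods behave as before; only where they are
// declared and documented has moved.
const textStream = await streamText({
  model: ollama
    .ChatTextGenerator({ model: "llama2" }) // example model id
    .withInstructionPrompt(),
  prompt: {
    system: "You are a terse assistant.",
    instruction: "Name three uses for a mutex.",
  },
});

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```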
package/model-provider/ollama/OllamaChatModel.js

```diff
@@ -169,21 +169,12 @@ export class OllamaChatModel extends AbstractModel {
             template: promptTemplate,
         });
     }
-    /**
-     * Returns this model with a text prompt template.
-     */
     withTextPrompt() {
         return this.withPromptTemplate(text());
     }
-    /**
-     * Returns this model with an instruction prompt template.
-     */
     withInstructionPrompt() {
         return this.withPromptTemplate(instruction());
     }
-    /**
-     * Returns this model with a chat prompt template.
-     */
     withChatPrompt() {
         return this.withPromptTemplate(chat());
     }
```
package/model-provider/ollama/OllamaCompletionModel.d.ts

```diff
@@ -6,7 +6,7 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
 import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
-import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+import { TextStreamingBaseModel, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
 import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
 import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
@@ -49,7 +49,7 @@ export interface OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE extends numbe
      */
     promptTemplate?: TextGenerationPromptTemplateProvider<OllamaCompletionPrompt>;
 }
-export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>> {
+export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingBaseModel<OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>> {
     constructor(settings: OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>);
     readonly provider = "ollama";
     get modelName(): string;
```
package/model-provider/ollama/index.cjs

```diff
@@ -29,7 +29,6 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.ollama = void 0;
 __exportStar(require("./OllamaApiConfiguration.cjs"), exports);
 __exportStar(require("./OllamaChatModel.cjs"), exports);
-__exportStar(require("./OllamaChatPromptTemplate.cjs"), exports);
 __exportStar(require("./OllamaCompletionModel.cjs"), exports);
 exports.ollama = __importStar(require("./OllamaFacade.cjs"));
 __exportStar(require("./OllamaTextEmbeddingModel.cjs"), exports);
```
package/model-provider/ollama/index.d.ts

```diff
@@ -1,6 +1,5 @@
 export * from "./OllamaApiConfiguration.js";
 export * from "./OllamaChatModel.js";
-export * from "./OllamaChatPromptTemplate.js";
 export * from "./OllamaCompletionModel.js";
 export { OllamaErrorData } from "./OllamaError.js";
 export * as ollama from "./OllamaFacade.js";
```
package/model-provider/ollama/index.js

```diff
@@ -1,6 +1,5 @@
 export * from "./OllamaApiConfiguration.js";
 export * from "./OllamaChatModel.js";
-export * from "./OllamaChatPromptTemplate.js";
 export * from "./OllamaCompletionModel.js";
 export * as ollama from "./OllamaFacade.js";
 export * from "./OllamaTextEmbeddingModel.js";
```
package/model-provider/openai/AbstractOpenAIChatModel.cjs

```diff
@@ -63,7 +63,7 @@ class AbstractOpenAIChatModel extends AbstractModel_js_1.AbstractModel {
             temperature: this.settings.temperature,
             top_p: this.settings.topP,
             n: this.settings.numberOfGenerations,
-            stop: this.settings.stopSequences,
+            stop: stopSequences,
             max_tokens: this.settings.maxGenerationTokens,
             presence_penalty: this.settings.presencePenalty,
             frequency_penalty: this.settings.frequencyPenalty,
@@ -122,7 +122,9 @@ class AbstractOpenAIChatModel extends AbstractModel_js_1.AbstractModel {
     }
     extractTextDelta(delta) {
         const chunk = delta;
-        if (chunk.object !== "chat.completion.chunk") {
+        if (chunk.object !== "chat.completion.chunk" &&
+            chunk.object !== "chat.completion" // for OpenAI-compatible models
+        ) {
             return undefined;
         }
         const chatChunk = chunk;
@@ -243,7 +245,7 @@ const openAIChatResponseSchema = zod_1.z.object({
     }),
 });
 const openaiChatChunkSchema = zod_1.z.object({
-    object: zod_1.z.literal("chat.completion.chunk"),
+    object: zod_1.z.string(), // generalized for openai compatible providers, z.literal("chat.completion.chunk")
     id: zod_1.z.string(),
     choices: zod_1.z.array(zod_1.z.object({
         delta: zod_1.z.object({
```
package/model-provider/openai/AbstractOpenAIChatModel.d.ts

```diff
@@ -223,7 +223,7 @@ export declare abstract class AbstractOpenAIChatModel<SETTINGS extends AbstractO
     };
     private translateFinishReason;
     doStreamText(prompt: OpenAIChatPrompt, options: FunctionCallOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
-        object: "chat.completion.chunk";
+        object: string;
         id: string;
         created: number;
         choices: {
@@ -549,7 +549,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
 }>;
 export type OpenAIChatResponse = z.infer<typeof openAIChatResponseSchema>;
 declare const openaiChatChunkSchema: z.ZodObject<{
-    object: z.ZodLiteral<"chat.completion.chunk">;
+    object: z.ZodString;
     id: z.ZodString;
     choices: z.ZodArray<z.ZodObject<{
         delta: z.ZodObject<{
@@ -669,7 +669,7 @@ declare const openaiChatChunkSchema: z.ZodObject<{
     model: z.ZodOptional<z.ZodString>;
     system_fingerprint: z.ZodNullable<z.ZodOptional<z.ZodString>>;
 }, "strip", z.ZodTypeAny, {
-    object: "chat.completion.chunk";
+    object: string;
     id: string;
     created: number;
     choices: {
@@ -695,7 +695,7 @@ declare const openaiChatChunkSchema: z.ZodObject<{
     model?: string | undefined;
     system_fingerprint?: string | null | undefined;
 }, {
-    object: "chat.completion.chunk";
+    object: string;
     id: string;
     created: number;
     choices: {
@@ -774,7 +774,7 @@ export declare const OpenAIChatResponseFormat: {
     handler: ({ response }: {
         response: Response;
     }) => Promise<AsyncIterable<import("../../index.js").Delta<{
-        object: "chat.completion.chunk";
+        object: string;
         id: string;
         created: number;
         choices: {
```
package/model-provider/openai/AbstractOpenAIChatModel.js

```diff
@@ -60,7 +60,7 @@ export class AbstractOpenAIChatModel extends AbstractModel {
             temperature: this.settings.temperature,
             top_p: this.settings.topP,
             n: this.settings.numberOfGenerations,
-            stop: this.settings.stopSequences,
+            stop: stopSequences,
             max_tokens: this.settings.maxGenerationTokens,
             presence_penalty: this.settings.presencePenalty,
             frequency_penalty: this.settings.frequencyPenalty,
@@ -119,7 +119,9 @@ export class AbstractOpenAIChatModel extends AbstractModel {
     }
     extractTextDelta(delta) {
         const chunk = delta;
-        if (chunk.object !== "chat.completion.chunk") {
+        if (chunk.object !== "chat.completion.chunk" &&
+            chunk.object !== "chat.completion" // for OpenAI-compatible models
+        ) {
             return undefined;
         }
         const chatChunk = chunk;
@@ -239,7 +241,7 @@ const openAIChatResponseSchema = z.object({
     }),
 });
 const openaiChatChunkSchema = z.object({
-    object: z.literal("chat.completion.chunk"),
+    object: z.string(), // generalized for openai compatible providers, z.literal("chat.completion.chunk")
     id: z.string(),
     choices: z.array(z.object({
         delta: z.object({
```
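Both builds relax streamed-chunk handling the same way: the Zod schema now accepts any string for `object`, and extractTextDelta additionally lets "chat.completion" events through, because some OpenAI-compatible servers label their stream chunks that way instead of "chat.completion.chunk". A minimal standalone sketch of the resulting guard (not the package's exact code):

```ts
import { z } from "zod";

// Was z.literal("chat.completion.chunk"); widened so OpenAI-compatible
// providers with different labels still parse.
const chunkSchema = z.object({
  object: z.string(),
  id: z.string(),
});

function isAcceptedChunk(chunk: z.infer<typeof chunkSchema>): boolean {
  // OpenAI streams "chat.completion.chunk"; some OpenAI-compatible
  // providers stream "chat.completion" events instead, so both pass.
  return (
    chunk.object === "chat.completion.chunk" ||
    chunk.object === "chat.completion"
  );
}
```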
package/model-provider/openai/AbstractOpenAITextEmbeddingModel.cjs

```diff
@@ -0,0 +1,82 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.AbstractOpenAITextEmbeddingModel = void 0;
+const zod_1 = require("zod");
+const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
+const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
+const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
+const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
+const OpenAIError_js_1 = require("./OpenAIError.cjs");
+/**
+ * Abstract text embedding model that calls an API that is compatible with the OpenAI embedding API.
+ *
+ * @see https://platform.openai.com/docs/api-reference/embeddings
+ */
+class AbstractOpenAITextEmbeddingModel extends AbstractModel_js_1.AbstractModel {
+    constructor(settings) {
+        super({ settings });
+        Object.defineProperty(this, "isParallelizable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+    }
+    get maxValuesPerCall() {
+        return this.settings.maxValuesPerCall ?? 2048;
+    }
+    async callAPI(texts, callOptions) {
+        const api = this.settings.api ?? new OpenAIApiConfiguration_js_1.OpenAIApiConfiguration();
+        const abortSignal = callOptions.run?.abortSignal;
+        return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
+            retry: api.retry,
+            throttle: api.throttle,
+            call: async () => (0, postToApi_js_1.postJsonToApi)({
+                url: api.assembleUrl("/embeddings"),
+                headers: api.headers({
+                    functionType: callOptions.functionType,
+                    functionId: callOptions.functionId,
+                    run: callOptions.run,
+                    callId: callOptions.callId,
+                }),
+                body: {
+                    model: this.modelName,
+                    input: texts,
+                    user: this.settings.isUserIdForwardingEnabled
+                        ? callOptions.run?.userId
+                        : undefined,
+                },
+                failedResponseHandler: OpenAIError_js_1.failedOpenAICallResponseHandler,
+                successfulResponseHandler: (0, postToApi_js_1.createJsonResponseHandler)((0, ZodSchema_js_1.zodSchema)(openAITextEmbeddingResponseSchema)),
+                abortSignal,
+            }),
+        });
+    }
+    async doEmbedValues(texts, callOptions) {
+        if (texts.length > this.maxValuesPerCall) {
+            throw new Error(`The OpenAI embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
+        }
+        const rawResponse = await this.callAPI(texts, callOptions);
+        return {
+            rawResponse,
+            embeddings: rawResponse.data.map((data) => data.embedding),
+        };
+    }
+}
+exports.AbstractOpenAITextEmbeddingModel = AbstractOpenAITextEmbeddingModel;
+const openAITextEmbeddingResponseSchema = zod_1.z.object({
+    object: zod_1.z.literal("list"),
+    data: zod_1.z.array(zod_1.z.object({
+        object: zod_1.z.literal("embedding"),
+        embedding: zod_1.z.array(zod_1.z.number()),
+        index: zod_1.z.number(),
+    })),
+    model: zod_1.z.string(),
+    usage: zod_1.z
+        .object({
+        prompt_tokens: zod_1.z.number(),
+        total_tokens: zod_1.z.number(),
+    })
+        .optional(), // for openai-compatible models
+});
```
package/model-provider/openai/AbstractOpenAITextEmbeddingModel.d.ts

```diff
@@ -0,0 +1,91 @@
+import { z } from "zod";
+import { FunctionCallOptions } from "../../core/FunctionOptions.js";
+import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { EmbeddingModelSettings } from "../../model-function/embed/EmbeddingModel.js";
+export interface AbstractOpenAITextEmbeddingModelSettings extends EmbeddingModelSettings {
+    api?: ApiConfiguration;
+    model: string;
+    maxValuesPerCall?: number | undefined;
+    isUserIdForwardingEnabled?: boolean;
+}
+/**
+ * Abstract text embedding model that calls an API that is compatible with the OpenAI embedding API.
+ *
+ * @see https://platform.openai.com/docs/api-reference/embeddings
+ */
+export declare abstract class AbstractOpenAITextEmbeddingModel<SETTINGS extends AbstractOpenAITextEmbeddingModelSettings> extends AbstractModel<SETTINGS> {
+    constructor(settings: SETTINGS);
+    get maxValuesPerCall(): number;
+    readonly isParallelizable = true;
+    callAPI(texts: Array<string>, callOptions: FunctionCallOptions): Promise<OpenAITextEmbeddingResponse>;
+    doEmbedValues(texts: string[], callOptions: FunctionCallOptions): Promise<{
+        rawResponse: {
+            object: "list";
+            data: {
+                object: "embedding";
+                embedding: number[];
+                index: number;
+            }[];
+            model: string;
+            usage?: {
+                prompt_tokens: number;
+                total_tokens: number;
+            } | undefined;
+        };
+        embeddings: number[][];
+    }>;
+}
+declare const openAITextEmbeddingResponseSchema: z.ZodObject<{
+    object: z.ZodLiteral<"list">;
+    data: z.ZodArray<z.ZodObject<{
+        object: z.ZodLiteral<"embedding">;
+        embedding: z.ZodArray<z.ZodNumber, "many">;
+        index: z.ZodNumber;
+    }, "strip", z.ZodTypeAny, {
+        object: "embedding";
+        embedding: number[];
+        index: number;
+    }, {
+        object: "embedding";
+        embedding: number[];
+        index: number;
+    }>, "many">;
+    model: z.ZodString;
+    usage: z.ZodOptional<z.ZodObject<{
+        prompt_tokens: z.ZodNumber;
+        total_tokens: z.ZodNumber;
+    }, "strip", z.ZodTypeAny, {
+        prompt_tokens: number;
+        total_tokens: number;
+    }, {
+        prompt_tokens: number;
+        total_tokens: number;
+    }>>;
+}, "strip", z.ZodTypeAny, {
+    object: "list";
+    data: {
+        object: "embedding";
+        embedding: number[];
+        index: number;
+    }[];
+    model: string;
+    usage?: {
+        prompt_tokens: number;
+        total_tokens: number;
+    } | undefined;
+}, {
+    object: "list";
+    data: {
+        object: "embedding";
+        embedding: number[];
+        index: number;
+    }[];
+    model: string;
+    usage?: {
+        prompt_tokens: number;
+        total_tokens: number;
+    } | undefined;
+}>;
+export type OpenAITextEmbeddingResponse = z.infer<typeof openAITextEmbeddingResponseSchema>;
+export {};
```
package/model-provider/openai/AbstractOpenAITextEmbeddingModel.js

```diff
@@ -0,0 +1,78 @@
+import { z } from "zod";
+import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
+import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+import { zodSchema } from "../../core/schema/ZodSchema.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
+import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
+/**
+ * Abstract text embedding model that calls an API that is compatible with the OpenAI embedding API.
+ *
+ * @see https://platform.openai.com/docs/api-reference/embeddings
+ */
+export class AbstractOpenAITextEmbeddingModel extends AbstractModel {
+    constructor(settings) {
+        super({ settings });
+        Object.defineProperty(this, "isParallelizable", {
+            enumerable: true,
+            configurable: true,
+            writable: true,
+            value: true
+        });
+    }
+    get maxValuesPerCall() {
+        return this.settings.maxValuesPerCall ?? 2048;
+    }
+    async callAPI(texts, callOptions) {
+        const api = this.settings.api ?? new OpenAIApiConfiguration();
+        const abortSignal = callOptions.run?.abortSignal;
+        return callWithRetryAndThrottle({
+            retry: api.retry,
+            throttle: api.throttle,
+            call: async () => postJsonToApi({
+                url: api.assembleUrl("/embeddings"),
+                headers: api.headers({
+                    functionType: callOptions.functionType,
+                    functionId: callOptions.functionId,
+                    run: callOptions.run,
+                    callId: callOptions.callId,
+                }),
+                body: {
+                    model: this.modelName,
+                    input: texts,
+                    user: this.settings.isUserIdForwardingEnabled
+                        ? callOptions.run?.userId
+                        : undefined,
+                },
+                failedResponseHandler: failedOpenAICallResponseHandler,
+                successfulResponseHandler: createJsonResponseHandler(zodSchema(openAITextEmbeddingResponseSchema)),
+                abortSignal,
+            }),
+        });
+    }
+    async doEmbedValues(texts, callOptions) {
+        if (texts.length > this.maxValuesPerCall) {
+            throw new Error(`The OpenAI embedding API only supports ${this.maxValuesPerCall} texts per API call.`);
+        }
+        const rawResponse = await this.callAPI(texts, callOptions);
+        return {
+            rawResponse,
+            embeddings: rawResponse.data.map((data) => data.embedding),
+        };
+    }
+}
+const openAITextEmbeddingResponseSchema = z.object({
+    object: z.literal("list"),
+    data: z.array(z.object({
+        object: z.literal("embedding"),
+        embedding: z.array(z.number()),
+        index: z.number(),
+    })),
+    model: z.string(),
+    usage: z
+        .object({
+        prompt_tokens: z.number(),
+        total_tokens: z.number(),
+    })
+        .optional(), // for openai-compatible models
+});
```
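These three new files hoist the OpenAI embeddings call into an abstract base class, which is what lets OpenAITextEmbeddingModel shrink by roughly 80 lines elsewhere in this diff and makes the new OpenAICompatibleTextEmbeddingModel possible. A hedged sketch of a provider-specific subclass; the settings type and constructor follow the .d.ts above, while the remaining members follow the usual AbstractModel contract and are assumptions:

```ts
import {
  AbstractOpenAITextEmbeddingModel,
  AbstractOpenAITextEmbeddingModelSettings,
} from "./AbstractOpenAITextEmbeddingModel.js";

// "myprovider" and the class name are illustrative, not from this diff.
export interface MyProviderEmbeddingSettings
  extends AbstractOpenAITextEmbeddingModelSettings {}

export class MyProviderTextEmbeddingModel extends AbstractOpenAITextEmbeddingModel<MyProviderEmbeddingSettings> {
  readonly provider = "myprovider";

  get modelName() {
    return this.settings.model;
  }

  // Assumed AbstractModel members; the embedding interface may also expect
  // an embedding-dimensions property, omitted from this sketch.
  get settingsForEvent(): Partial<MyProviderEmbeddingSettings> {
    return {};
  }

  withSettings(additionalSettings: Partial<MyProviderEmbeddingSettings>) {
    return new MyProviderTextEmbeddingModel({
      ...this.settings,
      ...additionalSettings,
    }) as this;
  }
}
```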
package/model-provider/openai/OpenAIChatFunctionCallStructureGenerationModel.d.ts

```diff
@@ -170,7 +170,7 @@ OpenAIChatSettings> {
     }>;
     doStreamStructure(schema: Schema<unknown> & JsonSchemaProducer, prompt: Parameters<PROMPT_TEMPLATE["format"]>[0], // first argument of the function
     options: FunctionCallOptions): Promise<AsyncIterable<import("../../index.js").Delta<{
-        object: "chat.completion.chunk";
+        object: string;
         id: string;
         created: number;
         choices: {
```
package/model-provider/openai/OpenAIChatModel.cjs

```diff
@@ -229,21 +229,12 @@ class OpenAIChatModel extends AbstractOpenAIChatModel_js_1.AbstractOpenAIChatMod
             template: promptTemplate,
         });
     }
-    /**
-     * Returns this model with a text prompt template.
-     */
     withTextPrompt() {
         return this.withPromptTemplate((0, OpenAIChatPromptTemplate_js_1.text)());
     }
-    /**
-     * Returns this model with an instruction prompt template.
-     */
     withInstructionPrompt() {
         return this.withPromptTemplate((0, OpenAIChatPromptTemplate_js_1.instruction)());
     }
-    /**
-     * Returns this model with a chat prompt template.
-     */
     withChatPrompt() {
         return this.withPromptTemplate((0, OpenAIChatPromptTemplate_js_1.chat)());
     }
```
package/model-provider/openai/OpenAIChatModel.d.ts

```diff
@@ -1,7 +1,7 @@
 import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
 import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
 import { PromptTemplateFullTextModel } from "../../model-function/generate-text/PromptTemplateFullTextModel.js";
-import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+import { TextStreamingBaseModel, TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
 import { ToolCallGenerationModel } from "../../tool/generate-tool-call/ToolCallGenerationModel.js";
 import { ToolCallsGenerationModel } from "../../tool/generate-tool-calls/ToolCallsGenerationModel.js";
@@ -124,7 +124,7 @@ export interface OpenAIChatSettings extends AbstractOpenAIChatSettings {
  * ),
  * ]);
  */
-export declare class OpenAIChatModel extends AbstractOpenAIChatModel<OpenAIChatSettings> implements TextStreamingModel<OpenAIChatPrompt, OpenAIChatSettings>, ToolCallGenerationModel<OpenAIChatPrompt, OpenAIChatSettings>, ToolCallsGenerationModel<OpenAIChatPrompt, OpenAIChatSettings> {
+export declare class OpenAIChatModel extends AbstractOpenAIChatModel<OpenAIChatSettings> implements TextStreamingBaseModel<OpenAIChatPrompt, OpenAIChatSettings>, ToolCallGenerationModel<OpenAIChatPrompt, OpenAIChatSettings>, ToolCallsGenerationModel<OpenAIChatPrompt, OpenAIChatSettings> {
     constructor(settings: OpenAIChatSettings);
     readonly provider: "openai";
     get modelName(): OpenAIChatModelType;
@@ -141,17 +141,8 @@ export declare class OpenAIChatModel extends AbstractOpenAIChatModel<OpenAIChatS
         fnDescription?: string;
     }): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<OpenAIChatPrompt, OpenAIChatPrompt>>;
     asStructureGenerationModel<INPUT_PROMPT, OpenAIChatPrompt>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt> | FlexibleStructureFromTextPromptTemplate<INPUT_PROMPT, unknown>): StructureFromTextStreamingModel<INPUT_PROMPT, unknown, TextStreamingModel<unknown, import("../../model-function/generate-text/TextGenerationModel.js").TextGenerationModelSettings>> | StructureFromTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, TextStreamingModel<OpenAIChatPrompt, import("../../model-function/generate-text/TextGenerationModel.js").TextGenerationModelSettings>>;
-    /**
-     * Returns this model with a text prompt template.
-     */
     withTextPrompt(): PromptTemplateFullTextModel<string, OpenAIChatPrompt, OpenAIChatSettings, this>;
-    /**
-     * Returns this model with an instruction prompt template.
-     */
     withInstructionPrompt(): PromptTemplateFullTextModel<import("../../index.js").InstructionPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
-    /**
-     * Returns this model with a chat prompt template.
-     */
     withChatPrompt(): PromptTemplateFullTextModel<import("../../index.js").ChatPrompt, OpenAIChatPrompt, OpenAIChatSettings, this>;
     withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt>): PromptTemplateFullTextModel<INPUT_PROMPT, OpenAIChatPrompt, OpenAIChatSettings, this>;
     withJsonOutput(): this;
```
package/model-provider/openai/OpenAIChatModel.js

```diff
@@ -223,21 +223,12 @@ export class OpenAIChatModel extends AbstractOpenAIChatModel {
             template: promptTemplate,
         });
     }
-    /**
-     * Returns this model with a text prompt template.
-     */
     withTextPrompt() {
         return this.withPromptTemplate(text());
     }
-    /**
-     * Returns this model with an instruction prompt template.
-     */
     withInstructionPrompt() {
         return this.withPromptTemplate(instruction());
     }
-    /**
-     * Returns this model with a chat prompt template.
-     */
     withChatPrompt() {
         return this.withPromptTemplate(chat());
     }
```
package/model-provider/openai/OpenAICompletionModel.cjs

```diff
@@ -99,15 +99,12 @@ class OpenAICompletionModel extends AbstractOpenAICompletionModel_js_1.AbstractO
         ];
         return Object.fromEntries(Object.entries(this.settings).filter(([key]) => eventSettingProperties.includes(key)));
     }
-    /**
-     * Returns this model with an instruction prompt template.
-     */
+    withTextPrompt() {
+        return this.withPromptTemplate((0, TextPromptTemplate_js_1.text)());
+    }
     withInstructionPrompt() {
         return this.withPromptTemplate((0, TextPromptTemplate_js_1.instruction)());
     }
-    /**
-     * Returns this model with a chat prompt template.
-     */
     withChatPrompt(options) {
         return this.withPromptTemplate((0, TextPromptTemplate_js_1.chat)(options));
     }
```
package/model-provider/openai/OpenAICompletionModel.d.ts

```diff
@@ -1,5 +1,5 @@
 import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
-import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+import { TextStreamingBaseModel } from "../../model-function/generate-text/TextGenerationModel.js";
 import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
 import { AbstractOpenAICompletionModel, AbstractOpenAICompletionModelSettings, OpenAICompletionResponse } from "./AbstractOpenAICompletionModel.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
@@ -46,7 +46,7 @@ export interface OpenAICompletionModelSettings extends AbstractOpenAICompletionM
  * "Write a short story about a robot learning to love:\n\n"
  * );
  */
-export declare class OpenAICompletionModel extends AbstractOpenAICompletionModel<OpenAICompletionModelSettings> implements TextStreamingModel<string, OpenAICompletionModelSettings> {
+export declare class OpenAICompletionModel extends AbstractOpenAICompletionModel<OpenAICompletionModelSettings> implements TextStreamingBaseModel<string, OpenAICompletionModelSettings> {
     constructor(settings: OpenAICompletionModelSettings);
     readonly provider: "openai";
     get modelName(): "gpt-3.5-turbo-instruct";
@@ -54,13 +54,8 @@ export declare class OpenAICompletionModel extends AbstractOpenAICompletionModel
     readonly tokenizer: TikTokenTokenizer;
     countPromptTokens(input: string): Promise<number>;
     get settingsForEvent(): Partial<OpenAICompletionModelSettings>;
-    /**
-     * Returns this model with an instruction prompt template.
-     */
+    withTextPrompt(): PromptTemplateTextStreamingModel<string, string, OpenAICompletionModelSettings, this>;
     withInstructionPrompt(): PromptTemplateTextStreamingModel<import("../../index.js").InstructionPrompt, string, OpenAICompletionModelSettings, this>;
-    /**
-     * Returns this model with a chat prompt template.
-     */
     withChatPrompt(options?: {
         user?: string;
         assistant?: string;
```
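Net effect of these last hunks: OpenAICompletionModel now implements TextStreamingBaseModel<string, OpenAICompletionModelSettings> and gains a withTextPrompt() method in both the build output and the type declarations. A small usage sketch, reusing the prompt string from the class's own doc comment; the streamText call shape is assumed from the package's documented object-argument style:

```ts
import { openai, streamText } from "modelfusion";

const textStream = await streamText({
  model: openai
    .CompletionTextGenerator({ model: "gpt-3.5-turbo-instruct" })
    .withTextPrompt(), // newly declared in this release; string in, text stream out
  prompt: "Write a short story about a robot learning to love:\n\n",
});

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```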
|