modelfusion 0.64.0 → 0.65.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +24 -12
- package/model-function/generate-text/index.cjs +0 -1
- package/model-function/generate-text/index.d.ts +0 -1
- package/model-function/generate-text/index.js +0 -1
- package/model-function/generate-text/prompt-format/InstructionPrompt.d.ts +13 -0
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.cjs +4 -2
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.js +4 -2
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +0 -4
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +0 -1
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +0 -4
- package/model-provider/llamacpp/index.cjs +1 -0
- package/model-provider/llamacpp/index.d.ts +1 -0
- package/model-provider/llamacpp/index.js +1 -0
- package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs +36 -0
- package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.d.ts +9 -0
- package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js +32 -0
- package/model-provider/openai/chat/OpenAIChatMessage.cjs +15 -2
- package/model-provider/openai/chat/OpenAIChatMessage.d.ts +7 -1
- package/model-provider/openai/chat/OpenAIChatMessage.js +15 -2
- package/model-provider/openai/chat/OpenAIChatModel.cjs +4 -7
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +11 -12
- package/model-provider/openai/chat/OpenAIChatModel.js +5 -8
- package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +7 -36
- package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +1 -6
- package/model-provider/openai/chat/OpenAIChatPromptFormat.js +6 -34
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +3 -2
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +3 -2
- package/package.json +1 -1
- package/model-function/generate-text/prompt-format/VisionInstructionPrompt.cjs +0 -2
- package/model-function/generate-text/prompt-format/VisionInstructionPrompt.d.ts +0 -31
- package/model-function/generate-text/prompt-format/VisionInstructionPrompt.js +0 -1
- package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.cjs +0 -15
- package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.d.ts +0 -4
- package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.js +0 -11
package/README.md
CHANGED
@@ -14,8 +14,8 @@
 
 **ModelFusion** is a TypeScript library for building AI applications, chatbots, and agents.
 
-- **Vendor-neutral**: ModelFusion is a non-commercial open source project that is community-driven. You can use it with any supported
-- **Multimodal**: ModelFusion supports a wide range of models including text generation, image generation, text-to-speech, speech-to-text, and embedding models.
+- **Vendor-neutral**: ModelFusion is a non-commercial open source project that is community-driven. You can use it with any supported provider.
+- **Multimodal**: ModelFusion supports a wide range of models including text generation, image generation, vision, text-to-speech, speech-to-text, and embedding models.
 - **Streaming**: ModelFusion supports streaming for many generation models, e.g. text streaming, structure streaming, and full duplex speech streaming.
 - **Utility functions**: ModelFusion provides functionality for tools and tool usage, vector indices, and guards functions.
 - **Type inference and validation**: ModelFusion infers TypeScript types wherever possible and to validates model responses.
@@ -47,9 +47,7 @@ You can use [prompt formats](https://modelfusion.dev/guide/function/generate-tex
 
 ```ts
 const text = await generateText(
-  new OpenAICompletionModel({
-    model: "gpt-3.5-turbo-instruct",
-  }),
+  new OpenAICompletionModel({ model: "gpt-3.5-turbo-instruct" }),
   "Write a short story about a robot learning to love:\n\n"
 );
 ```
@@ -60,9 +58,7 @@ Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai),
 
 ```ts
 const textStream = await streamText(
-  new OpenAICompletionModel({
-    model: "gpt-3.5-turbo-instruct",
-  }),
+  new OpenAICompletionModel({ model: "gpt-3.5-turbo-instruct" }),
   "Write a short story about a robot learning to love:\n\n"
 );
 
@@ -73,16 +69,30 @@ for await (const textPart of textStream) {
 
 Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Anthropic](https://modelfusion.dev/integration/model-provider/anthropic), [Cohere](https://modelfusion.dev/integration/model-provider/cohere), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp), [Ollama](https://modelfusion.dev/integration/model-provider/ollama)
 
+#### streamText with multi-modal prompt
+
+Multi-modal vision models such as GPT 4 Vision can process images as part of the prompt.
+
+```ts
+const textStream = await streamText(
+  new OpenAIChatModel({ model: "gpt-4-vision-preview" }),
+  [
+    OpenAIChatMessage.user("Describe the image in detail:", {
+      image: { base64Content: image, mimeType: "image/png" },
+    }),
+  ]
+);
+```
+
+Providers: [OpenAI](https://modelfusion.dev/integration/model-provider/openai), [Llama.cpp](https://modelfusion.dev/integration/model-provider/llamacpp)
+
 ### [Generate Image](https://modelfusion.dev/guide/function/generate-image)
 
 Generate an image from a prompt.
 
 ```ts
 const image = await generateImage(
-  new OpenAIImageGenerationModel({
-    model: "dall-e-3",
-    size: "1024x1024",
-  }),
+  new OpenAIImageGenerationModel({ model: "dall-e-3", size: "1024x1024" }),
   "the wicked witch of the west in the style of early 19th century painting"
 );
 ```
@@ -499,6 +509,8 @@ const textStream = await streamText(
 | Vicuna | ❌ | ✅ |
 | Generic Text | ✅ | ✅ |
 
+#### [Vision Prompts]
+
 #### [Image Generation Prompt Formats](https://modelfusion.dev/guide/function/generate-image/prompt-format)
 
 You an use prompt formats with image models as well, e.g. to use a basic text prompt. It is available as a shorthand method:
package/model-function/generate-text/index.cjs
CHANGED
@@ -26,7 +26,6 @@ __exportStar(require("./prompt-format/InstructionPrompt.cjs"), exports);
 __exportStar(require("./prompt-format/Llama2PromptFormat.cjs"), exports);
 __exportStar(require("./prompt-format/TextPromptFormat.cjs"), exports);
 __exportStar(require("./prompt-format/VicunaPromptFormat.cjs"), exports);
-__exportStar(require("./prompt-format/VisionInstructionPrompt.cjs"), exports);
 __exportStar(require("./prompt-format/trimChatPrompt.cjs"), exports);
 __exportStar(require("./prompt-format/validateChatPrompt.cjs"), exports);
 __exportStar(require("./streamText.cjs"), exports);
package/model-function/generate-text/index.d.ts
CHANGED
@@ -10,7 +10,6 @@ export * from "./prompt-format/InstructionPrompt.js";
 export * from "./prompt-format/Llama2PromptFormat.js";
 export * from "./prompt-format/TextPromptFormat.js";
 export * from "./prompt-format/VicunaPromptFormat.js";
-export * from "./prompt-format/VisionInstructionPrompt.js";
 export * from "./prompt-format/trimChatPrompt.js";
 export * from "./prompt-format/validateChatPrompt.js";
 export * from "./streamText.js";
package/model-function/generate-text/index.js
CHANGED
@@ -10,7 +10,6 @@ export * from "./prompt-format/InstructionPrompt.js";
 export * from "./prompt-format/Llama2PromptFormat.js";
 export * from "./prompt-format/TextPromptFormat.js";
 export * from "./prompt-format/VicunaPromptFormat.js";
-export * from "./prompt-format/VisionInstructionPrompt.js";
 export * from "./prompt-format/trimChatPrompt.js";
 export * from "./prompt-format/validateChatPrompt.js";
 export * from "./streamText.js";
package/model-function/generate-text/prompt-format/InstructionPrompt.d.ts
CHANGED
@@ -25,4 +25,17 @@ export type InstructionPrompt = {
      * Optional additional input or context, e.g. a the content from which information should be extracted.
      */
     input?: string;
+    /**
+     * Optional image to provide context for the language model. Only supported by some models.
+     */
+    image?: {
+        /**
+         * Base-64 encoded image.
+         */
+        base64Content: string;
+        /**
+         * Optional mime type of the image.
+         */
+        mimeType?: string;
+    };
 };
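The new optional `image` field takes over from the standalone `VisionInstructionPrompt` type that this release deletes (see the removals at the end of this diff). A minimal sketch of an `InstructionPrompt` that uses it, modeled on the example from the deleted type's doc comment; the file path is illustrative:

```ts
import fs from "node:fs";
import path from "node:path";

// Illustrative instruction prompt with the new optional `image` field.
// The image travels as base64; consumers default the mime type to "image/jpeg".
const prompt = {
  instruction: "Describe the image in detail:",
  image: {
    base64Content: fs.readFileSync(path.join("data", "example-image.png"), {
      encoding: "base64",
    }),
    mimeType: "image/png",
  },
};
```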
package/model-function/generate-text/prompt-format/VicunaPromptFormat.cjs
CHANGED
@@ -2,7 +2,9 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.mapChatPromptToVicunaFormat = void 0;
 const validateChatPrompt_js_1 = require("./validateChatPrompt.cjs");
-
+// default Vicuna 1 system message
+const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
+    "The assistant gives helpful, detailed, and polite answers to the user's questions.";
 /**
  * Formats a chat prompt as a Vicuna prompt.
  *
@@ -32,7 +34,7 @@ function mapChatPromptToVicunaFormat() {
         }
         // first message was not a system message:
         if (i === 0) {
-            text += `${
+            text += `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
         }
         // user message
         if ("user" in message) {
package/model-function/generate-text/prompt-format/VicunaPromptFormat.js
CHANGED
@@ -1,5 +1,7 @@
 import { validateChatPrompt } from "./validateChatPrompt.js";
-
+// default Vicuna 1 system message
+const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
+    "The assistant gives helpful, detailed, and polite answers to the user's questions.";
 /**
  * Formats a chat prompt as a Vicuna prompt.
  *
@@ -29,7 +31,7 @@ export function mapChatPromptToVicunaFormat() {
         }
         // first message was not a system message:
         if (i === 0) {
-            text += `${
+            text += `${DEFAULT_SYSTEM_MESSAGE}\n\n`;
         }
         // user message
         if ("user" in message) {
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs
CHANGED
@@ -12,7 +12,6 @@ const parseJSON_js_1 = require("../../util/parseJSON.cjs");
 const LlamaCppApiConfiguration_js_1 = require("./LlamaCppApiConfiguration.cjs");
 const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
 const LlamaCppTokenizer_js_1 = require("./LlamaCppTokenizer.cjs");
-const mapVisionInstructionPromptToLlamaCppFormat_js_1 = require("./mapVisionInstructionPromptToLlamaCppFormat.cjs");
 class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     constructor(settings = {}) {
         super({ settings });
@@ -108,9 +107,6 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             stopSequences: [],
         });
     }
-    withVisionInstructionPrompt() {
-        return this.withPromptFormat((0, mapVisionInstructionPromptToLlamaCppFormat_js_1.mapVisionInstructionPromptToLlamaCppFormat)());
-    }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
             model: this.withSettings({
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts
CHANGED
@@ -111,7 +111,6 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
     }>;
     doStreamText(prompt: LlamaCppTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
     withTextPrompt(): PromptFormatTextStreamingModel<string, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
-    withVisionInstructionPrompt(): PromptFormatTextStreamingModel<import("../../index.js").VisionInstructionPrompt, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, LlamaCppTextGenerationPrompt>): PromptFormatTextStreamingModel<INPUT_PROMPT, LlamaCppTextGenerationPrompt, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
 }
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js
CHANGED
@@ -9,7 +9,6 @@ import { parseJsonWithZod } from "../../util/parseJSON.js";
 import { LlamaCppApiConfiguration } from "./LlamaCppApiConfiguration.js";
 import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
 import { LlamaCppTokenizer } from "./LlamaCppTokenizer.js";
-import { mapVisionInstructionPromptToLlamaCppFormat } from "./mapVisionInstructionPromptToLlamaCppFormat.js";
 export class LlamaCppTextGenerationModel extends AbstractModel {
     constructor(settings = {}) {
         super({ settings });
@@ -105,9 +104,6 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
             stopSequences: [],
         });
     }
-    withVisionInstructionPrompt() {
-        return this.withPromptFormat(mapVisionInstructionPromptToLlamaCppFormat());
-    }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel({
             model: this.withSettings({
package/model-provider/llamacpp/index.cjs
CHANGED
@@ -21,3 +21,4 @@ Object.defineProperty(exports, "LlamaCppError", { enumerable: true, get: functio
 __exportStar(require("./LlamaCppTextEmbeddingModel.cjs"), exports);
 __exportStar(require("./LlamaCppTextGenerationModel.cjs"), exports);
 __exportStar(require("./LlamaCppTokenizer.cjs"), exports);
+__exportStar(require("./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs"), exports);
package/model-provider/llamacpp/index.d.ts
CHANGED
@@ -3,3 +3,4 @@ export { LlamaCppError, LlamaCppErrorData } from "./LlamaCppError.js";
 export * from "./LlamaCppTextEmbeddingModel.js";
 export * from "./LlamaCppTextGenerationModel.js";
 export * from "./LlamaCppTokenizer.js";
+export * from "./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js";
package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs
ADDED
@@ -0,0 +1,36 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.mapInstructionPromptToBakLLaVA1ForLlamaCppFormat = void 0;
+// default Vicuna 1 system message
+const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
+    "The assistant gives helpful, detailed, and polite answers to the user's questions.";
+/**
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompts.
+ *
+ * @see https://github.com/SkunkworksAI/BakLLaVA
+ */
+function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat() {
+    return {
+        format: (instruction) => {
+            let text = "";
+            text += `${instruction.system ?? DEFAULT_SYSTEM_MESSAGE}\n\n`;
+            text += `USER: `;
+            if (instruction.image != null) {
+                text += `[img-1]\n`;
+            }
+            text += `${instruction.instruction}\n`;
+            if (instruction.input != null) {
+                text += `${instruction.input}\n`;
+            }
+            text += `ASSISTANT: `;
+            return {
+                text,
+                images: instruction.image != null
+                    ? { "1": instruction.image.base64Content }
+                    : undefined,
+            };
+        },
+        stopSequences: [`\nUSER:`],
+    };
+}
+exports.mapInstructionPromptToBakLLaVA1ForLlamaCppFormat = mapInstructionPromptToBakLLaVA1ForLlamaCppFormat;
package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.d.ts
ADDED
@@ -0,0 +1,9 @@
+import { InstructionPrompt } from "../../model-function/generate-text/prompt-format/InstructionPrompt.js";
+import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
+import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
+/**
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompts.
+ *
+ * @see https://github.com/SkunkworksAI/BakLLaVA
+ */
+export declare function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat(): TextGenerationPromptFormat<InstructionPrompt, LlamaCppTextGenerationPrompt>;
package/model-provider/llamacpp/mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js
ADDED
@@ -0,0 +1,32 @@
+// default Vicuna 1 system message
+const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
+    "The assistant gives helpful, detailed, and polite answers to the user's questions.";
+/**
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompts.
+ *
+ * @see https://github.com/SkunkworksAI/BakLLaVA
+ */
+export function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat() {
+    return {
+        format: (instruction) => {
+            let text = "";
+            text += `${instruction.system ?? DEFAULT_SYSTEM_MESSAGE}\n\n`;
+            text += `USER: `;
+            if (instruction.image != null) {
+                text += `[img-1]\n`;
+            }
+            text += `${instruction.instruction}\n`;
+            if (instruction.input != null) {
+                text += `${instruction.input}\n`;
+            }
+            text += `ASSISTANT: `;
+            return {
+                text,
+                images: instruction.image != null
+                    ? { "1": instruction.image.base64Content }
+                    : undefined,
+            };
+        },
+        stopSequences: [`\nUSER:`],
+    };
+}
package/model-provider/openai/chat/OpenAIChatMessage.cjs
CHANGED
@@ -5,8 +5,21 @@ exports.OpenAIChatMessage = {
     system(content) {
         return { role: "system", content };
     },
-    user(content) {
-
+    user(content, options) {
+        if (options?.image != null) {
+            return {
+                role: "user",
+                content: [
+                    { type: "text", text: content },
+                    {
+                        type: "image_url",
+                        image_url: `data:${options.image.mimeType ?? "image/jpeg"};base64,${options.image.base64Content}`,
+                    },
+                ],
+                name: options.name,
+            };
+        }
+        return { role: "user", content, name: options?.name };
     },
     assistant(content) {
         return { role: "assistant", content };
package/model-provider/openai/chat/OpenAIChatMessage.d.ts
CHANGED
@@ -39,7 +39,13 @@ export type OpenAIChatMessage = {
 };
 export declare const OpenAIChatMessage: {
     system(content: string): OpenAIChatMessage;
-    user(content: string
+    user(content: string, options?: {
+        name?: string;
+        image?: {
+            base64Content: string;
+            mimeType?: string;
+        };
+    }): OpenAIChatMessage;
     assistant(content: string): OpenAIChatMessage;
     functionCall(content: string | null, functionCall: {
         name: string;
package/model-provider/openai/chat/OpenAIChatMessage.js
CHANGED
@@ -2,8 +2,21 @@ export const OpenAIChatMessage = {
     system(content) {
         return { role: "system", content };
     },
-    user(content) {
-
+    user(content, options) {
+        if (options?.image != null) {
+            return {
+                role: "user",
+                content: [
+                    { type: "text", text: content },
+                    {
+                        type: "image_url",
+                        image_url: `data:${options.image.mimeType ?? "image/jpeg"};base64,${options.image.base64Content}`,
+                    },
+                ],
+                name: options.name,
+            };
+        }
+        return { role: "user", content, name: options?.name };
     },
     assistant(content) {
         return { role: "assistant", content };
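For reference, the reworked `user()` helper emits the content-array shape that GPT-4 Vision expects only when an image is supplied; plain calls keep the simple string form. Both return values, read off the implementation above (base64 payload abbreviated):

```ts
OpenAIChatMessage.user("Describe the image in detail:", {
  image: { base64Content: "<base64>", mimeType: "image/png" },
});
// => {
//   role: "user",
//   content: [
//     { type: "text", text: "Describe the image in detail:" },
//     { type: "image_url", image_url: "data:image/png;base64,<base64>" },
//   ],
//   name: undefined,
// }

OpenAIChatMessage.user("Hello!");
// => { role: "user", content: "Hello!", name: undefined }
```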
package/model-provider/openai/chat/OpenAIChatModel.cjs
CHANGED
@@ -364,9 +364,6 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
     withInstructionPrompt() {
         return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.mapInstructionPromptToOpenAIChatFormat)());
     }
-    withVisionInstructionPrompt() {
-        return this.withPromptFormat((0, OpenAIChatPromptFormat_js_1.mapVisionInstructionPromptToOpenAIChatFormat)());
-    }
     /**
      * Returns this model with a chat prompt format.
      */
@@ -391,10 +388,6 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
 exports.OpenAIChatModel = OpenAIChatModel;
 const openAIChatResponseSchema = zod_1.z.object({
     id: zod_1.z.string(),
-    object: zod_1.z.literal("chat.completion"),
-    created: zod_1.z.number(),
-    model: zod_1.z.string(),
-    system_fingerprint: zod_1.z.string(),
     choices: zod_1.z.array(zod_1.z.object({
         message: zod_1.z.object({
             role: zod_1.z.literal("assistant"),
@@ -416,6 +409,10 @@ const openAIChatResponseSchema = zod_1.z.object({
         "function_call",
     ]),
     })),
+    created: zod_1.z.number(),
+    model: zod_1.z.string(),
+    system_fingerprint: zod_1.z.string().optional(),
+    object: zod_1.z.literal("chat.completion"),
     usage: zod_1.z.object({
         prompt_tokens: zod_1.z.number(),
         completion_tokens: zod_1.z.number(),
package/model-provider/openai/chat/OpenAIChatModel.d.ts
CHANGED
@@ -198,7 +198,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
         logprobs?: any;
     }[];
     created: number;
-    system_fingerprint
+    system_fingerprint?: string | undefined;
     };
     text: string;
     usage: {
@@ -239,7 +239,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
         logprobs?: any;
     }[];
     created: number;
-    system_fingerprint
+    system_fingerprint?: string | undefined;
     };
     valueText: string;
     value: any;
@@ -274,7 +274,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
         logprobs?: any;
     }[];
     created: number;
-    system_fingerprint
+    system_fingerprint?: string | undefined;
     };
     structureAndText: {
         structure: null;
@@ -311,7 +311,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
         logprobs?: any;
     }[];
     created: number;
-    system_fingerprint
+    system_fingerprint?: string | undefined;
     };
     structureAndText: {
         structure: string;
@@ -334,7 +334,6 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
      * Returns this model with an instruction prompt format.
      */
     withInstructionPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").InstructionPrompt, OpenAIChatMessage[], OpenAIChatSettings, this>;
-    withVisionInstructionPrompt(): PromptFormatTextStreamingModel<import("../../../index.js").VisionInstructionPrompt, OpenAIChatMessage[], OpenAIChatSettings, this>;
     /**
      * Returns this model with a chat prompt format.
      */
@@ -344,10 +343,6 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
 }
 declare const openAIChatResponseSchema: z.ZodObject<{
     id: z.ZodString;
-    object: z.ZodLiteral<"chat.completion">;
-    created: z.ZodNumber;
-    model: z.ZodString;
-    system_fingerprint: z.ZodString;
     choices: z.ZodArray<z.ZodObject<{
         message: z.ZodObject<{
             role: z.ZodLiteral<"assistant">;
@@ -405,6 +400,10 @@ declare const openAIChatResponseSchema: z.ZodObject<{
         index: number;
         logprobs?: any;
     }>, "many">;
+    created: z.ZodNumber;
+    model: z.ZodString;
+    system_fingerprint: z.ZodOptional<z.ZodString>;
+    object: z.ZodLiteral<"chat.completion">;
     usage: z.ZodObject<{
         prompt_tokens: z.ZodNumber;
         completion_tokens: z.ZodNumber;
@@ -441,7 +440,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
         logprobs?: any;
     }[];
     created: number;
-    system_fingerprint
+    system_fingerprint?: string | undefined;
 }, {
     object: "chat.completion";
     usage: {
@@ -465,7 +464,7 @@ declare const openAIChatResponseSchema: z.ZodObject<{
         logprobs?: any;
     }[];
     created: number;
-    system_fingerprint
+    system_fingerprint?: string | undefined;
 }>;
 export type OpenAIChatResponse = z.infer<typeof openAIChatResponseSchema>;
 export type OpenAIChatResponseFormatType<T> = {
@@ -501,7 +500,7 @@ export declare const OpenAIChatResponseFormat: {
         logprobs?: any;
     }[];
     created: number;
-    system_fingerprint
+    system_fingerprint?: string | undefined;
 }>;
 };
 /**
package/model-provider/openai/chat/OpenAIChatModel.js
CHANGED
@@ -9,7 +9,7 @@ import { PromptFormatTextStreamingModel } from "../../../model-function/generate
 import { OpenAIApiConfiguration } from "../OpenAIApiConfiguration.js";
 import { failedOpenAICallResponseHandler } from "../OpenAIError.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
-import { mapChatPromptToOpenAIChatFormat, mapInstructionPromptToOpenAIChatFormat,
+import { mapChatPromptToOpenAIChatFormat, mapInstructionPromptToOpenAIChatFormat, } from "./OpenAIChatPromptFormat.js";
 import { createOpenAIChatDeltaIterableQueue } from "./OpenAIChatStreamIterable.js";
 import { countOpenAIChatPromptTokens } from "./countOpenAIChatMessageTokens.js";
 /*
@@ -355,9 +355,6 @@ export class OpenAIChatModel extends AbstractModel {
     withInstructionPrompt() {
         return this.withPromptFormat(mapInstructionPromptToOpenAIChatFormat());
     }
-    withVisionInstructionPrompt() {
-        return this.withPromptFormat(mapVisionInstructionPromptToOpenAIChatFormat());
-    }
     /**
      * Returns this model with a chat prompt format.
      */
@@ -381,10 +378,6 @@ export class OpenAIChatModel extends AbstractModel {
 }
 const openAIChatResponseSchema = z.object({
     id: z.string(),
-    object: z.literal("chat.completion"),
-    created: z.number(),
-    model: z.string(),
-    system_fingerprint: z.string(),
     choices: z.array(z.object({
         message: z.object({
             role: z.literal("assistant"),
@@ -406,6 +399,10 @@ const openAIChatResponseSchema = z.object({
         "function_call",
     ]),
     })),
+    created: z.number(),
+    model: z.string(),
+    system_fingerprint: z.string().optional(),
+    object: z.literal("chat.completion"),
     usage: z.object({
         prompt_tokens: z.number(),
         completion_tokens: z.number(),
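Besides reordering the envelope fields after `choices`, the schema change makes `system_fingerprint` optional, so chat responses that omit the field no longer fail validation. A minimal zod sketch of the behavioral difference:

```ts
import { z } from "zod";

const before = z.object({ system_fingerprint: z.string() });
const after = z.object({ system_fingerprint: z.string().optional() });

before.safeParse({}).success; // false: the field was required in 0.64.0
after.safeParse({}).success; // true: 0.65.x tolerates its absence
```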
package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs
CHANGED
@@ -1,7 +1,8 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.mapChatPromptToOpenAIChatFormat = exports.
+exports.mapChatPromptToOpenAIChatFormat = exports.mapInstructionPromptToOpenAIChatFormat = void 0;
 const validateChatPrompt_js_1 = require("../../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
+const OpenAIChatMessage_js_1 = require("./OpenAIChatMessage.cjs");
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
@@ -10,20 +11,13 @@ function mapInstructionPromptToOpenAIChatFormat() {
         format: (instruction) => {
             const messages = [];
             if (instruction.system != null) {
-                messages.push(
-                    role: "system",
-                    content: instruction.system,
-                });
+                messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.system(instruction.system));
             }
-            messages.push({
-
-
-            });
+            messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(instruction.instruction, {
+                image: instruction.image,
+            }));
             if (instruction.input != null) {
-                messages.push(
-                    role: "user",
-                    content: instruction.input,
-                });
+                messages.push(OpenAIChatMessage_js_1.OpenAIChatMessage.user(instruction.input));
             }
             return messages;
         },
@@ -31,29 +25,6 @@ function mapInstructionPromptToOpenAIChatFormat() {
     };
 }
 exports.mapInstructionPromptToOpenAIChatFormat = mapInstructionPromptToOpenAIChatFormat;
-/**
- * Formats a version prompt as an OpenAI chat prompt.
- */
-function mapVisionInstructionPromptToOpenAIChatFormat() {
-    return {
-        format: ({ instruction, image, mimeType }) => {
-            return [
-                {
-                    role: "user",
-                    content: [
-                        { type: "text", text: instruction },
-                        {
-                            type: "image_url",
-                            image_url: `data:${mimeType ?? "image/jpeg"};base64,${image}`,
-                        },
-                    ],
-                },
-            ];
-        },
-        stopSequences: [],
-    };
-}
-exports.mapVisionInstructionPromptToOpenAIChatFormat = mapVisionInstructionPromptToOpenAIChatFormat;
 /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts
CHANGED
@@ -1,16 +1,11 @@
+import { TextGenerationPromptFormat } from "../../../model-function/generate-text/TextGenerationPromptFormat.js";
 import { ChatPrompt } from "../../../model-function/generate-text/prompt-format/ChatPrompt.js";
 import { InstructionPrompt } from "../../../model-function/generate-text/prompt-format/InstructionPrompt.js";
-import { TextGenerationPromptFormat } from "../../../model-function/generate-text/TextGenerationPromptFormat.js";
 import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
-import { VisionInstructionPrompt } from "../../../model-function/generate-text/prompt-format/VisionInstructionPrompt.js";
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
 export declare function mapInstructionPromptToOpenAIChatFormat(): TextGenerationPromptFormat<InstructionPrompt, Array<OpenAIChatMessage>>;
-/**
- * Formats a version prompt as an OpenAI chat prompt.
- */
-export declare function mapVisionInstructionPromptToOpenAIChatFormat(): TextGenerationPromptFormat<VisionInstructionPrompt, Array<OpenAIChatMessage>>;
 /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
package/model-provider/openai/chat/OpenAIChatPromptFormat.js
CHANGED
@@ -1,4 +1,5 @@
 import { validateChatPrompt } from "../../../model-function/generate-text/prompt-format/validateChatPrompt.js";
+import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
@@ -7,48 +8,19 @@ export function mapInstructionPromptToOpenAIChatFormat() {
         format: (instruction) => {
             const messages = [];
             if (instruction.system != null) {
-                messages.push(
-                    role: "system",
-                    content: instruction.system,
-                });
+                messages.push(OpenAIChatMessage.system(instruction.system));
             }
-            messages.push({
-
-
-            });
+            messages.push(OpenAIChatMessage.user(instruction.instruction, {
+                image: instruction.image,
+            }));
             if (instruction.input != null) {
-                messages.push(
-                    role: "user",
-                    content: instruction.input,
-                });
+                messages.push(OpenAIChatMessage.user(instruction.input));
             }
             return messages;
         },
         stopSequences: [],
     };
 }
-/**
- * Formats a version prompt as an OpenAI chat prompt.
- */
-export function mapVisionInstructionPromptToOpenAIChatFormat() {
-    return {
-        format: ({ instruction, image, mimeType }) => {
-            return [
-                {
-                    role: "user",
-                    content: [
-                        { type: "text", text: instruction },
-                        {
-                            type: "image_url",
-                            image_url: `data:${mimeType ?? "image/jpeg"};base64,${image}`,
-                        },
-                    ],
-                },
-            ];
-        },
-        stopSequences: [],
-    };
-}
 /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
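With message construction delegated to the `OpenAIChatMessage` helpers, an instruction prompt that carries an image now produces the same vision message shape used everywhere else. A sketch of the array built for a full instruction (values illustrative, shapes read off the helpers):

```ts
const messages = mapInstructionPromptToOpenAIChatFormat().format({
  system: "You are a helpful assistant.",
  instruction: "Describe the image in detail:",
  input: "Focus on colors and shapes.",
  image: { base64Content: "<base64>", mimeType: "image/png" },
});
// => [
//   { role: "system", content: "You are a helpful assistant." },
//   {
//     role: "user",
//     content: [
//       { type: "text", text: "Describe the image in detail:" },
//       { type: "image_url", image_url: "data:image/png;base64,<base64>" },
//     ],
//     name: undefined,
//   },
//   { role: "user", content: "Focus on colors and shapes.", name: undefined },
// ]
```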
package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs
CHANGED
@@ -6,6 +6,7 @@ const AsyncQueue_js_1 = require("../../../util/AsyncQueue.cjs");
 const parseEventSourceStream_js_1 = require("../../../util/streaming/parseEventSourceStream.cjs");
 const parseJSON_js_1 = require("../../../util/parseJSON.cjs");
 const chatResponseStreamEventSchema = zod_1.z.object({
+    id: zod_1.z.string(),
     choices: zod_1.z.array(zod_1.z.object({
         delta: zod_1.z.object({
             role: zod_1.z.enum(["assistant", "user"]).optional(),
@@ -30,9 +31,9 @@ const chatResponseStreamEventSchema = zod_1.z.object({
         index: zod_1.z.number(),
     })),
     created: zod_1.z.number(),
-    id: zod_1.z.string(),
     model: zod_1.z.string(),
-
+    system_fingerprint: zod_1.z.string().optional(),
+    object: zod_1.z.literal("chat.completion.chunk"),
 });
 async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
     const queue = new AsyncQueue_js_1.AsyncQueue();
package/model-provider/openai/chat/OpenAIChatStreamIterable.js
CHANGED
@@ -3,6 +3,7 @@ import { AsyncQueue } from "../../../util/AsyncQueue.js";
 import { parseEventSourceStream } from "../../../util/streaming/parseEventSourceStream.js";
 import { safeParseJsonWithZod } from "../../../util/parseJSON.js";
 const chatResponseStreamEventSchema = z.object({
+    id: z.string(),
     choices: z.array(z.object({
         delta: z.object({
             role: z.enum(["assistant", "user"]).optional(),
@@ -27,9 +28,9 @@ const chatResponseStreamEventSchema = z.object({
         index: z.number(),
     })),
     created: z.number(),
-    id: z.string(),
     model: z.string(),
-
+    system_fingerprint: z.string().optional(),
+    object: z.literal("chat.completion.chunk"),
 });
 export async function createOpenAIChatDeltaIterableQueue(stream, extractDeltaValue) {
     const queue = new AsyncQueue();
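The stream-event schema now checks `id`, an optional `system_fingerprint`, and the `"chat.completion.chunk"` object tag, matching what the chat completions streaming API emits. An illustrative chunk that passes the updated schema (delta fields beyond `role` are elided in the hunks above):

```ts
const chunk = {
  id: "chatcmpl-abc123", // illustrative id
  choices: [{ delta: { role: "assistant" /* ... */ }, index: 0 }],
  created: 1700000000,
  model: "gpt-4-vision-preview",
  object: "chat.completion.chunk",
  // system_fingerprint is optional and may be absent
};
```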
package/package.json
CHANGED
-  "version": "0.64.0",
+  "version": "0.65.1",
package/model-function/generate-text/prompt-format/VisionInstructionPrompt.d.ts
REMOVED
@@ -1,31 +0,0 @@
-/**
- * A single instruction version prompt. It contains an instruction, a base64 encoded image
- * and an optional mime type of the image.
- *
- * If no mime type is provided, the mime type default to "image/jpeg".
- *
- * @example
- * ```ts
- * {
- *   instruction: "Describe the image in detail:",
- *   image: fs.readFileSync(path.join("data", "example-image.png"), {
- *     encoding: "base64",
- *   }),
- *   mimeType: "image/png"
- * }
- * ```
- */
-export type VisionInstructionPrompt = {
-    /**
-     * The instruction for the model.
-     */
-    instruction: string;
-    /**
-     * Base-64 encoded image.
-     */
-    image: string;
-    /**
-     * Optional mime type of the image.
-     */
-    mimeType?: string;
-};
package/model-function/generate-text/prompt-format/VisionInstructionPrompt.js
REMOVED
@@ -1 +0,0 @@
-export {};
package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.cjs
REMOVED
@@ -1,15 +0,0 @@
-"use strict";
-Object.defineProperty(exports, "__esModule", { value: true });
-exports.mapVisionInstructionPromptToLlamaCppFormat = void 0;
-function mapVisionInstructionPromptToLlamaCppFormat() {
-    return {
-        format: ({ instruction, image }) => {
-            return {
-                text: `[img-1]\n\n${instruction}`,
-                images: { "1": image },
-            };
-        },
-        stopSequences: [],
-    };
-}
-exports.mapVisionInstructionPromptToLlamaCppFormat = mapVisionInstructionPromptToLlamaCppFormat;
package/model-provider/llamacpp/mapVisionInstructionPromptToLlamaCppFormat.d.ts
REMOVED
@@ -1,4 +0,0 @@
-import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
-import { VisionInstructionPrompt } from "../../model-function/generate-text/prompt-format/VisionInstructionPrompt.js";
-import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
-export declare function mapVisionInstructionPromptToLlamaCppFormat(): TextGenerationPromptFormat<VisionInstructionPrompt, LlamaCppTextGenerationPrompt>;