modelfusion 0.69.0 → 0.71.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +26 -13
- package/model-function/ModelCallEvent.d.ts +3 -2
- package/model-function/generate-text/index.cjs +1 -8
- package/model-function/generate-text/index.d.ts +1 -8
- package/model-function/generate-text/index.js +1 -8
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.cjs +31 -3
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.d.ts +29 -1
- package/model-function/generate-text/prompt-format/AlpacaPromptFormat.js +29 -1
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.cjs +79 -0
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.d.ts +31 -0
- package/model-function/generate-text/prompt-format/ChatMLPromptFormat.js +74 -0
- package/model-function/generate-text/prompt-format/ChatPrompt.d.ts +28 -23
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.cjs +17 -0
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.d.ts +8 -0
- package/model-function/generate-text/prompt-format/ChatPromptValidationError.js +13 -0
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.cjs +41 -27
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.d.ts +20 -2
- package/model-function/generate-text/prompt-format/Llama2PromptFormat.js +38 -24
- package/model-function/generate-text/prompt-format/TextPromptFormat.cjs +27 -30
- package/model-function/generate-text/prompt-format/TextPromptFormat.d.ts +7 -5
- package/model-function/generate-text/prompt-format/TextPromptFormat.js +24 -27
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.cjs +21 -29
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.d.ts +2 -2
- package/model-function/generate-text/prompt-format/VicunaPromptFormat.js +19 -27
- package/model-function/generate-text/prompt-format/index.cjs +39 -0
- package/model-function/generate-text/prompt-format/index.d.ts +10 -0
- package/model-function/generate-text/prompt-format/index.js +10 -0
- package/model-function/generate-text/prompt-format/trimChatPrompt.cjs +17 -22
- package/model-function/generate-text/prompt-format/trimChatPrompt.js +17 -22
- package/model-function/generate-text/prompt-format/validateChatPrompt.cjs +12 -24
- package/model-function/generate-text/prompt-format/validateChatPrompt.d.ts +0 -3
- package/model-function/generate-text/prompt-format/validateChatPrompt.js +10 -21
- package/model-function/generate-tool-call/NoSuchToolDefinitionError.cjs +41 -0
- package/model-function/generate-tool-call/NoSuchToolDefinitionError.d.ts +17 -0
- package/model-function/generate-tool-call/NoSuchToolDefinitionError.js +37 -0
- package/model-function/generate-tool-call/ToolCall.d.ts +5 -0
- package/model-function/generate-tool-call/ToolCallGenerationModel.d.ts +3 -3
- package/model-function/generate-tool-call/ToolCallParametersValidationError.cjs +1 -1
- package/model-function/generate-tool-call/ToolCallParametersValidationError.js +1 -1
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.cjs +2 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.d.ts +23 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationEvent.js +1 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.cjs +2 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.d.ts +21 -0
- package/model-function/generate-tool-call/ToolCallsOrTextGenerationModel.js +1 -0
- package/model-function/generate-tool-call/ToolDefinition.cjs +2 -0
- package/model-function/generate-tool-call/{ToolCallDefinition.d.ts → ToolDefinition.d.ts} +1 -1
- package/model-function/generate-tool-call/ToolDefinition.js +1 -0
- package/model-function/generate-tool-call/generateToolCall.cjs +2 -1
- package/model-function/generate-tool-call/generateToolCall.d.ts +6 -11
- package/model-function/generate-tool-call/generateToolCall.js +2 -1
- package/model-function/generate-tool-call/generateToolCallsOrText.cjs +63 -0
- package/model-function/generate-tool-call/generateToolCallsOrText.d.ts +33 -0
- package/model-function/generate-tool-call/generateToolCallsOrText.js +59 -0
- package/model-function/generate-tool-call/index.cjs +7 -2
- package/model-function/generate-tool-call/index.d.ts +7 -2
- package/model-function/generate-tool-call/index.js +7 -2
- package/model-provider/anthropic/AnthropicPromptFormat.cjs +22 -26
- package/model-provider/anthropic/AnthropicPromptFormat.d.ts +4 -2
- package/model-provider/anthropic/AnthropicPromptFormat.js +19 -23
- package/model-provider/anthropic/AnthropicTextGenerationModel.cjs +2 -2
- package/model-provider/anthropic/AnthropicTextGenerationModel.js +3 -3
- package/model-provider/anthropic/index.cjs +14 -2
- package/model-provider/anthropic/index.d.ts +1 -1
- package/model-provider/anthropic/index.js +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.cjs +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +1 -1
- package/model-provider/cohere/CohereTextGenerationModel.js +4 -4
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs → LlamaCppBakLLaVA1Format.cjs} +4 -4
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.d.ts → LlamaCppBakLLaVA1Format.d.ts} +2 -2
- package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js → LlamaCppBakLLaVA1Format.js} +2 -2
- package/model-provider/llamacpp/index.cjs +14 -2
- package/model-provider/llamacpp/index.d.ts +1 -1
- package/model-provider/llamacpp/index.js +1 -1
- package/model-provider/openai/OpenAICompletionModel.cjs +4 -4
- package/model-provider/openai/OpenAICompletionModel.d.ts +1 -1
- package/model-provider/openai/OpenAICompletionModel.js +5 -5
- package/model-provider/openai/chat/OpenAIChatMessage.d.ts +4 -1
- package/model-provider/openai/chat/OpenAIChatModel.cjs +29 -3
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +63 -16
- package/model-provider/openai/chat/OpenAIChatModel.js +30 -4
- package/model-provider/openai/chat/OpenAIChatPromptFormat.cjs +22 -34
- package/model-provider/openai/chat/OpenAIChatPromptFormat.d.ts +2 -2
- package/model-provider/openai/chat/OpenAIChatPromptFormat.js +19 -31
- package/model-provider/openai/index.cjs +14 -2
- package/model-provider/openai/index.d.ts +1 -1
- package/model-provider/openai/index.js +1 -1
- package/package.json +2 -2
- package/tool/Tool.cjs +1 -1
- package/tool/Tool.d.ts +1 -1
- package/tool/Tool.js +1 -1
- /package/model-function/generate-tool-call/{ToolCallDefinition.cjs → ToolCall.cjs} +0 -0
- /package/model-function/generate-tool-call/{ToolCallDefinition.js → ToolCall.js} +0 -0
package/model-function/generate-tool-call/generateToolCallsOrText.js
@@ -0,0 +1,59 @@
+import { executeStandardCall } from "../executeStandardCall.js";
+import { NoSuchToolDefinitionError } from "./NoSuchToolDefinitionError.js";
+import { ToolCallParametersValidationError } from "./ToolCallParametersValidationError.js";
+export async function generateToolCallsOrText(model, tools, prompt, options) {
+    // Note: PROMPT must not be a function.
+    const expandedPrompt = typeof prompt === "function"
+        ? prompt(tools)
+        : prompt;
+    const fullResponse = await executeStandardCall({
+        functionType: "generate-tool-calls-or-text",
+        input: expandedPrompt,
+        model,
+        options,
+        generateResponse: async (options) => {
+            const result = await model.doGenerateToolCallsOrText(tools, expandedPrompt, options);
+            const { text, toolCalls: rawToolCalls } = result;
+            // no tool calls:
+            if (rawToolCalls == null) {
+                return {
+                    response: result.response,
+                    extractedValue: { text, toolCalls: null },
+                    usage: result.usage,
+                };
+            }
+            // map tool calls:
+            const toolCalls = rawToolCalls.map((rawToolCall) => {
+                const tool = tools.find((tool) => tool.name === rawToolCall.name);
+                if (tool == undefined) {
+                    throw new NoSuchToolDefinitionError({
+                        toolName: rawToolCall.name,
+                        parameters: rawToolCall.parameters,
+                    });
+                }
+                const parseResult = tool.parameters.validate(rawToolCall.parameters);
+                if (!parseResult.success) {
+                    throw new ToolCallParametersValidationError({
+                        toolName: tool.name,
+                        parameters: rawToolCall.parameters,
+                        cause: parseResult.error,
+                    });
+                }
+                return {
+                    id: rawToolCall.id,
+                    name: tool.name,
+                    parameters: parseResult.data,
+                };
+            });
+            return {
+                response: result.response,
+                extractedValue: {
+                    text,
+                    toolCalls: toolCalls,
+                },
+                usage: result.usage,
+            };
+        },
+    });
+    return options?.returnType === "full" ? fullResponse : fullResponse.value;
+}
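For orientation, here is a minimal sketch (not part of the package) of how the new generateToolCallsOrText function could be called. It assumes only the contract visible in the hunk above — tools expose `name` plus a `parameters` schema with `validate()`, and the model implements `doGenerateToolCallsOrText` — plus the assumption that the function is re-exported from the package root. The weather tool and the `run` helper are hypothetical.

import { generateToolCallsOrText } from "modelfusion";

// Hypothetical tool definition. The implementation above only relies on
// `name` and `parameters.validate(value)` returning
// `{ success: true, data }` or `{ success: false, error }`.
const getWeather = {
  name: "getWeather",
  parameters: {
    validate: (value: unknown) =>
      typeof (value as { city?: unknown })?.city === "string"
        ? { success: true as const, data: value as { city: string } }
        : { success: false as const, error: new Error("expected { city: string }") },
  },
};

// `model` stands in for any implementation of the new
// ToolCallsOrTextGenerationModel interface.
async function run(model: any) {
  const { text, toolCalls } = await generateToolCallsOrText(
    model,
    [getWeather],
    "What is the weather in Berlin?"
  );

  if (toolCalls == null) {
    console.log("text answer:", text); // the model chose not to call a tool
  } else {
    for (const call of toolCalls) {
      // `call.parameters` has already been validated against the tool schema
      console.log(call.id, call.name, call.parameters);
    }
  }
}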
package/model-function/generate-tool-call/index.cjs
@@ -14,8 +14,13 @@ var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
 Object.defineProperty(exports, "__esModule", { value: true });
-__exportStar(require("./
-__exportStar(require("./
+__exportStar(require("./ToolCall.cjs"), exports);
+__exportStar(require("./ToolCallGenerationError.cjs"), exports);
 __exportStar(require("./ToolCallGenerationEvent.cjs"), exports);
 __exportStar(require("./ToolCallGenerationModel.cjs"), exports);
+__exportStar(require("./ToolCallParametersValidationError.cjs"), exports);
+__exportStar(require("./ToolCallsOrTextGenerationEvent.cjs"), exports);
+__exportStar(require("./ToolCallsOrTextGenerationModel.cjs"), exports);
+__exportStar(require("./ToolDefinition.cjs"), exports);
 __exportStar(require("./generateToolCall.cjs"), exports);
+__exportStar(require("./generateToolCallsOrText.cjs"), exports);
package/model-function/generate-tool-call/index.d.ts
@@ -1,5 +1,10 @@
-export * from "./
-export * from "./
+export * from "./ToolCall.js";
+export * from "./ToolCallGenerationError.js";
 export * from "./ToolCallGenerationEvent.js";
 export * from "./ToolCallGenerationModel.js";
+export * from "./ToolCallParametersValidationError.js";
+export * from "./ToolCallsOrTextGenerationEvent.js";
+export * from "./ToolCallsOrTextGenerationModel.js";
+export * from "./ToolDefinition.js";
 export * from "./generateToolCall.js";
+export * from "./generateToolCallsOrText.js";
package/model-function/generate-tool-call/index.js
@@ -1,5 +1,10 @@
-export * from "./
-export * from "./
+export * from "./ToolCall.js";
+export * from "./ToolCallGenerationError.js";
 export * from "./ToolCallGenerationEvent.js";
 export * from "./ToolCallGenerationModel.js";
+export * from "./ToolCallParametersValidationError.js";
+export * from "./ToolCallsOrTextGenerationEvent.js";
+export * from "./ToolCallsOrTextGenerationModel.js";
+export * from "./ToolDefinition.js";
 export * from "./generateToolCall.js";
+export * from "./generateToolCallsOrText.js";
package/model-provider/anthropic/AnthropicPromptFormat.cjs
@@ -1,11 +1,11 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.mapChatPromptToAnthropicFormat = exports.mapInstructionPromptToAnthropicFormat = void 0;
+exports.chat = exports.instruction = void 0;
 const validateChatPrompt_js_1 = require("../../model-function/generate-text/prompt-format/validateChatPrompt.cjs");
 /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
-function mapInstructionPromptToAnthropicFormat() {
+function instruction() {
     return {
         format: (instruction) => {
             let text = "";
@@ -25,36 +25,32 @@ function mapInstructionPromptToAnthropicFormat() {
         stopSequences: [],
     };
 }
-exports.mapInstructionPromptToAnthropicFormat = mapInstructionPromptToAnthropicFormat;
+exports.instruction = instruction;
 /**
  * Formats a chat prompt as an Anthropic prompt.
+ *
+ * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
  */
-function mapChatPromptToAnthropicFormat() {
+function chat() {
     return {
         format: (chatPrompt) => {
            (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
-            let text = "";
-            for (
-            // ai message:
-            if ("ai" in message) {
-                text += `\n\nAssistant:${message.ai}`;
-                continue;
+            let text = chatPrompt.system != null ? `${chatPrompt.system}\n\n` : "";
+            for (const { role, content } of chatPrompt.messages) {
+                switch (role) {
+                    case "user": {
+                        text += `\n\nHuman:${content}`;
+                        break;
+                    }
+                    case "assistant": {
+                        text += `\n\nAssistant:${content}`;
+                        break;
+                    }
+                    default: {
+                        const _exhaustiveCheck = role;
+                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                    }
+                }
             }
-            // unsupported message:
-            throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
         }
         // AI message prefix:
         text += `\n\nAssistant:`;
@@ -63,4 +59,4 @@ function mapChatPromptToAnthropicFormat() {
         stopSequences: [],
     };
 }
-exports.mapChatPromptToAnthropicFormat = mapChatPromptToAnthropicFormat;
+exports.chat = chat;
package/model-provider/anthropic/AnthropicPromptFormat.d.ts
@@ -4,8 +4,10 @@ import { TextGenerationPromptFormat } from "../../model-function/generate-text/T
 /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
-export declare function mapInstructionPromptToAnthropicFormat(): TextGenerationPromptFormat<InstructionPrompt, string>;
+export declare function instruction(): TextGenerationPromptFormat<InstructionPrompt, string>;
 /**
  * Formats a chat prompt as an Anthropic prompt.
+ *
+ * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
  */
-export declare function mapChatPromptToAnthropicFormat(): TextGenerationPromptFormat<ChatPrompt, string>;
+export declare function chat(): TextGenerationPromptFormat<ChatPrompt, string>;
package/model-provider/anthropic/AnthropicPromptFormat.js
@@ -2,7 +2,7 @@ import { validateChatPrompt } from "../../model-function/generate-text/prompt-fo
 /**
  * Formats an instruction prompt as an Anthropic prompt.
  */
-export function mapInstructionPromptToAnthropicFormat() {
+export function instruction() {
     return {
         format: (instruction) => {
             let text = "";
@@ -24,33 +24,29 @@ export function mapInstructionPromptToAnthropicFormat() {
 }
 /**
  * Formats a chat prompt as an Anthropic prompt.
+ *
+ * @see https://docs.anthropic.com/claude/docs/constructing-a-prompt
  */
-export function mapChatPromptToAnthropicFormat() {
+export function chat() {
     return {
         format: (chatPrompt) => {
             validateChatPrompt(chatPrompt);
-            let text = "";
-            for (
-            // ai message:
-            if ("ai" in message) {
-                text += `\n\nAssistant:${message.ai}`;
-                continue;
+            let text = chatPrompt.system != null ? `${chatPrompt.system}\n\n` : "";
+            for (const { role, content } of chatPrompt.messages) {
+                switch (role) {
+                    case "user": {
+                        text += `\n\nHuman:${content}`;
+                        break;
+                    }
+                    case "assistant": {
+                        text += `\n\nAssistant:${content}`;
+                        break;
+                    }
+                    default: {
+                        const _exhaustiveCheck = role;
+                        throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
+                    }
+                }
             }
-            // unsupported message:
-            throw new Error(`Unsupported message: ${JSON.stringify(message)}`);
         }
         // AI message prefix:
         text += `\n\nAssistant:`;
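The switch statement above also documents the reworked ChatPrompt structure: an optional `system` string plus an array of `{ role, content }` messages, replacing the earlier per-message objects (the removed code checks `"ai" in message`). A sketch of what `chat().format(...)` produces for such a prompt, assuming the namespace export introduced further down in this diff:

import { AnthropicPromptFormat } from "modelfusion";

const prompt = {
  system: "You are a helpful assistant.",
  messages: [
    { role: "user" as const, content: "Why is the sky blue?" },
    { role: "assistant" as const, content: "Rayleigh scattering." },
    { role: "user" as const, content: "Explain it to a child." },
  ],
};

// Following the loop above, `text` becomes:
// "You are a helpful assistant.\n\n"
//   + "\n\nHuman:Why is the sky blue?"
//   + "\n\nAssistant:Rayleigh scattering."
//   + "\n\nHuman:Explain it to a child."
//   + "\n\nAssistant:"   // AI message prefix appended at the end
const text = AnthropicPromptFormat.chat().format(prompt);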
package/model-provider/anthropic/AnthropicTextGenerationModel.cjs
@@ -110,13 +110,13 @@ class AnthropicTextGenerationModel extends AbstractModel_js_1.AbstractModel {
      * Returns this model with an instruction prompt format.
      */
     withInstructionPrompt() {
-        return this.withPromptFormat((0, AnthropicPromptFormat_js_1.mapInstructionPromptToAnthropicFormat)());
+        return this.withPromptFormat((0, AnthropicPromptFormat_js_1.instruction)());
     }
     /**
      * Returns this model with a chat prompt format.
      */
     withChatPrompt() {
-        return this.withPromptFormat((0, AnthropicPromptFormat_js_1.mapChatPromptToAnthropicFormat)());
+        return this.withPromptFormat((0, AnthropicPromptFormat_js_1.chat)());
     }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
package/model-provider/anthropic/AnthropicTextGenerationModel.js
@@ -9,7 +9,7 @@ import { ZodSchema } from "../../core/schema/ZodSchema.js";
 import { parseJSON } from "../../core/schema/parseJSON.js";
 import { AnthropicApiConfiguration } from "./AnthropicApiConfiguration.js";
 import { failedAnthropicCallResponseHandler } from "./AnthropicError.js";
-import {
+import { instruction, chat } from "./AnthropicPromptFormat.js";
 export const ANTHROPIC_TEXT_GENERATION_MODELS = {
     "claude-instant-1": {
         contextWindowSize: 100000,
@@ -107,13 +107,13 @@ export class AnthropicTextGenerationModel extends AbstractModel {
      * Returns this model with an instruction prompt format.
      */
     withInstructionPrompt() {
-        return this.withPromptFormat(mapInstructionPromptToAnthropicFormat());
+        return this.withPromptFormat(instruction());
     }
     /**
      * Returns this model with a chat prompt format.
      */
     withChatPrompt() {
-        return this.withPromptFormat(mapChatPromptToAnthropicFormat());
+        return this.withPromptFormat(chat());
     }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel({
package/model-provider/anthropic/index.cjs
@@ -10,14 +10,26 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
 }));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
 var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.anthropicErrorDataSchema = exports.AnthropicError = void 0;
+exports.AnthropicPromptFormat = exports.anthropicErrorDataSchema = exports.AnthropicError = void 0;
 __exportStar(require("./AnthropicApiConfiguration.cjs"), exports);
 var AnthropicError_js_1 = require("./AnthropicError.cjs");
 Object.defineProperty(exports, "AnthropicError", { enumerable: true, get: function () { return AnthropicError_js_1.AnthropicError; } });
 Object.defineProperty(exports, "anthropicErrorDataSchema", { enumerable: true, get: function () { return AnthropicError_js_1.anthropicErrorDataSchema; } });
-__exportStar(require("./AnthropicPromptFormat.cjs"), exports);
+exports.AnthropicPromptFormat = __importStar(require("./AnthropicPromptFormat.cjs"));
 __exportStar(require("./AnthropicTextGenerationModel.cjs"), exports);
package/model-provider/anthropic/index.d.ts
@@ -1,4 +1,4 @@
 export * from "./AnthropicApiConfiguration.js";
 export { AnthropicError, anthropicErrorDataSchema } from "./AnthropicError.js";
-export * from "./AnthropicPromptFormat.js";
+export * as AnthropicPromptFormat from "./AnthropicPromptFormat.js";
 export * from "./AnthropicTextGenerationModel.js";
package/model-provider/anthropic/index.js
@@ -1,4 +1,4 @@
 export * from "./AnthropicApiConfiguration.js";
 export { AnthropicError, anthropicErrorDataSchema } from "./AnthropicError.js";
-export * from "./AnthropicPromptFormat.js";
+export * as AnthropicPromptFormat from "./AnthropicPromptFormat.js";
 export * from "./AnthropicTextGenerationModel.js";
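The three index changes above switch AnthropicPromptFormat from a flat re-export to a namespace export, which is what lets the format functions carry short names like `instruction` and `chat` without colliding with other providers' formats. Assuming the provider index is re-exported from the package root, a migration looks roughly like:

// 0.69.0 (flat re-export):
// import { mapChatPromptToAnthropicFormat } from "modelfusion";
// const promptFormat = mapChatPromptToAnthropicFormat();

// 0.71.0 (namespace export):
import { AnthropicPromptFormat } from "modelfusion";
const promptFormat = AnthropicPromptFormat.chat();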
package/model-provider/cohere/CohereTextGenerationModel.cjs
@@ -4,6 +4,7 @@ exports.CohereTextGenerationResponseFormat = exports.CohereTextGenerationModel =
 const zod_1 = require("zod");
 const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
 const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
@@ -13,7 +14,6 @@ const parseJsonStream_js_1 = require("../../util/streaming/parseJsonStream.cjs")
 const CohereApiConfiguration_js_1 = require("./CohereApiConfiguration.cjs");
 const CohereError_js_1 = require("./CohereError.cjs");
 const CohereTokenizer_js_1 = require("./CohereTokenizer.cjs");
-const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 exports.COHERE_TEXT_GENERATION_MODELS = {
     command: {
         contextWindowSize: 2048,
@@ -137,13 +137,13 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
      * Returns this model with an instruction prompt format.
      */
     withInstructionPrompt() {
-        return this.withPromptFormat((0, TextPromptFormat_js_1.
+        return this.withPromptFormat((0, TextPromptFormat_js_1.instruction)());
     }
     /**
      * Returns this model with a chat prompt format.
      */
     withChatPrompt(options) {
-        return this.withPromptFormat((0, TextPromptFormat_js_1.
+        return this.withPromptFormat((0, TextPromptFormat_js_1.chat)(options));
     }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
package/model-provider/cohere/CohereTextGenerationModel.d.ts
@@ -93,7 +93,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
     */
    withChatPrompt(options?: {
        user?: string;
-        ai?: string;
+        assistant?: string;
    }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, CohereTextGenerationModelSettings, this>;
    withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, CohereTextGenerationModelSettings, this>;
    withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
package/model-provider/cohere/CohereTextGenerationModel.js
@@ -1,16 +1,16 @@
 import { z } from "zod";
 import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+import { ZodSchema } from "../../core/schema/ZodSchema.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
-import {
+import { chat, instruction, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { parseJsonStream } from "../../util/streaming/parseJsonStream.js";
 import { CohereApiConfiguration } from "./CohereApiConfiguration.js";
 import { failedCohereCallResponseHandler } from "./CohereError.js";
 import { CohereTokenizer } from "./CohereTokenizer.js";
-import { ZodSchema } from "../../core/schema/ZodSchema.js";
 export const COHERE_TEXT_GENERATION_MODELS = {
     command: {
         contextWindowSize: 2048,
@@ -134,13 +134,13 @@ export class CohereTextGenerationModel extends AbstractModel {
      * Returns this model with an instruction prompt format.
      */
     withInstructionPrompt() {
-        return this.withPromptFormat(
+        return this.withPromptFormat(instruction());
     }
     /**
      * Returns this model with a chat prompt format.
      */
     withChatPrompt(options) {
-        return this.withPromptFormat(
+        return this.withPromptFormat(chat(options));
     }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel({
package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs → LlamaCppBakLLaVA1Format.cjs}
@@ -1,15 +1,15 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.mapInstructionPromptToBakLLaVA1ForLlamaCppFormat = void 0;
+exports.instruction = void 0;
 // default Vicuna 1 system message
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
 /**
- * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
  * @see https://github.com/SkunkworksAI/BakLLaVA
 */
-function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat() {
+function instruction() {
    return {
        format: (instruction) => {
            let text = "";
@@ -33,4 +33,4 @@ function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat() {
        stopSequences: [`\nUSER:`],
    };
}
-exports.mapInstructionPromptToBakLLaVA1ForLlamaCppFormat = mapInstructionPromptToBakLLaVA1ForLlamaCppFormat;
+exports.instruction = instruction;
package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.d.ts → LlamaCppBakLLaVA1Format.d.ts}
@@ -2,8 +2,8 @@ import { InstructionPrompt } from "../../model-function/generate-text/prompt-for
 import { TextGenerationPromptFormat } from "../../model-function/generate-text/TextGenerationPromptFormat.js";
 import { LlamaCppTextGenerationPrompt } from "./LlamaCppTextGenerationModel.js";
 /**
- * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
  * @see https://github.com/SkunkworksAI/BakLLaVA
 */
-export declare function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat(): TextGenerationPromptFormat<InstructionPrompt, LlamaCppTextGenerationPrompt>;
+export declare function instruction(): TextGenerationPromptFormat<InstructionPrompt, LlamaCppTextGenerationPrompt>;
package/model-provider/llamacpp/{mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js → LlamaCppBakLLaVA1Format.js}
@@ -2,11 +2,11 @@
 const DEFAULT_SYSTEM_MESSAGE = "A chat between a curious user and an artificial intelligence assistant. " +
     "The assistant gives helpful, detailed, and polite answers to the user's questions.";
 /**
- * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp
+ * BakLLaVA 1 uses a Vicuna 1 prompt. This mapping combines it with the LlamaCpp prompt structure.
  *
  * @see https://github.com/SkunkworksAI/BakLLaVA
 */
-export function mapInstructionPromptToBakLLaVA1ForLlamaCppFormat() {
+export function instruction() {
     return {
         format: (instruction) => {
             let text = "";
package/model-provider/llamacpp/index.cjs
@@ -10,15 +10,27 @@ var __createBinding = (this && this.__createBinding) || (Object.create ? (functi
     if (k2 === undefined) k2 = k;
     o[k2] = m[k];
 }));
+var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+    Object.defineProperty(o, "default", { enumerable: true, value: v });
+}) : function(o, v) {
+    o["default"] = v;
+});
 var __exportStar = (this && this.__exportStar) || function(m, exports) {
     for (var p in m) if (p !== "default" && !Object.prototype.hasOwnProperty.call(exports, p)) __createBinding(exports, m, p);
 };
+var __importStar = (this && this.__importStar) || function (mod) {
+    if (mod && mod.__esModule) return mod;
+    var result = {};
+    if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+    __setModuleDefault(result, mod);
+    return result;
+};
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.LlamaCppError = void 0;
+exports.LlamaCppError = exports.LlamaCppBakLLaVA1Format = void 0;
 __exportStar(require("./LlamaCppApiConfiguration.cjs"), exports);
+exports.LlamaCppBakLLaVA1Format = __importStar(require("./LlamaCppBakLLaVA1Format.cjs"));
 var LlamaCppError_js_1 = require("./LlamaCppError.cjs");
 Object.defineProperty(exports, "LlamaCppError", { enumerable: true, get: function () { return LlamaCppError_js_1.LlamaCppError; } });
 __exportStar(require("./LlamaCppTextEmbeddingModel.cjs"), exports);
 __exportStar(require("./LlamaCppTextGenerationModel.cjs"), exports);
 __exportStar(require("./LlamaCppTokenizer.cjs"), exports);
-__exportStar(require("./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.cjs"), exports);
package/model-provider/llamacpp/index.d.ts
@@ -1,6 +1,6 @@
 export * from "./LlamaCppApiConfiguration.js";
+export * as LlamaCppBakLLaVA1Format from "./LlamaCppBakLLaVA1Format.js";
 export { LlamaCppError, LlamaCppErrorData } from "./LlamaCppError.js";
 export * from "./LlamaCppTextEmbeddingModel.js";
 export * from "./LlamaCppTextGenerationModel.js";
 export * from "./LlamaCppTokenizer.js";
-export * from "./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js";
package/model-provider/llamacpp/index.js
@@ -1,6 +1,6 @@
 export * from "./LlamaCppApiConfiguration.js";
+export * as LlamaCppBakLLaVA1Format from "./LlamaCppBakLLaVA1Format.js";
 export { LlamaCppError } from "./LlamaCppError.js";
 export * from "./LlamaCppTextEmbeddingModel.js";
 export * from "./LlamaCppTextGenerationModel.js";
 export * from "./LlamaCppTokenizer.js";
-export * from "./mapInstructionPromptToBakLLaVA1ForLlamaCppFormat.js";
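Same pattern for llama.cpp: the renamed BakLLaVA 1 mapping is now reached through the LlamaCppBakLLaVA1Format namespace. A sketch of wiring it into a model follows; the constructor settings are omitted, and `withPromptFormat` is assumed to exist on LlamaCppTextGenerationModel as it does on the other models in this diff.

import {
  LlamaCppBakLLaVA1Format,
  LlamaCppTextGenerationModel,
} from "modelfusion";

// Attach the Vicuna-1-style BakLLaVA 1 instruction mapping:
const model = new LlamaCppTextGenerationModel({}).withPromptFormat(
  LlamaCppBakLLaVA1Format.instruction()
);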
package/model-provider/openai/OpenAICompletionModel.cjs
@@ -4,17 +4,17 @@ exports.OpenAITextResponseFormat = exports.OpenAICompletionModel = exports.calcu
 const zod_1 = require("zod");
 const callWithRetryAndThrottle_js_1 = require("../../core/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../core/api/postToApi.cjs");
+const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
+const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const PromptFormatTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptFormatTextStreamingModel.cjs");
 const TextPromptFormat_js_1 = require("../../model-function/generate-text/prompt-format/TextPromptFormat.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
 const AsyncQueue_js_1 = require("../../util/AsyncQueue.cjs");
-const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
 const parseEventSourceStream_js_1 = require("../../util/streaming/parseEventSourceStream.cjs");
 const OpenAIApiConfiguration_js_1 = require("./OpenAIApiConfiguration.cjs");
 const OpenAIError_js_1 = require("./OpenAIError.cjs");
 const TikTokenTokenizer_js_1 = require("./TikTokenTokenizer.cjs");
-const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
 /**
  * @see https://platform.openai.com/docs/models/
  * @see https://openai.com/pricing
@@ -242,13 +242,13 @@ class OpenAICompletionModel extends AbstractModel_js_1.AbstractModel {
      * Returns this model with an instruction prompt format.
      */
     withInstructionPrompt() {
-        return this.withPromptFormat((0, TextPromptFormat_js_1.
+        return this.withPromptFormat((0, TextPromptFormat_js_1.instruction)());
     }
     /**
      * Returns this model with a chat prompt format.
      */
     withChatPrompt(options) {
-        return this.withPromptFormat((0, TextPromptFormat_js_1.
+        return this.withPromptFormat((0, TextPromptFormat_js_1.chat)(options));
     }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel_js_1.PromptFormatTextStreamingModel({
package/model-provider/openai/OpenAICompletionModel.d.ts
@@ -182,7 +182,7 @@ export declare class OpenAICompletionModel extends AbstractModel<OpenAICompletio
     */
    withChatPrompt(options?: {
        user?: string;
-        ai?: string;
+        assistant?: string;
    }): PromptFormatTextStreamingModel<import("../../index.js").ChatPrompt, string, OpenAICompletionModelSettings, this>;
    withPromptFormat<INPUT_PROMPT>(promptFormat: TextGenerationPromptFormat<INPUT_PROMPT, string>): PromptFormatTextStreamingModel<INPUT_PROMPT, string, OpenAICompletionModelSettings, this>;
    withSettings(additionalSettings: Partial<OpenAICompletionModelSettings>): this;
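This one-line change is the user-visible side of the ai → assistant rename that runs through the whole release: the withChatPrompt option that labels assistant turns is now called `assistant`. A sketch follows; the model name and the exact semantics of the labels are assumptions.

import { OpenAICompletionModel } from "modelfusion";

const model = new OpenAICompletionModel({
  model: "gpt-3.5-turbo-instruct", // hypothetical settings
}).withChatPrompt({
  user: "Customer",
  assistant: "Agent", // was `ai?: string` before this release
});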
package/model-provider/openai/OpenAICompletionModel.js
@@ -1,17 +1,17 @@
 import { z } from "zod";
 import { callWithRetryAndThrottle } from "../../core/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../core/api/postToApi.js";
+import { ZodSchema } from "../../core/schema/ZodSchema.js";
+import { parseJSON } from "../../core/schema/parseJSON.js";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { PromptFormatTextStreamingModel } from "../../model-function/generate-text/PromptFormatTextStreamingModel.js";
-import {
+import { chat, instruction, } from "../../model-function/generate-text/prompt-format/TextPromptFormat.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
 import { AsyncQueue } from "../../util/AsyncQueue.js";
-import { parseJSON } from "../../core/schema/parseJSON.js";
 import { parseEventSourceStream } from "../../util/streaming/parseEventSourceStream.js";
 import { OpenAIApiConfiguration } from "./OpenAIApiConfiguration.js";
 import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
 import { TikTokenTokenizer } from "./TikTokenTokenizer.js";
-import { ZodSchema } from "../../core/schema/ZodSchema.js";
 /**
  * @see https://platform.openai.com/docs/models/
  * @see https://openai.com/pricing
@@ -236,13 +236,13 @@ export class OpenAICompletionModel extends AbstractModel {
      * Returns this model with an instruction prompt format.
      */
     withInstructionPrompt() {
-        return this.withPromptFormat(
+        return this.withPromptFormat(instruction());
     }
     /**
      * Returns this model with a chat prompt format.
      */
     withChatPrompt(options) {
-        return this.withPromptFormat(
+        return this.withPromptFormat(chat(options));
     }
     withPromptFormat(promptFormat) {
         return new PromptFormatTextStreamingModel({