modelfusion 0.13.0 → 0.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -5
- package/model-function/generate-text/TextGenerationModel.d.ts +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.cjs +5 -5
- package/model-provider/cohere/CohereTextGenerationModel.d.ts +3 -3
- package/model-provider/cohere/CohereTextGenerationModel.js +5 -5
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +4 -4
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +3 -3
- package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +4 -4
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +5 -5
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +3 -3
- package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +5 -5
- package/model-provider/openai/OpenAITextGenerationModel.cjs +5 -5
- package/model-provider/openai/OpenAITextGenerationModel.d.ts +3 -3
- package/model-provider/openai/OpenAITextGenerationModel.js +5 -5
- package/model-provider/openai/chat/OpenAIChatModel.cjs +5 -5
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +3 -3
- package/model-provider/openai/chat/OpenAIChatModel.js +5 -5
- package/package.json +1 -1
- package/prompt/{AlpacaPromptMapping.cjs → AlpacaPromptFormat.cjs} +5 -5
- package/prompt/{AlpacaPromptMapping.d.ts → AlpacaPromptFormat.d.ts} +3 -3
- package/prompt/{AlpacaPromptMapping.js → AlpacaPromptFormat.js} +3 -3
- package/prompt/{Llama2PromptMapping.cjs → Llama2PromptFormat.cjs} +11 -8
- package/prompt/Llama2PromptFormat.d.ts +13 -0
- package/prompt/{Llama2PromptMapping.js → Llama2PromptFormat.js} +8 -5
- package/prompt/{OpenAIChatPromptMapping.cjs → OpenAIChatPromptFormat.cjs} +13 -7
- package/prompt/OpenAIChatPromptFormat.d.ts +12 -0
- package/prompt/{OpenAIChatPromptMapping.js → OpenAIChatPromptFormat.js} +10 -4
- package/prompt/PromptFormat.d.ts +14 -0
- package/prompt/{PromptMappingTextGenerationModel.cjs → PromptFormatTextGenerationModel.cjs} +19 -19
- package/prompt/{PromptMappingTextGenerationModel.d.ts → PromptFormatTextGenerationModel.d.ts} +6 -6
- package/prompt/{PromptMappingTextGenerationModel.js → PromptFormatTextGenerationModel.js} +17 -17
- package/prompt/{TextPromptMapping.cjs → TextPromptFormat.cjs} +11 -8
- package/prompt/TextPromptFormat.d.ts +17 -0
- package/prompt/{TextPromptMapping.js → TextPromptFormat.js} +8 -5
- package/prompt/{VicunaPromptMapping.cjs → VicunaPromptFormat.cjs} +5 -5
- package/prompt/{VicunaPromptMapping.d.ts → VicunaPromptFormat.d.ts} +3 -3
- package/prompt/{VicunaPromptMapping.js → VicunaPromptFormat.js} +3 -3
- package/prompt/chat/trimChatPrompt.cjs +1 -1
- package/prompt/chat/trimChatPrompt.d.ts +1 -1
- package/prompt/chat/trimChatPrompt.js +1 -1
- package/prompt/index.cjs +7 -7
- package/prompt/index.d.ts +7 -7
- package/prompt/index.js +7 -7
- package/tool/WebSearchTool.cjs +7 -28
- package/tool/WebSearchTool.d.ts +6 -67
- package/tool/WebSearchTool.js +7 -28
- package/tool/executeTool.cjs +1 -0
- package/tool/executeTool.d.ts +5 -4
- package/tool/executeTool.js +1 -0
- package/prompt/Llama2PromptMapping.d.ts +0 -10
- package/prompt/OpenAIChatPromptMapping.d.ts +0 -6
- package/prompt/PromptMapping.d.ts +0 -7
- package/prompt/TextPromptMapping.d.ts +0 -14
- /package/prompt/{PromptMapping.cjs → PromptFormat.cjs} +0 -0
- /package/prompt/{PromptMapping.js → PromptFormat.js} +0 -0
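The file list shows the theme of this release: the prompt mapping abstraction is renamed to prompt format (`PromptMapping` → `PromptFormat`, `PromptMappingTextGenerationModel` → `PromptFormatTextGenerationModel`, `InstructionToLlama2PromptMapping()` → `Llama2InstructionPromptFormat()`, and so on). A minimal sketch of an updated call site, assuming the renamed symbols are re-exported from the package root as before:

```ts
import {
  generateText,
  LlamaCppTextGenerationModel,
  Llama2InstructionPromptFormat,
} from "modelfusion";

// 0.14.0 style: attach a prompt format to the model with withPromptFormat()
// (in 0.13.x this was a prompt mapping, e.g. InstructionToLlama2PromptMapping()).
const text = await generateText(
  new LlamaCppTextGenerationModel({
    contextWindowSize: 4096, // Llama 2 context window size
    nPredict: 1000,
  }).withPromptFormat(Llama2InstructionPromptFormat()),
  {
    system: "You are a story writer.",
    instruction: "Write a short story about a robot learning to love.",
  }
);
```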
package/README.md
CHANGED
@@ -43,7 +43,7 @@ You can provide API keys for the different [integrations](https://modelfusion.de
 
 Generate text using a language model and a prompt.
 You can stream the text if it is supported by the model.
-You can use [prompt mappings](https://modelfusion.dev/guide/function/generate-text/prompt-mapping) to change the prompt format of a model.
+You can use [prompt formats](https://modelfusion.dev/guide/function/generate-text/prompt-format) to change the prompt format of a model.
 
 #### generateText
 
@@ -70,16 +70,16 @@ for await (const textFragment of textStream) {
 }
 ```
 
-#### Prompt Mapping
+#### Prompt Format
 
-[Prompt mapping](https://modelfusion.dev/guide/function/generate-text/prompt-mapping) lets you use higher level prompt structures (such as instruction or chat prompts) for different models.
+[Prompt format](https://modelfusion.dev/guide/function/generate-text/prompt-format) lets you use higher level prompt structures (such as instruction or chat prompts) for different models.
 
 ```ts
 const text = await generateText(
   new LlamaCppTextGenerationModel({
     contextWindowSize: 4096, // Llama 2 context window size
     nPredict: 1000,
-  }).mapPrompt(InstructionToLlama2PromptMapping()),
+  }).withPromptFormat(Llama2InstructionPromptFormat()),
   {
     system: "You are a story writer.",
     instruction: "Write a short story about a robot learning to love.",
@@ -91,7 +91,7 @@ const text = await generateText(
 const textStream = await streamText(
   new OpenAIChatModel({
     model: "gpt-3.5-turbo",
-  }).mapPrompt(ChatToOpenAIChatPromptMapping()),
+  }).withPromptFormat(OpenAIChatChatPromptFormat()),
   [
     { system: "You are a celebrated poet." },
     { user: "Write a short story about a robot learning to love." },
package/model-function/generate-text/TextGenerationModel.d.ts
CHANGED
@@ -1,5 +1,5 @@
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { FunctionOptions } from "../FunctionOptions.js";
 import { Model, ModelSettings } from "../Model.js";
 import { BasicTokenizer, FullTokenizer } from "../tokenize-text/Tokenizer.js";
@@ -24,7 +24,7 @@ export interface TextGenerationModel<PROMPT, RESPONSE, FULL_DELTA, SETTINGS exte
      * Optional. Implement for streaming support.
      */
    readonly extractTextDelta: ((fullDelta: FULL_DELTA) => string | undefined) | undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, PROMPT>): PromptMappingTextGenerationModel<INPUT_PROMPT, PROMPT, RESPONSE, FULL_DELTA, SETTINGS, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>): PromptFormatTextGenerationModel<INPUT_PROMPT, PROMPT, RESPONSE, FULL_DELTA, SETTINGS, this>;
     /**
      * Maximum number of tokens to generate.
      */
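The `PromptFormat` interface itself (`package/prompt/PromptFormat.d.ts`, +14 lines) is not expanded in this diff. From the call sites in the renamed files, its shape is roughly the following sketch; the actual declaration may differ in details:

```ts
// Inferred from usage in this diff: promptFormat.format(...) converts the
// higher-level prompt, and promptFormat.stopTokens feeds withStopTokens(...).
interface PromptFormat<SOURCE_PROMPT, TARGET_PROMPT> {
  // Maps the higher-level prompt (e.g. an instruction or chat prompt)
  // to the prompt type that the underlying model expects.
  format(sourcePrompt: SOURCE_PROMPT): TARGET_PROMPT;
  // Stop tokens to configure on the model, e.g. the Llama 2 end-of-segment token.
  stopTokens: string[];
}
```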
package/model-provider/cohere/CohereTextGenerationModel.cjs
CHANGED
@@ -9,7 +9,7 @@ const zod_1 = require("zod");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const AsyncQueue_js_1 = require("../../model-function/generate-text/AsyncQueue.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../util/api/postToApi.cjs");
 const CohereError_js_1 = require("./CohereError.cjs");
@@ -122,10 +122,10 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta.delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/cohere/CohereTextGenerationModel.d.ts
CHANGED
@@ -3,8 +3,8 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { FunctionOptions } from "../../model-function/FunctionOptions.js";
 import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 import { ResponseHandler } from "../../util/api/postToApi.js";
@@ -92,7 +92,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
     extractText(response: CohereTextGenerationResponse): string;
     generateDeltaStreamResponse(prompt: string, options?: FunctionOptions<CohereTextGenerationModelSettings>): Promise<AsyncIterable<DeltaEvent<CohereTextGenerationDelta>>>;
     extractTextDelta(fullDelta: CohereTextGenerationDelta): string | undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, CohereTextGenerationResponse, CohereTextGenerationDelta, CohereTextGenerationModelSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, CohereTextGenerationResponse, CohereTextGenerationDelta, CohereTextGenerationModelSettings, this>;
     withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
     get maxCompletionTokens(): number | undefined;
     withMaxCompletionTokens(maxCompletionTokens: number): this;
package/model-provider/cohere/CohereTextGenerationModel.js
CHANGED
@@ -3,7 +3,7 @@ import { z } from "zod";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { AsyncQueue } from "../../model-function/generate-text/AsyncQueue.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../util/api/postToApi.js";
 import { failedCohereCallResponseHandler } from "./CohereError.js";
@@ -116,10 +116,10 @@ export class CohereTextGenerationModel extends AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta.delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
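The same two-line change repeats in every provider below: `withPromptFormat()` wraps the model in a `PromptFormatTextGenerationModel`, and providers that support stop sequences (Cohere, Llama.cpp, OpenAI) also apply `promptFormat.stopTokens` via `withStopTokens()`. Conceptually the wrapper does something like this simplified, hypothetical sketch (the real class is only renamed in this diff, +19/-19, and is not expanded here):

```ts
type PromptFormat<SOURCE_PROMPT, TARGET_PROMPT> = {
  format: (sourcePrompt: SOURCE_PROMPT) => TARGET_PROMPT;
  stopTokens: string[];
};

// Hypothetical simplification: convert the prompt with the configured
// prompt format, then delegate to the wrapped model.
class PromptFormatTextGenerationModel<INPUT_PROMPT, PROMPT> {
  constructor(
    private readonly options: {
      model: { generateTextResponse(prompt: PROMPT): PromiseLike<unknown> };
      promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>;
    }
  ) {}

  generateTextResponse(prompt: INPUT_PROMPT) {
    const { model, promptFormat } = this.options;
    return model.generateTextResponse(promptFormat.format(prompt));
  }
}
```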
package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs
CHANGED
@@ -9,7 +9,7 @@ const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../util/api/postToApi.cjs");
 const HuggingFaceError_js_1 = require("./HuggingFaceError.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../prompt/PromptFormatTextGenerationModel.cjs");
 /**
  * Create a text generation model that calls a Hugging Face Inference API Text Generation Task.
  *
@@ -103,10 +103,10 @@ class HuggingFaceTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     extractText(response) {
         return response[0].generated_text;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
             model: this,
-            promptMapping,
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts
CHANGED
@@ -4,8 +4,8 @@ import { FunctionOptions } from "../../model-function/FunctionOptions.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 export interface HuggingFaceTextGenerationModelSettings extends TextGenerationModelSettings {
     model: string;
     baseUrl?: string;
@@ -58,7 +58,7 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
     extractText(response: HuggingFaceTextGenerationResponse): string;
     generateDeltaStreamResponse: undefined;
     extractTextDelta: undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, HuggingFaceTextGenerationResponse, undefined, HuggingFaceTextGenerationModelSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, HuggingFaceTextGenerationResponse, undefined, HuggingFaceTextGenerationModelSettings, this>;
     withSettings(additionalSettings: Partial<HuggingFaceTextGenerationModelSettings>): this;
     get maxCompletionTokens(): number | undefined;
     withMaxCompletionTokens(maxCompletionTokens: number): this;
package/model-provider/huggingface/HuggingFaceTextGenerationModel.js
CHANGED
@@ -3,7 +3,7 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { callWithRetryAndThrottle } from "../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../util/api/postToApi.js";
 import { failedHuggingFaceCallResponseHandler } from "./HuggingFaceError.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 /**
  * Create a text generation model that calls a Hugging Face Inference API Text Generation Task.
  *
@@ -97,10 +97,10 @@ export class HuggingFaceTextGenerationModel extends AbstractModel {
     extractText(response) {
         return response[0].generated_text;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
             model: this,
-            promptMapping,
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs
CHANGED
@@ -9,7 +9,7 @@ const zod_1 = __importDefault(require("zod"));
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const AsyncQueue_js_1 = require("../../model-function/generate-text/AsyncQueue.cjs");
 const parseEventSourceReadableStream_js_1 = require("../../model-function/generate-text/parseEventSourceReadableStream.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../util/api/postToApi.cjs");
 const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
@@ -76,10 +76,10 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta.delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts
CHANGED
@@ -3,8 +3,8 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { FunctionOptions } from "../../model-function/FunctionOptions.js";
 import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 import { ResponseHandler } from "../../util/api/postToApi.js";
@@ -102,7 +102,7 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
     extractText(response: LlamaCppTextGenerationResponse): string;
     generateDeltaStreamResponse(prompt: string, options?: FunctionOptions<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): Promise<AsyncIterable<DeltaEvent<LlamaCppTextGenerationDelta>>>;
     extractTextDelta(fullDelta: LlamaCppTextGenerationDelta): string | undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, LlamaCppTextGenerationResponse, LlamaCppTextGenerationDelta, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, LlamaCppTextGenerationResponse, LlamaCppTextGenerationDelta, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
     get maxCompletionTokens(): number | undefined;
     withMaxCompletionTokens(maxCompletionTokens: number): this;
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js
CHANGED
@@ -3,7 +3,7 @@ import z from "zod";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { AsyncQueue } from "../../model-function/generate-text/AsyncQueue.js";
 import { parseEventSourceReadableStream } from "../../model-function/generate-text/parseEventSourceReadableStream.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../util/api/postToApi.js";
 import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
@@ -70,10 +70,10 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta.delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/openai/OpenAITextGenerationModel.cjs
CHANGED
@@ -10,7 +10,7 @@ const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const AsyncQueue_js_1 = require("../../model-function/generate-text/AsyncQueue.cjs");
 const parseEventSourceReadableStream_js_1 = require("../../model-function/generate-text/parseEventSourceReadableStream.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../util/api/postToApi.cjs");
 const OpenAIError_js_1 = require("./OpenAIError.cjs");
@@ -156,10 +156,10 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta[0].delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/openai/OpenAITextGenerationModel.d.ts
CHANGED
@@ -3,8 +3,8 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { FunctionOptions } from "../../model-function/FunctionOptions.js";
 import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 import { ResponseHandler } from "../../util/api/postToApi.js";
@@ -134,7 +134,7 @@ export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextG
     extractText(response: OpenAITextGenerationResponse): string;
     generateDeltaStreamResponse(prompt: string, options?: FunctionOptions<OpenAITextGenerationModelSettings>): Promise<AsyncIterable<DeltaEvent<OpenAITextGenerationDelta>>>;
     extractTextDelta(fullDelta: OpenAITextGenerationDelta): string | undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings, this>;
     withSettings(additionalSettings: Partial<OpenAITextGenerationModelSettings>): this;
     get maxCompletionTokens(): number | undefined;
     withMaxCompletionTokens(maxCompletionTokens: number): this;
package/model-provider/openai/OpenAITextGenerationModel.js
CHANGED
@@ -4,7 +4,7 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { AsyncQueue } from "../../model-function/generate-text/AsyncQueue.js";
 import { parseEventSourceReadableStream } from "../../model-function/generate-text/parseEventSourceReadableStream.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../util/api/postToApi.js";
 import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
@@ -148,10 +148,10 @@ export class OpenAITextGenerationModel extends AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta[0].delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
        });
    }
    withSettings(additionalSettings) {
package/model-provider/openai/chat/OpenAIChatModel.cjs
CHANGED
@@ -7,7 +7,7 @@ exports.OpenAIChatResponseFormat = exports.OpenAIChatModel = exports.calculateOp
 const secure_json_parse_1 = __importDefault(require("secure-json-parse"));
 const zod_1 = __importDefault(require("zod"));
 const AbstractModel_js_1 = require("../../../model-function/AbstractModel.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../../util/api/postToApi.cjs");
 const OpenAIError_js_1 = require("../OpenAIError.cjs");
@@ -205,10 +205,10 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
         const jsonText = response.choices[0].message.function_call.arguments;
         return secure_json_parse_1.default.parse(jsonText);
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/openai/chat/OpenAIChatModel.d.ts
CHANGED
@@ -5,8 +5,8 @@ import { GenerateJsonModel } from "../../../model-function/generate-json/Generat
 import { GenerateJsonOrTextModel } from "../../../model-function/generate-json/GenerateJsonOrTextModel.js";
 import { DeltaEvent } from "../../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../../prompt/PromptFormatTextGenerationModel.js";
 import { ResponseHandler } from "../../../util/api/postToApi.js";
 import { OpenAIModelSettings } from "../OpenAIModelSettings.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
@@ -170,7 +170,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
      */
     generateJsonResponse(prompt: OpenAIChatSingleFunctionPrompt<unknown> | OpenAIChatAutoFunctionPrompt<Array<OpenAIFunctionDescription<unknown>>>, options?: FunctionOptions<OpenAIChatSettings> | undefined): PromiseLike<OpenAIChatResponse>;
     extractJson(response: OpenAIChatResponse): unknown;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, OpenAIChatMessage[]>): PromptMappingTextGenerationModel<INPUT_PROMPT, OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, OpenAIChatMessage[]>): PromptFormatTextGenerationModel<INPUT_PROMPT, OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings, this>;
     withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
     get maxCompletionTokens(): number | undefined;
     withMaxCompletionTokens(maxCompletionTokens: number): this;
package/model-provider/openai/chat/OpenAIChatModel.js
CHANGED
@@ -1,7 +1,7 @@
 import SecureJSON from "secure-json-parse";
 import z from "zod";
 import { AbstractModel } from "../../../model-function/AbstractModel.js";
-import { PromptMappingTextGenerationModel } from "../../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../../util/api/postToApi.js";
 import { failedOpenAICallResponseHandler } from "../OpenAIError.js";
@@ -197,10 +197,10 @@ export class OpenAIChatModel extends AbstractModel {
         const jsonText = response.choices[0].message.function_call.arguments;
         return SecureJSON.parse(jsonText);
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
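Note that for the chat model the PROMPT type parameter is `OpenAIChatMessage[]` rather than `string`, so its prompt formats produce message arrays. A usage sketch, assuming the renamed symbols are available from the package root:

```ts
import {
  generateText,
  OpenAIChatModel,
  OpenAIChatInstructionPromptFormat,
} from "modelfusion";

// The instruction prompt is converted into OpenAI chat messages
// (system + user) rather than into a single prompt string.
const text = await generateText(
  new OpenAIChatModel({ model: "gpt-3.5-turbo" }).withPromptFormat(
    OpenAIChatInstructionPromptFormat()
  ),
  {
    system: "You are a helpful assistant.",
    instruction: "Explain prompt formats in one sentence.",
  }
);
```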
package/package.json
CHANGED
-  "version": "0.13.0",
+  "version": "0.14.0",
package/prompt/{AlpacaPromptMapping.cjs → AlpacaPromptFormat.cjs}
RENAMED
@@ -1,19 +1,19 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.InstructionToAlpacaPromptMapping = void 0;
+exports.AlpacaInstructionPromptFormat = void 0;
 const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
 const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
 /**
- * Maps an instruction prompt to the Alpaca prompt format.
+ * Formats an instruction prompt as an Alpaca prompt.
  *
  * If the instruction has a system prompt, it overrides the default system prompt
  * (which can impact the results, because the model may be trained on the default system prompt).
  *
  * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
  */
-const InstructionToAlpacaPromptMapping = () => ({
+const AlpacaInstructionPromptFormat = () => ({
     stopTokens: [],
-    map: (instruction) => {
+    format: (instruction) => {
         let text = instruction.system ??
             (instruction.input != null
                 ? DEFAULT_SYSTEM_PROMPT_INPUT
@@ -30,4 +30,4 @@ const InstructionToAlpacaPromptMapping = () => ({
         return text;
     },
 });
-exports.InstructionToAlpacaPromptMapping = InstructionToAlpacaPromptMapping;
+exports.AlpacaInstructionPromptFormat = AlpacaInstructionPromptFormat;
package/prompt/{AlpacaPromptMapping.d.ts → AlpacaPromptFormat.d.ts}
RENAMED
@@ -1,11 +1,11 @@
 import { InstructionPrompt } from "./InstructionPrompt.js";
-import { PromptMapping } from "./PromptMapping.js";
+import { PromptFormat } from "./PromptFormat.js";
 /**
- * Maps an instruction prompt to the Alpaca prompt format.
+ * Formats an instruction prompt as an Alpaca prompt.
  *
  * If the instruction has a system prompt, it overrides the default system prompt
  * (which can impact the results, because the model may be trained on the default system prompt).
  *
  * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
 */
-export declare const InstructionToAlpacaPromptMapping: () => PromptMapping<InstructionPrompt, string>;
+export declare const AlpacaInstructionPromptFormat: () => PromptFormat<InstructionPrompt, string>;
package/prompt/{AlpacaPromptMapping.js → AlpacaPromptFormat.js}
RENAMED
@@ -1,16 +1,16 @@
 const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
 const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
 /**
- * Maps an instruction prompt to the Alpaca prompt format.
+ * Formats an instruction prompt as an Alpaca prompt.
  *
  * If the instruction has a system prompt, it overrides the default system prompt
  * (which can impact the results, because the model may be trained on the default system prompt).
  *
 * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
 */
-export const InstructionToAlpacaPromptMapping = () => ({
+export const AlpacaInstructionPromptFormat = () => ({
     stopTokens: [],
-    map: (instruction) => {
+    format: (instruction) => {
         let text = instruction.system ??
             (instruction.input != null
                 ? DEFAULT_SYSTEM_PROMPT_INPUT
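A usage sketch for the renamed Alpaca format (the symbol names come from this diff; the model choice and its settings are illustrative assumptions):

```ts
import {
  generateText,
  LlamaCppTextGenerationModel,
  AlpacaInstructionPromptFormat,
} from "modelfusion";

// `input` is optional: with it, the DEFAULT_SYSTEM_PROMPT_INPUT variant
// is used; without it, DEFAULT_SYSTEM_PROMPT_NO_INPUT.
const text = await generateText(
  new LlamaCppTextGenerationModel({
    contextWindowSize: 4096,
    nPredict: 1000,
  }).withPromptFormat(AlpacaInstructionPromptFormat()),
  {
    instruction: "Summarize the following text.",
    input: "Prompt formats map instruction and chat prompts to model-specific prompts.",
  }
);
```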
package/prompt/{Llama2PromptMapping.cjs → Llama2PromptFormat.cjs}
RENAMED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.ChatToLlama2PromptMapping = exports.InstructionToLlama2PromptMapping = void 0;
+exports.Llama2ChatPromptFormat = exports.Llama2InstructionPromptFormat = void 0;
 const validateChatPrompt_js_1 = require("./chat/validateChatPrompt.cjs");
 // see https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/generation.py#L44
 const BEGIN_SEGMENT = "<s>";
@@ -10,19 +10,22 @@ const END_INSTRUCTION = "[/INST]\n";
 const BEGIN_SYSTEM = "<<SYS>>\n";
 const END_SYSTEM = "\n<</SYS>>\n\n";
 /**
- * Maps an instruction prompt to the Llama 2 prompt format.
+ * Formats an instruction prompt as a Llama 2 prompt.
  *
  * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
 */
-const InstructionToLlama2PromptMapping = () => ({
+const Llama2InstructionPromptFormat = () => ({
     stopTokens: [END_SEGMENT],
-    map: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
+    format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
         ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
         : ""} ${instruction.instruction}${instruction.input != null ? `\n\n${instruction.input}` : ""} ${END_INSTRUCTION}\n`,
 });
-exports.InstructionToLlama2PromptMapping = InstructionToLlama2PromptMapping;
-const ChatToLlama2PromptMapping = () => ({
-    map: (chatPrompt) => {
+exports.Llama2InstructionPromptFormat = Llama2InstructionPromptFormat;
+/**
+ * Formats a chat prompt as a Llama 2 prompt.
+ */
+const Llama2ChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
         let text = "";
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -53,4 +56,4 @@ const ChatToLlama2PromptMapping = () => ({
     },
     stopTokens: [END_SEGMENT],
 });
-exports.ChatToLlama2PromptMapping = ChatToLlama2PromptMapping;
+exports.Llama2ChatPromptFormat = Llama2ChatPromptFormat;
package/prompt/Llama2PromptFormat.d.ts
ADDED
@@ -0,0 +1,13 @@
+import { PromptFormat } from "./PromptFormat.js";
+import { InstructionPrompt } from "./InstructionPrompt.js";
+import { ChatPrompt } from "./chat/ChatPrompt.js";
+/**
+ * Formats an instruction prompt as a Llama 2 prompt.
+ *
+ * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
+ */
+export declare const Llama2InstructionPromptFormat: () => PromptFormat<InstructionPrompt, string>;
+/**
+ * Formats a chat prompt as a Llama 2 prompt.
+ */
+export declare const Llama2ChatPromptFormat: () => PromptFormat<ChatPrompt, string>;
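A short usage sketch for the chat format declared above (assuming both formats are exported from the package root):

```ts
import {
  streamText,
  LlamaCppTextGenerationModel,
  Llama2ChatPromptFormat,
} from "modelfusion";

// Llama2ChatPromptFormat() flattens the alternating system/user/ai messages
// into the <s>[INST] ... [/INST] syntax shown above and registers the
// end-of-segment token as a stop token (stopTokens: [END_SEGMENT]).
const textStream = await streamText(
  new LlamaCppTextGenerationModel({
    contextWindowSize: 4096, // Llama 2 context window size
  }).withPromptFormat(Llama2ChatPromptFormat()),
  [
    { system: "You are a helpful assistant." },
    { user: "Why does Llama 2 need special prompt markers?" },
  ]
);

for await (const textFragment of textStream) {
  process.stdout.write(textFragment);
}
```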
package/prompt/{Llama2PromptMapping.js → Llama2PromptFormat.js}
RENAMED
@@ -7,18 +7,21 @@ const END_INSTRUCTION = "[/INST]\n";
 const BEGIN_SYSTEM = "<<SYS>>\n";
 const END_SYSTEM = "\n<</SYS>>\n\n";
 /**
- * Maps an instruction prompt to the Llama 2 prompt format.
+ * Formats an instruction prompt as a Llama 2 prompt.
  *
  * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
 */
-export const InstructionToLlama2PromptMapping = () => ({
+export const Llama2InstructionPromptFormat = () => ({
     stopTokens: [END_SEGMENT],
-    map: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
+    format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
         ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
         : ""} ${instruction.instruction}${instruction.input != null ? `\n\n${instruction.input}` : ""} ${END_INSTRUCTION}\n`,
 });
-export const ChatToLlama2PromptMapping = () => ({
-    map: (chatPrompt) => {
+/**
+ * Formats a chat prompt as a Llama 2 prompt.
+ */
+export const Llama2ChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         validateChatPrompt(chatPrompt);
         let text = "";
         for (let i = 0; i < chatPrompt.length; i++) {
package/prompt/{OpenAIChatPromptMapping.cjs → OpenAIChatPromptFormat.cjs}
RENAMED
@@ -1,9 +1,12 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.ChatToOpenAIChatPromptMapping = exports.InstructionToOpenAIChatPromptMapping = void 0;
+exports.OpenAIChatChatPromptFormat = exports.OpenAIChatInstructionPromptFormat = void 0;
 const validateChatPrompt_js_1 = require("./chat/validateChatPrompt.cjs");
-const InstructionToOpenAIChatPromptMapping = () => ({
-    map: (instruction) => {
+/**
+ * Formats an instruction prompt as an OpenAI chat prompt.
+ */
+const OpenAIChatInstructionPromptFormat = () => ({
+    format: (instruction) => {
         const messages = [];
         if (instruction.system != null) {
             messages.push({
@@ -25,9 +28,12 @@ const InstructionToOpenAIChatPromptMapping = () => ({
     },
     stopTokens: [],
 });
-exports.InstructionToOpenAIChatPromptMapping = InstructionToOpenAIChatPromptMapping;
-const ChatToOpenAIChatPromptMapping = () => ({
-    map: (chatPrompt) => {
+exports.OpenAIChatInstructionPromptFormat = OpenAIChatInstructionPromptFormat;
+/**
+ * Formats a chat prompt as an OpenAI chat prompt.
+ */
+const OpenAIChatChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
         const messages = [];
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -65,4 +71,4 @@ const ChatToOpenAIChatPromptMapping = () => ({
     },
     stopTokens: [],
 });
-exports.ChatToOpenAIChatPromptMapping = ChatToOpenAIChatPromptMapping;
+exports.OpenAIChatChatPromptFormat = OpenAIChatChatPromptFormat;
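And the chat variant from the README, which exercises the renamed `OpenAIChatChatPromptFormat()` shown above:

```ts
import {
  streamText,
  OpenAIChatModel,
  OpenAIChatChatPromptFormat,
} from "modelfusion";

// Streams the completion; the chat prompt is passed through unchanged
// as OpenAIChatMessage[] (stopTokens is empty for this format).
const textStream = await streamText(
  new OpenAIChatModel({
    model: "gpt-3.5-turbo",
  }).withPromptFormat(OpenAIChatChatPromptFormat()),
  [
    { system: "You are a celebrated poet." },
    { user: "Write a short story about a robot learning to love." },
  ]
);

for await (const textFragment of textStream) {
  process.stdout.write(textFragment);
}
```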