modelfusion 0.12.0 → 0.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/README.md +5 -5
  2. package/model-function/generate-text/TextGenerationModel.d.ts +3 -3
  3. package/model-provider/cohere/CohereTextGenerationModel.cjs +5 -5
  4. package/model-provider/cohere/CohereTextGenerationModel.d.ts +3 -3
  5. package/model-provider/cohere/CohereTextGenerationModel.js +5 -5
  6. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +4 -4
  7. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +3 -3
  8. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +4 -4
  9. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +5 -5
  10. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +3 -3
  11. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +5 -5
  12. package/model-provider/openai/OpenAITextGenerationModel.cjs +5 -5
  13. package/model-provider/openai/OpenAITextGenerationModel.d.ts +3 -3
  14. package/model-provider/openai/OpenAITextGenerationModel.js +5 -5
  15. package/model-provider/openai/chat/OpenAIChatModel.cjs +5 -5
  16. package/model-provider/openai/chat/OpenAIChatModel.d.ts +3 -3
  17. package/model-provider/openai/chat/OpenAIChatModel.js +5 -5
  18. package/package.json +3 -3
  19. package/prompt/{AlpacaPromptMapping.cjs → AlpacaPromptFormat.cjs} +5 -5
  20. package/prompt/{AlpacaPromptMapping.d.ts → AlpacaPromptFormat.d.ts} +3 -3
  21. package/prompt/{AlpacaPromptMapping.js → AlpacaPromptFormat.js} +3 -3
  22. package/prompt/{Llama2PromptMapping.cjs → Llama2PromptFormat.cjs} +11 -8
  23. package/prompt/Llama2PromptFormat.d.ts +13 -0
  24. package/prompt/{Llama2PromptMapping.js → Llama2PromptFormat.js} +8 -5
  25. package/prompt/{OpenAIChatPromptMapping.cjs → OpenAIChatPromptFormat.cjs} +13 -7
  26. package/prompt/OpenAIChatPromptFormat.d.ts +12 -0
  27. package/prompt/{OpenAIChatPromptMapping.js → OpenAIChatPromptFormat.js} +10 -4
  28. package/prompt/PromptFormat.d.ts +14 -0
  29. package/prompt/{PromptMappingTextGenerationModel.cjs → PromptFormatTextGenerationModel.cjs} +19 -19
  30. package/prompt/{PromptMappingTextGenerationModel.d.ts → PromptFormatTextGenerationModel.d.ts} +6 -6
  31. package/prompt/{PromptMappingTextGenerationModel.js → PromptFormatTextGenerationModel.js} +17 -17
  32. package/prompt/{TextPromptMapping.cjs → TextPromptFormat.cjs} +11 -8
  33. package/prompt/TextPromptFormat.d.ts +17 -0
  34. package/prompt/{TextPromptMapping.js → TextPromptFormat.js} +8 -5
  35. package/prompt/{VicunaPromptMapping.cjs → VicunaPromptFormat.cjs} +5 -5
  36. package/prompt/{VicunaPromptMapping.d.ts → VicunaPromptFormat.d.ts} +3 -3
  37. package/prompt/{VicunaPromptMapping.js → VicunaPromptFormat.js} +3 -3
  38. package/prompt/chat/trimChatPrompt.cjs +1 -1
  39. package/prompt/chat/trimChatPrompt.d.ts +1 -1
  40. package/prompt/chat/trimChatPrompt.js +1 -1
  41. package/prompt/index.cjs +7 -7
  42. package/prompt/index.d.ts +7 -7
  43. package/prompt/index.js +7 -7
  44. package/tool/WebSearchTool.cjs +7 -28
  45. package/tool/WebSearchTool.d.ts +6 -67
  46. package/tool/WebSearchTool.js +7 -28
  47. package/tool/executeTool.cjs +1 -0
  48. package/tool/executeTool.d.ts +5 -4
  49. package/tool/executeTool.js +1 -0
  50. package/prompt/Llama2PromptMapping.d.ts +0 -10
  51. package/prompt/OpenAIChatPromptMapping.d.ts +0 -6
  52. package/prompt/PromptMapping.d.ts +0 -7
  53. package/prompt/TextPromptMapping.d.ts +0 -14
  54. package/prompt/{PromptMapping.cjs → PromptFormat.cjs} +0 -0
  55. package/prompt/{PromptMapping.js → PromptFormat.js} +0 -0
package/README.md CHANGED
@@ -43,7 +43,7 @@ You can provide API keys for the different [integrations](https://modelfusion.de
 
 Generate text using a language model and a prompt.
 You can stream the text if it is supported by the model.
-You can use [prompt mappings](https://modelfusion.dev/guide/function/generate-text/prompt-mapping) to change the prompt format of a model.
+You can use [prompt formats](https://modelfusion.dev/guide/function/generate-text/prompt-format) to change the prompt format of a model.
 
 #### generateText
 
@@ -70,16 +70,16 @@ for await (const textFragment of textStream) {
 }
 ```
 
-#### Prompt Mapping
+#### Prompt Format
 
-[Prompt mapping](https://modelfusion.dev/guide/function/generate-text/prompt-mapping) lets you use higher level prompt structures (such as instruction or chat prompts) for different models.
+[Prompt format](https://modelfusion.dev/guide/function/generate-text/prompt-format) lets you use higher level prompt structures (such as instruction or chat prompts) for different models.
 
 ```ts
 const text = await generateText(
   new LlamaCppTextGenerationModel({
     contextWindowSize: 4096, // Llama 2 context window size
     nPredict: 1000,
-  }).mapPrompt(InstructionToLlama2PromptMapping()),
+  }).withPromptFormat(Llama2InstructionPromptFormat()),
   {
     system: "You are a story writer.",
     instruction: "Write a short story about a robot learning to love.",
@@ -91,7 +91,7 @@ const text = await generateText(
 const textStream = await streamText(
   new OpenAIChatModel({
     model: "gpt-3.5-turbo",
-  }).mapPrompt(ChatToOpenAIChatPromptMapping()),
+  }).withPromptFormat(OpenAIChatChatPromptFormat()),
   [
     { system: "You are a celebrated poet." },
     { user: "Write a short story about a robot learning to love." },
package/model-function/generate-text/TextGenerationModel.d.ts CHANGED
@@ -1,5 +1,5 @@
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { FunctionOptions } from "../FunctionOptions.js";
 import { Model, ModelSettings } from "../Model.js";
 import { BasicTokenizer, FullTokenizer } from "../tokenize-text/Tokenizer.js";
@@ -24,7 +24,7 @@ export interface TextGenerationModel<PROMPT, RESPONSE, FULL_DELTA, SETTINGS exte
      * Optional. Implement for streaming support.
      */
    readonly extractTextDelta: ((fullDelta: FULL_DELTA) => string | undefined) | undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, PROMPT>): PromptMappingTextGenerationModel<INPUT_PROMPT, PROMPT, RESPONSE, FULL_DELTA, SETTINGS, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, PROMPT>): PromptFormatTextGenerationModel<INPUT_PROMPT, PROMPT, RESPONSE, FULL_DELTA, SETTINGS, this>;
     /**
      * Maximum number of tokens to generate.
      */
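This is the only signature change in the `TextGenerationModel` interface. The new `PromptFormat` type itself lives in `package/prompt/PromptFormat.d.ts` (14 added lines, not shown in full in this diff). From the renamed call sites (`stopTokens`, `format`), its shape is approximately:

```ts
// Approximate shape of PromptFormat, inferred from the call sites in this
// diff; the authoritative declaration is in package/prompt/PromptFormat.d.ts.
interface PromptFormat<SOURCE_PROMPT, TARGET_PROMPT> {
  // Stop tokens that the wrapped model should use with this format.
  stopTokens: string[];
  // Converts a higher-level prompt (instruction, chat, ...) into the
  // prompt type the underlying model expects.
  format(sourcePrompt: SOURCE_PROMPT): TARGET_PROMPT;
}
```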
package/model-provider/cohere/CohereTextGenerationModel.cjs CHANGED
@@ -9,7 +9,7 @@ const zod_1 = require("zod");
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const AsyncQueue_js_1 = require("../../model-function/generate-text/AsyncQueue.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../util/api/postToApi.cjs");
 const CohereError_js_1 = require("./CohereError.cjs");
@@ -122,10 +122,10 @@ class CohereTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta.delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/cohere/CohereTextGenerationModel.d.ts CHANGED
@@ -3,8 +3,8 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { FunctionOptions } from "../../model-function/FunctionOptions.js";
 import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 import { ResponseHandler } from "../../util/api/postToApi.js";
@@ -92,7 +92,7 @@ export declare class CohereTextGenerationModel extends AbstractModel<CohereTextG
     extractText(response: CohereTextGenerationResponse): string;
     generateDeltaStreamResponse(prompt: string, options?: FunctionOptions<CohereTextGenerationModelSettings>): Promise<AsyncIterable<DeltaEvent<CohereTextGenerationDelta>>>;
     extractTextDelta(fullDelta: CohereTextGenerationDelta): string | undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, CohereTextGenerationResponse, CohereTextGenerationDelta, CohereTextGenerationModelSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, CohereTextGenerationResponse, CohereTextGenerationDelta, CohereTextGenerationModelSettings, this>;
     withSettings(additionalSettings: Partial<CohereTextGenerationModelSettings>): this;
     get maxCompletionTokens(): number | undefined;
     withMaxCompletionTokens(maxCompletionTokens: number): this;
package/model-provider/cohere/CohereTextGenerationModel.js CHANGED
@@ -3,7 +3,7 @@ import { z } from "zod";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { AsyncQueue } from "../../model-function/generate-text/AsyncQueue.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../util/api/postToApi.js";
 import { failedCohereCallResponseHandler } from "./CohereError.js";
@@ -116,10 +116,10 @@ export class CohereTextGenerationModel extends AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta.delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs CHANGED
@@ -9,7 +9,7 @@ const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../util/api/postToApi.cjs");
 const HuggingFaceError_js_1 = require("./HuggingFaceError.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../prompt/PromptFormatTextGenerationModel.cjs");
 /**
  * Create a text generation model that calls a Hugging Face Inference API Text Generation Task.
  *
@@ -103,10 +103,10 @@ class HuggingFaceTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     extractText(response) {
         return response[0].generated_text;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
             model: this,
-            promptMapping,
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts CHANGED
@@ -4,8 +4,8 @@ import { FunctionOptions } from "../../model-function/FunctionOptions.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 export interface HuggingFaceTextGenerationModelSettings extends TextGenerationModelSettings {
     model: string;
     baseUrl?: string;
@@ -58,7 +58,7 @@ export declare class HuggingFaceTextGenerationModel extends AbstractModel<Huggin
     extractText(response: HuggingFaceTextGenerationResponse): string;
     generateDeltaStreamResponse: undefined;
     extractTextDelta: undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, HuggingFaceTextGenerationResponse, undefined, HuggingFaceTextGenerationModelSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, HuggingFaceTextGenerationResponse, undefined, HuggingFaceTextGenerationModelSettings, this>;
     withSettings(additionalSettings: Partial<HuggingFaceTextGenerationModelSettings>): this;
     get maxCompletionTokens(): number | undefined;
     withMaxCompletionTokens(maxCompletionTokens: number): this;
package/model-provider/huggingface/HuggingFaceTextGenerationModel.js CHANGED
@@ -3,7 +3,7 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { callWithRetryAndThrottle } from "../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../util/api/postToApi.js";
 import { failedHuggingFaceCallResponseHandler } from "./HuggingFaceError.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 /**
  * Create a text generation model that calls a Hugging Face Inference API Text Generation Task.
  *
@@ -97,10 +97,10 @@ export class HuggingFaceTextGenerationModel extends AbstractModel {
     extractText(response) {
         return response[0].generated_text;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
             model: this,
-            promptMapping,
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
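The same mechanical rename repeats across every provider, with one substantive difference: Cohere (above) and the Llama.cpp and OpenAI models (below) adopt the format's stop tokens via `withStopTokens(promptFormat.stopTokens)`, while the Hugging Face model has no stop-token support and wraps itself unchanged (`model: this`). A simplified sketch of that wiring, using stand-in types rather than the package's real generics:

```ts
// Stand-in types for illustration; the real signatures are in the diffs above.
interface PromptFormat<SOURCE, TARGET> {
  stopTokens: string[];
  format: (prompt: SOURCE) => TARGET;
}

interface StopTokenCapableModel {
  withStopTokens(stopTokens: string[]): StopTokenCapableModel;
}

// Cohere / Llama.cpp / OpenAI variant: the wrapped model stops generating
// at the format's stop tokens (e.g. the Llama 2 end-of-segment marker).
function applyPromptFormat<SOURCE>(
  model: StopTokenCapableModel,
  promptFormat: PromptFormat<SOURCE, string>
) {
  return {
    model: model.withStopTokens(promptFormat.stopTokens),
    promptFormat,
  };
}
```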
package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs CHANGED
@@ -9,7 +9,7 @@ const zod_1 = __importDefault(require("zod"));
 const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const AsyncQueue_js_1 = require("../../model-function/generate-text/AsyncQueue.cjs");
 const parseEventSourceReadableStream_js_1 = require("../../model-function/generate-text/parseEventSourceReadableStream.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../util/api/postToApi.cjs");
 const LlamaCppError_js_1 = require("./LlamaCppError.cjs");
@@ -76,10 +76,10 @@ class LlamaCppTextGenerationModel extends AbstractModel_js_1.AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta.delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts CHANGED
@@ -3,8 +3,8 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { FunctionOptions } from "../../model-function/FunctionOptions.js";
 import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 import { ResponseHandler } from "../../util/api/postToApi.js";
@@ -102,7 +102,7 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
     extractText(response: LlamaCppTextGenerationResponse): string;
     generateDeltaStreamResponse(prompt: string, options?: FunctionOptions<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): Promise<AsyncIterable<DeltaEvent<LlamaCppTextGenerationDelta>>>;
     extractTextDelta(fullDelta: LlamaCppTextGenerationDelta): string | undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, LlamaCppTextGenerationResponse, LlamaCppTextGenerationDelta, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, LlamaCppTextGenerationResponse, LlamaCppTextGenerationDelta, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
     get maxCompletionTokens(): number | undefined;
     withMaxCompletionTokens(maxCompletionTokens: number): this;
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js CHANGED
@@ -3,7 +3,7 @@ import z from "zod";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { AsyncQueue } from "../../model-function/generate-text/AsyncQueue.js";
 import { parseEventSourceReadableStream } from "../../model-function/generate-text/parseEventSourceReadableStream.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../util/api/postToApi.js";
 import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
@@ -70,10 +70,10 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta.delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/openai/OpenAITextGenerationModel.cjs CHANGED
@@ -10,7 +10,7 @@ const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const AsyncQueue_js_1 = require("../../model-function/generate-text/AsyncQueue.cjs");
 const parseEventSourceReadableStream_js_1 = require("../../model-function/generate-text/parseEventSourceReadableStream.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../util/api/postToApi.cjs");
 const OpenAIError_js_1 = require("./OpenAIError.cjs");
@@ -156,10 +156,10 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta[0].delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/openai/OpenAITextGenerationModel.d.ts CHANGED
@@ -3,8 +3,8 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { FunctionOptions } from "../../model-function/FunctionOptions.js";
 import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 import { ResponseHandler } from "../../util/api/postToApi.js";
@@ -134,7 +134,7 @@ export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextG
     extractText(response: OpenAITextGenerationResponse): string;
     generateDeltaStreamResponse(prompt: string, options?: FunctionOptions<OpenAITextGenerationModelSettings>): Promise<AsyncIterable<DeltaEvent<OpenAITextGenerationDelta>>>;
     extractTextDelta(fullDelta: OpenAITextGenerationDelta): string | undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings, this>;
     withSettings(additionalSettings: Partial<OpenAITextGenerationModelSettings>): this;
     get maxCompletionTokens(): number | undefined;
     withMaxCompletionTokens(maxCompletionTokens: number): this;
package/model-provider/openai/OpenAITextGenerationModel.js CHANGED
@@ -4,7 +4,7 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { AsyncQueue } from "../../model-function/generate-text/AsyncQueue.js";
 import { parseEventSourceReadableStream } from "../../model-function/generate-text/parseEventSourceReadableStream.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../util/api/postToApi.js";
 import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
@@ -148,10 +148,10 @@ export class OpenAITextGenerationModel extends AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta[0].delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/openai/chat/OpenAIChatModel.cjs CHANGED
@@ -7,7 +7,7 @@ exports.OpenAIChatResponseFormat = exports.OpenAIChatModel = exports.calculateOp
 const secure_json_parse_1 = __importDefault(require("secure-json-parse"));
 const zod_1 = __importDefault(require("zod"));
 const AbstractModel_js_1 = require("../../../model-function/AbstractModel.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../../util/api/postToApi.cjs");
 const OpenAIError_js_1 = require("../OpenAIError.cjs");
@@ -205,10 +205,10 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
         const jsonText = response.choices[0].message.function_call.arguments;
         return secure_json_parse_1.default.parse(jsonText);
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/model-provider/openai/chat/OpenAIChatModel.d.ts CHANGED
@@ -5,8 +5,8 @@ import { GenerateJsonModel } from "../../../model-function/generate-json/Generat
 import { GenerateJsonOrTextModel } from "../../../model-function/generate-json/GenerateJsonOrTextModel.js";
 import { DeltaEvent } from "../../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../../prompt/PromptFormatTextGenerationModel.js";
 import { ResponseHandler } from "../../../util/api/postToApi.js";
 import { OpenAIModelSettings } from "../OpenAIModelSettings.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
@@ -170,7 +170,7 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
      */
     generateJsonResponse(prompt: OpenAIChatSingleFunctionPrompt<unknown> | OpenAIChatAutoFunctionPrompt<Array<OpenAIFunctionDescription<unknown>>>, options?: FunctionOptions<OpenAIChatSettings> | undefined): PromiseLike<OpenAIChatResponse>;
     extractJson(response: OpenAIChatResponse): unknown;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, OpenAIChatMessage[]>): PromptMappingTextGenerationModel<INPUT_PROMPT, OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, OpenAIChatMessage[]>): PromptFormatTextGenerationModel<INPUT_PROMPT, OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings, this>;
     withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
     get maxCompletionTokens(): number | undefined;
     withMaxCompletionTokens(maxCompletionTokens: number): this;
package/model-provider/openai/chat/OpenAIChatModel.js CHANGED
@@ -1,7 +1,7 @@
 import SecureJSON from "secure-json-parse";
 import z from "zod";
 import { AbstractModel } from "../../../model-function/AbstractModel.js";
-import { PromptMappingTextGenerationModel } from "../../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../../util/api/postToApi.js";
 import { failedOpenAICallResponseHandler } from "../OpenAIError.js";
@@ -197,10 +197,10 @@ export class OpenAIChatModel extends AbstractModel {
         const jsonText = response.choices[0].message.function_call.arguments;
         return SecureJSON.parse(jsonText);
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withStopTokens(promptFormat.stopTokens),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build AI applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.12.0",
+  "version": "0.14.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [
@@ -68,10 +68,10 @@
     "eslint-config-prettier": "9.0.0",
     "husky": "^8.0.3",
     "lint-staged": "14.0.0",
-    "prettier": "3.0.1",
+    "prettier": "3.0.2",
     "rimraf": "5.0.1",
     "typescript": "5.1.6",
-    "zod": "3.22.0",
+    "zod": "3.22.1",
     "zod-to-json-schema": "3.21.4"
   },
   "peerDependencies": {
package/prompt/{AlpacaPromptMapping.cjs → AlpacaPromptFormat.cjs} RENAMED
@@ -1,19 +1,19 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.InstructionToAlpacaPromptMapping = void 0;
+exports.AlpacaInstructionPromptFormat = void 0;
 const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
 const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
 /**
- * Maps an instruction prompt to the Alpaca prompt format.
+ * Formats an instruction prompt as an Alpaca prompt.
  *
  * If the instruction has a system prompt, it overrides the default system prompt
  * (which can impact the results, because the model may be trained on the default system prompt).
  *
  * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
  */
-const InstructionToAlpacaPromptMapping = () => ({
+const AlpacaInstructionPromptFormat = () => ({
     stopTokens: [],
-    map: (instruction) => {
+    format: (instruction) => {
         let text = instruction.system ??
             (instruction.input != null
                 ? DEFAULT_SYSTEM_PROMPT_INPUT
@@ -30,4 +30,4 @@ const InstructionToAlpacaPromptMapping = () => ({
         return text;
     },
 });
-exports.InstructionToAlpacaPromptMapping = InstructionToAlpacaPromptMapping;
+exports.AlpacaInstructionPromptFormat = AlpacaInstructionPromptFormat;
package/prompt/{AlpacaPromptMapping.d.ts → AlpacaPromptFormat.d.ts} RENAMED
@@ -1,11 +1,11 @@
 import { InstructionPrompt } from "./InstructionPrompt.js";
-import { PromptMapping } from "./PromptMapping.js";
+import { PromptFormat } from "./PromptFormat.js";
 /**
- * Maps an instruction prompt to the Alpaca prompt format.
+ * Formats an instruction prompt as an Alpaca prompt.
  *
  * If the instruction has a system prompt, it overrides the default system prompt
  * (which can impact the results, because the model may be trained on the default system prompt).
  *
  * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
  */
-export declare const InstructionToAlpacaPromptMapping: () => PromptMapping<InstructionPrompt, string>;
+export declare const AlpacaInstructionPromptFormat: () => PromptFormat<InstructionPrompt, string>;
package/prompt/{AlpacaPromptMapping.js → AlpacaPromptFormat.js} RENAMED
@@ -1,16 +1,16 @@
 const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
 const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
 /**
- * Maps an instruction prompt to the Alpaca prompt format.
+ * Formats an instruction prompt as an Alpaca prompt.
  *
  * If the instruction has a system prompt, it overrides the default system prompt
  * (which can impact the results, because the model may be trained on the default system prompt).
  *
  * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
  */
-export const InstructionToAlpacaPromptMapping = () => ({
+export const AlpacaInstructionPromptFormat = () => ({
     stopTokens: [],
-    map: (instruction) => {
+    format: (instruction) => {
         let text = instruction.system ??
             (instruction.input != null
                 ? DEFAULT_SYSTEM_PROMPT_INPUT
package/prompt/{Llama2PromptMapping.cjs → Llama2PromptFormat.cjs} RENAMED
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.ChatToLlama2PromptMapping = exports.InstructionToLlama2PromptMapping = void 0;
+exports.Llama2ChatPromptFormat = exports.Llama2InstructionPromptFormat = void 0;
 const validateChatPrompt_js_1 = require("./chat/validateChatPrompt.cjs");
 // see https://github.com/facebookresearch/llama/blob/6c7fe276574e78057f917549435a2554000a876d/llama/generation.py#L44
 const BEGIN_SEGMENT = "<s>";
@@ -10,19 +10,22 @@ const END_INSTRUCTION = "[/INST]\n";
 const BEGIN_SYSTEM = "<<SYS>>\n";
 const END_SYSTEM = "\n<</SYS>>\n\n";
 /**
- * Maps an instruction prompt to the Llama2 prompt format.
+ * Formats an instruction prompt as a Llama 2 prompt.
  *
  * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
  */
-const InstructionToLlama2PromptMapping = () => ({
+const Llama2InstructionPromptFormat = () => ({
     stopTokens: [END_SEGMENT],
-    map: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
+    format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
         ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
         : ""} ${instruction.instruction}${instruction.input != null ? `\n\n${instruction.input}` : ""} ${END_INSTRUCTION}\n`,
 });
-exports.InstructionToLlama2PromptMapping = InstructionToLlama2PromptMapping;
-const ChatToLlama2PromptMapping = () => ({
-    map: (chatPrompt) => {
+exports.Llama2InstructionPromptFormat = Llama2InstructionPromptFormat;
+/**
+ * Formats a chat prompt as a Llama 2 prompt.
+ */
+const Llama2ChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         (0, validateChatPrompt_js_1.validateChatPrompt)(chatPrompt);
         let text = "";
         for (let i = 0; i < chatPrompt.length; i++) {
@@ -53,4 +56,4 @@ const ChatToLlama2PromptMapping = () => ({
     },
     stopTokens: [END_SEGMENT],
 });
-exports.ChatToLlama2PromptMapping = ChatToLlama2PromptMapping;
+exports.Llama2ChatPromptFormat = Llama2ChatPromptFormat;
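To make the instruction template concrete, here is a standalone re-creation of the `format` function above and the text it produces. `"<s>"`, `"[/INST]\n"`, and the `<<SYS>>` markers are visible in the diff; the values of `BEGIN_INSTRUCTION` and `END_SEGMENT` are not, so the standard Llama 2 markers (`"[INST] "`, `"</s>"`) are assumed:

```ts
const BEGIN_SEGMENT = "<s>";
const END_SEGMENT = "</s>"; // assumed; value not visible in this diff
const BEGIN_INSTRUCTION = "[INST] "; // assumed; value not visible in this diff
const END_INSTRUCTION = "[/INST]\n";
const BEGIN_SYSTEM = "<<SYS>>\n";
const END_SYSTEM = "\n<</SYS>>\n\n";

const instruction: { system?: string; instruction: string; input?: string } = {
  system: "You are a story writer.",
  instruction: "Write a short story about a robot learning to love.",
};

// Mirrors the template in Llama2PromptFormat above:
const prompt = `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${
  instruction.system != null
    ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
    : ""
} ${instruction.instruction}${
  instruction.input != null ? `\n\n${instruction.input}` : ""
} ${END_INSTRUCTION}\n`;

// prompt is now (roughly):
//   <s>[INST]  <<SYS>>
//   You are a story writer.
//   <</SYS>>
//
//    Write a short story about a robot learning to love. [/INST]
//
// The matching stop token is END_SEGMENT ("</s>"), so generation halts at
// the end of the model's answer segment.
```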
package/prompt/Llama2PromptFormat.d.ts ADDED
@@ -0,0 +1,13 @@
+import { PromptFormat } from "./PromptFormat.js";
+import { InstructionPrompt } from "./InstructionPrompt.js";
+import { ChatPrompt } from "./chat/ChatPrompt.js";
+/**
+ * Formats an instruction prompt as a Llama 2 prompt.
+ *
+ * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
+ */
+export declare const Llama2InstructionPromptFormat: () => PromptFormat<InstructionPrompt, string>;
+/**
+ * Formats a chat prompt as a Llama 2 prompt.
+ */
+export declare const Llama2ChatPromptFormat: () => PromptFormat<ChatPrompt, string>;
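The new declaration file exports both formats. The chat variant is applied the same way as the OpenAI chat format in the README; a usage sketch with the same assumed top-level exports:

```ts
import {
  Llama2ChatPromptFormat,
  LlamaCppTextGenerationModel,
  streamText,
} from "modelfusion";

const textStream = await streamText(
  new LlamaCppTextGenerationModel({
    contextWindowSize: 4096, // Llama 2 context window size
  }).withPromptFormat(Llama2ChatPromptFormat()),
  [
    { system: "You are a celebrated poet." },
    { user: "Write a short poem about robots." },
  ]
);

for await (const textFragment of textStream) {
  process.stdout.write(textFragment);
}
```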
package/prompt/{Llama2PromptMapping.js → Llama2PromptFormat.js} RENAMED
@@ -7,18 +7,21 @@ const END_INSTRUCTION = "[/INST]\n";
 const BEGIN_SYSTEM = "<<SYS>>\n";
 const END_SYSTEM = "\n<</SYS>>\n\n";
 /**
- * Maps an instruction prompt to the Llama2 prompt format.
+ * Formats an instruction prompt as a Llama 2 prompt.
  *
  * @see https://www.philschmid.de/llama-2#how-to-prompt-llama-2-chat
  */
-export const InstructionToLlama2PromptMapping = () => ({
+export const Llama2InstructionPromptFormat = () => ({
     stopTokens: [END_SEGMENT],
-    map: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
+    format: (instruction) => `${BEGIN_SEGMENT}${BEGIN_INSTRUCTION}${instruction.system != null
         ? ` ${BEGIN_SYSTEM}${instruction.system}${END_SYSTEM}`
         : ""} ${instruction.instruction}${instruction.input != null ? `\n\n${instruction.input}` : ""} ${END_INSTRUCTION}\n`,
 });
-export const ChatToLlama2PromptMapping = () => ({
-    map: (chatPrompt) => {
+/**
+ * Formats a chat prompt as a Llama 2 prompt.
+ */
+export const Llama2ChatPromptFormat = () => ({
+    format: (chatPrompt) => {
         validateChatPrompt(chatPrompt);
         let text = "";
         for (let i = 0; i < chatPrompt.length; i++) {