modelfusion 0.119.0 → 0.120.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,32 @@
  # Changelog
 
+ ## v0.120.0 - 2024-01-09
+
+ ### Added
+
+ - `OllamaCompletionModel` supports setting the prompt template in the settings. Prompt formats are available under `ollama.prompt.*`. You can then call `.withTextPrompt()`, `.withInstructionPrompt()` or `.withChatPrompt()` to use a standardized prompt.
+
+ ```ts
+ const model = ollama
+   .CompletionTextGenerator({
+     model: "mistral",
+     promptTemplate: ollama.prompt.Mistral,
+     raw: true, // required when using custom prompt template
+     maxGenerationTokens: 120,
+   })
+   .withTextPrompt();
+ ```
+
+ ### Removed
+
+ - **breaking change**: removed `.withTextPromptTemplate` on `OllamaCompletionModel`.
+
+ ## v0.119.1 - 2024-01-08
+
+ ### Fixed
+
+ - Incorrect export. Thanks [@mloenow](https://github.com/mloenow) for the fix!
+
  ## v0.119.0 - 2024-01-07
 
  ### Added
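
For anyone crossing the breaking change above: the removed `.withTextPromptTemplate()` call is replaced by the `promptTemplate` setting plus the new prompt-style methods. A minimal before/after sketch (the 0.119.x lines are reconstructed from the removed method shown later in this diff; the 0.120.0 lines follow the changelog example):

```ts
import { ollama } from "modelfusion";

// 0.119.x (removed in this release):
//   ollama
//     .CompletionTextGenerator({ model: "mistral", raw: true })
//     .withTextPromptTemplate(TextPrompt.instruction());

// 0.120.0: select the template provider in the settings,
// then opt into the standardized prompt style you need.
const model = ollama
  .CompletionTextGenerator({
    model: "mistral",
    promptTemplate: ollama.prompt.Mistral,
    raw: true, // required when using custom prompt templates
  })
  .withInstructionPrompt();
```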
@@ -82,5 +82,5 @@ root ::= item+
  # Excludes various line break characters
  item ::= "- " [^\r\n\x0b\x0c\x85\u2028\u2029]+ "\n"
  `;
- var convertJsonSchemaToGBNF_1 = require("./convertJsonSchemaToGBNF");
- Object.defineProperty(exports, "fromJsonSchema", { enumerable: true, get: function () { return convertJsonSchemaToGBNF_1.convertJsonSchemaToGBNF; } });
+ var convertJsonSchemaToGBNF_js_1 = require("./convertJsonSchemaToGBNF.cjs");
+ Object.defineProperty(exports, "fromJsonSchema", { enumerable: true, get: function () { return convertJsonSchemaToGBNF_js_1.convertJsonSchemaToGBNF; } });
@@ -16,4 +16,4 @@ export declare const jsonArray: string;
  * @see https://github.com/ggerganov/llama.cpp/blob/master/grammars/list.gbnf
  */
  export declare const list: string;
- export { convertJsonSchemaToGBNF as fromJsonSchema } from "./convertJsonSchemaToGBNF";
+ export { convertJsonSchemaToGBNF as fromJsonSchema } from "./convertJsonSchemaToGBNF.js";
@@ -79,4 +79,4 @@ root ::= item+
  # Excludes various line break characters
  item ::= "- " [^\r\n\x0b\x0c\x85\u2028\u2029]+ "\n"
  `;
- export { convertJsonSchemaToGBNF as fromJsonSchema } from "./convertJsonSchemaToGBNF";
+ export { convertJsonSchemaToGBNF as fromJsonSchema } from "./convertJsonSchemaToGBNF.js";
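The three hunks above are the v0.119.1 export fix: the GBNF grammar modules now import `convertJsonSchemaToGBNF` with explicit `.cjs`/`.js` extensions, so the `fromJsonSchema` re-export resolves correctly. A hedged usage sketch (the `llamacpp.grammar` facade path and the `getJsonSchema()` call are assumptions; only the `fromJsonSchema` re-export itself appears in this diff):

```ts
import { llamacpp, zodSchema } from "modelfusion";
import { z } from "zod";

// Assumed facade path; converts a JSON schema into a GBNF grammar string
// that llama.cpp can use to constrain generation.
const grammar = llamacpp.grammar.fromJsonSchema(
  zodSchema(z.object({ name: z.string() })).getJsonSchema()
);
```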
@@ -50,6 +50,38 @@ function asLlamaCppTextPromptTemplateProvider(promptTemplateProvider) {
  }
  exports.asLlamaCppTextPromptTemplateProvider = asLlamaCppTextPromptTemplateProvider;
  exports.Text = asLlamaCppTextPromptTemplateProvider(textPrompt);
+ /**
+  * Formats text, instruction or chat prompts as a Mistral instruct prompt.
+  *
+  * Note that Mistral does not support system prompts. We emulate them.
+  *
+  * Text prompt:
+  * ```
+  * <s>[INST] { instruction } [/INST]
+  * ```
+  *
+  * Instruction prompt when system prompt is set:
+  * ```
+  * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
+  * ```
+  *
+  * Instruction prompt template when there is no system prompt:
+  * ```
+  * <s>[INST] ${ instruction } [/INST] ${ response prefix }
+  * ```
+  *
+  * Chat prompt when system prompt is set:
+  * ```
+  * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+  * ```
+  *
+  * Chat prompt when there is no system prompt:
+  * ```
+  * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+  * ```
+  *
+  * @see https://docs.mistral.ai/models/#chat-template
+  */
  exports.Mistral = asLlamaCppTextPromptTemplateProvider(mistralPrompt);
  exports.ChatML = asLlamaCppTextPromptTemplateProvider(chatMlPrompt);
  exports.Llama2 = asLlamaCppTextPromptTemplateProvider(llama2Prompt);
@@ -5,6 +5,38 @@ import { LlamaCppCompletionPrompt } from "./LlamaCppCompletionModel.js";
5
5
  export declare function asLlamaCppPromptTemplate<SOURCE_PROMPT>(promptTemplate: TextGenerationPromptTemplate<SOURCE_PROMPT, string>): TextGenerationPromptTemplate<SOURCE_PROMPT, LlamaCppCompletionPrompt>;
6
6
  export declare function asLlamaCppTextPromptTemplateProvider(promptTemplateProvider: TextGenerationPromptTemplateProvider<string>): TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
7
7
  export declare const Text: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
8
+ /**
9
+ * Formats text, instruction or chat prompts as a Mistral instruct prompt.
10
+ *
11
+ * Note that Mistral does not support system prompts. We emulate them.
12
+ *
13
+ * Text prompt:
14
+ * ```
15
+ * <s>[INST] { instruction } [/INST]
16
+ * ```
17
+ *
18
+ * Instruction prompt when system prompt is set:
19
+ * ```
20
+ * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
21
+ * ```
22
+ *
23
+ * Instruction prompt template when there is no system prompt:
24
+ * ```
25
+ * <s>[INST] ${ instruction } [/INST] ${ response prefix }
26
+ * ```
27
+ *
28
+ * Chat prompt when system prompt is set:
29
+ * ```
30
+ * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
31
+ * ```
32
+ *
33
+ * Chat prompt when there is no system prompt:
34
+ * ```
35
+ * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
36
+ * ```
37
+ *
38
+ * @see https://docs.mistral.ai/models/#chat-template
39
+ */
8
40
  export declare const Mistral: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
9
41
  export declare const ChatML: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
10
42
  export declare const Llama2: TextGenerationPromptTemplateProvider<LlamaCppCompletionPrompt>;
@@ -22,6 +22,38 @@ export function asLlamaCppTextPromptTemplateProvider(promptTemplateProvider) {
      };
  }
  export const Text = asLlamaCppTextPromptTemplateProvider(textPrompt);
+ /**
+  * Formats text, instruction or chat prompts as a Mistral instruct prompt.
+  *
+  * Note that Mistral does not support system prompts. We emulate them.
+  *
+  * Text prompt:
+  * ```
+  * <s>[INST] { instruction } [/INST]
+  * ```
+  *
+  * Instruction prompt when system prompt is set:
+  * ```
+  * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
+  * ```
+  *
+  * Instruction prompt template when there is no system prompt:
+  * ```
+  * <s>[INST] ${ instruction } [/INST] ${ response prefix }
+  * ```
+  *
+  * Chat prompt when system prompt is set:
+  * ```
+  * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+  * ```
+  *
+  * Chat prompt when there is no system prompt:
+  * ```
+  * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+  * ```
+  *
+  * @see https://docs.mistral.ai/models/#chat-template
+  */
  export const Mistral = asLlamaCppTextPromptTemplateProvider(mistralPrompt);
  export const ChatML = asLlamaCppTextPromptTemplateProvider(chatMlPrompt);
  export const Llama2 = asLlamaCppTextPromptTemplateProvider(llama2Prompt);
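
The same `Mistral` provider documentation lands in all three builds (`.cjs`, `.d.ts`, `.js`) of the Llama.cpp prompt facade above. A sketch of how a provider from this module would plug into a llama.cpp completion model, assuming the facade exposes the providers as `llamacpp.prompt.*` in the same way `ollama.prompt.*` does below (the model setup is illustrative and not shown in this diff):

```ts
import { llamacpp, streamText } from "modelfusion";

const model = llamacpp
  .CompletionTextGenerator({
    // Assumed setting, mirroring the Ollama `promptTemplate` setting in this diff:
    promptTemplate: llamacpp.prompt.Mistral,
  })
  .withInstructionPrompt();

const textStream = await streamText(model, {
  instruction: "Write a haiku about unified diffs.",
});
```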
@@ -9,12 +9,14 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
9
9
  const parseJSON_js_1 = require("../../core/schema/parseJSON.cjs");
10
10
  const validateTypes_js_1 = require("../../core/schema/validateTypes.cjs");
11
11
  const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
12
+ const StructureFromTextStreamingModel_js_1 = require("../../model-function/generate-structure/StructureFromTextStreamingModel.cjs");
12
13
  const PromptTemplateTextStreamingModel_js_1 = require("../../model-function/generate-text/PromptTemplateTextStreamingModel.cjs");
13
14
  const TextGenerationModel_js_1 = require("../../model-function/generate-text/TextGenerationModel.cjs");
14
15
  const TextGenerationToolCallModel_js_1 = require("../../tool/generate-tool-call/TextGenerationToolCallModel.cjs");
15
16
  const TextGenerationToolCallsModel_js_1 = require("../../tool/generate-tool-calls/TextGenerationToolCallsModel.cjs");
16
17
  const createJsonStreamResponseHandler_js_1 = require("../../util/streaming/createJsonStreamResponseHandler.cjs");
17
18
  const OllamaApiConfiguration_js_1 = require("./OllamaApiConfiguration.cjs");
19
+ const OllamaCompletionPrompt_js_1 = require("./OllamaCompletionPrompt.cjs");
18
20
  const OllamaError_js_1 = require("./OllamaError.cjs");
19
21
  class OllamaCompletionModel extends AbstractModel_js_1.AbstractModel {
20
22
  constructor(settings) {
@@ -151,6 +153,17 @@ class OllamaCompletionModel extends AbstractModel_js_1.AbstractModel {
          const chunk = delta;
          return chunk.done === true ? undefined : chunk.response;
      }
+     asStructureGenerationModel(promptTemplate) {
+         return "adaptModel" in promptTemplate
+             ? new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+                 model: promptTemplate.adaptModel(this),
+                 template: promptTemplate,
+             })
+             : new StructureFromTextStreamingModel_js_1.StructureFromTextStreamingModel({
+                 model: this,
+                 template: promptTemplate,
+             });
+     }
      asToolCallGenerationModel(promptTemplate) {
          return new TextGenerationToolCallModel_js_1.TextGenerationToolCallModel({
              model: this,
@@ -163,30 +176,20 @@ class OllamaCompletionModel extends AbstractModel_js_1.AbstractModel {
              template: promptTemplate,
          });
      }
+     get promptTemplateProvider() {
+         return this.settings.promptTemplate ?? OllamaCompletionPrompt_js_1.Text;
+     }
      withJsonOutput() {
          return this;
      }
      withTextPrompt() {
-         return this.withPromptTemplate({
-             format(prompt) {
-                 return { prompt };
-             },
-             stopSequences: [],
-         });
+         return this.withPromptTemplate(this.promptTemplateProvider.text());
      }
-     /**
-      * Maps the prompt for a text version of the Ollama completion prompt template (without image support).
-      */
-     withTextPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
-             model: this.withTextPrompt().withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
+     withInstructionPrompt() {
+         return this.withPromptTemplate(this.promptTemplateProvider.instruction());
+     }
+     withChatPrompt() {
+         return this.withPromptTemplate(this.promptTemplateProvider.chat());
      }
      withPromptTemplate(promptTemplate) {
          return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
@@ -3,9 +3,14 @@ import { FunctionCallOptions } from "../../core/FunctionOptions.js";
3
3
  import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
4
4
  import { ResponseHandler } from "../../core/api/postToApi.js";
5
5
  import { AbstractModel } from "../../model-function/AbstractModel.js";
6
+ import { FlexibleStructureFromTextPromptTemplate, StructureFromTextPromptTemplate } from "../../model-function/generate-structure/StructureFromTextPromptTemplate.js";
7
+ import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
6
8
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
7
9
  import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
8
10
  import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
11
+ import { ChatPrompt } from "../../model-function/generate-text/prompt-template/ChatPrompt.js";
12
+ import { InstructionPrompt } from "../../model-function/generate-text/prompt-template/InstructionPrompt.js";
13
+ import { TextGenerationPromptTemplateProvider } from "../../model-function/generate-text/prompt-template/PromptTemplateProvider.js";
9
14
  import { TextGenerationToolCallModel, ToolCallPromptTemplate } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
10
15
  import { TextGenerationToolCallsModel } from "../../tool/generate-tool-calls/TextGenerationToolCallsModel.js";
11
16
  import { ToolCallsPromptTemplate } from "../../tool/generate-tool-calls/ToolCallsPromptTemplate.js";
@@ -39,6 +44,10 @@ export interface OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE extends numbe
39
44
  raw?: boolean;
40
45
  system?: string;
41
46
  context?: number[];
47
+ /**
48
+ * Prompt template provider that is used when calling `.withTextPrompt()`, `withInstructionPrompt()` or `withChatPrompt()`.
49
+ */
50
+ promptTemplate?: TextGenerationPromptTemplateProvider<OllamaCompletionPrompt>;
42
51
  }
43
52
  export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>> {
44
53
  constructor(settings: OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>);
@@ -128,14 +137,14 @@ export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number |
128
137
  context?: number[] | undefined;
129
138
  }>>>;
130
139
  extractTextDelta(delta: unknown): string | undefined;
140
+ asStructureGenerationModel<INPUT_PROMPT, OllamaCompletionPrompt>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, OllamaCompletionPrompt> | FlexibleStructureFromTextPromptTemplate<INPUT_PROMPT, unknown>): StructureFromTextStreamingModel<INPUT_PROMPT, unknown, TextStreamingModel<unknown, import("../../model-function/generate-text/TextGenerationModel.js").TextGenerationModelSettings>> | StructureFromTextStreamingModel<INPUT_PROMPT, OllamaCompletionPrompt, TextStreamingModel<OllamaCompletionPrompt, import("../../model-function/generate-text/TextGenerationModel.js").TextGenerationModelSettings>>;
131
141
  asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaCompletionPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaCompletionPrompt, this>;
132
142
  asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsPromptTemplate<INPUT_PROMPT, OllamaCompletionPrompt>): TextGenerationToolCallsModel<INPUT_PROMPT, OllamaCompletionPrompt, this>;
143
+ private get promptTemplateProvider();
133
144
  withJsonOutput(): this;
134
145
  withTextPrompt(): PromptTemplateTextStreamingModel<string, OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
135
- /**
136
- * Maps the prompt for a text version of the Ollama completion prompt template (without image support).
137
- */
138
- withTextPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, string>): PromptTemplateTextStreamingModel<INPUT_PROMPT, string, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>, PromptTemplateTextStreamingModel<string, OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>>;
146
+ withInstructionPrompt(): PromptTemplateTextStreamingModel<InstructionPrompt, OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
147
+ withChatPrompt(): PromptTemplateTextStreamingModel<ChatPrompt, OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
139
148
  withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OllamaCompletionPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
140
149
  withSettings(additionalSettings: Partial<OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>>): this;
141
150
  }
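
Per the declarations above, `withInstructionPrompt()` and `withChatPrompt()` type their models against modelfusion's shared `InstructionPrompt` and `ChatPrompt` shapes. A minimal sketch of calling the chat variant, assuming the standard `ChatPrompt` shape with an optional `system` field and a `messages` array:

```ts
import { ollama, streamText } from "modelfusion";

const chatModel = ollama
  .CompletionTextGenerator({
    model: "mistral",
    promptTemplate: ollama.prompt.Mistral,
    raw: true,
  })
  .withChatPrompt();

const textStream = await streamText(chatModel, {
  system: "You are a concise assistant.",
  messages: [{ role: "user", content: "What changed in modelfusion 0.120?" }],
});

for await (const chunk of textStream) {
  process.stdout.write(chunk);
}
```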
@@ -6,12 +6,14 @@ import { zodSchema } from "../../core/schema/ZodSchema.js";
6
6
  import { safeParseJSON } from "../../core/schema/parseJSON.js";
7
7
  import { validateTypes } from "../../core/schema/validateTypes.js";
8
8
  import { AbstractModel } from "../../model-function/AbstractModel.js";
9
+ import { StructureFromTextStreamingModel } from "../../model-function/generate-structure/StructureFromTextStreamingModel.js";
9
10
  import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
10
11
  import { textGenerationModelProperties, } from "../../model-function/generate-text/TextGenerationModel.js";
11
12
  import { TextGenerationToolCallModel, } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
12
13
  import { TextGenerationToolCallsModel } from "../../tool/generate-tool-calls/TextGenerationToolCallsModel.js";
13
14
  import { createJsonStreamResponseHandler } from "../../util/streaming/createJsonStreamResponseHandler.js";
14
15
  import { OllamaApiConfiguration } from "./OllamaApiConfiguration.js";
16
+ import { Text } from "./OllamaCompletionPrompt.js";
15
17
  import { failedOllamaCallResponseHandler } from "./OllamaError.js";
16
18
  export class OllamaCompletionModel extends AbstractModel {
17
19
  constructor(settings) {
@@ -148,6 +150,17 @@ export class OllamaCompletionModel extends AbstractModel {
          const chunk = delta;
          return chunk.done === true ? undefined : chunk.response;
      }
+     asStructureGenerationModel(promptTemplate) {
+         return "adaptModel" in promptTemplate
+             ? new StructureFromTextStreamingModel({
+                 model: promptTemplate.adaptModel(this),
+                 template: promptTemplate,
+             })
+             : new StructureFromTextStreamingModel({
+                 model: this,
+                 template: promptTemplate,
+             });
+     }
      asToolCallGenerationModel(promptTemplate) {
          return new TextGenerationToolCallModel({
              model: this,
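
The new `asStructureGenerationModel` branches on whether the template is flexible (it carries an `adaptModel` function that reshapes the model first) or a plain structure-from-text template. The updated tests further down exercise exactly this path; a condensed sketch of that usage (schema and prompt mirror the test):

```ts
import {
  jsonStructurePrompt,
  ollama,
  streamStructure,
  zodSchema,
} from "modelfusion";
import { z } from "zod";

const structureModel = ollama
  .CompletionTextGenerator({
    model: "mistral:text",
    promptTemplate: ollama.prompt.Text,
    format: "json",
    raw: true,
  })
  // jsonStructurePrompt.text() is a flexible template, so it takes the
  // `adaptModel` branch above before wrapping the model.
  .asStructureGenerationModel(jsonStructurePrompt.text());

const stream = await streamStructure(
  structureModel,
  zodSchema(z.object({ name: z.string() })),
  "generate a name"
);
```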
@@ -160,30 +173,20 @@ export class OllamaCompletionModel extends AbstractModel {
              template: promptTemplate,
          });
      }
+     get promptTemplateProvider() {
+         return this.settings.promptTemplate ?? Text;
+     }
      withJsonOutput() {
          return this;
      }
      withTextPrompt() {
-         return this.withPromptTemplate({
-             format(prompt) {
-                 return { prompt };
-             },
-             stopSequences: [],
-         });
+         return this.withPromptTemplate(this.promptTemplateProvider.text());
      }
-     /**
-      * Maps the prompt for a text version of the Ollama completion prompt template (without image support).
-      */
-     withTextPromptTemplate(promptTemplate) {
-         return new PromptTemplateTextStreamingModel({
-             model: this.withTextPrompt().withSettings({
-                 stopSequences: [
-                     ...(this.settings.stopSequences ?? []),
-                     ...promptTemplate.stopSequences,
-                 ],
-             }),
-             promptTemplate,
-         });
+     withInstructionPrompt() {
+         return this.withPromptTemplate(this.promptTemplateProvider.instruction());
+     }
+     withChatPrompt() {
+         return this.withPromptTemplate(this.promptTemplateProvider.chat());
      }
      withPromptTemplate(promptTemplate) {
          return new PromptTemplateTextStreamingModel({
@@ -1,27 +1,4 @@
  "use strict";
- var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
-     if (k2 === undefined) k2 = k;
-     var desc = Object.getOwnPropertyDescriptor(m, k);
-     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
-         desc = { enumerable: true, get: function() { return m[k]; } };
-     }
-     Object.defineProperty(o, k2, desc);
- }) : (function(o, m, k, k2) {
-     if (k2 === undefined) k2 = k;
-     o[k2] = m[k];
- }));
- var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
-     Object.defineProperty(o, "default", { enumerable: true, value: v });
- }) : function(o, v) {
-     o["default"] = v;
- });
- var __importStar = (this && this.__importStar) || function (mod) {
-     if (mod && mod.__esModule) return mod;
-     var result = {};
-     if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
-     __setModuleDefault(result, mod);
-     return result;
- };
  Object.defineProperty(exports, "__esModule", { value: true });
  const assert_1 = require("assert");
  const zod_1 = require("zod");
@@ -31,13 +8,13 @@ const ZodSchema_js_1 = require("../../core/schema/ZodSchema.cjs");
31
8
  const jsonStructurePrompt_js_1 = require("../../model-function/generate-structure/jsonStructurePrompt.cjs");
32
9
  const streamStructure_js_1 = require("../../model-function/generate-structure/streamStructure.cjs");
33
10
  const generateText_js_1 = require("../../model-function/generate-text/generateText.cjs");
34
- const TextPrompt = __importStar(require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs"));
35
11
  const streamText_js_1 = require("../../model-function/generate-text/streamText.cjs");
36
12
  const JsonTestServer_js_1 = require("../../test/JsonTestServer.cjs");
37
13
  const StreamingTestServer_js_1 = require("../../test/StreamingTestServer.cjs");
38
14
  const arrayFromAsync_js_1 = require("../../test/arrayFromAsync.cjs");
39
15
  const OllamaApiConfiguration_js_1 = require("./OllamaApiConfiguration.cjs");
40
16
  const OllamaCompletionModel_js_1 = require("./OllamaCompletionModel.cjs");
17
+ const OllamaCompletionPrompt_js_1 = require("./OllamaCompletionPrompt.cjs");
41
18
  describe("generateText", () => {
42
19
  const server = new JsonTestServer_js_1.JsonTestServer("http://127.0.0.1:11434/api/generate");
43
20
  server.setupTestEnvironment();
@@ -128,11 +105,10 @@ describe("streamStructure", () => {
128
105
  ];
129
106
  const stream = await (0, streamStructure_js_1.streamStructure)(new OllamaCompletionModel_js_1.OllamaCompletionModel({
130
107
  model: "mistral:text",
108
+ promptTemplate: OllamaCompletionPrompt_js_1.Text,
131
109
  format: "json",
132
110
  raw: true,
133
- })
134
- .withTextPromptTemplate(TextPrompt.instruction())
135
- .asStructureGenerationModel(jsonStructurePrompt_js_1.jsonStructurePrompt.text()), (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })), "generate a name");
111
+ }).asStructureGenerationModel(jsonStructurePrompt_js_1.jsonStructurePrompt.text()), (0, ZodSchema_js_1.zodSchema)(zod_1.z.object({ name: zod_1.z.string() })), "generate a name");
136
112
  // note: space moved to last chunk bc of trimming
137
113
  expect(await (0, arrayFromAsync_js_1.arrayFromAsync)(stream)).toStrictEqual([
138
114
  { isComplete: false, value: {} },
@@ -6,13 +6,13 @@ import { zodSchema } from "../../core/schema/ZodSchema.js";
6
6
  import { jsonStructurePrompt } from "../../model-function/generate-structure/jsonStructurePrompt.js";
7
7
  import { streamStructure } from "../../model-function/generate-structure/streamStructure.js";
8
8
  import { generateText } from "../../model-function/generate-text/generateText.js";
9
- import * as TextPrompt from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
10
9
  import { streamText } from "../../model-function/generate-text/streamText.js";
11
10
  import { JsonTestServer } from "../../test/JsonTestServer.js";
12
11
  import { StreamingTestServer } from "../../test/StreamingTestServer.js";
13
12
  import { arrayFromAsync } from "../../test/arrayFromAsync.js";
14
13
  import { OllamaApiConfiguration } from "./OllamaApiConfiguration.js";
15
14
  import { OllamaCompletionModel } from "./OllamaCompletionModel.js";
15
+ import { Text } from "./OllamaCompletionPrompt.js";
16
16
  describe("generateText", () => {
17
17
  const server = new JsonTestServer("http://127.0.0.1:11434/api/generate");
18
18
  server.setupTestEnvironment();
@@ -103,11 +103,10 @@ describe("streamStructure", () => {
103
103
  ];
104
104
  const stream = await streamStructure(new OllamaCompletionModel({
105
105
  model: "mistral:text",
106
+ promptTemplate: Text,
106
107
  format: "json",
107
108
  raw: true,
108
- })
109
- .withTextPromptTemplate(TextPrompt.instruction())
110
- .asStructureGenerationModel(jsonStructurePrompt.text()), zodSchema(z.object({ name: z.string() })), "generate a name");
109
+ }).asStructureGenerationModel(jsonStructurePrompt.text()), zodSchema(z.object({ name: z.string() })), "generate a name");
111
110
  // note: space moved to last chunk bc of trimming
112
111
  expect(await arrayFromAsync(stream)).toStrictEqual([
113
112
  { isComplete: false, value: {} },
@@ -0,0 +1,89 @@
+ "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+     Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+     o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+     if (mod && mod.__esModule) return mod;
+     var result = {};
+     if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+     __setModuleDefault(result, mod);
+     return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.Vicuna = exports.Alpaca = exports.NeuralChat = exports.Llama2 = exports.ChatML = exports.Mistral = exports.Text = exports.asOllamaCompletionTextPromptTemplateProvider = exports.asOllamaCompletionPromptTemplate = void 0;
+ const alpacaPrompt = __importStar(require("../../model-function/generate-text/prompt-template/AlpacaPromptTemplate.cjs"));
+ const chatMlPrompt = __importStar(require("../../model-function/generate-text/prompt-template/ChatMLPromptTemplate.cjs"));
+ const llama2Prompt = __importStar(require("../../model-function/generate-text/prompt-template/Llama2PromptTemplate.cjs"));
+ const mistralPrompt = __importStar(require("../../model-function/generate-text/prompt-template/MistralInstructPromptTemplate.cjs"));
+ const neuralChatPrompt = __importStar(require("../../model-function/generate-text/prompt-template/NeuralChatPromptTemplate.cjs"));
+ const textPrompt = __importStar(require("../../model-function/generate-text/prompt-template/TextPromptTemplate.cjs"));
+ const vicunaPrompt = __importStar(require("../../model-function/generate-text/prompt-template/VicunaPromptTemplate.cjs"));
+ function asOllamaCompletionPromptTemplate(promptTemplate) {
+     return {
+         format: (prompt) => ({
+             prompt: promptTemplate.format(prompt),
+         }),
+         stopSequences: promptTemplate.stopSequences,
+     };
+ }
+ exports.asOllamaCompletionPromptTemplate = asOllamaCompletionPromptTemplate;
+ function asOllamaCompletionTextPromptTemplateProvider(promptTemplateProvider) {
+     return {
+         text: () => asOllamaCompletionPromptTemplate(promptTemplateProvider.text()),
+         instruction: () => asOllamaCompletionPromptTemplate(promptTemplateProvider.instruction()),
+         chat: () => asOllamaCompletionPromptTemplate(promptTemplateProvider.chat()),
+     };
+ }
+ exports.asOllamaCompletionTextPromptTemplateProvider = asOllamaCompletionTextPromptTemplateProvider;
+ exports.Text = asOllamaCompletionTextPromptTemplateProvider(textPrompt);
+ /**
+  * Formats text, instruction or chat prompts as a Mistral instruct prompt.
+  *
+  * Note that Mistral does not support system prompts. We emulate them.
+  *
+  * Text prompt:
+  * ```
+  * <s>[INST] { instruction } [/INST]
+  * ```
+  *
+  * Instruction prompt when system prompt is set:
+  * ```
+  * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
+  * ```
+  *
+  * Instruction prompt template when there is no system prompt:
+  * ```
+  * <s>[INST] ${ instruction } [/INST] ${ response prefix }
+  * ```
+  *
+  * Chat prompt when system prompt is set:
+  * ```
+  * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+  * ```
+  *
+  * Chat prompt when there is no system prompt:
+  * ```
+  * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+  * ```
+  *
+  * @see https://docs.mistral.ai/models/#chat-template
+  */
+ exports.Mistral = asOllamaCompletionTextPromptTemplateProvider(mistralPrompt);
+ exports.ChatML = asOllamaCompletionTextPromptTemplateProvider(chatMlPrompt);
+ exports.Llama2 = asOllamaCompletionTextPromptTemplateProvider(llama2Prompt);
+ exports.NeuralChat = asOllamaCompletionTextPromptTemplateProvider(neuralChatPrompt);
+ exports.Alpaca = asOllamaCompletionTextPromptTemplateProvider(alpacaPrompt);
+ exports.Vicuna = asOllamaCompletionTextPromptTemplateProvider(vicunaPrompt);
@@ -0,0 +1,44 @@
+ import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+ import { TextGenerationPromptTemplateProvider } from "../../model-function/generate-text/prompt-template/PromptTemplateProvider.js";
+ import { OllamaCompletionPrompt } from "./OllamaCompletionModel.js";
+ export declare function asOllamaCompletionPromptTemplate<SOURCE_PROMPT>(promptTemplate: TextGenerationPromptTemplate<SOURCE_PROMPT, string>): TextGenerationPromptTemplate<SOURCE_PROMPT, OllamaCompletionPrompt>;
+ export declare function asOllamaCompletionTextPromptTemplateProvider(promptTemplateProvider: TextGenerationPromptTemplateProvider<string>): TextGenerationPromptTemplateProvider<OllamaCompletionPrompt>;
+ export declare const Text: TextGenerationPromptTemplateProvider<OllamaCompletionPrompt>;
+ /**
+  * Formats text, instruction or chat prompts as a Mistral instruct prompt.
+  *
+  * Note that Mistral does not support system prompts. We emulate them.
+  *
+  * Text prompt:
+  * ```
+  * <s>[INST] { instruction } [/INST]
+  * ```
+  *
+  * Instruction prompt when system prompt is set:
+  * ```
+  * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
+  * ```
+  *
+  * Instruction prompt template when there is no system prompt:
+  * ```
+  * <s>[INST] ${ instruction } [/INST] ${ response prefix }
+  * ```
+  *
+  * Chat prompt when system prompt is set:
+  * ```
+  * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+  * ```
+  *
+  * Chat prompt when there is no system prompt:
+  * ```
+  * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+  * ```
+  *
+  * @see https://docs.mistral.ai/models/#chat-template
+  */
+ export declare const Mistral: TextGenerationPromptTemplateProvider<OllamaCompletionPrompt>;
+ export declare const ChatML: TextGenerationPromptTemplateProvider<OllamaCompletionPrompt>;
+ export declare const Llama2: TextGenerationPromptTemplateProvider<OllamaCompletionPrompt>;
+ export declare const NeuralChat: TextGenerationPromptTemplateProvider<OllamaCompletionPrompt>;
+ export declare const Alpaca: TextGenerationPromptTemplateProvider<OllamaCompletionPrompt>;
+ export declare const Vicuna: TextGenerationPromptTemplateProvider<OllamaCompletionPrompt>;
@@ -0,0 +1,61 @@
+ import * as alpacaPrompt from "../../model-function/generate-text/prompt-template/AlpacaPromptTemplate.js";
+ import * as chatMlPrompt from "../../model-function/generate-text/prompt-template/ChatMLPromptTemplate.js";
+ import * as llama2Prompt from "../../model-function/generate-text/prompt-template/Llama2PromptTemplate.js";
+ import * as mistralPrompt from "../../model-function/generate-text/prompt-template/MistralInstructPromptTemplate.js";
+ import * as neuralChatPrompt from "../../model-function/generate-text/prompt-template/NeuralChatPromptTemplate.js";
+ import * as textPrompt from "../../model-function/generate-text/prompt-template/TextPromptTemplate.js";
+ import * as vicunaPrompt from "../../model-function/generate-text/prompt-template/VicunaPromptTemplate.js";
+ export function asOllamaCompletionPromptTemplate(promptTemplate) {
+     return {
+         format: (prompt) => ({
+             prompt: promptTemplate.format(prompt),
+         }),
+         stopSequences: promptTemplate.stopSequences,
+     };
+ }
+ export function asOllamaCompletionTextPromptTemplateProvider(promptTemplateProvider) {
+     return {
+         text: () => asOllamaCompletionPromptTemplate(promptTemplateProvider.text()),
+         instruction: () => asOllamaCompletionPromptTemplate(promptTemplateProvider.instruction()),
+         chat: () => asOllamaCompletionPromptTemplate(promptTemplateProvider.chat()),
+     };
+ }
+ export const Text = asOllamaCompletionTextPromptTemplateProvider(textPrompt);
+ /**
+  * Formats text, instruction or chat prompts as a Mistral instruct prompt.
+  *
+  * Note that Mistral does not support system prompts. We emulate them.
+  *
+  * Text prompt:
+  * ```
+  * <s>[INST] { instruction } [/INST]
+  * ```
+  *
+  * Instruction prompt when system prompt is set:
+  * ```
+  * <s>[INST] ${ system prompt } [/INST] </s>[INST] ${instruction} [/INST] ${ response prefix }
+  * ```
+  *
+  * Instruction prompt template when there is no system prompt:
+  * ```
+  * <s>[INST] ${ instruction } [/INST] ${ response prefix }
+  * ```
+  *
+  * Chat prompt when system prompt is set:
+  * ```
+  * <s>[INST] ${ system prompt } [/INST] </s> [INST] ${ user msg 1 } [/INST] ${ model response 1 } [INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+  * ```
+  *
+  * Chat prompt when there is no system prompt:
+  * ```
+  * <s>[INST] ${ user msg 1 } [/INST] ${ model response 1 } </s>[INST] ${ user msg 2 } [/INST] ${ model response 2 } [INST] ${ user msg 3 } [/INST]
+  * ```
+  *
+  * @see https://docs.mistral.ai/models/#chat-template
+  */
+ export const Mistral = asOllamaCompletionTextPromptTemplateProvider(mistralPrompt);
+ export const ChatML = asOllamaCompletionTextPromptTemplateProvider(chatMlPrompt);
+ export const Llama2 = asOllamaCompletionTextPromptTemplateProvider(llama2Prompt);
+ export const NeuralChat = asOllamaCompletionTextPromptTemplateProvider(neuralChatPrompt);
+ export const Alpaca = asOllamaCompletionTextPromptTemplateProvider(alpacaPrompt);
+ export const Vicuna = asOllamaCompletionTextPromptTemplateProvider(vicunaPrompt);
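
Since `asOllamaCompletionPromptTemplate` is part of the new module's public surface, a custom text-level template can be lifted into the `{ prompt }` shape the Ollama completion API expects. A hypothetical sketch (the `headlineTemplate` itself is invented for illustration):

```ts
import { ollama } from "modelfusion";

// A made-up text-to-string template: format() returns the final prompt string.
const headlineTemplate = {
  format: (prompt: string) => `### Instruction:\n${prompt}\n### Response:\n`,
  stopSequences: ["### Instruction:"],
};

const model = ollama
  .CompletionTextGenerator({ model: "mistral", raw: true })
  .withPromptTemplate(
    // Wraps the string result into { prompt }, as shown in the module above.
    ollama.prompt.asOllamaCompletionPromptTemplate(headlineTemplate)
  );
```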
@@ -1,6 +1,29 @@
  "use strict";
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+     Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+     o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || function (mod) {
+     if (mod && mod.__esModule) return mod;
+     var result = {};
+     if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
+     __setModuleDefault(result, mod);
+     return result;
+ };
  Object.defineProperty(exports, "__esModule", { value: true });
- exports.TextEmbedder = exports.ChatTextGenerator = exports.CompletionTextGenerator = exports.Api = void 0;
+ exports.prompt = exports.TextEmbedder = exports.ChatTextGenerator = exports.CompletionTextGenerator = exports.Api = void 0;
  const OllamaApiConfiguration_js_1 = require("./OllamaApiConfiguration.cjs");
  const OllamaChatModel_js_1 = require("./OllamaChatModel.cjs");
  const OllamaCompletionModel_js_1 = require("./OllamaCompletionModel.cjs");
@@ -25,3 +48,4 @@ function TextEmbedder(settings) {
      return new OllamaTextEmbeddingModel_js_1.OllamaTextEmbeddingModel(settings);
  }
  exports.TextEmbedder = TextEmbedder;
+ exports.prompt = __importStar(require("./OllamaCompletionPrompt.cjs"));
@@ -12,3 +12,4 @@ export declare function CompletionTextGenerator<CONTEXT_WINDOW_SIZE extends numb
12
12
  export declare function ChatTextGenerator(settings: OllamaChatModelSettings): OllamaChatModel;
13
13
  export declare function TextEmbedder(settings: OllamaTextEmbeddingModelSettings): OllamaTextEmbeddingModel;
14
14
  export { OllamaChatMessage as ChatMessage, OllamaChatPrompt as ChatPrompt, } from "./OllamaChatModel.js";
15
+ export * as prompt from "./OllamaCompletionPrompt.js";
@@ -18,3 +18,4 @@ export function ChatTextGenerator(settings) {
  export function TextEmbedder(settings) {
      return new OllamaTextEmbeddingModel(settings);
  }
+ export * as prompt from "./OllamaCompletionPrompt.js";
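
The `export * as prompt` re-exports above are what make the changelog's `ollama.prompt.*` paths resolve. A short sketch of selecting different built-in providers (model names are illustrative):

```ts
import { ollama } from "modelfusion";

// Llama 2 chat formatting:
const llama2Model = ollama.CompletionTextGenerator({
  model: "llama2",
  promptTemplate: ollama.prompt.Llama2,
  raw: true, // required when using custom prompt templates
});

// ChatML formatting, e.g. for OpenHermes-style models:
const chatMlModel = ollama.CompletionTextGenerator({
  model: "openhermes",
  promptTemplate: ollama.prompt.ChatML,
  raw: true,
});
```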
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "modelfusion",
    "description": "The TypeScript library for building AI applications.",
-   "version": "0.119.0",
+   "version": "0.120.0",
    "author": "Lars Grammel",
    "license": "MIT",
    "keywords": [