modelfusion 0.13.0 → 0.15.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. package/README.md +16 -10
  2. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.cjs +1 -1
  3. package/composed-function/summarize/summarizeRecursivelyWithTextGenerationAndTokenSplitting.js +1 -1
  4. package/model-function/Model.d.ts +2 -2
  5. package/model-function/generate-text/TextGenerationModel.d.ts +18 -18
  6. package/model-function/generate-text/generateText.cjs +2 -2
  7. package/model-function/generate-text/generateText.js +2 -2
  8. package/model-provider/cohere/CohereTextGenerationModel.cjs +19 -20
  9. package/model-provider/cohere/CohereTextGenerationModel.d.ts +4 -9
  10. package/model-provider/cohere/CohereTextGenerationModel.js +19 -20
  11. package/model-provider/huggingface/HuggingFaceTextGenerationModel.cjs +13 -18
  12. package/model-provider/huggingface/HuggingFaceTextGenerationModel.d.ts +4 -8
  13. package/model-provider/huggingface/HuggingFaceTextGenerationModel.js +13 -18
  14. package/model-provider/llamacpp/LlamaCppTextGenerationModel.cjs +16 -16
  15. package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts +9 -14
  16. package/model-provider/llamacpp/LlamaCppTextGenerationModel.js +16 -16
  17. package/model-provider/openai/OpenAITextGenerationModel.cjs +20 -18
  18. package/model-provider/openai/OpenAITextGenerationModel.d.ts +4 -9
  19. package/model-provider/openai/OpenAITextGenerationModel.js +20 -18
  20. package/model-provider/openai/chat/OpenAIChatModel.cjs +15 -18
  21. package/model-provider/openai/chat/OpenAIChatModel.d.ts +5 -8
  22. package/model-provider/openai/chat/OpenAIChatModel.js +15 -18
  23. package/package.json +3 -3
  24. package/prompt/{AlpacaPromptMapping.cjs → AlpacaPromptFormat.cjs} +6 -6
  25. package/prompt/{AlpacaPromptMapping.d.ts → AlpacaPromptFormat.d.ts} +3 -3
  26. package/prompt/{AlpacaPromptMapping.js → AlpacaPromptFormat.js} +4 -4
  27. package/prompt/{Llama2PromptMapping.cjs → Llama2PromptFormat.cjs} +13 -10
  28. package/prompt/Llama2PromptFormat.d.ts +13 -0
  29. package/prompt/{Llama2PromptMapping.js → Llama2PromptFormat.js} +10 -7
  30. package/prompt/{OpenAIChatPromptMapping.cjs → OpenAIChatPromptFormat.cjs} +15 -9
  31. package/prompt/OpenAIChatPromptFormat.d.ts +12 -0
  32. package/prompt/{OpenAIChatPromptMapping.js → OpenAIChatPromptFormat.js} +12 -6
  33. package/prompt/PromptFormat.d.ts +14 -0
  34. package/prompt/{PromptMappingTextGenerationModel.cjs → PromptFormatTextGenerationModel.cjs} +19 -28
  35. package/prompt/{PromptMappingTextGenerationModel.d.ts → PromptFormatTextGenerationModel.d.ts} +6 -9
  36. package/prompt/{PromptMappingTextGenerationModel.js → PromptFormatTextGenerationModel.js} +15 -32
  37. package/prompt/{TextPromptMapping.cjs → TextPromptFormat.cjs} +13 -10
  38. package/prompt/TextPromptFormat.d.ts +17 -0
  39. package/prompt/{TextPromptMapping.js → TextPromptFormat.js} +10 -7
  40. package/prompt/{VicunaPromptMapping.cjs → VicunaPromptFormat.cjs} +6 -6
  41. package/prompt/{VicunaPromptMapping.d.ts → VicunaPromptFormat.d.ts} +3 -3
  42. package/prompt/{VicunaPromptMapping.js → VicunaPromptFormat.js} +4 -4
  43. package/prompt/chat/trimChatPrompt.cjs +2 -2
  44. package/prompt/chat/trimChatPrompt.d.ts +1 -1
  45. package/prompt/chat/trimChatPrompt.js +2 -2
  46. package/prompt/index.cjs +7 -7
  47. package/prompt/index.d.ts +7 -7
  48. package/prompt/index.js +7 -7
  49. package/tool/WebSearchTool.cjs +7 -28
  50. package/tool/WebSearchTool.d.ts +6 -67
  51. package/tool/WebSearchTool.js +7 -28
  52. package/tool/executeTool.cjs +1 -0
  53. package/tool/executeTool.d.ts +5 -4
  54. package/tool/executeTool.js +1 -0
  55. package/prompt/Llama2PromptMapping.d.ts +0 -10
  56. package/prompt/OpenAIChatPromptMapping.d.ts +0 -6
  57. package/prompt/PromptMapping.d.ts +0 -7
  58. package/prompt/TextPromptMapping.d.ts +0 -14
  59. /package/prompt/{PromptMapping.cjs → PromptFormat.cjs} +0 -0
  60. /package/prompt/{PromptMapping.js → PromptFormat.js} +0 -0
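
Taken together, the changes below rename the "prompt mapping" concept to "prompt format": PromptMapping becomes PromptFormat, mapPrompt() becomes withPromptFormat(), stopTokens becomes stopSequences, and the provider-specific maxTokens/nPredict settings give way to the generic maxCompletionTokens. A minimal migration sketch in TypeScript, assembled from the declarations in this diff (the usage itself is hypothetical, not copied from the package README):

import { AlpacaInstructionPromptFormat, LlamaCppTextGenerationModel } from "modelfusion";

// 0.13.0 API (removed in this release):
// const model = new LlamaCppTextGenerationModel({})
//   .withMaxCompletionTokens(256)
//   .mapPrompt(InstructionToAlpacaPromptMapping());

// 0.15.0 API: plain settings replace the fluent setters,
// and withPromptFormat() replaces mapPrompt().
const model = new LlamaCppTextGenerationModel({
  maxCompletionTokens: 256, // mapped to llama.cpp's nPredict in callAPI
}).withPromptFormat(AlpacaInstructionPromptFormat());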
package/model-provider/llamacpp/LlamaCppTextGenerationModel.d.ts CHANGED
@@ -3,8 +3,8 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { FunctionOptions } from "../../model-function/FunctionOptions.js";
 import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 import { ResponseHandler } from "../../util/api/postToApi.js";
@@ -25,9 +25,7 @@ export interface LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends
     temperature?: number;
     topK?: number;
     topP?: number;
-    nPredict?: number;
     nKeep?: number;
-    stop?: string[];
     tfsZ?: number;
     typicalP?: number;
     repeatPenalty?: number;
@@ -59,8 +57,8 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
         model: string;
         stream: boolean;
         seed: number;
-        stop: string[];
         mirostat: number;
+        stop: string[];
         frequency_penalty: number;
         ignore_eos: boolean;
         logit_bias: number[];
@@ -102,11 +100,8 @@ export declare class LlamaCppTextGenerationModel<CONTEXT_WINDOW_SIZE extends num
     extractText(response: LlamaCppTextGenerationResponse): string;
     generateDeltaStreamResponse(prompt: string, options?: FunctionOptions<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): Promise<AsyncIterable<DeltaEvent<LlamaCppTextGenerationDelta>>>;
     extractTextDelta(fullDelta: LlamaCppTextGenerationDelta): string | undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, LlamaCppTextGenerationResponse, LlamaCppTextGenerationDelta, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, LlamaCppTextGenerationResponse, LlamaCppTextGenerationDelta, LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withSettings(additionalSettings: Partial<LlamaCppTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
-    get maxCompletionTokens(): number | undefined;
-    withMaxCompletionTokens(maxCompletionTokens: number): this;
-    withStopTokens(stopTokens: string[]): this;
 }
 declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
     content: z.ZodString;
@@ -139,8 +134,8 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
     model: string;
     stream: boolean;
     seed: number;
-    stop: string[];
     mirostat: number;
+    stop: string[];
     frequency_penalty: number;
     ignore_eos: boolean;
     logit_bias: number[];
@@ -163,8 +158,8 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
     model: string;
     stream: boolean;
     seed: number;
-    stop: string[];
     mirostat: number;
+    stop: string[];
     frequency_penalty: number;
     ignore_eos: boolean;
     logit_bias: number[];
@@ -231,8 +226,8 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
     model: string;
     stream: boolean;
     seed: number;
-    stop: string[];
     mirostat: number;
+    stop: string[];
     frequency_penalty: number;
     ignore_eos: boolean;
     logit_bias: number[];
@@ -279,8 +274,8 @@ declare const llamaCppTextGenerationResponseSchema: z.ZodObject<{
     model: string;
     stream: boolean;
     seed: number;
-    stop: string[];
     mirostat: number;
+    stop: string[];
     frequency_penalty: number;
     ignore_eos: boolean;
     logit_bias: number[];
@@ -344,8 +339,8 @@ export declare const LlamaCppTextGenerationResponseFormat: {
     model: string;
     stream: boolean;
     seed: number;
-    stop: string[];
     mirostat: number;
+    stop: string[];
     frequency_penalty: number;
     ignore_eos: boolean;
     logit_bias: number[];
package/model-provider/llamacpp/LlamaCppTextGenerationModel.js CHANGED
@@ -3,7 +3,7 @@ import z from "zod";
 import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { AsyncQueue } from "../../model-function/generate-text/AsyncQueue.js";
 import { parseEventSourceReadableStream } from "../../model-function/generate-text/parseEventSourceReadableStream.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../util/api/postToApi.js";
 import { failedLlamaCppCallResponseHandler } from "./LlamaCppError.js";
@@ -37,11 +37,18 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
     }
     async callAPI(prompt, options) {
         const { run, settings, responseFormat } = options;
-        const callSettings = Object.assign(this.settings, settings, {
+        const combinedSettings = {
+            ...this.settings,
+            ...settings,
+        };
+        const callSettings = {
+            ...combinedSettings,
+            nPredict: combinedSettings.maxCompletionTokens,
+            stop: combinedSettings.stopSequences,
             abortSignal: run?.abortSignal,
             prompt,
             responseFormat,
-        });
+        };
         return callWithRetryAndThrottle({
             retry: this.settings.retry,
             throttle: this.settings.throttle,
@@ -70,24 +77,17 @@ export class LlamaCppTextGenerationModel extends AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta.delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withSettings({
+                stopSequences: promptFormat.stopSequences,
+            }),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
         return new LlamaCppTextGenerationModel(Object.assign({}, this.settings, additionalSettings));
     }
-    get maxCompletionTokens() {
-        return this.settings.nPredict;
-    }
-    withMaxCompletionTokens(maxCompletionTokens) {
-        return this.withSettings({ nPredict: maxCompletionTokens });
-    }
-    withStopTokens(stopTokens) {
-        return this.withSettings({ stop: stopTokens });
-    }
 }
 const llamaCppTextGenerationResponseSchema = z.object({
     content: z.string(),
package/model-provider/openai/OpenAITextGenerationModel.cjs CHANGED
@@ -10,7 +10,7 @@ const AbstractModel_js_1 = require("../../model-function/AbstractModel.cjs");
 const AsyncQueue_js_1 = require("../../model-function/generate-text/AsyncQueue.cjs");
 const parseEventSourceReadableStream_js_1 = require("../../model-function/generate-text/parseEventSourceReadableStream.cjs");
 const countTokens_js_1 = require("../../model-function/tokenize-text/countTokens.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../util/api/postToApi.cjs");
 const OpenAIError_js_1 = require("./OpenAIError.cjs");
@@ -75,7 +75,7 @@ exports.calculateOpenAITextGenerationCostInMillicents = calculateOpenAITextGener
  * const model = new OpenAITextGenerationModel({
  *   model: "text-davinci-003",
  *   temperature: 0.7,
- *   maxTokens: 500,
+ *   maxCompletionTokens: 500,
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
@@ -124,14 +124,23 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
     }
     async callAPI(prompt, options) {
         const { run, settings, responseFormat } = options;
-        const callSettings = Object.assign({
+        const combinedSettings = {
+            ...this.settings,
+            ...settings,
+        };
+        const callSettings = {
             apiKey: this.apiKey,
             user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
-        }, this.settings, settings, {
+            // Copied settings:
+            ...combinedSettings,
+            // map to OpenAI API names:
+            stop: combinedSettings.stopSequences,
+            maxTokens: combinedSettings.maxCompletionTokens,
+            // other settings:
             abortSignal: run?.abortSignal,
             prompt,
             responseFormat,
-        });
+        };
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
             retry: callSettings.retry,
             throttle: callSettings.throttle,
@@ -156,24 +165,17 @@ class OpenAITextGenerationModel extends AbstractModel_js_1.AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta[0].delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
+            model: this.withSettings({
+                stopSequences: promptFormat.stopSequences,
+            }),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
         return new OpenAITextGenerationModel(Object.assign({}, this.settings, additionalSettings));
     }
-    get maxCompletionTokens() {
-        return this.settings.maxTokens;
-    }
-    withMaxCompletionTokens(maxCompletionTokens) {
-        return this.withSettings({ maxTokens: maxCompletionTokens });
-    }
-    withStopTokens(stopTokens) {
-        return this.withSettings({ stop: stopTokens });
-    }
 }
 exports.OpenAITextGenerationModel = OpenAITextGenerationModel;
 const openAITextGenerationResponseSchema = zod_1.default.object({
package/model-provider/openai/OpenAITextGenerationModel.d.ts CHANGED
@@ -3,8 +3,8 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { FunctionOptions } from "../../model-function/FunctionOptions.js";
 import { DeltaEvent } from "../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { RetryFunction } from "../../util/api/RetryFunction.js";
 import { ThrottleFunction } from "../../util/api/ThrottleFunction.js";
 import { ResponseHandler } from "../../util/api/postToApi.js";
@@ -72,13 +72,11 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
     throttle?: ThrottleFunction;
     isUserIdForwardingEnabled?: boolean;
     suffix?: string;
-    maxTokens?: number;
     temperature?: number;
     topP?: number;
     n?: number;
     logprobs?: number;
     echo?: boolean;
-    stop?: string | string[];
     presencePenalty?: number;
     frequencyPenalty?: number;
     bestOf?: number;
@@ -92,7 +90,7 @@ export interface OpenAITextGenerationModelSettings extends TextGenerationModelSe
  * const model = new OpenAITextGenerationModel({
  *   model: "text-davinci-003",
  *   temperature: 0.7,
- *   maxTokens: 500,
+ *   maxCompletionTokens: 500,
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
@@ -134,11 +132,8 @@ export declare class OpenAITextGenerationModel extends AbstractModel<OpenAITextG
     extractText(response: OpenAITextGenerationResponse): string;
     generateDeltaStreamResponse(prompt: string, options?: FunctionOptions<OpenAITextGenerationModelSettings>): Promise<AsyncIterable<DeltaEvent<OpenAITextGenerationDelta>>>;
     extractTextDelta(fullDelta: OpenAITextGenerationDelta): string | undefined;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, string>): PromptMappingTextGenerationModel<INPUT_PROMPT, string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, string>): PromptFormatTextGenerationModel<INPUT_PROMPT, string, OpenAITextGenerationResponse, OpenAITextGenerationDelta, OpenAITextGenerationModelSettings, this>;
     withSettings(additionalSettings: Partial<OpenAITextGenerationModelSettings>): this;
-    get maxCompletionTokens(): number | undefined;
-    withMaxCompletionTokens(maxCompletionTokens: number): this;
-    withStopTokens(stopTokens: string[]): this;
 }
 declare const openAITextGenerationResponseSchema: z.ZodObject<{
     id: z.ZodString;
package/model-provider/openai/OpenAITextGenerationModel.js CHANGED
@@ -4,7 +4,7 @@ import { AbstractModel } from "../../model-function/AbstractModel.js";
 import { AsyncQueue } from "../../model-function/generate-text/AsyncQueue.js";
 import { parseEventSourceReadableStream } from "../../model-function/generate-text/parseEventSourceReadableStream.js";
 import { countTokens } from "../../model-function/tokenize-text/countTokens.js";
-import { PromptMappingTextGenerationModel } from "../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../util/api/postToApi.js";
 import { failedOpenAICallResponseHandler } from "./OpenAIError.js";
@@ -67,7 +67,7 @@ export const calculateOpenAITextGenerationCostInMillicents = ({ model, response,
  * const model = new OpenAITextGenerationModel({
  *   model: "text-davinci-003",
  *   temperature: 0.7,
- *   maxTokens: 500,
+ *   maxCompletionTokens: 500,
  *   retry: retryWithExponentialBackoff({ maxTries: 5 }),
  * });
  *
@@ -116,14 +116,23 @@ export class OpenAITextGenerationModel extends AbstractModel {
     }
     async callAPI(prompt, options) {
         const { run, settings, responseFormat } = options;
-        const callSettings = Object.assign({
+        const combinedSettings = {
+            ...this.settings,
+            ...settings,
+        };
+        const callSettings = {
             apiKey: this.apiKey,
             user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
-        }, this.settings, settings, {
+            // Copied settings:
+            ...combinedSettings,
+            // map to OpenAI API names:
+            stop: combinedSettings.stopSequences,
+            maxTokens: combinedSettings.maxCompletionTokens,
+            // other settings:
             abortSignal: run?.abortSignal,
             prompt,
             responseFormat,
-        });
+        };
         return callWithRetryAndThrottle({
             retry: callSettings.retry,
             throttle: callSettings.throttle,
@@ -148,24 +157,17 @@ export class OpenAITextGenerationModel extends AbstractModel {
     extractTextDelta(fullDelta) {
         return fullDelta[0].delta;
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withSettings({
+                stopSequences: promptFormat.stopSequences,
+            }),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
         return new OpenAITextGenerationModel(Object.assign({}, this.settings, additionalSettings));
     }
-    get maxCompletionTokens() {
-        return this.settings.maxTokens;
-    }
-    withMaxCompletionTokens(maxCompletionTokens) {
-        return this.withSettings({ maxTokens: maxCompletionTokens });
-    }
-    withStopTokens(stopTokens) {
-        return this.withSettings({ stop: stopTokens });
-    }
 }
 const openAITextGenerationResponseSchema = z.object({
     id: z.string(),
package/model-provider/openai/chat/OpenAIChatModel.cjs CHANGED
@@ -7,7 +7,7 @@ exports.OpenAIChatResponseFormat = exports.OpenAIChatModel = exports.calculateOp
 const secure_json_parse_1 = __importDefault(require("secure-json-parse"));
 const zod_1 = __importDefault(require("zod"));
 const AbstractModel_js_1 = require("../../../model-function/AbstractModel.cjs");
-const PromptMappingTextGenerationModel_js_1 = require("../../../prompt/PromptMappingTextGenerationModel.cjs");
+const PromptFormatTextGenerationModel_js_1 = require("../../../prompt/PromptFormatTextGenerationModel.cjs");
 const callWithRetryAndThrottle_js_1 = require("../../../util/api/callWithRetryAndThrottle.cjs");
 const postToApi_js_1 = require("../../../util/api/postToApi.cjs");
 const OpenAIError_js_1 = require("../OpenAIError.cjs");
@@ -93,7 +93,7 @@ exports.calculateOpenAIChatCostInMillicents = calculateOpenAIChatCostInMillicent
  * const model = new OpenAIChatModel({
  *   model: "gpt-3.5-turbo",
  *   temperature: 0.7,
- *   maxTokens: 500,
+ *   maxCompletionTokens: 500,
  * });
  *
 * const text = await generateText([
@@ -150,14 +150,20 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
     }
     async callAPI(messages, options) {
         const { run, settings, responseFormat } = options;
-        const callSettings = Object.assign({
+        const combinedSettings = {
+            ...this.settings,
+            ...settings,
+        };
+        const callSettings = {
             apiKey: this.apiKey,
             user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
-        }, this.settings, settings, {
+            ...combinedSettings,
+            stop: combinedSettings.stopSequences,
+            maxTokens: combinedSettings.maxCompletionTokens,
             abortSignal: run?.abortSignal,
             messages,
             responseFormat,
-        });
+        };
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
             retry: callSettings.retry,
             throttle: callSettings.throttle,
@@ -205,24 +211,15 @@ class OpenAIChatModel extends AbstractModel_js_1.AbstractModel {
         const jsonText = response.choices[0].message.function_call.arguments;
         return secure_json_parse_1.default.parse(jsonText);
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel_js_1.PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel_js_1.PromptFormatTextGenerationModel({
+            model: this.withSettings({ stopSequences: promptFormat.stopSequences }),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
        return new OpenAIChatModel(Object.assign({}, this.settings, additionalSettings));
     }
-    get maxCompletionTokens() {
-        return this.settings.maxTokens;
-    }
-    withMaxCompletionTokens(maxCompletionTokens) {
-        return this.withSettings({ maxTokens: maxCompletionTokens });
-    }
-    withStopTokens(stopTokens) {
-        return this.withSettings({ stop: stopTokens });
-    }
 }
 exports.OpenAIChatModel = OpenAIChatModel;
 const openAIChatResponseSchema = zod_1.default.object({
package/model-provider/openai/chat/OpenAIChatModel.d.ts CHANGED
@@ -5,8 +5,8 @@ import { GenerateJsonModel } from "../../../model-function/generate-json/Generat
 import { GenerateJsonOrTextModel } from "../../../model-function/generate-json/GenerateJsonOrTextModel.js";
 import { DeltaEvent } from "../../../model-function/generate-text/DeltaEvent.js";
 import { TextGenerationModel, TextGenerationModelSettings } from "../../../model-function/generate-text/TextGenerationModel.js";
-import { PromptMapping } from "../../../prompt/PromptMapping.js";
-import { PromptMappingTextGenerationModel } from "../../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormat } from "../../../prompt/PromptFormat.js";
+import { PromptFormatTextGenerationModel } from "../../../prompt/PromptFormatTextGenerationModel.js";
 import { ResponseHandler } from "../../../util/api/postToApi.js";
 import { OpenAIModelSettings } from "../OpenAIModelSettings.js";
 import { TikTokenTokenizer } from "../TikTokenTokenizer.js";
@@ -95,7 +95,7 @@ export interface OpenAIChatCallSettings {
     presencePenalty?: number;
     frequencyPenalty?: number;
 }
-export interface OpenAIChatSettings extends TextGenerationModelSettings, OpenAIModelSettings, OpenAIChatCallSettings {
+export interface OpenAIChatSettings extends TextGenerationModelSettings, OpenAIModelSettings, Omit<OpenAIChatCallSettings, "stop" | "maxTokens"> {
     isUserIdForwardingEnabled?: boolean;
 }
 /**
@@ -107,7 +107,7 @@ export interface OpenAIChatSettings extends TextGenerationModelSettings, OpenAIM
  * const model = new OpenAIChatModel({
  *   model: "gpt-3.5-turbo",
  *   temperature: 0.7,
- *   maxTokens: 500,
+ *   maxCompletionTokens: 500,
  * });
 *
 * const text = await generateText([
@@ -170,11 +170,8 @@ export declare class OpenAIChatModel extends AbstractModel<OpenAIChatSettings> i
      */
    generateJsonResponse(prompt: OpenAIChatSingleFunctionPrompt<unknown> | OpenAIChatAutoFunctionPrompt<Array<OpenAIFunctionDescription<unknown>>>, options?: FunctionOptions<OpenAIChatSettings> | undefined): PromiseLike<OpenAIChatResponse>;
    extractJson(response: OpenAIChatResponse): unknown;
-    mapPrompt<INPUT_PROMPT>(promptMapping: PromptMapping<INPUT_PROMPT, OpenAIChatMessage[]>): PromptMappingTextGenerationModel<INPUT_PROMPT, OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings, this>;
+    withPromptFormat<INPUT_PROMPT>(promptFormat: PromptFormat<INPUT_PROMPT, OpenAIChatMessage[]>): PromptFormatTextGenerationModel<INPUT_PROMPT, OpenAIChatMessage[], OpenAIChatResponse, OpenAIChatDelta, OpenAIChatSettings, this>;
     withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
-    get maxCompletionTokens(): number | undefined;
-    withMaxCompletionTokens(maxCompletionTokens: number): this;
-    withStopTokens(stopTokens: string[]): this;
 }
 declare const openAIChatResponseSchema: z.ZodObject<{
     id: z.ZodString;
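
Note the Omit above: OpenAIChatSettings no longer accepts the raw OpenAI parameter names "stop" and "maxTokens"; callers use the generic stopSequences and maxCompletionTokens, which callAPI translates back to the OpenAI names. A hedged construction example based on the updated doc comment (the stopSequences value is illustrative, not from the source):

const model = new OpenAIChatModel({
  model: "gpt-3.5-turbo",
  temperature: 0.7,
  maxCompletionTokens: 500, // sent to the OpenAI API as maxTokens
  stopSequences: ["\n\n"],  // sent to the OpenAI API as stop
});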
package/model-provider/openai/chat/OpenAIChatModel.js CHANGED
@@ -1,7 +1,7 @@
 import SecureJSON from "secure-json-parse";
 import z from "zod";
 import { AbstractModel } from "../../../model-function/AbstractModel.js";
-import { PromptMappingTextGenerationModel } from "../../../prompt/PromptMappingTextGenerationModel.js";
+import { PromptFormatTextGenerationModel } from "../../../prompt/PromptFormatTextGenerationModel.js";
 import { callWithRetryAndThrottle } from "../../../util/api/callWithRetryAndThrottle.js";
 import { createJsonResponseHandler, postJsonToApi, } from "../../../util/api/postToApi.js";
 import { failedOpenAICallResponseHandler } from "../OpenAIError.js";
@@ -85,7 +85,7 @@ export const calculateOpenAIChatCostInMillicents = ({ model, response, }) => res
  * const model = new OpenAIChatModel({
  *   model: "gpt-3.5-turbo",
  *   temperature: 0.7,
- *   maxTokens: 500,
+ *   maxCompletionTokens: 500,
  * });
 *
 * const text = await generateText([
@@ -142,14 +142,20 @@ export class OpenAIChatModel extends AbstractModel {
     }
     async callAPI(messages, options) {
         const { run, settings, responseFormat } = options;
-        const callSettings = Object.assign({
+        const combinedSettings = {
+            ...this.settings,
+            ...settings,
+        };
+        const callSettings = {
             apiKey: this.apiKey,
             user: this.settings.isUserIdForwardingEnabled ? run?.userId : undefined,
-        }, this.settings, settings, {
+            ...combinedSettings,
+            stop: combinedSettings.stopSequences,
+            maxTokens: combinedSettings.maxCompletionTokens,
             abortSignal: run?.abortSignal,
             messages,
             responseFormat,
-        });
+        };
         return callWithRetryAndThrottle({
             retry: callSettings.retry,
             throttle: callSettings.throttle,
@@ -197,24 +203,15 @@ export class OpenAIChatModel extends AbstractModel {
         const jsonText = response.choices[0].message.function_call.arguments;
         return SecureJSON.parse(jsonText);
     }
-    mapPrompt(promptMapping) {
-        return new PromptMappingTextGenerationModel({
-            model: this.withStopTokens(promptMapping.stopTokens),
-            promptMapping,
+    withPromptFormat(promptFormat) {
+        return new PromptFormatTextGenerationModel({
+            model: this.withSettings({ stopSequences: promptFormat.stopSequences }),
+            promptFormat,
         });
     }
     withSettings(additionalSettings) {
         return new OpenAIChatModel(Object.assign({}, this.settings, additionalSettings));
     }
-    get maxCompletionTokens() {
-        return this.settings.maxTokens;
-    }
-    withMaxCompletionTokens(maxCompletionTokens) {
-        return this.withSettings({ maxTokens: maxCompletionTokens });
-    }
-    withStopTokens(stopTokens) {
-        return this.withSettings({ stop: stopTokens });
-    }
 }
 const openAIChatResponseSchema = z.object({
     id: z.string(),
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "modelfusion",
   "description": "Build AI applications, chatbots, and agents with JavaScript and TypeScript.",
-  "version": "0.13.0",
+  "version": "0.15.0",
   "author": "Lars Grammel",
   "license": "MIT",
   "keywords": [
@@ -67,11 +67,11 @@
     "eslint": "^8.45.0",
     "eslint-config-prettier": "9.0.0",
     "husky": "^8.0.3",
-    "lint-staged": "14.0.0",
+    "lint-staged": "14.0.1",
     "prettier": "3.0.2",
     "rimraf": "5.0.1",
     "typescript": "5.1.6",
-    "zod": "3.22.1",
+    "zod": "3.22.2",
     "zod-to-json-schema": "3.21.4"
   },
   "peerDependencies": {
package/prompt/{AlpacaPromptMapping.cjs → AlpacaPromptFormat.cjs} RENAMED
@@ -1,19 +1,19 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.InstructionToAlpacaPromptMapping = void 0;
+exports.AlpacaInstructionPromptFormat = void 0;
 const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
 const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
 /**
- * Maps an instruction prompt to the Alpaca prompt format.
+ * Formats an instruction prompt as an Alpaca prompt.
  *
  * If the instruction has a system prompt, it overrides the default system prompt
  * (which can impact the results, because the model may be trained on the default system prompt).
  *
  * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
  */
-const InstructionToAlpacaPromptMapping = () => ({
-    stopTokens: [],
-    map: (instruction) => {
+const AlpacaInstructionPromptFormat = () => ({
+    stopSequences: [],
+    format: (instruction) => {
         let text = instruction.system ??
             (instruction.input != null
                 ? DEFAULT_SYSTEM_PROMPT_INPUT
@@ -30,4 +30,4 @@ const InstructionToAlpacaPromptMapping = () => ({
         return text;
     },
 });
-exports.InstructionToAlpacaPromptMapping = InstructionToAlpacaPromptMapping;
+exports.AlpacaInstructionPromptFormat = AlpacaInstructionPromptFormat;
package/prompt/{AlpacaPromptMapping.d.ts → AlpacaPromptFormat.d.ts} RENAMED
@@ -1,11 +1,11 @@
 import { InstructionPrompt } from "./InstructionPrompt.js";
-import { PromptMapping } from "./PromptMapping.js";
+import { PromptFormat } from "./PromptFormat.js";
 /**
- * Maps an instruction prompt to the Alpaca prompt format.
+ * Formats an instruction prompt as an Alpaca prompt.
  *
  * If the instruction has a system prompt, it overrides the default system prompt
  * (which can impact the results, because the model may be trained on the default system prompt).
  *
  * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
  */
-export declare const InstructionToAlpacaPromptMapping: () => PromptMapping<InstructionPrompt, string>;
+export declare const AlpacaInstructionPromptFormat: () => PromptFormat<InstructionPrompt, string>;
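
The shape shown here — a stopSequences array plus a format function — is the whole PromptFormat contract, so custom formats are straightforward to define. A hedged sketch of a user-defined format, assuming PromptFormat<INPUT, string> is importable as declared above (the Q/A template itself is made up for illustration):

import { PromptFormat } from "modelfusion";

// Hypothetical custom format: wraps a plain question in a Q/A template.
const QuestionAnswerPromptFormat = (): PromptFormat<string, string> => ({
  // Stop before the model starts inventing the next question.
  stopSequences: ["\nQuestion:"],
  format: (question) => `Question: ${question}\nAnswer:`,
});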
package/prompt/{AlpacaPromptMapping.js → AlpacaPromptFormat.js} RENAMED
@@ -1,16 +1,16 @@
 const DEFAULT_SYSTEM_PROMPT_INPUT = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.";
 const DEFAULT_SYSTEM_PROMPT_NO_INPUT = "Below is an instruction that describes a task. Write a response that appropriately completes the request.";
 /**
- * Maps an instruction prompt to the Alpaca prompt format.
+ * Formats an instruction prompt as an Alpaca prompt.
  *
  * If the instruction has a system prompt, it overrides the default system prompt
  * (which can impact the results, because the model may be trained on the default system prompt).
  *
  * @see https://github.com/tatsu-lab/stanford_alpaca#data-release
  */
-export const InstructionToAlpacaPromptMapping = () => ({
-    stopTokens: [],
-    map: (instruction) => {
+export const AlpacaInstructionPromptFormat = () => ({
+    stopSequences: [],
+    format: (instruction) => {
         let text = instruction.system ??
             (instruction.input != null
                 ? DEFAULT_SYSTEM_PROMPT_INPUT