modelfusion 0.96.0 → 0.98.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +11 -4
- package/model-function/embed/embed.cjs +14 -2
- package/model-function/embed/embed.d.ts +6 -6
- package/model-function/embed/embed.js +14 -2
- package/model-function/generate-image/generateImage.cjs +10 -9
- package/model-function/generate-image/generateImage.d.ts +4 -6
- package/model-function/generate-image/generateImage.js +10 -9
- package/model-function/generate-speech/generateSpeech.cjs +7 -1
- package/model-function/generate-speech/generateSpeech.d.ts +3 -3
- package/model-function/generate-speech/generateSpeech.js +7 -1
- package/model-function/generate-speech/streamSpeech.cjs +6 -1
- package/model-function/generate-speech/streamSpeech.d.ts +3 -3
- package/model-function/generate-speech/streamSpeech.js +6 -1
- package/model-function/generate-structure/StructureFromTextGenerationModel.cjs +5 -5
- package/model-function/generate-structure/StructureFromTextGenerationModel.js +5 -5
- package/model-function/generate-structure/StructureFromTextStreamingModel.cjs +5 -5
- package/model-function/generate-structure/StructureFromTextStreamingModel.js +5 -5
- package/model-function/generate-structure/generateStructure.cjs +7 -1
- package/model-function/generate-structure/generateStructure.d.ts +3 -3
- package/model-function/generate-structure/generateStructure.js +7 -1
- package/model-function/generate-structure/streamStructure.cjs +6 -1
- package/model-function/generate-structure/streamStructure.d.ts +3 -3
- package/model-function/generate-structure/streamStructure.js +6 -1
- package/model-function/generate-text/generateText.cjs +7 -1
- package/model-function/generate-text/generateText.d.ts +3 -3
- package/model-function/generate-text/generateText.js +7 -1
- package/model-function/generate-text/streamText.cjs +6 -1
- package/model-function/generate-text/streamText.d.ts +3 -3
- package/model-function/generate-text/streamText.js +6 -1
- package/model-function/generate-transcription/generateTranscription.cjs +1 -1
- package/model-function/generate-transcription/generateTranscription.d.ts +2 -2
- package/model-function/generate-transcription/generateTranscription.js +1 -1
- package/model-provider/cohere/CohereTextEmbeddingModel.d.ts +3 -3
- package/model-provider/ollama/OllamaTextGenerationModel.cjs +60 -57
- package/model-provider/ollama/OllamaTextGenerationModel.d.ts +33 -22
- package/model-provider/ollama/OllamaTextGenerationModel.js +60 -57
- package/model-provider/ollama/OllamaTextGenerationModel.test.cjs +2 -2
- package/model-provider/ollama/OllamaTextGenerationModel.test.js +2 -2
- package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs +1 -1
- package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +49 -0
- package/model-provider/openai/chat/AbstractOpenAIChatModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.test.cjs +61 -0
- package/model-provider/openai/chat/OpenAIChatModel.test.d.ts +1 -0
- package/model-provider/openai/chat/OpenAIChatModel.test.js +59 -0
- package/model-provider/openai/chat/OpenAIChatStreamIterable.cjs +8 -3
- package/model-provider/openai/chat/OpenAIChatStreamIterable.d.ts +1 -1
- package/model-provider/openai/chat/OpenAIChatStreamIterable.js +8 -3
- package/package.json +1 -1
- package/tool/execute-tool/executeTool.cjs +1 -1
- package/tool/execute-tool/executeTool.d.ts +2 -2
- package/tool/execute-tool/executeTool.js +1 -1
- package/tool/generate-tool-call/TextGenerationToolCallModel.cjs +4 -4
- package/tool/generate-tool-call/TextGenerationToolCallModel.js +4 -4
- package/tool/generate-tool-call/generateToolCall.cjs +7 -1
- package/tool/generate-tool-call/generateToolCall.d.ts +3 -3
- package/tool/generate-tool-call/generateToolCall.js +7 -1
- package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.cjs +4 -4
- package/tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js +4 -4
- package/tool/generate-tool-calls-or-text/generateToolCallsOrText.cjs +1 -1
- package/tool/generate-tool-calls-or-text/generateToolCallsOrText.d.ts +2 -2
- package/tool/generate-tool-calls-or-text/generateToolCallsOrText.js +1 -1
- package/tool/use-tools-or-generate-text/useToolsOrGenerateText.cjs +1 -1
- package/tool/use-tools-or-generate-text/useToolsOrGenerateText.js +1 -1
package/model-provider/cohere/CohereTextEmbeddingModel.d.ts

@@ -74,6 +74,7 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
     get settingsForEvent(): Partial<CohereTextEmbeddingModelSettings>;
     doEmbedValues(texts: string[], options?: FunctionOptions): Promise<{
         response: {
+            embeddings: number[][];
             id: string;
             meta: {
                 api_version: {
@@ -81,7 +82,6 @@ export declare class CohereTextEmbeddingModel extends AbstractModel<CohereTextEm
                 };
             };
             texts: string[];
-            embeddings: number[][];
         };
         embeddings: number[][];
     }>;
@@ -109,6 +109,7 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
        };
    }>;
 }, "strip", z.ZodTypeAny, {
+    embeddings: number[][];
     id: string;
     meta: {
         api_version: {
@@ -116,8 +117,8 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
         };
     };
     texts: string[];
-    embeddings: number[][];
 }, {
+    embeddings: number[][];
     id: string;
     meta: {
         api_version: {
@@ -125,7 +126,6 @@ declare const cohereTextEmbeddingResponseSchema: z.ZodObject<{
         };
     };
     texts: string[];
-    embeddings: number[][];
 }>;
 export type CohereTextEmbeddingResponse = z.infer<typeof cohereTextEmbeddingResponseSchema>;
 export {};
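
Note: this is a declaration-only change — `embeddings` moves earlier in the inferred response type; the runtime response shape is unchanged. A minimal sketch of reading it, assuming the top-level `CohereTextEmbeddingModel` export and the `doEmbedValues` signature shown above (model name and API key setup are illustrative, not part of the diff):

import { CohereTextEmbeddingModel } from "modelfusion";

const model = new CohereTextEmbeddingModel({ model: "embed-english-light-v2.0" });

// doEmbedValues still resolves to { response, embeddings } as declared above:
const { response, embeddings } = await model.doEmbedValues(["hello world"]);
console.log(response.embeddings[0].length); // dimension of the first embedding
console.log(embeddings[0].length);          // same data via the top-level field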
package/model-provider/ollama/OllamaTextGenerationModel.cjs

@@ -44,15 +44,47 @@ class OllamaTextGenerationModel extends AbstractModel_js_1.AbstractModel {
         return this.settings.contextWindowSize;
     }
     async callAPI(prompt, options) {
+        const { responseFormat } = options;
+        const api = this.settings.api ?? new OllamaApiConfiguration_js_1.OllamaApiConfiguration();
+        const abortSignal = options.run?.abortSignal;
         return (0, callWithRetryAndThrottle_js_1.callWithRetryAndThrottle)({
-            retry:
-            throttle:
-            call: async () =>
-
-
-
-
-
+            retry: api.retry,
+            throttle: api.throttle,
+            call: async () => (0, postToApi_js_1.postJsonToApi)({
+                url: api.assembleUrl(`/api/generate`),
+                headers: api.headers,
+                body: {
+                    stream: responseFormat.stream,
+                    model: this.settings.model,
+                    prompt: prompt.prompt,
+                    images: prompt.images,
+                    format: this.settings.format,
+                    options: {
+                        mirostat: this.settings.mirostat,
+                        mirostat_eta: this.settings.mirostatEta,
+                        mirostat_tau: this.settings.mirostatTau,
+                        num_ctx: this.settings.contextWindowSize,
+                        num_gpu: this.settings.numGpu,
+                        num_gqa: this.settings.numGqa,
+                        num_predict: this.settings.maxCompletionTokens,
+                        num_threads: this.settings.numThreads,
+                        repeat_last_n: this.settings.repeatLastN,
+                        repeat_penalty: this.settings.repeatPenalty,
+                        seed: this.settings.seed,
+                        stop: this.settings.stopSequences,
+                        temperature: this.settings.temperature,
+                        tfs_z: this.settings.tfsZ,
+                        top_k: this.settings.topK,
+                        top_p: this.settings.topP,
+                    },
+                    system: this.settings.system,
+                    template: this.settings.template,
+                    context: this.settings.context,
+                    raw: this.settings.raw,
+                },
+                failedResponseHandler: OllamaError_js_1.failedOllamaCallResponseHandler,
+                successfulResponseHandler: responseFormat.handler,
+                abortSignal,
             }),
         });
     }
@@ -63,17 +95,17 @@ class OllamaTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             "contextWindowSize",
             "temperature",
             "mirostat",
-            "
-            "
-            "
-            "
-            "
-            "
-            "
+            "mirostatEta",
+            "mirostatTau",
+            "numGqa",
+            "numGpu",
+            "numThreads",
+            "repeatLastN",
+            "repeatPenalty",
             "seed",
-            "
-            "
-            "
+            "tfsZ",
+            "topK",
+            "topP",
             "system",
             "template",
             "context",
@@ -110,6 +142,14 @@ class OllamaTextGenerationModel extends AbstractModel_js_1.AbstractModel {
             template: promptTemplate,
         });
     }
+    withTextPrompt() {
+        return this.withPromptTemplate({
+            format(prompt) {
+                return { prompt: prompt };
+            },
+            stopSequences: [],
+        });
+    }
     withPromptTemplate(promptTemplate) {
         return new PromptTemplateTextStreamingModel_js_1.PromptTemplateTextStreamingModel({
             model: this.withSettings({
@@ -131,7 +171,7 @@ const ollamaTextGenerationResponseSchema = zod_1.z.object({
     model: zod_1.z.string(),
     response: zod_1.z.string(),
     total_duration: zod_1.z.number(),
-    load_duration: zod_1.z.number(),
+    load_duration: zod_1.z.number().optional(),
     prompt_eval_count: zod_1.z.number(),
     eval_count: zod_1.z.number(),
     eval_duration: zod_1.z.number(),
@@ -149,7 +189,7 @@ const ollamaTextStreamingResponseSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.d
     model: zod_1.z.string(),
     created_at: zod_1.z.string(),
     total_duration: zod_1.z.number(),
-    load_duration: zod_1.z.number(),
+    load_duration: zod_1.z.number().optional(),
     sample_count: zod_1.z.number().optional(),
     sample_duration: zod_1.z.number().optional(),
     prompt_eval_count: zod_1.z.number(),
@@ -159,43 +199,6 @@ const ollamaTextStreamingResponseSchema = new ZodSchema_js_1.ZodSchema(zod_1.z.d
     context: zod_1.z.array(zod_1.z.number()).optional(),
 }),
 ]));
-async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration_js_1.OllamaApiConfiguration(), abortSignal, responseFormat, prompt, model, format, contextWindowSize, maxCompletionTokens, mirostat, mirostat_eta, mirostat_tau, num_gpu, num_gqa, num_threads, repeat_last_n, repeat_penalty, seed, stopSequences, temperature, tfs_z, top_k, top_p, system, template, context, raw, }) {
-    return (0, postToApi_js_1.postJsonToApi)({
-        url: api.assembleUrl(`/api/generate`),
-        headers: api.headers,
-        body: {
-            stream: responseFormat.stream,
-            model,
-            prompt,
-            format,
-            options: {
-                mirostat,
-                mirostat_eta,
-                mirostat_tau,
-                num_ctx: contextWindowSize,
-                num_gpu,
-                num_gqa,
-                num_predict: maxCompletionTokens,
-                num_threads,
-                repeat_last_n,
-                repeat_penalty,
-                seed,
-                stop: stopSequences,
-                temperature,
-                tfs_z,
-                top_k,
-                top_p,
-            },
-            system,
-            template,
-            context,
-            raw,
-        },
-        failedResponseHandler: OllamaError_js_1.failedOllamaCallResponseHandler,
-        successfulResponseHandler: responseFormat.handler,
-        abortSignal,
-    });
-}
 async function createOllamaFullDeltaIterableQueue(stream) {
     const queue = new AsyncQueue_js_1.AsyncQueue();
     let accumulatedText = "";
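
The standalone `callOllamaTextGenerationAPI` helper is inlined into `callAPI`, and the request body now maps the renamed camelCase settings onto Ollama's snake_case options (see the new `settingsForEvent` keys above). A hedged sketch of constructing the model with the renamed options — the model name is illustrative and the values are the defaults quoted in the setting docs:

import { OllamaTextGenerationModel } from "modelfusion";

const model = new OllamaTextGenerationModel({
  model: "llama2",    // any locally available Ollama model (assumption)
  mirostatEta: 0.1,
  mirostatTau: 5.0,
  repeatPenalty: 1.1,
  topK: 40,
  topP: 0.9,
});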
package/model-provider/ollama/OllamaTextGenerationModel.d.ts

@@ -40,39 +40,39 @@ export interface OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends n
      * A lower learning rate will result in slower adjustments,
      * while a higher learning rate will make the algorithm more responsive. (Default: 0.1)
      */
-
+    mirostatEta?: number;
     /**
      * Controls the balance between coherence and diversity of the output.
      * A lower value will result in more focused and coherent text. (Default: 5.0)
      */
-
+    mirostatTau?: number;
     /**
      * The number of GQA groups in the transformer layer. Required for some models,
      * for example it is 8 for llama2:70b
      */
-
+    numGqa?: number;
     /**
      * The number of layers to send to the GPU(s). On macOS it defaults to 1 to
      * enable metal support, 0 to disable.
      */
-
+    numGpu?: number;
     /**
      * Sets the number of threads to use during computation. By default, Ollama will
      * detect this for optimal performance. It is recommended to set this value to the
      * number of physical CPU cores your system has (as opposed to the logical number of cores).
      */
-
+    numThreads?: number;
     /**
      * Sets how far back for the model to look back to prevent repetition.
      * (Default: 64, 0 = disabled, -1 = num_ctx)
      */
-
+    repeatLastN?: number;
     /**
      * Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
      * will penalize repetitions more strongly, while a lower value (e.g., 0.9)
      * will be more lenient. (Default: 1.1)
      */
-
+    repeatPenalty?: number;
     /**
      * Sets the random number seed to use for generation. Setting this to a
      * specific number will make the model generate the same text for the same prompt.
@@ -84,19 +84,19 @@ export interface OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends n
      * from the output. A higher value (e.g., 2.0) will reduce the impact more,
      * while a value of 1.0 disables this setting. (default: 1)
      */
-
+    tfsZ?: number;
     /**
      * Reduces the probability of generating nonsense. A higher value (e.g. 100)
      * will give more diverse answers, while a lower value (e.g. 10) will be more
      * conservative. (Default: 40)
      */
-
+    topK?: number;
     /**
      * Works together with top-k. A higher value (e.g., 0.95) will lead to more
      * diverse text, while a lower value (e.g., 0.5) will generate more focused
      * and conservative text. (Default: 0.9)
      */
-
+    topP?: number;
     /**
      * When set to true, no formatting will be applied to the prompt and no context
      * will be returned.
@@ -111,35 +111,46 @@ export interface OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE extends n
     template?: string;
     context?: number[];
 }
-export
+export interface OllamaTextGenerationPrompt {
+    /**
+     * Text prompt.
+     */
+    prompt: string;
+    /**
+     Images. Supports base64-encoded `png` and `jpeg` images up to 100MB in size.
+     */
+    images?: Record<number, string>;
+}
+export declare class OllamaTextGenerationModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<OllamaTextGenerationPrompt, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>> {
     constructor(settings: OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>);
     readonly provider = "ollama";
     get modelName(): string;
     readonly tokenizer: undefined;
     readonly countPromptTokens: undefined;
     get contextWindowSize(): CONTEXT_WINDOW_SIZE;
-    callAPI<RESPONSE>(prompt:
+    callAPI<RESPONSE>(prompt: OllamaTextGenerationPrompt, options: {
         responseFormat: OllamaTextGenerationResponseFormatType<RESPONSE>;
     } & FunctionOptions): Promise<RESPONSE>;
     get settingsForEvent(): Partial<OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>;
-    doGenerateText(prompt:
+    doGenerateText(prompt: OllamaTextGenerationPrompt, options?: FunctionOptions): Promise<{
         response: {
             response: string;
             model: string;
             done: true;
             total_duration: number;
-            load_duration: number;
             prompt_eval_count: number;
             eval_count: number;
             eval_duration: number;
+            load_duration?: number | undefined;
             context?: number[] | undefined;
         };
         text: string;
     }>;
-    doStreamText(prompt:
-    asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT,
-    asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsOrGenerateTextPromptTemplate<INPUT_PROMPT,
-
+    doStreamText(prompt: OllamaTextGenerationPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+    asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaTextGenerationPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaTextGenerationPrompt, this>;
+    asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsOrGenerateTextPromptTemplate<INPUT_PROMPT, OllamaTextGenerationPrompt>): TextGenerationToolCallsOrGenerateTextModel<INPUT_PROMPT, OllamaTextGenerationPrompt, this>;
+    withTextPrompt(): PromptTemplateTextStreamingModel<string, OllamaTextGenerationPrompt, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OllamaTextGenerationPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OllamaTextGenerationPrompt, OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>, this>;
     withSettings(additionalSettings: Partial<OllamaTextGenerationModelSettings<CONTEXT_WINDOW_SIZE>>): this;
 }
 declare const ollamaTextGenerationResponseSchema: z.ZodObject<{
@@ -147,7 +158,7 @@ declare const ollamaTextGenerationResponseSchema: z.ZodObject<{
     model: z.ZodString;
     response: z.ZodString;
     total_duration: z.ZodNumber;
-    load_duration: z.ZodNumber;
+    load_duration: z.ZodOptional<z.ZodNumber>;
     prompt_eval_count: z.ZodNumber;
     eval_count: z.ZodNumber;
     eval_duration: z.ZodNumber;
@@ -157,20 +168,20 @@ declare const ollamaTextGenerationResponseSchema: z.ZodObject<{
     model: string;
     done: true;
     total_duration: number;
-    load_duration: number;
     prompt_eval_count: number;
     eval_count: number;
     eval_duration: number;
+    load_duration?: number | undefined;
     context?: number[] | undefined;
 }, {
     response: string;
     model: string;
     done: true;
     total_duration: number;
-    load_duration: number;
     prompt_eval_count: number;
     eval_count: number;
     eval_duration: number;
+    load_duration?: number | undefined;
     context?: number[] | undefined;
 }>;
 export type OllamaTextGenerationResponse = z.infer<typeof ollamaTextGenerationResponseSchema>;
@@ -198,10 +209,10 @@ export declare const OllamaTextGenerationResponseFormat: {
     model: string;
     done: true;
     total_duration: number;
-    load_duration: number;
     prompt_eval_count: number;
     eval_count: number;
     eval_duration: number;
+    load_duration?: number | undefined;
     context?: number[] | undefined;
 }>;
 };
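
Taken together, the Ollama declarations introduce a structured `OllamaTextGenerationPrompt` (text plus optional images) and a `withTextPrompt()` helper for plain string prompts. A hedged usage sketch, assuming the top-level `generateText` and `OllamaTextGenerationModel` exports; the string-prompt call mirrors the updated tests, and the model names and base64 placeholder are illustrative:

import { generateText, OllamaTextGenerationModel } from "modelfusion";

// Plain string prompts now go through withTextPrompt(), exactly as in the updated tests:
const haiku = await generateText(
  new OllamaTextGenerationModel({ model: "llama2" }).withTextPrompt(),
  "Write a haiku about refactoring."
);

// The raw model accepts the new structured prompt, including optional images
// (base64-encoded png/jpeg, keyed by index per the declared Record<number, string> type):
const base64Png = "..."; // placeholder for an actual base64-encoded image
const description = await generateText(
  new OllamaTextGenerationModel({ model: "llava" }), // assumption: a multimodal Ollama model
  { prompt: "Describe this image.", images: { 0: base64Png } }
);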
package/model-provider/ollama/OllamaTextGenerationModel.js

@@ -41,15 +41,47 @@ export class OllamaTextGenerationModel extends AbstractModel {
         return this.settings.contextWindowSize;
     }
     async callAPI(prompt, options) {
+        const { responseFormat } = options;
+        const api = this.settings.api ?? new OllamaApiConfiguration();
+        const abortSignal = options.run?.abortSignal;
         return callWithRetryAndThrottle({
-            retry:
-            throttle:
-            call: async () =>
-
-
-
-
-
+            retry: api.retry,
+            throttle: api.throttle,
+            call: async () => postJsonToApi({
+                url: api.assembleUrl(`/api/generate`),
+                headers: api.headers,
+                body: {
+                    stream: responseFormat.stream,
+                    model: this.settings.model,
+                    prompt: prompt.prompt,
+                    images: prompt.images,
+                    format: this.settings.format,
+                    options: {
+                        mirostat: this.settings.mirostat,
+                        mirostat_eta: this.settings.mirostatEta,
+                        mirostat_tau: this.settings.mirostatTau,
+                        num_ctx: this.settings.contextWindowSize,
+                        num_gpu: this.settings.numGpu,
+                        num_gqa: this.settings.numGqa,
+                        num_predict: this.settings.maxCompletionTokens,
+                        num_threads: this.settings.numThreads,
+                        repeat_last_n: this.settings.repeatLastN,
+                        repeat_penalty: this.settings.repeatPenalty,
+                        seed: this.settings.seed,
+                        stop: this.settings.stopSequences,
+                        temperature: this.settings.temperature,
+                        tfs_z: this.settings.tfsZ,
+                        top_k: this.settings.topK,
+                        top_p: this.settings.topP,
+                    },
+                    system: this.settings.system,
+                    template: this.settings.template,
+                    context: this.settings.context,
+                    raw: this.settings.raw,
+                },
+                failedResponseHandler: failedOllamaCallResponseHandler,
+                successfulResponseHandler: responseFormat.handler,
+                abortSignal,
             }),
         });
     }
@@ -60,17 +92,17 @@ export class OllamaTextGenerationModel extends AbstractModel {
             "contextWindowSize",
             "temperature",
             "mirostat",
-            "
-            "
-            "
-            "
-            "
-            "
-            "
+            "mirostatEta",
+            "mirostatTau",
+            "numGqa",
+            "numGpu",
+            "numThreads",
+            "repeatLastN",
+            "repeatPenalty",
             "seed",
-            "
-            "
-            "
+            "tfsZ",
+            "topK",
+            "topP",
             "system",
             "template",
             "context",
@@ -107,6 +139,14 @@ export class OllamaTextGenerationModel extends AbstractModel {
             template: promptTemplate,
         });
     }
+    withTextPrompt() {
+        return this.withPromptTemplate({
+            format(prompt) {
+                return { prompt: prompt };
+            },
+            stopSequences: [],
+        });
+    }
     withPromptTemplate(promptTemplate) {
         return new PromptTemplateTextStreamingModel({
             model: this.withSettings({
@@ -127,7 +167,7 @@ const ollamaTextGenerationResponseSchema = z.object({
     model: z.string(),
     response: z.string(),
     total_duration: z.number(),
-    load_duration: z.number(),
+    load_duration: z.number().optional(),
     prompt_eval_count: z.number(),
     eval_count: z.number(),
     eval_duration: z.number(),
@@ -145,7 +185,7 @@ const ollamaTextStreamingResponseSchema = new ZodSchema(z.discriminatedUnion("do
     model: z.string(),
     created_at: z.string(),
     total_duration: z.number(),
-    load_duration: z.number(),
+    load_duration: z.number().optional(),
     sample_count: z.number().optional(),
     sample_duration: z.number().optional(),
     prompt_eval_count: z.number(),
@@ -155,43 +195,6 @@ const ollamaTextStreamingResponseSchema = new ZodSchema(z.discriminatedUnion("do
     context: z.array(z.number()).optional(),
 }),
 ]));
-async function callOllamaTextGenerationAPI({ api = new OllamaApiConfiguration(), abortSignal, responseFormat, prompt, model, format, contextWindowSize, maxCompletionTokens, mirostat, mirostat_eta, mirostat_tau, num_gpu, num_gqa, num_threads, repeat_last_n, repeat_penalty, seed, stopSequences, temperature, tfs_z, top_k, top_p, system, template, context, raw, }) {
-    return postJsonToApi({
-        url: api.assembleUrl(`/api/generate`),
-        headers: api.headers,
-        body: {
-            stream: responseFormat.stream,
-            model,
-            prompt,
-            format,
-            options: {
-                mirostat,
-                mirostat_eta,
-                mirostat_tau,
-                num_ctx: contextWindowSize,
-                num_gpu,
-                num_gqa,
-                num_predict: maxCompletionTokens,
-                num_threads,
-                repeat_last_n,
-                repeat_penalty,
-                seed,
-                stop: stopSequences,
-                temperature,
-                tfs_z,
-                top_k,
-                top_p,
-            },
-            system,
-            template,
-            context,
-            raw,
-        },
-        failedResponseHandler: failedOllamaCallResponseHandler,
-        successfulResponseHandler: responseFormat.handler,
-        abortSignal,
-    });
-}
 async function createOllamaFullDeltaIterableQueue(stream) {
     const queue = new AsyncQueue();
     let accumulatedText = "";
package/model-provider/ollama/OllamaTextGenerationModel.test.cjs

@@ -36,7 +36,7 @@ describe("generateText", () => {
         };
         const result = await (0, generateText_js_1.generateText)(new OllamaTextGenerationModel_js_1.OllamaTextGenerationModel({
             model: "test-model",
-        }), "test prompt");
+        }).withTextPrompt(), "test prompt");
         expect(result).toEqual("test response");
     });
     it("should throw retryable ApiCallError when Ollama is overloaded", async () => {
@@ -52,7 +52,7 @@ describe("generateText", () => {
                 retry: (0, retryNever_js_1.retryNever)(),
             }),
             model: "test-model",
-        }), "test prompt");
+        }).withTextPrompt(), "test prompt");
            (0, assert_1.fail)("Should have thrown ApiCallError");
        }
        catch (expectedError) {
package/model-provider/ollama/OllamaTextGenerationModel.test.js

@@ -34,7 +34,7 @@ describe("generateText", () => {
         };
         const result = await generateText(new OllamaTextGenerationModel({
             model: "test-model",
-        }), "test prompt");
+        }).withTextPrompt(), "test prompt");
         expect(result).toEqual("test response");
     });
     it("should throw retryable ApiCallError when Ollama is overloaded", async () => {
@@ -50,7 +50,7 @@ describe("generateText", () => {
                 retry: retryNever(),
             }),
             model: "test-model",
-        }), "test prompt");
+        }).withTextPrompt(), "test prompt");
            fail("Should have thrown ApiCallError");
        }
        catch (expectedError) {
package/model-provider/openai/chat/AbstractOpenAIChatModel.cjs

@@ -219,7 +219,7 @@ exports.OpenAIChatResponseFormat = {
      */
     textDeltaIterable: {
         stream: true,
-        handler: async ({ response }) => (0, OpenAIChatStreamIterable_js_1.createOpenAIChatDeltaIterableQueue)(response.body, (delta) => delta[0]?.delta
+        handler: async ({ response }) => (0, OpenAIChatStreamIterable_js_1.createOpenAIChatDeltaIterableQueue)(response.body, (delta) => delta[0]?.delta?.content ?? ""),
     },
     structureDeltaIterable: {
         stream: true,
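
The delta mapper now falls back to an empty string when a chunk carries no text content. A minimal sketch of the mapping change in isolation (the chunk type is simplified and the helper name is hypothetical):

type ChatChunkChoice = { delta?: { content?: string | null } };

// Chunks without text content (e.g. the initial role-only delta or tool-call
// deltas) now map to "" instead of undefined/null.
const textOf = (choices: ChatChunkChoice[]) => choices[0]?.delta?.content ?? "";

textOf([{ delta: { content: "Hello" } }]); // "Hello"
textOf([{ delta: {} }]);                   // ""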
package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts

@@ -32,16 +32,65 @@ export interface AbstractOpenAIChatCallSettings {
             name: string;
         };
     };
+    /**
+     * An array of strings or a single string that the model will recognize as end-of-text indicators.
+     * The model stops generating more content when it encounters any of these strings.
+     * This is particularly useful in scripted or formatted text generation, where a specific end point is required.
+     * Example: stop: ['\n', 'END']
+     */
     stop?: string | string[];
+    /**
+     * Specifies the maximum number of tokens (words, punctuation, parts of words) that the model can generate in a single response.
+     * It helps to control the length of the output, this can help prevent wasted time and tokens when tweaker topP or temperature.
+     * Example: maxTokens: 1000
+     */
     maxTokens?: number;
+    /**
+     * `temperature`: Controls the randomness and creativity in the model's responses.
+     * A lower temperature (close to 0) results in more predictable, conservative text, while a higher temperature (close to 1) produces more varied and creative output.
+     * Adjust this to balance between consistency and creativity in the model's replies.
+     * Example: temperature: 0.5
+     */
     temperature?: number;
+    /**
+     * This parameter sets a threshold for token selection based on probability.
+     * The model will only consider the most likely tokens that cumulatively exceed this threshold while generating a response.
+     * It's a way to control the randomness of the output, balancing between diverse responses and sticking to more likely words.
+     * This means a topP of .1 will be far less random than one at .9
+     * Example: topP: 0.2
+     */
     topP?: number;
+    /**
+     * Used to set the initial state for the random number generator in the model.
+     * Providing a specific seed value ensures consistent outputs for the same inputs across different runs - useful for testing and reproducibility.
+     * A `null` value (or not setting it) results in varied, non-repeatable outputs each time.
+     * Example: seed: 89 (or) seed: null
+     */
     seed?: number | null;
     responseFormat?: {
         type?: "text" | "json_object";
     };
+    /**
+     * Specifies the number of responses or completions the model should generate for a given prompt.
+     * This is useful when you need multiple different outputs or ideas for a single prompt.
+     * The model will generate 'n' distinct responses, each based on the same initial prompt.
+     * In a streaming model this will result in both responses streamed back in real time.
+     * Example: n: 3 // The model will produce 3 different responses.
+     */
     n?: number;
+    /**
+     * Discourages the model from repeating the same information or context already mentioned in the conversation or prompt.
+     * Increasing this value encourages the model to introduce new topics or ideas, rather than reiterating what has been said.
+     * This is useful for maintaining a diverse and engaging conversation or for brainstorming sessions where varied ideas are needed.
+     * Example: presencePenalty: 1.0 // Strongly discourages repeating the same content.
+     */
     presencePenalty?: number;
+    /**
+     * This parameter reduces the likelihood of the model repeatedly using the same words or phrases in its responses.
+     * A higher frequency penalty promotes a wider variety of language and expressions in the output.
+     * This is particularly useful in creative writing or content generation tasks where diversity in language is desirable.
+     * Example: frequencyPenalty: 0.5 // Moderately discourages repetitive language.
+     */
     frequencyPenalty?: number;
     logitBias?: Record<number, number>;
 }
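
These newly documented settings are inherited by the concrete chat models. A hedged sketch of setting them on an OpenAI chat model, assuming the top-level `OpenAIChatModel` export; the values are taken from the examples in the comments above and the model name is illustrative:

import { OpenAIChatModel } from "modelfusion";

const chatModel = new OpenAIChatModel({
  model: "gpt-3.5-turbo",
  temperature: 0.5,      // balance consistency vs. creativity
  maxTokens: 1000,       // cap response length
  topP: 0.2,             // probability-mass threshold for token selection
  stop: ["\n", "END"],   // end-of-text indicators
  seed: 89,              // reproducible sampling across runs
  presencePenalty: 1.0,  // discourage repeating already-covered content
  frequencyPenalty: 0.5, // discourage repetitive wording
});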
package/model-provider/openai/chat/AbstractOpenAIChatModel.js

@@ -215,7 +215,7 @@ export const OpenAIChatResponseFormat = {
      */
     textDeltaIterable: {
         stream: true,
-        handler: async ({ response }) => createOpenAIChatDeltaIterableQueue(response.body, (delta) => delta[0]?.delta
+        handler: async ({ response }) => createOpenAIChatDeltaIterableQueue(response.body, (delta) => delta[0]?.delta?.content ?? ""),
     },
     structureDeltaIterable: {
         stream: true,