modelfusion 0.99.0 → 0.100.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +9 -15
- package/guard/fixStructure.cjs +3 -3
- package/guard/fixStructure.d.ts +3 -3
- package/guard/fixStructure.js +3 -3
- package/model-function/generate-structure/generateStructure.d.ts +2 -2
- package/model-function/generate-structure/streamStructure.d.ts +1 -1
- package/model-provider/mistral/{MistralTextGenerationModel.cjs → MistralChatModel.cjs} +13 -13
- package/model-provider/mistral/{MistralTextGenerationModel.d.ts → MistralChatModel.d.ts} +21 -20
- package/model-provider/mistral/{MistralTextGenerationModel.js → MistralChatModel.js} +11 -11
- package/model-provider/mistral/MistralFacade.cjs +5 -5
- package/model-provider/mistral/MistralFacade.d.ts +3 -2
- package/model-provider/mistral/MistralFacade.js +3 -3
- package/model-provider/mistral/MistralPromptTemplate.d.ts +4 -4
- package/model-provider/mistral/index.cjs +1 -1
- package/model-provider/mistral/index.d.ts +1 -1
- package/model-provider/mistral/index.js +1 -1
- package/model-provider/ollama/OllamaApiConfiguration.d.ts +6 -5
- package/model-provider/ollama/OllamaChatModel.cjs +303 -0
- package/model-provider/ollama/OllamaChatModel.d.ts +171 -0
- package/model-provider/ollama/OllamaChatModel.js +299 -0
- package/model-provider/ollama/OllamaChatPromptTemplate.cjs +76 -0
- package/model-provider/ollama/OllamaChatPromptTemplate.d.ts +20 -0
- package/model-provider/ollama/OllamaChatPromptTemplate.js +69 -0
- package/model-provider/ollama/{OllamaTextGenerationModel.cjs → OllamaCompletionModel.cjs} +13 -11
- package/model-provider/ollama/OllamaCompletionModel.d.ts +159 -0
- package/model-provider/ollama/{OllamaTextGenerationModel.js → OllamaCompletionModel.js} +11 -9
- package/model-provider/ollama/{OllamaTextGenerationModel.test.cjs → OllamaCompletionModel.test.cjs} +3 -3
- package/model-provider/ollama/{OllamaTextGenerationModel.test.js → OllamaCompletionModel.test.js} +3 -3
- package/model-provider/ollama/OllamaFacade.cjs +15 -5
- package/model-provider/ollama/OllamaFacade.d.ts +7 -2
- package/model-provider/ollama/OllamaFacade.js +11 -3
- package/model-provider/ollama/OllamaTextGenerationSettings.cjs +2 -0
- package/model-provider/ollama/OllamaTextGenerationSettings.d.ts +87 -0
- package/model-provider/ollama/OllamaTextGenerationSettings.js +1 -0
- package/model-provider/ollama/index.cjs +4 -1
- package/model-provider/ollama/index.d.ts +4 -1
- package/model-provider/ollama/index.js +4 -1
- package/model-provider/openai/OpenAIFacade.cjs +4 -2
- package/model-provider/openai/OpenAIFacade.d.ts +3 -1
- package/model-provider/openai/OpenAIFacade.js +2 -1
- package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts +1 -1
- package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts +3 -3
- package/model-provider/openai/chat/OpenAIChatModel.cjs +1 -1
- package/model-provider/openai/chat/OpenAIChatModel.d.ts +2 -2
- package/model-provider/openai/chat/OpenAIChatModel.js +1 -1
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts +5 -5
- package/model-provider/openai/chat/OpenAIChatPromptTemplate.js +1 -1
- package/model-provider/openai-compatible/OpenAICompatibleFacade.cjs +1 -1
- package/model-provider/openai-compatible/OpenAICompatibleFacade.d.ts +1 -1
- package/model-provider/openai-compatible/OpenAICompatibleFacade.js +1 -1
- package/package.json +1 -1
- package/model-provider/ollama/OllamaTextGenerationModel.d.ts +0 -230
- package/model-provider/ollama/{OllamaTextGenerationModel.test.d.ts → OllamaCompletionModel.test.d.ts} +0 -0
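Most of this release renames the Ollama text generation model to `OllamaCompletionModel` (and the Mistral text generation model to `MistralChatModel`), adds an `OllamaChatModel` with chat prompt templates, and extends the Ollama and OpenAI facades. A hedged migration sketch, assuming the root `modelfusion` re-exports shown in the index hunks below; the model name is a placeholder:

```ts
// 0.99.0 (old name, removed in this release):
// import { OllamaTextGenerationModel } from "modelfusion";

// 0.100.0 (new name):
import { OllamaCompletionModel } from "modelfusion";

// "mistral" is an assumed example model name, not a value from this diff.
const model = new OllamaCompletionModel({ model: "mistral" }).withTextPrompt();
```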
package/model-provider/ollama/OllamaCompletionModel.d.ts
ADDED
@@ -0,0 +1,159 @@
+import { z } from "zod";
+import { FunctionOptions } from "../../core/FunctionOptions.js";
+import { ApiConfiguration } from "../../core/api/ApiConfiguration.js";
+import { ResponseHandler } from "../../core/api/postToApi.js";
+import { AbstractModel } from "../../model-function/AbstractModel.js";
+import { Delta } from "../../model-function/Delta.js";
+import { PromptTemplateTextStreamingModel } from "../../model-function/generate-text/PromptTemplateTextStreamingModel.js";
+import { TextStreamingModel } from "../../model-function/generate-text/TextGenerationModel.js";
+import { TextGenerationPromptTemplate } from "../../model-function/generate-text/TextGenerationPromptTemplate.js";
+import { TextGenerationToolCallModel, ToolCallPromptTemplate } from "../../tool/generate-tool-call/TextGenerationToolCallModel.js";
+import { TextGenerationToolCallsOrGenerateTextModel, ToolCallsOrGenerateTextPromptTemplate } from "../../tool/generate-tool-calls-or-text/TextGenerationToolCallsOrGenerateTextModel.js";
+import { OllamaTextGenerationSettings } from "./OllamaTextGenerationSettings.js";
+export interface OllamaCompletionPrompt {
+    /**
+     * Text prompt.
+     */
+    prompt: string;
+    /**
+     Images. Supports base64-encoded `png` and `jpeg` images up to 100MB in size.
+     */
+    images?: Array<string>;
+}
+/**
+ * Text generation model that uses the Ollama completion API.
+ *
+ * @see https://github.com/jmorganca/ollama/blob/main/docs/api.md#generate-a-completion
+ */
+export interface OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE extends number | undefined> extends OllamaTextGenerationSettings {
+    api?: ApiConfiguration;
+    /**
+     * Specify the context window size of the model that you have loaded in your
+     * Ollama server. (Default: 2048)
+     */
+    contextWindowSize?: CONTEXT_WINDOW_SIZE;
+    /**
+     * When set to true, no formatting will be applied to the prompt and no context
+     * will be returned.
+     */
+    raw?: boolean;
+    system?: string;
+    context?: number[];
+}
+export declare class OllamaCompletionModel<CONTEXT_WINDOW_SIZE extends number | undefined> extends AbstractModel<OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>> implements TextStreamingModel<OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>> {
+    constructor(settings: OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>);
+    readonly provider = "ollama";
+    get modelName(): string;
+    readonly tokenizer: undefined;
+    readonly countPromptTokens: undefined;
+    get contextWindowSize(): CONTEXT_WINDOW_SIZE;
+    callAPI<RESPONSE>(prompt: OllamaCompletionPrompt, options: {
+        responseFormat: OllamaCompletionResponseFormatType<RESPONSE>;
+    } & FunctionOptions): Promise<RESPONSE>;
+    get settingsForEvent(): Partial<OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>>;
+    doGenerateTexts(prompt: OllamaCompletionPrompt, options?: FunctionOptions): Promise<{
+        response: {
+            response: string;
+            model: string;
+            done: true;
+            created_at: string;
+            total_duration: number;
+            prompt_eval_count: number;
+            eval_count: number;
+            eval_duration: number;
+            load_duration?: number | undefined;
+            prompt_eval_duration?: number | undefined;
+            context?: number[] | undefined;
+        };
+        texts: string[];
+    }>;
+    doStreamText(prompt: OllamaCompletionPrompt, options?: FunctionOptions): Promise<AsyncIterable<Delta<string>>>;
+    asToolCallGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallPromptTemplate<INPUT_PROMPT, OllamaCompletionPrompt>): TextGenerationToolCallModel<INPUT_PROMPT, OllamaCompletionPrompt, this>;
+    asToolCallsOrTextGenerationModel<INPUT_PROMPT>(promptTemplate: ToolCallsOrGenerateTextPromptTemplate<INPUT_PROMPT, OllamaCompletionPrompt>): TextGenerationToolCallsOrGenerateTextModel<INPUT_PROMPT, OllamaCompletionPrompt, this>;
+    withTextPrompt(): PromptTemplateTextStreamingModel<string, OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withPromptTemplate<INPUT_PROMPT>(promptTemplate: TextGenerationPromptTemplate<INPUT_PROMPT, OllamaCompletionPrompt>): PromptTemplateTextStreamingModel<INPUT_PROMPT, OllamaCompletionPrompt, OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>, this>;
+    withSettings(additionalSettings: Partial<OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>>): this;
+}
+declare const ollamaCompletionResponseSchema: z.ZodObject<{
+    done: z.ZodLiteral<true>;
+    model: z.ZodString;
+    created_at: z.ZodString;
+    response: z.ZodString;
+    total_duration: z.ZodNumber;
+    load_duration: z.ZodOptional<z.ZodNumber>;
+    prompt_eval_count: z.ZodNumber;
+    prompt_eval_duration: z.ZodOptional<z.ZodNumber>;
+    eval_count: z.ZodNumber;
+    eval_duration: z.ZodNumber;
+    context: z.ZodOptional<z.ZodArray<z.ZodNumber, "many">>;
+}, "strip", z.ZodTypeAny, {
+    response: string;
+    model: string;
+    done: true;
+    created_at: string;
+    total_duration: number;
+    prompt_eval_count: number;
+    eval_count: number;
+    eval_duration: number;
+    load_duration?: number | undefined;
+    prompt_eval_duration?: number | undefined;
+    context?: number[] | undefined;
+}, {
+    response: string;
+    model: string;
+    done: true;
+    created_at: string;
+    total_duration: number;
+    prompt_eval_count: number;
+    eval_count: number;
+    eval_duration: number;
+    load_duration?: number | undefined;
+    prompt_eval_duration?: number | undefined;
+    context?: number[] | undefined;
+}>;
+export type OllamaCompletionResponse = z.infer<typeof ollamaCompletionResponseSchema>;
+export type OllamaCompletionDelta = {
+    content: string;
+    isComplete: boolean;
+    delta: string;
+};
+export type OllamaCompletionResponseFormatType<T> = {
+    stream: boolean;
+    handler: ResponseHandler<T>;
+};
+export declare const OllamaCompletionResponseFormat: {
+    /**
+     * Returns the response as a JSON object.
+     */
+    json: {
+        stream: false;
+        handler: ({ response, url, requestBodyValues }: {
+            url: string;
+            requestBodyValues: unknown;
+            response: Response;
+        }) => Promise<{
+            response: string;
+            model: string;
+            done: true;
+            created_at: string;
+            total_duration: number;
+            prompt_eval_count: number;
+            eval_count: number;
+            eval_duration: number;
+            load_duration?: number | undefined;
+            prompt_eval_duration?: number | undefined;
+            context?: number[] | undefined;
+        }>;
+    };
+    /**
+     * Returns an async iterable over the full deltas (all choices, including full current state at time of event)
+     * of the response stream.
+     */
+    deltaIterable: {
+        stream: true;
+        handler: ({ response }: {
+            response: Response;
+        }) => Promise<AsyncIterable<Delta<string>>>;
+    };
+};
+export {};
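The declaration file above pairs with the usage shown in the renamed tests further down in this diff. A minimal generation sketch; the model name and prompt are placeholders:

```ts
import { generateText, OllamaCompletionModel } from "modelfusion";

// Mirrors the test below: generateText(new OllamaCompletionModel({ model }).withTextPrompt(), prompt).
const text = await generateText(
  new OllamaCompletionModel({ model: "mistral" }).withTextPrompt(),
  "Write a one-sentence summary of this release."
);
```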
package/model-provider/ollama/{OllamaTextGenerationModel.js → OllamaCompletionModel.js}
RENAMED
@@ -12,7 +12,7 @@ import { AsyncQueue } from "../../util/AsyncQueue.js";
 import { parseJsonStream } from "../../util/streaming/parseJsonStream.js";
 import { OllamaApiConfiguration } from "./OllamaApiConfiguration.js";
 import { failedOllamaCallResponseHandler } from "./OllamaError.js";
-export class
+export class OllamaCompletionModel extends AbstractModel {
     constructor(settings) {
         super({ settings });
         Object.defineProperty(this, "provider", {
@@ -114,7 +114,7 @@ export class OllamaTextGenerationModel extends AbstractModel {
     async doGenerateTexts(prompt, options) {
         const response = await this.callAPI(prompt, {
             ...options,
-            responseFormat:
+            responseFormat: OllamaCompletionResponseFormat.json,
         });
         return {
             response,
@@ -124,7 +124,7 @@ export class OllamaTextGenerationModel extends AbstractModel {
     doStreamText(prompt, options) {
         return this.callAPI(prompt, {
             ...options,
-            responseFormat:
+            responseFormat: OllamaCompletionResponseFormat.deltaIterable,
         });
     }
     asToolCallGenerationModel(promptTemplate) {
@@ -159,21 +159,23 @@ export class OllamaTextGenerationModel extends AbstractModel {
         });
     }
     withSettings(additionalSettings) {
-        return new
+        return new OllamaCompletionModel(Object.assign({}, this.settings, additionalSettings));
     }
 }
-const
+const ollamaCompletionResponseSchema = z.object({
     done: z.literal(true),
     model: z.string(),
+    created_at: z.string(),
     response: z.string(),
     total_duration: z.number(),
     load_duration: z.number().optional(),
     prompt_eval_count: z.number(),
+    prompt_eval_duration: z.number().optional(),
     eval_count: z.number(),
     eval_duration: z.number(),
     context: z.array(z.number()).optional(),
 });
-const
+const ollamaCompletionStreamSchema = new ZodSchema(z.discriminatedUnion("done", [
     z.object({
         done: z.literal(false),
         model: z.string(),
@@ -201,7 +203,7 @@ async function createOllamaFullDeltaIterableQueue(stream) {
     // process the stream asynchonously (no 'await' on purpose):
     parseJsonStream({
         stream,
-        schema:
+        schema: ollamaCompletionStreamSchema,
         process(event) {
             if (event.done === true) {
                 queue.push({
@@ -233,7 +235,7 @@ async function createOllamaFullDeltaIterableQueue(stream) {
     });
     return queue;
 }
-export const
+export const OllamaCompletionResponseFormat = {
     /**
      * Returns the response as a JSON object.
      */
@@ -244,7 +246,7 @@ export const OllamaTextGenerationResponseFormat = {
             const parsedResult = safeParseJSON({
                 text: responseBody,
                 schema: new ZodSchema(z.union([
-
+                    ollamaCompletionResponseSchema,
                     z.object({
                         done: z.literal(false),
                         model: z.string(),
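The hunks above swap the renamed `OllamaCompletionResponseFormat.json` and `.deltaIterable` formats into `doGenerateTexts` and `doStreamText`. A hedged streaming sketch, assuming modelfusion's top-level `streamText` helper (not part of this diff); model name and prompt are placeholders:

```ts
import { streamText, OllamaCompletionModel } from "modelfusion";

// doStreamText above wires the model to OllamaCompletionResponseFormat.deltaIterable.
const textStream = await streamText(
  new OllamaCompletionModel({ model: "mistral" }).withTextPrompt(),
  "Stream a short haiku about renamed classes."
);

for await (const textPart of textStream) {
  process.stdout.write(textPart);
}
```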
package/model-provider/ollama/{OllamaTextGenerationModel.test.cjs → OllamaCompletionModel.test.cjs}
RENAMED
@@ -7,7 +7,7 @@ const ApiCallError_js_1 = require("../../core/api/ApiCallError.cjs");
 const retryNever_js_1 = require("../../core/api/retryNever.cjs");
 const generateText_js_1 = require("../../model-function/generate-text/generateText.cjs");
 const OllamaApiConfiguration_js_1 = require("./OllamaApiConfiguration.cjs");
-const
+const OllamaCompletionModel_js_1 = require("./OllamaCompletionModel.cjs");
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 let responseBodyJson = {};
 const server = (0, node_1.setupServer)(msw_1.http.post("http://127.0.0.1:11434/api/generate", () => msw_1.HttpResponse.json(responseBodyJson)));
@@ -34,7 +34,7 @@ describe("generateText", () => {
             eval_count: 113,
             eval_duration: 1325948000,
         };
-        const result = await (0, generateText_js_1.generateText)(new
+        const result = await (0, generateText_js_1.generateText)(new OllamaCompletionModel_js_1.OllamaCompletionModel({
             model: "test-model",
         }).withTextPrompt(), "test prompt");
         expect(result).toEqual("test response");
@@ -47,7 +47,7 @@ describe("generateText", () => {
             done: false,
         };
         try {
-            await (0, generateText_js_1.generateText)(new
+            await (0, generateText_js_1.generateText)(new OllamaCompletionModel_js_1.OllamaCompletionModel({
                 api: new OllamaApiConfiguration_js_1.OllamaApiConfiguration({
                     retry: (0, retryNever_js_1.retryNever)(),
                 }),
package/model-provider/ollama/{OllamaTextGenerationModel.test.js → OllamaCompletionModel.test.js}
RENAMED
@@ -5,7 +5,7 @@ import { ApiCallError } from "../../core/api/ApiCallError.js";
 import { retryNever } from "../../core/api/retryNever.js";
 import { generateText } from "../../model-function/generate-text/generateText.js";
 import { OllamaApiConfiguration } from "./OllamaApiConfiguration.js";
-import {
+import { OllamaCompletionModel } from "./OllamaCompletionModel.js";
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 let responseBodyJson = {};
 const server = setupServer(http.post("http://127.0.0.1:11434/api/generate", () => HttpResponse.json(responseBodyJson)));
@@ -32,7 +32,7 @@ describe("generateText", () => {
             eval_count: 113,
             eval_duration: 1325948000,
         };
-        const result = await generateText(new
+        const result = await generateText(new OllamaCompletionModel({
             model: "test-model",
         }).withTextPrompt(), "test prompt");
         expect(result).toEqual("test response");
@@ -45,7 +45,7 @@ describe("generateText", () => {
             done: false,
         };
         try {
-            await generateText(new
+            await generateText(new OllamaCompletionModel({
                 api: new OllamaApiConfiguration({
                     retry: retryNever(),
                 }),
package/model-provider/ollama/OllamaFacade.cjs
@@ -1,12 +1,22 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.TextEmbedder = exports.
+exports.TextEmbedder = exports.ChatTextGenerator = exports.CompletionTextGenerator = exports.Api = void 0;
+const OllamaChatModel_js_1 = require("./OllamaChatModel.cjs");
 const OllamaTextEmbeddingModel_js_1 = require("./OllamaTextEmbeddingModel.cjs");
-const
-
-
+const OllamaCompletionModel_js_1 = require("./OllamaCompletionModel.cjs");
+const OllamaApiConfiguration_js_1 = require("./OllamaApiConfiguration.cjs");
+function Api(settings) {
+    return new OllamaApiConfiguration_js_1.OllamaApiConfiguration(settings);
 }
-exports.
+exports.Api = Api;
+function CompletionTextGenerator(settings) {
+    return new OllamaCompletionModel_js_1.OllamaCompletionModel(settings);
+}
+exports.CompletionTextGenerator = CompletionTextGenerator;
+function ChatTextGenerator(settings) {
+    return new OllamaChatModel_js_1.OllamaChatModel(settings);
+}
+exports.ChatTextGenerator = ChatTextGenerator;
 function TextEmbedder(settings) {
     return new OllamaTextEmbeddingModel_js_1.OllamaTextEmbeddingModel(settings);
 }
package/model-provider/ollama/OllamaFacade.d.ts
@@ -1,4 +1,9 @@
+import { OllamaChatModel, OllamaChatModelSettings } from "./OllamaChatModel.js";
 import { OllamaTextEmbeddingModel, OllamaTextEmbeddingModelSettings } from "./OllamaTextEmbeddingModel.js";
-import {
-
+import { OllamaCompletionModel, OllamaCompletionModelSettings } from "./OllamaCompletionModel.js";
+import { OllamaApiConfiguration, OllamaApiConfigurationSettings } from "./OllamaApiConfiguration.js";
+export declare function Api(settings: OllamaApiConfigurationSettings): OllamaApiConfiguration;
+export declare function CompletionTextGenerator<CONTEXT_WINDOW_SIZE extends number>(settings: OllamaCompletionModelSettings<CONTEXT_WINDOW_SIZE>): OllamaCompletionModel<CONTEXT_WINDOW_SIZE>;
+export declare function ChatTextGenerator(settings: OllamaChatModelSettings): OllamaChatModel;
 export declare function TextEmbedder(settings: OllamaTextEmbeddingModelSettings): OllamaTextEmbeddingModel;
+export { OllamaChatMessage as ChatMessage, OllamaChatPrompt as ChatPrompt, } from "./OllamaChatModel.js";
package/model-provider/ollama/OllamaFacade.js
@@ -1,7 +1,15 @@
+import { OllamaChatModel } from "./OllamaChatModel.js";
 import { OllamaTextEmbeddingModel, } from "./OllamaTextEmbeddingModel.js";
-import {
-
-
+import { OllamaCompletionModel, } from "./OllamaCompletionModel.js";
+import { OllamaApiConfiguration, } from "./OllamaApiConfiguration.js";
+export function Api(settings) {
+    return new OllamaApiConfiguration(settings);
+}
+export function CompletionTextGenerator(settings) {
+    return new OllamaCompletionModel(settings);
+}
+export function ChatTextGenerator(settings) {
+    return new OllamaChatModel(settings);
 }
 export function TextEmbedder(settings) {
     return new OllamaTextEmbeddingModel(settings);
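The facade now exposes `Api`, `CompletionTextGenerator`, and `ChatTextGenerator` alongside `TextEmbedder`. A usage sketch; the base URL and model names are placeholders, and the `baseUrl` setting name is assumed from `OllamaApiConfigurationSettings` rather than shown in this diff:

```ts
import { ollama, generateText } from "modelfusion";

// Point the provider at a local Ollama server (the tests above use port 11434).
const api = ollama.Api({ baseUrl: "http://127.0.0.1:11434" });

const text = await generateText(
  ollama.CompletionTextGenerator({ api, model: "mistral" }).withTextPrompt(),
  "Hello, Ollama!"
);

// Chat-style prompts go through the new OllamaChatModel:
const chatModel = ollama.ChatTextGenerator({ api, model: "mistral" });
```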
package/model-provider/ollama/OllamaTextGenerationSettings.d.ts
ADDED
@@ -0,0 +1,87 @@
+import { TextGenerationModelSettings } from "../../model-function/generate-text/TextGenerationModel.js";
+export interface OllamaTextGenerationSettings extends TextGenerationModelSettings {
+    /**
+     * The name of the model to use. For example, 'mistral'.
+     *
+     * @see https://ollama.ai/library
+     */
+    model: string;
+    /**
+     * The temperature of the model. Increasing the temperature will make the model
+     * answer more creatively. (Default: 0.8)
+     */
+    temperature?: number;
+    /**
+     * Enable Mirostat sampling for controlling perplexity.
+     * (default: 0, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)
+     */
+    mirostat?: number;
+    /**
+     * Influences how quickly the algorithm responds to feedback from the generated text.
+     * A lower learning rate will result in slower adjustments,
+     * while a higher learning rate will make the algorithm more responsive. (Default: 0.1)
+     */
+    mirostatEta?: number;
+    /**
+     * Controls the balance between coherence and diversity of the output.
+     * A lower value will result in more focused and coherent text. (Default: 5.0)
+     */
+    mirostatTau?: number;
+    /**
+     * The number of GQA groups in the transformer layer. Required for some models,
+     * for example it is 8 for llama2:70b
+     */
+    numGqa?: number;
+    /**
+     * The number of layers to send to the GPU(s). On macOS it defaults to 1 to
+     * enable metal support, 0 to disable.
+     */
+    numGpu?: number;
+    /**
+     * Sets the number of threads to use during computation. By default, Ollama will
+     * detect this for optimal performance. It is recommended to set this value to the
+     * number of physical CPU cores your system has (as opposed to the logical number of cores).
+     */
+    numThreads?: number;
+    /**
+     * Sets how far back for the model to look back to prevent repetition.
+     * (Default: 64, 0 = disabled, -1 = num_ctx)
+     */
+    repeatLastN?: number;
+    /**
+     * Sets how strongly to penalize repetitions. A higher value (e.g., 1.5)
+     * will penalize repetitions more strongly, while a lower value (e.g., 0.9)
+     * will be more lenient. (Default: 1.1)
+     */
+    repeatPenalty?: number;
+    /**
+     * Sets the random number seed to use for generation. Setting this to a
+     * specific number will make the model generate the same text for the same prompt.
+     * (Default: 0)
+     */
+    seed?: number;
+    /**
+     * Tail free sampling is used to reduce the impact of less probable tokens
+     * from the output. A higher value (e.g., 2.0) will reduce the impact more,
+     * while a value of 1.0 disables this setting. (default: 1)
+     */
+    tfsZ?: number;
+    /**
+     * Reduces the probability of generating nonsense. A higher value (e.g. 100)
+     * will give more diverse answers, while a lower value (e.g. 10) will be more
+     * conservative. (Default: 40)
+     */
+    topK?: number;
+    /**
+     * Works together with top-k. A higher value (e.g., 0.95) will lead to more
+     * diverse text, while a lower value (e.g., 0.5) will generate more focused
+     * and conservative text. (Default: 0.9)
+     */
+    topP?: number;
+    /**
+     * The format to return a response in. Currently the only accepted value is 'json'.
+     * Leave undefined to return a string.
+     */
+    format?: "json";
+    template?: string;
+}
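These shared settings carry the required `model` name plus Ollama's sampling parameters. A hedged configuration sketch with illustrative values (taken from the defaults quoted in the doc comments above); nothing here is required by the diff itself:

```ts
import { ollama } from "modelfusion";

const model = ollama.CompletionTextGenerator({
  model: "mistral",   // any model from https://ollama.ai/library
  temperature: 0.8,   // documented default
  topK: 40,
  topP: 0.9,
  repeatPenalty: 1.1,
  // format: "json",  // optional: ask Ollama for a JSON response instead of plain text
});
```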
package/model-provider/ollama/OllamaTextGenerationSettings.js
ADDED
@@ -0,0 +1 @@
+export {};
package/model-provider/ollama/index.cjs
@@ -28,6 +28,9 @@ var __importStar = (this && this.__importStar) || function (mod) {
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.ollama = void 0;
 __exportStar(require("./OllamaApiConfiguration.cjs"), exports);
+__exportStar(require("./OllamaChatModel.cjs"), exports);
+__exportStar(require("./OllamaChatPromptTemplate.cjs"), exports);
+__exportStar(require("./OllamaCompletionModel.cjs"), exports);
 exports.ollama = __importStar(require("./OllamaFacade.cjs"));
 __exportStar(require("./OllamaTextEmbeddingModel.cjs"), exports);
-__exportStar(require("./
+__exportStar(require("./OllamaTextGenerationSettings.cjs"), exports);
package/model-provider/ollama/index.d.ts
@@ -1,5 +1,8 @@
 export * from "./OllamaApiConfiguration.js";
+export * from "./OllamaChatModel.js";
+export * from "./OllamaChatPromptTemplate.js";
+export * from "./OllamaCompletionModel.js";
 export { OllamaErrorData } from "./OllamaError.js";
 export * as ollama from "./OllamaFacade.js";
 export * from "./OllamaTextEmbeddingModel.js";
-export * from "./
+export * from "./OllamaTextGenerationSettings.js";
package/model-provider/ollama/index.js
@@ -1,4 +1,7 @@
 export * from "./OllamaApiConfiguration.js";
+export * from "./OllamaChatModel.js";
+export * from "./OllamaChatPromptTemplate.js";
+export * from "./OllamaCompletionModel.js";
 export * as ollama from "./OllamaFacade.js";
 export * from "./OllamaTextEmbeddingModel.js";
-export * from "./
+export * from "./OllamaTextGenerationSettings.js";
package/model-provider/openai/OpenAIFacade.cjs
@@ -1,6 +1,6 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.Tokenizer = exports.ImageGenerator = exports.Transcriber = exports.SpeechGenerator = exports.TextEmbedder = exports.ChatTextGenerator = exports.CompletionTextGenerator = void 0;
+exports.ChatMessage = exports.Tokenizer = exports.ImageGenerator = exports.Transcriber = exports.SpeechGenerator = exports.TextEmbedder = exports.ChatTextGenerator = exports.CompletionTextGenerator = void 0;
 const OpenAICompletionModel_js_1 = require("./OpenAICompletionModel.cjs");
 const OpenAIImageGenerationModel_js_1 = require("./OpenAIImageGenerationModel.cjs");
 const OpenAISpeechModel_js_1 = require("./OpenAISpeechModel.cjs");
@@ -47,7 +47,7 @@ exports.CompletionTextGenerator = CompletionTextGenerator;
  * const text = await generateText(
  *   model,
  *   [
- *
+ *     openai.ChatMessage.system(
  *       "Write a short story about a robot learning to love:"
  *     ),
  *   ]
@@ -148,3 +148,5 @@ function Tokenizer(settings) {
     return new TikTokenTokenizer_js_1.TikTokenTokenizer(settings);
 }
 exports.Tokenizer = Tokenizer;
+var OpenAIChatMessage_js_1 = require("./chat/OpenAIChatMessage.cjs");
+Object.defineProperty(exports, "ChatMessage", { enumerable: true, get: function () { return OpenAIChatMessage_js_1.OpenAIChatMessage; } });
package/model-provider/openai/OpenAIFacade.d.ts
@@ -41,7 +41,7 @@ export declare function CompletionTextGenerator(settings: OpenAICompletionModelS
  * const text = await generateText(
  *   model,
  *   [
- *
+ *     openai.ChatMessage.system(
  *       "Write a short story about a robot learning to love:"
  *     ),
  *   ]
@@ -124,3 +124,5 @@ export declare function ImageGenerator(settings: OpenAIImageGenerationSettings):
  * @returns A new instance of {@link TikTokenTokenizer}.
  */
 export declare function Tokenizer(settings: TikTokenTokenizerSettings): TikTokenTokenizer;
+export { OpenAIChatMessage as ChatMessage } from "./chat/OpenAIChatMessage.js";
+export { OpenAIChatPrompt as ChatPrompt } from "./chat/AbstractOpenAIChatModel.js";
package/model-provider/openai/OpenAIFacade.js
@@ -43,7 +43,7 @@ export function CompletionTextGenerator(settings) {
  * const text = await generateText(
  *   model,
  *   [
- *
+ *     openai.ChatMessage.system(
  *       "Write a short story about a robot learning to love:"
  *     ),
  *   ]
@@ -138,3 +138,4 @@ export function ImageGenerator(settings) {
 export function Tokenizer(settings) {
     return new TikTokenTokenizer(settings);
 }
+export { OpenAIChatMessage as ChatMessage } from "./chat/OpenAIChatMessage.js";
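The OpenAI facade now re-exports `OpenAIChatMessage` as `openai.ChatMessage`, matching the updated JSDoc examples in the hunks above and below. A sketch; the model name is a placeholder, and the `system`/`user` message helpers are assumed from the existing `OpenAIChatMessage` API rather than shown in this diff:

```ts
import { openai, generateText } from "modelfusion";

const text = await generateText(openai.ChatTextGenerator({ model: "gpt-3.5-turbo" }), [
  openai.ChatMessage.system("You are a terse assistant."),
  openai.ChatMessage.user("Write a short story about a robot learning to love."),
]);
```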
package/model-provider/openai/chat/AbstractOpenAIChatModel.d.ts
@@ -84,7 +84,7 @@ export type OpenAIChatPrompt = OpenAIChatMessage[];
  */
 export declare abstract class AbstractOpenAIChatModel<SETTINGS extends AbstractOpenAIChatSettings> extends AbstractModel<SETTINGS> {
     constructor(settings: SETTINGS);
-    callAPI<RESULT>(messages:
+    callAPI<RESULT>(messages: OpenAIChatPrompt, options: {
         responseFormat: OpenAIChatResponseFormatType<RESULT>;
     } & FunctionOptions & {
         functions?: AbstractOpenAIChatCallSettings["functions"];
package/model-provider/openai/chat/OpenAIChatFunctionCallStructureGenerationModel.d.ts
@@ -23,15 +23,15 @@ OpenAIChatSettings> {
     /**
      * Returns this model with a text prompt template.
      */
-    withTextPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<string,
+    withTextPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<string, OpenAIChatPrompt>>;
     /**
      * Returns this model with an instruction prompt template.
      */
-    withInstructionPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("../../../index.js").MultiModalInstructionPrompt | import("../../../index.js").TextInstructionPrompt,
+    withInstructionPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("../../../index.js").MultiModalInstructionPrompt | import("../../../index.js").TextInstructionPrompt, OpenAIChatPrompt>>;
     /**
      * Returns this model with a chat prompt template.
      */
-    withChatPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("../../../index.js").TextChatPrompt | import("../../../index.js").MultiModalChatPrompt,
+    withChatPrompt(): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<import("../../../index.js").TextChatPrompt | import("../../../index.js").MultiModalChatPrompt, OpenAIChatPrompt>>;
     withPromptTemplate<TARGET_PROMPT_FORMAT extends TextGenerationPromptTemplate<unknown, OpenAIChatPrompt>>(promptTemplate: TARGET_PROMPT_FORMAT): OpenAIChatFunctionCallStructureGenerationModel<TARGET_PROMPT_FORMAT>;
     withSettings(additionalSettings: Partial<OpenAIChatSettings>): this;
     /**
package/model-provider/openai/chat/OpenAIChatModel.cjs
@@ -149,7 +149,7 @@ exports.calculateOpenAIChatCostInMillicents = calculateOpenAIChatCostInMillicent
  *
  * const text = await generateText([
  *   model,
- *
+ *   openai.ChatMessage.system(
  *     "Write a short story about a robot learning to love:"
  *   ),
  * ]);
package/model-provider/openai/chat/OpenAIChatModel.d.ts
@@ -122,7 +122,7 @@ export interface OpenAIChatSettings extends TextGenerationModelSettings, Omit<Op
  *
  * const text = await generateText([
  *   model,
- *
+ *   openai.ChatMessage.system(
  *     "Write a short story about a robot learning to love:"
  *   ),
  * ]);
@@ -142,7 +142,7 @@ export declare class OpenAIChatModel extends AbstractOpenAIChatModel<OpenAIChatS
     asFunctionCallStructureGenerationModel({ fnName, fnDescription, }: {
         fnName: string;
         fnDescription?: string;
-    }): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<
+    }): OpenAIChatFunctionCallStructureGenerationModel<TextGenerationPromptTemplate<OpenAIChatPrompt, OpenAIChatPrompt>>;
     asStructureGenerationModel<INPUT_PROMPT>(promptTemplate: StructureFromTextPromptTemplate<INPUT_PROMPT, OpenAIChatPrompt>): StructureFromTextStreamingModel<INPUT_PROMPT, OpenAIChatPrompt, this>;
     /**
      * Returns this model with a text prompt template.
package/model-provider/openai/chat/OpenAIChatModel.js
@@ -143,7 +143,7 @@ export const calculateOpenAIChatCostInMillicents = ({ model, response, }) => {
  *
  * const text = await generateText([
  *   model,
- *
+ *   openai.ChatMessage.system(
  *     "Write a short story about a robot learning to love:"
  *   ),
  * ]);
package/model-provider/openai/chat/OpenAIChatPromptTemplate.d.ts
@@ -1,20 +1,20 @@
 import { TextGenerationPromptTemplate } from "../../../model-function/generate-text/TextGenerationPromptTemplate.js";
 import { MultiModalChatPrompt, TextChatPrompt } from "../../../model-function/generate-text/prompt-template/ChatPrompt.js";
 import { MultiModalInstructionPrompt, TextInstructionPrompt } from "../../../model-function/generate-text/prompt-template/InstructionPrompt.js";
-import {
+import { OpenAIChatPrompt } from "./AbstractOpenAIChatModel.js";
 /**
  * OpenAIMessage[] identity chat format.
  */
-export declare function identity(): TextGenerationPromptTemplate<
+export declare function identity(): TextGenerationPromptTemplate<OpenAIChatPrompt, OpenAIChatPrompt>;
 /**
  * Formats a text prompt as an OpenAI chat prompt.
  */
-export declare function text(): TextGenerationPromptTemplate<string,
+export declare function text(): TextGenerationPromptTemplate<string, OpenAIChatPrompt>;
 /**
  * Formats an instruction prompt as an OpenAI chat prompt.
  */
-export declare function instruction(): TextGenerationPromptTemplate<MultiModalInstructionPrompt | TextInstructionPrompt,
+export declare function instruction(): TextGenerationPromptTemplate<MultiModalInstructionPrompt | TextInstructionPrompt, OpenAIChatPrompt>;
 /**
  * Formats a chat prompt as an OpenAI chat prompt.
  */
-export declare function chat(): TextGenerationPromptTemplate<MultiModalChatPrompt | TextChatPrompt,
+export declare function chat(): TextGenerationPromptTemplate<MultiModalChatPrompt | TextChatPrompt, OpenAIChatPrompt>;
package/model-provider/openai/chat/OpenAIChatPromptTemplate.js
@@ -1,4 +1,4 @@
-import { validateChatPrompt } from "../../../model-function/generate-text/prompt-template/ChatPrompt.js";
+import { validateChatPrompt, } from "../../../model-function/generate-text/prompt-template/ChatPrompt.js";
 import { OpenAIChatMessage } from "./OpenAIChatMessage.js";
 /**
  * OpenAIMessage[] identity chat format.