@kernl-sdk/protocol 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +4 -0
- package/CHANGELOG.md +7 -0
- package/LICENSE +201 -0
- package/README.md +1 -0
- package/dist/codec.d.ts +22 -0
- package/dist/codec.d.ts.map +1 -0
- package/dist/codec.js +1 -0
- package/dist/constants.d.ts +11 -0
- package/dist/constants.d.ts.map +1 -0
- package/dist/constants.js +13 -0
- package/dist/embedding-model/embedding-model.d.ts +57 -0
- package/dist/embedding-model/embedding-model.d.ts.map +1 -0
- package/dist/embedding-model/embedding-model.js +6 -0
- package/dist/embedding-model/index.d.ts +3 -0
- package/dist/embedding-model/index.d.ts.map +1 -0
- package/dist/embedding-model/index.js +2 -0
- package/dist/embedding-model/model.d.ts +65 -0
- package/dist/embedding-model/model.d.ts.map +1 -0
- package/dist/embedding-model/model.js +1 -0
- package/dist/embedding-model/request.d.ts +27 -0
- package/dist/embedding-model/request.d.ts.map +1 -0
- package/dist/embedding-model/request.js +1 -0
- package/dist/index.d.ts +11 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +15 -0
- package/dist/json.d.ts +10 -0
- package/dist/json.d.ts.map +1 -0
- package/dist/json.js +1 -0
- package/dist/language-model/content.d.ts +141 -0
- package/dist/language-model/content.d.ts.map +1 -0
- package/dist/language-model/content.js +1 -0
- package/dist/language-model/index.d.ts +6 -0
- package/dist/language-model/index.d.ts.map +1 -0
- package/dist/language-model/index.js +5 -0
- package/dist/language-model/item.d.ts +183 -0
- package/dist/language-model/item.d.ts.map +1 -0
- package/dist/language-model/item.js +1 -0
- package/dist/language-model/language-model.d.ts +114 -0
- package/dist/language-model/language-model.d.ts.map +1 -0
- package/dist/language-model/language-model.js +1 -0
- package/dist/language-model/model.d.ts +110 -0
- package/dist/language-model/model.d.ts.map +1 -0
- package/dist/language-model/model.js +1 -0
- package/dist/language-model/request.d.ts +175 -0
- package/dist/language-model/request.d.ts.map +1 -0
- package/dist/language-model/request.js +1 -0
- package/dist/language-model/settings.d.ts +99 -0
- package/dist/language-model/settings.d.ts.map +1 -0
- package/dist/language-model/settings.js +1 -0
- package/dist/language-model/stream.d.ts +195 -0
- package/dist/language-model/stream.d.ts.map +1 -0
- package/dist/language-model/stream.js +1 -0
- package/dist/language-model/tool.d.ts +48 -0
- package/dist/language-model/tool.d.ts.map +1 -0
- package/dist/language-model/tool.js +1 -0
- package/dist/provider/index.d.ts +2 -0
- package/dist/provider/index.d.ts.map +1 -0
- package/dist/provider/index.js +1 -0
- package/dist/provider/metadata.d.ts +21 -0
- package/dist/provider/metadata.d.ts.map +1 -0
- package/dist/provider/metadata.js +1 -0
- package/dist/provider/provider.d.ts +42 -0
- package/dist/provider/provider.d.ts.map +1 -0
- package/dist/provider/provider.js +1 -0
- package/package.json +41 -0
- package/src/codec.ts +22 -0
- package/src/constants.ts +15 -0
- package/src/embedding-model/index.ts +2 -0
- package/src/embedding-model/model.ts +76 -0
- package/src/embedding-model/request.ts +31 -0
- package/src/index.ts +20 -0
- package/src/json.ts +17 -0
- package/src/language-model/index.ts +5 -0
- package/src/language-model/item.ts +251 -0
- package/src/language-model/model.ts +161 -0
- package/src/language-model/request.ts +210 -0
- package/src/language-model/stream.ts +251 -0
- package/src/language-model/tool.ts +56 -0
- package/src/provider/index.ts +1 -0
- package/src/provider/provider.ts +79 -0
- package/tsconfig.json +13 -0
package/src/json.ts
ADDED
@@ -0,0 +1,17 @@
+/**
+ * A JSON value can be a string, number, boolean, object, array, or null.
+ * JSON values can be serialized and deserialized by the JSON.stringify and JSON.parse methods.
+ */
+export type JSONValue =
+  | null
+  | string
+  | number
+  | boolean
+  | JSONObject
+  | JSONArray;
+
+export type JSONObject = {
+  [key: string]: JSONValue | undefined;
+};
+
+export type JSONArray = JSONValue[];
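For context, a value typed as `JSONValue` round-trips through `JSON.stringify`/`JSON.parse`, and `JSONObject` permits `undefined` members so optional fields can simply be omitted on serialization. A minimal usage sketch follows; it assumes these types are re-exported from the package root, and the `payload` value is illustrative only.

```ts
import type { JSONValue, JSONObject } from "@kernl-sdk/protocol"; // assumed root re-export

// An illustrative object that satisfies JSONObject.
const payload: JSONObject = {
  name: "example",
  tags: ["a", "b"],
  count: 3,
  omitted: undefined, // allowed by JSONObject; dropped by JSON.stringify
};

// Serialize and re-parse; the result is again a JSONValue.
const copy: JSONValue = JSON.parse(JSON.stringify(payload));
console.log(copy);
```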
package/src/language-model/item.ts
ADDED
@@ -0,0 +1,251 @@
+import { SharedProviderMetadata } from "@/provider";
+import { JSONValue } from "@/json";
+import {
+  IN_PROGRESS,
+  COMPLETED,
+  FAILED,
+  INTERRUPTIBLE,
+  UNINTERRUPTIBLE,
+} from "@/constants";
+
+export type LanguageModelItem =
+  | Message
+  | Reasoning
+  | ToolCall
+  | ToolResult
+  | Unknown;
+
+/**
+ * A subset of LanguageModelItem that excludes items that wouldn't
+ * make sense for a model to generate (e.g. system/user messages, tool results).
+ */
+export type LanguageModelResponseItem =
+  | AssistantMessage
+  | Reasoning
+  | ToolCall
+  | Unknown;
+
+export interface SharedBase {
+  /**
+   * Optional provider-specific metadata for the text part.
+   */
+  providerMetadata?: SharedProviderMetadata;
+}
+
+/**
+ * Shared base for language model items.
+ */
+export interface LanguageModelItemBase extends SharedBase {
+  /**
+   * A unique identifier for the item. Optional by default.
+   */
+  id?: string;
+}
+
+// ----------------------------
+// Content types
+// ----------------------------
+
+/**
+ * Defines base properties common to all message or artifact parts.
+ */
+export interface PartBase extends SharedBase {
+  /** Optional metadata associated with this part. */
+  metadata?: Record<string, unknown>;
+}
+
+/**
+ * Text that the model has generated.
+ */
+export interface TextPart extends PartBase {
+  readonly kind: "text";
+
+  /**
+   * The text content.
+   */
+  text: string;
+}
+
+/**
+ * A file that has been generated by the model.
+ *
+ * May be base64 encoded strings or binary data.
+ */
+export interface FilePart extends PartBase {
+  readonly kind: "file";
+
+  /**
+   * The IANA media type of the file, e.g. `image/png` or `audio/mp3`.
+   *
+   * @see https://www.iana.org/assignments/media-types/media-types.xhtml
+   */
+  mimeType: string;
+
+  /**
+   * Generated file data as base64 encoded strings or binary data.
+   */
+  data: string | Uint8Array;
+
+  /**
+   * Optional filename for the file
+   */
+  filename?: string;
+}
+
+/**
+ * Represents a structured data segment (e.g., JSON) within a message or artifact.
+ */
+export interface DataPart extends PartBase {
+  readonly kind: "data";
+
+  /**
+   * The structured data content.
+   */
+  data: Record<string, unknown>;
+}
+
+export type MessagePart = TextPart | FilePart | DataPart;
+
+/**
+ * Reasoning that the model has generated.
+ */
+export interface Reasoning extends LanguageModelItemBase {
+  readonly kind: "reasoning";
+
+  /**
+   * The reasoning content
+   */
+  text: string;
+}
+
+/**
+ * This is a catch-all for events that are not part of the protocol.
+ *
+ * For example, a model might return an event that is not part of the protocol using this type.
+ *
+ * In that case everything returned from the model should be passed in the `providerMetadata` field.
+ *
+ * This enables new features to be added by a model provider without breaking the protocol.
+ */
+export interface Unknown extends LanguageModelItemBase {
+  readonly kind: "unknown";
+}
+
+// ----------------------------
+// Message types
+// ----------------------------
+
+export interface MessageBase extends SharedBase {
+  readonly kind: "message";
+
+  /**
+   * The unique identifier for the message.
+   */
+  id: string;
+
+  /**
+   * The content parts of the message.
+   */
+  content: MessagePart[];
+
+  /**
+   * Optional additional metadata for the message
+   */
+  metadata?: Record<string, unknown>;
+}
+
+export interface SystemMessage extends MessageBase {
+  /**
+   * Representing a system message to the user
+   */
+  readonly role: "system";
+}
+
+export interface AssistantMessage extends MessageBase {
+  /**
+   * Representing a message from the assistant
+   */
+  readonly role: "assistant";
+}
+
+export interface UserMessage extends MessageBase {
+  /**
+   * Representing a message from the user
+   */
+  readonly role: "user";
+}
+
+export type Message = SystemMessage | AssistantMessage | UserMessage;
+
+// ----------------------------
+// Tool call types
+// ----------------------------
+
+/**
+ * Tool calls that the model has generated.
+ */
+export interface ToolCall extends LanguageModelItemBase {
+  readonly kind: "tool-call";
+
+  /**
+   * The identifier of the tool call. It must be unique across all tool calls.
+   */
+  callId: string;
+
+  /**
+   * The id of the tool that should be called.
+   */
+  toolId: string;
+
+  /**
+   * The state of the tool call.
+   */
+  state: ToolCallState;
+
+  /**
+   * The stringified JSON object with the arguments of the tool call.
+   */
+  arguments: string;
+}
+
+/**
+ * Result of a tool call that has been executed by the provider.
+ */
+export interface ToolResult extends LanguageModelItemBase {
+  readonly kind: "tool-result";
+
+  /**
+   * The ID of the tool call that this result is associated with.
+   */
+  callId: string;
+
+  /**
+   * Name of the tool that generated this result.
+   */
+  toolId: string;
+
+  /**
+   * The state of the tool call.
+   */
+  state: ToolCallState;
+
+  /**
+   * Result of the tool call. This is a JSON-serializable object.
+   */
+  result: NonNullable<JSONValue>;
+
+  /**
+   * Error message if the tool call failed
+   */
+  error: string | null;
+}
+
+/**
+ * State of a tool call execution.
+ */
+export type ToolCallState =
+  | typeof IN_PROGRESS /* tool is actively executing */
+  | typeof COMPLETED /* finished successfully */
+  | typeof FAILED /* failed with error */
+  | typeof INTERRUPTIBLE /* tool is blocked/waiting and CAN be interrupted */
+  | typeof UNINTERRUPTIBLE; /* tool is blocked/waiting and CANNOT be interrupted (e.g. critical I/O, API call, ..) */
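To make the item shapes concrete, here is a hedged sketch of one conversation turn: a user message followed by a completed tool call and its result. It assumes the item types and the `COMPLETED` state constant are re-exported from the package root; the tool id and values are illustrative only.

```ts
import type { UserMessage, ToolCall, ToolResult } from "@kernl-sdk/protocol"; // assumed root re-exports
import { COMPLETED } from "@kernl-sdk/protocol"; // assumed re-export of the state constant

const question: UserMessage = {
  kind: "message",
  role: "user",
  id: "msg_1",
  content: [{ kind: "text", text: "What is the weather in Paris?" }],
};

// callId ties the result back to the call; arguments are a stringified JSON object.
const call: ToolCall = {
  kind: "tool-call",
  callId: "call_1",
  toolId: "get_weather", // illustrative tool id
  state: COMPLETED,
  arguments: JSON.stringify({ city: "Paris" }),
};

const result: ToolResult = {
  kind: "tool-result",
  callId: "call_1",
  toolId: "get_weather",
  state: COMPLETED,
  result: { tempC: 21 },
  error: null,
};

const history = [question, call, result];
```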
package/src/language-model/model.ts
ADDED
@@ -0,0 +1,161 @@
+import { SharedProviderMetadata } from "@/provider";
+
+import { LanguageModelResponseItem } from "./item";
+import { LanguageModelRequest } from "./request";
+import { LanguageModelStreamEvent } from "./stream";
+import { LanguageModelFunctionTool, LanguageModelProviderTool } from "./tool";
+
+/**
+ * Defines the standard interface for language model providers in kernl.
+ */
+export interface LanguageModel {
+  /**
+   * The language model must specify which language model interface version it implements.
+   */
+  readonly spec: "1.0";
+
+  /**
+   * Provider ID.
+   */
+  readonly provider: string;
+
+  /**
+   * Provider-specific model ID.
+   */
+  readonly modelId: string;
+
+  /**
+   * Get a response from the model.
+   *
+   * @param request - The request to get a response for.
+   */
+  generate(request: LanguageModelRequest): Promise<LanguageModelResponse>;
+
+  /**
+   * Get a streamed response from the model.
+   *
+   * @param request - The request to get a response for.
+   */
+  stream(
+    request: LanguageModelRequest,
+  ): AsyncIterable<LanguageModelStreamEvent>;
+
+  // /**
+  //  * Supported URL patterns by media type for the provider.
+  //  *
+  //  * The keys are media type patterns or full media types (e.g. `*\/*` for everything, `audio/*`, `video/*`, or `application/pdf`).
+  //  * and the values are arrays of regular expressions that match the URL paths.
+  //  *
+  //  * The matching should be against lower-case URLs.
+  //  *
+  //  * Matched URLs are supported natively by the model and are not downloaded.
+  //  *
+  //  * @returns A map of supported URL patterns by media type (as a promise or a plain object).
+  //  */
+  // supportedUrls:
+  //   | PromiseLike<Record<string, RegExp[]>>
+  //   | Record<string, RegExp[]>;
+}
+
+/**
+ * The base response interface for a language model.
+ */
+export interface LanguageModelResponse {
+  // /**
+  //  * An ID for the response which can be used to refer to the response in subsequent calls to the
+  //  * model. Not supported by all model providers.
+  //  */
+  // responseId?: string;
+
+  /**
+   * The ordered list of content items generated by the model.
+   */
+  content: LanguageModelResponseItem[];
+
+  /**
+   * Finish reason.
+   */
+  finishReason: LanguageModelFinishReason;
+
+  /**
+   * The usage information for the response.
+   */
+  usage: LanguageModelUsage;
+
+  /**
+   * Warnings for the call, e.g. unsupported settings.
+   */
+  warnings: LanguageModelWarning[];
+
+  /**
+   * Raw response data from the underlying model provider.
+   */
+  providerMetadata?: SharedProviderMetadata;
+}
+
+/**
+ * Reason why a language model finished generating a response.
+ */
+export type LanguageModelFinishReason =
+  | "stop" /* model generated stop sequence */
+  | "length" /* model generated maximum number of tokens */
+  | "content-filter" /* content filter violation stopped the model */
+  | "tool-calls" /* model triggered tool calls */
+  | "error" /* model stopped because of an error */
+  | "other" /* model stopped for other reasons */
+  | "unknown"; /* the model has not transmitted a finish reason */
+
+/**
+ * Usage information for a language model call.
+ *
+ * If your API returns additional usage information, you can add it to the
+ * provider metadata under your provider's key.
+ */
+export interface LanguageModelUsage {
+  /**
+   * The number of input (prompt) tokens used.
+   */
+  inputTokens: number | undefined;
+
+  /**
+   * The number of output (completion) tokens used.
+   */
+  outputTokens: number | undefined;
+
+  /**
+   * The total number of tokens as reported by the provider.
+   * This number might be different from the sum of `inputTokens` and `outputTokens`
+   * and e.g. include reasoning tokens or other overhead.
+   */
+  totalTokens: number | undefined;
+
+  /**
+   * The number of reasoning tokens used.
+   */
+  reasoningTokens?: number | undefined;
+
+  /**
+   * The number of cached input tokens.
+   */
+  cachedInputTokens?: number | undefined;
+}
+
+/**
+ * Warning from the model provider for this call. The call will proceed, but e.g.
+ * some settings might not be supported, which can lead to suboptimal results.
+ */
+export type LanguageModelWarning =
+  | {
+      type: "unsupported-setting";
+      setting: Omit<keyof LanguageModelRequest, "input">; // (TODO): allow string
+      details?: string;
+    }
+  | {
+      type: "unsupported-tool";
+      tool: LanguageModelFunctionTool | LanguageModelProviderTool;
+      details?: string;
+    }
+  | {
+      type: "other";
+      message: string;
+    };
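As a rough illustration of the contract, the sketch below implements `LanguageModel` with a stub provider that returns a fixed assistant message. The class name is hypothetical, the root re-exports are assumed, and the `stream` stub simply throws because the `LanguageModelStreamEvent` shapes live in stream.ts, which is not reproduced here.

```ts
import type {
  LanguageModel,
  LanguageModelRequest,
  LanguageModelResponse,
  LanguageModelStreamEvent,
} from "@kernl-sdk/protocol"; // assumed root re-exports

// Hypothetical stub provider used only to show the required shape.
class EchoModel implements LanguageModel {
  readonly spec = "1.0" as const;
  readonly provider = "echo";
  readonly modelId = "echo-1";

  async generate(request: LanguageModelRequest): Promise<LanguageModelResponse> {
    return {
      content: [
        {
          kind: "message",
          role: "assistant",
          id: "msg_echo",
          content: [{ kind: "text", text: `received ${request.input.length} item(s)` }],
        },
      ],
      finishReason: "stop",
      usage: { inputTokens: undefined, outputTokens: undefined, totalTokens: undefined },
      warnings: [],
    };
  }

  stream(request: LanguageModelRequest): AsyncIterable<LanguageModelStreamEvent> {
    // Streaming is omitted in this sketch; event shapes are defined in stream.ts.
    void request;
    throw new Error("streaming not implemented in this sketch");
  }
}
```

A caller would then `await model.generate(request)` and inspect `content`, `finishReason`, and `usage` on the returned response.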
package/src/language-model/request.ts
ADDED
@@ -0,0 +1,210 @@
+import type { JSONSchema7 } from "json-schema";
+
+import { SharedProviderOptions } from "@/provider";
+
+import { LanguageModelItem } from "./item";
+import { LanguageModelFunctionTool, LanguageModelProviderTool } from "./tool";
+
+export type LanguageModelTool =
+  | LanguageModelFunctionTool
+  | LanguageModelProviderTool;
+
+/**
+ * A request to a large language model.
+ */
+export interface LanguageModelRequest {
+  /**
+   * The input to the model.
+   */
+  input: LanguageModelItem[];
+
+  /**
+   * The model settings to use for the request.
+   */
+  settings: LanguageModelRequestSettings;
+
+  /**
+   * Response format. The output can either be text or JSON. Default is text.
+   *
+   * If JSON is selected, a schema can optionally be provided to guide the LLM.
+   */
+  responseType?: LanguageModelResponseType;
+
+  /**
+   * The tools that are available for the model.
+   */
+  tools?: LanguageModelTool[];
+
+  /**
+   * Include raw chunks in the stream. Only applicable for streaming calls.
+   */
+  includeRawChunks?: boolean;
+
+  /**
+   * Abort signal for cancelling the operation.
+   */
+  abort?: AbortSignal;
+}
+
+/**
+ * Response format specification for language model output.
+ *
+ * The output can either be text or JSON. Default is text.
+ * If JSON is selected, a schema can optionally be provided to guide the LLM.
+ */
+export type LanguageModelResponseType =
+  | LanguageModelResponseText
+  | LanguageModelResponseJSON;
+
+/**
+ * Text response format.
+ */
+export interface LanguageModelResponseText {
+  readonly kind: "text";
+}
+
+/**
+ * JSON response format.
+ */
+export interface LanguageModelResponseJSON {
+  readonly kind: "json";
+
+  /**
+   * JSON schema that the generated output should conform to.
+   */
+  schema?: JSONSchema7;
+
+  /**
+   * Name of output that should be generated. Used by some providers for additional LLM guidance.
+   */
+  name?: string;
+
+  /**
+   * Description of the output that should be generated. Used by some providers for additional LLM guidance.
+   */
+  description?: string;
+}
+
+/**
+ * Settings to use when calling an LLM.
+ *
+ * This interface holds optional model configuration parameters (e.g. temperature,
+ * topP, penalties, truncation, etc.).
+ *
+ * Not all models/providers support all of these parameters, so please check the API documentation
+ * for the specific model and provider you are using.
+ */
+export interface LanguageModelRequestSettings {
+  /**
+   * The temperature to use when calling the model.
+   */
+  temperature?: number;
+
+  /**
+   * The topP to use when calling the model.
+   */
+  topP?: number;
+
+  /**
+   * The frequency penalty to use when calling the model.
+   */
+  frequencyPenalty?: number;
+
+  /**
+   * The presence penalty to use when calling the model.
+   */
+  presencePenalty?: number;
+
+  /**
+   * The tool choice to use when calling the model.
+   */
+  toolChoice?: LanguageModelToolChoice;
+
+  /**
+   * Whether to use parallel tool calls when calling the model.
+   * Defaults to false if not provided.
+   */
+  parallelToolCalls?: boolean;
+
+  /**
+   * The truncation strategy to use when calling the model.
+   */
+  truncation?: "auto" | "disabled";
+
+  /**
+   * The maximum number of output tokens to generate.
+   */
+  maxTokens?: number;
+
+  /**
+   * Whether to store the generated model response for later retrieval.
+   * Defaults to true if not provided.
+   */
+  store?: boolean;
+
+  /**
+   * The reasoning settings to use when calling the model.
+   */
+  reasoning?: ModelSettingsReasoning;
+
+  /**
+   * The text settings to use when calling the model.
+   */
+  text?: ModelSettingsText;
+
+  /**
+   * Additional provider specific metadata to be passed directly to the model
+   * request.
+   */
+  providerOptions?: SharedProviderOptions;
+}
+
+export type LanguageModelToolChoice =
+  | { kind: "auto" } /* the tool selection is automatic (can be no tool) */
+  | { kind: "none" } /* no tool must be selected */
+  | { kind: "required" } /* one of the available tools must be selected */
+  | { kind: "tool"; toolId: string }; /* a specific tool must be selected */
+
+/**
+ * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+ *
+ * Supported for providers:
+ *
+ * - OpenAI
+ * - ... ?
+ */
+export type ModelSettingsReasoningEffort =
+  | "minimal"
+  | "low"
+  | "medium"
+  | "high"
+  | null;
+
+/**
+ * Configuration options for model reasoning
+ */
+export type ModelSettingsReasoning = {
+  /**
+   * Constrains effort on reasoning for [reasoning models](https://platform.openai.com/docs/guides/reasoning).
+   */
+  effort?: ModelSettingsReasoningEffort | null;
+
+  /**
+   * A summary of the reasoning performed by the model.
+   * This can be useful for debugging and understanding the model's reasoning process.
+   * One of `auto`, `concise`, or `detailed`.
+   */
+  summary?: "auto" | "concise" | "detailed" | null;
+};
+
+export interface ModelSettingsText {
+  /**
+   * Constrains the verbosity of the model's response.
+   *
+   * Supported for providers:
+   *
+   * - OpenAI
+   * - ... ?
+   */
+  verbosity?: "low" | "medium" | "high" | null;
+}
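Finally, a hedged sketch of assembling a `LanguageModelRequest` that asks for schema-constrained JSON output. The root re-export, the prompt text, and the schema are assumptions for illustration, not part of the package.

```ts
import type { LanguageModelRequest } from "@kernl-sdk/protocol"; // assumed root re-export

// Illustrative request: one user message, conservative sampling settings,
// and a JSON response format guided by a small schema.
const request: LanguageModelRequest = {
  input: [
    {
      kind: "message",
      role: "user",
      id: "msg_1",
      content: [{ kind: "text", text: "Summarize this repo in one sentence." }],
    },
  ],
  settings: {
    temperature: 0.2,
    maxTokens: 256,
    toolChoice: { kind: "auto" },
    reasoning: { effort: "low", summary: "auto" },
  },
  responseType: {
    kind: "json",
    schema: { type: "object", properties: { summary: { type: "string" } } },
    name: "repo_summary",
  },
};
```

Such a request would then be passed to `generate` or `stream` on a `LanguageModel` implementation.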