web-llm-runner 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +211 -0
- package/README.md +564 -0
- package/lib/cache_util.d.ts +19 -0
- package/lib/cache_util.d.ts.map +1 -0
- package/lib/config.d.ts +199 -0
- package/lib/config.d.ts.map +1 -0
- package/lib/conversation.d.ts +107 -0
- package/lib/conversation.d.ts.map +1 -0
- package/lib/embedding.d.ts +38 -0
- package/lib/embedding.d.ts.map +1 -0
- package/lib/engine.d.ts +140 -0
- package/lib/engine.d.ts.map +1 -0
- package/lib/error.d.ts +208 -0
- package/lib/error.d.ts.map +1 -0
- package/lib/extension_service_worker.d.ts +54 -0
- package/lib/extension_service_worker.d.ts.map +1 -0
- package/lib/index.d.ts +13 -0
- package/lib/index.d.ts.map +1 -0
- package/lib/index.js +13767 -0
- package/lib/index.js.map +1 -0
- package/lib/integrity.d.ts +44 -0
- package/lib/integrity.d.ts.map +1 -0
- package/lib/llm_chat.d.ts +258 -0
- package/lib/llm_chat.d.ts.map +1 -0
- package/lib/message.d.ts +87 -0
- package/lib/message.d.ts.map +1 -0
- package/lib/openai_api_protocols/chat_completion.d.ts +834 -0
- package/lib/openai_api_protocols/chat_completion.d.ts.map +1 -0
- package/lib/openai_api_protocols/completion.d.ts +270 -0
- package/lib/openai_api_protocols/completion.d.ts.map +1 -0
- package/lib/openai_api_protocols/embedding.d.ts +125 -0
- package/lib/openai_api_protocols/embedding.d.ts.map +1 -0
- package/lib/openai_api_protocols/index.d.ts +20 -0
- package/lib/openai_api_protocols/index.d.ts.map +1 -0
- package/lib/service_worker.d.ts +53 -0
- package/lib/service_worker.d.ts.map +1 -0
- package/lib/support.d.ts +117 -0
- package/lib/support.d.ts.map +1 -0
- package/lib/types.d.ts +202 -0
- package/lib/types.d.ts.map +1 -0
- package/lib/utils.d.ts +7 -0
- package/lib/utils.d.ts.map +1 -0
- package/lib/web_worker.d.ts +132 -0
- package/lib/web_worker.d.ts.map +1 -0
- package/lib/wrapper/WebLLMWrapper.d.ts +20 -0
- package/lib/wrapper/WebLLMWrapper.d.ts.map +1 -0
- package/lib/wrapper/llm-worker.d.ts +2 -0
- package/lib/wrapper/llm-worker.d.ts.map +1 -0
- package/package.json +60 -0
package/lib/openai_api_protocols/chat_completion.d.ts
@@ -0,0 +1,834 @@
/**
 * The input to OpenAI API, directly adopted from openai-node with small tweaks:
 * https://github.com/openai/openai-node/blob/master/src/resources/chat/completions.ts
 *
 * Copyright 2024 OpenAI
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
import { MLCEngineInterface, LatencyBreakdown } from "../types";
import { ModelType } from "../config";
import type { StructuralTagLike } from "@mlc-ai/web-xgrammar";
export declare class Chat {
    private engine;
    completions: Completions;
    constructor(engine: MLCEngineInterface);
}
export declare class Completions {
    private engine;
    constructor(engine: MLCEngineInterface);
    create(request: ChatCompletionRequestNonStreaming): Promise<ChatCompletion>;
    create(request: ChatCompletionRequestStreaming): Promise<AsyncIterable<ChatCompletionChunk>>;
    create(request: ChatCompletionRequestBase): Promise<AsyncIterable<ChatCompletionChunk> | ChatCompletion>;
}
/**
 * OpenAI chat completion request protocol.
 *
 * API reference: https://platform.openai.com/docs/api-reference/chat/create
 * Followed: https://github.com/openai/openai-node/blob/master/src/resources/chat/completions.ts
 *
 * @note `model` is excluded. Instead, call `CreateMLCEngine(model)` or `engine.reload(model)` explicitly before calling this API.
 */
export interface ChatCompletionRequestBase {
    /**
     * A list of messages comprising the conversation so far.
     */
    messages: Array<ChatCompletionMessageParam>;
    /**
     * If set, partial message deltas will be sent. It will be terminated by an empty chunk.
     */
    stream?: boolean | null;
    /**
     * Options for streaming response. Only set this when you set `stream: true`.
     */
    stream_options?: ChatCompletionStreamOptions | null;
    /**
     * How many chat completion choices to generate for each input message.
     */
    n?: number | null;
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their
     * existing frequency in the text so far, decreasing the model's likelihood to
     * repeat the same line verbatim.
     *
     * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
     */
    frequency_penalty?: number | null;
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on
     * whether they appear in the text so far, increasing the model's likelihood to
     * talk about new topics.
     *
     * [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
     */
    presence_penalty?: number | null;
    /**
     * Penalizes new tokens based on whether they appear in the prompt and the
     * generated text so far. Values greater than 1.0 encourage the model to use new
     * tokens, while values less than 1.0 encourage the model to repeat tokens.
     */
    repetition_penalty?: number | null;
    /**
     * The maximum number of [tokens](/tokenizer) that can be generated in the chat
     * completion.
     *
     * The total length of input tokens and generated tokens is limited by the model's
     * context length.
     */
    max_tokens?: number | null;
    /**
     * Sequences where the API will stop generating further tokens.
     */
    stop?: string | null | Array<string>;
    /**
     * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
     * make the output more random, while lower values like 0.2 will make it more
     * focused and deterministic.
     */
    temperature?: number | null;
    /**
     * An alternative to sampling with temperature, called nucleus sampling, where the
     * model considers the results of the tokens with top_p probability mass. So 0.1
     * means only the tokens comprising the top 10% probability mass are considered.
     */
    top_p?: number | null;
    /**
     * Modify the likelihood of specified tokens appearing in the completion.
     *
     * Accepts a JSON object that maps tokens (specified by their token ID, which varies per model)
     * to an associated bias value from -100 to 100. Typically, you can see `tokenizer.json` of the
     * model to see which token ID maps to what string. Mathematically, the bias is added to the
     * logits generated by the model prior to sampling. The exact effect will vary per model, but
     * values between -1 and 1 should decrease or increase likelihood of selection; values like -100
     * or 100 should result in a ban or exclusive selection of the relevant token.
     *
     * As an example, you can pass `{"16230": -100}` to prevent the `Hello` token from being
     * generated in Mistral-7B-Instruct-v0.2, according to the mapping in
     * https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2/raw/main/tokenizer.json.
     *
     * @note For stateful and customizable / flexible logit processing, see `webllm.LogitProcessor`.
     * @note If used in combination with `webllm.LogitProcessor`, `logit_bias` is applied after
     * `LogitProcessor.processLogits()` is called.
     */
    logit_bias?: Record<string, number> | null;
    /**
     * Whether to return log probabilities of the output tokens or not.
     *
     * If true, returns the log probabilities of each output token returned in the `content` of
     * `message`.
     */
    logprobs?: boolean | null;
    /**
     * An integer between 0 and 5 specifying the number of most likely tokens to return
     * at each token position, each with an associated log probability. `logprobs` must
     * be set to `true` if this parameter is used.
     */
    top_logprobs?: number | null;
    /**
     * If specified, our system will make a best effort to sample deterministically, such that
     * repeated requests with the same `seed` and parameters should return the same result.
     *
     * @note Seeding is done on a request-level rather than choice-level. That is, if `n > 1`, you
     * would still get different content for each `Choice`. But if two requests with `n = 2` are
     * processed with the same seed, the two results should be the same (two choices are different).
     */
    seed?: number | null;
    /**
     * Controls which (if any) function is called by the model. `none` means the model
     * will not call a function and instead generates a message. `auto` means the model
     * can pick between generating a message or calling a function. Specifying a
     * particular function via
     * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
     * call that function.
     *
     * `none` is the default when no functions are present. `auto` is the default if
     * functions are present.
     */
    tool_choice?: ChatCompletionToolChoiceOption;
    /**
     * A list of tools the model may call. Currently, only functions are supported as a
     * tool. Use this to provide a list of functions the model may generate JSON inputs
     * for.
     *
     * The corresponding reply would populate the `tool_calls` field. If used with streaming,
     * the last chunk would contain the `tool_calls` field, while the intermediate chunks would
     * contain the raw string.
     *
     * If the generation terminates due to FinishReason other than "stop" (i.e. "length" or "abort"),
     * then no `tool_calls` will be returned. User can still get the raw string output.
     */
    tools?: Array<ChatCompletionTool>;
    /**
     * An object specifying the format that the model must output.
     *
     * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
     * message the model generates is valid JSON.
     *
     * **Important:** when using JSON mode, you **must** also instruct the model to
     * produce JSON yourself via a system or user message. Without this, the model may
     * generate an unending stream of whitespace until the generation reaches the token
     * limit, resulting in a long-running and seemingly "stuck" request. Also note that
     * the message content may be partially cut off if `finish_reason="length"`, which
     * indicates the generation exceeded `max_tokens` or the conversation exceeded the
     * max context length.
     */
    response_format?: ResponseFormat;
    /**
     * If true, will ignore stop string and stop token and generate until max_tokens hit.
     * If unset, will treat as false.
     */
    ignore_eos?: boolean;
    /**
     * ID of the model to use. This equals to `ModelRecord.model_id`, which needs to either be in
     * `webllm.prebuiltAppConfig` or in `engineConfig.appConfig`.
     *
     * @note Call `CreateMLCEngine(model)` or `engine.reload(model)` ahead of time.
     * @note If only one model is loaded in the engine, this field is optional. If multiple models
     * are loaded, this is required.
     */
    model?: string | null;
    /**
     * Fields specific to WebLLM, not present in OpenAI.
     */
    extra_body?: {
        /**
         * If set to false, prepend a "<think>\n\n</think>\n\n" to the response, preventing the
         * model from generating thinking tokens. If set to true or undefined, does nothing.
         *
         * @note Currently only allowed to be used for Qwen3 models, though not explicitly checked.
         */
        enable_thinking?: boolean | null;
        /**
         * If set to true, the response will include a breakdown of the time spent in various
         * stages of token sampling.
         */
        enable_latency_breakdown?: boolean | null;
    };
}
export interface ChatCompletionRequestNonStreaming extends ChatCompletionRequestBase {
    /**
     * If set, partial message deltas will be sent. It will be terminated by an empty chunk.
     */
    stream?: false | null;
}
export interface ChatCompletionRequestStreaming extends ChatCompletionRequestBase {
    /**
     * If set, partial message deltas will be sent. It will be terminated by an empty chunk.
     */
    stream: true;
}
export type ChatCompletionRequest = ChatCompletionRequestNonStreaming | ChatCompletionRequestStreaming;
/**
 * Represents a chat completion response returned by model, based on the provided input.
 */
export interface ChatCompletion {
    /**
     * A unique identifier for the chat completion.
     */
    id: string;
    /**
     * A list of chat completion choices. Can be more than one if `n` is greater than 1.
     */
    choices: Array<ChatCompletion.Choice>;
    /**
     * The model used for the chat completion.
     */
    model: string;
    /**
     * The object type, which is always `chat.completion`.
     */
    object: "chat.completion";
    /**
     * The Unix timestamp (in seconds) of when the chat completion was created.
     */
    created: number;
    /**
     * Usage statistics for the completion request.
     *
     * @note If we detect user is performing multi-round chatting, only the new portion of the
     * prompt is counted for prompt_tokens. If `n > 1`, all choices' generation usages combined.
     */
    usage?: CompletionUsage;
    /**
     * This fingerprint represents the backend configuration that the model runs with.
     *
     * Can be used in conjunction with the `seed` request parameter to understand when
     * backend changes have been made that might impact determinism.
     *
     * @note Not supported yet.
     */
    system_fingerprint?: string;
}
/**
 * Represents a streamed chunk of a chat completion response returned by model,
 * based on the provided input.
 */
export interface ChatCompletionChunk {
    /**
     * A unique identifier for the chat completion. Each chunk has the same ID.
     */
    id: string;
    /**
     * A list of chat completion choices. Can contain more than one elements if `n` is
     * greater than 1. Can also be empty for the last chunk if you set
     * `stream_options: {"include_usage": true}`.
     */
    choices: Array<ChatCompletionChunk.Choice>;
    /**
     * The Unix timestamp (in seconds) of when the chat completion was created. Each
     * chunk has the same timestamp.
     */
    created: number;
    /**
     * The model to generate the completion.
     */
    model: string;
    /**
     * The object type, which is always `chat.completion.chunk`.
     */
    object: "chat.completion.chunk";
    /**
     * This fingerprint represents the backend configuration that the model runs with.
     * Can be used in conjunction with the `seed` request parameter to understand when
     * backend changes have been made that might impact determinism.
     *
     * @note Not supported yet.
     */
    system_fingerprint?: string;
    /**
     * An optional field that will only be present when you set
     * `stream_options: {"include_usage": true}` in your request. When present, it
     * contains a null value except for the last chunk which contains the token usage
     * statistics for the entire request.
     */
    usage?: CompletionUsage;
}
export declare const ChatCompletionRequestUnsupportedFields: Array<string>;
/**
 * Post init and verify whether the input of the request is valid. Thus, this function can throw
 * error or in-place update request.
 * @param request User's input request.
 * @param currentModelId The current model loaded that will perform this request.
 * @param currentModelType The type of the model loaded, decide what requests can be handled.
 */
export declare function postInitAndCheckFields(request: ChatCompletionRequest, currentModelId: string, currentModelType: ModelType): void;
export type ChatCompletionContentPart = ChatCompletionContentPartText | ChatCompletionContentPartImage;
export interface ChatCompletionContentPartText {
    /**
     * The text content.
     */
    text: string;
    /**
     * The type of the content part.
     */
    type: "text";
}
export declare namespace ChatCompletionContentPartImage {
    interface ImageURL {
        /**
         * Either a URL of the image or the base64 encoded image data.
         */
        url: string;
        /**
         * Specifies the detail level of the image.
         */
        detail?: "auto" | "low" | "high";
    }
}
export interface ChatCompletionContentPartImage {
    image_url: ChatCompletionContentPartImage.ImageURL;
    /**
     * The type of the content part.
     */
    type: "image_url";
}
export interface ChatCompletionMessageToolCall {
    /**
     * The ID of the tool call. In WebLLM, it is used as the index of the tool call among all
     * the tools calls in this request generation.
     */
    id: string;
    /**
     * The function that the model called.
     */
    function: ChatCompletionMessageToolCall.Function;
    /**
     * The type of the tool. Currently, only `function` is supported.
     */
    type: "function";
}
export declare namespace ChatCompletionMessageToolCall {
    /**
     * The function that the model called.
     */
    interface Function {
        /**
         * The arguments to call the function with, as generated by the model in JSON
         * format.
         */
        arguments: string;
        /**
         * The name of the function to call.
         */
        name: string;
    }
}
/**
 * The role of the author of a message
 */
export type ChatCompletionRole = "system" | "user" | "assistant" | "tool" | "function";
/**
 * Options for streaming response. Only set this when you set `stream: true`.
 */
export interface ChatCompletionStreamOptions {
    /**
     * If set, an additional chunk will be streamed after the last empty chunk.
     * The `usage` field on this chunk shows the token usage statistics for the entire
     * request, and the `choices` field will always be an empty array. All other chunks
     * will also include a `usage` field, but with a null value.
     */
    include_usage?: boolean;
}
export interface ChatCompletionSystemMessageParam {
    /**
     * The contents of the system message.
     */
    content: string;
    /**
     * The role of the messages author, in this case `system`.
     */
    role: "system";
}
export interface ChatCompletionUserMessageParam {
    /**
     * The contents of the user message.
     */
    content: string | Array<ChatCompletionContentPart>;
    /**
     * The role of the messages author, in this case `user`.
     */
    role: "user";
    /**
     * An optional name for the participant. Provides the model information to
     * differentiate between participants of the same role.
     *
     * @note This is experimental, as models typically have predefined names for the user.
     */
    name?: string;
}
export interface ChatCompletionAssistantMessageParam {
    /**
     * The role of the messages author, in this case `assistant`.
     */
    role: "assistant";
    /**
     * The contents of the assistant message. Required unless `tool_calls` is specified.
     */
    content?: string | null;
    /**
     * An optional name for the participant. Provides the model information to
     * differentiate between participants of the same role.
     *
     * @note This is experimental, as models typically have predefined names for the user.
     */
    name?: string;
    /**
     * The tool calls generated by the model, such as function calls.
     */
    tool_calls?: Array<ChatCompletionMessageToolCall>;
}
export interface ChatCompletionToolMessageParam {
    /**
     * The contents of the tool message.
     */
    content: string;
    /**
     * The role of the messages author, in this case `tool`.
     */
    role: "tool";
    /**
     * Tool call that this message is responding to.
     */
    tool_call_id: string;
}
export type ChatCompletionMessageParam = ChatCompletionSystemMessageParam | ChatCompletionUserMessageParam | ChatCompletionAssistantMessageParam | ChatCompletionToolMessageParam;
/**
 * The parameters the functions accepts, described as a JSON Schema object. See the
 * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
 * for examples, and the
 * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
 * documentation about the format.
 *
 * Omitting `parameters` defines a function with an empty parameter list.
 */
export type FunctionParameters = Record<string, unknown>;
export interface FunctionDefinition {
    /**
     * The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
     * underscores and dashes, with a maximum length of 64.
     */
    name: string;
    /**
     * A description of what the function does, used by the model to choose when and
     * how to call the function.
     */
    description?: string;
    /**
     * The parameters the functions accepts, described as a JSON Schema object. See the
     * [guide](https://platform.openai.com/docs/guides/text-generation/function-calling)
     * for examples, and the
     * [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
     * documentation about the format.
     *
     * Omitting `parameters` defines a function with an empty parameter list.
     */
    parameters?: FunctionParameters;
}
export interface ChatCompletionTool {
    function: FunctionDefinition;
    /**
     * The type of the tool. Currently, only `function` is supported.
     */
    type: "function";
}
/**
 * Specifies a tool the model should use. Use to force the model to call a specific
 * function.
 */
export interface ChatCompletionNamedToolChoice {
    function: ChatCompletionNamedToolChoice.Function;
    /**
     * The type of the tool. Currently, only `function` is supported.
     */
    type: "function";
}
export declare namespace ChatCompletionNamedToolChoice {
    interface Function {
        /**
         * The name of the function to call.
         */
        name: string;
    }
}
/**
 * Controls which (if any) function is called by the model. `none` means the model
 * will not call a function and instead generates a message. `auto` means the model
 * can pick between generating a message or calling a function. Specifying a
 * particular function via
 * `{"type": "function", "function": {"name": "my_function"}}` forces the model to
 * call that function.
 *
 * `none` is the default when no functions are present. `auto` is the default if
 * functions are present.
 */
export type ChatCompletionToolChoiceOption = "none" | "auto" | ChatCompletionNamedToolChoice;
export interface TopLogprob {
    /**
     * The token.
     */
    token: string;
    /**
     * A list of integers representing the UTF-8 bytes representation of the token.
     * Useful in instances where characters are represented by multiple tokens and
     * their byte representations must be combined to generate the correct text
     * representation. Can be `null` if there is no bytes representation for the token.
     *
     * @note Encoded with `TextEncoder.encode()` and can be decoded with `TextDecoder.decode()`.
     * For details, see https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/encode.
     */
    bytes: Array<number> | null;
    /**
     * The log probability of this token.
     */
    logprob: number;
}
export interface ChatCompletionTokenLogprob {
    /**
     * The token.
     */
    token: string;
    /**
     * A list of integers representing the UTF-8 bytes representation of the token.
     * Useful in instances where characters are represented by multiple tokens and
     * their byte representations must be combined to generate the correct text
     * representation. Can be `null` if there is no bytes representation for the token.
     *
     * @note Encoded with `TextEncoder.encode()` and can be decoded with `TextDecoder.decode()`.
     * For details, see https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/encode.
     */
    bytes: Array<number> | null;
    /**
     * The log probability of this token.
     */
    logprob: number;
    /**
     * List of the most likely tokens and their log probability, at this token
     * position. In rare cases, there may be fewer than the number of requested
     * `top_logprobs` returned.
     */
    top_logprobs: Array<TopLogprob>;
}
/**
 * A chat completion message generated by the model.
 */
export interface ChatCompletionMessage {
    /**
     * The contents of the message.
     */
    content: string | null;
    /**
     * The role of the author of this message.
     */
    role: "assistant";
    /**
     * The tool calls generated by the model, such as function calls.
     */
    tool_calls?: Array<ChatCompletionMessageToolCall>;
}
/**
 * Usage statistics for the completion request.
 */
export interface CompletionUsage {
    /**
     * Number of tokens in the generated completion.
     */
    completion_tokens: number;
    /**
     * Number of tokens in the prompt.
     *
     * @note If we detect user is performing multi-round chatting, only the new portion of the
     * prompt is counted for prompt_tokens.
     */
    prompt_tokens: number;
    /**
     * Total number of tokens used in the request (prompt + completion).
     */
    total_tokens: number;
    /**
     * Fields specific to WebLLM, not present in OpenAI.
     */
    extra: {
        /**
         * Total seconds spent on this request, from receiving the request, to generating the response.
         */
        e2e_latency_s: number;
        /**
         * Number of tokens per second for prefilling.
         */
        prefill_tokens_per_s: number;
        /**
         * Number of tokens per second for autoregressive decoding.
         */
        decode_tokens_per_s: number;
        /**
         * Seconds spent to generate the first token since receiving the request. Mainly contains
         * prefilling overhead. If n > 1, it is the sum over all choices.
         */
        time_to_first_token_s: number;
        /**
         * Seconds in between generated tokens. Mainly contains decoding overhead. If n > 1, it
         * is the average over all choices.
         */
        time_per_output_token_s: number;
        /**
         * Seconds spent on initializing grammar matcher for structured output. If n > 1, it
         * is the sum over all choices.
         */
        grammar_init_s?: number;
        /**
         * Seconds per-token that grammar matcher spent on creating bitmask and accepting token for
         * structured output. If n > 1, it is the average over all choices.
         */
        grammar_per_token_s?: number;
        /**
         * If `enable_latency_breakdown` is set to true in the request, this field will be
         * present and contain a breakdown of the time spent in various stages of token sampling.
         */
        latencyBreakdown?: LatencyBreakdown;
    };
}
/**
 * The reason the model stopped generating tokens. This will be `stop` if the model
 * hit a natural stop point or a provided stop sequence, `length` if the maximum
 * number of tokens specified in the request was reached or the context_window_size will
 * be exceeded, `tool_calls` if the model called a tool, or `abort` if user manually stops the
 * generation.
 */
export type ChatCompletionFinishReason = "stop" | "length" | "tool_calls" | "abort";
export declare namespace ChatCompletion {
    interface Choice {
        /**
         * The reason the model stopped generating tokens. This will be `stop` if the model
         * hit a natural stop point or a provided stop sequence, `length` if the maximum
         * number of tokens specified in the request was reached, `tool_calls` if the
         * model called a tool, or `abort` if user manually stops the generation.
         */
        finish_reason: ChatCompletionFinishReason;
        /**
         * The index of the choice in the list of choices.
         */
        index: number;
        /**
         * Log probability information for the choice.
         */
        logprobs: Choice.Logprobs | null;
        /**
         * A chat completion message generated by the model.
         */
        message: ChatCompletionMessage;
    }
    namespace Choice {
        /**
         * Log probability information for the choice.
         */
        interface Logprobs {
            /**
             * A list of message content tokens with log probability information.
             */
            content: Array<ChatCompletionTokenLogprob> | null;
        }
    }
}
export declare namespace ChatCompletionChunk {
    interface Choice {
        /**
         * A chat completion delta generated by streamed model responses.
         */
        delta: Choice.Delta;
        /**
         * The reason the model stopped generating tokens. This will be `stop` if the model
         * hit a natural stop point or a provided stop sequence, `length` if the maximum
         * number of tokens specified in the request was reached, `tool_calls` if the
         * model called a tool, or `abort` if user manually stops the generation.
         */
        finish_reason: ChatCompletionFinishReason | null;
        /**
         * The index of the choice in the list of choices.
         */
        index: number;
        /**
         * Log probability information for the choice.
         */
        logprobs?: Choice.Logprobs | null;
    }
    namespace Choice {
        /**
         * A chat completion delta generated by streamed model responses.
         */
        interface Delta {
            /**
             * The contents of the chunk message.
             */
            content?: string | null;
            /**
             * The role of the author of this message.
             */
            role?: "system" | "user" | "assistant" | "tool";
            tool_calls?: Array<Delta.ToolCall>;
        }
        namespace Delta {
            interface ToolCall {
                /**
                 * The index of the tool call among all the tools calls in this request generation.
                 */
                index: number;
                /**
                 * The ID of the tool call. Not used in WebLLM.
                 */
                id?: string;
                function?: ToolCall.Function;
                /**
                 * The type of the tool. Currently, only `function` is supported.
                 */
                type?: "function";
            }
            namespace ToolCall {
                interface Function {
                    /**
                     * The arguments to call the function with, as generated by the model in JSON
                     * format. Note that the model does not always generate valid JSON, and may
                     * hallucinate parameters not defined by your function schema. Validate the
                     * arguments in your code before calling your function.
                     */
                    arguments?: string;
                    /**
                     * The name of the function to call.
                     */
                    name?: string;
                }
            }
        }
        /**
         * Log probability information for the choice.
         */
        interface Logprobs {
            /**
             * A list of message content tokens with log probability information.
             */
            content: Array<ChatCompletionTokenLogprob> | null;
        }
    }
}
/**
 * An object specifying the format that the model must output.
 *
 * Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
 * message the model generates is valid JSON.
 *
 * Setting to `{ "type": "grammar" }` requires you to also specify the `grammar` field, which
 * is a BNFGrammar string.
 *
 * Setting to `{ "type": "structural_tag" }` requires a `structural_tag` definition that
 * applies trigger-based constraints (e.g. tag-delimited blocks) while allowing free-form text
 * outside the triggered spans.
 *
 * Setting `schema` specifies the output format of the json object such as properties to include.
 *
 * **Important:** when using JSON mode, you **must** also instruct the model to produce JSON
 * following the schema (if specified) yourself via a system or user message. Without this,
 * the model may generate an unending stream of whitespace until the generation reaches the token
 * limit, resulting in a long-running and seemingly "stuck" request. Also note that
 * the message content may be partially cut off if `finish_reason="length"`, which
 * indicates the generation exceeded `max_tokens` or the conversation exceeded the
 * max context length.
 */
export interface ResponseFormat {
    /**
     * Must be one of `text`, `json_object`, `grammar`, or `structural_tag`.
     */
    type?: "text" | "json_object" | "grammar" | "structural_tag";
    /**
     * A schema string in the format of the schema of a JSON file. `type` needs to be `json_object`.
     */
    schema?: string;
    /**
     * An EBNF-formatted string. Needs to be specified when, and only specified when,
     * `type` is `grammar`. The grammar will be normalized (simplified) by default.
     * EBNF grammar: see https://www.w3.org/TR/xml/#sec-notation. Note:
     1. Use # as the comment mark
     2. Use C-style unicode escape sequence \u01AB, \U000001AB, \xAB
     3. A-B (match A and not match B) is not supported yet
     4. Lookahead assertion can be added at the end of a rule to speed up matching. E.g.
     ```
     main ::= "ab" a [a-z]
     a ::= "cd" (=[a-z])
     ```
     The assertion (=[a-z]) means a must be followed by [a-z].
     */
    grammar?: string;
    /**
     * A structural tag definition. Needs to be specified when, and only when,
     * `type` is `structural_tag`.
     */
    structural_tag?: StructuralTagLike | string;
}
//# sourceMappingURL=chat_completion.d.ts.map
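
A minimal usage sketch of the types declared in chat_completion.d.ts above. It assumes web-llm-runner re-exports the upstream @mlc-ai/web-llm entry points (CreateMLCEngine and the engine.chat.completions API) and that the model ID shown is one of the prebuilt models; these are assumptions, not something the diff itself confirms.

// Sketch only: assumes "web-llm-runner" re-exports the upstream @mlc-ai/web-llm surface.
import {
  CreateMLCEngine,
  ChatCompletionMessageParam,
  ChatCompletionChunk,
} from "web-llm-runner";

async function main() {
  // Load a model first; per the JSDoc above, `model` in the request is then optional
  // because only one model is loaded. The model ID is assumed to be in prebuiltAppConfig.
  const engine = await CreateMLCEngine("Llama-3.1-8B-Instruct-q4f32_1-MLC");

  const messages: ChatCompletionMessageParam[] = [
    { role: "system", content: "You are a helpful assistant. Reply with a JSON object." },
    { role: "user", content: 'Return {"greeting": string} for a new user.' },
  ];

  // Non-streaming overload: resolves to a ChatCompletion. JSON mode constrains decoding,
  // but the prompt must still ask for JSON, as the ResponseFormat docs warn.
  const completion = await engine.chat.completions.create({
    messages,
    response_format: { type: "json_object" },
    max_tokens: 128,
  });
  console.log(completion.choices[0].message.content);
  console.log("decode tok/s:", completion.usage?.extra.decode_tokens_per_s);

  // Streaming overload: `stream: true` selects Promise<AsyncIterable<ChatCompletionChunk>>.
  // With include_usage, the final chunk has an empty `choices` array and carries `usage`.
  const chunks: AsyncIterable<ChatCompletionChunk> = await engine.chat.completions.create({
    messages: [{ role: "user", content: "Say hello in one sentence." }],
    stream: true,
    stream_options: { include_usage: true },
  });
  let reply = "";
  for await (const chunk of chunks) {
    reply += chunk.choices[0]?.delta.content ?? "";
    if (chunk.usage) console.log("total tokens:", chunk.usage.total_tokens);
  }
  console.log(reply);
}

main();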