@huggingface/inference 1.6.2 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +7 -0
- package/dist/index.d.ts +94 -1
- package/dist/index.js +299 -22
- package/dist/index.mjs +297 -21
- package/package.json +1 -1
- package/src/HfInference.ts +355 -23
- package/src/vendor/fetch-event-source/parse.spec.ts +389 -0
- package/src/vendor/fetch-event-source/parse.ts +216 -0
package/dist/index.mjs
CHANGED
|
@@ -6,7 +6,113 @@ function toArray(obj) {
|
|
|
6
6
|
return [obj];
|
|
7
7
|
}
|
|
8
8
|
|
|
9
|
+
// src/vendor/fetch-event-source/parse.ts
|
|
10
|
+
/**
 * Builds a chunk handler that splits an incoming byte stream into
 * EventSource lines. Each complete line is passed to `onLine` together
 * with the byte offset of the first ":" in that line (-1 when absent).
 * Partial lines are buffered until the next chunk arrives.
 */
function getLines(onLine) {
  let pending; // unprocessed bytes carried over between chunks
  let cursor; // scan position inside `pending`
  let colonOffset; // offset of the field separator within the current line
  let skipNextLf = false; // set after a CR so that a following LF is swallowed
  return function onChunk(arr) {
    if (pending === void 0) {
      pending = arr;
      cursor = 0;
      colonOffset = -1;
    } else {
      // Append the new chunk to whatever was left over.
      pending = concat(pending, arr);
    }
    const total = pending.length;
    let lineStart = 0;
    while (cursor < total) {
      if (skipNextLf) {
        if (pending[cursor] === 10 /* NewLine */) {
          lineStart = ++cursor;
        }
        skipNextLf = false;
      }
      let lineEnd = -1;
      for (; cursor < total && lineEnd === -1; ++cursor) {
        const byte = pending[cursor];
        if (byte === 58 /* Colon */) {
          // Only the first colon of a line marks the field separator.
          if (colonOffset === -1) {
            colonOffset = cursor - lineStart;
          }
        } else if (byte === 13 /* CarriageReturn */) {
          skipNextLf = true;
          lineEnd = cursor;
        } else if (byte === 10 /* NewLine */) {
          lineEnd = cursor;
        }
      }
      if (lineEnd === -1) {
        // Incomplete line: wait for more bytes.
        break;
      }
      onLine(pending.subarray(lineStart, lineEnd), colonOffset);
      lineStart = cursor;
      colonOffset = -1;
    }
    if (lineStart === total) {
      // Everything consumed: release the buffer.
      pending = void 0;
    } else if (lineStart !== 0) {
      // Drop the consumed prefix and rebase the cursor.
      pending = pending.subarray(lineStart);
      cursor -= lineStart;
    }
  };
}
|
|
62
|
+
/**
 * Builds a line handler that assembles EventSource lines into messages.
 * `id` and `retry` fields are additionally reported through the dedicated
 * callbacks; each completed message (terminated by a blank line) is passed
 * to `onMessage`.
 */
function getMessages(onId, onRetry, onMessage) {
  let current = newMessage();
  const decoder = new TextDecoder();
  return function onLine(line, fieldLength) {
    if (line.length === 0) {
      // A blank line terminates the message.
      onMessage?.(current);
      current = newMessage();
      return;
    }
    if (fieldLength <= 0) {
      // Comment line (leading colon) or a line without a field name: ignore.
      return;
    }
    const field = decoder.decode(line.subarray(0, fieldLength));
    // A single space after the colon is optional and skipped.
    const hasSpace = line[fieldLength + 1] === 32 /* Space */;
    const value = decoder.decode(line.subarray(fieldLength + (hasSpace ? 2 : 1)));
    if (field === "data") {
      // Multiple data lines are joined with "\n".
      current.data = current.data ? current.data + "\n" + value : value;
    } else if (field === "event") {
      current.event = value;
    } else if (field === "id") {
      current.id = value;
      onId(value);
    } else if (field === "retry") {
      const retry = parseInt(value, 10);
      if (!isNaN(retry)) {
        current.retry = retry;
        onRetry(retry);
      }
    }
  };
}

/** Concatenates two byte arrays into a newly allocated Uint8Array. */
function concat(a, b) {
  const merged = new Uint8Array(a.length + b.length);
  merged.set(a, 0);
  merged.set(b, a.length);
  return merged;
}

/** Creates a fresh, empty EventSource message accumulator. */
function newMessage() {
  // String fields default to ""; `retry` stays undefined until the server
  // sends one.
  return { data: "", event: "", id: "", retry: void 0 };
}
|
|
107
|
+
|
|
9
108
|
// src/HfInference.ts
var HF_INFERENCE_API_BASE_URL = "https://api-inference.huggingface.co/models/";
/**
 * Possible finish reasons reported by the text-generation stream
 * (runtime emit of the corresponding TypeScript enum).
 */
var TextGenerationStreamFinishReason = /* @__PURE__ */ (function (finishReasons) {
  finishReasons["Length"] = "length";
  finishReasons["EndOfSequenceToken"] = "eos_token";
  finishReasons["StopSequence"] = "stop_sequence";
  return finishReasons;
})(TextGenerationStreamFinishReason || {});
|
|
10
116
|
var HfInference = class {
|
|
11
117
|
apiKey;
|
|
12
118
|
defaultOptions;
|
|
@@ -18,132 +124,246 @@ var HfInference = class {
|
|
|
18
124
|
* Tries to fill in a hole with a missing word (token to be precise). That’s the base task for BERT models.
|
|
19
125
|
*/
|
|
20
126
|
async fillMask(args, options) {
|
|
21
|
-
|
|
127
|
+
const res = await this.request(args, options);
|
|
128
|
+
const isValidOutput = Array.isArray(res) && res.every(
|
|
129
|
+
(x) => typeof x.score === "number" && typeof x.sequence === "string" && typeof x.token === "number" && typeof x.token_str === "string"
|
|
130
|
+
);
|
|
131
|
+
if (!isValidOutput) {
|
|
132
|
+
throw new TypeError(
|
|
133
|
+
"Invalid inference output: output must be of type Array<score: number, sequence:string, token:number, token_str:string>"
|
|
134
|
+
);
|
|
135
|
+
}
|
|
136
|
+
return res;
|
|
22
137
|
}
|
|
23
138
|
/**
|
|
24
139
|
* This task is well known to summarize longer text into shorter text. Be careful, some models have a maximum length of input. That means that the summary cannot handle full books for instance. Be careful when choosing your model.
|
|
25
140
|
*/
|
|
26
141
|
async summarization(args, options) {
|
|
27
|
-
|
|
142
|
+
const res = await this.request(args, options);
|
|
143
|
+
const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.summary_text === "string");
|
|
144
|
+
if (!isValidOutput) {
|
|
145
|
+
throw new TypeError("Invalid inference output: output must be of type Array<summary_text: string>");
|
|
146
|
+
}
|
|
147
|
+
return res?.[0];
|
|
28
148
|
}
|
|
29
149
|
/**
|
|
30
150
|
* Want to have a nice know-it-all bot that can answer any question?. Recommended model: deepset/roberta-base-squad2
|
|
31
151
|
*/
|
|
32
152
|
async questionAnswer(args, options) {
|
|
33
|
-
|
|
153
|
+
const res = await this.request(args, options);
|
|
154
|
+
const isValidOutput = typeof res.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number";
|
|
155
|
+
if (!isValidOutput) {
|
|
156
|
+
throw new TypeError(
|
|
157
|
+
"Invalid inference output: output must be of type <answer: string, end: number, score: number, start: number>"
|
|
158
|
+
);
|
|
159
|
+
}
|
|
160
|
+
return res;
|
|
34
161
|
}
|
|
35
162
|
/**
|
|
36
163
|
* Don’t know SQL? Don’t want to dive into a large spreadsheet? Ask questions in plain english! Recommended model: google/tapas-base-finetuned-wtq.
|
|
37
164
|
*/
|
|
38
165
|
async tableQuestionAnswer(args, options) {
|
|
39
|
-
|
|
166
|
+
const res = await this.request(args, options);
|
|
167
|
+
const isValidOutput = typeof res.aggregator === "string" && typeof res.answer === "string" && Array.isArray(res.cells) && res.cells.every((x) => typeof x === "string") && Array.isArray(res.coordinates) && res.coordinates.every((coord) => Array.isArray(coord) && coord.every((x) => typeof x === "number"));
|
|
168
|
+
if (!isValidOutput) {
|
|
169
|
+
throw new TypeError(
|
|
170
|
+
"Invalid inference output: output must be of type <aggregator: string, answer: string, cells: string[], coordinates: number[][]>"
|
|
171
|
+
);
|
|
172
|
+
}
|
|
173
|
+
return res;
|
|
40
174
|
}
|
|
41
175
|
/**
|
|
42
176
|
* Usually used for sentiment-analysis this will output the likelihood of classes of an input. Recommended model: distilbert-base-uncased-finetuned-sst-2-english
|
|
43
177
|
*/
|
|
44
178
|
async textClassification(args, options) {
|
|
45
|
-
|
|
179
|
+
const res = (await this.request(args, options))?.[0];
|
|
180
|
+
const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number");
|
|
181
|
+
if (!isValidOutput) {
|
|
182
|
+
throw new TypeError("Invalid inference output: output must be of type Array<label: string, score: number>");
|
|
183
|
+
}
|
|
184
|
+
return res;
|
|
46
185
|
}
|
|
47
186
|
/**
|
|
48
187
|
* Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
|
|
49
188
|
*/
|
|
50
189
|
async textGeneration(args, options) {
|
|
51
|
-
|
|
190
|
+
const res = await this.request(args, options);
|
|
191
|
+
const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.generated_text === "string");
|
|
192
|
+
if (!isValidOutput) {
|
|
193
|
+
throw new TypeError("Invalid inference output: output must be of type Array<generated_text: string>");
|
|
194
|
+
}
|
|
195
|
+
return res?.[0];
|
|
196
|
+
}
|
|
197
|
+
/**
|
|
198
|
+
* Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
|
|
199
|
+
*/
|
|
200
|
+
async *textGenerationStream(args, options) {
|
|
201
|
+
yield* this.streamingRequest(args, options);
|
|
52
202
|
}
|
|
53
203
|
/**
|
|
54
204
|
* Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. Recommended model: dbmdz/bert-large-cased-finetuned-conll03-english
|
|
55
205
|
*/
|
|
56
206
|
async tokenClassification(args, options) {
|
|
57
|
-
|
|
207
|
+
const res = toArray(await this.request(args, options));
|
|
208
|
+
const isValidOutput = Array.isArray(res) && res.every(
|
|
209
|
+
(x) => typeof x.end === "number" && typeof x.entity_group === "string" && typeof x.score === "number" && typeof x.start === "number" && typeof x.word === "string"
|
|
210
|
+
);
|
|
211
|
+
if (!isValidOutput) {
|
|
212
|
+
throw new TypeError(
|
|
213
|
+
"Invalid inference output: output must be of type Array<end: number, entity_group: string, score: number, start: number, word: string>"
|
|
214
|
+
);
|
|
215
|
+
}
|
|
216
|
+
return res;
|
|
58
217
|
}
|
|
59
218
|
/**
|
|
60
219
|
* This task is well known to translate text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en.
|
|
61
220
|
*/
|
|
62
221
|
async translation(args, options) {
|
|
63
|
-
|
|
222
|
+
const res = await this.request(args, options);
|
|
223
|
+
const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.translation_text === "string");
|
|
224
|
+
if (!isValidOutput) {
|
|
225
|
+
throw new TypeError("Invalid inference output: output must be of type Array<translation_text: string>");
|
|
226
|
+
}
|
|
227
|
+
return res?.[0];
|
|
64
228
|
}
|
|
65
229
|
/**
|
|
66
230
|
* This task is super useful to try out classification with zero code, you simply pass a sentence/paragraph and the possible labels for that sentence, and you get a result. Recommended model: facebook/bart-large-mnli.
|
|
67
231
|
*/
|
|
68
232
|
async zeroShotClassification(args, options) {
|
|
69
|
-
|
|
233
|
+
const res = toArray(
|
|
70
234
|
await this.request(args, options)
|
|
71
235
|
);
|
|
236
|
+
const isValidOutput = Array.isArray(res) && res.every(
|
|
237
|
+
(x) => Array.isArray(x.labels) && x.labels.every((_label) => typeof _label === "string") && Array.isArray(x.scores) && x.scores.every((_score) => typeof _score === "number") && typeof x.sequence === "string"
|
|
238
|
+
);
|
|
239
|
+
if (!isValidOutput) {
|
|
240
|
+
throw new TypeError(
|
|
241
|
+
"Invalid inference output: output must be of type Array<labels: string[], scores: number[], sequence: string>"
|
|
242
|
+
);
|
|
243
|
+
}
|
|
244
|
+
return res;
|
|
72
245
|
}
|
|
73
246
|
/**
|
|
74
247
|
* This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large.
|
|
75
248
|
*
|
|
76
249
|
*/
|
|
77
250
|
async conversational(args, options) {
|
|
78
|
-
|
|
251
|
+
const res = await this.request(args, options);
|
|
252
|
+
const isValidOutput = Array.isArray(res.conversation.generated_responses) && res.conversation.generated_responses.every((x) => typeof x === "string") && Array.isArray(res.conversation.past_user_inputs) && res.conversation.past_user_inputs.every((x) => typeof x === "string") && typeof res.generated_text === "string" && Array.isArray(res.warnings) && res.warnings.every((x) => typeof x === "string");
|
|
253
|
+
if (!isValidOutput) {
|
|
254
|
+
throw new TypeError(
|
|
255
|
+
"Invalid inference output: output must be of type <conversation: {generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]>"
|
|
256
|
+
);
|
|
257
|
+
}
|
|
258
|
+
return res;
|
|
79
259
|
}
|
|
80
260
|
/**
|
|
81
261
|
* This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search.
|
|
82
262
|
*/
|
|
83
263
|
async featureExtraction(args, options) {
|
|
84
|
-
|
|
264
|
+
const res = await this.request(args, options);
|
|
265
|
+
return res;
|
|
85
266
|
}
|
|
86
267
|
/**
|
|
87
268
|
* This task reads some audio input and outputs the said words within the audio files.
|
|
88
269
|
* Recommended model (english language): facebook/wav2vec2-large-960h-lv60-self
|
|
89
270
|
*/
|
|
90
271
|
async automaticSpeechRecognition(args, options) {
|
|
91
|
-
|
|
272
|
+
const res = await this.request(args, {
|
|
92
273
|
...options,
|
|
93
274
|
binary: true
|
|
94
275
|
});
|
|
276
|
+
const isValidOutput = typeof res.text === "string";
|
|
277
|
+
if (!isValidOutput) {
|
|
278
|
+
throw new TypeError("Invalid inference output: output must be of type <text: string>");
|
|
279
|
+
}
|
|
280
|
+
return res;
|
|
95
281
|
}
|
|
96
282
|
/**
|
|
97
283
|
* This task reads some audio input and outputs the likelihood of classes.
|
|
98
284
|
* Recommended model: superb/hubert-large-superb-er
|
|
99
285
|
*/
|
|
100
286
|
async audioClassification(args, options) {
|
|
101
|
-
|
|
287
|
+
const res = await this.request(args, {
|
|
102
288
|
...options,
|
|
103
289
|
binary: true
|
|
104
290
|
});
|
|
291
|
+
const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number");
|
|
292
|
+
if (!isValidOutput) {
|
|
293
|
+
throw new TypeError("Invalid inference output: output must be of type Array<label: string, score: number>");
|
|
294
|
+
}
|
|
295
|
+
return res;
|
|
105
296
|
}
|
|
106
297
|
/**
|
|
107
298
|
* This task reads some image input and outputs the likelihood of classes.
|
|
108
299
|
* Recommended model: google/vit-base-patch16-224
|
|
109
300
|
*/
|
|
110
301
|
async imageClassification(args, options) {
|
|
111
|
-
|
|
302
|
+
const res = await this.request(args, {
|
|
112
303
|
...options,
|
|
113
304
|
binary: true
|
|
114
305
|
});
|
|
306
|
+
const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number");
|
|
307
|
+
if (!isValidOutput) {
|
|
308
|
+
throw new TypeError("Invalid inference output: output must be of type Array<label: string, score: number>");
|
|
309
|
+
}
|
|
310
|
+
return res;
|
|
115
311
|
}
|
|
116
312
|
/**
|
|
117
313
|
* This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects.
|
|
118
314
|
* Recommended model: facebook/detr-resnet-50
|
|
119
315
|
*/
|
|
120
316
|
async objectDetection(args, options) {
|
|
121
|
-
|
|
317
|
+
const res = await this.request(args, {
|
|
122
318
|
...options,
|
|
123
319
|
binary: true
|
|
124
320
|
});
|
|
321
|
+
const isValidOutput = Array.isArray(res) && res.every(
|
|
322
|
+
(x) => typeof x.label === "string" && typeof x.score === "number" && typeof x.box.xmin === "number" && typeof x.box.ymin === "number" && typeof x.box.xmax === "number" && typeof x.box.ymax === "number"
|
|
323
|
+
);
|
|
324
|
+
if (!isValidOutput) {
|
|
325
|
+
throw new TypeError(
|
|
326
|
+
"Invalid inference output: output must be of type Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>"
|
|
327
|
+
);
|
|
328
|
+
}
|
|
329
|
+
return res;
|
|
125
330
|
}
|
|
126
331
|
/**
|
|
127
332
|
* This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects.
|
|
128
333
|
* Recommended model: facebook/detr-resnet-50-panoptic
|
|
129
334
|
*/
|
|
130
335
|
async imageSegmentation(args, options) {
|
|
131
|
-
|
|
336
|
+
const res = await this.request(args, {
|
|
132
337
|
...options,
|
|
133
338
|
binary: true
|
|
134
339
|
});
|
|
340
|
+
const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.mask === "string" && typeof x.score === "number");
|
|
341
|
+
if (!isValidOutput) {
|
|
342
|
+
throw new TypeError(
|
|
343
|
+
"Invalid inference output: output must be of type Array<label: string, mask: string, score: number>"
|
|
344
|
+
);
|
|
345
|
+
}
|
|
346
|
+
return res;
|
|
135
347
|
}
|
|
136
348
|
/**
|
|
137
349
|
* This task reads some text input and outputs an image.
|
|
138
350
|
* Recommended model: stabilityai/stable-diffusion-2
|
|
139
351
|
*/
|
|
140
352
|
async textToImage(args, options) {
|
|
141
|
-
|
|
353
|
+
const res = await this.request(args, {
|
|
142
354
|
...options,
|
|
143
355
|
blob: true
|
|
144
356
|
});
|
|
357
|
+
const isValidOutput = res && res instanceof Blob;
|
|
358
|
+
if (!isValidOutput) {
|
|
359
|
+
throw new TypeError("Invalid inference output: output must be of type object & of instance Blob");
|
|
360
|
+
}
|
|
361
|
+
return res;
|
|
145
362
|
}
|
|
146
|
-
|
|
363
|
+
/**
|
|
364
|
+
* Helper that prepares request arguments
|
|
365
|
+
*/
|
|
366
|
+
makeRequestOptions(args, options) {
|
|
147
367
|
const mergedOptions = { ...this.defaultOptions, ...options };
|
|
148
368
|
const { model, ...otherArgs } = args;
|
|
149
369
|
const headers = {};
|
|
@@ -164,7 +384,8 @@ var HfInference = class {
|
|
|
164
384
|
headers["X-Load-Model"] = "0";
|
|
165
385
|
}
|
|
166
386
|
}
|
|
167
|
-
const
|
|
387
|
+
const url = `${HF_INFERENCE_API_BASE_URL}${model}`;
|
|
388
|
+
const info = {
|
|
168
389
|
headers,
|
|
169
390
|
method: "POST",
|
|
170
391
|
body: options?.binary ? args.data : JSON.stringify({
|
|
@@ -172,7 +393,12 @@ var HfInference = class {
|
|
|
172
393
|
options: mergedOptions
|
|
173
394
|
}),
|
|
174
395
|
credentials: options?.includeCredentials ? "include" : "same-origin"
|
|
175
|
-
}
|
|
396
|
+
};
|
|
397
|
+
return { url, info, mergedOptions };
|
|
398
|
+
}
|
|
399
|
+
async request(args, options) {
|
|
400
|
+
const { url, info, mergedOptions } = this.makeRequestOptions(args, options);
|
|
401
|
+
const response = await fetch(url, info);
|
|
176
402
|
if (mergedOptions.retry_on_error !== false && response.status === 503 && !mergedOptions.wait_for_model) {
|
|
177
403
|
return this.request(args, {
|
|
178
404
|
...mergedOptions,
|
|
@@ -191,7 +417,57 @@ var HfInference = class {
|
|
|
191
417
|
}
|
|
192
418
|
return output;
|
|
193
419
|
}
|
|
420
|
+
/**
|
|
421
|
+
* Make request that uses server-sent events and returns response as a generator
|
|
422
|
+
*/
|
|
423
|
+
async *streamingRequest(args, options) {
|
|
424
|
+
const { url, info, mergedOptions } = this.makeRequestOptions({ ...args, stream: true }, options);
|
|
425
|
+
const response = await fetch(url, info);
|
|
426
|
+
if (mergedOptions.retry_on_error !== false && response.status === 503 && !mergedOptions.wait_for_model) {
|
|
427
|
+
return this.streamingRequest(args, {
|
|
428
|
+
...mergedOptions,
|
|
429
|
+
wait_for_model: true
|
|
430
|
+
});
|
|
431
|
+
}
|
|
432
|
+
if (!response.ok) {
|
|
433
|
+
throw new Error(`Server response contains error: ${response.status}`);
|
|
434
|
+
}
|
|
435
|
+
if (response.headers.get("content-type") !== "text/event-stream") {
|
|
436
|
+
throw new Error(`Server does not support event stream content type`);
|
|
437
|
+
}
|
|
438
|
+
const reader = response.body.getReader();
|
|
439
|
+
const events = [];
|
|
440
|
+
const onEvent = (event) => {
|
|
441
|
+
events.push(event);
|
|
442
|
+
};
|
|
443
|
+
const onChunk = getLines(
|
|
444
|
+
getMessages(
|
|
445
|
+
() => {
|
|
446
|
+
},
|
|
447
|
+
() => {
|
|
448
|
+
},
|
|
449
|
+
onEvent
|
|
450
|
+
)
|
|
451
|
+
);
|
|
452
|
+
try {
|
|
453
|
+
while (true) {
|
|
454
|
+
const { done, value } = await reader.read();
|
|
455
|
+
if (done)
|
|
456
|
+
return;
|
|
457
|
+
onChunk(value);
|
|
458
|
+
while (events.length > 0) {
|
|
459
|
+
const event = events.shift();
|
|
460
|
+
if (event.data.length > 0) {
|
|
461
|
+
yield JSON.parse(event.data);
|
|
462
|
+
}
|
|
463
|
+
}
|
|
464
|
+
}
|
|
465
|
+
} finally {
|
|
466
|
+
reader.releaseLock();
|
|
467
|
+
}
|
|
468
|
+
}
|
|
194
469
|
};
|
|
195
470
|
export {
|
|
196
|
-
HfInference
|
|
471
|
+
HfInference,
|
|
472
|
+
TextGenerationStreamFinishReason
|
|
197
473
|
};
|