@huggingface/inference 2.1.0 → 2.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -116,7 +116,7 @@ function makeRequestOptions(args, options) {
 // src/tasks/custom/request.ts
 async function request(args, options) {
   const { url, info } = makeRequestOptions(args, options);
-  const response = await fetch(url, info);
+  const response = await (options?.fetch ?? fetch)(url, info);
   if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
     return request(args, {
       ...options,
@@ -240,7 +240,7 @@ function newMessage() {
 // src/tasks/custom/streamingRequest.ts
 async function* streamingRequest(args, options) {
   const { url, info } = makeRequestOptions({ ...args, stream: true }, options);
-  const response = await fetch(url, info);
+  const response = await (options?.fetch ?? fetch)(url, info);
   if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
     return streamingRequest(args, {
       ...options,
@@ -433,7 +433,7 @@ async function fillMask(args, options) {
 // src/tasks/nlp/questionAnswering.ts
 async function questionAnswering(args, options) {
   const res = await request(args, options);
-  const isValidOutput = typeof res?.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number";
+  const isValidOutput = typeof res === "object" && !!res && typeof res.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number";
   if (!isValidOutput) {
     throw new InferenceOutputError("Expected {answer: string, end: number, score: number, start: number}");
   }
@@ -566,10 +566,12 @@ async function documentQuestionAnswering(args, options) {
       image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
     }
   };
-  const res = (await request(reqArgs, options))?.[0];
-  const isValidOutput = typeof res?.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number";
+  const res = toArray(
+    await request(reqArgs, options)
+  )?.[0];
+  const isValidOutput = typeof res?.answer === "string" && (typeof res.end === "number" || typeof res.end === "undefined") && (typeof res.score === "number" || typeof res.score === "undefined") && (typeof res.start === "number" || typeof res.start === "undefined");
   if (!isValidOutput) {
-    throw new InferenceOutputError("Expected Array<{answer: string, end: number, score: number, start: number}>");
+    throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");
   }
   return res;
 }
package/dist/index.mjs CHANGED
@@ -71,7 +71,7 @@ function makeRequestOptions(args, options) {
 // src/tasks/custom/request.ts
 async function request(args, options) {
   const { url, info } = makeRequestOptions(args, options);
-  const response = await fetch(url, info);
+  const response = await (options?.fetch ?? fetch)(url, info);
   if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
     return request(args, {
       ...options,
@@ -195,7 +195,7 @@ function newMessage() {
 // src/tasks/custom/streamingRequest.ts
 async function* streamingRequest(args, options) {
   const { url, info } = makeRequestOptions({ ...args, stream: true }, options);
-  const response = await fetch(url, info);
+  const response = await (options?.fetch ?? fetch)(url, info);
   if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
     return streamingRequest(args, {
       ...options,
@@ -388,7 +388,7 @@ async function fillMask(args, options) {
 // src/tasks/nlp/questionAnswering.ts
 async function questionAnswering(args, options) {
   const res = await request(args, options);
-  const isValidOutput = typeof res?.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number";
+  const isValidOutput = typeof res === "object" && !!res && typeof res.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number";
   if (!isValidOutput) {
     throw new InferenceOutputError("Expected {answer: string, end: number, score: number, start: number}");
   }
@@ -521,10 +521,12 @@ async function documentQuestionAnswering(args, options) {
       image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
     }
   };
-  const res = (await request(reqArgs, options))?.[0];
-  const isValidOutput = typeof res?.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number";
+  const res = toArray(
+    await request(reqArgs, options)
+  )?.[0];
+  const isValidOutput = typeof res?.answer === "string" && (typeof res.end === "number" || typeof res.end === "undefined") && (typeof res.score === "number" || typeof res.score === "undefined") && (typeof res.start === "number" || typeof res.start === "undefined");
   if (!isValidOutput) {
-    throw new InferenceOutputError("Expected Array<{answer: string, end: number, score: number, start: number}>");
+    throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");
   }
   return res;
 }
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@huggingface/inference",
-  "version": "2.1.0",
+  "version": "2.1.2",
   "license": "MIT",
   "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
   "description": "Typescript wrapper for the Hugging Face Inference API",
package/src/tasks/custom/request.ts CHANGED
@@ -12,7 +12,7 @@ export async function request<T>(
   }
 ): Promise<T> {
   const { url, info } = makeRequestOptions(args, options);
-  const response = await fetch(url, info);
+  const response = await (options?.fetch ?? fetch)(url, info);
 
   if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
     return request(args, {
package/src/tasks/custom/streamingRequest.ts CHANGED
@@ -14,7 +14,7 @@ export async function* streamingRequest<T>(
   }
 ): AsyncGenerator<T> {
   const { url, info } = makeRequestOptions({ ...args, stream: true }, options);
-  const response = await fetch(url, info);
+  const response = await (options?.fetch ?? fetch)(url, info);
 
   if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
     return streamingRequest(args, {
package/src/tasks/multimodal/documentQuestionAnswering.ts CHANGED
@@ -3,6 +3,7 @@ import type { BaseArgs, Options } from "../../types";
 import { request } from "../custom/request";
 import type { RequestArgs } from "../../types";
 import { base64FromBytes } from "../../../../shared/src/base64FromBytes";
+import { toArray } from "../../utils/toArray";
 
 export type DocumentQuestionAnsweringArgs = BaseArgs & {
   inputs: {
@@ -24,15 +25,15 @@ export interface DocumentQuestionAnsweringOutput {
   /**
    * ?
    */
-  end: number;
+  end?: number;
   /**
    * A float that represents how likely that the answer is correct
    */
-  score: number;
+  score?: number;
   /**
    * ?
    */
-  start: number;
+  start?: number;
 }
 
 /**
@@ -50,14 +51,16 @@ export async function documentQuestionAnswering(
       image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer())),
     },
   } as RequestArgs;
-  const res = (await request<[DocumentQuestionAnsweringOutput]>(reqArgs, options))?.[0];
+  const res = toArray(
+    await request<[DocumentQuestionAnsweringOutput] | DocumentQuestionAnsweringOutput>(reqArgs, options)
+  )?.[0];
   const isValidOutput =
     typeof res?.answer === "string" &&
-    typeof res.end === "number" &&
-    typeof res.score === "number" &&
-    typeof res.start === "number";
+    (typeof res.end === "number" || typeof res.end === "undefined") &&
+    (typeof res.score === "number" || typeof res.score === "undefined") &&
+    (typeof res.start === "number" || typeof res.start === "undefined");
   if (!isValidOutput) {
-    throw new InferenceOutputError("Expected Array<{answer: string, end: number, score: number, start: number}>");
+    throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");
   }
   return res;
 }
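
The toArray helper imported above is not shown in this diff. Presumably it normalizes the response so that a bare object and a one-element array can be handled the same way; a minimal sketch of such a helper, under that assumption:

// Hypothetical sketch of a toArray utility; the real implementation lives in
// src/utils/toArray and is not included in this diff.
function toArray<T>(value: T | T[]): T[] {
  return Array.isArray(value) ? value : [value];
}

With that shape, toArray(await request(reqArgs, options))?.[0] picks the first answer whether the backend returns a single object or an array of objects.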
package/src/tasks/nlp/questionAnswering.ts CHANGED
@@ -37,7 +37,9 @@ export async function questionAnswering(
 ): Promise<QuestionAnsweringOutput> {
   const res = await request<QuestionAnsweringOutput>(args, options);
   const isValidOutput =
-    typeof res?.answer === "string" &&
+    typeof res === "object" &&
+    !!res &&
+    typeof res.answer === "string" &&
     typeof res.end === "number" &&
     typeof res.score === "number" &&
     typeof res.start === "number";
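
For context, typeof null is "object" in JavaScript, so the added truthiness check is what actually rules out a null response before its properties are inspected:

// Illustration only: typeof alone does not distinguish null from a real object.
const res: unknown = null;
console.log(typeof res === "object");          // true
console.log(typeof res === "object" && !!res); // false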
package/src/types.ts CHANGED
@@ -20,6 +20,10 @@ export interface Options {
    * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error as it will limit hanging in your application to known places.
    */
   wait_for_model?: boolean;
+  /**
+   * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
+   */
+  fetch?: typeof fetch;
 }
 
 export interface BaseArgs {
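
The new fetch field on Options lets callers supply their own fetch implementation, for example to route requests through a proxy, attach extra headers, or log traffic. A minimal usage sketch, assuming the HfInference client forwards per-call options to request() as the diff suggests; the access token and model name below are placeholders:

import { HfInference } from "@huggingface/inference";

// Wrap the global fetch so every Inference API call is logged before being sent.
const loggingFetch: typeof fetch = (input, init) => {
  console.log("inference request:", input);
  return fetch(input, init);
};

const hf = new HfInference("hf_xxx"); // placeholder access token

const output = await hf.questionAnswering(
  {
    model: "deepset/roberta-base-squad2", // illustrative question-answering model
    inputs: {
      question: "What is the capital of France?",
      context: "Paris is the capital of France.",
    },
  },
  { fetch: loggingFetch }
);
console.log(output.answer);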