@huggingface/inference 3.6.2 → 3.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. package/README.md +0 -25
  2. package/dist/index.cjs +135 -114
  3. package/dist/index.js +135 -114
  4. package/dist/src/config.d.ts +1 -0
  5. package/dist/src/config.d.ts.map +1 -1
  6. package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
  7. package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -1
  8. package/dist/src/tasks/custom/request.d.ts +1 -0
  9. package/dist/src/tasks/custom/request.d.ts.map +1 -1
  10. package/dist/src/tasks/custom/streamingRequest.d.ts +1 -0
  11. package/dist/src/tasks/custom/streamingRequest.d.ts.map +1 -1
  12. package/dist/src/tasks/cv/imageToText.d.ts.map +1 -1
  13. package/dist/src/tasks/cv/objectDetection.d.ts +1 -1
  14. package/dist/src/tasks/cv/objectDetection.d.ts.map +1 -1
  15. package/dist/src/tasks/cv/textToVideo.d.ts +1 -1
  16. package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
  17. package/dist/src/tasks/cv/zeroShotImageClassification.d.ts +1 -1
  18. package/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map +1 -1
  19. package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts +1 -1
  20. package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -1
  21. package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -1
  22. package/dist/src/tasks/nlp/chatCompletion.d.ts +1 -1
  23. package/dist/src/tasks/nlp/chatCompletion.d.ts.map +1 -1
  24. package/dist/src/tasks/nlp/chatCompletionStream.d.ts +1 -1
  25. package/dist/src/tasks/nlp/chatCompletionStream.d.ts.map +1 -1
  26. package/dist/src/tasks/nlp/questionAnswering.d.ts.map +1 -1
  27. package/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map +1 -1
  28. package/dist/src/tasks/nlp/textClassification.d.ts.map +1 -1
  29. package/dist/src/tasks/nlp/tokenClassification.d.ts.map +1 -1
  30. package/dist/src/tasks/nlp/zeroShotClassification.d.ts.map +1 -1
  31. package/dist/src/types.d.ts +7 -0
  32. package/dist/src/types.d.ts.map +1 -1
  33. package/dist/src/utils/request.d.ts +27 -0
  34. package/dist/src/utils/request.d.ts.map +1 -0
  35. package/package.json +2 -2
  36. package/src/config.ts +1 -0
  37. package/src/lib/makeRequestOptions.ts +5 -2
  38. package/src/snippets/templates.exported.ts +1 -1
  39. package/src/tasks/audio/audioClassification.ts +2 -2
  40. package/src/tasks/audio/audioToAudio.ts +2 -2
  41. package/src/tasks/audio/automaticSpeechRecognition.ts +3 -3
  42. package/src/tasks/audio/textToSpeech.ts +2 -2
  43. package/src/tasks/custom/request.ts +7 -32
  44. package/src/tasks/custom/streamingRequest.ts +5 -85
  45. package/src/tasks/cv/imageClassification.ts +2 -2
  46. package/src/tasks/cv/imageSegmentation.ts +2 -2
  47. package/src/tasks/cv/imageToImage.ts +2 -2
  48. package/src/tasks/cv/imageToText.ts +7 -9
  49. package/src/tasks/cv/objectDetection.ts +4 -4
  50. package/src/tasks/cv/textToImage.ts +3 -3
  51. package/src/tasks/cv/textToVideo.ts +23 -20
  52. package/src/tasks/cv/zeroShotImageClassification.ts +4 -5
  53. package/src/tasks/multimodal/documentQuestionAnswering.ts +13 -13
  54. package/src/tasks/multimodal/visualQuestionAnswering.ts +4 -2
  55. package/src/tasks/nlp/chatCompletion.ts +3 -4
  56. package/src/tasks/nlp/chatCompletionStream.ts +3 -3
  57. package/src/tasks/nlp/featureExtraction.ts +2 -2
  58. package/src/tasks/nlp/fillMask.ts +2 -2
  59. package/src/tasks/nlp/questionAnswering.ts +3 -2
  60. package/src/tasks/nlp/sentenceSimilarity.ts +2 -11
  61. package/src/tasks/nlp/summarization.ts +2 -2
  62. package/src/tasks/nlp/tableQuestionAnswering.ts +2 -2
  63. package/src/tasks/nlp/textClassification.ts +8 -9
  64. package/src/tasks/nlp/textGeneration.ts +16 -16
  65. package/src/tasks/nlp/textGenerationStream.ts +2 -2
  66. package/src/tasks/nlp/tokenClassification.ts +9 -10
  67. package/src/tasks/nlp/translation.ts +2 -2
  68. package/src/tasks/nlp/zeroShotClassification.ts +9 -10
  69. package/src/tasks/tabular/tabularClassification.ts +2 -2
  70. package/src/tasks/tabular/tabularRegression.ts +2 -2
  71. package/src/types.ts +8 -0
  72. package/src/utils/request.ts +161 -0
package/src/tasks/cv/textToVideo.ts CHANGED
@@ -1,12 +1,11 @@
- import type { BaseArgs, InferenceProvider, Options } from "../../types";
  import type { TextToVideoInput } from "@huggingface/tasks";
- import { request } from "../custom/request";
- import { omit } from "../../utils/omit";
- import { isUrl } from "../../lib/isUrl";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
- import { typedInclude } from "../../utils/typedInclude";
- import { makeRequestOptions } from "../../lib/makeRequestOptions";
+ import { isUrl } from "../../lib/isUrl";
  import { pollFalResponse, type FalAiQueueOutput } from "../../providers/fal-ai";
+ import type { BaseArgs, InferenceProvider, Options } from "../../types";
+ import { omit } from "../../utils/omit";
+ import { innerRequest } from "../../utils/request";
+ import { typedInclude } from "../../utils/typedInclude";

  export type TextToVideoArgs = BaseArgs & TextToVideoInput;

@@ -35,37 +34,41 @@ export async function textToVideo(args: TextToVideoArgs, options?: Options): Pro
  args.provider === "fal-ai" || args.provider === "replicate" || args.provider === "novita"
  ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs }
  : args;
- const res = await request<FalAiQueueOutput | ReplicateOutput | NovitaOutput>(payload, {
+ const { data, requestContext } = await innerRequest<FalAiQueueOutput | ReplicateOutput | NovitaOutput>(payload, {
  ...options,
  task: "text-to-video",
  });
+
  if (args.provider === "fal-ai") {
- const { url, info } = await makeRequestOptions(args, { ...options, task: "text-to-video" });
- return await pollFalResponse(res as FalAiQueueOutput, url, info.headers as Record<string, string>);
+ return await pollFalResponse(
+ data as FalAiQueueOutput,
+ requestContext.url,
+ requestContext.info.headers as Record<string, string>
+ );
  } else if (args.provider === "novita") {
  const isValidOutput =
- typeof res === "object" &&
- !!res &&
- "video" in res &&
- typeof res.video === "object" &&
- !!res.video &&
- "video_url" in res.video &&
- typeof res.video.video_url === "string" &&
- isUrl(res.video.video_url);
+ typeof data === "object" &&
+ !!data &&
+ "video" in data &&
+ typeof data.video === "object" &&
+ !!data.video &&
+ "video_url" in data.video &&
+ typeof data.video.video_url === "string" &&
+ isUrl(data.video.video_url);
  if (!isValidOutput) {
  throw new InferenceOutputError("Expected { video: { video_url: string } }");
  }
- const urlResponse = await fetch((res as NovitaOutput).video.video_url);
+ const urlResponse = await fetch((data as NovitaOutput).video.video_url);
  return await urlResponse.blob();
  } else {
  /// TODO: Replicate: handle the case where the generation request "times out" / is async (ie output is null)
  /// https://replicate.com/docs/topics/predictions/create-a-prediction
  const isValidOutput =
- typeof res === "object" && !!res && "output" in res && typeof res.output === "string" && isUrl(res.output);
+ typeof data === "object" && !!data && "output" in data && typeof data.output === "string" && isUrl(data.output);
  if (!isValidOutput) {
  throw new InferenceOutputError("Expected { output: string }");
  }
- const urlResponse = await fetch(res.output);
+ const urlResponse = await fetch(data.output);
  return await urlResponse.blob();
  }
  }
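Note on the refactor visible throughout this release: every task now goes through a shared innerRequest helper exported from the new package/src/utils/request.ts (+161 lines, whose body is not shown in this section) instead of request from package/src/tasks/custom/request.ts. The sketch below only names the return shape inferred from the call sites in this diff; any detail beyond data, requestContext.url and requestContext.info is an assumption, not the actual implementation.

// Sketch of the return shape implied by the call sites above (assumed, for orientation only).
interface InnerRequestResult<T> {
  data: T; // parsed response body; what the old `request` helper resolved to
  requestContext: {
    url: string; // resolved endpoint URL, previously recomputed via makeRequestOptions
    info: RequestInit; // fetch options, e.g. the headers reused by pollFalResponse
  };
}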
package/src/tasks/cv/zeroShotImageClassification.ts CHANGED
@@ -1,9 +1,8 @@
+ import type { ZeroShotImageClassificationInput, ZeroShotImageClassificationOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
- import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
- import type { RequestArgs } from "../../types";
+ import type { BaseArgs, Options, RequestArgs } from "../../types";
  import { base64FromBytes } from "../../utils/base64FromBytes";
- import type { ZeroShotImageClassificationInput, ZeroShotImageClassificationOutput } from "@huggingface/tasks";
+ import { innerRequest } from "../../utils/request";

  /**
  * @deprecated
@@ -46,7 +45,7 @@ export async function zeroShotImageClassification(
  options?: Options
  ): Promise<ZeroShotImageClassificationOutput> {
  const payload = await preparePayload(args);
- const res = await request<ZeroShotImageClassificationOutput>(payload, {
+ const { data: res } = await innerRequest<ZeroShotImageClassificationOutput>(payload, {
  ...options,
  task: "zero-shot-image-classification",
  });
package/src/tasks/multimodal/documentQuestionAnswering.ts CHANGED
@@ -1,14 +1,13 @@
- import { InferenceOutputError } from "../../lib/InferenceOutputError";
- import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
- import type { RequestArgs } from "../../types";
- import { toArray } from "../../utils/toArray";
- import { base64FromBytes } from "../../utils/base64FromBytes";
  import type {
  DocumentQuestionAnsweringInput,
  DocumentQuestionAnsweringInputData,
  DocumentQuestionAnsweringOutput,
  } from "@huggingface/tasks";
+ import { InferenceOutputError } from "../../lib/InferenceOutputError";
+ import type { BaseArgs, Options, RequestArgs } from "../../types";
+ import { base64FromBytes } from "../../utils/base64FromBytes";
+ import { innerRequest } from "../../utils/request";
+ import { toArray } from "../../utils/toArray";

  /// Override the type to properly set inputs.image as Blob
  export type DocumentQuestionAnsweringArgs = BaseArgs &
@@ -29,16 +28,17 @@ export async function documentQuestionAnswering(
  image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer())),
  },
  } as RequestArgs;
- const res = toArray(
- await request<DocumentQuestionAnsweringOutput | DocumentQuestionAnsweringOutput[number]>(reqArgs, {
+ const { data: res } = await innerRequest<DocumentQuestionAnsweringOutput | DocumentQuestionAnsweringOutput[number]>(
+ reqArgs,
+ {
  ...options,
  task: "document-question-answering",
- })
+ }
  );
-
+ const output = toArray(res);
  const isValidOutput =
- Array.isArray(res) &&
- res.every(
+ Array.isArray(output) &&
+ output.every(
  (elem) =>
  typeof elem === "object" &&
  !!elem &&
@@ -51,5 +51,5 @@ export async function documentQuestionAnswering(
  throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");
  }

- return res[0];
+ return output[0];
  }
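As before, toArray(res) normalizes endpoints that may return either a single object or an array. The helper itself is untouched by this release and not shown here; presumably it is the usual one-liner:

// Assumed shape of the existing utils/toArray helper (not part of this diff).
function toArray<T>(obj: T | T[]): T[] {
  return Array.isArray(obj) ? obj : [obj];
}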
package/src/tasks/multimodal/visualQuestionAnswering.ts CHANGED
@@ -6,7 +6,7 @@ import type {
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options, RequestArgs } from "../../types";
  import { base64FromBytes } from "../../utils/base64FromBytes";
- import { request } from "../custom/request";
+ import { innerRequest } from "../../utils/request";

  /// Override the type to properly set inputs.image as Blob
  export type VisualQuestionAnsweringArgs = BaseArgs &
@@ -27,10 +27,12 @@ export async function visualQuestionAnswering(
  image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer())),
  },
  } as RequestArgs;
- const res = await request<VisualQuestionAnsweringOutput>(reqArgs, {
+
+ const { data: res } = await innerRequest<VisualQuestionAnsweringOutput>(reqArgs, {
  ...options,
  task: "visual-question-answering",
  });
+
  const isValidOutput =
  Array.isArray(res) &&
  res.every(
package/src/tasks/nlp/chatCompletion.ts CHANGED
@@ -1,7 +1,7 @@
+ import type { ChatCompletionInput, ChatCompletionOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
- import type { ChatCompletionInput, ChatCompletionOutput } from "@huggingface/tasks";
+ import { innerRequest } from "../../utils/request";

  /**
  * Use the chat completion endpoint to generate a response to a prompt, using OpenAI message completion API no stream
@@ -10,12 +10,11 @@ export async function chatCompletion(
  args: BaseArgs & ChatCompletionInput,
  options?: Options
  ): Promise<ChatCompletionOutput> {
- const res = await request<ChatCompletionOutput>(args, {
+ const { data: res } = await innerRequest<ChatCompletionOutput>(args, {
  ...options,
  task: "text-generation",
  chatCompletion: true,
  });
-
  const isValidOutput =
  typeof res === "object" &&
  Array.isArray(res?.choices) &&
package/src/tasks/nlp/chatCompletionStream.ts CHANGED
@@ -1,6 +1,6 @@
- import type { BaseArgs, Options } from "../../types";
- import { streamingRequest } from "../custom/streamingRequest";
  import type { ChatCompletionInput, ChatCompletionStreamOutput } from "@huggingface/tasks";
+ import type { BaseArgs, Options } from "../../types";
+ import { innerStreamingRequest } from "../../utils/request";

  /**
  * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
@@ -9,7 +9,7 @@ export async function* chatCompletionStream(
  args: BaseArgs & ChatCompletionInput,
  options?: Options
  ): AsyncGenerator<ChatCompletionStreamOutput> {
- yield* streamingRequest<ChatCompletionStreamOutput>(args, {
+ yield* innerStreamingRequest<ChatCompletionStreamOutput>(args, {
  ...options,
  task: "text-generation",
  chatCompletion: true,
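Streaming tasks get the same treatment: streamingRequest from src/tasks/custom/streamingRequest.ts is swapped for innerStreamingRequest from src/utils/request.ts, and the call sites still yield* it, so it remains an async generator. Public usage is unchanged; a hypothetical consumer is sketched below (token and model name are placeholders, and only the generator shape is implied by this diff).

import { chatCompletionStream } from "@huggingface/inference";

// Collect a streamed chat completion into a single string.
async function collectChatStream(): Promise<string> {
  let text = "";
  const stream = chatCompletionStream({
    accessToken: "hf_...",
    model: "meta-llama/Llama-3.1-8B-Instruct",
    messages: [{ role: "user", content: "Hello!" }],
  });
  for await (const chunk of stream) {
    // Chunks follow the OpenAI-style streaming shape (choices[].delta.content).
    text += chunk.choices[0]?.delta?.content ?? "";
  }
  return text;
}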
package/src/tasks/nlp/featureExtraction.ts CHANGED
@@ -1,7 +1,7 @@
  import type { FeatureExtractionInput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
+ import { innerRequest } from "../../utils/request";

  export type FeatureExtractionArgs = BaseArgs & FeatureExtractionInput;

@@ -17,7 +17,7 @@ export async function featureExtraction(
  args: FeatureExtractionArgs,
  options?: Options
  ): Promise<FeatureExtractionOutput> {
- const res = await request<FeatureExtractionOutput>(args, {
+ const { data: res } = await innerRequest<FeatureExtractionOutput>(args, {
  ...options,
  task: "feature-extraction",
  });
package/src/tasks/nlp/fillMask.ts CHANGED
@@ -1,7 +1,7 @@
  import type { FillMaskInput, FillMaskOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
+ import { innerRequest } from "../../utils/request";

  export type FillMaskArgs = BaseArgs & FillMaskInput;

@@ -9,7 +9,7 @@ export type FillMaskArgs = BaseArgs & FillMaskInput;
  * Tries to fill in a hole with a missing word (token to be precise). That’s the base task for BERT models.
  */
  export async function fillMask(args: FillMaskArgs, options?: Options): Promise<FillMaskOutput> {
- const res = await request<FillMaskOutput>(args, {
+ const { data: res } = await innerRequest<FillMaskOutput>(args, {
  ...options,
  task: "fill-mask",
  });
package/src/tasks/nlp/questionAnswering.ts CHANGED
@@ -1,7 +1,7 @@
  import type { QuestionAnsweringInput, QuestionAnsweringOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
+ import { innerRequest } from "../../utils/request";

  export type QuestionAnsweringArgs = BaseArgs & QuestionAnsweringInput;

@@ -12,10 +12,11 @@ export async function questionAnswering(
  args: QuestionAnsweringArgs,
  options?: Options
  ): Promise<QuestionAnsweringOutput[number]> {
- const res = await request<QuestionAnsweringOutput | QuestionAnsweringOutput[number]>(args, {
+ const { data: res } = await innerRequest<QuestionAnsweringOutput | QuestionAnsweringOutput[number]>(args, {
  ...options,
  task: "question-answering",
  });
+
  const isValidOutput = Array.isArray(res)
  ? res.every(
  (elem) =>
package/src/tasks/nlp/sentenceSimilarity.ts CHANGED
@@ -1,8 +1,7 @@
  import type { SentenceSimilarityInput, SentenceSimilarityOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
- import { omit } from "../../utils/omit";
+ import { innerRequest } from "../../utils/request";

  export type SentenceSimilarityArgs = BaseArgs & SentenceSimilarityInput;

@@ -13,7 +12,7 @@ export async function sentenceSimilarity(
  args: SentenceSimilarityArgs,
  options?: Options
  ): Promise<SentenceSimilarityOutput> {
- const res = await request<SentenceSimilarityOutput>(prepareInput(args), {
+ const { data: res } = await innerRequest<SentenceSimilarityOutput>(args, {
  ...options,
  task: "sentence-similarity",
  });
@@ -24,11 +23,3 @@ export async function sentenceSimilarity(
  }
  return res;
  }
-
- function prepareInput(args: SentenceSimilarityArgs) {
- return {
- ...omit(args, ["inputs", "parameters"]),
- inputs: { ...omit(args.inputs, "sourceSentence") },
- parameters: { source_sentence: args.inputs.sourceSentence, ...args.parameters },
- };
- }
package/src/tasks/nlp/summarization.ts CHANGED
@@ -1,7 +1,7 @@
  import type { SummarizationInput, SummarizationOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
+ import { innerRequest } from "../../utils/request";

  export type SummarizationArgs = BaseArgs & SummarizationInput;

@@ -9,7 +9,7 @@ export type SummarizationArgs = BaseArgs & SummarizationInput;
  * This task is well known to summarize longer text into shorter text. Be careful, some models have a maximum length of input. That means that the summary cannot handle full books for instance. Be careful when choosing your model.
  */
  export async function summarization(args: SummarizationArgs, options?: Options): Promise<SummarizationOutput> {
- const res = await request<SummarizationOutput[]>(args, {
+ const { data: res } = await innerRequest<SummarizationOutput[]>(args, {
  ...options,
  task: "summarization",
  });
package/src/tasks/nlp/tableQuestionAnswering.ts CHANGED
@@ -1,7 +1,7 @@
  import type { TableQuestionAnsweringInput, TableQuestionAnsweringOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
+ import { innerRequest } from "../../utils/request";

  export type TableQuestionAnsweringArgs = BaseArgs & TableQuestionAnsweringInput;

@@ -12,7 +12,7 @@ export async function tableQuestionAnswering(
  args: TableQuestionAnsweringArgs,
  options?: Options
  ): Promise<TableQuestionAnsweringOutput[number]> {
- const res = await request<TableQuestionAnsweringOutput | TableQuestionAnsweringOutput[number]>(args, {
+ const { data: res } = await innerRequest<TableQuestionAnsweringOutput | TableQuestionAnsweringOutput[number]>(args, {
  ...options,
  task: "table-question-answering",
  });
package/src/tasks/nlp/textClassification.ts CHANGED
@@ -1,7 +1,7 @@
  import type { TextClassificationInput, TextClassificationOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
+ import { innerRequest } from "../../utils/request";

  export type TextClassificationArgs = BaseArgs & TextClassificationInput;

@@ -12,16 +12,15 @@ export async function textClassification(
  args: TextClassificationArgs,
  options?: Options
  ): Promise<TextClassificationOutput> {
- const res = (
- await request<TextClassificationOutput>(args, {
- ...options,
- task: "text-classification",
- })
- )?.[0];
+ const { data: res } = await innerRequest<TextClassificationOutput>(args, {
+ ...options,
+ task: "text-classification",
+ });
+ const output = res?.[0];
  const isValidOutput =
- Array.isArray(res) && res.every((x) => typeof x?.label === "string" && typeof x.score === "number");
+ Array.isArray(output) && output.every((x) => typeof x?.label === "string" && typeof x.score === "number");
  if (!isValidOutput) {
  throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
  }
- return res;
+ return output;
  }
package/src/tasks/nlp/textGeneration.ts CHANGED
@@ -6,9 +6,9 @@ import type {
  } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { toArray } from "../../utils/toArray";
- import { request } from "../custom/request";
  import { omit } from "../../utils/omit";
+ import { innerRequest } from "../../utils/request";
+ import { toArray } from "../../utils/toArray";

  export type { TextGenerationInput, TextGenerationOutput };

@@ -37,7 +37,7 @@ export async function textGeneration(
  ): Promise<TextGenerationOutput> {
  if (args.provider === "together") {
  args.prompt = args.inputs;
- const raw = await request<TogeteherTextCompletionOutput>(args, {
+ const { data: raw } = await innerRequest<TogeteherTextCompletionOutput>(args, {
  ...options,
  task: "text-generation",
  });
@@ -61,10 +61,12 @@ export async function textGeneration(
  : undefined),
  ...omit(args, ["inputs", "parameters"]),
  };
- const raw = await request<HyperbolicTextCompletionOutput>(payload, {
- ...options,
- task: "text-generation",
- });
+ const raw = (
+ await innerRequest<HyperbolicTextCompletionOutput>(payload, {
+ ...options,
+ task: "text-generation",
+ })
+ ).data;
  const isValidOutput =
  typeof raw === "object" && "choices" in raw && Array.isArray(raw?.choices) && typeof raw?.model === "string";
  if (!isValidOutput) {
@@ -75,18 +77,16 @@ export async function textGeneration(
  generated_text: completion.message.content,
  };
  } else {
- const res = toArray(
- await request<TextGenerationOutput | TextGenerationOutput[]>(args, {
- ...options,
- task: "text-generation",
- })
- );
-
+ const { data: res } = await innerRequest<TextGenerationOutput | TextGenerationOutput[]>(args, {
+ ...options,
+ task: "text-generation",
+ });
+ const output = toArray(res);
  const isValidOutput =
- Array.isArray(res) && res.every((x) => "generated_text" in x && typeof x?.generated_text === "string");
+ Array.isArray(output) && output.every((x) => "generated_text" in x && typeof x?.generated_text === "string");
  if (!isValidOutput) {
  throw new InferenceOutputError("Expected Array<{generated_text: string}>");
  }
- return (res as TextGenerationOutput[])?.[0];
+ return (output as TextGenerationOutput[])?.[0];
  }
  }
package/src/tasks/nlp/textGenerationStream.ts CHANGED
@@ -1,6 +1,6 @@
  import type { TextGenerationInput } from "@huggingface/tasks";
  import type { BaseArgs, Options } from "../../types";
- import { streamingRequest } from "../custom/streamingRequest";
+ import { innerStreamingRequest } from "../../utils/request";

  export interface TextGenerationStreamToken {
  /** Token ID from the model tokenizer */
@@ -89,7 +89,7 @@ export async function* textGenerationStream(
  args: BaseArgs & TextGenerationInput,
  options?: Options
  ): AsyncGenerator<TextGenerationStreamOutput> {
- yield* streamingRequest<TextGenerationStreamOutput>(args, {
+ yield* innerStreamingRequest<TextGenerationStreamOutput>(args, {
  ...options,
  task: "text-generation",
  });
package/src/tasks/nlp/tokenClassification.ts CHANGED
@@ -1,8 +1,8 @@
  import type { TokenClassificationInput, TokenClassificationOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
+ import { innerRequest } from "../../utils/request";
  import { toArray } from "../../utils/toArray";
- import { request } from "../custom/request";

  export type TokenClassificationArgs = BaseArgs & TokenClassificationInput;

@@ -13,15 +13,14 @@ export async function tokenClassification(
  args: TokenClassificationArgs,
  options?: Options
  ): Promise<TokenClassificationOutput> {
- const res = toArray(
- await request<TokenClassificationOutput[number] | TokenClassificationOutput>(args, {
- ...options,
- task: "token-classification",
- })
- );
+ const { data: res } = await innerRequest<TokenClassificationOutput[number] | TokenClassificationOutput>(args, {
+ ...options,
+ task: "token-classification",
+ });
+ const output = toArray(res);
  const isValidOutput =
- Array.isArray(res) &&
- res.every(
+ Array.isArray(output) &&
+ output.every(
  (x) =>
  typeof x.end === "number" &&
  typeof x.entity_group === "string" &&
@@ -34,5 +33,5 @@ export async function tokenClassification(
  "Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>"
  );
  }
- return res;
+ return output;
  }
package/src/tasks/nlp/translation.ts CHANGED
@@ -1,14 +1,14 @@
  import type { TranslationInput, TranslationOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
+ import { innerRequest } from "../../utils/request";

  export type TranslationArgs = BaseArgs & TranslationInput;
  /**
  * This task is well known to translate text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en.
  */
  export async function translation(args: TranslationArgs, options?: Options): Promise<TranslationOutput> {
- const res = await request<TranslationOutput>(args, {
+ const { data: res } = await innerRequest<TranslationOutput>(args, {
  ...options,
  task: "translation",
  });
package/src/tasks/nlp/zeroShotClassification.ts CHANGED
@@ -1,8 +1,8 @@
  import type { ZeroShotClassificationInput, ZeroShotClassificationOutput } from "@huggingface/tasks";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
+ import { innerRequest } from "../../utils/request";
  import { toArray } from "../../utils/toArray";
- import { request } from "../custom/request";

  export type ZeroShotClassificationArgs = BaseArgs & ZeroShotClassificationInput;

@@ -13,15 +13,14 @@ export async function zeroShotClassification(
  args: ZeroShotClassificationArgs,
  options?: Options
  ): Promise<ZeroShotClassificationOutput> {
- const res = toArray(
- await request<ZeroShotClassificationOutput[number] | ZeroShotClassificationOutput>(args, {
- ...options,
- task: "zero-shot-classification",
- })
- );
+ const { data: res } = await innerRequest<ZeroShotClassificationOutput[number] | ZeroShotClassificationOutput>(args, {
+ ...options,
+ task: "zero-shot-classification",
+ });
+ const output = toArray(res);
  const isValidOutput =
- Array.isArray(res) &&
- res.every(
+ Array.isArray(output) &&
+ output.every(
  (x) =>
  Array.isArray(x.labels) &&
  x.labels.every((_label) => typeof _label === "string") &&
@@ -32,5 +31,5 @@ export async function zeroShotClassification(
  if (!isValidOutput) {
  throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>");
  }
- return res;
+ return output;
  }
package/src/tasks/tabular/tabularClassification.ts CHANGED
@@ -1,6 +1,6 @@
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
+ import { innerRequest } from "../../utils/request";

  export type TabularClassificationArgs = BaseArgs & {
  inputs: {
@@ -25,7 +25,7 @@ export async function tabularClassification(
  args: TabularClassificationArgs,
  options?: Options
  ): Promise<TabularClassificationOutput> {
- const res = await request<TabularClassificationOutput>(args, {
+ const { data: res } = await innerRequest<TabularClassificationOutput>(args, {
  ...options,
  task: "tabular-classification",
  });
package/src/tasks/tabular/tabularRegression.ts CHANGED
@@ -1,6 +1,6 @@
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
- import { request } from "../custom/request";
+ import { innerRequest } from "../../utils/request";

  export type TabularRegressionArgs = BaseArgs & {
  inputs: {
@@ -25,7 +25,7 @@ export async function tabularRegression(
  args: TabularRegressionArgs,
  options?: Options
  ): Promise<TabularRegressionOutput> {
- const res = await request<TabularRegressionOutput>(args, {
+ const { data: res } = await innerRequest<TabularRegressionOutput>(args, {
  ...options,
  task: "tabular-regression",
  });
package/src/types.ts CHANGED
@@ -24,6 +24,14 @@ export interface Options {
  * (Default: "same-origin"). String | Boolean. Credentials to use for the request. If this is a string, it will be passed straight on. If it's a boolean, true will be "include" and false will not send credentials at all.
  */
  includeCredentials?: string | boolean;
+
+ /**
+ * The billing account to use for the requests.
+ *
+ * By default the requests are billed on the user's account.
+ * Requests can only be billed to an organization the user is a member of, and which has subscribed to Enterprise Hub.
+ */
+ billTo?: string;
  }

  export type InferenceTask = Exclude<PipelineType, "other">;
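billTo is a new public option on Options in this release. A minimal usage sketch, assuming the task functions exported from the package root; the token, model and organization names are placeholders.

import { chatCompletion } from "@huggingface/inference";

const output = await chatCompletion(
  {
    accessToken: "hf_...",
    model: "meta-llama/Llama-3.1-8B-Instruct",
    messages: [{ role: "user", content: "Hello!" }],
  },
  // Bill this request to an Enterprise Hub organization instead of the personal account.
  { billTo: "my-org-name" }
);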