@huggingface/inference 2.6.4 → 2.6.6

This diff compares the published contents of two package versions as they appear in their public registry. It is provided for informational purposes only.
@@ -1,9 +1,60 @@
  /// <reference path="./index.d.ts" />
+ "use strict";
  var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
    for (var name in all)
      __defProp(target, name, { get: all[name], enumerable: true });
  };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/index.ts
+ var src_exports = {};
+ __export(src_exports, {
+   HfInference: () => HfInference,
+   HfInferenceEndpoint: () => HfInferenceEndpoint,
+   InferenceOutputError: () => InferenceOutputError,
+   audioClassification: () => audioClassification,
+   audioToAudio: () => audioToAudio,
+   automaticSpeechRecognition: () => automaticSpeechRecognition,
+   documentQuestionAnswering: () => documentQuestionAnswering,
+   featureExtraction: () => featureExtraction,
+   fillMask: () => fillMask,
+   imageClassification: () => imageClassification,
+   imageSegmentation: () => imageSegmentation,
+   imageToImage: () => imageToImage,
+   imageToText: () => imageToText,
+   objectDetection: () => objectDetection,
+   questionAnswering: () => questionAnswering,
+   request: () => request,
+   sentenceSimilarity: () => sentenceSimilarity,
+   streamingRequest: () => streamingRequest,
+   summarization: () => summarization,
+   tableQuestionAnswering: () => tableQuestionAnswering,
+   tabularClassification: () => tabularClassification,
+   tabularRegression: () => tabularRegression,
+   textClassification: () => textClassification,
+   textGeneration: () => textGeneration,
+   textGenerationStream: () => textGenerationStream,
+   textToImage: () => textToImage,
+   textToSpeech: () => textToSpeech,
+   tokenClassification: () => tokenClassification,
+   translation: () => translation,
+   visualQuestionAnswering: () => visualQuestionAnswering,
+   zeroShotClassification: () => zeroShotClassification,
+   zeroShotImageClassification: () => zeroShotImageClassification
+ });
+ module.exports = __toCommonJS(src_exports);

  // src/tasks/index.ts
  var tasks_exports = {};
@@ -11,7 +62,6 @@ __export(tasks_exports, {
    audioClassification: () => audioClassification,
    audioToAudio: () => audioToAudio,
    automaticSpeechRecognition: () => automaticSpeechRecognition,
-   conversational: () => conversational,
    documentQuestionAnswering: () => documentQuestionAnswering,
    featureExtraction: () => featureExtraction,
    fillMask: () => fillMask,
@@ -82,7 +132,15 @@ var tasks = null;
  async function makeRequestOptions(args, options) {
    const { accessToken, model: _model, ...otherArgs } = args;
    let { model } = args;
-   const { forceTask: task, includeCredentials, taskHint, ...otherOptions } = options ?? {};
+   const {
+     forceTask: task,
+     includeCredentials,
+     taskHint,
+     wait_for_model,
+     use_cache,
+     dont_load_model,
+     ...otherOptions
+   } = options ?? {};
    const headers = {};
    if (accessToken) {
      headers["Authorization"] = `Bearer ${accessToken}`;
@@ -105,16 +163,15 @@ async function makeRequestOptions(args, options) {
    const binary = "data" in args && !!args.data;
    if (!binary) {
      headers["Content-Type"] = "application/json";
-   } else {
-     if (options?.wait_for_model) {
-       headers["X-Wait-For-Model"] = "true";
-     }
-     if (options?.use_cache === false) {
-       headers["X-Use-Cache"] = "false";
-     }
-     if (options?.dont_load_model) {
-       headers["X-Load-Model"] = "0";
-     }
+   }
+   if (wait_for_model) {
+     headers["X-Wait-For-Model"] = "true";
+   }
+   if (use_cache === false) {
+     headers["X-Use-Cache"] = "false";
+   }
+   if (dont_load_model) {
+     headers["X-Load-Model"] = "0";
    }
    const url = (() => {
      if (isUrl(model)) {
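
Note: the widened destructuring above also moves the wait_for_model / use_cache / dont_load_model handling out of the binary-only else branch, so the corresponding X-Wait-For-Model, X-Use-Cache and X-Load-Model headers are now set for JSON-body requests as well. A minimal consumer-side sketch (token and model name are placeholders):

    import { HfInference } from "@huggingface/inference";

    const hf = new HfInference("hf_xxx"); // placeholder token

    // Per-request options; after this change they are mapped to headers for
    // every request, including JSON-body tasks such as text classification.
    const out = await hf.textClassification(
      { model: "distilbert-base-uncased-finetuned-sst-2-english", inputs: "Great library!" },
      { use_cache: false, wait_for_model: true }
    );
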
@@ -275,7 +332,7 @@ async function* streamingRequest(args, options) {
    const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
    const response = await (options?.fetch ?? fetch)(url, info);
    if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-     return streamingRequest(args, {
+     return yield* streamingRequest(args, {
        ...options,
        wait_for_model: true
      });
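
Note: the yield* here fixes the 503 retry path. Inside an async generator, return streamingRequest(...) only sets the inner generator object as the outer generator's return value, which a for await consumer never sees, so a retried stream yielded no tokens. yield* delegates to the inner generator and re-yields every chunk. Reduced to a toy case (names are illustrative):

    async function* inner() {
      yield "a";
      yield "b";
    }
    async function* broken() {
      return inner(); // generator object becomes the return value; nothing is yielded
    }
    async function* fixed() {
      return yield* inner(); // delegates: "a" and "b" reach the consumer
    }
    for await (const x of broken()) console.log(x); // prints nothing
    for await (const x of fixed()) console.log(x);  // prints "a", "b"
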
@@ -533,18 +590,6 @@ async function zeroShotImageClassification(args, options) {
    return res;
  }

- // src/tasks/nlp/conversational.ts
- async function conversational(args, options) {
-   const res = await request(args, { ...options, taskHint: "conversational" });
-   const isValidOutput = Array.isArray(res.conversation.generated_responses) && res.conversation.generated_responses.every((x) => typeof x === "string") && Array.isArray(res.conversation.past_user_inputs) && res.conversation.past_user_inputs.every((x) => typeof x === "string") && typeof res.generated_text === "string" && (typeof res.warnings === "undefined" || Array.isArray(res.warnings) && res.warnings.every((x) => typeof x === "string"));
-   if (!isValidOutput) {
-     throw new InferenceOutputError(
-       "Expected {conversation: {generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]}"
-     );
-   }
-   return res;
- }
-
  // src/tasks/nlp/featureExtraction.ts
  async function featureExtraction(args, options) {
    const defaultTask = args.model ? await getDefaultTask(args.model, args.accessToken, options) : void 0;
@@ -714,7 +759,7 @@ async function translation(args, options) {
    if (!isValidOutput) {
      throw new InferenceOutputError("Expected type Array<{translation_text: string}>");
    }
-   return res?.[0];
+   return res?.length === 1 ? res?.[0] : res;
  }

  // src/tasks/nlp/zeroShotClassification.ts
@@ -851,14 +896,14 @@ var HfInferenceEndpoint = class {
      }
    }
  };
- export {
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
    HfInference,
    HfInferenceEndpoint,
    InferenceOutputError,
    audioClassification,
    audioToAudio,
    automaticSpeechRecognition,
-   conversational,
    documentQuestionAnswering,
    featureExtraction,
    fillMask,
@@ -885,4 +930,4 @@ export {
    visualQuestionAnswering,
    zeroShotClassification,
    zeroShotImageClassification
- };
+ });
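
Note: this first file (its name is truncated from the diff header) is evidently the CommonJS bundle: it gains "use strict", the __toCommonJS helper and a real module.exports, and the trailing 0 && (module.exports = { ... }) is esbuild's dead-code annotation that lets Node's named-export detection for CJS modules see the export names. Assuming the package's main field points at this bundle, CommonJS consumers can load it with require:

    // CommonJS usage sketch; token is a placeholder
    const { HfInference } = require("@huggingface/inference");
    const hf = new HfInference(process.env.HF_TOKEN);
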
package/dist/index.d.ts CHANGED
@@ -1,10 +1,11 @@
+
  export interface Options {
    /**
     * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
     */
    retry_on_error?: boolean;
    /**
-    * (Default: true). Boolean. There is a cache layer on the inference API to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
+    * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
     */
    use_cache?: boolean;
    /**
@@ -39,35 +40,51 @@ export type InferenceTask =
    | "audio-classification"
    | "audio-to-audio"
    | "automatic-speech-recognition"
-   | "conversational"
    | "depth-estimation"
    | "document-question-answering"
    | "feature-extraction"
    | "fill-mask"
+   | "graph-ml"
    | "image-classification"
+   | "image-feature-extraction"
    | "image-segmentation"
+   | "image-text-to-text"
+   | "image-to-3d"
    | "image-to-image"
    | "image-to-text"
+   | "image-to-video"
+   | "mask-generation"
+   | "multiple-choice"
    | "object-detection"
-   | "video-classification"
    | "question-answering"
    | "reinforcement-learning"
+   | "robotics"
    | "sentence-similarity"
    | "summarization"
    | "table-question-answering"
+   | "table-to-text"
    | "tabular-classification"
    | "tabular-regression"
+   | "tabular-to-text"
    | "text-classification"
    | "text-generation"
+   | "text-retrieval"
+   | "text-to-3d"
+   | "text-to-audio"
    | "text-to-image"
    | "text-to-speech"
    | "text-to-video"
+   | "text2text-generation"
+   | "time-series-forecasting"
    | "token-classification"
    | "translation"
    | "unconditional-image-generation"
+   | "video-classification"
    | "visual-question-answering"
+   | "voice-activity-detection"
    | "zero-shot-classification"
-   | "zero-shot-image-classification";
+   | "zero-shot-image-classification"
+   | "zero-shot-object-detection";

  export interface BaseArgs {
    /**
@@ -77,7 +94,7 @@ export interface BaseArgs {
     */
    accessToken?: string;
    /**
-    * The model to use. Can be a full URL for HF inference endpoints.
+    * The model to use. Can be a full URL for a dedicated inference endpoint.
     *
     * If not specified, will call huggingface.co/api/tasks to get the default model for the task.
     */
@@ -178,7 +195,7 @@ export type TextToSpeechOutput = Blob;
   */
  export function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise<TextToSpeechOutput>;
  /**
-  * Primitive to make custom calls to the inference API
+  * Primitive to make custom calls to Inference Endpoints
   */
  export function request<T>(
    args: RequestArgs,
@@ -483,65 +500,6 @@ export function visualQuestionAnswering(
    args: VisualQuestionAnsweringArgs,
    options?: Options
  ): Promise<VisualQuestionAnsweringOutput>;
- export type ConversationalArgs = BaseArgs & {
-   inputs: {
-     /**
-      * A list of strings corresponding to the earlier replies from the model.
-      */
-     generated_responses?: string[];
-     /**
-      * A list of strings corresponding to the earlier replies from the user. Should be of the same length of generated_responses.
-      */
-     past_user_inputs?: string[];
-     /**
-      * The last input from the user in the conversation.
-      */
-     text: string;
-   };
-   parameters?: {
-     /**
-      * (Default: None). Integer to define the maximum length in tokens of the output summary.
-      */
-     max_length?: number;
-     /**
-      * (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit.
-      */
-     max_time?: number;
-     /**
-      * (Default: None). Integer to define the minimum length in tokens of the output summary.
-      */
-     min_length?: number;
-     /**
-      * (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes.
-      */
-     repetition_penalty?: number;
-     /**
-      * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
-      */
-     temperature?: number;
-     /**
-      * (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
-      */
-     top_k?: number;
-     /**
-      * (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p.
-      */
-     top_p?: number;
-   };
- };
- export interface ConversationalOutput {
-   conversation: {
-     generated_responses: string[];
-     past_user_inputs: string[];
-   };
-   generated_text: string;
-   warnings: string[];
- }
- /**
-  * This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large.
-  *
-  */
- export function conversational(args: ConversationalArgs, options?: Options): Promise<ConversationalOutput>;
  export type FeatureExtractionArgs = BaseArgs & {
    /**
     * The inputs is a string or a list of strings to get the features from.
@@ -744,68 +702,13 @@ export function textClassification(
    args: TextClassificationArgs,
    options?: Options
  ): Promise<TextClassificationOutput>;
- export type TextGenerationArgs = BaseArgs & {
-   /**
-    * A string to be generated from
-    */
-   inputs: string;
-   parameters?: {
-     /**
-      * (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise.
-      */
-     do_sample?: boolean;
-     /**
-      * (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated.
-      */
-     max_new_tokens?: number;
-     /**
-      * (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results.
-      */
-     max_time?: number;
-     /**
-      * (Default: 1). Integer. The number of proposition you want to be returned.
-      */
-     num_return_sequences?: number;
-     /**
-      * (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes.
-      */
-     repetition_penalty?: number;
-     /**
-      * (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting.
-      */
-     return_full_text?: boolean;
-     /**
-      * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
-      */
-     temperature?: number;
-     /**
-      * (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
-      */
-     top_k?: number;
-     /**
-      * (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p.
-      */
-     top_p?: number;
-     /**
-      * (Default: None). Integer. The maximum number of tokens from the input.
-      */
-     truncate?: number;
-     /**
-      * (Default: []) List of strings. The model will stop generating text when one of the strings in the list is generated.
-      **/
-     stop_sequences?: string[];
-   };
- };
- export interface TextGenerationOutput {
-   /**
-    * The continuated string
-    */
-   generated_text: string;
- }
  /**
   * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
   */
- export function textGeneration(args: TextGenerationArgs, options?: Options): Promise<TextGenerationOutput>;
+ export function textGeneration(
+   args: BaseArgs & TextGenerationInput,
+   options?: Options
+ ): Promise<TextGenerationOutput>;
  export type TextGenerationStreamFinishReason =
    /** number of generated tokens == `max_new_tokens` */
    | "length"
@@ -883,7 +786,7 @@ export interface TextGenerationStreamOutput {
   * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
   */
  export function textGenerationStream(
-   args: TextGenerationArgs,
+   args: BaseArgs & TextGenerationInput,
    options?: Options
  ): AsyncGenerator<TextGenerationStreamOutput>;
  export type TokenClassificationArgs = BaseArgs & {
@@ -942,9 +845,10 @@ export type TranslationArgs = BaseArgs & {
    /**
     * A string to be translated
     */
-   inputs: string;
+   inputs: string | string[];
  };
- export interface TranslationOutput {
+ export type TranslationOutput = TranslationOutputValue | TranslationOutputValue[];
+ export interface TranslationOutputValue {
    /**
     * The string after translation
     */
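
Note: paired with the runtime change above (return res?.length === 1 ? res?.[0] : res;), translation now accepts a batch of inputs and returns an array of values when the response contains more than one element, so callers should narrow with Array.isArray. A minimal sketch (token and model name are placeholders):

    import { translation } from "@huggingface/inference";

    const res = await translation({
      accessToken: "hf_xxx",                   // placeholder
      model: "t5-base",
      inputs: ["Hello world", "How are you?"], // now string | string[]
    });
    // TranslationOutput is TranslationOutputValue | TranslationOutputValue[]
    const texts = Array.isArray(res)
      ? res.map((r) => r.translation_text)
      : [res.translation_text];
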
@@ -1058,7 +962,7 @@ export class HfInference {
   */
  textToSpeech(args: Omit<TextToSpeechArgs, 'accessToken'>, options?: Options): Promise<TextToSpeechOutput>;
  /**
-  * Primitive to make custom calls to the inference API
+  * Primitive to make custom calls to Inference Endpoints
   */
  request<T>(
    args: Omit<RequestArgs, 'accessToken'>,
@@ -1138,11 +1042,6 @@ export class HfInference {
    args: Omit<VisualQuestionAnsweringArgs, 'accessToken'>,
    options?: Options
  ): Promise<VisualQuestionAnsweringOutput>;
- /**
-  * This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large.
-  *
-  */
- conversational(args: Omit<ConversationalArgs, 'accessToken'>, options?: Options): Promise<ConversationalOutput>;
  /**
   * This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search.
   */
@@ -1189,12 +1088,15 @@ export class HfInference {
  /**
   * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
   */
- textGeneration(args: Omit<TextGenerationArgs, 'accessToken'>, options?: Options): Promise<TextGenerationOutput>;
+ textGeneration(
+   args: Omit<BaseArgs, 'accessToken'> & TextGenerationInput,
+   options?: Options
+ ): Promise<TextGenerationOutput>;
  /**
   * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
   */
  textGenerationStream(
-   args: Omit<TextGenerationArgs, 'accessToken'>,
+   args: Omit<BaseArgs, 'accessToken'> & TextGenerationInput,
    options?: Options
  ): AsyncGenerator<TextGenerationStreamOutput>;
  /**
@@ -1263,7 +1165,7 @@ export class HfInferenceEndpoint {
   */
  textToSpeech(args: Omit<TextToSpeechArgs, 'accessToken' | 'model'>, options?: Options): Promise<TextToSpeechOutput>;
  /**
-  * Primitive to make custom calls to the inference API
+  * Primitive to make custom calls to Inference Endpoints
   */
  request<T>(
    args: Omit<RequestArgs, 'accessToken' | 'model'>,
@@ -1343,11 +1245,6 @@ export class HfInferenceEndpoint {
    args: Omit<VisualQuestionAnsweringArgs, 'accessToken' | 'model'>,
    options?: Options
  ): Promise<VisualQuestionAnsweringOutput>;
- /**
-  * This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large.
-  *
-  */
- conversational(args: Omit<ConversationalArgs, 'accessToken' | 'model'>, options?: Options): Promise<ConversationalOutput>;
  /**
   * This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search.
   */
@@ -1394,12 +1291,15 @@ export class HfInferenceEndpoint {
  /**
   * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
   */
- textGeneration(args: Omit<TextGenerationArgs, 'accessToken' | 'model'>, options?: Options): Promise<TextGenerationOutput>;
+ textGeneration(
+   args: Omit<BaseArgs, 'accessToken' | 'model'> & TextGenerationInput,
+   options?: Options
+ ): Promise<TextGenerationOutput>;
  /**
   * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
   */
  textGenerationStream(
-   args: Omit<TextGenerationArgs, 'accessToken' | 'model'>,
+   args: Omit<BaseArgs, 'accessToken' | 'model'> & TextGenerationInput,
    options?: Options
  ): AsyncGenerator<TextGenerationStreamOutput>;
  /**
package/dist/index.js CHANGED
@@ -1,61 +1,9 @@
  /// <reference path="./index.d.ts" />
- "use strict";
  var __defProp = Object.defineProperty;
- var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
- var __getOwnPropNames = Object.getOwnPropertyNames;
- var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
    for (var name in all)
      __defProp(target, name, { get: all[name], enumerable: true });
  };
- var __copyProps = (to, from, except, desc) => {
-   if (from && typeof from === "object" || typeof from === "function") {
-     for (let key of __getOwnPropNames(from))
-       if (!__hasOwnProp.call(to, key) && key !== except)
-         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
-   }
-   return to;
- };
- var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
-
- // src/index.ts
- var src_exports = {};
- __export(src_exports, {
-   HfInference: () => HfInference,
-   HfInferenceEndpoint: () => HfInferenceEndpoint,
-   InferenceOutputError: () => InferenceOutputError,
-   audioClassification: () => audioClassification,
-   audioToAudio: () => audioToAudio,
-   automaticSpeechRecognition: () => automaticSpeechRecognition,
-   conversational: () => conversational,
-   documentQuestionAnswering: () => documentQuestionAnswering,
-   featureExtraction: () => featureExtraction,
-   fillMask: () => fillMask,
-   imageClassification: () => imageClassification,
-   imageSegmentation: () => imageSegmentation,
-   imageToImage: () => imageToImage,
-   imageToText: () => imageToText,
-   objectDetection: () => objectDetection,
-   questionAnswering: () => questionAnswering,
-   request: () => request,
-   sentenceSimilarity: () => sentenceSimilarity,
-   streamingRequest: () => streamingRequest,
-   summarization: () => summarization,
-   tableQuestionAnswering: () => tableQuestionAnswering,
-   tabularClassification: () => tabularClassification,
-   tabularRegression: () => tabularRegression,
-   textClassification: () => textClassification,
-   textGeneration: () => textGeneration,
-   textGenerationStream: () => textGenerationStream,
-   textToImage: () => textToImage,
-   textToSpeech: () => textToSpeech,
-   tokenClassification: () => tokenClassification,
-   translation: () => translation,
-   visualQuestionAnswering: () => visualQuestionAnswering,
-   zeroShotClassification: () => zeroShotClassification,
-   zeroShotImageClassification: () => zeroShotImageClassification
- });
- module.exports = __toCommonJS(src_exports);

  // src/tasks/index.ts
  var tasks_exports = {};
@@ -63,7 +11,6 @@ __export(tasks_exports, {
    audioClassification: () => audioClassification,
    audioToAudio: () => audioToAudio,
    automaticSpeechRecognition: () => automaticSpeechRecognition,
-   conversational: () => conversational,
    documentQuestionAnswering: () => documentQuestionAnswering,
    featureExtraction: () => featureExtraction,
    fillMask: () => fillMask,
@@ -134,7 +81,15 @@ var tasks = null;
  async function makeRequestOptions(args, options) {
    const { accessToken, model: _model, ...otherArgs } = args;
    let { model } = args;
-   const { forceTask: task, includeCredentials, taskHint, ...otherOptions } = options ?? {};
+   const {
+     forceTask: task,
+     includeCredentials,
+     taskHint,
+     wait_for_model,
+     use_cache,
+     dont_load_model,
+     ...otherOptions
+   } = options ?? {};
    const headers = {};
    if (accessToken) {
      headers["Authorization"] = `Bearer ${accessToken}`;
@@ -157,16 +112,15 @@ async function makeRequestOptions(args, options) {
    const binary = "data" in args && !!args.data;
    if (!binary) {
      headers["Content-Type"] = "application/json";
-   } else {
-     if (options?.wait_for_model) {
-       headers["X-Wait-For-Model"] = "true";
-     }
-     if (options?.use_cache === false) {
-       headers["X-Use-Cache"] = "false";
-     }
-     if (options?.dont_load_model) {
-       headers["X-Load-Model"] = "0";
-     }
+   }
+   if (wait_for_model) {
+     headers["X-Wait-For-Model"] = "true";
+   }
+   if (use_cache === false) {
+     headers["X-Use-Cache"] = "false";
+   }
+   if (dont_load_model) {
+     headers["X-Load-Model"] = "0";
    }
    const url = (() => {
      if (isUrl(model)) {
@@ -327,7 +281,7 @@ async function* streamingRequest(args, options) {
    const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
    const response = await (options?.fetch ?? fetch)(url, info);
    if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-     return streamingRequest(args, {
+     return yield* streamingRequest(args, {
        ...options,
        wait_for_model: true
      });
@@ -585,18 +539,6 @@ async function zeroShotImageClassification(args, options) {
    return res;
  }

- // src/tasks/nlp/conversational.ts
- async function conversational(args, options) {
-   const res = await request(args, { ...options, taskHint: "conversational" });
-   const isValidOutput = Array.isArray(res.conversation.generated_responses) && res.conversation.generated_responses.every((x) => typeof x === "string") && Array.isArray(res.conversation.past_user_inputs) && res.conversation.past_user_inputs.every((x) => typeof x === "string") && typeof res.generated_text === "string" && (typeof res.warnings === "undefined" || Array.isArray(res.warnings) && res.warnings.every((x) => typeof x === "string"));
-   if (!isValidOutput) {
-     throw new InferenceOutputError(
-       "Expected {conversation: {generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]}"
-     );
-   }
-   return res;
- }
-
  // src/tasks/nlp/featureExtraction.ts
  async function featureExtraction(args, options) {
    const defaultTask = args.model ? await getDefaultTask(args.model, args.accessToken, options) : void 0;
@@ -766,7 +708,7 @@ async function translation(args, options) {
    if (!isValidOutput) {
      throw new InferenceOutputError("Expected type Array<{translation_text: string}>");
    }
-   return res?.[0];
+   return res?.length === 1 ? res?.[0] : res;
  }

  // src/tasks/nlp/zeroShotClassification.ts
@@ -903,15 +845,13 @@ var HfInferenceEndpoint = class {
      }
    }
  };
- // Annotate the CommonJS export names for ESM import in node:
- 0 && (module.exports = {
+ export {
    HfInference,
    HfInferenceEndpoint,
    InferenceOutputError,
    audioClassification,
    audioToAudio,
    automaticSpeechRecognition,
-   conversational,
    documentQuestionAnswering,
    featureExtraction,
    fillMask,
@@ -938,4 +878,4 @@ var HfInferenceEndpoint = class {
    visualQuestionAnswering,
    zeroShotClassification,
    zeroShotImageClassification
- });
+ };
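
Note: with the CommonJS scaffolding moved out, dist/index.js ends in a plain export statement again, i.e. it is a pure ESM module. Assuming the usual dual-package setup (the module/exports fields pointing here, main pointing at the CJS bundle shown first), ESM consumers keep importing as before:

    // ESM usage sketch
    import { HfInference } from "@huggingface/inference";
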