@huggingface/inference 2.6.4 → 2.6.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.ts CHANGED
@@ -1,10 +1,11 @@
+
  export interface Options {
  /**
  * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
  */
  retry_on_error?: boolean;
  /**
- * (Default: true). Boolean. There is a cache layer on the inference API to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
+ * (Default: true). Boolean. There is a cache layer on Inference API (serverless) to speedup requests we have already seen. Most models can use those results as is as models are deterministic (meaning the results will be the same anyway). However if you use a non deterministic model, you can set this parameter to prevent the caching mechanism from being used resulting in a real new query.
  */
  use_cache?: boolean;
  /**
@@ -39,35 +40,51 @@ export type InferenceTask =
  | "audio-classification"
  | "audio-to-audio"
  | "automatic-speech-recognition"
- | "conversational"
  | "depth-estimation"
  | "document-question-answering"
  | "feature-extraction"
  | "fill-mask"
+ | "graph-ml"
  | "image-classification"
+ | "image-feature-extraction"
  | "image-segmentation"
+ | "image-text-to-text"
+ | "image-to-3d"
  | "image-to-image"
  | "image-to-text"
+ | "image-to-video"
+ | "mask-generation"
+ | "multiple-choice"
  | "object-detection"
- | "video-classification"
  | "question-answering"
  | "reinforcement-learning"
+ | "robotics"
  | "sentence-similarity"
  | "summarization"
  | "table-question-answering"
+ | "table-to-text"
  | "tabular-classification"
  | "tabular-regression"
+ | "tabular-to-text"
  | "text-classification"
  | "text-generation"
+ | "text-retrieval"
+ | "text-to-3d"
+ | "text-to-audio"
  | "text-to-image"
  | "text-to-speech"
  | "text-to-video"
+ | "text2text-generation"
+ | "time-series-forecasting"
  | "token-classification"
  | "translation"
  | "unconditional-image-generation"
+ | "video-classification"
  | "visual-question-answering"
+ | "voice-activity-detection"
  | "zero-shot-classification"
- | "zero-shot-image-classification";
+ | "zero-shot-image-classification"
+ | "zero-shot-object-detection";

  export interface BaseArgs {
  /**
@@ -77,7 +94,7 @@ export interface BaseArgs {
  */
  accessToken?: string;
  /**
- * The model to use. Can be a full URL for HF inference endpoints.
+ * The model to use. Can be a full URL for a dedicated inference endpoint.
  *
  * If not specified, will call huggingface.co/api/tasks to get the default model for the task.
  */
@@ -178,7 +195,7 @@ export type TextToSpeechOutput = Blob;
  */
  export function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise<TextToSpeechOutput>;
  /**
- * Primitive to make custom calls to the inference API
+ * Primitive to make custom calls to Inference Endpoints
  */
  export function request<T>(
  args: RequestArgs,
@@ -483,65 +500,6 @@ export function visualQuestionAnswering(
  args: VisualQuestionAnsweringArgs,
  options?: Options
  ): Promise<VisualQuestionAnsweringOutput>;
- export type ConversationalArgs = BaseArgs & {
- inputs: {
- /**
- * A list of strings corresponding to the earlier replies from the model.
- */
- generated_responses?: string[];
- /**
- * A list of strings corresponding to the earlier replies from the user. Should be of the same length of generated_responses.
- */
- past_user_inputs?: string[];
- /**
- * The last input from the user in the conversation.
- */
- text: string;
- };
- parameters?: {
- /**
- * (Default: None). Integer to define the maximum length in tokens of the output summary.
- */
- max_length?: number;
- /**
- * (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit.
- */
- max_time?: number;
- /**
- * (Default: None). Integer to define the minimum length in tokens of the output summary.
- */
- min_length?: number;
- /**
- * (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes.
- */
- repetition_penalty?: number;
- /**
- * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
- */
- temperature?: number;
- /**
- * (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
- */
- top_k?: number;
- /**
- * (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p.
- */
- top_p?: number;
- };
- };
- export interface ConversationalOutput {
- conversation: {
- generated_responses: string[];
- past_user_inputs: string[];
- };
- generated_text: string;
- warnings: string[];
- }
- /**
- * This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large.
- *
- */
- export function conversational(args: ConversationalArgs, options?: Options): Promise<ConversationalOutput>;
  export type FeatureExtractionArgs = BaseArgs & {
  /**
  * The inputs is a string or a list of strings to get the features from.
@@ -744,68 +702,13 @@ export function textClassification(
  args: TextClassificationArgs,
  options?: Options
  ): Promise<TextClassificationOutput>;
- export type TextGenerationArgs = BaseArgs & {
- /**
- * A string to be generated from
- */
- inputs: string;
- parameters?: {
- /**
- * (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise.
- */
- do_sample?: boolean;
- /**
- * (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated.
- */
- max_new_tokens?: number;
- /**
- * (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results.
- */
- max_time?: number;
- /**
- * (Default: 1). Integer. The number of proposition you want to be returned.
- */
- num_return_sequences?: number;
- /**
- * (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes.
- */
- repetition_penalty?: number;
- /**
- * (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting.
- */
- return_full_text?: boolean;
- /**
- * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
- */
- temperature?: number;
- /**
- * (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
- */
- top_k?: number;
- /**
- * (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p.
- */
- top_p?: number;
- /**
- * (Default: None). Integer. The maximum number of tokens from the input.
- */
- truncate?: number;
- /**
- * (Default: []) List of strings. The model will stop generating text when one of the strings in the list is generated.
- * **/
- stop_sequences?: string[];
- };
- };
- export interface TextGenerationOutput {
- /**
- * The continuated string
- */
- generated_text: string;
- }
  /**
  * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
  */
- export function textGeneration(args: TextGenerationArgs, options?: Options): Promise<TextGenerationOutput>;
+ export function textGeneration(
+ args: BaseArgs & TextGenerationInput,
+ options?: Options
+ ): Promise<TextGenerationOutput>;
  export type TextGenerationStreamFinishReason =
  /** number of generated tokens == `max_new_tokens` */
  | "length"
@@ -883,7 +786,7 @@ export interface TextGenerationStreamOutput {
  * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
  */
  export function textGenerationStream(
- args: TextGenerationArgs,
+ args: BaseArgs & TextGenerationInput,
  options?: Options
  ): AsyncGenerator<TextGenerationStreamOutput>;
  export type TokenClassificationArgs = BaseArgs & {
@@ -942,9 +845,10 @@ export type TranslationArgs = BaseArgs & {
  /**
  * A string to be translated
  */
- inputs: string;
+ inputs: string | string[];
  };
- export interface TranslationOutput {
+ export type TranslationOutput = TranslationOutputValue | TranslationOutputValue[];
+ export interface TranslationOutputValue {
  /**
  * The string after translation
  */
@@ -1058,7 +962,7 @@ export class HfInference {
  */
  textToSpeech(args: Omit<TextToSpeechArgs, 'accessToken'>, options?: Options): Promise<TextToSpeechOutput>;
  /**
- * Primitive to make custom calls to the inference API
+ * Primitive to make custom calls to Inference Endpoints
  */
  request<T>(
  args: Omit<RequestArgs, 'accessToken'>,
@@ -1138,11 +1042,6 @@ export class HfInference {
  args: Omit<VisualQuestionAnsweringArgs, 'accessToken'>,
  options?: Options
  ): Promise<VisualQuestionAnsweringOutput>;
- /**
- * This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large.
- *
- */
- conversational(args: Omit<ConversationalArgs, 'accessToken'>, options?: Options): Promise<ConversationalOutput>;
  /**
  * This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search.
  */
@@ -1189,12 +1088,15 @@ export class HfInference {
  /**
  * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
  */
- textGeneration(args: Omit<TextGenerationArgs, 'accessToken'>, options?: Options): Promise<TextGenerationOutput>;
+ textGeneration(
+ args: Omit<BaseArgs, 'accessToken'> & TextGenerationInput,
+ options?: Options
+ ): Promise<TextGenerationOutput>;
  /**
  * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
  */
  textGenerationStream(
- args: Omit<TextGenerationArgs, 'accessToken'>,
+ args: Omit<BaseArgs, 'accessToken'> & TextGenerationInput,
  options?: Options
  ): AsyncGenerator<TextGenerationStreamOutput>;
  /**
@@ -1263,7 +1165,7 @@ export class HfInferenceEndpoint {
  */
  textToSpeech(args: Omit<TextToSpeechArgs, 'accessToken' | 'model'>, options?: Options): Promise<TextToSpeechOutput>;
  /**
- * Primitive to make custom calls to the inference API
+ * Primitive to make custom calls to Inference Endpoints
  */
  request<T>(
  args: Omit<RequestArgs, 'accessToken' | 'model'>,
@@ -1343,11 +1245,6 @@ export class HfInferenceEndpoint {
  args: Omit<VisualQuestionAnsweringArgs, 'accessToken' | 'model'>,
  options?: Options
  ): Promise<VisualQuestionAnsweringOutput>;
- /**
- * This task corresponds to any chatbot like structure. Models tend to have shorter max_length, so please check with caution when using a given model if you need long range dependency or not. Recommended model: microsoft/DialoGPT-large.
- *
- */
- conversational(args: Omit<ConversationalArgs, 'accessToken' | 'model'>, options?: Options): Promise<ConversationalOutput>;
  /**
  * This task reads some text and outputs raw float values, that are usually consumed as part of a semantic database/semantic search.
  */
@@ -1394,12 +1291,15 @@ export class HfInferenceEndpoint {
  /**
  * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
  */
- textGeneration(args: Omit<TextGenerationArgs, 'accessToken' | 'model'>, options?: Options): Promise<TextGenerationOutput>;
+ textGeneration(
+ args: Omit<BaseArgs, 'accessToken' | 'model'> & TextGenerationInput,
+ options?: Options
+ ): Promise<TextGenerationOutput>;
  /**
  * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
  */
  textGenerationStream(
- args: Omit<TextGenerationArgs, 'accessToken' | 'model'>,
+ args: Omit<BaseArgs, 'accessToken' | 'model'> & TextGenerationInput,
  options?: Options
  ): AsyncGenerator<TextGenerationStreamOutput>;
  /**
package/dist/index.js CHANGED
@@ -1,61 +1,9 @@
  /// <reference path="./index.d.ts" />
- "use strict";
  var __defProp = Object.defineProperty;
- var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
- var __getOwnPropNames = Object.getOwnPropertyNames;
- var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
  for (var name in all)
  __defProp(target, name, { get: all[name], enumerable: true });
  };
- var __copyProps = (to, from, except, desc) => {
- if (from && typeof from === "object" || typeof from === "function") {
- for (let key of __getOwnPropNames(from))
- if (!__hasOwnProp.call(to, key) && key !== except)
- __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
- }
- return to;
- };
- var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
-
- // src/index.ts
- var src_exports = {};
- __export(src_exports, {
- HfInference: () => HfInference,
- HfInferenceEndpoint: () => HfInferenceEndpoint,
- InferenceOutputError: () => InferenceOutputError,
- audioClassification: () => audioClassification,
- audioToAudio: () => audioToAudio,
- automaticSpeechRecognition: () => automaticSpeechRecognition,
- conversational: () => conversational,
- documentQuestionAnswering: () => documentQuestionAnswering,
- featureExtraction: () => featureExtraction,
- fillMask: () => fillMask,
- imageClassification: () => imageClassification,
- imageSegmentation: () => imageSegmentation,
- imageToImage: () => imageToImage,
- imageToText: () => imageToText,
- objectDetection: () => objectDetection,
- questionAnswering: () => questionAnswering,
- request: () => request,
- sentenceSimilarity: () => sentenceSimilarity,
- streamingRequest: () => streamingRequest,
- summarization: () => summarization,
- tableQuestionAnswering: () => tableQuestionAnswering,
- tabularClassification: () => tabularClassification,
- tabularRegression: () => tabularRegression,
- textClassification: () => textClassification,
- textGeneration: () => textGeneration,
- textGenerationStream: () => textGenerationStream,
- textToImage: () => textToImage,
- textToSpeech: () => textToSpeech,
- tokenClassification: () => tokenClassification,
- translation: () => translation,
- visualQuestionAnswering: () => visualQuestionAnswering,
- zeroShotClassification: () => zeroShotClassification,
- zeroShotImageClassification: () => zeroShotImageClassification
- });
- module.exports = __toCommonJS(src_exports);

  // src/tasks/index.ts
  var tasks_exports = {};
@@ -63,7 +11,6 @@ __export(tasks_exports, {
  audioClassification: () => audioClassification,
  audioToAudio: () => audioToAudio,
  automaticSpeechRecognition: () => automaticSpeechRecognition,
- conversational: () => conversational,
  documentQuestionAnswering: () => documentQuestionAnswering,
  featureExtraction: () => featureExtraction,
  fillMask: () => fillMask,
@@ -327,7 +274,7 @@ async function* streamingRequest(args, options) {
  const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
  const response = await (options?.fetch ?? fetch)(url, info);
  if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
- return streamingRequest(args, {
+ return yield* streamingRequest(args, {
  ...options,
  wait_for_model: true
  });
@@ -585,18 +532,6 @@ async function zeroShotImageClassification(args, options) {
  return res;
  }

- // src/tasks/nlp/conversational.ts
- async function conversational(args, options) {
- const res = await request(args, { ...options, taskHint: "conversational" });
- const isValidOutput = Array.isArray(res.conversation.generated_responses) && res.conversation.generated_responses.every((x) => typeof x === "string") && Array.isArray(res.conversation.past_user_inputs) && res.conversation.past_user_inputs.every((x) => typeof x === "string") && typeof res.generated_text === "string" && (typeof res.warnings === "undefined" || Array.isArray(res.warnings) && res.warnings.every((x) => typeof x === "string"));
- if (!isValidOutput) {
- throw new InferenceOutputError(
- "Expected {conversation: {generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]}"
- );
- }
- return res;
- }
-
  // src/tasks/nlp/featureExtraction.ts
  async function featureExtraction(args, options) {
  const defaultTask = args.model ? await getDefaultTask(args.model, args.accessToken, options) : void 0;
@@ -766,7 +701,7 @@ async function translation(args, options) {
  if (!isValidOutput) {
  throw new InferenceOutputError("Expected type Array<{translation_text: string}>");
  }
- return res?.[0];
+ return res?.length === 1 ? res?.[0] : res;
  }

  // src/tasks/nlp/zeroShotClassification.ts
@@ -903,15 +838,13 @@ var HfInferenceEndpoint = class {
  }
  }
  };
- // Annotate the CommonJS export names for ESM import in node:
- 0 && (module.exports = {
+ export {
  HfInference,
  HfInferenceEndpoint,
  InferenceOutputError,
  audioClassification,
  audioToAudio,
  automaticSpeechRecognition,
- conversational,
  documentQuestionAnswering,
  featureExtraction,
  fillMask,
@@ -938,4 +871,4 @@ var HfInferenceEndpoint = class {
  visualQuestionAnswering,
  zeroShotClassification,
  zeroShotImageClassification
- });
+ };
package/package.json CHANGED
@@ -1,10 +1,10 @@
  {
  "name": "@huggingface/inference",
- "version": "2.6.4",
- "packageManager": "pnpm@8.3.1",
+ "version": "2.6.5",
+ "packageManager": "pnpm@8.10.5",
  "license": "MIT",
  "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
- "description": "Typescript wrapper for the Hugging Face Inference API",
+ "description": "Typescript wrapper for the Hugging Face Inference Endpoints & Inference API",
  "repository": {
  "type": "git",
  "url": "https://github.com/huggingface/huggingface.js.git"
@@ -30,30 +30,28 @@
  ],
  "source": "src/index.ts",
  "types": "./dist/index.d.ts",
- "main": "./dist/index.js",
- "module": "./dist/index.mjs",
+ "main": "./dist/index.cjs",
+ "module": "./dist/index.js",
  "exports": {
  "types": "./dist/index.d.ts",
- "require": "./dist/index.js",
- "import": "./dist/index.mjs"
+ "require": "./dist/index.cjs",
+ "import": "./dist/index.js"
  },
+ "type": "module",
  "devDependencies": {
  "@types/node": "18.13.0",
- "ts-node": "^10.9.1",
- "typescript": "^5.0.4",
- "vite": "^4.1.4",
- "vitest": "^0.29.8"
+ "@huggingface/tasks": "^0.6.0"
  },
  "resolutions": {},
  "scripts": {
  "build": "tsup src/index.ts --format cjs,esm --clean && pnpm run dts",
- "dts": "ts-node scripts/generate-dts.ts",
+ "dts": "tsx scripts/generate-dts.ts",
  "lint": "eslint --quiet --fix --ext .cjs,.ts .",
  "lint:check": "eslint --ext .cjs,.ts .",
  "format": "prettier --write .",
  "format:check": "prettier --check .",
- "test": "vitest run --config vitest.config.ts",
- "test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.ts",
- "type-check": "tsc"
+ "test": "vitest run --config vitest.config.mts",
+ "test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.mts",
+ "check": "tsc"
  }
  }
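
The package is now published as ESM first ("type": "module"), with the CommonJS build renamed to index.cjs and "main"/"module" swapped accordingly. Both module systems keep resolving through the "exports" map above; a minimal sketch of the two consumption paths:

    // ESM — resolved via the "import" condition to ./dist/index.js
    import { HfInference } from "@huggingface/inference";

    // CommonJS — resolved via the "require" condition to ./dist/index.cjs
    const { HfInference } = require("@huggingface/inference");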
@@ -2,7 +2,7 @@ import { isUrl } from "./isUrl";

  /**
  * We want to make calls to the huggingface hub the least possible, eg if
- * someone is calling the inference API 1000 times per second, we don't want
+ * someone is calling Inference Endpoints 1000 times per second, we don't want
  * to make 1000 calls to the hub to get the task name.
  */
  const taskCache = new Map<string, { task: string; date: Date }>();
@@ -2,7 +2,7 @@ import type { InferenceTask, Options, RequestArgs } from "../../types";
  import { makeRequestOptions } from "../../lib/makeRequestOptions";

  /**
- * Primitive to make custom calls to the inference API
+ * Primitive to make custom calls to Inference Endpoints
  */
  export async function request<T>(
  args: RequestArgs,
@@ -19,7 +19,7 @@ export async function* streamingRequest<T>(
  const response = await (options?.fetch ?? fetch)(url, info);

  if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
- return streamingRequest(args, {
+ return yield* streamingRequest(args, {
  ...options,
  wait_for_model: true,
  });
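
This is the bug fix behind the yield* change above (its compiled copy appears in dist/index.js earlier in this diff): streamingRequest is an async generator, and calling a generator function only creates a generator object, so `return streamingRequest(...)` ended the outer generator without emitting a single token from the retried stream. `yield*` delegates to the inner generator and forwards its values. A standalone sketch of the difference (names are illustrative, not from the package):

    async function* inner(): AsyncGenerator<number> {
      yield 1;
      yield 2;
    }

    async function* broken(): AsyncGenerator<number> {
      // Returning the generator object finishes `broken` immediately;
      // a consumer's for-await loop never sees 1 or 2.
      return inner();
    }

    async function* fixed(): AsyncGenerator<number> {
      // `yield*` delegates: every value from `inner` flows through.
      return yield* inner();
    }

    // for await (const n of broken()) console.log(n); // prints nothing
    // for await (const n of fixed()) console.log(n);  // prints 1, 2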
@@ -18,7 +18,6 @@ export * from "./cv/imageToImage";
  export * from "./cv/zeroShotImageClassification";

  // Natural Language Processing tasks
- export * from "./nlp/conversational";
  export * from "./nlp/featureExtraction";
  export * from "./nlp/fillMask";
  export * from "./nlp/questionAnswering";
@@ -1,71 +1,15 @@
+ import type { TextGenerationInput, TextGenerationOutput } from "@huggingface/tasks/src/tasks/text-generation/inference";
  import { InferenceOutputError } from "../../lib/InferenceOutputError";
  import type { BaseArgs, Options } from "../../types";
  import { request } from "../custom/request";

- export type TextGenerationArgs = BaseArgs & {
- /**
- * A string to be generated from
- */
- inputs: string;
- parameters?: {
- /**
- * (Optional: True). Bool. Whether or not to use sampling, use greedy decoding otherwise.
- */
- do_sample?: boolean;
- /**
- * (Default: None). Int (0-250). The amount of new tokens to be generated, this does not include the input length it is a estimate of the size of generated text you want. Each new tokens slows down the request, so look for balance between response times and length of text generated.
- */
- max_new_tokens?: number;
- /**
- * (Default: None). Float (0-120.0). The amount of time in seconds that the query should take maximum. Network can cause some overhead so it will be a soft limit. Use that in combination with max_new_tokens for best results.
- */
- max_time?: number;
- /**
- * (Default: 1). Integer. The number of proposition you want to be returned.
- */
- num_return_sequences?: number;
- /**
- * (Default: None). Float (0.0-100.0). The more a token is used within generation the more it is penalized to not be picked in successive generation passes.
- */
- repetition_penalty?: number;
- /**
- * (Default: True). Bool. If set to False, the return results will not contain the original query making it easier for prompting.
- */
- return_full_text?: boolean;
- /**
- * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
- */
- temperature?: number;
- /**
- * (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
- */
- top_k?: number;
- /**
- * (Default: None). Float to define the tokens that are within the sample operation of text generation. Add tokens in the sample for more probable to least probable until the sum of the probabilities is greater than top_p.
- */
- top_p?: number;
- /**
- * (Default: None). Integer. The maximum number of tokens from the input.
- */
- truncate?: number;
- /**
- * (Default: []) List of strings. The model will stop generating text when one of the strings in the list is generated.
- * **/
- stop_sequences?: string[];
- };
- };
-
- export interface TextGenerationOutput {
- /**
- * The continuated string
- */
- generated_text: string;
- }
-
  /**
  * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
  */
- export async function textGeneration(args: TextGenerationArgs, options?: Options): Promise<TextGenerationOutput> {
+ export async function textGeneration(
+ args: BaseArgs & TextGenerationInput,
+ options?: Options
+ ): Promise<TextGenerationOutput> {
  const res = await request<TextGenerationOutput[]>(args, {
  ...options,
  taskHint: "text-generation",
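
textGeneration now takes its input and output types from the shared @huggingface/tasks package instead of the local TextGenerationArgs definition removed above. Call sites that pass `inputs` plus a `parameters` object should keep compiling; a hedged usage sketch (the model name, token, and parameter values are illustrative):

    import { HfInference } from "@huggingface/inference";

    const hf = new HfInference("hf_...");  // placeholder token

    const out = await hf.textGeneration({
      model: "gpt2",
      inputs: "The answer to the universe is",
      parameters: { max_new_tokens: 20, temperature: 0.7 },
    });
    console.log(out.generated_text);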
@@ -1,6 +1,7 @@
- import type { Options } from "../../types";
+ import type { BaseArgs, Options } from "../../types";
  import { streamingRequest } from "../custom/streamingRequest";
- import type { TextGenerationArgs } from "./textGeneration";
+
+ import type { TextGenerationInput } from "@huggingface/tasks/src/tasks/text-generation/inference";

  export interface TextGenerationStreamToken {
  /** Token ID from the model tokenizer */
@@ -85,7 +86,7 @@ export interface TextGenerationStreamOutput {
  * Use to continue text from a prompt. Same as `textGeneration` but returns generator that can be read one token at a time
  */
  export async function* textGenerationStream(
- args: TextGenerationArgs,
+ args: BaseArgs & TextGenerationInput,
  options?: Options
  ): AsyncGenerator<TextGenerationStreamOutput> {
  yield* streamingRequest<TextGenerationStreamOutput>(args, {
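
textGenerationStream gets the same signature change, while its streaming output types (TextGenerationStreamOutput and friends) stay in this file. A minimal streaming sketch, continuing the textGeneration example above (same hypothetical `hf` client and model):

    for await (const chunk of hf.textGenerationStream({
      model: "gpt2",
      inputs: "One token at a time:",
      parameters: { max_new_tokens: 10 },
    })) {
      // `chunk.token.text` is the newly generated token;
      // `chunk.generated_text` is only populated on the final chunk.
      process.stdout.write(chunk.token.text);
    }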
@@ -6,21 +6,23 @@ export type TranslationArgs = BaseArgs & {
  /**
  * A string to be translated
  */
- inputs: string;
+ inputs: string | string[];
  };

- export interface TranslationOutput {
+ export interface TranslationOutputValue {
  /**
  * The string after translation
  */
  translation_text: string;
  }

+ export type TranslationOutput = TranslationOutputValue | TranslationOutputValue[];
+
  /**
  * This task is well known to translate text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en.
  */
  export async function translation(args: TranslationArgs, options?: Options): Promise<TranslationOutput> {
- const res = await request<TranslationOutput[]>(args, {
+ const res = await request<TranslationOutputValue[]>(args, {
  ...options,
  taskHint: "translation",
@@ -28,5 +30,5 @@ export async function translation(args: TranslationArgs, options?: Options): Pro
  if (!isValidOutput) {
  throw new InferenceOutputError("Expected type Array<{translation_text: string}>");
  }
- return res?.[0];
+ return res?.length === 1 ? res?.[0] : res;
  }
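
With this change, translation accepts either a single string or a batch, and the return shape mirrors the response: a lone TranslationOutputValue when the response has exactly one element, an array otherwise (note that a one-element batch therefore also collapses to a single object, since the implementation keys on `res.length === 1`). A usage sketch, using the model recommended in the doc comment above (token is a placeholder):

    import { translation } from "@huggingface/inference";

    // Single input -> single { translation_text } object
    const one = await translation({
      accessToken: "hf_...",  // placeholder token
      model: "Helsinki-NLP/opus-mt-ru-en",
      inputs: "Меня зовут Вольфганг и я живу в Берлине",
    });

    // Batch of two -> array of { translation_text } objects
    const many = await translation({
      accessToken: "hf_...",  // placeholder token
      model: "Helsinki-NLP/opus-mt-ru-en",
      inputs: ["Привет, мир", "Как дела?"],
    });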